f"{cls.__name__}_{t_names}"
key = "key"
parameters0 = f"key:{t[0].__name__}, "
if len(t) == 2:
parameters = parameters0 + f"value:{t[1].__name__}"
value = "value"
else:
parameters = parameters0 + ", ".join(f"value{i}:{ti.__name__}" for i, ti in enumerate(t[1:], 1))
value = "(" + ", ".join(f"value{i}" for i in range(1, len(t))) + ")"
types = "(" + ", ".join(ti.__name__ for ti in t) + ")"
text = f"""
class {name}(cls):
__name__ = '{cls.__name__}[{t_names}]'
def option(self, {parameters}):
# print("mapping meta got", {key}, "=", {value})
self.dict[{key}] = {value}
__types__ = {types}
"""
globals = {'cls': cls, 't': t}
# print("TEXT", text)
exec(text, globals)
return globals[name]
def __repr__(cls):
return f'<{cls.__name__}>'
class mapping(MultiOption, metaclass=MappingMeta):
def init(self, default):
self.dict = {}
if default is not empty:
self.dict.update(dict(default))
def option(self, k:str, v:str):
if k in self.dict:
raise AppealUsageError("defined {k} more than once")
self.dict[k] = v
def render(self):
return self.dict
@must_be_instance
def split(*separators, strip=False):
"""
Creates a converter function that splits a string
based on one or more separator strings.
If you don't supply any separators, splits on
any whitespace.
If strip is True, also calls strip() on the
strings after splitting.
"""
if not separators:
def split(str):
return str.split()
return split
if not all((s and isinstance(s, str)) for s in separators):
raise AppealConfigurationError("split(): every separator must be a non-empty string")
def split(str):
values = multisplit(str, separators)
if strip:
values = [s.strip() for s in values]
return values
return split
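# Illustrative sketch only (not part of the original Appeal source): shows the
# intended behaviour of a converter returned by split(). It assumes that
# @must_be_instance merely marks the factory (so calling split() returns the
# converter directly) and that multisplit() splits on any of the given
# separator strings, as the docstring above describes.
def _example_split_converter():
    whitespace_split = split()                 # no separators -> str.split()
    assert whitespace_split("a b  c") == ["a", "b", "c"]
    comma_split = split(",", ";", strip=True)  # relies on multisplit()
    return comma_split("x, y; z")              # expected: ["x", "y", "z"]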
@must_be_instance
def validate(*values, type=None):
"""
Creates a converter function that validates a value
from the command-line.
values is a list of permissible values.
type is the type for the value. If not specified,
type defaults to builtins.type(values[0]).
If the value from the command-line is one of the values,
returns value. Otherwise reports a usage error.
"""
if not values:
raise AppealConfigurationError("validate() called without any values.")
    if type is None:
type = builtins.type(values[0])
failed = []
for value in values:
if not isinstance(value, type):
failed.append(value)
if failed:
failed = " ".join(repr(x) for x in failed)
raise AppealConfigurationError("validate() called with these non-homogeneous values {failed}")
values_set = set(values)
def validate(value:type):
if value not in values_set:
raise AppealUsageError(f"illegal value {value!r}, should be one of {' '.join(repr(v) for v in values)}")
return value
return validate
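# Illustrative sketch only (not part of the original Appeal source): shows how a
# validate() converter accepts listed values and rejects anything else, again
# assuming @must_be_instance simply marks the factory.
def _example_validate_converter():
    color = validate("red", "green", "blue")
    assert color("green") == "green"
    # color("purple") would raise AppealUsageError("illegal value 'purple', ...")
    return color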
@must_be_instance
def validate_range(start, stop=None, *, type=None, clamp=False):
"""
Creates a converter function that validates that
a value from the command-line is within a range.
start and stop are like the start and stop
arguments for range().
type is the type for the value. If unspecified,
it defaults to builtins.type(start).
If the value from the command-line is within the
range established by start and stop, returns value.
If value is not inside the range of start and stop,
and clamp=True, returns either start or stop,
whichever is nearest.
If value is not inside the range of start and stop,
and clamp=False, raise a usage error.
"""
if type is None:
type = builtins.type(start)
if stop is None:
stop = start
start = type()
# ensure start is < stop
if start > stop:
start, stop = stop, start
def validate_range(value:type):
in_range = start <= value <= stop
if not in_range:
if not clamp:
raise AppealUsageError(f"illegal value {value}, should be {start} <= value < {stop}")
if value >= stop:
value = stop
else:
value = start
return value
return validate_range
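# Illustrative sketch only (not part of the original Appeal source): shows the
# clamping behaviour of validate_range(), assuming @must_be_instance simply
# marks the factory.
def _example_validate_range_converter():
    port = validate_range(1, 65535, clamp=True)
    assert port(80) == 80          # inside the range, returned unchanged
    assert port(100000) == 65535   # above the range, clamped to stop
    assert port(-5) == 1           # below the range, clamped to start
    return port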
def no_arguments_callable(): pass
no_arguments_signature = inspect.signature(no_arguments_callable)
# this function isn't published as one of the _to_converter callables
def simple_type_to_converter(parameter, callable):
cls = simple_type_signatures.get(callable)
if not cls:
return None
if (callable == bool) and (parameter.kind == KEYWORD_ONLY):
return BooleanOptionConverter
return cls
none_and_empty = ((None, empty))
def unannotated_to_converter(parameter):
if (parameter.annotation in none_and_empty) and (parameter.default in none_and_empty):
return SimpleTypeConverterStr
def type_to_converter(parameter):
if not isinstance(parameter.annotation, type):
return None
cls = simple_type_to_converter(parameter, parameter.annotation)
if cls:
return cls
if issubclass(parameter.annotation, SingleOption):
return parameter.annotation
return None
def callable_to_converter(parameter):
if (parameter.annotation is empty) or (not builtins.callable(parameter.annotation)):
return None
if parameter.kind == KEYWORD_ONLY:
return Option
return Converter
illegal_inferred_types = {dict, set, tuple, list}
def inferred_type_to_converter(parameter):
if (parameter.annotation is not empty) or (parameter.default is empty):
return None
inferred_type = type(parameter.default)
# print(f"inferred_type_to_converter({parameter=})")
cls = simple_type_to_converter(parameter, inferred_type)
# print(f" {inferred_type=} {cls=}")
if cls:
return cls
if issubclass(inferred_type, SingleOption):
return inferred_type
if inferred_type in illegal_inferred_types:
return None
if parameter.kind == KEYWORD_ONLY:
return InferredOption
return InferredConverter
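# Illustrative sketch only (not part of the original Appeal source): an
# unannotated parameter whose default is neither a simple type, a SingleOption
# subclass, nor one of the illegal inferred types falls through to
# InferredConverter (or InferredOption when keyword-only). The pathlib.Path
# default below is just an arbitrary non-simple type chosen for the example.
def _example_inferred_converter():
    import pathlib
    def command(path=pathlib.Path("data")):
        pass
    param = inspect.signature(command).parameters["path"]
    return inferred_type_to_converter(param)   # expected: InferredConverter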
sequence_types = {tuple, list}
def sequence_to_converter(parameter):
if (parameter.annotation is not empty) or (parameter.default is empty):
return None
inferred_type = type(parameter.default)
if inferred_type not in sequence_types:
return None
if parameter.kind == KEYWORD_ONLY:
return InferredSequenceOption
return InferredSequenceConverter
def _default_option(option, appeal, callable, parameter_name, annotation, default):
if appeal.option_signature(option):
return False
appeal.option(parameter_name, option, annotation=annotation, default=default)(callable)
return True
def default_short_option(appeal, callable, parameter_name, annotation, default):
option = parameter_name_to_short_option(parameter_name)
if not _default_option(option, appeal, callable, parameter_name, annotation, default):
raise AppealConfigurationError(f"couldn't add default option {option} for {callable} parameter {parameter_name}")
def default_long_option(appeal, callable, parameter_name, annotation, default):
if len(parameter_name) < 2:
return
option = parameter_name_to_long_option(parameter_name)
if not _default_option(option,
appeal, callable, parameter_name, annotation, default):
raise AppealConfigurationError(f"couldn't add default option {option} for {callable} parameter {parameter_name}")
def default_options(appeal, callable, parameter_name, annotation, default):
# print(f"default_options({appeal=}, {callable=}, {parameter_name=}, {annotation=}, {default=})")
added_an_option = False
options = [parameter_name_to_short_option(parameter_name)]
if len(parameter_name) > 1:
options.append(parameter_name_to_long_option(parameter_name))
for option in options:
worked = _default_option(option,
appeal, callable, parameter_name, annotation, default)
added_an_option = added_an_option or worked
if not added_an_option:
raise AppealConfigurationError(f"Couldn't add any default options for {callable} parameter {parameter_name}")
def unbound_callable(callable):
"""
Unbinds a callable.
If the callable is bound to an object (a "method"),
returns the unbound callable. Otherwise returns callable.
"""
return callable.__func__ if isinstance(callable, types.MethodType) else callable
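# Illustrative sketch only (not part of the original Appeal source): a bound
# method is unwrapped back to the function stored on the class, while an
# ordinary callable passes through unchanged.
def _example_unbound_callable():
    class Greeter:
        def hello(self):
            return "hello"
    g = Greeter()
    assert unbound_callable(g.hello) is Greeter.hello
    assert unbound_callable(len) is len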
event_clock = time.monotonic_ns
event_start = event_clock()
unspecified = object()
class Appeal:
"""
An Appeal object can only process a single command-line.
Once you have called main() or process() on an Appeal object,
you can't call either of those methods again.
"""
def __init__(self,
name=None,
*,
default_options=default_options,
repeat=False,
parent=None,
option_space_oparg = True, # '--long OPARG' and '-s OPARG'
short_option_equals_oparg = True, # -s=OPARG
short_option_concatenated_oparg = True, # -sOPARG
positional_argument_usage_format = "{name}",
# if true:
# * adds a "help" command (if your program supports commands)
# * supports lone "-h" and "--help" options which behave like the "help" command without arguments
help=True,
# if set to a non-empty string,
# * adds a "version" command (if your program has commands)
# * supports lone "-v" and "--version" options which behave like the "version" command without arguments
version=None,
# when printing docstrings: should Appeal add in missing arguments?
usage_append_missing_options = True,
usage_append_missing_arguments = True,
usage_indent_definitions = 4,
# when printing docstrings, how should we sort the options and arguments?
#
# valid options:
# None: don't change order
# "sorted": sort lexigraphically. note that options sort by the first long option.
# "usage": reorder into the order they appear in usage.
#
# note that when sorting, options that appear multiple times will only be shown
# once. the second and subsequent appearances will be discarded.
usage_sort_options = None,
usage_sort_arguments = None,
usage_max_columns = 80,
log_events = bool(want_prints),
):
self.parent = parent
self.repeat = repeat
self.name = name
self.commands = {}
self._global = None
self._global_program = None
self._global_command = None
self._default = None
self._default_program = None
self._default_command = None
self.full_name = ""
self.depth = -1
self.usage_str = self.summary_str = self.doc_str = None
# in root Appeal instance, self.root == self, self.parent == None
# in child Appeal instance, self.root != self, self.parent != None (and != self)
#
# only accept settings parameters if we're the root Appeal instance
if parent is None:
self.root = self
name = name or os.path.basename(sys.argv[0])
self.name = self.full_name = name
self.force_positional = False
self.parsing_option = 0
self.default_options = default_options
self.option_parsing_semantics = (
option_space_oparg,
short_option_equals_oparg,
short_option_concatenated_oparg,
)
self.usage_append_missing_options = usage_append_missing_options
self.usage_append_missing_arguments = usage_append_missing_arguments
self.usage_sort_options = usage_sort_options
self.usage_sort_arguments = usage_sort_arguments
self.usage_max_columns = usage_max_columns
self.usage_indent_definitions = usage_indent_definitions
# slightly hacky and limited! sorry!
self.positional_argument_usage_format = positional_argument_usage_format.replace("name.upper()", "__NAME__")
# an "option entry" is:
# (option, callable, parameter, annotation, default)
#
# option is the normalized option string
# callable is the unbound Python function/method
# note that if callable is a bound method object, we store that.
# we don't unbind it for this application.
        # parameter is
# Repository: wwxFromTju/hierarchical-marl
"""Implementation of hierarchical cooperative multi-agent RL with skill discovery.
High-level Q-function Q(s, z) is trained with QMIX (with decentralized execution)
using the global environment reward.
Low-level policies are either
1. parameterized as policy networks pi(a^n|o^n,z^n) and trained with policy gradient
using the intrinsic reward log P(z|tau) + entropy
or
2. induced from Q-functions Q(o^n,z^n,a^n) and trained with independent Q-learning
using only log P(z|tau) as delayed reward
"""
import tensorflow as tf
import numpy as np
import sys
import networks
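# Illustrative sketch only (not part of the original HSD code): the low-level
# intrinsic reward described in the module docstring is log P(z|tau), i.e. the
# log-probability the trained decoder assigns to the skill that was actually
# executed. Given decoder probabilities of shape [batch, l_z] and one-hot
# skill labels of the same shape, it can be computed as below.
def _example_intrinsic_reward(decoder_probs, z_onehot, eps=1e-15):
    return np.log(np.sum(decoder_probs * z_onehot, axis=1) + eps)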
class Alg(object):
def __init__(self, config_alg, config_h, n_agents, l_state, l_obs, l_action, l_z, nn):
"""
Args:
config_alg: dictionary of general RL params
config_h: dictionary of HSD params
n_agents: number of agents on the team controlled by this alg
l_state, l_obs, l_action, l_z: int
nn: dictionary with neural net sizes
"""
self.l_state = l_state
self.l_obs = l_obs
self.l_action = l_action
self.l_z = l_z
self.nn = nn
self.n_agents = n_agents
self.tau = config_alg['tau']
self.lr_Q = config_alg['lr_Q']
self.lr_actor = config_alg['lr_actor']
self.lr_decoder = config_alg['lr_decoder']
self.gamma = config_alg['gamma']
self.traj_length = config_h['steps_per_assign']
self.traj_skip = config_h['traj_skip']
self.traj_length_downsampled = int(np.ceil( self.traj_length / self.traj_skip ))
self.use_state_difference = config_h['use_state_difference']
if self.use_state_difference:
self.traj_length_downsampled -= 1
# Domain-specific removal of information from agent observation
# Either none (deactivate) or scalar index where obs should be truncated for use by decoder
self.obs_truncate_length = config_h['obs_truncate_length']
assert( (self.obs_truncate_length is None) or (self.obs_truncate_length <= self.l_obs) )
self.low_level_alg = config_h['low_level_alg']
assert(self.low_level_alg == 'reinforce' or self.low_level_alg == 'iac' or self.low_level_alg == 'iql')
if self.low_level_alg == 'iac':
self.lr_V = config_alg['lr_V']
# Initialize computational graph
self.create_networks()
self.list_initialize_target_ops, self.list_update_target_ops, self.list_update_target_ops_low = self.get_assign_target_ops()
self.create_train_op_high()
self.create_train_op_low()
self.create_train_op_decoder()
# TF summaries
self.create_summary()
def create_networks(self):
# Placeholders
self.state = tf.placeholder(tf.float32, [None, self.l_state], 'state')
self.obs = tf.placeholder(tf.float32, [None, self.l_obs], 'obs')
self.z = tf.placeholder(tf.float32, [None, self.l_z], 'z')
# Decoder p(z|tau)
if self.obs_truncate_length:
self.traj = tf.placeholder(dtype=tf.float32, shape=[None, self.traj_length_downsampled, self.obs_truncate_length])
else:
self.traj = tf.placeholder(dtype=tf.float32, shape=[None, self.traj_length_downsampled, self.l_obs])
with tf.variable_scope("Decoder"):
self.decoder_out, self.decoder_probs = networks.decoder(self.traj, self.traj_length_downsampled, self.nn['n_h_decoder'], self.l_z)
# Low-level policy
if self.low_level_alg == 'reinforce' or self.low_level_alg == 'iac':
self.epsilon = tf.placeholder(tf.float32, None, 'epsilon')
with tf.variable_scope("Policy_main"):
probs = networks.actor(self.obs, self.z, self.nn['n_h1_low'], self.nn['n_h2_low'], self.l_action)
self.probs = (1-self.epsilon) * probs + self.epsilon/float(self.l_action)
self.action_samples = tf.multinomial(tf.log(self.probs), 1)
if self.low_level_alg == 'iac':
with tf.variable_scope("V_main"):
self.V = networks.critic(self.obs, self.z, self.nn['n_h1_low'], self.nn['n_h2_low'])
with tf.variable_scope("V_target"):
self.V_target = networks.critic(self.obs, self.z, self.nn['n_h1_low'], self.nn['n_h2_low'])
# Low-level Q-functions
if self.low_level_alg == 'iql':
with tf.variable_scope("Qlow_main"):
self.Q_low = networks.Q_low(self.obs, self.z, self.nn['n_h1_low'], self.nn['n_h2_low'], self.l_action)
with tf.variable_scope("Qlow_target"):
self.Q_low_target = networks.Q_low(self.obs, self.z, self.nn['n_h1_low'], self.nn['n_h2_low'], self.l_action)
self.argmax_Q_low = tf.argmax(self.Q_low, axis=1)
self.actions_low_1hot = tf.placeholder(tf.float32, [None, self.l_action], 'actions_low_1hot')
# High-level QMIX
# Individual agent networks
# output dimension is [time * n_agents, q-values]
with tf.variable_scope("Agent_main"):
self.agent_qs = networks.Qmix_single(self.obs, self.nn['n_h1'], self.nn['n_h2'], self.l_z)
with tf.variable_scope("Agent_target"):
self.agent_qs_target = networks.Qmix_single(self.obs, self.nn['n_h1'], self.nn['n_h2'], self.l_z)
self.argmax_Q = tf.argmax(self.agent_qs, axis=1)
self.argmax_Q_target = tf.argmax(self.agent_qs_target, axis=1)
# To extract Q-value from agent_qs and agent_qs_target
# [batch*n_agents, N_roles]
self.actions_1hot = tf.placeholder(tf.float32, [None, self.l_z], 'actions_1hot')
# [batch*n_agents, 1]
self.q_selected = tf.reduce_sum(tf.multiply(self.agent_qs, self.actions_1hot), axis=1)
# [batch, n_agents]
self.mixer_q_input = tf.reshape( self.q_selected, [-1, self.n_agents] )
self.q_target_selected = tf.reduce_sum(tf.multiply(self.agent_qs_target, self.actions_1hot), axis=1)
self.mixer_target_q_input = tf.reshape( self.q_target_selected, [-1, self.n_agents] )
# Mixing network
with tf.variable_scope("Mixer_main"):
self.mixer = networks.Qmix_mixer(self.mixer_q_input, self.state, self.l_state, self.n_agents, self.nn['n_h_mixer'])
with tf.variable_scope("Mixer_target"):
self.mixer_target = networks.Qmix_mixer(self.mixer_target_q_input, self.state, self.l_state, self.n_agents, self.nn['n_h_mixer'])
def get_assign_target_ops(self):
# ops for equating main and target
list_initial_ops = []
# ops for slow update of target toward main
list_update_ops = []
# ops for slow update of low-level target toward low-level main
list_update_ops_low = []
list_Agent_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Agent_main')
map_name_Agent_main = {v.name.split('main')[1] : v for v in list_Agent_main}
list_Agent_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Agent_target')
map_name_Agent_target = {v.name.split('target')[1] : v for v in list_Agent_target}
if len(list_Agent_main) != len(list_Agent_target):
raise ValueError("get_initialize_target_ops : lengths of Agent_main and Agent_target do not match")
for name, var in map_name_Agent_main.items():
# create op that assigns value of main variable to
# target variable of the same name
list_initial_ops.append( map_name_Agent_target[name].assign(var) )
for name, var in map_name_Agent_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Agent_target[name].assign( self.tau*var + (1-self.tau)*map_name_Agent_target[name] ) )
list_Mixer_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_main')
map_name_Mixer_main = {v.name.split('main')[1] : v for v in list_Mixer_main}
list_Mixer_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_target')
map_name_Mixer_target = {v.name.split('target')[1] : v for v in list_Mixer_target}
if len(list_Mixer_main) != len(list_Mixer_target):
raise ValueError("get_initialize_target_ops : lengths of Mixer_main and Mixer_target do not match")
# ops for equating main and target
for name, var in map_name_Mixer_main.items():
# create op that assigns value of main variable to
# target variable of the same name
list_initial_ops.append( map_name_Mixer_target[name].assign(var) )
# ops for slow update of target toward main
for name, var in map_name_Mixer_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Mixer_target[name].assign( self.tau*var + (1-self.tau)*map_name_Mixer_target[name] ) )
if self.low_level_alg == 'iac':
list_V_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_main')
map_name_V_main = {v.name.split('main')[1] : v for v in list_V_main}
list_V_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_target')
map_name_V_target = {v.name.split('target')[1] : v for v in list_V_target}
if len(list_V_main) != len(list_V_target):
raise ValueError("get_initialize_target_ops : lengths of V_main and V_target do not match")
for name, var in map_name_V_main.items():
list_initial_ops.append( map_name_V_target[name].assign(var) )
for name, var in map_name_V_main.items():
list_update_ops_low.append( map_name_V_target[name].assign( self.tau*var + (1-self.tau)*map_name_V_target[name] ) )
elif self.low_level_alg == 'iql':
list_Qlow_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Qlow_main')
map_name_Qlow_main = {v.name.split('main')[1] : v for v in list_Qlow_main}
list_Qlow_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Qlow_target')
map_name_Qlow_target = {v.name.split('target')[1] : v for v in list_Qlow_target}
if len(list_Qlow_main) != len(list_Qlow_target):
raise ValueError("get_initialize_target_ops : lengths of Qlow_main and Qlow_target do not match")
for name, var in map_name_Qlow_main.items():
list_initial_ops.append( map_name_Qlow_target[name].assign(var) )
for name, var in map_name_Qlow_main.items():
list_update_ops_low.append( map_name_Qlow_target[name].assign( self.tau*var + (1-self.tau)*map_name_Qlow_target[name] ) )
return list_initial_ops, list_update_ops, list_update_ops_low
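    # Illustrative sketch only (not part of the original HSD code): the target
    # update ops built above implement this Polyak-style soft update,
    # theta_target <- tau*theta_main + (1-tau)*theta_target, written out here
    # with plain numpy arrays.
    @staticmethod
    def _soft_update_example(theta_main, theta_target, tau):
        return tau * theta_main + (1.0 - tau) * theta_target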
def run_actor(self, list_obs, roles, epsilon, sess):
"""Get low-level actions for all agents as a batch.
Args:
list_obs: list of vectors, one per agent
roles: np.array where each row is a 1-hot vector
epsilon: exploration parameter
sess: TF session
Returns: np.array of action integers
"""
# convert to batch
obs = np.array(list_obs)
if self.low_level_alg == 'reinforce' or self.low_level_alg == 'iac':
feed = {self.obs : obs, self.z : roles, self.epsilon : epsilon}
actions = sess.run(self.action_samples, feed_dict=feed)
elif self.low_level_alg == 'iql':
feed = {self.obs : obs, self.z : roles}
actions_argmax = sess.run(self.argmax_Q_low, feed_dict=feed)
actions = np.zeros(self.n_agents, dtype=int)
for idx in range(self.n_agents):
if np.random.rand() < epsilon:
actions[idx] = np.random.randint(0, self.l_action)
else:
actions[idx] = actions_argmax[idx]
return actions.flatten()
def assign_roles(self, list_obs, epsilon, sess, N_roles_current):
"""Get high-level role assignment actions for all agents.
Args:
list_obs: list of vectors, one per agent
epsilon: exploration parameter
sess: TF session
N_roles_current: number of activated role dimensions
Returns: np.array of role indices
"""
obs = np.array(list_obs)
feed = {self.obs : obs}
Q_values = sess.run(self.agent_qs, feed_dict=feed)
# Limit the number of activated options based on curriculum
roles_argmax = np.argmax(Q_values[:, 0:N_roles_current], axis=1)
roles = np.zeros(self.n_agents, dtype=int)
for idx in range(self.n_agents):
if np.random.rand() < epsilon:
roles[idx] = np.random.randint(0, N_roles_current)
else:
roles[idx] = roles_argmax[idx]
return roles
def create_train_op_high(self):
"""Ops for training high-level policy."""
self.td_target = tf.placeholder(tf.float32, [None], 'td_target')
self.loss_Q_high = tf.reduce_mean(tf.square(self.td_target - tf.squeeze(self.mixer)))
self.Q_opt = tf.train.AdamOptimizer(self.lr_Q)
self.Q_op = self.Q_opt.minimize(self.loss_Q_high)
def create_train_op_low(self):
"""Ops for training low-level policy."""
if self.low_level_alg == 'reinforce' or self.low_level_alg == 'iac':
self.actions_taken = tf.placeholder(tf.float32, [None, self.l_action], 'action_taken')
# self.probs shape is [batch size * traj length, l_action]
# now log_probs shape is [batch size * traj length]
log_probs = tf.log(tf.reduce_sum(tf.multiply(self.probs, self.actions_taken), axis=1)+1e-15)
if self.low_level_alg == 'reinforce':
                # Reshape to [batch size, traj length]
log_probs_reshaped = tf.reshape( log_probs, [-1, self.traj_length])
self.traj_reward = tf.placeholder(tf.float32, [None], 'traj_reward')
# E [ \sum_t \log \pi(a_t|o_t,z) * R ]
self.policy_loss = - tf.reduce_mean( tf.reduce_sum(log_probs_reshaped, axis=1) * self.traj_reward )
elif self.low_level_alg == 'iac':
# Critic train op
self.V_td_target = tf.placeholder(tf.float32, [None], 'V_td_target')
self.loss_V = tf.reduce_mean(tf.square(self.V_td_target - tf.squeeze(self.V)))
self.V_opt = tf.train.AdamOptimizer(self.lr_V)
self.V_op = self.V_opt.minimize(self.loss_V)
# Policy train op
self.V_evaluated = tf.placeholder(tf.float32, [None], 'V_evaluated')
self.V_td_error = self.V_td_target - self.V_evaluated
self.policy_loss = -tf.reduce_mean( tf.multiply( log_probs, self.V_td_error ) )
self.policy_opt = tf.train.AdamOptimizer(self.lr_actor)
self.policy_op = self.policy_opt.minimize(self.policy_loss)
elif self.low_level_alg == 'iql':
self.td_target_IQL = tf.placeholder(tf.float32, [None], 'td_target_IQL')
self.td_error = self.td_target_IQL - tf.reduce_sum(tf.multiply(self.Q_low, self.actions_low_1hot), axis=1)
self.loss_IQL = tf.reduce_mean(tf.square(self.td_error))
self.IQL_opt = tf.train.AdamOptimizer(self.lr_Q)
self.IQL_op = self.IQL_opt.minimize(self.loss_IQL)
def create_train_op_decoder(self):
"""Ops for training skill decoder."""
self.onehot_z = tf.placeholder(tf.float32, [None, self.l_z], 'onehot_z')
self.decoder_loss = tf.losses.softmax_cross_entropy(self.onehot_z, self.decoder_out)
self.decoder_opt = tf.train.AdamOptimizer(self.lr_decoder)
self.decoder_op = self.decoder_opt.minimize(self.decoder_loss)
def create_summary(self):
summaries_Q = [tf.summary.scalar('loss_Q_high', self.loss_Q_high)]
mixer_main_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_main')
import optparse
import time
import numpy as np
from numpy.lib import recfunctions # to append fields to rec arrays
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.offsetbox import AnchoredText
from matplotlib.backends.backend_pdf import PdfPages
import katpoint
from katpoint import rad2deg, deg2rad, Target, wrap_angle
from katsdpscripts import git_info
import scipy.stats as stats
def get_condition(data):
"""Get condition for grouped target scan.
'ideal' = 0 , \n 'optimal' = 1, \n 'normal' = 2, \n 'other' = 3 """
# Set up limits on environmental conditions
condition_values = np.zeros((4), dtype=dict)
condition_values[0] = {'wind_speed':1.,'temp_low':19.,'temp_high':21.,'sun_el':-5.} # ideal
condition_values[1] = {'wind_speed':2.9,'temp_low':-5.,'temp_high':35.,'sun_el':-5.}# optimal
condition_values[2] = {'wind_speed':9.8,'temp_low':-5.,'temp_high':40.,'sun_el':100.}# normal
condition_values[3] = {'wind_speed':9999.8,'temp_low':-273.,'temp_high':40000.,'sun_el':1000.}# other
for i,values in enumerate(condition_values) :
condition = i
if data['sun_el'].max() < condition_values[i]['sun_el'] :
if data['wind_speed'].max() < condition_values[i]['wind_speed'] :
if data['temperature'].max() < condition_values[i]['temp_high'] :
if data['temperature'].min() > condition_values[i]['temp_low'] :
break # Means conditions have been met
return condition
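# Illustrative sketch only (not part of the original script): a fake weather
# sample, expressed as plain numpy arrays, whose extrema satisfy the 'optimal'
# thresholds (wind < 2.9 m/s, -5 < temp < 35 degC, sun below -5 deg elevation)
# but fail the stricter 'ideal' wind limit, so get_condition() returns 1.
def _example_get_condition():
    fake = {'sun_el': np.array([-10.0]),
            'wind_speed': np.array([1.5]),
            'temperature': np.array([20.0])}
    return get_condition(fake)   # expected: 1 ('optimal')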
def read_offsetfile(filename):
# Load data file in one shot as an array of strings
string_fields = ['dataset', 'target', 'timestamp_ut', 'data_unit']
data = np.loadtxt(filename, dtype='string', comments='#', delimiter=', ')
# Interpret first non-comment line as header
fields = data[0].tolist()
# By default, all fields are assumed to contain floats
formats = np.tile(np.float, len(fields))
# The string_fields are assumed to contain strings - use data's string type, as it is of sufficient length
formats[[fields.index(name) for name in string_fields if name in fields]] = data.dtype
# Convert to heterogeneous record array
data = np.rec.fromarrays(data[1:].transpose(), dtype=list(zip(fields, formats)))
# Load antenna description string from first line of file and construct antenna object from it
antenna = katpoint.Antenna(file(filename).readline().strip().partition('=')[2])
# Use the pointing model contained in antenna object as the old model (if not overridden by file)
# If the antenna has no model specified, a default null model will be used
return data, antenna
def referencemetrics(ant,data,num_samples_limit=1,power_sample_limit=0):
"""Determine and sky RMS from the antenna pointing model."""
"""On the calculation of all-sky RMS
Assume the el and cross-el errors have zero mean, are distributed normally, and are uncorrelated
They are therefore described by a 2-dimensional circular Gaussian pdf with zero mean and *per-component*
standard deviation of sigma
The absolute sky error (== Euclidean length of 2-dim error vector) then has a Rayleigh distribution
    The RMS sky error is sqrt(2) * sigma, since each squared error term is the sum of
    two squared Gaussian random values, each with an expected value of sigma^2.
e.g. sky_rms = np.sqrt(np.mean((abs_sky_error-abs_sky_error.mean()) ** 2))
A more robust estimate of the RMS sky error is obtained via the median of the Rayleigh distribution,
which is sigma * sqrt(log(4)) -> convert this to the RMS sky error = sqrt(2) * sigma
e.g. robust_sky_rms = np.median(np.sqrt((abs_sky_error-abs_sky_error.mean())**2)) * np.sqrt(2. / np.log(4.))
"""
#print type(data.shape[0] ), type(num_samples_limit)
beam = data['beam_height_I'].mean()
good_beam = (data['beam_height_I'] > beam*.8) * (data['beam_height_I'] < beam*1.2) * (data['beam_height_I'] > power_sample_limit)
data = data[good_beam]
if data.shape[0] > 0 and not np.all(good_beam) : print("bad scan", data['target'][0])
if data.shape[0] >= num_samples_limit and (data['timestamp'][-1] - data['timestamp'][0]) < 2000: # check all fitted Ipks are valid
condition_str = ['ideal' ,'optimal', 'normal' , 'other']
condition = 3
text = [] #azimuth, elevation, delta_azimuth, delta_azimuth_std, delta_elevation, delta_elevation_std,
measured_delta_xel = data['delta_azimuth']* np.cos(data['elevation']) # scale due to sky shape
abs_sky_error = measured_delta_xel
model_delta_az, model_delta_el = ant.pointing_model.offset(data['azimuth'], data['elevation'])
residual_az = data['delta_azimuth'] - model_delta_az
residual_el = data['delta_elevation'] - model_delta_el
residual_xel = residual_az * np.cos(data['elevation'])
delta_xel_std = data['delta_azimuth_std'] * np.cos(data['elevation'])
        abs_sky_delta_std = rad2deg(np.sqrt(delta_xel_std**2 + data['delta_elevation_std']**2))*3600 # convert to arcseconds
#for i,val in enumerate(data):
# print ("Test Target: '%s' fit accuracy %.3f\" "%(data['target'][i],abs_sky_delta_std[i]))
abs_sky_error = rad2deg(np.sqrt((residual_xel) ** 2 + (residual_el)** 2)) *3600
condition = get_condition(data)
rms = np.std(abs_sky_error)
robust = np.median(np.abs(abs_sky_error-abs_sky_error.mean())) * np.sqrt(2. / np.log(4.))
text.append("Dataset:%s Test Target: '%s' Reference RMS = %.3f\" {fit-accuracy=%.3f\"} (robust %.3f\") (N=%i Data Points) ['%s']" % (data['dataset'][0],
data['target'][0],rms,np.mean(abs_sky_delta_std),robust,data.shape[0],condition_str[condition]))
output_data = data[0].copy() # make a copy of the rec array
for i,x in enumerate(data[0]) : # make an average of data
if x.dtype.kind == 'f' : # average floats
output_data[i] = data.field(i).mean()
else :
output_data[i] = data.field(i)[0]
sun = Target('Sun,special')
source = Target('%s,azel, %f,%f'%(output_data['target'],np.degrees(output_data['azimuth']),np.degrees(output_data['elevation'])) )
sun_sep = np.degrees(source.separation(sun,timestamp=output_data['timestamp'],antenna=ant))
output_data = recfunctions.append_fields(output_data, 'sun_sep', np.array([sun_sep]), dtypes=np.float, usemask=False, asrecarray=True)
output_data = recfunctions.append_fields(output_data, 'condition', np.array([condition]), dtypes=np.float, usemask=False, asrecarray=True)
output_data = recfunctions.append_fields(output_data, 'rms', np.array([rms]), dtypes=np.float, usemask=False, asrecarray=True)
output_data = recfunctions.append_fields(output_data, 'robust', np.array([robust]), dtypes=np.float, usemask=False, asrecarray=True)
output_data = recfunctions.append_fields(output_data, 'N', np.array([data.shape[0]]), dtypes=np.float, usemask=False, asrecarray=True)
#### Debugging
#residual_az = data['delta_azimuth'] - model_delta_az
#residual_el = data['delta_elevation'] - model_delta_el
#residual_xel = residual_az * np.cos(data['elevation'])
output_data = recfunctions.append_fields(output_data, 'residual_az', np.array([rad2deg(residual_az.std())*3600]), dtypes=np.float, usemask=False, asrecarray=True)
output_data = recfunctions.append_fields(output_data, 'residual_el', np.array([rad2deg(residual_el.std())*3600]), dtypes=np.float, usemask=False, asrecarray=True)
output_data = recfunctions.append_fields(output_data, 'residual_xel', np.array([rad2deg(residual_xel.std())*3600]), dtypes=np.float, usemask=False, asrecarray=True)
#print "%10s %i %3.1f, %s"%(data['target'][0],data['timestamp'][-1] - data['timestamp'][0], rms, str(np.degrees(data['delta_elevation']-data['delta_elevation'].mean())*3600) )
output_data['wind_speed'] = data['wind_speed'].max()
return text,output_data
else :
return None,None
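# Illustrative sketch only (not part of the original script): for Rayleigh
# distributed absolute sky errors with per-component standard deviation sigma,
# the RMS sky error sqrt(mean(r**2)) equals sqrt(2)*sigma, and the same value
# can be recovered robustly from the median because the Rayleigh median is
# sigma*sqrt(log(4)) -- the relationship used in referencemetrics() above.
def _example_rayleigh_sky_rms():
    sigma = 10.0                                            # arcsec per component
    r = stats.rayleigh.rvs(scale=sigma, size=200000)
    direct_rms = np.sqrt(np.mean(r ** 2))                   # ~ sqrt(2)*sigma ~ 14.1
    robust_rms = np.median(r) * np.sqrt(2. / np.log(4.))    # also ~ 14.1
    return direct_rms, robust_rms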
def plot_source_rms(data,title):
"""Plot source pointing accuracy vs sun angles."""
fig = plt.figure(figsize=(16,9))
#params = {'axes.labelsize': 12, 'font.size': 10, 'legend.fontsize': 9,
#'xtick.labelsize': 10, 'ytick.labelsize': 10, 'text.usetex': False}
#plt.rcParams.update(params)
markers = []
colors = ['b','g','r','c','m','y','k']
pointtypes = ['o','*','x','^','s','p','h','+','D','d','v','H','d','v']
for point in pointtypes:
for color in colors:
markers.append(str(color+point))
# sky RMS vs sun angles
ax = fig.add_subplot(121)
i = 0
    unique_targets = np.unique(data['target'])
for target in unique_targets:
index_list = data['target'] == target
plt.plot(data['sun_sep'][index_list],data['rms'][index_list],markers[i],linewidth = 0, label=target)
i = i + 1
plt.suptitle(title, fontsize=12, fontweight='bold',y=0.95)
plt.legend(numpoints=1,loc='upper right')
plt.axhline(y=5,color='g',lw=1,ls='--')
plt.axhline(y=25,color='b',lw=1,ls='--')
plt.ylabel(r'$\sigma$ (arc sec)')
plt.xlabel('Sun Angular Distance (deg)')
if np.any( data['rms'] > 80 ):
plt.ylim(0,80)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.xaxis.set_minor_locator(AutoMinorLocator())
# fitted Gaussian peak height vs MJD
i = 0
ax2 = fig.add_subplot(122)
for target in unique_targets:
index_list = data['target'] == target
plt.plot((data['timestamp'][index_list]/3600.)%24,data['beam_height_I'][index_list],markers[i],linewidth = 0, label=target)
i = i + 1
plt.legend(numpoints=1,loc='upper right')
plt.ylabel(r'Fitted $I_{\mathrm{peak}}$ (A.U.)')
plt.xlabel('Time of Day (hr)')
ax2.yaxis.set_minor_locator(AutoMinorLocator())
ax2.xaxis.set_minor_locator(AutoMinorLocator())
return fig
def plot_diagnostics(data,title):
"""Plot offset-pointing accuracy vs environmental conditions."""
fig = plt.figure(figsize=(16,9))
#params = {'axes.labelsize': 12, 'font.size': 10, 'legend.fontsize': 9,
#'xtick.labelsize': 10, 'ytick.labelsize': 10, 'text.usetex': False}
#plt.rcParams.update(params)
colours = ['k','b', 'g', 'y']
markers = ['o','s','^','*']
labels = ['ideal','optimal','normal','other']
labels_sigma = [5,5,10,25]
plt.suptitle(title, fontsize=12, fontweight='bold',y=0.95)
ax = fig.add_subplot(231)
for i,label in enumerate(labels):
index_list = data['condition'] == i
if np.sum(index_list) > 0 :
plt.plot(np.degrees(data['elevation'][index_list]),data['rms'][index_list],marker=markers[i],
color=colours[i],lw=0,label=label)
plt.ylabel(r'$\sigma$ (arc sec)')
plt.xlabel('Elevation (deg)')
plt.legend(numpoints=1,loc='upper right')
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.xaxis.set_minor_locator(AutoMinorLocator())
if np.any( data['rms'] > 80 ):
plt.ylim(0,80)
ax2 = fig.add_subplot(232)
def time_hour(x):
y = (x/3600.)%24 + (x/3600.)/24
for i in range(x.shape[0]) :
y = time.localtime(x[i]).tm_hour
return y
for i,label in enumerate(labels):
index_list = data['condition'] == i
if np.sum(index_list) > 0 :
plt.plot((data['timestamp'][index_list]/3600.)%24 ,data['rms'][index_list],marker=markers[i],
color=colours[i],lw=0,label=label)
plt.ylabel(r'$\sigma$ (arc sec)')
plt.xlabel('Hour (UTC)')
plt.legend(numpoints=1,loc='upper right')
plt.xlim(0,24)
ax2.yaxis.set_minor_locator(AutoMinorLocator())
ax2.xaxis.set_minor_locator(AutoMinorLocator())
if np.any( data['rms'] > 80 ):
plt.ylim(0,80)
ax3 = fig.add_subplot(233)
for i,label in enumerate(labels):
index_list = data['condition'] == i
if np.sum(index_list) > 0 :
plt.plot(data['wind_speed'][index_list],data['rms'][index_list],marker=markers[i],
color=colours[i],lw=0,label=label)
plt.ylabel(r'$\sigma$ (arc sec)')
plt.xlabel('Wind speed (m/s)')
plt.legend(numpoints=1,loc='upper right')
ax3.yaxis.set_minor_locator(AutoMinorLocator())
ax3.xaxis.set_minor_locator(AutoMinorLocator())
if np.any( data['rms'] > 80 ):
plt.ylim(0,80)
ax4 = fig.add_subplot(234)
for i,label in enumerate(labels):
index_list = data['condition'] == i
if np.sum(index_list) > 0 :
plt.plot(data['sun_sep'][index_list],data['rms'][index_list],marker=markers[i],
color=colours[i],lw=0,label=label)
plt.ylabel(r'$\sigma$ (arc sec)')
plt.xlabel('Sun Angular Distance (deg)')
plt.legend(numpoints=1,loc='upper right')
ax4.yaxis.set_minor_locator(AutoMinorLocator())
ax4.xaxis.set_minor_locator(AutoMinorLocator())
if np.any( data['rms'] > 80 ):
plt.ylim(0,80)
ax5 = fig.add_subplot(235)
for i,label in enumerate(labels):
index_list = data['condition'] == i
if np.sum(index_list) > 0 :
plt.plot(data['temperature'][index_list],data['rms'][index_list],marker=markers[i],
color=colours[i],lw=0,label=label)
plt.ylabel(r'$\sigma$ (arc sec)')
plt.xlabel(r'Temperature ($^o$C)')
plt.legend(numpoints=1,loc='upper right')
ax5.yaxis.set_minor_locator(AutoMinorLocator())
ax5.xaxis.set_minor_locator(AutoMinorLocator())
if np.any( data['rms'] > 80 ):
plt.ylim(0,80)
ax6 = fig.add_subplot(236)
for i,label in enumerate(labels):
index_list = data['condition'] == i
if np.sum(index_list) > 0 :
bin_width = 1
plt.hist(data['rms'][index_list],bins=np.arange(0,85,bin_width),histtype='bar',ec='w',alpha=0.5,
align='mid',color=colours[i],label=label)
plt.legend(numpoints=1,loc='upper right')
plt.ylabel('Number')
plt.xlabel(r'$\sigma$ (arc sec)')
return fig
def plots_histogram(data,title,fit=stats.rayleigh):
"""Plot offset-pointing accuracy with kde bins."""
fig = plt.figure(figsize=(16,9))
colours = ['k','b', 'g', 'y']
markers = ['o','s','^','*']
labels = ['ideal','optimal','normal','other']
labels_sigma = [5,5,10,25]
gridsize = 200
cut = 3
bw = stats.gaussian_kde(data['rms']).scotts_factor() * data['rms'].std(ddof=1)
try:
import seaborn as sns
tmp = sns.distplot(data['rms'])
except ImportError:
tmp = plt.hist(data['rms'],bins= np.arange(0.0, data['rms'].max() + bw * cut, bw) )
#print "Tmp:",tmp
#
plt.ylabel('Normalised Number per bin')
plt.xlabel(r'$\sigma$ (arc sec)')
plt.title(title)
#gridsize = 200
#cut = 3
#bw = stats.gaussian_kde(data['rms']).scotts_factor() * data['rms'].std(ddof=1)
x = np.linspace(0.0, data['rms'].max() + bw * cut, gridsize)
    params = fit.fit(data['rms'], floc=0.0)  # force the distribution to
#!/usr/bin/env python
# Repository: tpeng/magnitude
# See the accompanying LICENSE file.
import os
import sys
import shlex
import glob
import re
import time
import zipfile
import tarfile
try:
if not os.environ.get("APSW_FORCE_DISTUTILS"):
from setuptools import setup, Extension, Command
else:
raise ImportError()
except ImportError:
from distutils.core import setup, Extension, Command
from distutils.command import build_ext, build, sdist
##
## Do your customizations here or by creating a setup.cfg as documented at
## http://www.python.org/doc/2.5.2/dist/setup-config.html
##
include_dirs=['src']
library_dirs=[]
define_macros=[]
libraries=[]
# This includes the functionality marked as experimental in SQLite 3.
# Comment out the line to exclude them
define_macros.append( ('EXPERIMENTAL', '1') )
##
## End of customizations
##
# python 2 and 3 print equivalent
def write(*args):
# py2 won't allow optional keyword arg on end, so work around it
dest=sys.stdout
if args[-1]==sys.stderr:
dest=args[-1]
args=args[:-1]
dest.write(" ".join(args)+"\n")
dest.flush()
py3=sys.version_info>=(3,0)
# ensure files are closed
def read_whole_file(name, mode):
if sys.version_info<(2,4):
if "r" in mode and "U" in mode:
# python 2.3 returns file not found if "U" present!
mode="".join([m for m in mode if m!="U"])
f=open(name, mode)
try:
return f.read()
finally:
f.close()
def write_whole_file(name, mode, data):
f=open(name, mode)
try:
f.write(data)
finally:
f.close()
# They keep messing with where files are in URI
def fixup_download_url(url):
ver=re.search("3[0-9]{6}", url)
if ver:
ver=int(ver.group(0))
if ver>=3071600:
if ver>=3220000:
year="2018"
elif ver>=3160000:
year="2017"
elif ver>=3100000:
year="2016"
elif ver>=3080800:
year="2015"
elif ver>=3080300:
year="2014"
else:
year="2013"
if "/"+year+"/" not in url:
url=url.split("/")
url.insert(3, year)
return "/".join(url)
return url
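# Illustrative sketch only (not part of the original setup.py): release
# downloads for SQLite 3.7.16 and later live under a year directory, so
# fixup_download_url() inserts it when missing and leaves other URLs alone.
def _example_fixup_download_url():
    url = "https://sqlite.org/sqlite-amalgamation-3220000.zip"
    assert fixup_download_url(url) == \
        "https://sqlite.org/2018/sqlite-amalgamation-3220000.zip"
    # a URL that already contains the year passes through unchanged
    assert fixup_download_url("https://sqlite.org/2018/sqlite-amalgamation-3220000.zip") == \
        "https://sqlite.org/2018/sqlite-amalgamation-3220000.zip"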
# Run test suite
class run_tests(Command):
description="Run test suite"
# I did originally try using 'verbose' as the option but it turns
# out that is builtin and defaults to 1 (--quiet is also builtin
# and forces verbose to 0)
user_options=[
("show-tests", "s", "Show each test being run"),
]
# see if you can find boolean_options documented anywhere
boolean_options=['show-tests']
def initialize_options(self):
self.show_tests=0
def finalize_options(self):
pass
def run(self):
import unittest
import tests
tests.setup()
suite=unittest.TestLoader().loadTestsFromModule(tests)
# verbosity of zero doesn't print anything, one prints a dot
# per test and two prints each test name
result=unittest.TextTestRunner(verbosity=self.show_tests+1).run(suite)
if not result.wasSuccessful():
sys.exit(1)
# A hack we don't document
class build_test_extension(Command):
description="Compiles APSW test loadable extension"
user_options=[]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# On 64 bit windows we have to use MSVC
if sys.platform=='win32': # yes even on 64 bit
try:
import platform
if platform.architecture()[0]=='64bit':
res=os.system("cl /Gd src/testextension.c /I sqlite3 /I . /DDLL /LD /link /export:sqlite3_extension_init /export:alternate_sqlite3_extension_init /out:testextension.sqlext")
if res!=0:
raise RuntimeError("Building test extension failed")
return
except ImportError:
pass
shared="shared"
if sys.platform.startswith("darwin"):
shared="bundle"
res=os.system("gcc -fPIC -%s -o testextension.sqlext -Isqlite3 -I. src/testextension.c" % (shared,))
if res!=0:
raise RuntimeError("Building test extension failed")
# Another hack. Visual Studio 2008 & 2010 ship with 64
# compilers, headers and the Windows SDK but claims it doesn't and
# distutils can't find it. The separate Windows SDK can't find this
# and gets very confused not to mention being one of the buggiest cmd
# scripts I have ever seen. This hack just sets some environment
# variables directly since all the "proper" ways are very broken.
class win64hackvars(Command):
description="Set env vars for Visual Studio 2008/2010 Express 64 bit"
user_options=[]
def initialize_options(self): pass
def finalize_options(self): pass
def run(self):
vcver=9
if sys.version_info>=(3,3):
vcver=10
sdkdir=r"C:\Program Files\Microsoft SDKs\Windows\v6.0A"
vsdir=r"C:\Program Files (x86)\Microsoft Visual Studio %d.0\VC" % vcver
assert os.path.isdir(sdkdir), "Expected sdk dir "+sdkdir
assert os.path.isdir(vsdir), "Expected visual studio dir "+vsdir
os.environ["PATH"]=r"%s\bin\amd64;%s\bin" % (vsdir, sdkdir)
os.environ["INCLUDE"]=r"%s\include;%s\include" % (vsdir, sdkdir)
os.environ["LIB"]=r"%s\lib\amd64;%s\lib\x64" % (vsdir, sdkdir)
os.environ["DISTUTILS_USE_SDK"]="1"
os.environ["MSSdk"]=sdkdir
# deal with various python version compatibility issues with how
# to treat returned web data as lines of text
def fixupcode(code):
if sys.version_info<(2,5):
if type(code)!=str:
code=code.read()
if sys.version_info>=(3,0):
if type(code)!=bytes:
code=code.read()
if type(code)==bytes:
code=code.decode("iso8859-1")
if type(code)==str:
return [l+"\n" for l in code.split("\n")]
return code
fetch_parts=[]
class fetch(Command):
description="Automatically downloads SQLite and components"
user_options=[
("version=", None, "Which version of SQLite/components to get (default current)"),
("missing-checksum-ok", None, "Continue on a missing checksum (default abort)"),
("sqlite", None, "Download SQLite amalgamation"),
("all", None, "Download all downloadable components"),
]
fetch_options=['sqlite']
boolean_options=fetch_options+['all', 'missing-checksum-ok']
def initialize_options(self):
self.version=None
self.sqlite=False
self.all=False
self.missing_checksum_ok=False
def finalize_options(self):
# If all is selected then turn on all components
global fetch_parts
if self.all:
for i in self.fetch_options:
setattr(self, i, True)
for i in self.fetch_options:
fetch_parts.append(i)
def run(self):
# work out the version
if self.version is None:
write(" Getting download page to work out current SQLite version")
page=self.download("https://sqlite.org/download.html", text=True, checksum=False)
match=re.search(r'sqlite-amalgamation-3([0-9][0-9])([0-9][0-9])([0-9][0-9])\.zip', page)
if match:
self.version="3.%d.%d.%d" % tuple([int(match.group(n)) for n in range(1,4)])
if self.version.endswith(".0"):
self.version=self.version[:-len(".0")]
else:
write("Unable to determine current SQLite version. Use --version=VERSION", sys.stderr)
write("to set version - eg setup.py fetch --version=3.6.18", sys.stderr)
sys.exit(17)
write(" Version is "+self.version)
# now get each selected component
downloaded=0
if not self.version.startswith("fossil"):
v=[int(x) for x in self.version.split(".")]
if len(v)<4:
v.append(0)
self.webversion="%d%02d%02d%02d" % tuple(v)
## The amalgamation
if self.sqlite:
if self.version.startswith("fossil"):
write(" Getting code from fossil")
else:
write(" Getting the SQLite amalgamation")
if self.version.startswith("fossil"):
if self.version=="fossil":
uuid="trunk"
else:
showmsg=False
if not self.version.startswith("fossil-"):
showmsg=True
else:
uuid=self.version.split("-", 1)[1]
if not uuid:
showmsg=True
if showmsg:
write("Use fossil-HASH to identify a particular commit", sys.stderr)
write("eg fossil-3a82c8e6", sys.stderr)
sys.exit(18)
AURL="https://sqlite.org/src/zip/sqlite3.zip?uuid="+uuid
checksum=False
else:
if sys.platform=="win32":
AURL="https://sqlite.org/sqlite-amalgamation-%s.zip" % (self.webversion,)
else:
AURL="https://sqlite.org/sqlite-autoconf-%s.tar.gz" % (self.webversion,)
checksum=True
AURL=fixup_download_url(AURL)
data=self.download(AURL, checksum=checksum)
if AURL.endswith(".zip"):
zip=zipfile.ZipFile(data, "r")
for name in "sqlite3.c", "sqlite3.h", "sqlite3ext.h":
write("Extracting", name)
f=[n for n in zip.namelist() if n.endswith(name)]
if len(f)!=1:
raise Exception("Can't find %s in zip. Candidates are %s" % (name, f))
# Work around SQLite 3.7.13 bug where a symbol was
# declared SQLITE_API and extern
data=zip.read(f[0])
if name=="sqlite3.c":
data=data.decode("utf8")
data=data.replace("SQLITE_API extern", "SQLITE_API")
data=data.encode("utf8")
open(name, "wb").write(data)
zip.close()
else:
# we need to run configure to get various -DHAVE_foo flags on non-windows platforms
# delete existing sqlite3 directory if it exists, but save sqlite3config.h if it exists
sqlite3config_h=None
if os.path.exists("sqlite3/sqlite3config.h"):
sqlite3config_h=read_whole_file("sqlite3/sqlite3config.h", "rb")
if os.path.exists('sqlite3'):
for dirpath, dirnames, filenames in os.walk('sqlite3', topdown=False):
for file in filenames:
os.remove(os.path.join(dirpath, file))
for dir in dirnames:
os.rmdir(os.path.join(dirpath, dir))
os.rmdir('sqlite3')
if self.version.startswith("fossil"):
zip=zipfile.ZipFile(data, "r")
for name in zip.namelist():
# extract
if name.endswith("/"):
os.mkdir(name)
else:
open(name, "wb").write(zip.read(name))
zip.close()
else:
# if you get an exception here it is likely that you don't have the python zlib module
import zlib
tar=tarfile.open("nonexistentname to keep old python happy", 'r', data)
configmember=None
for member in tar.getmembers():
tar.extract(member)
# find first file named configure
if not configmember and member.name.endswith("/configure"):
configmember=member
tar.close()
# the directory name has changed a bit with each release so try to work out what it is
if not configmember:
write("Unable to determine directory it extracted to.", dest=sys.stderr)
sys.exit(19)
dirname=configmember.name.split('/')[0]
os.rename(dirname, 'sqlite3')
os.chdir('sqlite3')
if self.version.startswith("fossil"):
write(" Building amalgamation from fossil")
res=os.system("make TOP=. -f Makefile.linux-gcc sqlite3.c && cp src/sqlite3ext.h .")
defs=[]
if sqlite3config_h:
open("sqlite3config.h", "wb").write(sqlite3config_h)
else:
write(" Running configure to work out SQLite compilation flags")
res=os.system("./configure >/dev/null")
defline=None
for line in read_whole_file("Makefile", "rtU").split("\n"):
if line.startswith("DEFS = "):
defline=line
break
if not defline:
write("Unable to determine compile flags. Create sqlite3/sqlite3config.h to manually set.", sys.stderr)
sys.exit(18)
defs=[]
for part in shlex.split(defline):
if part.startswith("-DHAVE"):
part=part[2:]
if '=' in part:
part=part.split('=', 1)
else:
part=(part, )
defs.append(part)
if res!=0:
raise ValueError("Command execution failed")
if defs:
op=open("sqlite3config.h", "wt")
op.write("""
/* This file was generated by parsing how configure altered the Makefile
which isn't used when building python extensions. It is specific to the
machine and developer components on which it was run. */
\n""")
for define in defs:
op.write('#define %s %s\n' % tuple(define))
op.close()
os.chdir("..")
downloaded+=1
if not downloaded:
write("You didn't specify any components to fetch. Use")
write(" setup.py fetch --help")
write("for a list and details")
raise ValueError("No components downloaded")
# A function for verifying downloads
def verifyurl(self, url, data):
d=["%s" % (len(data),)]
try:
import hashlib
d.append(hashlib.sha1(data).hexdigest())
d.append(hashlib.md5(data).hexdigest())
except ImportError:
import sha
d.append(sha.new(data).hexdigest())
import md5
d.append(md5.new(data).hexdigest())
write(" Length:", d[0], " SHA1:", d[1], " MD5:", d[2])
sums=os.path.join(os.path.dirname(__file__), "checksums")
for line in read_whole_file(sums, "rt").split("\n"):
line=line.strip()
if len(line)==0 or line[0]=="#":
continue
l=[l.strip() for l in line.split()]
if len(l)!=4:
write("Invalid line in checksums file:", line, sys.stderr)
raise ValueError("Bad checksums file")
if l[0]==url:
if l[1:]==d:
write(" Checksums verified")
return
if l[1]!=d[0]:
write("Length does not match. Expected", l[1], "download was", d[0])
if l[2]!=d[1]:
write("SHA does not match. Expected", l[2], "download was", d[1])
if l[3]!=d[2]:
write("MD5 does not match. Expected", l[3], "download was", d[2])
write("The download does not match the checksums distributed with APSW.\n"
"The download should not have changed since the checksums were\n"
          "generated. The
from collections import OrderedDict
import cPickle
import os
def prototype_state():
state = {}
# ----- CONSTANTS -----
# Random seed
state['seed'] = 1234
# Logging level
state['level'] = 'DEBUG'
# Out-of-vocabulary token string
state['oov'] = '<unk>'
# These are end-of-sequence marks
state['end_sym_utterance'] = '</s>'
# Special tokens need to be defined here, because model architecture may adapt depending on these
state['unk_sym'] = 0 # Unknown word token <unk>
state['eos_sym'] = 1 # end-of-utterance symbol </s>
state['eod_sym'] = 2 # end-of-dialogue symbol </d>
state['first_speaker_sym'] = 3 # first speaker symbol <first_speaker>
state['second_speaker_sym'] = 4 # second speaker symbol <second_speaker>
state['third_speaker_sym'] = 5 # third speaker symbol <third_speaker>
state['minor_speaker_sym'] = 6 # minor speaker symbol <minor_speaker>
state['voice_over_sym'] = 7 # voice over symbol <voice_over>
state['off_screen_sym'] = 8 # off screen symbol <off_screen>
state['pause_sym'] = 9 # pause symbol <pause>
# ----- MODEL ARCHITECTURE -----
# If this flag is on, the hidden state between RNNs in subsequences is always initialized to zero.
# Set this to reset all RNN hidden states between 'max_grad_steps' time steps
state['reset_hidden_states_between_subsequences'] = False
    # If this flag is on, the maxout activation function will be applied to the utterance decoder's output unit.
# This requires qdim_decoder = 2x rankdim
state['maxout_out'] = False
    # If this flag is on, a one-layer MLP with a linear activation function will be applied
# on the utterance decoder hidden state before outputting the distribution over words.
state['deep_utterance_decoder_out'] = True
# If this flag is on, there will be an extra MLP between utterance and dialogue encoder
state['deep_dialogue_encoder_input'] = False
# Default and recommended setting is: tanh.
# The utterance encoder and utterance decoder activation function
state['sent_rec_activation'] = 'lambda x: T.tanh(x)'
# The dialogue encoder activation function
state['dialogue_rec_activation'] = 'lambda x: T.tanh(x)'
# Determines how to input the utterance encoder and dialogue encoder into the utterance decoder RNN hidden state:
# - 'first': initializes first hidden state of decoder using encoders
# - 'all': initializes first hidden state of decoder using encoders,
# and inputs all hidden states of decoder using encoders
# - 'selective': initializes first hidden state of decoder using encoders,
# and inputs all hidden states of decoder using encoders.
# Furthermore, a gating function is applied to the encoder input
# to turn off certain dimensions if necessary.
#
# Experiments show that 'all' is most effective.
state['decoder_bias_type'] = 'all'
# Define the gating function for the three RNNs.
state['utterance_encoder_gating'] = 'GRU' # Supports 'None' and 'GRU'
state['dialogue_encoder_gating'] = 'GRU' # Supports 'None' and 'GRU'
state['utterance_decoder_gating'] = 'GRU' # Supports 'None', 'BOW' (Bag of Words), 'GRU' and 'LSTM'
    # If this flag is on, two utterance encoders (one forward and one backward) will be used,
# otherwise only a forward utterance encoder is used.
state['bidirectional_utterance_encoder'] = False
# If this flag is on, there will be a direct connection between utterance encoder and utterance decoder RNNs.
state['direct_connection_between_encoders_and_decoder'] = False
# If this flag is on, there will be an extra MLP between utterance encoder and utterance decoder.
state['deep_direct_connection'] = False
# If the 'direct_connection_between_encoders_and_decoder' is on, then enabling this flag will
# change the model so that it does not use the dialogue encoder (context encoder)
state['disable_dialogue_encoder'] = False
    # If this flag is on, the model will collapse to a standard RNN:
# 1) The utterance+dialogue encoder input to the utterance decoder will be zero
# 2) The utterance decoder will never be reset
# Note this model will always be initialized with a hidden state equal to zero.
state['collaps_to_standard_rnn'] = False
# If this flag is on, the utterance decoder will be reset after each end-of-utterance token.
state['reset_utterance_decoder_at_end_of_utterance'] = True
# If this flag is on, the utterance encoder will be reset after each end-of-utterance token.
state['reset_utterance_encoder_at_end_of_utterance'] = False
# ----- HIDDEN LAYER DIMENSIONS -----
# Dimensionality of (word-level) utterance encoder hidden state
state['qdim_encoder'] = 512
# Dimensionality of (word-level) utterance decoder (RNN which generates output) hidden state
state['qdim_decoder'] = 512
# Dimensionality of (utterance-level) context encoder hidden layer
state['sdim'] = 1000
# Dimensionality of low-rank word embedding approximation
state['rankdim'] = 256
# ----- LATENT VARIABLES WITH VARIATIONAL LEARNING -----
# If this flag is on, a Gaussian latent variable is added at the beginning of each utterance.
# The utterance decoder will be conditioned on this latent variable,
# and the model will be trained using the variational lower bound.
# See, for example, the variational auto-encoder by Kingma et al. (2013).
state['add_latent_gaussian_per_utterance'] = False
# This flag will condition the latent variables on the dialogue encoder
state['condition_latent_variable_on_dialogue_encoder'] = False
# This flag will condition the latent variable on the DCGM (mean pooling over words) encoder.
# This will replace the conditioning on the utterance encoder.
# If the flag is false, the latent variable will be conditioned on the utterance encoder RNN.
state['condition_posterior_latent_variable_on_dcgm_encoder'] = False
# Dimensionality of Gaussian latent variable, which has diagonal covariance matrix.
state['latent_gaussian_per_utterance_dim'] = 10
# This is a constant by which the diagonal covariance matrix is scaled.
# By setting it to a high number (e.g. 1 or 10),
# the KL divergence will be relatively low at the beginning of training.
state['scale_latent_gaussian_variable_variances'] = 10
state['min_latent_gaussian_variable_variances'] = 0.01
state['max_latent_gaussian_variable_variances'] = 10.0
    # If on, a one-layer MLP will be applied to transform the input before computing the prior
# and posterior of the Gaussian latent variable.
state['deep_latent_gaussian_variable_conditioning'] = True
# If this flag is on, the utterance decoder will ONLY be conditioned on the Gaussian latent variable.
state['condition_decoder_only_on_latent_variable'] = False
# If this flag is on, a piecewise latent variable is added at the beginning of each utterance.
# The utterance decoder will be conditioned on this latent variable,
# and the model will be trained using the variational lower bound.
# See, for example, the variational auto-encoder by Kingma et al. (2013).
state['add_latent_piecewise_per_utterance'] = False
# If this flag is on, the posterior piecewise distribution will be interpolated
# with the prior distribution using a linear gating mechanism.
state['gate_latent_piecewise_per_utterance'] = True
state['latent_piecewise_alpha_variables'] = 5
# This is a constant by which the prior piecewise alpha parameters are scaled.
# By setting it to a number in the range (2.0, 10) the piecewise posterior distributions will
# be free to change appropriately to accommodate the real posterior,
# while still leaving some probability mass around 0.5 for the variable to change.
# With scale_latent_piecewise_variable_alpha=10, KL divergence cost is about 10% of overall cost initially.
# With scale_latent_piecewise_variable_alpha=1, KL divergence cost is about 1% of overall cost initially.
state['scale_latent_piecewise_variable_alpha_use_softplus'] = True
state['scale_latent_piecewise_variable_prior_alpha'] = 1.0
state['scale_latent_piecewise_variable_posterior_alpha'] = 1.0
state['latent_piecewise_per_utterance_dim'] = 10
# If parameter tying is enabled, a Gaussian convolution is applied to all the alpha values.
# This makes the alpha values dependent upon each other and ensures that a single sample
# updates the weights of all the alpha values, with larger gradients for nearby values.
# Julian: This only helped slightly in my initial experiments.
state['latent_piecewise_variable_alpha_parameter_tying'] = False
state['latent_piecewise_variable_alpha_parameter_tying_beta'] = 1.0
# If on, a one-layer MLP is applied to transform the input before computing the prior
# and posterior of the piecewise latent variable.
state['deep_latent_piecewise_variable_conditioning'] = True
# If this flag is on, the input to the utterance decoder will be passed through
# a one-layer MLP with rectified linear units.
# If batch normalization or layer normalization is on,
# this will also ensure that the inputs to the decoder RNN are normalized.
state['deep_utterance_decoder_input'] = True
# If this flag is on, the KL-divergence term weight for the latent variables
# will be slowly increased from zero to one.
state['train_latent_variables_with_kl_divergence_annealing'] = False
# The KL-divergence term weight is increased by this parameter for every training batch.
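# Illustrative sketch (not part of the original configuration): the KL-divergence
# annealing described above increases a weight from 0 to 1 by a fixed increment
# on every training batch. The increment value below is an assumption.
def _example_kl_annealing_schedule(increment=1.0 / 60000.0, n_batches=3):
    kl_weight = 0.0
    weights = []
    for _ in range(n_batches):
        kl_weight = min(1.0, kl_weight + increment)
        weights.append(kl_weight)
        # total_cost = reconstruction_cost + kl_weight * kl_divergence_cost
    return weights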
from time import mktime
from django.conf import settings
from django.test import TestCase
from django_eth_events.utils import normalize_address_without_0x
from django_eth_events.web3_service import Web3Service, Web3ServiceProvider
from eth_tester import EthereumTester
from rest_framework.serializers import ValidationError
from web3.providers.eth_tester import EthereumTesterProvider
from chainevents.abis import abi_file_path, load_json_file
from ipfs.ipfs import Ipfs
from ..models import (OutcomeToken, OutcomeTokenBalance,
ScalarEventDescription, TournamentParticipant,
TournamentParticipantBalance)
from ..serializers import (CategoricalEventSerializer,
CentralizedOracleInstanceSerializer,
CentralizedOracleSerializer,
GenericTournamentParticipantEventSerializerTimestamped,
IPFSEventDescriptionDeserializer,
MarketSerializerTimestamped,
OutcomeTokenInstanceSerializer,
OutcomeTokenIssuanceSerializer,
OutcomeTokenRevocationSerializer,
OutcomeTokenTransferSerializer,
ScalarEventSerializer,
TournamentTokenIssuanceSerializer,
TournamentTokenTransferSerializer,
UportTournamentParticipantSerializerEventSerializerTimestamped)
from .factories import (CategoricalEventDescriptionFactory,
CategoricalEventFactory, CentralizedOracleFactory,
EventFactory, MarketFactory,
OutcomeTokenBalanceFactory, OutcomeTokenFactory,
ScalarEventDescriptionFactory, ScalarEventFactory,
TournamentParticipantBalanceFactory, generate_eth_account)
from .utils import tournament_token_bytecode
class TestSerializers(TestCase):
def setUp(self):
self.ipfs = Ipfs()
def test_create_centralized_oracle(self):
oracle = CentralizedOracleFactory()
block = {
'number': oracle.creation_block,
'timestamp': mktime(oracle.creation_date_time.timetuple())
}
oracle_event = {
'address': oracle.factory,
'params': [
{
'name': 'creator',
'value': oracle.creator
},
{
'name': 'centralizedOracle',
'value': oracle.address,
},
{
'name': 'ipfsHash',
'value': oracle.event_description.ipfs_hash[1:-7] + ''
}
]
}
oracle.delete()
s = CentralizedOracleSerializer(data=oracle_event, block=block)
# ipfs_hash not saved to IPFS
self.assertFalse(s.is_valid(), s.errors)
# oracle.event_description
event_description_json = {
'title': oracle.event_description.title,
'description': oracle.event_description.description,
'resolutionDate': oracle.event_description.resolution_date.isoformat(),
'outcomes': ['Yes', 'No']
}
# save event_description to IPFS
ipfs_hash = self.ipfs.post(event_description_json)
oracle_event.get('params')[2]['value'] = ipfs_hash
s = CentralizedOracleSerializer(data=oracle_event, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
def test_create_centralized_oracle_scalar_event_with_outcomes(self):
oracle = CentralizedOracleFactory()
block = {
'number': oracle.creation_block,
'timestamp': mktime(oracle.creation_date_time.timetuple())
}
oracle_event = {
'address': oracle.factory,
'params': [
{
'name': 'creator',
'value': oracle.creator
},
{
'name': 'centralizedOracle',
'value': oracle.address,
},
{
'name': 'ipfsHash',
'value': 'something unknown'
}
]
}
# remove test oracle before creating it again
oracle.delete()
s = CentralizedOracleSerializer(data=oracle_event, block=block)
# ipfs_hash not saved to IPFS
self.assertFalse(s.is_valid(), s.errors)
# oracle.event_description
event_description_json = {
'title': oracle.event_description.title,
'description': oracle.event_description.description,
'resolutionDate': oracle.event_description.resolution_date.isoformat(),
'outcomes': [],
'unit': 'unit',
'decimals': 2
}
# save event_description to IPFS
ipfs_hash = self.ipfs.post(event_description_json)
oracle_event.get('params')[2]['value'] = ipfs_hash
s = CentralizedOracleSerializer(data=oracle_event, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
self.assertEqual(ScalarEventDescription.objects.filter(ipfs_hash=instance.event_description.ipfs_hash).count(), 1)
def test_event_description_different_outcomes(self):
oracle = CentralizedOracleFactory()
oracle.event_description.outcomes = ['Yes', 'No', 'Third']
oracle.event_description.save()
event = CategoricalEventFactory(oracle=oracle)
# Categorical Event with different outcomes
block = {
'number': event.creation_block,
'timestamp': mktime(event.creation_date_time.timetuple())
}
categorical_event = {
'address': event.factory,
'params': [
{
'name': 'creator',
'value': event.creator
},
{
'name': 'collateralToken',
'value': event.creator
},
{
'name': 'oracle',
'value': oracle.address
},
{
'name': 'outcomeCount',
'value': 2
},
{
'name': 'categoricalEvent',
'value': event.address,
}
]
}
event.delete()
s = CategoricalEventSerializer(data=categorical_event, block=block)
self.assertFalse(s.is_valid())
categorical_event['params'][3]['value'] = 3
s2 = CategoricalEventSerializer(data=categorical_event, block=block)
self.assertTrue(s2.is_valid(), s2.errors)
instance = s2.save()
self.assertIsNotNone(instance)
def test_create_scalar_with_categorical_description(self):
event_description = CategoricalEventDescriptionFactory()
oracle = CentralizedOracleFactory(event_description=event_description)
# Scalar Event with different outcomes
event = ScalarEventFactory()
block = {
'number': event.creation_block,
'timestamp': mktime(event.creation_date_time.timetuple())
}
scalar_event = {
'address': event.factory,
'params': [
{
'name': 'creator',
'value': event.creator
},
{
'name': 'collateralToken',
'value': event.creator
},
{
'name': 'oracle',
'value': oracle.address
},
{
'name': 'upperBound',
'value': event.upper_bound
},
{
'name': 'lowerBound',
'value': event.lower_bound
},
{
'name': 'scalarEvent',
'value': event.address,
}
]
}
event.delete()
s = ScalarEventSerializer(data=scalar_event, block=block)
self.assertFalse(s.is_valid())
scalar_event['params'][3]['value'] = 3
scalar_description = ScalarEventDescriptionFactory()
oracle.event_description = scalar_description
oracle.save()
s2 = ScalarEventSerializer(data=scalar_event, block=block)
self.assertTrue(s2.is_valid(), s2.errors)
instance = s2.save()
self.assertIsNotNone(instance)
def test_create_scalar_event(self):
event = ScalarEventFactory()
event_description = ScalarEventDescriptionFactory()
oracle = CentralizedOracleFactory(event_description=event_description)
block = {
'number': event.creation_block,
'timestamp': mktime(event.creation_date_time.timetuple())
}
scalar_event = {
'address': event.factory,
'params': [
{
'name': 'creator',
'value': event.creator
},
{
'name': 'collateralToken',
'value': event.collateral_token
},
{
'name': 'oracle',
'value': oracle.address
},
{
'name': 'upperBound',
'value': 1
},
{
'name': 'lowerBound',
'value': 0
}
]
}
event.delete()
s = ScalarEventSerializer(data=scalar_event, block=block)
self.assertFalse(s.is_valid(), s.errors)
scalar_event.get('params').append({
'name': 'scalarEvent',
'value': event.address
})
s = ScalarEventSerializer(data=scalar_event, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
def test_create_categorical_event(self):
event = CategoricalEventFactory()
oracle = CentralizedOracleFactory()
block = {
'number': event.creation_block,
'timestamp': mktime(event.creation_date_time.timetuple())
}
categorical_event = {
'address': event.factory,
'params': [
{
'name': 'creator',
'value': event.creator
},
{
'name': 'collateralToken',
'value': event.collateral_token
},
{
'name': 'oracle',
'value': oracle.address
},
{
'name': 'outcomeCount',
'value': 2
}
]
}
event.delete()
s = CategoricalEventSerializer(data=categorical_event, block=block)
self.assertFalse(s.is_valid(), s.errors)
categorical_event.get('params').append({
'name': 'categoricalEvent',
'value': event.address
})
s = CategoricalEventSerializer(data=categorical_event, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
def test_create_market(self):
oracle = CentralizedOracleFactory()
event = CategoricalEventFactory(oracle=oracle)
market = MarketFactory()
block = {
'number': market.creation_block,
'timestamp': mktime(market.creation_date_time.timetuple())
}
market_dict = {
'address': market.factory,
'params': [
{
'name': 'creator',
'value': market.creator
},
{
'name': 'centralizedOracle',
'value': oracle.address,
},
{
'name': 'marketMaker',
'value': market.market_maker
},
{
'name': 'fee',
'value': market.fee
},
{
'name': 'market',
'value': market.address
}
]
}
market.delete()
s = MarketSerializerTimestamped(data=market_dict, block=block)
self.assertFalse(s.is_valid(), s.errors)
market_dict.get('params').append({
'name': 'eventContract',
'value': market.address
})
market_dict.get('params').append({
'name': 'fee',
'value': market.fee
})
s = MarketSerializerTimestamped(data=market_dict, block=block)
self.assertFalse(s.is_valid(), s.errors)
market_dict.get('params')[-2]['value'] = event.address
s = MarketSerializerTimestamped(data=market_dict, block=block)
self.assertFalse(s.is_valid(), s.errors)
marketMaker = [x for x in market_dict.get('params') if x.get('name') == 'marketMaker'][0]
marketMaker.update({'value': normalize_address_without_0x(settings.LMSR_MARKET_MAKER)})
s = MarketSerializerTimestamped(data=market_dict, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
def test_create_market_with_multiple_addresses(self):
oracle = CentralizedOracleFactory()
event = CategoricalEventFactory(oracle=oracle)
market = MarketFactory()
# Run in updated settings context
with self.settings(LMSR_MARKET_MAKER="0x{},0x{}".format(market.market_maker,
generate_eth_account(only_address=True))):
block = {
'number': market.creation_block,
'timestamp': mktime(market.creation_date_time.timetuple())
}
market_dict = {
'address': market.factory,
'params': [
{
'name': 'creator',
'value': market.creator
},
{
'name': 'centralizedOracle',
'value': oracle.address,
},
{
'name': 'marketMaker',
'value': market.market_maker
},
{
'name': 'fee',
'value': market.fee
},
{
'name': 'market',
'value': market.address
},
{
'name': 'eventContract',
'value': event.address
},
{
'name': 'fee',
'value': market.fee
}
]
}
market.delete()
s = MarketSerializerTimestamped(data=market_dict, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
# Test lmsr market marker address doesn't exists
market_dict['params'][2]['value'] = generate_eth_account(only_address=True)
s = MarketSerializerTimestamped(data=market_dict, block=block)
self.assertFalse(s.is_valid())
def test_create_categorical_event_description(self):
event_description = CategoricalEventDescriptionFactory()
event_description.delete()
categorical_event_description_json = {
'title': event_description.title,
'description': event_description.description,
'resolution_date': event_description.resolution_date.isoformat(),
'outcomes': event_description.outcomes
}
ipfs_hash = self.ipfs.post(categorical_event_description_json)
serializer = IPFSEventDescriptionDeserializer(data={'ipfs_hash': ipfs_hash})
self.assertTrue(serializer.is_valid(), serializer.errors)
self.assertIsNotNone(serializer.save())
def test_create_scalar_event_description(self):
event_description = ScalarEventDescriptionFactory()
event_description.delete()
scalar_event_description_json = {
'title': event_description.title,
'description': event_description.description,
'resolution_date': event_description.resolution_date.isoformat(),
'unit': event_description.unit,
'decimals': event_description.decimals,
}
ipfs_hash = self.ipfs.post(scalar_event_description_json)
serializer = IPFSEventDescriptionDeserializer(data={'ipfs_hash': ipfs_hash})
self.assertTrue(serializer.is_valid(), serializer.errors)
self.assertIsNotNone(serializer.save())
def test_create_centralized_oracle_instance(self):
oracle = CentralizedOracleFactory()
oracle.delete()
# oracle.event_description
event_description_json = {
'title': oracle.event_description.title,
'description': oracle.event_description.description,
'resolutionDate': oracle.event_description.resolution_date.isoformat(),
'outcomes': oracle.event_description.outcomes
}
block = {
'number': oracle.creation_block,
'timestamp': mktime(oracle.creation_date_time.timetuple())
}
oracle_event = {
'address': oracle.factory,
'params': [
{
'name': 'creator',
'value': oracle.creator
},
{
'name': 'centralizedOracle',
'value': oracle.address,
},
{
'name': 'ipfsHash',
'value': oracle.event_description.ipfs_hash
}
]
}
s = CentralizedOracleInstanceSerializer(data=oracle_event, block=block)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
def test_create_outcome_token_instance(self):
outcome_token = OutcomeTokenFactory()
event = ScalarEventFactory()
outcome_token_event = {
'address': event.address,
'params': [
{
'name': 'outcomeToken',
'value': outcome_token.address,
},
{
'name': 'index',
'value': outcome_token.index
}
]
}
outcome_token.delete()
s = OutcomeTokenInstanceSerializer(data=outcome_token_event)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertIsNotNone(instance)
def test_issuance_outcome_token(self):
outcome_token = OutcomeTokenFactory()
event = EventFactory()
issuance_event = {
'name': 'Issuance',
'address': outcome_token.address,
'params': [
{
'name': 'owner',
'value': event.address
},
{
'name': 'amount',
'value': 20,
}
]
}
s = OutcomeTokenIssuanceSerializer(data=issuance_event)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
balance = OutcomeTokenBalance.objects.get(owner=event.address)
self.assertEqual(balance.balance, 20)
self.assertIsNotNone(instance)
self.assertEqual(OutcomeToken.objects.get(address=outcome_token.address).total_supply,
outcome_token.total_supply + 20)
def test_revocation_outcome_token(self):
balance = OutcomeTokenBalanceFactory()
balance.balance = 20
balance.save()
issuance_event = {
'name': 'Revocation',
'address': balance.outcome_token.address,
'params': [
{
'name': 'owner',
'value': balance.owner
},
{
'name': 'amount',
'value': 20,
}
]
}
s = OutcomeTokenRevocationSerializer(data=issuance_event)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertEqual(OutcomeTokenBalance.objects.get(owner=balance.owner).balance, 0)
self.assertIsNotNone(instance)
self.assertEqual(OutcomeToken.objects.get(address=balance.outcome_token.address).total_supply,
balance.outcome_token.total_supply - 20)
def test_transfer_outcome_token(self):
outcome_token_balance = OutcomeTokenBalanceFactory()
event = EventFactory()
outcome_token_balance.balance = 20
outcome_token_balance.save()
transfer_event = {
'name': 'Transfer',
'address': outcome_token_balance.outcome_token.address,
'params': [
{
'name': 'from',
'value': outcome_token_balance.owner
},
{
'name': 'to',
'value': event.address
},
{
'name': 'value',
'value': outcome_token_balance.balance,
}
]
}
s = OutcomeTokenTransferSerializer(data=transfer_event)
self.assertTrue(s.is_valid(), s.errors)
instance = s.save()
self.assertEqual(OutcomeTokenBalance.objects.get(owner=outcome_token_balance.owner).balance, 0)
self.assertIsNotNone(instance)
self.assertEqual(instance.owner, event.address)
self.assertEqual(instance.balance, 20)
def test_save_generic_tournament_participant(self):
oracle = CentralizedOracleFactory()
block = {
'number': oracle.creation_block,
'timestamp': mktime(oracle.creation_date_time.timetuple())
}
contract_address = "d833215cbcc3f914bd1c9ece3ee7bf8b14f841bb"
registrant_address = "90f8bf6a479f320ead074411a4b0e7944ea8c9c1"
registrant_address2 = "80f8bf6a479f320ead074411a4b0e7944ea8c9c2"
registered_mainnet_address = "ffcf8fdee72ac11b5c542428b35eef5769c409f0"
participant_event = {
"address": contract_address,
"name": "AddressRegistration",
"params": [
{
"name": "registrant",
"value": registrant_address,
},
{
"name": "registeredMainnetAddress",
"value": registered_mainnet_address,
}
]
}
s = GenericTournamentParticipantEventSerializerTimestamped(data=participant_event, block=block)
self.assertTrue(s.is_valid(), s.errors)
self.assertEqual(TournamentParticipant.objects.all().count(), 0)
instance = s.save()
self.assertEqual(TournamentParticipant.objects.all().count(), 1)
self.assertEqual(TournamentParticipant.objects.first().tournament_balance.balance, 0)
self.assertIsNotNone(instance)
self.assertEqual(instance.address, registrant_address)
self.assertEqual(instance.mainnet_address, registered_mainnet_address)
web3_service = Web3Service(provider=EthereumTesterProvider(EthereumTester()))
web3 = web3_service.web3
checksumed_registrant_address2 = web3.toChecksumAddress('0x' + registrant_address2)
tournament_token_abi = load_json_file(abi_file_path('TournamentToken.json'))
# create tournament token
tournament_token = web3.eth.contract(abi=tournament_token_abi, bytecode=tournament_token_bytecode)
tx_hash = tournament_token.constructor().transact()
tournament_token_address = web3.eth.getTransactionReceipt(tx_hash).get('contractAddress')
self.assertIsNotNone(tournament_token_address)
# Get token instance
token_contract = web3.eth.contract(abi=tournament_token_abi, address=tournament_token_address)
# Issue tokens
tokens_amount = 100
tx_hash = token_contract.functions.issue([checksumed_registrant_address2], tokens_amount).transact(
{
'from': web3.eth.coinbase
}
)
blockchain_balance = token_contract.functions.balanceOf(checksumed_registrant_address2).call()
self.assertEqual(blockchain_balance, tokens_amount)
# Save participant 2
oracle = CentralizedOracleFactory()
block = {
'number': oracle.creation_block,
'timestamp': mktime(oracle.creation_date_time.timetuple())
}
# This file was generated automatically by generate_protocols.py
from nintendo.nex import common, streams
import logging
logger = logging.getLogger(__name__)
class ClearCondition:
NORMAL = 0
COLLECT_COINS = 4116396131
KILL_SKIPSQUEAKS = 4042480826
class CourseDifficulty:
EASY = 0
STANDARD = 1
EXPERT = 2
SUPER_EXPERT = 3
class CourseOption:
PLAY_STATS = 1
RATINGS = 2
TIME_STATS = 4
COMMENT_STATS = 8
UNK9 = 16
UNK10 = 32
UNK8 = 64
ONE_SCREEN_THUMBNAIL = 128
ENTIRE_THUMBNAIL = 256
ALL = 511
class CourseTag:
NONE = 0
STANDARD = 1
PUZZLE_SOLVING = 2
SPEEDRUN = 3
AUTOSCROLL = 4
AUTO_MARIO = 5
SHORT_AND_SWEET = 6
MULTIPLAYER_VS = 7
THEMED = 8
MUSIC = 9
class CourseTheme:
GROUND = 0
UNDERGROUND = 1
CASTLE = 2
AIRSHIP = 3
UNDERWATER = 4
GHOST_HOUSE = 5
SNOW = 6
DESERT = 7
SKY = 8
FOREST = 9
class EventCourseOption:
UNK3 = 1
GET_INFO = 2
BEST_TIME = 8
ONE_SCREEN_THUMBNAIL = 16
ENTIRE_THUMBNAIL = 32
UNK1 = 64
MEDAL_TIME = 256
GHOST = 512
ALL = 1023
class GameStyle:
SMB1 = 0
SMB3 = 1
SMW = 2
NSMBU = 3
SM3DW = 4
class MultiplayerStatsKeys:
MULTIPLAYER_SCORE = 0
VERSUS_PLAYS = 2
VERSUS_WINS = 3
COOP_PLAYS = 10
COOP_WINS = 11
class PlayStatsKeys:
PLAYS = 0
CLEARS = 1
ATTEMPTS = 2
DEATHS = 3
class UserOption:
PLAY_STATS = 1
MAKER_STATS = 2
UNK2 = 4
ENDLESS_MODE = 8
MULTIPLAYER_STATS = 16
BADGE_INFO = 32
UNK8 = 64
UNK9 = 128
UNK1 = 512
UNK7 = 1024
UNK11 = 4096
UNK13 = 8192
UNK15 = 32768
ALL = 65535
class BadgeInfo(common.Structure):
def __init__(self):
super().__init__()
self.unk1 = None
self.unk2 = None
def check_required(self, settings):
for field in ['unk1', 'unk2']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.unk1 = stream.u16()
self.unk2 = stream.u8()
def save(self, stream):
self.check_required(stream.settings)
stream.u16(self.unk1)
stream.u8(self.unk2)
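# Illustrative sketch (not part of the generated file): check_required() raises
# a ValueError while any required field of a structure is still None. The
# settings argument is unused by BadgeInfo.check_required, so None is passed here.
def _example_badge_info_check():
    info = BadgeInfo()
    try:
        info.check_required(None)
    except ValueError as error:
        print(error)  # "No value assigned to required field: unk1"
    info.unk1 = 1
    info.unk2 = 2
    info.check_required(None)  # passes once every required field is set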
class CommentInfo(common.Structure):
def __init__(self):
super().__init__()
self.unk1 = None
self.unk2 = None
self.unk3 = None
self.unk4 = None
self.unk5 = None
self.unk6 = None
self.unk7 = None
self.unk8 = None
self.unk9 = None
self.unk10 = None
self.unk11 = None
self.unk12 = None
self.unk13 = None
self.unk14 = None
self.unk15 = None
self.picture = CommentPictureReqGetInfoWithoutHeaders()
self.unk16 = None
self.unk17 = None
def check_required(self, settings):
for field in ['unk1', 'unk2', 'unk3', 'unk4', 'unk5', 'unk6', 'unk7', 'unk8', 'unk9', 'unk10', 'unk11', 'unk12', 'unk13', 'unk14', 'unk15', 'unk16', 'unk17']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.unk1 = stream.u64()
self.unk2 = stream.string()
self.unk3 = stream.u8()
self.unk4 = stream.u8()
self.unk5 = stream.u64()
self.unk6 = stream.u16()
self.unk7 = stream.u16()
self.unk8 = stream.u8()
self.unk9 = stream.u8()
self.unk10 = stream.u8()
self.unk11 = stream.bool()
self.unk12 = stream.bool()
self.unk13 = stream.datetime()
self.unk14 = stream.qbuffer()
self.unk15 = stream.string()
self.picture = stream.extract(CommentPictureReqGetInfoWithoutHeaders)
self.unk16 = stream.u16()
self.unk17 = stream.u8()
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.unk1)
stream.string(self.unk2)
stream.u8(self.unk3)
stream.u8(self.unk4)
stream.u64(self.unk5)
stream.u16(self.unk6)
stream.u16(self.unk7)
stream.u8(self.unk8)
stream.u8(self.unk9)
stream.u8(self.unk10)
stream.bool(self.unk11)
stream.bool(self.unk12)
stream.datetime(self.unk13)
stream.qbuffer(self.unk14)
stream.string(self.unk15)
stream.add(self.picture)
stream.u16(self.unk16)
stream.u8(self.unk17)
class CommentPictureReqGetInfoWithoutHeaders(common.Structure):
def __init__(self):
super().__init__()
self.url = None
self.data_type = None
self.unk1 = None
self.unk2 = None
self.filename = None
def check_required(self, settings):
for field in ['url', 'data_type', 'unk1', 'unk2', 'filename']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.url = stream.string()
self.data_type = stream.u8()
self.unk1 = stream.u32()
self.unk2 = stream.buffer()
self.filename = stream.string()
def save(self, stream):
self.check_required(stream.settings)
stream.string(self.url)
stream.u8(self.data_type)
stream.u32(self.unk1)
stream.buffer(self.unk2)
stream.string(self.filename)
class CourseInfo(common.Structure):
def __init__(self):
super().__init__()
self.data_id = None
self.code = None
self.owner_id = None
self.name = None
self.description = None
self.game_style = None
self.course_theme = None
self.upload_time = None
self.difficulty = None
self.tag1 = None
self.tag2 = None
self.unk1 = None
self.clear_condition = None
self.clear_condition_magnitude = None
self.unk2 = None
self.unk3 = None
self.play_stats = None
self.ratings = None
self.unk4 = None
self.time_stats = CourseTimeStats()
self.comment_stats = None
self.unk9 = None
self.unk10 = None
self.unk11 = None
self.unk12 = None
self.one_screen_thumbnail = RelationObjectReqGetInfo()
self.entire_thumbnail = RelationObjectReqGetInfo()
def check_required(self, settings):
for field in ['data_id', 'code', 'owner_id', 'name', 'description', 'game_style', 'course_theme', 'upload_time', 'difficulty', 'tag1', 'tag2', 'unk1', 'clear_condition', 'clear_condition_magnitude', 'unk2', 'unk3', 'play_stats', 'ratings', 'unk4', 'comment_stats', 'unk9', 'unk10', 'unk11', 'unk12']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.data_id = stream.u64()
self.code = stream.string()
self.owner_id = stream.pid()
self.name = stream.string()
self.description = stream.string()
self.game_style = stream.u8()
self.course_theme = stream.u8()
self.upload_time = stream.datetime()
self.difficulty = stream.u8()
self.tag1 = stream.u8()
self.tag2 = stream.u8()
self.unk1 = stream.u8()
self.clear_condition = stream.u32()
self.clear_condition_magnitude = stream.u16()
self.unk2 = stream.u16()
self.unk3 = stream.qbuffer()
self.play_stats = stream.map(stream.u8, stream.u32)
self.ratings = stream.map(stream.u8, stream.u32)
self.unk4 = stream.map(stream.u8, stream.u32)
self.time_stats = stream.extract(CourseTimeStats)
self.comment_stats = stream.map(stream.u8, stream.u32)
self.unk9 = stream.u8()
self.unk10 = stream.u8()
self.unk11 = stream.u8()
self.unk12 = stream.u8()
self.one_screen_thumbnail = stream.extract(RelationObjectReqGetInfo)
self.entire_thumbnail = stream.extract(RelationObjectReqGetInfo)
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.string(self.code)
stream.pid(self.owner_id)
stream.string(self.name)
stream.string(self.description)
stream.u8(self.game_style)
stream.u8(self.course_theme)
stream.datetime(self.upload_time)
stream.u8(self.difficulty)
stream.u8(self.tag1)
stream.u8(self.tag2)
stream.u8(self.unk1)
stream.u32(self.clear_condition)
stream.u16(self.clear_condition_magnitude)
stream.u16(self.unk2)
stream.qbuffer(self.unk3)
stream.map(self.play_stats, stream.u8, stream.u32)
stream.map(self.ratings, stream.u8, stream.u32)
stream.map(self.unk4, stream.u8, stream.u32)
stream.add(self.time_stats)
stream.map(self.comment_stats, stream.u8, stream.u32)
stream.u8(self.unk9)
stream.u8(self.unk10)
stream.u8(self.unk11)
stream.u8(self.unk12)
stream.add(self.one_screen_thumbnail)
stream.add(self.entire_thumbnail)
class CourseTimeStats(common.Structure):
def __init__(self):
super().__init__()
self.first_completion = None
self.world_record_holder = None
self.world_record = None
self.upload_time = None
def check_required(self, settings):
for field in ['first_completion', 'world_record_holder', 'world_record', 'upload_time']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.first_completion = stream.pid()
self.world_record_holder = stream.pid()
self.world_record = stream.u32()
self.upload_time = stream.u32()
def save(self, stream):
self.check_required(stream.settings)
stream.pid(self.first_completion)
stream.pid(self.world_record_holder)
stream.u32(self.world_record)
stream.u32(self.upload_time)
class DataStoreCompletePostParam(common.Structure):
def __init__(self):
super().__init__()
self.data_id = None
self.success = None
def check_required(self, settings):
for field in ['data_id', 'success']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.data_id = stream.u64()
self.success = stream.bool()
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.bool(self.success)
class DataStoreGetMetaParam(common.Structure):
def __init__(self):
super().__init__()
self.data_id = 0
self.persistence_target = DataStorePersistenceTarget()
self.result_option = 0
self.access_password = 0
def check_required(self, settings):
pass
def load(self, stream):
self.data_id = stream.u64()
self.persistence_target = stream.extract(DataStorePersistenceTarget)
self.result_option = stream.u8()
self.access_password = stream.u64()
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.add(self.persistence_target)
stream.u8(self.result_option)
stream.u64(self.access_password)
class DataStoreKeyValue(common.Structure):
def __init__(self):
super().__init__()
self.key = None
self.value = None
def check_required(self, settings):
for field in ['key', 'value']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.key = stream.string()
self.value = stream.string()
def save(self, stream):
self.check_required(stream.settings)
stream.string(self.key)
stream.string(self.value)
class DataStoreMetaInfo(common.Structure):
def __init__(self):
super().__init__()
self.data_id = None
self.owner_id = None
self.size = None
self.name = None
self.data_type = None
self.meta_binary = None
self.permission = DataStorePermission()
self.delete_permission = DataStorePermission()
self.create_time = None
self.update_time = None
self.period = None
self.status = None
self.referred_count = None
self.refer_data_id = None
self.flag = None
self.referred_time = None
self.expire_time = None
self.tags = None
self.ratings = None
def check_required(self, settings):
for field in ['data_id', 'owner_id', 'size', 'name', 'data_type', 'meta_binary', 'create_time', 'update_time', 'period', 'status', 'referred_count', 'refer_data_id', 'flag', 'referred_time', 'expire_time', 'tags', 'ratings']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.data_id = stream.u64()
self.owner_id = stream.pid()
self.size = stream.u32()
self.name = stream.string()
self.data_type = stream.u16()
self.meta_binary = stream.qbuffer()
self.permission = stream.extract(DataStorePermission)
self.delete_permission = stream.extract(DataStorePermission)
self.create_time = stream.datetime()
self.update_time = stream.datetime()
self.period = stream.u16()
self.status = stream.u8()
self.referred_count = stream.u32()
self.refer_data_id = stream.u32()
self.flag = stream.u32()
self.referred_time = stream.datetime()
self.expire_time = stream.datetime()
self.tags = stream.list(stream.string)
self.ratings = stream.list(DataStoreRatingInfoWithSlot)
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.pid(self.owner_id)
stream.u32(self.size)
stream.string(self.name)
stream.u16(self.data_type)
stream.qbuffer(self.meta_binary)
stream.add(self.permission)
stream.add(self.delete_permission)
stream.datetime(self.create_time)
stream.datetime(self.update_time)
stream.u16(self.period)
stream.u8(self.status)
stream.u32(self.referred_count)
stream.u32(self.refer_data_id)
stream.u32(self.flag)
stream.datetime(self.referred_time)
stream.datetime(self.expire_time)
stream.list(self.tags, stream.string)
stream.list(self.ratings, stream.add)
class DataStorePasswordInfo(common.Structure):
def __init__(self):
super().__init__()
self.data_id = None
self.access_password = None
self.update_password = None
def check_required(self, settings):
for field in ['data_id', 'access_password', 'update_password']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.data_id = stream.u64()
self.access_password = stream.u64()
self.update_password = stream.u64()
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.u64(self.access_password)
stream.u64(self.update_password)
class DataStorePermission(common.Structure):
def __init__(self):
super().__init__()
self.permission = 3
self.recipients = []
def check_required(self, settings):
pass
def load(self, stream):
self.permission = stream.u8()
self.recipients = stream.list(stream.pid)
def save(self, stream):
self.check_required(stream.settings)
stream.u8(self.permission)
stream.list(self.recipients, stream.pid)
class DataStorePersistenceInitParam(common.Structure):
def __init__(self):
super().__init__()
self.persistence_id = 65535
self.delete_last_object = True
def check_required(self, settings):
pass
def load(self, stream):
self.persistence_id = stream.u16()
self.delete_last_object = stream.bool()
def save(self, stream):
self.check_required(stream.settings)
stream.u16(self.persistence_id)
stream.bool(self.delete_last_object)
class DataStorePersistenceTarget(common.Structure):
def __init__(self):
super().__init__()
self.owner_id = 0
self.persistence_id = 65535
def check_required(self, settings):
pass
def load(self, stream):
self.owner_id = stream.pid()
self.persistence_id = stream.u16()
def save(self, stream):
self.check_required(stream.settings)
stream.pid(self.owner_id)
stream.u16(self.persistence_id)
class DataStorePrepareGetParam(common.Structure):
def __init__(self):
super().__init__()
self.data_id = 0
self.lock_id = 0
self.persistence_target = DataStorePersistenceTarget()
self.access_password = 0
self.extra_data = []
def check_required(self, settings):
if settings.get("nex.version") >= 30500:
pass
def load(self, stream):
self.data_id = stream.u64()
self.lock_id = stream.u32()
self.persistence_target = stream.extract(DataStorePersistenceTarget)
self.access_password = stream.u64()
if stream.settings.get("nex.version") >= 30500:
self.extra_data = stream.list(stream.string)
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.u32(self.lock_id)
stream.add(self.persistence_target)
stream.u64(self.access_password)
if stream.settings.get("nex.version") >= 30500:
stream.list(self.extra_data, stream.string)
class DataStorePrepareGetParamV1(common.Structure):
def __init__(self):
super().__init__()
self.data_id = None
self.lock_id = 0
def check_required(self, settings):
for field in ['data_id']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.data_id = stream.u64()
self.lock_id = stream.u32()
def save(self, stream):
self.check_required(stream.settings)
stream.u64(self.data_id)
stream.u32(self.lock_id)
class DataStorePreparePostParam(common.Structure):
def __init__(self):
super().__init__()
self.size = None
self.name = ""
self.data_type = 0
self.meta_binary = b""
self.permission = DataStorePermission()
self.delete_permission = DataStorePermission()
self.flag = None
self.period = None
self.refer_data_id = 0
self.tags = []
self.rating_init_param = []
self.persistence_init_param = DataStorePersistenceInitParam()
self.extra_data = None
def check_required(self, settings):
for field in ['size', 'flag', 'period']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
if settings.get("nex.version") >= 30500:
for field in ['extra_data']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.size = stream.u32()
self.name = stream.string()
self.data_type = stream.u16()
self.meta_binary = stream.qbuffer()
self.permission = stream.extract(DataStorePermission)
self.delete_permission = stream.extract(DataStorePermission)
self.flag = stream.u32()
self.period = stream.u16()
self.refer_data_id = stream.u32()
self.tags = stream.list(stream.string)
self.rating_init_param = stream.list(DataStoreRatingInitParamWithSlot)
self.persistence_init_param = stream.extract(DataStorePersistenceInitParam)
if stream.settings.get("nex.version") >= 30500:
self.extra_data = stream.list(stream.string)
def save(self, stream):
self.check_required(stream.settings)
stream.u32(self.size)
stream.string(self.name)
stream.u16(self.data_type)
stream.qbuffer(self.meta_binary)
stream.add(self.permission)
stream.add(self.delete_permission)
stream.u32(self.flag)
stream.u16(self.period)
stream.u32(self.refer_data_id)
stream.list(self.tags, stream.string)
stream.list(self.rating_init_param, stream.add)
stream.add(self.persistence_init_param)
if stream.settings.get("nex.version") >= 30500:
stream.list(self.extra_data, stream.string)
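# Illustrative sketch (not part of the generated file): extra_data only becomes
# a required field when the negotiated NEX version value is at least 30500
# (roughly NEX 3.5+). The settings stand-in below is hypothetical and only
# mimics the settings.get("nex.version") lookup used above.
class _ExampleSettings:
    def __init__(self, version):
        self.version = version
    def get(self, key):
        return self.version if key == "nex.version" else None

def _example_version_gated_check():
    param = DataStorePreparePostParam()
    param.size = 0
    param.flag = 0
    param.period = 0
    param.check_required(_ExampleSettings(30400))  # passes, extra_data optional
    try:
        param.check_required(_ExampleSettings(30500))  # raises, extra_data required
    except ValueError as error:
        print(error)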
class DataStoreRatingInfo(common.Structure):
def __init__(self):
super().__init__()
self.total_value = None
self.count = None
self.initial_value = None
def check_required(self, settings):
for field in ['total_value', 'count', 'initial_value']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.total_value = stream.s64()
self.count = stream.u32()
self.initial_value = stream.s64()
def save(self, stream):
self.check_required(stream.settings)
stream.s64(self.total_value)
stream.u32(self.count)
stream.s64(self.initial_value)
class DataStoreRatingInfoWithSlot(common.Structure):
def __init__(self):
super().__init__()
self.slot = None
self.info = DataStoreRatingInfo()
def check_required(self, settings):
for field in ['slot']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.slot = stream.u8()
self.info = stream.extract(DataStoreRatingInfo)
def save(self, stream):
self.check_required(stream.settings)
stream.u8(self.slot)
stream.add(self.info)
class DataStoreRatingInitParam(common.Structure):
def __init__(self):
super().__init__()
self.flag = None
self.internal_flag = None
self.lock_type = None
self.initial_value = None
self.range_min = None
self.range_max = None
self.period_hour = None
self.period_duration = None
def check_required(self, settings):
for field in ['flag', 'internal_flag', 'lock_type', 'initial_value', 'range_min', 'range_max', 'period_hour', 'period_duration']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.flag = stream.u8()
self.internal_flag = stream.u8()
self.lock_type = stream.u8()
self.initial_value = stream.s64()
self.range_min = stream.s32()
self.range_max = stream.s32()
self.period_hour = stream.s8()
self.period_duration = stream.s16()
def save(self, stream):
self.check_required(stream.settings)
stream.u8(self.flag)
stream.u8(self.internal_flag)
stream.u8(self.lock_type)
stream.s64(self.initial_value)
stream.s32(self.range_min)
stream.s32(self.range_max)
stream.s8(self.period_hour)
stream.s16(self.period_duration)
class DataStoreRatingInitParamWithSlot(common.Structure):
def __init__(self):
super().__init__()
self.slot = None
self.param = DataStoreRatingInitParam()
def check_required(self, settings):
for field in ['slot']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.slot = stream.s8()
self.param = stream.extract(DataStoreRatingInitParam)
def save(self, stream):
self.check_required(stream.settings)
stream.s8(self.slot)
stream.add(self.param)
class DataStoreReqGetInfo(common.Structure):
def __init__(self):
super().__init__()
self.url = None
self.headers = None
self.size = None
self.root_ca_cert = None
self.data_id = None
def check_required(self, settings):
for field in ['url', 'headers', 'size', 'root_ca_cert']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
if settings.get("nex.version") >= 30500:
for field in ['data_id']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
def load(self, stream):
self.url = stream.string()
self.headers = stream.list(DataStoreKeyValue)
self.size = stream.u32()
self.root_ca_cert = stream.buffer()
if stream.settings.get("nex.version") >= 30500:
self.data_id = stream.u64()
def save(self, stream):
self.check_required(stream.settings)
stream.string(self.url)
stream.list(self.headers, stream.add)
stream.u32(self.size)
stream.buffer(self.root_ca_cert)
if stream.settings.get("nex.version") >= 30500:
stream.u64(self.data_id)
class DataStoreReqGetInfoV1(common.Structure):
def __init__(self):
super().__init__()
self.url = None
self.headers = None
self.size = None
self.root_ca_cert = None
def check_required(self, settings):
for field in ['url', 'headers', 'size', 'root_ca_cert']:
if getattr(self, field) is None:
raise ValueError("No value assigned to required field: %s" %field)
axis, but stops moving towards
the collision. Then update the robot's position. If the robot isn't receiving input to move
forward, decelerate velocities.
"""
# Check if a collision has occurred, and zero the velocity axis associated with it.
_collision_side = self.collision_detector()
self.collision_list.append(_collision_side)
if len(self.collision_list) > 3:
self.collision_list.pop(0)
if not _collision_side:
self.collision_list = []
if "TOP" in self.collision_list:
if self.velocity[1] < 0:
self.velocity[1] = 0
if "BOTTOM" in self.collision_list:
if self.velocity[1] > 0:
self.velocity[1] = 0
if "RIGHT" in self.collision_list:
if self.velocity[0] > 0:
self.velocity[0] = 0
if "LEFT" in self.collision_list:
if self.velocity[0] < 0:
self.velocity[0] = 0
# Update robot position according to the velocity vector.
self.robot.x_pos += self.velocity[0]
self.robot.y_pos += self.velocity[1]
self.robot.rect.center = (self.robot.x_pos, self.robot.y_pos)
self.odo_velocity = self.velocity
if len(self.truth_pos) > 1000:
self.truth_pos.pop(0)
self.truth_pos.append([self.robot.x_pos, self.robot.y_pos])
# Decelerate the velocity vector if no forward input is received.
_deceleration = self.acceleration / 2
if "UP" not in self.cur_keys:
if self.velocity[0] > 0:
self.velocity[0] -= _deceleration
if self.velocity[0] < 0:
self.velocity[0] += _deceleration
if self.velocity[1] > 0:
self.velocity[1] -= _deceleration
if self.velocity[1] < 0:
self.velocity[1] += _deceleration
if self.velocity[0] < _deceleration and self.velocity[0] > _deceleration * -1:
self.velocity[0] = 0
if self.velocity[1] < _deceleration and self.velocity[1] > _deceleration * -1:
self.velocity[1] = 0
def change_velocity(self, _keys):
"""Controls the robot's velocity.
This function receives input from the user and updates the Robot.angular_velocity and
Robot.velocity vectors accordingly.
Attributes:
_keys: An array containing the current state of all keys.
"""
# Get input and sets the rotation according to the angular velocity.
_pressed_keys = self.convert_key(_keys)
if "RIGHT" in _pressed_keys:
self.robot.angle -= self.angular_velocity
if "LEFT" in _pressed_keys:
self.robot.angle += self.angular_velocity
# Bind the robot.angle to remain < 180 and > -180.
if self.robot.angle > 180:
self.robot.angle = -180 + (self.robot.angle - 180)
elif self.robot.angle < -180:
self.robot.angle = 180 + (self.robot.angle + 180)
# Calculate the current magnitude of the velocity vector.
_speed = self.acceleration * 2
self.velocity[2] = np.sqrt(
np.square(self.velocity[0]) + np.square(self.velocity[1]))
# Calculate the axis velocity components according to the current direction and desired
# speed.
_x_vec = np.cos(-1 * np.deg2rad(self.robot.angle + 90)) * _speed
_y_vec = np.sin(-1 * np.deg2rad(self.robot.angle + 90)) * _speed
if "UP" in _pressed_keys:
self.velocity[0] += self.acceleration * _x_vec
self.velocity[1] += self.acceleration * _y_vec
self.velocity[2] = np.sqrt(
np.square(self.velocity[0]) + np.square(self.velocity[1]))
# Normalise the velocity vectors if the velocity's magnitude is greater than the
# desired maximum velocity.
if self.velocity[2] > self.max_velocity:
_divider = self.max_velocity / \
np.sqrt(
np.square(self.velocity[0]) + np.square(self.velocity[1]))
self.velocity[0] = _divider * self.velocity[0]
self.velocity[1] = _divider * self.velocity[1]
def convert_key(self, _keys):
"""Converts the pressed key information into a string array.
This function takes the passed array of pygame keys and converts it to a list of the
currently pressed keys.
Attributes:
keys: An array containing the current state of all keys.
"""
_action = False
_keys_to_check = [[pygame.K_LEFT, "LEFT"],
[pygame.K_RIGHT, "RIGHT"],
[pygame.K_UP, "UP"],
[pygame.K_DOWN, "DOWN"],
[pygame.K_r, "R"]]
for _key in _keys_to_check:
if _keys[_key[0]]:
if _key[1] not in self.cur_keys:
self.cur_keys.append(_key[1])
_action = True
else:
try:
self.cur_keys.remove(_key[1])
except ValueError:
pass
# When a key is added, remove the first keys so that only the last two remain
if _action:
self.cur_keys = self.cur_keys[-2:]
else:
self.cur_keys = []
return self.cur_keys
def collision_detector(self):
"""Finds if the robot is colliding and the associated side.
This function uses sprites to determine all of the objects the robot is colliding with,
then finds the closest wall to determine which side of the robot is colliding. To solve for
cases where the robot is colliding with two walls simultaneously, the function utilises
recursion to find the second closest wall.
"""
_collision_list = pygame.sprite.spritecollide(self.robot,
self.world.wall_list,
False,
pygame.sprite.collide_mask)
if len(_collision_list) > 0:
# Find the closest colliding wall
_closest_distance = self.robot.initial_laser_length
_closest_wall = None
s_closest_wall = None
for _wall in _collision_list:
cur_distance = utils.point_distance(self.robot.x_pos,
_wall.rect.center[0],
self.robot.y_pos,
_wall.rect.center[1])
if cur_distance < _closest_distance:
s_closest_wall = _closest_wall
_closest_wall = _wall
_closest_distance = cur_distance
# If performing recursion, find the second closest wall
if self.recursion_depth > 0 and s_closest_wall is not None:
_closest_wall = s_closest_wall
_wall = _closest_wall
# Find which side of the robot is closest to the closest wall
_sides = [self.robot.hitbox.midtop, self.robot.hitbox.midright,
self.robot.hitbox.midbottom, self.robot.hitbox.midleft]
_closest_side = -1
_closest_side_distance = self.robot.initial_laser_length
for _i, _side in enumerate(_sides):
distance = utils.point_distance(_side[0],
_wall.rect.center[0],
_side[1],
_wall.rect.center[1])
if distance < _closest_side_distance:
_closest_side_distance = distance
_closest_side = _i
_to_return = None
if _closest_side == 0:
_to_return = "TOP"
if _closest_side == 1:
_to_return = "RIGHT"
if _closest_side == 2:
_to_return = "BOTTOM"
if _closest_side == 3:
_to_return = "LEFT"
# If the robot is already colliding with a wall, collide the second closest wall
if len(self.collision_list) > 0:
if _to_return == self.collision_list[len(self.collision_list) - 1]:
if self.recursion_depth <= 1:
self.recursion_depth += 1
return self.collision_detector()
self.recursion_depth = 0
return _to_return
return None
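# Illustrative sketch (standalone, not part of the simulator classes): the
# normalisation used in change_velocity() above rescales the velocity components
# so their magnitude never exceeds the configured maximum velocity.
def _example_clamp_velocity(x_velocity, y_velocity, max_velocity):
    import numpy as np
    speed = np.sqrt(np.square(x_velocity) + np.square(y_velocity))
    if speed > max_velocity:
        scale = max_velocity / speed
        x_velocity *= scale
        y_velocity *= scale
    return x_velocity, y_velocity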
class OG_Laser(pygame.sprite.Sprite):
"""Sprite for the lidar sensor's laser beams.
Handles the attributes of each laser. Uses invisible surfaces to calculate positional offsets
for each laser depending on its given rotation. Also contains the laser's collision mask. It
also handles the positional updates sent from RobotControl.
Attributes:
_p_screen: The main pygame screen surface.
_origin: A pygame.math.Vector2() object that is the robot's base position.
_angle: A pygame.math.Vector2() object that contains polar coordinates stating the laser's
length and direction _angle.
"""
def __init__(self, _p_screen, _origin, _angle):
pygame.sprite.Sprite.__init__(self)
# Use a "dummy" surface to determine the width and height of the rotated laser rect
_dummy_screen = pygame.Surface(
(_p_screen.get_height() * 2, _p_screen.get_width() * 2),
pygame.SRCALPHA)
_dummy_rect = pygame.draw.line(_dummy_screen,
(0, 255, 0, 255),
_origin + _origin,
_origin + _origin + _angle)
self.origin = _origin
self.angle = _angle
_int_angle = int(_angle.as_polar()[1])
# Find an offset for the laser's draw position depending on its angle
if 0 <= _int_angle <= 90:
self.x_offset = 0
self.y_offset = 0
elif _int_angle > 90:
self.x_offset = -_dummy_rect.width
self.y_offset = 0
elif _int_angle < -90:
self.x_offset = -_dummy_rect.width
self.y_offset = -_dummy_rect.height
elif -90 <= _int_angle < 0:
self.x_offset = 0
self.y_offset = -_dummy_rect.height
self.screen = _p_screen
self.image = pygame.Surface((_dummy_rect.width, _dummy_rect.height),
pygame.SRCALPHA)
self.new_start = (self.origin.x + self.x_offset,
self.origin.y + self.y_offset)
self.rect = pygame.draw.aaline(self.image,
(255, 0, 0, 255),
(-self.x_offset, - self.y_offset),
(int(_angle.x - self.x_offset),
int(_angle.y - self.y_offset)))
self.mask = pygame.mask.from_surface(self.image, 50)
def update(self):
"""Update the laser's position."""
self.new_start = (self.origin.x + self.x_offset,
self.origin.y + self.y_offset)
self.rect.topleft = self.new_start
class LM_Laser():
"""Laser object containing the attributes of each landmark sensor laser.
Attributes:
_p_screen: The main pygame screen surface.
_origin: A set of coordinates containing the robot's position.
_destination: A set of coordinates containing the location of the detected landmark.
"""
def __init__(self, _p_screen, _origin, _destination):
self.screen = _p_screen
self.destination = _destination
self.update(_origin)
def update(self, _origin):
"""Update the laser's position."""
self.origin = _origin
self.angle = self.find_angle(_origin, self.destination)
self.length = utils.point_distance(_origin[0], self.destination[0],
_origin[1], self.destination[1])
self.polar = (self.length, self.angle)
def find_angle(self, _origin, _destination):
return np.arctan2(_destination[1] - _origin[1],
_destination[0] - _origin[0])
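# Illustrative sketch (standalone): the polar representation stored by LM_Laser
# above, for a robot at (0, 0) and a landmark at (3, 4). np.hypot stands in for
# the project's utils.point_distance helper.
def _example_landmark_polar():
    import numpy as np
    origin, destination = (0, 0), (3, 4)
    length = np.hypot(destination[0] - origin[0], destination[1] - origin[1])  # 5.0
    angle = np.arctan2(destination[1] - origin[1], destination[0] - origin[0])  # ~0.927 rad
    return (length, angle)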
class Wall(pygame.sprite.Sprite):
"""Sprite for the lidar sensor's laser beams.
Handles the attributes of each laser. Uses invisible surfaces to calculate positional offsets
for each laser depending on its given rotation. Also contains the laser's collision mask.
Attributes:
_top: The desired pixel for the top of the wall.
_left: The desired pixel for the left of the wall.
_width: The desired width of the wall.
_height: The desired height of the wall.
"""
def __init__(self, _left, _top, _width, _height):
pygame.sprite.Sprite.__init__(self)
self.rect = pygame.Rect(_left, _top, _width, _height)
self.color = (0, 0, 0, 255)
self.image = pygame.Surface((_width, _height), pygame.SRCALPHA)
self.image.fill(self.color)
self.mask = pygame.mask.from_threshold(self.image,
pygame.Color('black'),
(1, 1, 1, 255))
def update(self, _color):
"""Update the wall's colour.
Used for debugging purposes only at this stage.
"""
self.image.fill(_color)
class World():
"""Writes and draws the world map.
Handles the attributes for the world map and draws it.
Attributes:
_p_screen: The main pygame screen surface.
"""
def __init__(self, _p_screen):
self.screen = _p_screen
self.size = 20
self.grid = [[0 for _ in range(self.screen.get_size()[0] // self.size)]
for __ in range(self.screen.get_size()[1] // self.size)]
self.wall_list = pygame.sprite.Group()
self.world_type = "Occupancy Grid"
self.landmark_count = 10
| |
# Author: Forec
# Last modified: 2016-12-20
# Email: <EMAIL>
# About this file: contains every view entry point of the server except
# authentication, including the home page, cloud-drive pages, file operations,
# downloads, the chat module, and the administrator pages.
# Blueprint: main
import os, random, shutil, zipfile, os.path
from config import basedir
from datetime import datetime, timedelta
from sqlalchemy import or_, and_, text
from flask import render_template, session, redirect, url_for, \
abort, flash, request, current_app, \
make_response, send_from_directory
from flask_login import login_required, current_user
from .forms import EditProfileForm, EditProfileAdminForm, \
UploadForm, CommentForm, SearchForm, \
FileDeleteConfirmForm, ChatForm, \
SetShareForm, ConfirmShareForm, \
NewFolderForm
from . import main
from .. import db
from ..decorators import admin_required, permission_required
from ..models import User, Role, Permission, File, \
Comment, Message,Pagination, CFILE
from werkzeug.utils import secure_filename
# --------------------------------------------------------------------
# The lists below are the file-icon categories supported by the server (video,
# image, document, archive, audio); files of each category are shown with the
# matching thumbnail. Extend the suffix lists here to support more file types.
# To add a new display category, add a list and update the corresponding
# template files. Templates that must be changed when adding a category:
# * file.html
# * _files.html
# * _ownfiles.html
# * _copyownfiles.html
# * _moveownfiles.html
# * _forkownfiles.html
videoList = ['.avi', '.mp4', '.mpeg', '.flv', '.rmvb', '.rm', '.wmv']
photoList = ['.jpg', '.jpeg', '.png', '.svg', '.bmp', '.psd']
docList = ['.doc', '.ppt', '.pptx', '.docx', '.xls', '.xlsx', '.txt', '.md', '.rst', '.note']
compressList = ['.rar', '.zip', '.gz', '.gzip', '.tar', '.7z']
musicList = ['.mp3', '.wav', '.wma', '.ogg']
# --------------------------------------------------------------------------
# generateFileTypes takes a list of files and returns a list of 2-tuples. In
# each tuple the first element is the file itself and the second element is
# that file's type, given as one of the following strings:
# * 'video': the suffix is in videoList
# * 'music': the suffix is in musicList
# * 'txt': the suffix is '.txt'
# * 'md': the suffix is '.md' or '.rst'
# * 'ppt': the suffix is '.ppt' or '.pptx'
# * 'excel': the suffix is '.xls' or '.xlsx'
# * 'doc': the suffix is in docList but none of the above
# * 'photo': the suffix is in photoList
# * 'compress': the suffix is in compressList
def generateFileTypes(files):
file_types = []
for file in files:
filetype = 'file'
suffix = '.'+file.filename.split('.')[-1]
if suffix in videoList:
filetype = 'video'
elif suffix in musicList:
filetype = 'music'
elif suffix == '.txt':
filetype = 'txt'
elif suffix == '.md' or suffix == '.rst':
filetype = 'md'
elif suffix == '.ppt' or suffix == '.pptx':
filetype = 'ppt'
elif suffix == '.xls' or suffix == '.xlsx':
filetype = 'excel'
elif suffix in docList:
filetype = 'doc'
elif suffix in photoList:
filetype = 'photo'
elif suffix in compressList:
filetype = 'compress'
file_types.append((file, filetype))
if file_types == []:
file_types = None
return file_types
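# Illustrative usage sketch (not part of the original module): any object with a
# `filename` attribute works; the FakeFile type below is hypothetical.
# >>> from collections import namedtuple
# >>> FakeFile = namedtuple('FakeFile', ['filename'])
# >>> generateFileTypes([FakeFile('demo.mp4'), FakeFile('notes.md')])
# [(FakeFile(filename='demo.mp4'), 'video'), (FakeFile(filename='notes.md'), 'md')]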
# ---------------------------------------------------------------
# generatePathList takes a *nix path given as a string and generates every
# parent path together with the name of the folder at that level. For example:
# generatePathList('/home/forec/work/') =>
# [('/', '/'), ('/home/', 'home/'), ('/home/forec/', 'forec/'),
# ('/home/forec/work/', 'work/')]
def generatePathList(p):
ans = []
parts = p.split('/')[:-1]
sum = ''
for i in range(0, len(parts)):
parts[i] = parts[i] + '/'
sum += parts[i]
ans.append((sum, parts[i]))
return ans
# ----------------------------------------------------------------
# moderate provides the entry point for the "administration" page
@main.route('/moderate')
@admin_required
def moderate():
return render_template('main/moderate/moderate.html')
# ----------------------------------------------------------------
# home provides the entry point for the Zenith Cloud (顶点云) introduction page
@main.route('/')
def home():
return render_template('home.html')
# ----------------------------------------------------------------
# index is the entry point of the server's main page. It shows descriptions of
# the resources users have shared and guarantees that for sub-directories/files
# under the same path only the top-level path is shown, e.g.:
# /
# - home/
#   - work1/
#     - file1.dat
#     - file2.dat
#   - work2/
# In the directory tree above, if user a shares the directory /home/ and every
# file under it, the index page only tells other users that user a shared
# /home/; it does not list the other directories/files under /home/.
@main.route('/index', methods=['GET', 'POST'])
def index():
show_followed = False
if current_user.is_authenticated:
show_followed = bool(request.cookies.get('show_followed', ''))
# Read the user's choice from the cookie; by default show files shared by all users
if show_followed:
query = current_user.followed_files
else:
query = File.query.filter(text("private=0")).all()
page = request.args.get('page', 1, type=int)
key = request.args.get('key', '', type=str)
# Keyword search form
form = SearchForm()
if form.validate_on_submit():
return redirect(url_for('main.index',
key=form.key.data,
_external=True))
form.key.data = key
query = sorted(query,
key = lambda x: len(x.path),
reverse = False)
filelist = []
paths_users = [] # each element is a 2-tuple: (directory already listed, id of the user owning that path)
# The files to display were sorted above by path length, guaranteeing that
# top-level directories appear before their sub-directories/files
for file in query:
if file.private == True:
continue
if file.path == '/':
filelist.append(file)
if file.isdir:
paths_users.append((file.path+file.filename+'/',
file.ownerid))
else:
sappend = True
for (path, userid) in paths_users:
if path == file.path[:len(path)] and \
userid == file.ownerid:
sappend = False
break
if sappend:
filelist.append(file)
if file.isdir:
paths_users.append((file.path+file.filename+'/',
file.ownerid))
_filelist = sorted(filelist,
key=lambda x:x.created,
reverse=True)
# Sort files by time; the most recently created files are shown first
if key == '':
filelist = _filelist
else:
# The user supplied a search keyword, so the results must be matched against it
filelist = []
for _file in _filelist:
if key in _file.filename:
filelist.append(_file)
pagination = Pagination(page=page,
per_page=current_app.\
config['ZENITH_FILES_PER_PAGE'],
total_count=len(filelist))
files = filelist[(page-1)*current_app.\
config['ZENITH_FILES_PER_PAGE']:
page*current_app.\
config['ZENITH_FILES_PER_PAGE']]
return render_template('index/index.html',
key =key or '',
form = form,
files = files,
_len = len(files),
pagination = pagination,
show_followed=show_followed)
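# Illustrative sketch (standalone, hypothetical data): the prefix check used in
# index() above, which hides /home/work1/ once /home/ from the same owner has
# already been listed.
# >>> paths_users = [('/home/', 42)]
# >>> candidate_path, candidate_owner = '/home/work1/', 42
# >>> any(path == candidate_path[:len(path)] and uid == candidate_owner
# ...     for path, uid in paths_users)
# True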
# -----------------------------------------------------------------
# show_all clears the show_followed flag in the user's cookie
# Note that login_required is not needed here
@main.route('/all')
def show_all():
resp = make_response(redirect(url_for('.index',
_external=True)))
resp.set_cookie('show_followed', '', max_age=30*24*60*60)
return resp
# ------------------------------------------------------------------
# show_followed sets the show_followed flag in the user's cookie
@main.route('/followed')
@login_required
def show_followed():
resp = make_response(redirect(url_for('.index',
_external=True)))
resp.set_cookie('show_followed', '1', max_age=30*24*60*60)
return resp
# -------------------------------------------------------------------
# user is the entry point of the user-profile page, which contains the user's basic
# information, profile editing and the resources they share; files are displayed the
# same way as on the index page
@main.route('/user/<int:id>')
def user(id):
user = User.query.filter_by(uid=id).first()
if user is None:
abort(404)
query = user.files.filter_by(private=False).all()
query = sorted(query,
key = lambda x: len(x.path),
reverse = False)
    # sort the resources this user shares by path length
page = request.args.get('page', 1, type=int)
filelist = []
paths = []
for file in query:
if file.private == True:
continue
if file.path == '/':
filelist.append(file)
if file.isdir:
paths.append(file.path+file.filename+'/')
else:
sappend = True
for path in paths:
if path == file.path[:len(path)]:
sappend = False
break
if sappend:
filelist.append(file)
if file.isdir:
paths.append(file.path+file.filename+'/')
filelist = sorted(filelist,
key=lambda x:x.created,
reverse=True)
    # sort files by creation time; the most recently created come first
filelist = generateFileTypes(filelist)
if filelist is None:
total_count = 0
else:
total_count = len(filelist)
pagination = Pagination(page=page,
per_page=current_app.\
config['PROFILE_ZENITH_FILES_PER_PAGE'],
total_count=total_count)
if filelist is not None:
files = filelist[(page-1)*current_app.\
config['PROFILE_ZENITH_FILES_PER_PAGE']:
page*current_app.\
config['PROFILE_ZENITH_FILES_PER_PAGE']]
else:
files = []
return render_template('main/profile/user.html',
user = user,
files= files,
                           share_count=total_count,  # number of resources the user shares
pagination=pagination)
# -----------------------------------------------------------------------
# edit_profile lets the currently logged-in user edit their profile
@main.route('/edit-profile', methods=['GET','POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
        # validate the uploaded avatar
if form.thumbnail.has_file():
while True:
                # create a random directory
randomBasePath = current_app.\
config['ZENITH_TEMPFILE_STORE_PATH'] + \
''.join(random.sample(
current_app.\
config['ZENITH_RANDOM_PATH_ELEMENTS'],
current_app.\
config['ZENITH_TEMPFOLDER_LENGTH']))
if os.path.exists(randomBasePath):
                    # if the random directory already exists, generate another one
continue
break
os.mkdir(randomBasePath)
if not os.path.exists(randomBasePath):
abort(500)
filepath = os.path.join(randomBasePath,
form.thumbnail.data.filename)
suffix = form.thumbnail.data.filename
            # check whether the file extension is valid
suffix = suffix.split('.')
if len(suffix) < 2 or '.' + suffix[-1] not in \
current_app.config['ZENITH_VALID_THUMBNAIL']:
flash('您上传的头像不符合规范!')
os.rmdir(randomBasePath)
return redirect(url_for('main.edit_profile',
_external=True))
            suffix = '.' + suffix[-1]  # suffix is now the file extension (with leading dot)
form.thumbnail.data.save(filepath)
if not os.path.isfile(filepath):
abort(500)
if os.path.getsize(filepath) > \
current_app.config['ZENITH_VALID_THUMBNAIL_SIZE']:
                # avatar is larger than the 512KB limit
flash('您上传的头像过大,已被系统保护性删除,请保证'
'上传的头像文件大小不超过 ' +
str(current_app.\
config['ZENITH_VALID_THUMBNAIL_SIZE'] // 1024) +
'KB!')
os.remove(filepath)
os.rmdir(randomBasePath)
return redirect(url_for('main.edit_profile',
_external=True))
else:
                # validation passed; update the avatar
for _suffix in current_app.config['ZENITH_VALID_THUMBNAIL']:
thumbnailPath = os.path.join(basedir,
'app/static/thumbnail/' +
str(current_user.uid) + _suffix)
if os.path.isfile(thumbnailPath):
                        # an old avatar exists, so delete it first
os.remove(thumbnailPath)
break
                # copy the new avatar into place
shutil.copy(
filepath,
os.path.join(basedir,
'app/static/thumbnail/' +
str(current_user.uid) + suffix)
)
                # remove the temporary files
os.remove(filepath)
os.rmdir(randomBasePath)
current_user.avatar_hash = ':' + \
url_for('static',
filename = 'thumbnail/' +
str(current_user.uid) + suffix,
_external=True)
current_user.nickname = form.nickname.data
current_user.about_me = form.about_me.data
db.session.add(current_user)
db.session.commit()
flash('您的资料已更新')
return redirect(url_for('.user',
id=current_user.uid,
_external=True))
form.nickname.data = current_user.nickname
form.about_me.data = current_user.about_me
return render_template('main/profile/edit_profile.html', form=form)
# -----------------------------------------------------------------------
# edit_profile_admin lets users with administrator rights edit any user's profile
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required  # restricted to administrators
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.nickname = form.nickname.data
user.about_me = form.about_me.data
user.maxm = form.maxm.data
db.session.add(user)
flash('用户 ' + user.nickname +' 资料已更新')
return redirect(url_for('.user',
id=user.uid,
_external=True))
form.email.data = user.email
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.maxm.data = user.maxm
form.nickname.data = user.nickname
form.about_me.data = user.about_me
return render_template('main/profile/edit_profile.html', form=form)
# ----------------------------------------------------------------------
# file shows detailed information about a resource: its type, size (unless it is a
# directory), description, comments and the available operations
@main.route('/file/<int:id>', methods=['GET', 'POST'])
def file(id):
file = File.query.get_or_404(id)
if file.owner != current_user and \
file.private == True and \
not current_user.can(Permission.ADMINISTER):
        # return 403 when the file is private and the current user is neither its owner nor an administrator
abort(403)
    form = CommentForm()
if form.validate_on_submit():
comment = Comment(body = form.body.data,
file = file,
author = current_user._get_current_object())
db.session.add(comment)
flash('您的评论已发布')
return redirect(url_for('.file',
id=file.uid,
page=-1,
_external=True))
page = request.args.get('page', 1, type=int)
if page == -1:
page = (file.comments.count() - 1)// \
current_app.config['ZENITH_COMMENTS_PER_PAGE'] + 1
pagination = file.comments.order_by(Comment.timestamp.asc()).\
paginate(page,
per_page=current_app.config['ZENITH_COMMENTS_PER_PAGE'],
                 error_out=False)  # paginate the comments
pathLists = generatePathList(file.path)
    file_type = generateFileTypes([file])[0][1]  # get the type of the file being displayed
comments = pagination.items
return render_template('main/files/file.html',
comments = comments,
file_type=file_type,
pathlists = pathLists,
pagination = pagination,
file = file,
form = form,
moderate=current_user.\
can(Permission.MODERATE_COMMENTS))
# -----------------------------------------------------------------------
# follow acts as a springboard for following another user. On success it redirects to
# the followed user's profile page, otherwise to the main page. A user must hold the
# FOLLOW permission (i.e. have a confirmed email address) before following anyone.
@main.route('/follow/<int:id>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(id):
user = User.query.filter_by(uid=id).first()
if user is None:
flash('不合法的用户')
return redirect(url_for('.index',
_external=True))
if current_user.is_following(user):
flash('您已关注该用户')
return redirect(url_for('.user',
id=user.uid,
_external=True))
current_user.follow(user)
flash('您已关注用户 %s' % user.nickname)
return redirect(url_for('.user',
id=user.uid,
_external=True))
# --------------------------------------------------------------------
# unfollow is the inverse operation of follow
@main.route('/unfollow/<int:id>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(id):
user = User.query.filter_by(uid=id).first()
if user is None:
flash('不合法的用户')
return redirect(url_for('.index',
_external=True))
if not current_user.is_following(user):
flash('您并未关注该用户')
return redirect(url_for('.user',
                                id=id,
_external=True))
current_user.unfollow(user)
flash('您已取消对用户 %s 的关注' % user.nickname)
return redirect(url_for('.user',
                            id=id,
_external=True))
# --------------------------------------------------------------------
# followers is the entry point of the page listing a user's followers
@main.route('/followers/<int:id>')
def followers(id):
user = User.query.filter_by(uid=id).first()
if user is None:
flash('不合法的用户')
return redirect(url_for('.index',
_external=True))
page = request.args.get('page', 1, type=int)
pagination = user.followers.\
paginate(page,
per_page=current_app.\
config['ZENITH_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{
'user' : item.follower,
'timestamp': item.timestamp
}
for item in pagination.items]
return render_template('main/profile/followers.html',
user=user,
title="的关注者",
endpoint='.followers',
pagination=pagination,
follows=follows)
# --------------------------------------------------------------------
# followed_by is the entry point of the page listing the users someone follows
@main.route('/followed-by/<int:id>')
def followed_by(id):
user = User.query.filter_by(uid=id).first()
if user is None:
flash('不合法的用户')
return redirect(url_for('.index',
_external=True))
page = request.args.get('page', 1, type=int)
pagination = user.followed.\
paginate(page,
per_page=current_app.\
config['ZENITH_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{
'user' : item.followed,
'timestamp': item.timestamp
}
for item in pagination.items]
return render_template('main/profile/followers.html',
user=user,
title="关注的人",
endpoint='.followed_by',
pagination=pagination,
follows=follows)
# --------------------------------------------------------------------
# delete_file is the entry point of the file-deletion page. The user must confirm the
# deletion before a one-time token is generated; that token is then used to jump to
# the delete_file_confirm endpoint, which performs the actual deletion.
@main.route('/delete-file/<int:id>', methods=['GET','POST'])
@login_required
def delete_file(id):
    file = File.query.get_or_404(id)
if current_user != file.owner and \
not current_user.can(Permission.ADMINISTER):
abort(403)
flash('小心!删除操作不能撤回!')
form = FileDeleteConfirmForm()
if form.validate_on_submit():
if form.filename.data == '' or \
form.filename.data is None:
flash("文件名不合法!")
return redirect(url_for('.file',
id=file.uid,
_external=True))
file.filename = form.filename.data
file.description = form.body.data
db.session.add(file)
if | |
import os
import subprocess
import utility
import ConfigParser
import grapeConfig
import StringIO
class GrapeGitError(Exception):
# arguments must be kept as keywords to allow pickling
def __init__(self, errmsg='', returnCode=-1, gitOutput='', gitCommand='', cwd=os.getcwd()):
self.msg = errmsg
self.code = returnCode
self.gitOutput = gitOutput
self.gitCommand = gitCommand
self.commError = True if \
(self.code == 128 and "fatal: Could not read from remote" in self.gitOutput ) or \
("fatal: unable to access" in self.gitOutput) or \
("fatal: The remote end hung up unexpectedly" in self.gitOutput) \
else False
self.cwd = cwd
def __getinitargs__(self):
return (self.msg, self.code, self.gitOutput, self.gitCommand, self.cwd)
def __str__(self):
return "\nWORKING DIR: " + self.cwd + "\nCODE: " + str(self.code) + '\nCMD: ' + self.gitCommand + '\nOUTPUT: ' + self.gitOutput
def __repr__(self):
return self.__str__()
def gitcmd(cmd, errmsg):
_cmd = None
try:
cnfg = grapeConfig.grapeConfig()
_cmd = cnfg.get("git", "executable")
except ConfigParser.NoOptionError:
pass
except ConfigParser.NoSectionError:
pass
if _cmd:
_cmd += " %s" % cmd
elif os.name == "nt":
_cmd = "\"C:\\Program Files\\Git\\bin\\git.exe\" %s" % cmd
else:
_cmd = "git %s" % cmd
cwd = os.getcwd()
process = utility.executeSubProcess(_cmd, cwd, verbose=-1)
if process.returncode != 0:
raise GrapeGitError("Error: %s " % errmsg, process.returncode, process.output, _cmd, cwd=cwd)
return process.output.strip()
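# Hedged usage sketch for gitcmd (illustrative only; assumes the current working
# directory is inside a git repository):
#
#   sha = gitcmd("rev-parse HEAD", "Could not resolve HEAD")
#   gitcmd("status --porcelain", "Could not read status")
#
# Any nonzero exit status raises GrapeGitError carrying the command, its output,
# the return code and the working directory.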
def add(filedescription):
return gitcmd("add %s" % filedescription, "Could not add %s" % filedescription)
def baseDir():
unixStylePath = gitcmd("rev-parse --show-toplevel", "Could not locate base directory")
path = utility.makePathPortable(unixStylePath)
return path
def allBranches():
return branch("-a").replace("*",' ').replace(" ",'').split()
def remoteBranches():
return branch("-r").replace(" ", '').split()
def branch(argstr=""):
return gitcmd("branch %s" % argstr, "Could not execute git branch command")
def branchPrefix(branchName):
return branchName.split('/')[0]
def branchUpToDateWith(branchName, targetBranch):
try:
allUpToDateBranches = gitcmd("branch -a --contains %s" % targetBranch, "branch contains failed")
except GrapeGitError as e:
# Don't fail if the only issue is a dangling reference for origin/HEAD.
allUpToDateBranches = e.gitOutput
allUpToDateBranches = allUpToDateBranches.replace("error: branch 'origin/HEAD' does not point at a commit\n","")
allUpToDateBranches = allUpToDateBranches.replace("error: some refs could not be read\n","")
if "error: " in allUpToDateBranches:
raise e
allUpToDateBranches = allUpToDateBranches.split("\n")
upToDate = False
for b in allUpToDateBranches:
# remove the * prefix from the active branch
cleanB = b.strip()
        if b[0] == '*':
cleanB = b[1:].strip()
upToDate = cleanB == branchName.strip()
if upToDate:
break
return upToDate
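# Note: branchUpToDateWith(A, B) asks git for every branch that already contains B
# ("git branch -a --contains B") and returns True when A is among them, i.e. when A
# already has all of B's commits.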
def bundle(argstr):
return gitcmd("bundle %s" % argstr, "Bundle failed")
def checkout(argstr):
return gitcmd("checkout %s" % argstr, "Checkout failed")
def clone(argstr):
try:
return gitcmd("clone %s" % argstr, "Clone failed")
except GrapeGitError as e:
if "already exists and is not an empty directory" in e.gitOutput:
raise e
if e.commError:
print ("GRAPE: WARNING: clone failed due to connectivity issues.")
return e.gitOutput
else:
print ("GRAPE: Clone failed. Maybe you ran out of disk space?")
            print(e.gitOutput)
raise e
def commit(argstr):
return gitcmd("commit %s" % argstr, "Commit failed")
def commitDescription(committish):
try:
descr = gitcmd("log --oneline %s^1..%s" % (committish, committish),
"commitDescription failed")
# handle the case when this is called on a 1-commit-long history (occurs mostly in unit testing)
except GrapeGitError as e:
if "unknown revision" in e.gitOutput:
try:
descr = gitcmd("log --oneline %s" % committish, "commitDescription failed")
except GrapeGitError as e:
raise e
return descr
def config(argstr, arg2=None):
if arg2 is not None:
return gitcmd('config %s "%s"' % (argstr, arg2), "Config failed")
else:
return gitcmd('config %s ' % argstr, "Config failed")
def conflictedFiles():
fileStr = diff("--name-only --diff-filter=U").strip()
lines = fileStr.split('\n') if fileStr else []
return lines
def currentBranch():
return gitcmd("rev-parse --abbrev-ref HEAD", "could not determine current branch")
def describe(argstr=""):
return gitcmd("describe %s" % argstr, "could not describe commit")
def diff(argstr):
return gitcmd("diff %s" % argstr, "could not perform diff")
def fetch(repo="", branchArg="", raiseOnCommError=False, warnOnCommError=False):
try:
return gitcmd("fetch %s %s" % (repo, branchArg), "Fetch failed")
except GrapeGitError as e:
if e.commError:
if warnOnCommError:
utility.printMsg("WARNING: could not fetch due to communication error.")
if raiseOnCommError:
raise e
else:
return e.gitOutput
else:
raise e
def getActiveSubmodules():
cwd = os.getcwd()
wsDir = utility.workspaceDir()
os.chdir(wsDir)
if os.name == "nt":
submoduleList = submodule("foreach --quiet \"echo $path\"")
else:
submoduleList = submodule("foreach --quiet \"echo \$path\"")
submoduleList = [] if not submoduleList else submoduleList.split('\n')
submoduleList = [x.strip() for x in submoduleList]
os.chdir(cwd)
return submoduleList
def getAllSubmodules():
subconfig = ConfigParser.ConfigParser()
try:
subconfig.read(os.path.join(baseDir(), ".gitmodules"))
except ConfigParser.ParsingError:
# this is guaranteed to happen due to .gitmodules format incompatibility, but it does
# read section names in successfully, which is all we need
pass
sections = subconfig.sections()
submodules = []
for s in sections:
submodules.append(s.split()[1].split('"')[1])
return submodules
def getAllSubmoduleURLMap():
subconfig = ConfigParser.ConfigParser()
fp = StringIO.StringIO('\n'.join(line.strip() for line in open(os.path.join(baseDir(), ".gitmodules"))))
subconfig.readfp(fp)
sections = subconfig.sections()
submodules = {}
for s in sections:
submodules[subconfig.get(s,"path")] = subconfig.get(s, "url")
return submodules
def getModifiedSubmodules(branch1="", branch2=""):
cwd = os.getcwd()
wsDir = utility.workspaceDir()
os.chdir(wsDir)
submodules = getAllSubmodules()
# if there are no submodules, then return the empty list
    if len(submodules) == 0 or (len(submodules) == 1 and not submodules[0]):
        os.chdir(cwd)
        return []
submodulesString = ' '.join(submodules)
try:
modifiedSubmodules = diff("--name-only %s %s -- %s" %
(branch1, branch2, submodulesString)).split('\n')
except GrapeGitError as e:
if "bad revision" in e.gitOutput:
utility.printMsg("getModifiedSubmodules: requested difference between one or more branches that do not exist. Assuming no modifications.")
return []
if len(modifiedSubmodules) == 1 and not modifiedSubmodules[0]:
return []
# make sure everything in modifiedSubmodules is in the original list of submodules
# (this can not be the case if the module existed as a regular directory / subtree in the other branch,
# in which case the diff command will list the contents of the directory as opposed to just the submodule)
verifiedSubmodules = []
for s in modifiedSubmodules:
if s in submodules:
verifiedSubmodules.append(s)
os.chdir(cwd)
return verifiedSubmodules
def gitDir():
base = baseDir()
gitPath = os.path.join(base, ".git")
toReturn = None
if os.path.isdir(gitPath):
toReturn = gitPath
elif os.path.isfile(gitPath):
with open(gitPath) as f:
line = f.read()
words = line.split()
if words[0] == 'gitdir:':
relUnixPath = words[1]
toReturn = utility.makePathPortable(relUnixPath)
else:
raise GrapeGitError("print .git file does not have gitdir: prefix as expected", 1, "", "grape gitDir()")
return toReturn
def hasBranch(b):
branches = branch().split()
return b in branches
def isWorkingDirectoryClean(printOutput=False):
statusOutput = status("-u --porcelain")
toRet = len(statusOutput.strip()) == 0
if (printOutput and not toRet):
        print(os.getcwd() + ":")
        print(statusOutput)
return toRet
def log(args=""):
return gitcmd("log %s" % args, "git log failed")
def merge(args):
return gitcmd("merge %s" % args, "merge failed")
def mergeAbort():
return gitcmd("merge --abort", "Could not determine top level git directory.")
def numberCommitsSince(commitStr):
strCount = gitcmd("rev-list --count %s..HEAD" % commitStr, "Rev-list failed")
return int(strCount)
def numberCommitsSinceRoot():
root = gitcmd("rev-list --max-parents=0 HEAD", "rev-list failed")
return numberCommitsSince(root)
def pull(args, throwOnFail=False):
try:
return gitcmd("pull %s" % args, "Pull failed")
except GrapeGitError as e:
if e.commError:
utility.printMsg("WARNING: Pull failed due to connectivity issues.")
if throwOnFail:
raise e
else:
return e.gitOutput
else:
raise e
def push(args, throwOnFail = False):
try:
return gitcmd("push --porcelain %s" % args, "Push failed")
except GrapeGitError as e:
if e.commError:
utility.printMsg("WARNING: Push failed due to connectivity issues.")
if throwOnFail:
raise e
else:
return e.gitOutput
else:
raise e
def rebase(args):
return gitcmd("rebase %s" % args, "Rebase failed")
def reset(args):
return gitcmd("reset %s" % args, "Reset failed")
def revert(args):
return gitcmd("revert %s" % args, "Revert failed")
def rm(args):
return gitcmd("rm %s" % args, "Remove failed")
def safeForceBranchToOriginRef(branchToSync):
# first, check to see that branch exists
branchExists = False
remoteRefExists = False
branches = branch("-a").split("\n")
remoteRef = "remotes/origin/%s" % branchToSync
for b in branches:
b = b.replace('*', '')
branchExists = branchExists or b.strip() == branchToSync.strip()
remoteRefExists = remoteRefExists or b.strip() == remoteRef.strip()
if branchExists and remoteRefExists:
            break
if branchExists and not remoteRefExists:
utility.printMsg("origin does not have branch %s" % branchToSync)
return False
if branchExists and remoteRefExists:
remoteUpToDateWithLocal = branchUpToDateWith(remoteRef, branchToSync)
localUpToDateWithRemote = branchUpToDateWith(branchToSync, remoteRef)
if remoteUpToDateWithLocal and not localUpToDateWithRemote:
if branchToSync == currentBranch():
utility.printMsg("Current branch %s is out of date with origin. Pulling new changes." % branchToSync)
try:
pull("origin %s" % branchToSync, throwOnFail=True)
except:
utility.printMsg("Can't pull %s. Aborting...")
return False
else:
branch("-f %s %s" % (branchToSync, remoteRef))
return True
elif remoteUpToDateWithLocal and localUpToDateWithRemote:
return True
else:
return False
if not branchExists and remoteRefExists:
utility.printMsg("local branch did not exist. Creating %s off of %s now. " % (branchToSync, remoteRef))
branch("%s %s" % (branchToSync, remoteRef))
return True
def shortSHA(branchName="HEAD"):
return gitcmd("rev-parse --short %s" % branchName, "rev-parse of %s failed!" % branchName)
def SHA(branchName="HEAD"):
return gitcmd("rev-parse %s" | |
from ryven.NENV import *
widgets = import_widgets(__file__)
from OCC.Core.ChFi2d import \
ChFi2d_AnaFilletAlgo
from OCC.Core.gp import \
gp_Pnt, \
gp_Vec, \
gp_Dir, \
gp_Ax2, \
gp_Pln, \
gp_Trsf, \
gp_DX, \
gp_DY, \
gp_DZ, \
gp_Circ, \
gp_XOY, \
gp_YOZ, \
gp_ZOX
from OCC.Core.BRep import \
BRep_Tool
from OCC.Core.BRepBuilderAPI import \
BRepBuilderAPI_Transform, \
BRepBuilderAPI_MakeEdge, \
BRepBuilderAPI_MakeWire, \
BRepBuilderAPI_MakeFace
from OCC.Core.BRepPrimAPI import \
BRepPrimAPI_MakeBox, \
BRepPrimAPI_MakeSphere, \
BRepPrimAPI_MakeCylinder, \
BRepPrimAPI_MakeTorus
from OCC.Core.BRepAdaptor import \
BRepAdaptor_CompCurve
from OCC.Core.BRepAlgoAPI import \
BRepAlgoAPI_Fuse, \
BRepAlgoAPI_Common, \
BRepAlgoAPI_Cut, \
BRepAlgoAPI_Section
from OCC.Core.BRepOffsetAPI import \
BRepOffsetAPI_MakePipe
from OCC.Core.Geom import \
Geom_Circle
from OCC.Core.GeomAbs import \
GeomAbs_C2
from OCC.Core.GCPnts import \
GCPnts_UniformAbscissa
from OCC.Core.GeomAPI import \
GeomAPI_PointsToBSplineSurface
from OCC.Core.STEPControl import \
STEPControl_Writer, \
STEPControl_AsIs
from OCC.Core.TColgp import \
TColgp_Array1OfPnt, \
TColgp_Array2OfPnt
from OCC.Core.TopAbs import \
TopAbs_EDGE, \
TopAbs_FACE, \
TopAbs_SHELL, \
TopAbs_VERTEX, \
TopAbs_WIRE, \
TopAbs_SOLID, \
TopAbs_COMPOUND, \
TopAbs_COMPSOLID
from OCC.Core.TopExp import \
TopExp_Explorer
from OCC.Core.TopoDS import \
topods_Edge, \
TopoDS_Edge, \
topods_Face, \
topods_Shell, \
topods_Vertex, \
topods_Wire, \
TopoDS_Wire, \
topods_Solid, \
topods_Compound, \
topods_CompSolid
from OCC.Extend.DataExchange import \
write_stl_file, \
read_stl_file, \
read_step_file
from OCC.Core.BRepFilletAPI import \
BRepFilletAPI_MakeFillet
from OCC.Extend.TopologyUtils import \
TopologyExplorer
from OCC.Extend.ShapeFactory import \
get_oriented_boundingbox
from OCCUtils.Common import \
filter_points_by_distance, \
curve_length
# 3D Viewer ------------------------------------------
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
add_menu('View')
def Fit_All():
display.FitAll()
def Iso_View():
display.View_Iso()
display.FitAll()
def Top_View():
display.View_Top()
display.FitAll()
def Left_View():
display.View_Left()
display.FitAll()
def Front_View():
display.View_Front()
display.FitAll()
def Right_View():
display.View_Right()
display.FitAll()
def Bottom_View():
display.View_Bottom()
display.FitAll()
def Rear_View():
display.View_Rear()
display.FitAll()
add_function_to_menu('View', Fit_All)
add_function_to_menu('View', Iso_View)
add_function_to_menu('View', Top_View)
add_function_to_menu('View', Left_View)
add_function_to_menu('View', Front_View)
add_function_to_menu('View', Right_View)
add_function_to_menu('View', Bottom_View)
add_function_to_menu('View', Rear_View)
# -----------------------------------------------------
# Base Classes
class PythonOCCNodeBase(Node):
def get_inputs(self):
return (self.input(i) for i in range(len(self.inputs)))
class PythonOCCNodeBase_DynamicInputs(PythonOCCNodeBase):
def __init__(self, params):
super().__init__(params)
self.num_inputs = 0
def setup_actions(self):
self.actions = {}
self.actions['add input'] = {'method': self.add_operand_input}
self.actions['rem input'] = {}
def place_event(self):
self.setup_actions()
if 0 == self.num_inputs < len(self.inputs):
for i in range(len(self.inputs)):
self.register_new_operand_input(i)
def add_operand_input(self):
self.create_input_dt(dtype=dtypes.Data(size='s'))
self.register_new_operand_input(self.num_inputs)
self.update()
def remove_operand_input(self, index):
self.delete_input(index)
self.num_inputs -= 1
del self.actions['rem input'][f'{self.num_inputs}']
self.update()
def register_new_operand_input(self, index):
self.actions['rem input'][f'{index}'] = {
'method': self.remove_operand_input,
'data': index
}
self.num_inputs += 1
def update_event(self, inp=-1):
self.set_output_val(0, self.apply_op([self.input(i) for i in range(len(self.inputs))]))
def apply_op(self, elements: list):
return None
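    # apply_op is the hook concrete nodes override; this base implementation simply
    # discards its inputs and returns None, so update_event writes None until a
    # subclass supplies real behaviour.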
# -------------------------------------------
# GP ----------------------------------------
class GpNodeBase(PythonOCCNodeBase):
version = 'v0.1'
color = '#5e0a91'
class Pnt_Node(GpNodeBase):
"""
Generates Point_______-
o_X___________________-
o_Y___________________-
o_Z___________________-
"""
title = 'point'
init_inputs = [
NodeInputBP('x', dtype=dtypes.Data(size='s')),
NodeInputBP('y', dtype=dtypes.Data(size='s')),
NodeInputBP('z', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
x, y, z = self.clean(self.get_inputs())
self.set_output_val(0, gp_Pnt(x, y, z))
def clean(self, coords):
"""Returns a tuple of coords where `None` values are replaced by 0"""
return ( (c if c is not None else 0) for c in coords )
class PointZero_Node(GpNodeBase):
"""
Generates Point Zero__-
"""
title = 'Point0'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
point = gp_Pnt(0,0,0)
self.set_output_val(0, point)
class DeconstructPnt_Node(GpNodeBase):
"""
Deconstruct Point_____-
o_Point_______________-
"""
title = 'deconstruct point'
init_inputs = [
NodeInputBP('point', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP('X'),
NodeOutputBP('Y'),
NodeOutputBP('Z'),
]
def update_event(self, inp=-1):
for point in self.get_inputs():
self.set_output_val(0, point.X())
self.set_output_val(1, point.Y())
self.set_output_val(2, point.Z())
class Vec_Node(GpNodeBase):
"""
Generates Vector______-
o_X___________________-
o_Y___________________-
o_Z___________________-
"""
title = 'Vector'
init_inputs = [
NodeInputBP('x', dtype=dtypes.Data(size='s')),
NodeInputBP('y', dtype=dtypes.Data(size='s')),
NodeInputBP('z', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
x, y, z = self.get_inputs()
self.set_output_val(0, gp_Vec(x, y, z))
class DX_Node(GpNodeBase):
"""
Generates Dir X____-
"""
title = 'DirX'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
dx = gp_DX()
self.set_output_val(0, dx)
class DY_Node(GpNodeBase):
"""
Generates Dir Y____-
"""
title = 'DirY'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
dy = gp_DY()
self.set_output_val(0, dy)
class DZ_Node(GpNodeBase):
"""
Generates Dir Z____-
"""
title = 'DirZ'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
dz = gp_DZ()
self.set_output_val(0, dz)
class Dir_Node(GpNodeBase):
"""
Generates Dir_______-
o_X___________________-
o_Y___________________-
o_Z___________________-
"""
title = 'dir'
init_inputs = [
NodeInputBP('x', dtype=dtypes.Data(size='s')),
NodeInputBP('y', dtype=dtypes.Data(size='s')),
NodeInputBP('z', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
x, y, z = self.get_inputs()
self.set_output_val(0, gp_Dir(x, y, z))
class Ax2_Node(GpNodeBase):
"""
Generates Ax2_________-
o_Point_______________-
o_Dir_________________-
"""
title = 'Ax2'
init_inputs = [
NodeInputBP('point', dtype=dtypes.Data(size='s')),
NodeInputBP('dir', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
point, dir_ = self.get_inputs()
self.set_output_val(0, gp_Ax2(point, dir_))
class XOY_Node(GpNodeBase):
"""
Generates Ax Z____-
"""
title = 'AxZ'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
axz = gp_XOY()
self.set_output_val(0, axz)
class YOZ_Node(GpNodeBase):
"""
Generates Ax X____-
"""
title = 'AxX'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
axx = gp_YOZ()
self.set_output_val(0, axx)
class ZOX_Node(GpNodeBase):
"""
Generates Ax Y____-
"""
title = 'AxY'
init_outputs = [
NodeOutputBP(),
]
def place_event(self):
axy = gp_ZOX()
self.set_output_val(0, axy)
class Pln_Node(GpNodeBase):
"""
Generates Plane_______-
o_Point_______________-
o_Dir_________________-
"""
title = 'Plane'
init_inputs = [
NodeInputBP('point', dtype=dtypes.Data(size='s')),
NodeInputBP('dir', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
point, dir_ = self.get_inputs()
self.set_output_val(0, gp_Pln(point, dir_))
class Trsf_Node(GpNodeBase):
"""
Generates transform___-
o_[Shapes]____________-
o_[Vectors]___________-
"""
title = 'Transform'
init_inputs = [
NodeInputBP('shapes', dtype=dtypes.Data(size='s')),
NodeInputBP('vectors', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
shapes, vectors = self.get_inputs()
result = []
if isinstance(shapes, list) and isinstance(vectors, list):
for sh, v in zip(shapes, vectors):
trns = gp_Trsf()
trns.SetTranslation(v)
if isinstance(sh, gp_Pnt):
sh2 = sh
sh2.Transform(trns)
translated = sh2
else:
translated = BRepBuilderAPI_Transform(sh, trns).Shape()
result.append(translated)
self.set_output_val(0, result)
elif isinstance(shapes, list) and not isinstance(vectors, list):
for sh in (shapes):
trns = gp_Trsf()
trns.SetTranslation(vectors)
if isinstance(sh, gp_Pnt):
sh2 = sh
sh2.Transform(trns)
translated = sh2
else:
translated = BRepBuilderAPI_Transform(sh, trns).Shape()
result.append(translated)
self.set_output_val(0, result)
elif not isinstance(shapes, list) and isinstance(vectors, list):
for v in (vectors):
trns = gp_Trsf()
trns.SetTranslation(v)
if isinstance(shapes, gp_Pnt):
sh2 = shapes
sh2.Transform(trns)
translated = sh2
else:
translated = BRepBuilderAPI_Transform(shapes, trns).Shape()
result.append(translated)
self.set_output_val(0, result)
else:
trns = gp_Trsf()
trns.SetTranslation(vectors)
if isinstance(shapes, gp_Pnt):
sh2 = shapes
sh2.Transform(trns)
translated = sh2
else:
translated = BRepBuilderAPI_Transform(shapes, trns).Shape()
self.set_output_val(0, translated)
class Move2pts_Node(GpNodeBase):
"""
Move 2 points_________-
o_from pnt____________-
o_to pnt______________-
"""
title = 'Move2pnts'
init_inputs = [
NodeInputBP('shapes', dtype=dtypes.Data(size='s')),
NodeInputBP('from', dtype=dtypes.Data(size='s')),
NodeInputBP('to', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
shapes, from_pnt, to_pnt = self.get_inputs()
vectors = []
result = []
if isinstance(from_pnt, list):
for f, t in zip(from_pnt, to_pnt):
v = gp_Vec()
x = t.X() - f.X()
y = t.Y() - f.Y()
z = t.Z() - f.Z()
v.SetCoord(x, y, z)
vectors.append(v)
            for sh, v in zip(shapes, vectors):
trns = gp_Trsf()
trns.SetTranslation(v.Reversed())
translated = BRepBuilderAPI_Transform(sh, trns).Shape()
result.append(translated)
self.set_output_val(0, result)
else:
v = gp_Vec()
x = to_pnt.X() - from_pnt.X()
y = to_pnt.Y() - from_pnt.Y()
z = to_pnt.Z() - from_pnt.Z()
v.SetCoord(x, y, z)
trns = gp_Trsf()
trns.SetTranslation(v.Reversed())
translated = BRepBuilderAPI_Transform(shapes, trns).Shape()
self.set_output_val(0, translated)
class MidPoint_Node(GpNodeBase):
"""
MidPoint_____________-
o_Point A____________-
o_Point B______________-
"""
title = 'MidPoint'
init_inputs = [
NodeInputBP('pointA', dtype=dtypes.Data(size='s')),
NodeInputBP('pointB', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
pointA, pointB = self.get_inputs()
vec1 = gp_Vec(pointA.XYZ())
vec2 = gp_Vec(pointB.XYZ())
midvec = (vec1 + vec2) / 2.
midpoint = gp_Pnt(midvec.XYZ())
self.set_output_val(0, midpoint)
Gp_nodes = [
Pnt_Node,
DeconstructPnt_Node,
PointZero_Node,
Dir_Node,
Vec_Node,
DX_Node,
DY_Node,
DZ_Node,
Ax2_Node,
XOY_Node,
YOZ_Node,
ZOX_Node,
Pln_Node,
Trsf_Node,
Move2pts_Node,
MidPoint_Node,
]
# -------------------------------------------
# BREPBUILDERAPI-----------------------------
class BrepBuilderAPINodeBase(PythonOCCNodeBase):
version = 'v0.1'
color = '#DAA520'
class TwoPtsEdge_Node(BrepBuilderAPINodeBase):
"""
Generates 2 pts Edge__-
o_Point_______________-
o_Point_______________-
"""
title = '2ptsEdge'
init_inputs = [
NodeInputBP('pnt1', dtype=dtypes.Data(size='s')),
NodeInputBP('Pnt2', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
pnt1, pnt2 = self.get_inputs()
if isinstance(pnt1, list):
edges = []
for p1, p2 in zip(pnt1, pnt2):
edge = BRepBuilderAPI_MakeEdge(p1, p2).Edge()
edges.append(edge)
self.set_output_val(0, edges)
else:
edge = BRepBuilderAPI_MakeEdge(pnt1, pnt2).Edge()
self.set_output_val(0, edge)
class Wire_Node(BrepBuilderAPINodeBase):
"""
Generates Wire________-
o_List of Points______-
"""
title = 'Wire'
init_inputs = [
NodeInputBP('pntslist', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
for pointlist in self.get_inputs():
pointsarray = TColgp_Array1OfPnt(1, len(pointlist))
for n, i in enumerate(pointlist):
pointsarray.SetValue(n + 1, i)
wirebuild = BRepBuilderAPI_MakeWire()
for i in range(1, len(pointlist)):
edgepoint = BRepBuilderAPI_MakeEdge(pointsarray.Value(i), pointsarray.Value(i + 1)).Edge()
wirebuild.Add(edgepoint)
self.set_output_val(0, wirebuild.Shape())
class WireFillet2d_Node(BrepBuilderAPINodeBase):
"""
Generates 2dWireFillet_-
o_List of Points______-
o_Fillet Radius_______-
"""
title = '2dWireFillet'
init_inputs = [
NodeInputBP('pntslist', dtype=dtypes.Data(size='s')),
NodeInputBP('radius', dtype=dtypes.Data(size='s')),
]
init_outputs = [
NodeOutputBP(),
]
def update_event(self, inp=-1):
pointlist, radius = self.get_inputs()
if radius == 0:
radius = 0.01
pointsarray = TColgp_Array1OfPnt(1, len(pointlist))
for n, i in enumerate(pointlist):
pointsarray.SetValue(n + 1, i)
edges = {}
ijk = 0
for i in range(1, len(pointlist)):
edges[ijk] = BRepBuilderAPI_MakeEdge(pointsarray.Value(i), pointsarray.Value(i + 1)).Edge()
ijk += 1
edges_list = list(edges.values())
wirebuild = BRepBuilderAPI_MakeWire()
for index, edge in enumerate(edges_list[:-1]):
| |
while True:
element = route_prefix_parts[index]
parts.append(element)
index += 1
if index == limit:
break
parts.append('/')
continue
def _rebuild_path_parameter(parameter_type, parameter_name):
"""
Rebuilds a typed path part from it's type identifier and from it's name.
Parameters
----------
parameter_type : `int`
Parameter type identifier.
parameter_name : `str`
The parameter's name.
Returns
-------
path_parameter : `str`
"""
parameter_type_name = PARAMETER_TYPE_TO_NAME[parameter_type]
return f'<{parameter_type_name}:{parameter_name}>'
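# Illustrative example (hedged): assuming PARAMETER_TYPE_TO_NAME maps the path
# parameter type to the literal 'path', _rebuild_path_parameter(PARAMETER_TYPE_PATH,
# 'rest') would return '<path:rest>'.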
class PathRouter:
"""
Path router for getting which handler function should run for a pre-defined route.
Attributes
----------
route_step_paths : `None` or `dict` of (`str`, ``PathRouter``) items
Generic string paths to route to. Set as `None` if empty.
route_step_validated : `None` or `list` of ``ParameterValidatorPathStep``
Contains `parameter-name`, `validator`, `router` elements to route dynamic names.
route_end : `None` or `dict` of (`str`, ``Rule``) items
        If the url ends at the point of this router, then the handler function from ``.route_end`` is chosen if
        applicable. The functions are stored in `method` - `handler` relation.
route_end_all : `None` or ``Rule``
        If the url ends at this point of the router and none of the `route-end`-s were matched, then the view
        function of this slot is chosen.
route_end_path : `None` or `dict` of (`str`, `tuple` (``Rule``, `str`)) items
Paths, which have dynamic route ends.
route_end_path_all : `None` or `tuple` (``Rule``, `str`)
        ``.route_end_path`` version that accepts all types of request methods.
"""
__slots__ = (
'route_end',
'route_end_all',
'route_end_path',
'route_end_path_all',
'route_step_paths',
'route_step_validated',
)
def __init__(self):
"""
Creates a new ``PathRouter`` instance.
"""
self.route_step_paths = None
self.route_step_validated = None
self.route_end = None
self.route_end_all = None
self.route_end_path = None
self.route_end_path_all = None
def dispatch_route(self, path, index, request_method):
"""
Dispatches the given url, getting it's router.
Parameters
----------
path : `tuple` of `str`
The request's url's parts.
index : `int`
The next index of `path` to inspect.
request_method : `str`
            The request's method.
Returns
-------
route : `None` or ``Route``
The found route if any.
"""
if index == len(path):
route_end = self.route_end
if route_end is None:
return None
try:
rule = route_end[request_method]
except KeyError:
pass
else:
return Route(rule)
route_end_all = self.route_end_all
if route_end_all is None:
return Route(ROUTE_METHOD_NOT_ALLOWED)
rule = route_end_all
return Route(rule)
rule_part = path[index]
index += 1
route_step_paths = self.route_step_paths
if route_step_paths is not None:
try:
path_router = route_step_paths[rule_part]
except KeyError:
pass
else:
route = path_router.dispatch_route(path, index, request_method)
if route is not None:
return route
route_step_validated = self.route_step_validated
if route_step_validated is not None:
for parameter_validator_path_step in route_step_validated:
value = parameter_validator_path_step.validator(rule_part)
if value is None:
continue
for (
parameter_name,
path_router,
) in parameter_validator_path_step.path_routers:
route = path_router.dispatch_route(path, index, request_method)
if route is None:
continue
route.add_parameter(parameter_name, value)
return route
route_end_path = self.route_end_path
route_end_path_all = self.route_end_path_all
if (route_end_path is not None) or (route_end_path_all is not None):
if route_end_path is not None:
try:
rule, parameter_name = route_end_path[request_method]
except KeyError:
pass
else:
route = Route(rule)
route.add_parameter(parameter_name, '/'.join(path[index:]))
return route
if route_end_path_all is None:
return Route(ROUTE_METHOD_NOT_ALLOWED)
rule, parameter_name = route_end_path_all
route = Route(rule)
route.add_parameter(parameter_name, '/'.join(path[index:]))
return route
return None
def register_route(self, rule, index):
"""
Registers a new handler to the path router.
Parameters
----------
rule : ``Rule``
The rule of the endpoint
index : `int`
The index of the part of the path to process by this router.
"""
url_rule = rule.rule
if index == len(url_rule):
request_methods = rule.request_methods
if request_methods is None:
self.route_end_all = rule
else:
route_end = self.route_end
if route_end is None:
route_end = self.route_end = {}
for request_method in request_methods:
route_end[request_method] = rule
return
rule_part_type, rule_part = url_rule[index]
index += 1
if rule_part_type == PARAMETER_TYPE_STATIC:
route_step_paths = self.route_step_paths
if route_step_paths is None:
route_step_paths = self.route_step_paths = {}
try:
path_router = route_step_paths[rule_part]
except KeyError:
path_router = route_step_paths[rule_part] = PathRouter()
path_router.register_route(rule, index)
return
if rule_part_type == PARAMETER_TYPE_PATH:
rule_rule_part_tuple = (rule, rule_part)
request_methods = rule.request_methods
if request_methods is None:
self.route_end_path_all = rule_rule_part_tuple
else:
route_end_path = self.route_end_path
if route_end_path is None:
route_end_path = self.route_end_path = {}
for request_method in request_methods:
route_end_path[request_method] = rule_rule_part_tuple
return
route_step_validated = self.route_step_validated
if route_step_validated is None:
route_step_validated = self.route_step_validated = []
parameter_validator_path_step, path_router = ParameterValidatorPathStep(
rule_part_type, rule_part
)
path_router.register_route(rule, index)
route_step_validated.append(parameter_validator_path_step)
return
for parameter_validator_path_step in route_step_validated:
if parameter_validator_path_step.parameter_type == rule_part_type:
path_router = parameter_validator_path_step.get_path_router(rule_part)
path_router.register_route(rule, index)
return
parameter_validator_path_step, path_router = ParameterValidatorPathStep(
rule_part_type, rule_part
)
path_router.register_route(rule, index)
route_step_validated.append(parameter_validator_path_step)
def render_structure(self, route_prefix_parts, parts):
"""
Renders the path router's structure to the given `parts` list.
Parameters
----------
route_prefix_parts : `list` of `str`
Prefix parts to the route to render.
parts : `list` of `str`
Rendered path parts.
"""
route_step_paths = self.route_step_paths
if route_step_paths is not None:
for path_part, path_router in route_step_paths.items():
route_prefix_parts.append(path_part)
path_router.render_structure(route_prefix_parts, parts)
del route_prefix_parts[-1]
route_step_validated = self.route_step_validated
if route_step_validated is not None:
for parameter_validator_path_step in route_step_validated:
parameter_validator_path_step.render_structure(
route_prefix_parts, parts
)
route_end = self.route_end
if route_end is not None:
_render_route_prefix_into(route_prefix_parts, parts)
parts.append(' ')
methods = sorted(route_end.keys())
if len(methods) != 1:
methods_repr = repr(methods)
else:
methods_repr = methods[0]
parts.append(methods_repr)
parts.append('\n')
route_end_all = self.route_end_all
if route_end_all is not None:
_render_route_prefix_into(route_prefix_parts, parts)
parts.append(' *\n')
route_end_path = self.route_end_path
if route_end_path is not None:
combinations = {}
for request_method, (rule, parameter_name) in route_end_path.items():
try:
methods = combinations[parameter_name]
except KeyError:
combinations[parameter_name] = methods = []
methods.append(request_method)
for parameter_name, methods in combinations.items():
path_part = _rebuild_path_parameter(PARAMETER_TYPE_PATH, parameter_name)
route_prefix_parts.append(path_part)
_render_route_prefix_into(route_prefix_parts, parts)
del route_prefix_parts[-1]
parts.append(' ')
                methods = sorted(methods)
if len(methods) != 1:
methods_repr = repr(methods)
else:
methods_repr = methods[0]
parts.append(methods_repr)
parts.append('\n')
route_end_path_all = self.route_end_path_all
if route_end_path_all is not None:
parameter_name = route_end_path_all[1]
path_part = _rebuild_path_parameter(PARAMETER_TYPE_PATH, parameter_name)
route_prefix_parts.append(path_part)
_render_route_prefix_into(route_prefix_parts, parts)
del route_prefix_parts[-1]
parts.append(' *\n')
def validate_parameters(self):
"""
Validates the parameters of the rules registered into the router.
Raises
------
RuntimeError
If a `view_func` might not receive a parameter in runtime.
"""
route_step_paths = self.route_step_paths
if route_step_paths is not None:
for path_router in route_step_paths.values():
path_router.validate_parameters()
route_step_validated = self.route_step_validated
if route_step_validated is not None:
for parameter_validator_path_step in route_step_validated:
parameter_validator_path_step.validate_parameters()
route_end = self.route_end
if route_end is not None:
for rule in route_end.values():
rule.validate_parameters()
route_end_all = self.route_end_all
if route_end_all is not None:
route_end_all.validate_parameters()
route_end_path = self.route_end_path
if route_end_path is not None:
for rule, parameter_name in route_end_path.values():
rule.validate_parameters()
route_end_path_all = self.route_end_path_all
if route_end_path_all is not None:
rule, parameter_name = route_end_path_all
rule.validate_parameters()
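# Hedged usage sketch for PathRouter (Rule, Route and the PARAMETER_TYPE_* constants
# are defined elsewhere in this module; the calls below are illustrative only):
#
#   router = PathRouter()
#   router.register_route(rule, 0)    # rule.rule holds the parsed (type, value) url parts
#   route = router.dispatch_route(('users', '42'), 0, 'GET')
#   # `route` is None when nothing matched, otherwise a Route wrapping the matched
#   # rule together with any extracted path parameters.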
class AbortRequest(BaseException):
"""
Exception raised when ``abort`` is called.
Attributes
----------
response_code : `int`
The request abortion code.
reason : `None` or `str`
Abortion reason to send.
"""
def __init__(self, response_code, reason=None):
self.response_code = response_code
self.reason = reason
BaseException.__init__(self)
def abort(response_code, reason=None):
"""
    Aborts the request.
Attributes
----------
response_code : `int`
The request abortion code.
reason : `None` or `str`
Abortion reason to send.
"""
raise AbortRequest(response_code, reason)
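# Illustrative use (hedged): a handler can short-circuit with, e.g.,
#
#   abort(NOT_FOUND, 'no such user')
#
# where NOT_FOUND is the numeric status code referenced alongside
# METHOD_NOT_ALLOWED elsewhere in this module.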
async def _handler_method_not_allowed():
"""
Aborts the request with error code 405.
This function is a coroutine.
Raises
------
AbortRequest
With response code of `405`.
"""
raise AbortRequest(METHOD_NOT_ALLOWED)
async def _handler_not_found():
"""
Aborts the request with error code 404.
This function is a coroutine.
Raises
------
AbortRequest
With response code of `404`.
"""
raise AbortRequest(NOT_FOUND)
class _RouteAdder:
"""
Route adder returned by ``WebBase.route`` to add a route to it as a decorator.
Attributes
----------
endpoint : `None` or `str`
The internal endpoint of the url. Defaults to the name of the added function.
options : `dict` of (`str`, `Any`) items.
Additional options to be forward to the underlying ``Rule`` object.
parent : ``AppBase``
The parent webapp.
rule : `str`
The url rule as string.
"""
__slots__ = ('endpoint', 'options', 'parent', 'rule')
def __new__(cls, parent, rule, endpoint, options):
"""
        Creates a new ``_RouteAdder`` object with the given parameters.
Parameters
----------
parent : ``AppBase``
The parent webapp.
rule : `str`
The url rule as string.
endpoint : `None` or `str`
The internal endpoint of the url. Defaults to the name of the added function.
options : `dict` of (`str`, `Any`) items.
Additional options to be forward to the underlying ``Rule`` object.
"""
self = object.__new__(cls)
self.parent = parent
self.rule = rule
self.endpoint = endpoint
self.options = options
return self
def __call__(self, view_func):
"""
Adds the given `view_func` and the stored parameters to the parent ``AppBase`` calling it's `.add_url_rule`
method.
Parameters
----------
view_func : `async-callable`
The function to call when | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import combinations
from collections import Counter
import os.path
import numpy as np
from scipy.stats import mode
from scipy.linalg import orth
from numpy.linalg import svd, lstsq, inv, pinv, multi_dot
from scipy.special import logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import as_float_array, check_array, check_X_y, check_random_state, column_or_1d
#from sklearn.utils.fixes import expit as sigmoid
from scipy.special import expit as sigmoid
#from sklearn.utils.estimator_checks import check_estimator
#from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import LabelBinarizer, label_binarize
from sklearn.linear_model import Ridge, RidgeClassifier, Lasso
from sklearn import metrics
#import matlab.engine
#from cvxpy import *
#from utils import *
#from mysoftclassifier import *
dot = np.dot # alias for np.dot
#def sigmoid(x):
# return 0.5*np.tanh(0.5*x)+0.5
class ELMClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, n_hidden=100, C=1.0, batch_size=None, fit_intercept=False, ovo=False, classes=None, activation_func='sigmoid', return_y=False, random_projection=True, random_state=None):
self.n_hidden = n_hidden
self.C = C
self.W = None
self.b = None
self.beta = None
self.P = None # P = (H'*H+C*I)^-1
self.activation_func = activation_func.lower()
self.batch_size = batch_size
self.fit_intercept = fit_intercept
self.random_projection = random_projection
self.random_state = random_state
self.random_state_ = None
self.ovo = ovo
self.classes = classes#np.array([1,2,3,4,5])
self.return_y = return_y
self.label_binarizer = None
self.fitted_ = False
def _validate_X(self, X):
if len(X.shape)==1:
raise ValueError('X should be a 2-dimensional array.')
# if one feature:
# X = X.reshape(1,-1)
# else: # one sample
# X = X.reshape(-1,1)
if X.shape[0]==0:
raise ValueError('Empty samples.')
if X.shape[1]==0:
            raise ValueError('0 feature(s) found while a minimum of %d is required.' % (1,))
return as_float_array(check_array(X))
def _validate_X_y(self, X, y):
X = self._validate_X(X)
X, y = check_X_y(X, y)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
if np.allclose(np.array(y,dtype=int),y):
self.label_binarizer = LabelBinarizer(neg_label=-2,pos_label=2,sparse_output=False)# y \in {-2,2} according to extreme logistic regression
if self.classes is not None:
self.label_binarizer.fit(self.classes)
y = self.label_binarizer.transform(y)
else:
y = self.label_binarizer.fit_transform(y)
self.classes = self.label_binarizer.classes_
if self.label_binarizer.classes_.shape[0]<2:
raise ValueError('Label contains less than 2 classes.')
else:
self.label_binarizer = None
self.fit_intercept = True
return X, y
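    # The closed-form solution used in fit below is ordinary ridge regression on the
    # hidden-layer outputs: with more samples than hidden units the primal form
    # beta = (H'H + (C*N/nh) I)^-1 H'T is used, otherwise the equivalent dual form
    # beta = H'(HH' + (C*N/nh) I)^-1 T, so the matrix being inverted is always the
    # smaller of the two.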
def fit(self, X, y, sample_weight=None):
self.fitted_ = False
self.random_state_ = check_random_state(self.random_state)
if np.any(np.isnan(y)):
nonnan_ids = np.logical_not(np.isnan(y))
X = X[nonnan_ids,:]
y = y[nonnan_ids]
X, y = self._validate_X_y(X, y)
N, dx = X.shape
N_ = N-self.n_hidden
self.classes_ = self.classes
#self.n_classes_ = len(self.classes)
if self.random_projection and (self.batch_size is None or self.P is None):
self.b = self.random_state_.uniform(size=self.n_hidden)*2-1
self.W = self.random_state_.uniform(size=(dx,self.n_hidden))*2-1
if self.batch_size is None or N_<=0:
# fit all
if self.random_projection:
if self.activation_func == 'sigmoid':
H = sigmoid(dot(X,self.W)+self.b)
else:
raise NotImplementedError('activation_func="%s" is not implemented.')
else:
self.n_hidden = X.shape[1]
H = X
if self.label_binarizer is None:
if self.ovo:
raise NotImplementedError('OVO for probabilistic label is not implemented yet.')
if sample_weight is not None:
raise NotImplementedError('sampled_weight for probabilistic label is not implemented yet.')
if not hasattr(self,'fit_intercept') or not self.fit_intercept:
raise TypeError('For probabilistic labels, self.fit_intercept must be True.')
output_layer=SoftLogisticRegression(C=self.C, learning_rate=0.01, momentum=0.9, max_iter=200,
random_state=self.random_state, tol=1e-4, verbose=False).fit(H,y)
self.beta = np.r_[output_layer.coefs_[-1].ravel(),output_layer.intercepts_[-1]]
else:
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.c_[X,np.ones((N,1))]
nh = self.n_hidden+1
else:
nh = self.n_hidden
if N>self.n_hidden:
if self.ovo:
if sample_weight is not None:
raise NotImplementedError('OVO and sampled_weight at the same time is not implemented yet.')
self.beta = np.empty((nh,self.label_binarizer.classes_.shape[0]*(self.label_binarizer.classes_.shape[0]-1)//2))
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
id_ = np.where(np.logical_or(y[:,ii[0]]==2,y[:,ii[1]]==2))[0]
#if self.C==0:
# self.beta[:,cc] = dot(pinv(H[id_,:]),y[id_,ii[0]])
#else:
Ht_ = H[id_,:].T
self.beta[:,cc] = multi_dot((inv(dot(Ht_,Ht_.T)+self.C*N*1.0/nh*np.eye(nh)),Ht_,y[id_,ii[0]]))
cc += 1
else:
if sample_weight is None:
#if self.C==0:
# self.beta = dot(pinv(H),y)
#else:
self.beta = multi_dot((inv(dot(H.T,H)+self.C*N*1.0/nh*np.eye(nh)),H.T,y))
else:
Ht =sample_weight*H.T
#if self.C==0:
# self.beta = dot(pinv(Ht.T),y)
#else:
self.beta = multi_dot((inv(dot(Ht,H)+self.C*1.0/nh*np.eye(nh)),Ht,y))
else:
if self.ovo:
if sample_weight is not None:
raise NotImplementedError('OVO and sampled_weight at the same time is not implemented yet.')
n_beta = self.label_binarizer.classes_.shape[0]*(self.label_binarizer.classes_.shape[0]-1)//2
self.beta = np.empty((nh,n_beta))
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
id_ = np.where(np.logical_or(y[:,ii[0]]==2,y[:,ii[1]]==2))[0]
H_ = H[id_,:]
#if self.C==0:
# self.beta[:,cc] = dot(pinv(H_),y[id_,ii[0]])
#else:
self.beta[:,cc] = multi_dot((H_.T,inv(dot(H_,H_.T)+self.C*N*1.0/nh*np.eye(N)),y[id_,ii[0]]))
cc += 1
else:
if sample_weight is None:
#if self.C==0:
# self.beta = dot(pinv(H),y)
#else:
self.beta = multi_dot((H.T,inv(dot(H,H.T)+self.C*N*1.0/nh*np.eye(N)),y))
else:
self.beta = multi_dot((H.T,inv((sample_weight*dot(H,H.T)).T+self.C*1.0/nh*np.eye(N)),(sample_weight*y.T).T))
else:
# OS-ELM
raise NotImplementedError('OS-ELM is not implemented yet.')
if self.ovo:
raise NotImplementedError('OVO in batch mode is not implemented yet.')
if sample_weight is not None:
raise NotImplementedError('sampled_weight in batch mode is not implemented yet.')
if N_%self.batch_size==0:
batches = [self.n_hidden]+[self.batch_size]*(N_//self.batch_size)
else:
batches = [self.n_hidden]+[self.batch_size]*(N_//self.batch_size)+[N_%self.batch_size]
#shuffled_id = list(range(N))
#self.random_state_.shuffle(shuffled_id)
#X = X[shuffled_id,:]
#y = y[shuffled_id]
for i in range(len(batches)):
start_n = sum(batches[:i])
end_n = sum(batches[:i+1])
y_part = y[start_n:end_n]
if self.random_projection:
if self.activation_func == 'sigmoid':
H = sigmoid(dot(X[start_n:end_n,:],self.W)+self.b)
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.c_[H,np.ones((batches[i],1))]
else:
raise NotImplementedError('activation_func="%s" is not implemented.')
else:
self.n_hidden = X.shape[1]
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.c_[X[start_n:end_n,:],np.ones((batches[i],1))]
else:
H = X[start_n:end_n,:]
if i==0 or self.P is None:
if hasattr(self,'fit_intercept') and self.fit_intercept:
nh = self.n_hidden+1
else:
nh = self.n_hidden
self.P = inv(dot(H.T,H)+self.C*N*1.0/nh*np.eye(nh))
self.beta = multi_dot((self.P,H.T,y_part))
else:
if N==1:
h = H.ravel()
hht = np.outer(h,h)
self.P = self.P - multi_dot((self.P,hht,self.P))/(1.+(self.P*hht).sum())
else:
PHt = dot(self.P,H.T)
self.P = self.P - multi_dot((PHt,inv(dot(H,PHt)+np.eye(batches[i])),H,self.P))
self.beta = self.beta + dot(dot(self.P,H.T),y_part-dot(H,self.beta))
self.fitted_ = True
return self
def fit_transform(self, X, y):
return self.fit(X,y).transform(X)
def transform(self, X):
return self.decision_function(X)
def decision_function(self, X):
if not self.fitted_:
raise ValueError('This ELMClassifier instance is not fitted yet.')
X = self._validate_X(X)
if self.random_projection:
H = sigmoid(dot(X,self.W)+self.b)
else:
H = X
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.hstack((H,np.ones((X.shape[0],1))))
return dot(H,self.beta)
def predict(self, X):
if self.ovo:
yy = self.decision_function(X)
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
id_ = yy[:,cc]>=0
yy[:,cc][id_] = ii[0]
yy[:,cc][np.logical_not(id_)] = ii[1]
cc += 1
yy = mode(yy,axis=1)[0].ravel()
return self.label_binarizer.inverse_transform(label_binarize(yy, range(self.label_binarizer.classes_.shape[0])))
else:
proba, y = self.predict_proba(X,return_y=True)
if y is None:
return proba
else:
return y
def predict_proba(self, X):
# [1] <NAME>., & <NAME>. (2013).
# Learning from large-scale distributed health data: An approximate logistic regression approach.
# In Proceedings of the 30th International Conference on Machine Learning, Atlanta, Georgia, USA, JMLR: W&CP (pp. 1-8).
# [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2014, May).
# Extreme Logistic Regression: A Large Scale Learning Algorithm with Application to Prostate Cancer Mortality Prediction.
# In FLAIRS Conference.
#if self.label_binarizer.classes_.shape[0]!=2:
# print('Warning: This is one-vs-all probability for each class.')
if self.ovo:
proba = label_binarize(self.predict(X),self.label_binarizer.classes_)
"""
K = self.label_binarizer.classes_.shape[0]
proba = np.zeros((X.shape[0],K))
for i in range(K):
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
if ii[0]==i:
proba[:,i] = np.maximum(proba[:,i],proba_[:,cc])
elif ii[1]==i:
proba[:,i] = np.maximum(proba[:,i],1-proba_[:,cc])
cc += 1
"""
else:
hb = self.decision_function(X)
proba = sigmoid(hb)
if proba.ndim>1:
proba = (proba.T/proba.sum(axis=1)).T
if self.return_y:
if self.label_binarizer is None:
return proba, None
else:
return proba, self.label_binarizer.inverse_transform(hb)
else:
return proba
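# Hedged usage sketch (toy data; hyper-parameter values are illustrative only):
#
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=200, n_features=10, random_state=0)
#   clf = ELMClassifier(n_hidden=50, C=1.0, random_state=0).fit(X, y)
#   y_hat = clf.predict(X)
#   proba = clf.predict_proba(X)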
class WeightedELMClassifier(ELMClassifier):
def __init__(self, n_hidden=100, C=1.0, batch_size=None, fit_intercept=False, ovo=False, classes=None, activation_func='sigmoid', random_projection=True, return_y=False, random_state=None):
super(WeightedELMClassifier, self).__init__(n_hidden=n_hidden, C=C, batch_size=batch_size, fit_intercept=fit_intercept, ovo=ovo, classes=classes, activation_func=activation_func, random_projection=random_projection, random_state=random_state, return_y=return_y)
def fit(self, X, y):
yc = Counter(y)
sample_weight = np.empty(X.shape[0])
#average_yc = np.mean(yc.values())
for yy in yc:
#if yc[yy]>average_yc:
# sample_weight[y==yy] = 1./np.sqrt(yc[yy])
#else:
# sample_weight[y==yy] = (np.sqrt(5)-1)/2/np.sqrt(yc[yy])
sample_weight[y==yy] = 1./np.sqrt(yc[yy])
return super(WeightedELMClassifier, self).fit(X, y, sample_weight=sample_weight/sample_weight.sum())
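# Added remark on WeightedELMClassifier.fit: every sample receives weight
# 1/sqrt(n_c), where n_c is the size of its class, and the weights are
# normalised to sum to one before being passed to ELMClassifier.fit as
# sample_weight; the commented-out branch is the golden-ratio variant of the
# weighted ELM scheme.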
class SSELMClassifier(ELMClassifier):
def __init__(self, n_hidden=100, C=1.0, lambda_=1.0, activation_func='sigmoid', matlab_code_path=None, classes=None, random_projection=True, random_state=None):
super(SSELMClassifier, self).__init__(n_hidden=n_hidden, C=C, batch_size=None, fit_intercept=False, activation_func=activation_func, classes=classes, random_projection=random_projection, random_state=random_state)
        self.lambda_ = lambda_
self.eng = None
self.L = None
#self.model_matlab = None
if matlab_code_path is None:
self.matlab_code_path = None
else:
self.matlab_code_path = os.path.normpath(matlab_code_path)
def start_matlab_connection(self):
if self.matlab_code_path is not None:
if self.eng is None:
self.eng = matlab.engine.start_matlab()
self.eng.addpath(self.matlab_code_path, nargout=0)
else:
self.eng = None
def close_matlab_connection(self):
if self.eng is not None:
self.eng.exit()
self.eng = None
def compute_graph_laplacian(self, X, params):
self.start_matlab_connection()
        self.L = self.eng.laplacian(params, matlab.double(X.tolist()), nargout=1)
        return self.L
def fit(self, X, y):
self.fitted_ = False
self.random_state_ = check_random_state(self.random_state)
X, y = self._validate_X_y(X, y)
if self.matlab_code_path is None:
raise NotImplementedError('No Python implementation for SSELM yet.')
"""
N, dx = X.shape
Nu = np.sum(np.isnan(y))
Nl = N-Nu
self.b = self.random_state_.uniform(size=self.n_hidden)*2-1
self.W = self.random_state_.uniform(size=(dx,self.n_hidden))*2-1
if self.activation_func == 'sigmoid':
H = sigmoid(dot(X,self.W)+self.b)
else:
raise NotImplementedError('activation_func="%s" is not implemented.')
C = np.eye(N,dtype=float)*self.C
C[range(Nl,N),range(Nl,N)] = 0.
L = ???
if Nl>self.n_hidden:
self.beta = multi_dot((inv(np.eye(self.n_hidden,dtype=float)+multi_dot((H.T,C+self.lambda_*L,H))),H.T,C,y))
else:
self.beta = multi_dot(H.T,inv(np.eye(N,dtype=float)+multi_dot((C+self.lambda_*L,H,H.T))),C,y)
"""
else:
unlabeled_id = np.isnan(y)
labeled_id = np.logical_not(unlabeled_id)
self.start_matlab_connection()
params = {'NN':50,'GraphWeights':'binary','GraphDistanceFunction':'euclidean',
'LaplacianNormalize':1,'LaplacianDegree':5,
'NoDisplay':1,'Kernel':'sigmoid','random_state':self.random_state,'random_projection':self.random_projection,
'NumHiddenNeuron':self.n_hidden,'C':self.C,'lambda':self.lambda_}
if self.L is None:
L = self.compute_graph_laplacian(X, params)
else:
L = self.L
import scipy.io as sio
            sio.savemat('bb.mat',{'paras':params,'X':X,'Xl':X[labeled_id,:],'Yl':y[labeled_id],'Xu':X[unlabeled_id,:],'L':L})
model_matlab = self.eng.sselm(matlab.double(X[labeled_id,:].tolist()),matlab.double(y[labeled_id].tolist()),
matlab.double(X[unlabeled_id,:].tolist()), L, params, nargout=1)
            self.W = model_matlab._data['InputWeight']
            self.b = model_matlab._data['InputBias']
            self.beta = model_matlab._data['OutputWeight']
self.fitted_ = True
return self
class ELMAutoEncoderClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, n_hiddens, Cs, reg_type='l2',output_layer=None, SSELM_lambda_=1., sigparas=1., sigparas1=1., matlab_code_path=None, random_state=None):
self.n_hiddens = n_hiddens
self.Cs = Cs
self.output_layer = output_layer
self.SSELM_lambda_ = SSELM_lambda_
if type(sigparas)==list:
self.sigparas = | |
"servers_update_addresses")})
def test_instance_details_volume_sorting(self):
server = self.servers.first()
volumes = self.volumes.list()[1:3]
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.network.servers_update_addresses(IsA(http.HttpRequest),
IgnoreArg())
api.nova.instance_volumes_list(IsA(http.HttpRequest),
server.id).AndReturn(volumes)
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
.AndReturn(self.flavors.first())
api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
.AndReturn(self.security_groups.first())
self.mox.ReplayAll()
url = reverse('horizon:project:instances:detail',
args=[server.id])
res = self.client.get(url)
self.assertItemsEqual(res.context['instance'].volumes, volumes)
self.assertEqual(res.context['instance'].volumes[0].device,
"/dev/hda")
self.assertEqual(res.context['instance'].volumes[1].device,
"/dev/hdk")
@test.create_stubs({api.nova: ("server_get",
"instance_volumes_list",
"flavor_get"),
api.network: ("server_security_groups",
"servers_update_addresses")})
def test_instance_details_metadata(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.network.servers_update_addresses(IsA(http.HttpRequest),
IgnoreArg())
api.nova.instance_volumes_list(IsA(http.HttpRequest),
server.id).AndReturn([])
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
.AndReturn(self.flavors.first())
api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('horizon:project:instances:detail',
args=[server.id])
tg = tabs.InstanceDetailTabs(self.request, instance=server)
qs = "?%s=%s" % (tg.param_name, tg.get_tab("overview").get_id())
res = self.client.get(url + qs)
self.assertContains(res, "<dd>keyName</dd>", 1)
self.assertContains(res, "<dt>someMetaLabel</dt>", 1)
self.assertContains(res, "<dd>someMetaData</dd>", 1)
self.assertContains(res, "<dt>some<b>html</b>label</dt>",
1)
self.assertContains(res, "<dd><!--</dd>", 1)
self.assertContains(res, "<dt>empty</dt>", 1)
self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
@test.create_stubs({api.nova: ('server_console_output',)})
def test_instance_log(self):
server = self.servers.first()
CONSOLE_OUTPUT = 'output'
api.nova.server_console_output(IsA(http.HttpRequest),
server.id, tail_length=None) \
.AndReturn(CONSOLE_OUTPUT)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:console',
args=[server.id])
tg = tabs.InstanceDetailTabs(self.request, instance=server)
qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
res = self.client.get(url + qs)
self.assertNoMessages()
self.assertIsInstance(res, http.HttpResponse)
self.assertContains(res, CONSOLE_OUTPUT)
@test.create_stubs({api.nova: ('server_console_output',)})
def test_instance_log_exception(self):
server = self.servers.first()
api.nova.server_console_output(IsA(http.HttpRequest),
server.id, tail_length=None) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:console',
args=[server.id])
tg = tabs.InstanceDetailTabs(self.request, instance=server)
qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
res = self.client.get(url + qs)
self.assertContains(res, "Unable to get log for")
def test_instance_vnc(self):
server = self.servers.first()
CONSOLE_OUTPUT = '/vncserver'
console_mock = self.mox.CreateMock(api.nova.VNCConsole)
console_mock.url = CONSOLE_OUTPUT
self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
self.mox.StubOutWithMock(api.nova, 'server_get')
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.server_vnc_console(IgnoreArg(), server.id) \
.AndReturn(console_mock)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:vnc',
args=[server.id])
res = self.client.get(url)
redirect = CONSOLE_OUTPUT + '&title=%s(1)' % server.name
self.assertRedirectsNoFollow(res, redirect)
@test.create_stubs({api.nova: ('server_vnc_console',)})
def test_instance_vnc_exception(self):
server = self.servers.first()
api.nova.server_vnc_console(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:vnc',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_instance_spice(self):
server = self.servers.first()
CONSOLE_OUTPUT = '/spiceserver'
console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
console_mock.url = CONSOLE_OUTPUT
self.mox.StubOutWithMock(api.nova, 'server_spice_console')
self.mox.StubOutWithMock(api.nova, 'server_get')
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.server_spice_console(IgnoreArg(), server.id) \
.AndReturn(console_mock)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:spice',
args=[server.id])
res = self.client.get(url)
redirect = CONSOLE_OUTPUT + '&title=%s(1)' % server.name
self.assertRedirectsNoFollow(res, redirect)
@test.create_stubs({api.nova: ('server_spice_console',)})
def test_instance_spice_exception(self):
server = self.servers.first()
api.nova.server_spice_console(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:spice',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_get',
'snapshot_create',
'server_list',
'flavor_list',
'server_delete'),
cinder: ('volume_snapshot_list',
'volume_list',),
api.glance: ('image_list_detailed',)})
def test_create_instance_snapshot(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.nova.snapshot_create(IsA(http.HttpRequest),
server.id,
"snapshot1").AndReturn(self.snapshots.first())
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None).AndReturn([[], False])
cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
cinder.volume_list(IsA(http.HttpRequest)).AndReturn([])
self.mox.ReplayAll()
formData = {'instance_id': server.id,
'method': 'CreateSnapshot',
'name': 'snapshot1'}
url = reverse('horizon:project:images_and_snapshots:snapshots:create',
args=[server.id])
redir_url = reverse('horizon:project:images_and_snapshots:index')
res = self.client.post(url, formData)
self.assertRedirects(res, redir_url)
instance_update_get_stubs = {
api.nova: ('server_get',),
api.network: ('security_group_list',
'server_security_groups',)}
@test.create_stubs(instance_update_get_stubs)
def test_instance_update_get(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.network.server_security_groups(IsA(http.HttpRequest),
server.id).AndReturn([])
self.mox.ReplayAll()
url = reverse('horizon:project:instances:update', args=[server.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs(instance_update_get_stubs)
def test_instance_update_get_server_get_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:update',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
def _instance_update_post(self, server_id, server_name, secgroups):
default_role_field_name = 'default_' + \
workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + '_role'
formData = {'name': server_name,
default_role_field_name: 'member',
SEC_GROUP_ROLE_PREFIX + 'member': secgroups}
url = reverse('horizon:project:instances:update',
args=[server_id])
return self.client.post(url, formData)
instance_update_post_stubs = {
api.nova: ('server_get', 'server_update'),
api.network: ('security_group_list',
'server_security_groups',
'server_update_security_groups')}
@test.create_stubs(instance_update_post_stubs)
def test_instance_update_post(self):
server = self.servers.first()
secgroups = self.security_groups.list()[:3]
server_groups = [secgroups[0], secgroups[1]]
wanted_groups = [secgroups[1].id, secgroups[2].id]
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(secgroups)
api.network.server_security_groups(IsA(http.HttpRequest),
server.id).AndReturn(server_groups)
api.nova.server_update(IsA(http.HttpRequest),
server.id,
server.name).AndReturn(server)
api.network.server_update_security_groups(IsA(http.HttpRequest),
server.id,
wanted_groups)
self.mox.ReplayAll()
res = self._instance_update_post(server.id, server.name, wanted_groups)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs(instance_update_post_stubs)
def test_instance_update_post_api_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.network.server_security_groups(IsA(http.HttpRequest),
server.id).AndReturn([])
api.nova.server_update(IsA(http.HttpRequest), server.id, server.name) \
.AndRaise(self.exceptions.nova)
api.network.server_update_security_groups(
IsA(http.HttpRequest), server.id, [])
self.mox.ReplayAll()
res = self._instance_update_post(server.id, server.name, [])
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs(instance_update_post_stubs)
def test_instance_update_post_secgroup_api_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.network.server_security_groups(IsA(http.HttpRequest),
server.id).AndReturn([])
api.nova.server_update(IsA(http.HttpRequest),
server.id,
server.name).AndReturn(server)
api.network.server_update_security_groups(
IsA(http.HttpRequest),
server.id, []).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self._instance_update_post(server.id, server.name, [])
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('extension_supported',
'flavor_list',
'keypair_list',
'tenant_absolute_limits',
'availability_zone_list',),
api.network: ('security_group_list',),
cinder: ('volume_snapshot_list',
'volume_list',),
api.neutron: ('network_list',
'profile_list',),
api.glance: ('image_list_detailed',)})
def test_launch_instance_get(self,
block_device_mapping_v2=True,
expect_password_fields=True):
image = self.images.first()
api.nova.extension_supported('BlockDeviceMappingV2Boot',
IsA(http.HttpRequest)) \
.AndReturn(block_device_mapping_v2)
cinder.volume_list(IsA(http.HttpRequest)) \
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
.AndReturn(self.volumes.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False])
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False) \
.AndReturn(self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True) \
.AndReturn(self.networks.list()[1:])
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
policy_profiles = self.policy_profiles.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'policy').AndReturn(policy_profiles)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.limits['absolute'])
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
api.nova.availability_zone_list(IsA(http.HttpRequest)) \
.AndReturn(self.availability_zones.list())
self.mox.ReplayAll()
url = reverse('horizon:project:instances:launch')
params = urlencode({"source_type": "image_id",
"source_id": image.id})
res = self.client.get("%s?%s" % (url, params))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(res.context['workflow'].name,
workflows.LaunchInstance.name)
step = workflow.get_step("setinstancedetailsaction")
self.assertEqual(step.action.initial['image_id'], image.id)
self.assertQuerysetEqual(workflow.steps,
['<SetInstanceDetails: setinstancedetailsaction>',
'<SetAccessControls: setaccesscontrolsaction>',
'<SetNetwork: setnetworkaction>',
'<PostCreationStep: customizeaction>'])
boot_from_image_field_label = 'Boot from image (creates a new volume).'
if block_device_mapping_v2:
self.assertContains(res, boot_from_image_field_label)
else:
self.assertNotContains(res, boot_from_image_field_label)
password_field_label = '<PASSWORD>'
if expect_password_fields:
self.assertContains(res, password_field_label)
else:
self.assertNotContains(res, password_field_label)
def test_launch_instance_get_no_block_device_mapping_v2_supported(self):
self.test_launch_instance_get(block_device_mapping_v2=False)
@test_utils.override_settings(
OPENSTACK_HYPERVISOR_FEATURES={'can_set_password': False})
def test_launch_instance_get_without_password(self):
self.test_launch_instance_get(expect_password_fields=False)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.neutron: ('network_list',
'profile_list',
'port_create',),
api.nova: ('extension_supported',
'flavor_list',
'keypair_list',
'availability_zone_list',
'server_create',),
api.network: ('security_group_list',),
cinder: ('volume_list',
'volume_snapshot_list',),
quotas: ('tenant_quota_usages',)})
def test_launch_instance_post(self):
flavor = self.flavors.first()
image = self.images.first()
keypair = self.keypairs.first()
server = self.servers.first()
sec_group = self.security_groups.first()
avail_zone = self.availability_zones.first()
customization_script = 'user data'
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
quota_usages = self.quota_usages.first()
api.nova.extension_supported('BlockDeviceMappingV2Boot',
IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
api.nova.availability_zone_list(IsA(http.HttpRequest)) \
.AndReturn(self.availability_zones.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False])
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False) \
.AndReturn(self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True) \
.AndReturn(self.networks.list()[1:])
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
policy_profiles = self.policy_profiles.list()
policy_profile_id = self.policy_profiles.first().id
port = self.ports.first()
api.neutron.profile_list(
IsA(http.HttpRequest),
'policy').AndReturn(policy_profiles)
api.neutron.port_create(
IsA(http.HttpRequest),
network_id=self.networks.first().id,
policy_profile_id=policy_profile_id).AndReturn(port)
cinder.volume_list(IsA(http.HttpRequest)) \
.AndReturn([])
cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
api.nova.server_create(IsA(http.HttpRequest),
server.name,
image.id,
flavor.id,
keypair.name,
customization_script,
[sec_group.name],
block_device_mapping=None,
block_device_mapping_v2=None,
nics=nics,
availability_zone=avail_zone.zoneName,
instance_count=IsA(int),
admin_pass=u'')
quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
.AndReturn(quota_usages)
self.mox.ReplayAll()
form_data = {'flavor': flavor.id,
'source_type': 'image_id',
'image_id': image.id,
'keypair': keypair.name,
'name': server.name,
'customization_script': customization_script,
'project_id': self.tenants.first().id,
'user_id': self.user.id,
'groups': sec_group.name,
'availability_zone': avail_zone.zoneName,
'volume_type': '',
'network': self.networks.first().id,
'count': 1}
url = reverse('horizon:project:instances:launch')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.neutron: ('network_list',
'profile_list',),
api.nova: ('extension_supported',
'flavor_list',
'keypair_list',
'availability_zone_list',
'server_create',),
api.network: ('security_group_list',),
cinder: ('volume_list',
'volume_snapshot_list',),
quotas: ('tenant_quota_usages',)})
def test_launch_instance_post_boot_from_volume(self):
flavor = self.flavors.first()
keypair = self.keypairs.first()
server = self.servers.first()
volume = self.volumes.first()
sec_group = self.security_groups.first()
avail_zone = self.availability_zones.first()
customization_script = 'user data'
device_name = u'vda'
volume_choice = "%s:vol" % volume.id
block_device_mapping = {device_name: u"%s::0" % volume_choice}
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
quota_usages = self.quota_usages.first()
api.nova.extension_supported('BlockDeviceMappingV2Boot',
IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
api.nova.availability_zone_list(IsA(http.HttpRequest)) \
.AndReturn(self.availability_zones.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False])
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False) \
.AndReturn(self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True) \
.AndReturn(self.networks.list()[1:])
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
policy_profiles = self.policy_profiles.list()
policy_profile_id = self.policy_profiles.first().id
port = self.ports.first()
api.neutron.profile_list(
IsA(http.HttpRequest),
'policy').AndReturn(policy_profiles)
api.neutron.port_create(
IsA(http.HttpRequest),
network_id=self.networks.first().id,
policy_profile_id=policy_profile_id).AndReturn(port)
nics = [{"port-id": port.id}]
cinder.volume_list(IsA(http.HttpRequest)) \
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
api.nova.server_create(IsA(http.HttpRequest),
server.name,
'',
flavor.id,
keypair.name,
customization_script,
[sec_group.name],
block_device_mapping=block_device_mapping,
block_device_mapping_v2=None,
nics=nics,
availability_zone=avail_zone.zoneName,
instance_count=IsA(int),
admin_pass=u'')
quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
.AndReturn(quota_usages)
self.mox.ReplayAll()
form_data = {'flavor': flavor.id,
'source_type': 'volume_id',
'source_id': volume_choice,
'keypair': keypair.name,
'name': server.name,
'customization_script': customization_script,
'project_id': self.tenants.first().id,
'user_id': self.user.id,
'groups': sec_group.name,
'availability_zone': avail_zone.zoneName,
'volume_size': '1',
'volume_id': volume_choice,
'device_name': device_name,
'network': self.networks.first().id,
'count': 1}
url = reverse('horizon:project:instances:launch')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.neutron: ('network_list',
'profile_list',
'port_create'),
api.nova: ('server_create',
'extension_supported',
'flavor_list',
'keypair_list',
'availability_zone_list',
'tenant_absolute_limits',),
api.network: ('security_group_list',),
cinder: ('volume_list',
'volume_snapshot_list',),
quotas: ('tenant_quota_usages',)})
def test_launch_instance_post_no_images_available_boot_from_volume(self):
flavor = self.flavors.first()
keypair = self.keypairs.first()
server = self.servers.first()
volume = self.volumes.first()
sec_group = self.security_groups.first()
avail_zone = self.availability_zones.first()
customization_script = 'user data'
device_name = u'vda'
volume_choice = "%s:vol" % volume.id
block_device_mapping = {device_name: u"%s::0" % volume_choice}
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
quota_usages = self.quota_usages.first()
api.nova.extension_supported('BlockDeviceMappingV2Boot',
IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
api.nova.availability_zone_list(IsA(http.HttpRequest)) \
.AndReturn(self.availability_zones.list())
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'is_public': True,
'status': 'active'}) \
.AndReturn([self.images.list(), False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False])
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False) \
.AndReturn(self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True) \
.AndReturn(self.networks.list()[1:])
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
return np.argmin(np.abs(converted_dataset['lev'] - pres).values)
def convert_lev_to_pres(dataset, pmid, pedge, lev_type='pmid'):
"""
Convert lev dimension to pressure in a GEOS-Chem dataset
Args:
dataset: xarray Dataset
GEOS-Chem dataset
pmid: np.array
Midpoint pressure values
pedge: np.array
Edge pressure values
lev_type (optional): str
Denote whether lev is 'pedge' or 'pmid' if grid is not 72/73 or 47/48 levels
Default value: 'pmid'
Returns:
dataset: xarray Dataset
Input dataset with "lev" dimension values replaced with pressure values
"""
if dataset.sizes["lev"] in (72, 47):
dataset["lev"] = pmid
elif dataset.sizes["lev"] in (73, 48):
dataset["lev"] = pedge
elif lev_type == 'pmid':
print('Warning: Assuming levels correspond with midpoint pressures')
dataset["lev"] = pmid
else:
dataset["lev"] = pedge
dataset["lev"].attrs["unit"] = "hPa"
dataset["lev"].attrs["long_name"] = "level pressure"
return dataset
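# Illustrative sketch (not part of the original module): one way
# convert_lev_to_pres might be called on a toy dataset, using the 72-level
# pressure arrays from GEOS_72L_grid defined further below. The variable name
# "SpeciesConc_O3" is only a placeholder.
def _example_convert_lev_to_pres():
    import numpy as np
    import xarray as xr
    nlev = 72
    toy = xr.Dataset(
        {"SpeciesConc_O3": (("lev",), np.linspace(20e-9, 100e-9, nlev))},
        coords={"lev": np.arange(1, nlev + 1)},
    )
    # after conversion, toy["lev"] holds midpoint pressures in hPa
    return convert_lev_to_pres(toy, GEOS_72L_grid.p_mid(), GEOS_72L_grid.p_edge())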
class vert_grid:
def __init__(self, AP=None, BP=None, p_sfc=1013.25):
        if AP is None or BP is None or len(AP) != len(BP):
# Throw error?
print('Inconsistent vertical grid specification')
self.AP = np.array(AP)
self.BP = np.array(BP)
self.p_sfc = p_sfc
def p_edge(self):
# Calculate pressure edges using eta coordinate
return self.AP + self.BP * self.p_sfc
def p_mid(self):
p_edge = self.p_edge()
return (p_edge[1:] + p_edge[:-1]) / 2.0
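# Illustrative sketch (not in the original source): the hybrid eta coordinate
# used by vert_grid is p_edge(k) = AP(k) + BP(k) * p_sfc; a tiny 3-level toy
# grid makes the arithmetic easy to check by hand.
def _example_vert_grid():
    toy = vert_grid(AP=[0.0, 50.0, 100.0, 1.0], BP=[1.0, 0.5, 0.0, 0.0], p_sfc=1000.0)
    edges = toy.p_edge()   # -> [1000., 550., 100., 1.]
    mids = toy.p_mid()     # -> [775., 325., 50.5]
    return edges, mids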
# Standard vertical grids
_GEOS_72L_AP = np.array([0.000000e+00,
4.804826e-02,
6.593752e+00,
1.313480e+01,
1.961311e+01,
2.609201e+01,
3.257081e+01,
3.898201e+01,
4.533901e+01,
5.169611e+01,
5.805321e+01,
6.436264e+01,
7.062198e+01,
7.883422e+01,
8.909992e+01,
9.936521e+01,
1.091817e+02,
1.189586e+02,
1.286959e+02,
1.429100e+02,
1.562600e+02,
1.696090e+02,
1.816190e+02,
1.930970e+02,
2.032590e+02,
2.121500e+02,
2.187760e+02,
2.238980e+02,
2.243630e+02,
2.168650e+02,
2.011920e+02,
1.769300e+02,
1.503930e+02,
1.278370e+02,
1.086630e+02,
9.236572e+01,
7.851231e+01,
6.660341e+01,
5.638791e+01,
4.764391e+01,
4.017541e+01,
3.381001e+01,
2.836781e+01,
2.373041e+01,
1.979160e+01,
1.645710e+01,
1.364340e+01,
1.127690e+01,
9.292942e+00,
7.619842e+00,
6.216801e+00,
5.046801e+00,
4.076571e+00,
3.276431e+00,
2.620211e+00,
2.084970e+00,
1.650790e+00,
1.300510e+00,
1.019440e+00,
7.951341e-01,
6.167791e-01,
4.758061e-01,
3.650411e-01,
2.785261e-01,
2.113490e-01,
1.594950e-01,
1.197030e-01,
8.934502e-02,
6.600001e-02,
4.758501e-02,
3.270000e-02,
2.000000e-02,
1.000000e-02])
_GEOS_72L_BP = np.array([1.000000e+00,
9.849520e-01,
9.634060e-01,
9.418650e-01,
9.203870e-01,
8.989080e-01,
8.774290e-01,
8.560180e-01,
8.346609e-01,
8.133039e-01,
7.919469e-01,
7.706375e-01,
7.493782e-01,
7.211660e-01,
6.858999e-01,
6.506349e-01,
6.158184e-01,
5.810415e-01,
5.463042e-01,
4.945902e-01,
4.437402e-01,
3.928911e-01,
3.433811e-01,
2.944031e-01,
2.467411e-01,
2.003501e-01,
1.562241e-01,
1.136021e-01,
6.372006e-02,
2.801004e-02,
6.960025e-03,
8.175413e-09,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00])
GEOS_72L_grid = vert_grid(_GEOS_72L_AP, _GEOS_72L_BP)
# Reduced grid
_GEOS_47L_AP = np.zeros(48)
_GEOS_47L_BP = np.zeros(48)
# Fill in the values for the surface
_GEOS_47L_AP[0] = _GEOS_72L_AP[0]
_GEOS_47L_BP[0] = _GEOS_72L_BP[0]
# Build the GEOS 72-layer to 47-layer mapping matrix at the same time
_xmat_i = np.zeros((72))
_xmat_j = np.zeros((72))
_xmat_s = np.zeros((72))
# Index here is the 1-indexed layer number
for _i_lev in range(1, 37):
# Map from 1-indexing to 0-indexing
_x_lev = _i_lev - 1
# Sparse matrix for regridding
# Below layer 37, it's 1:1
_xct = _x_lev
_xmat_i[_xct] = _x_lev
_xmat_j[_xct] = _x_lev
_xmat_s[_xct] = 1.0
# Copy over the pressure edge for the top of the grid cell
_GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev]
_GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev]
# Now deal with the lumped layers
_skip_size_vec = [2, 4]
_number_lumped = [4, 7]
# Initialize
_i_lev = 36
_i_lev_72 = 36
for _lump_seg in range(2):
_skip_size = _skip_size_vec[_lump_seg]
# 1-indexed starting point in the 47-layer grid
_first_lev_47 = _i_lev + 1
_first_lev_72 = _i_lev_72 + 1
# Loop over the coarse vertical levels (47-layer grid)
for _i_lev_offset in range(_number_lumped[_lump_seg]):
# i_lev is the index for the current level on the 47-level grid
_i_lev = _first_lev_47 + _i_lev_offset
# Map from 1-indexing to 0-indexing
_x_lev = _i_lev - 1
# Get the 1-indexed location of the last layer in the 72-layer grid
# which is below the start of the current lumping region
_i_lev_72_base = _first_lev_72 + (_i_lev_offset * _skip_size) - 1
# Get the 1-indexed location of the uppermost level in the 72-layer
# grid which is within the target layer on the 47-layer grid
_i_lev_72 = _i_lev_72_base + _skip_size
# Do the pressure edges first
# These are the 0-indexed locations of the upper edge for the
# target layers in 47- and 72-layer grids
_GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev_72]
_GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev_72]
# Get the total pressure delta across the layer on the lumped grid
# We are within the fixed pressure levels so don't need to account
# for variations in surface pressure
_dp_total = _GEOS_47L_AP[_i_lev - 1] - _GEOS_47L_AP[_i_lev]
# Now figure out the mapping
for _i_lev_offset_72 in range(_skip_size):
# Source layer in the 72 layer grid (0-indexed)
_x_lev_72 = _i_lev_72_base + _i_lev_offset_72
_xct = _x_lev_72
_xmat_i[_xct] = _x_lev_72
# Target in the 47 layer grid
_xmat_j[_xct] = _x_lev
# Proportion of 72-layer grid cell, by pressure, within expanded
# layer
_xmat_s[_xct] = (_GEOS_72L_AP[_x_lev_72] -
_GEOS_72L_AP[_x_lev_72 + 1]) / _dp_total
_start_pt = _i_lev
# Do last entry separately (no layer to go with it)
_xmat_72to47 = scipy.sparse.coo_matrix(
(_xmat_s, (_xmat_i, _xmat_j)), shape=(72, 47))
GEOS_47L_grid = vert_grid(_GEOS_47L_AP, _GEOS_47L_BP)
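# Illustrative sketch (an interpretation of the matrix built above, not part of
# the original code): each column of _xmat_72to47 holds the pressure fraction
# that a 72-layer source level contributes to a 47-layer target level, so a
# profile on the 72-layer grid can be lumped onto the 47-layer grid with a
# single sparse matrix-vector product.
def _example_lump_72_to_47(profile_72):
    # profile_72: 1-D array of length 72 (e.g. a mixing-ratio profile)
    return _xmat_72to47.transpose().dot(profile_72)   # 1-D array of length 47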
# CAM 26-layer grid
_CAM_26L_AP = np.flip(np.array([219.4067, 489.5209, 988.2418, 1805.201,
2983.724, 4462.334, 6160.587, 7851.243,
7731.271, 7590.131, 7424.086, 7228.744,
6998.933, 6728.574, 6410.509, 6036.322,
5596.111, 5078.225, 4468.96, 3752.191,
2908.949, 2084.739, 1334.443, 708.499,
252.136, 0., 0.]), axis=0) * 0.01
_CAM_26L_BP = np.flip(np.array([0., 0., 0., 0.,
0., 0., 0., 0.,
0.01505309, 0.03276228, 0.05359622, 0.07810627,
0.1069411, 0.14086370, 0.180772, 0.227722,
0.2829562, 0.3479364, 0.4243822, 0.5143168,
0.6201202, 0.7235355, 0.8176768, 0.8962153,
0.9534761, 0.9851122, 1.]), axis=0)
CAM_26L_grid = vert_grid(_CAM_26L_AP, _CAM_26L_BP)
def make_grid_LL(llres, in_extent=[-180, 180, -90, 90], out_extent=[]):
"""
Creates a lat/lon grid description.
Args:
llres: str
lat/lon resolution in 'latxlon' format (e.g. '4x5')
Keyword Args (optional):
in_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of initial grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
out_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of target grid
in the format [minlon, maxlon, minlat, maxlat]. Needed when intending
to use grid to trim extent of input data
Default value: [] (assumes value of in_extent)
Returns:
llgrid: dict
dict grid description of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
"""
# get initial bounds of grid
[minlon, maxlon, minlat, maxlat] = in_extent
[dlat, dlon] = list(map(float, llres.split('x')))
lon_b = np.linspace(minlon - dlon / 2, maxlon - dlon /
2, int((maxlon - minlon) / dlon) + 1)
lat_b = np.linspace(minlat - dlat / 2, maxlat + dlat / 2,
int((maxlat - minlat) / dlat) + 2)
if minlat <= -90:
lat_b = lat_b.clip(-90, None)
if maxlat >= 90:
lat_b = lat_b.clip(None, 90)
lat = (lat_b[1:] + lat_b[:-1]) / 2
lon = (lon_b[1:] + lon_b[:-1]) / 2
# trim grid bounds when your desired extent is not the same as your
# initial grid extent
if out_extent == []:
out_extent = in_extent
if out_extent != in_extent:
[minlon, maxlon, minlat, maxlat] = out_extent
minlon_ind = np.nonzero(lon >= minlon)
maxlon_ind = np.nonzero(lon <= maxlon)
lon_inds = np.intersect1d(minlon_ind, maxlon_ind)
lon = lon[lon_inds]
# make sure to get edges of grid correctly
lon_inds = np.append(lon_inds, np.max(lon_inds) + 1)
lon_b = lon_b[lon_inds]
minlat_ind = np.nonzero(lat >= minlat)
maxlat_ind = np.nonzero(lat <= maxlat)
lat_inds = np.intersect1d(minlat_ind, maxlat_ind)
lat = lat[lat_inds]
# make sure to get edges of grid correctly
lat_inds = np.append(lat_inds, np.max(lat_inds) + 1)
lat_b = lat_b[lat_inds]
llgrid = {'lat': lat,
'lon': lon,
'lat_b': lat_b,
'lon_b': lon_b}
return llgrid
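# Illustrative sketch (not in the original source): the standard GEOS-Chem 4x5
# global grid. With the default extent this should give 72 longitude and 46
# latitude midpoints, with the outermost latitude edges clipped to +/-90.
def _example_make_grid_ll():
    grid = make_grid_LL('4x5')
    return grid['lon'].shape, grid['lat'].shape   # expected ((72,), (46,))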
def make_grid_CS(csres):
"""
Creates a cubed-sphere grid description.
Args:
csres: int
cubed-sphere resolution of target grid
Returns:
[csgrid, csgrid_list]: list[dict, list[dict]]
csgrid is a dict of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
where each value has an extra face dimension of length 6.
csgrid_list is a list of dicts separated by face index
"""
csgrid = csgrid_GMAO(csres)
csgrid_list = [None] * 6
for i in range(6):
csgrid_list[i] = {'lat': csgrid['lat'][i],
'lon': csgrid['lon'][i],
'lat_b': csgrid['lat_b'][i],
'lon_b': csgrid['lon_b'][i]}
return [csgrid, csgrid_list]
def make_grid_SG(csres, stretch_factor, target_lon, target_lat):
"""
Creates a stretched-grid grid description.
Args:
csres: int
cubed-sphere resolution of target grid
stretch_factor: float
stretch factor of target grid
target_lon: float
target stretching longitude of target grid
        target_lat: float
target stretching latitude of target grid
Returns:
[csgrid, csgrid_list]: list[dict, list[dict]]
csgrid is a dict of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
where each value has an extra face dimension of length 6.
        csgrid_list is a list of dicts separated by face index
"""
This is the script of modality prediction of all 4 subtasks
Dependencies:
pip: scikit-learn, anndata, scanpy, numpy
"""
import os
import sys
import logging
import anndata as ad
import numpy as np
import torch
from torch.utils.data import DataLoader
from scipy.sparse import csc_matrix
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LinearRegression
logging.basicConfig(level=logging.INFO)
## VIASH START
task = "gex2adt"
dataset_path = "output/datasets/predict_modality/"
pretrain_path = (
f"output/pretrain/predict_modality/scjoint/{task}_train.output_pretrain/"
)
mode = {
"gex2atac": f"{dataset_path}openproblems_bmmc_multiome_phase2_rna/openproblems_bmmc_multiome_phase2_rna.censor_dataset.output_",
"gex2adt": f"{dataset_path}openproblems_bmmc_cite_phase2_rna/openproblems_bmmc_cite_phase2_rna.censor_dataset.output_",
"adt2gex": f"{dataset_path}openproblems_bmmc_cite_phase2_mod2/openproblems_bmmc_cite_phase2_mod2.censor_dataset.output_",
"atac2gex": f"{dataset_path}openproblems_bmmc_multiome_phase2_mod2/openproblems_bmmc_multiome_phase2_mod2.censor_dataset.output_",
}
par = {
"input_train_mod1": mode[task] + "train_mod1.h5ad",
"input_train_mod2": mode[task] + "train_mod2.h5ad",
"input_test_mod1": mode[task] + "test_mod1.h5ad",
"input_pretrain": pretrain_path,
"output": "output.h5ad",
}
meta = {
"resources_dir": "src/predict_modality/methods/scJoint/",
}
## VIASH END
sys.path.append(meta["resources_dir"])
from resources.modules.model_ae import AutoEncoder, BatchRemovalGAN
from resources.utils.dataloader import SeqDataset
method_id = "scjoint"
# load model
def pretrin_nn(
test_mod1,
model_pth,
mod1_dim,
mod2_dim,
feat_dim,
hidden_dim,
mod1_idx_path=None,
tfidf=0,
idf_matrix=None,
gene_activity=False,
log=False,
):
"""
load the pre-trained nn / cycle AtoB model
input the mod 1 test data, output the mod 2 prediction
Parameters
----------
test_mod1
Path to .h5ad file of mod1 test set
model_pth
Path to pre-trained model
mod1_dim
The dimension of mod1 dataset
mod2_dim
The dimension of mod2 dataset
feat_dim
The dimension of pre-trained model embedding feature
hidden_dim
The dimension of pre-trained model hidden layer
mod1_idx_path
The path to mod1 index path (.np file), use when selection=True
e.g., 2pct, 5pct mode
Default: None
tfidf
The tfidf mode.
0: do not use the tfidf feature (mod1_dim = mod1_dim)
1: use the tfidf feature (mod1_dim = mod1_dim)
2: concat raw & tfidf feature (mod1_dim = mod1_dim * 2)
3: concat gene activity & tfidf feature (mod1_dim = mod1_dim + ga_dim)
Default: 0
idf_matrix
The path to pre-calculated idf matrix, required if tfidf != 0
Default: None
gene_activity
        Use gene activity feature in atac2gex mode
        Default: False
    log
        Show the pre-trained model architecture
Default: False
"""
logging.info("Use pretrain model...")
logging.info(f"Model Path: {model_pth}")
# Dataset
testset = SeqDataset(
test_mod1,
mod1_idx_path=mod1_idx_path,
tfidf=tfidf,
mod1_idf=idf_matrix,
gene_activity=gene_activity,
)
test_loader = DataLoader(testset, batch_size=256, shuffle=False)
model_ae = AutoEncoder(
input_dim=mod1_dim, out_dim=mod2_dim, feat_dim=feat_dim, hidden_dim=hidden_dim
).float()
if log:
logging.info(model_ae)
# Load weight
# model_ae.load_state_dict(torch.load(model_pth)) # gpu
model_ae.load_state_dict(
torch.load(model_pth, map_location=torch.device("cpu"))
) # cpu
# Model inference
model_ae.eval()
mod2_matrix = np.zeros((1, mod2_dim))
for _, (mod1_seq, _) in enumerate(test_loader):
mod1_seq = mod1_seq.float()
mod2_rec = model_ae(mod1_seq)
mod2_rec = mod2_rec.data.cpu().numpy()
mod2_matrix = np.vstack((mod2_matrix, mod2_rec))
mod2_pred = np.array(
mod2_matrix[
1:,
]
)
logging.info("Finish Prediction")
return mod2_pred
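# Added remark on pretrin_nn: batch predictions are accumulated by vstacking
# onto a single zero-filled seed row, which is then dropped via
# mod2_matrix[1:, ] before the stacked matrix is returned.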
# do pca
def pca(
input_train_mod1,
input_train_mod2,
input_test_mod1,
n=50,
alg="randomized",
n_iter=5,
seed=6666,
):
"""
Apply PCA on the data
input the mod 1 & mod2 train data, mod1 test data
output the mod 2 prediction
Parameters
----------
input_train_mod1
The anndata format of mod 1 input training data
input_train_mod2
The anndata format of mod 2 input training data
input_test_mod1
The anndata format of mod 1 input testing data
n: int, default=50
Desired dimensionality of output data. Must be strictly less than the number of features.
alg: {‘arpack’, ‘randomized’}, default=’randomized’
SVD solver to use.
Either “arpack” for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds),
or “randomized” for the randomized algorithm due to Halko (2009).
n_iter: int, default=5
Number of iterations for randomized SVD solver. Not used by ARPACK.
seed: int, default=6666
Used during randomized svd.
Pass an int for reproducible results across multiple function calls.
or use None for nonreproducible random states
"""
logging.info("Use PCA...")
input_train = ad.concat(
{"train": input_train_mod1, "test": input_test_mod1},
axis=0,
join="outer",
label="group",
fill_value=0,
index_unique="-",
)
# Do PCA on the input data
logging.info("Performing dimensionality reduction on modality 1 values...")
embedder_mod1 = TruncatedSVD(
n_components=n, algorithm=alg, n_iter=n_iter, random_state=seed
)
mod1_pca = embedder_mod1.fit_transform(input_train.X)
logging.info("Performing dimensionality reduction on modality 2 values...")
embedder_mod2 = TruncatedSVD(
n_components=n, algorithm=alg, n_iter=n_iter, random_state=seed
)
mod2_pca = embedder_mod2.fit_transform(input_train_mod2.X)
# split dimred back up
X_train = mod1_pca[input_train.obs["group"] == "train"]
X_test = mod1_pca[input_train.obs["group"] == "test"]
y_train = mod2_pca
assert len(X_train) + len(X_test) == len(mod1_pca)
logging.info("Running Linear regression...")
# KNN regressor later on.
reg = LinearRegression()
# Train the model on the PCA reduced modality 1 and 2 data
reg.fit(X_train, y_train)
mod2_pred = reg.predict(X_test)
# Project the predictions back to the modality 2 feature space
mod2_pred = mod2_pred @ embedder_mod2.components_
logging.info("Finish Prediction")
return np.array(mod2_pred)
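# Added remark on pca(): the linear regression is fit between the two
# truncated-SVD latent spaces, so reg.predict(X_test) lives in the modality-2
# latent space and is mapped back to feature space by right-multiplying with
# embedder_mod2.components_ (shape n_components x n_mod2_features).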
logging.info("Reading `h5ad` files...")
INPUT_TRAIN_MOD1 = ad.read_h5ad(par["input_train_mod1"])
INPUT_TRAIN_MOD2 = ad.read_h5ad(par["input_train_mod2"])
INPUT_TEST_MOD1 = ad.read_h5ad(par["input_test_mod1"])
# Check data shape
LOAD_MODEL = True
MOD1_DIM = int(INPUT_TRAIN_MOD1.X.shape[1])
MOD2_DIM = int(INPUT_TRAIN_MOD2.X.shape[1])
FEAT_DIM = 128
HIDDEN_DIM = 1000
# check input format and apply different methods for each subtask
if INPUT_TRAIN_MOD2.var["feature_types"][0] == "ATAC":
logging.info("GEX to ATAC")
LOAD_MODEL = MOD1_DIM == 13431 and MOD2_DIM == 10000
if LOAD_MODEL:
# model (pretrain 1) concat
MODEL_PTH = (
par["input_pretrain"]
+ "/model_best_AtoB_cycle_gex2atac_tfidfconcat_pretrain1.pt"
)
if not os.path.isfile(MODEL_PTH):
            MODEL_PTH = MODEL_PTH.replace("_best", "")
mod1_idf = np.load(par["input_pretrain"] + "/mod1_idf.npy")
y1_pred_concat = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
MOD1_DIM * 2,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
tfidf=2,
idf_matrix=mod1_idf,
)
# model (2) pca
y2_pred_pca = pca(
INPUT_TRAIN_MOD1, INPUT_TRAIN_MOD2, INPUT_TEST_MOD1, n=50, alg="arpack"
)
# ensemble
y_pred = (np.array(y1_pred_concat) + np.array(y2_pred_pca)) / 2
elif INPUT_TRAIN_MOD2.var["feature_types"][0] == "ADT":
logging.info("GEX to ADT")
LOAD_MODEL = MOD1_DIM == 13953 and MOD2_DIM == 134
if LOAD_MODEL:
# model (pretrain 1a) nn
MODEL_PTH = par["input_pretrain"] + "/model_best_nn_gex2adt_pretrain1a.pt"
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
y1a_pred_nn = pretrin_nn(
par["input_test_mod1"], MODEL_PTH, MOD1_DIM, MOD2_DIM, FEAT_DIM, HIDDEN_DIM
)
# model (pretrain 2b) nn
MODEL_PTH = par["input_pretrain"] + "/model_best_nn_gex2adt_pretrain2b.pt"
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
y2b_pred_nn = pretrin_nn(
par["input_test_mod1"], MODEL_PTH, MOD1_DIM, MOD2_DIM, FEAT_DIM, HIDDEN_DIM
)
# model (pretrain 3a) concat
MODEL_PTH = (
par["input_pretrain"] + "/model_best_nn_gex2adt_tfidfconcat_pretrain3a.pt"
)
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
mod1_idf = np.load(par["input_pretrain"] + "/mod1_idf.npy")
y3a_pred_concat = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
MOD1_DIM * 2,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
tfidf=2,
idf_matrix=mod1_idf,
)
# model (pretrain 4b) concat
MODEL_PTH = (
par["input_pretrain"] + "/model_best_nn_gex2adt_tfidfconcat_pretrain4b.pt"
)
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
mod1_idf = np.load(par["input_pretrain"] + "/mod1_idf.npy")
y4b_pred_concat = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
MOD1_DIM * 2,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
tfidf=2,
idf_matrix=mod1_idf,
)
# model (pretrain 5c) nn
MODEL_PTH = par["input_pretrain"] + "/model_best_nn_gex2adt_pretrain5c.pt"
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
y5c_pred_nn = pretrin_nn(
par["input_test_mod1"], MODEL_PTH, MOD1_DIM, MOD2_DIM, FEAT_DIM, HIDDEN_DIM
)
# model (pretrain 6d) nn
MODEL_PTH = par["input_pretrain"] + "/model_best_nn_gex2adt_pretrain6d.pt"
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
y6d_pred_nn = pretrin_nn(
par["input_test_mod1"], MODEL_PTH, MOD1_DIM, MOD2_DIM, FEAT_DIM, HIDDEN_DIM
)
# model (pretrain 7c) concat
MODEL_PTH = (
par["input_pretrain"] + "/model_best_nn_gex2adt_tfidfconcat_pretrain7c.pt"
)
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
mod1_idf = np.load(par["input_pretrain"] + "/mod1_idf.npy")
y7c_pred_concat = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
MOD1_DIM * 2,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
tfidf=2,
idf_matrix=mod1_idf,
)
# model (pretrain 8d) concat
MODEL_PTH = (
par["input_pretrain"] + "/model_best_nn_gex2adt_tfidfconcat_pretrain8d.pt"
)
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
mod1_idf = np.load(par["input_pretrain"] + "/mod1_idf.npy")
y8d_pred_concat = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
MOD1_DIM * 2,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
tfidf=2,
idf_matrix=mod1_idf,
)
# model (9) pca
y9_pred_pca = pca(INPUT_TRAIN_MOD1, INPUT_TRAIN_MOD2, INPUT_TEST_MOD1, n=100)
# model (10) pca
y10_pred_pca = pca(INPUT_TRAIN_MOD1, INPUT_TRAIN_MOD2, INPUT_TEST_MOD1, n_iter=10)
# ensemble (10)
y_pred = (
np.array(y1a_pred_nn)
+ np.array(y2b_pred_nn)
+ np.array(y3a_pred_concat)
+ np.array(y4b_pred_concat)
+ np.array(y5c_pred_nn)
+ np.array(y6d_pred_nn)
+ np.array(y7c_pred_concat)
+ np.array(y8d_pred_concat)
+ np.array(y9_pred_pca)
+ np.array(y10_pred_pca)
) / 10
elif INPUT_TRAIN_MOD1.var["feature_types"][0] == "ADT":
logging.info("ADT to GEX")
LOAD_MODEL = MOD1_DIM == 134 and MOD2_DIM == 13953
if LOAD_MODEL:
# model (pretrain 1d) concat
MODEL_PTH = (
par["input_pretrain"]
+ "/model_best_AtoB_cycle_adt2gex_tfidfconcat_pretrain1d.pt"
)
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
mod1_idf = np.load(par["input_pretrain"] + "/mod1_idf.npy")
y1d_pred_concat = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
MOD1_DIM * 2,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
tfidf=2,
idf_matrix=mod1_idf,
)
# model (pretrain 2d) cycle
MODEL_PTH = (
par["input_pretrain"] + "/model_best_AtoB_cycle_adt2gex_pretrain2d.pt"
)
MODEL_PTH = (
MODEL_PTH if os.path.isfile(MODEL_PTH) else MODEL_PTH.replace("_best", "")
)
y2d_pred_cycle = pretrin_nn(
par["input_test_mod1"], MODEL_PTH, MOD1_DIM, MOD2_DIM, FEAT_DIM, HIDDEN_DIM
)
# ensemble
y_pred = (np.array(y1d_pred_concat) + np.array(y2d_pred_cycle)) / 2
elif INPUT_TRAIN_MOD1.var["feature_types"][0] == "ATAC":
logging.info("ATAC to GEX")
LOAD_MODEL = MOD1_DIM == 116490 and MOD2_DIM == 13431
if LOAD_MODEL:
# model (pretrain 1) ga
MODEL_PTH = (
par["input_pretrain"] + "/model_best_AtoB_cycle_atac2gex_ga_pretrain1b.pt"
)
y1_pred_ga = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
19039,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
gene_activity=True,
)
# model (pretrain 2) ga
MODEL_PTH = (
par["input_pretrain"] + "/model_best_AtoB_cycle_atac2gex_ga_pretrain2b.pt"
)
y2_pred_ga = pretrin_nn(
par["input_test_mod1"],
MODEL_PTH,
19039,
MOD2_DIM,
FEAT_DIM,
HIDDEN_DIM,
gene_activity=True,
)
# ensemble
y_pred = (np.array(y1_pred_ga) + np.array(y2_pred_ga)) / 2
if not LOAD_MODEL:
# PCA method
y_pred = pca(
INPUT_TRAIN_MOD1, INPUT_TRAIN_MOD2, INPUT_TEST_MOD1, n=50, alg="arpack"
)
y_pred = csc_matrix(y_pred)
# Saving data to anndata format
logging.info("Storing annotated data...")
adata = ad.AnnData(
X=y_pred,
| |
== 2 else self.username)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startGABinaryInput_args)
startGABinaryInput_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.LIST, 'dataset', (TType.STRUCT, [BinaryInputArchitecture, None], False), None, ), # 2
(3, TType.STRING, 'username', 'UTF8', None, ), # 3
)
class startGABinaryInput_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startGABinaryInput_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startGABinaryInput_result)
startGABinaryInput_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
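# Illustrative sketch (not part of the generated file): round-tripping one of
# the argument structs through an in-memory binary protocol. The field values
# below ("ClimateCentric", "someone") are placeholders, not values used by any
# real service.
def _example_roundtrip_startGABinaryInput_args():
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    args = startGABinaryInput_args(problem='ClimateCentric', dataset=[], username='someone')
    write_buf = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(write_buf))
    decoded = startGABinaryInput_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(write_buf.getvalue())))
    return decoded.problem, decoded.username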
class startGADiscreteInput_args(object):
"""
Attributes:
- problem
- dataset
- username
"""
def __init__(self, problem=None, dataset=None, username=None,):
self.problem = problem
self.dataset = dataset
self.username = username
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.dataset = []
(_etype261, _size258) = iprot.readListBegin()
for _i262 in range(_size258):
_elem263 = DiscreteInputArchitecture()
_elem263.read(iprot)
self.dataset.append(_elem263)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.username = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startGADiscreteInput_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.dataset is not None:
oprot.writeFieldBegin('dataset', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.dataset))
for iter264 in self.dataset:
iter264.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 3)
oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startGADiscreteInput_args)
startGADiscreteInput_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.LIST, 'dataset', (TType.STRUCT, [DiscreteInputArchitecture, None], False), None, ), # 2
    (3, TType.STRING, 'username', 'UTF8', None, ),  # 3
)
class startGADiscreteInput_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startGADiscreteInput_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startGADiscreteInput_result)
startGADiscreteInput_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
class startGAScheduling_args(object):
"""
Attributes:
- problem
- dataset
- inputArches
- historicalInfo
- username
"""
def __init__(self, problem=None, dataset=None, inputArches=None, historicalInfo=None, username=None,):
self.problem = problem
self.dataset = dataset
self.inputArches = inputArches
self.historicalInfo = historicalInfo
self.username = username
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.dataset = []
(_etype268, _size265) = iprot.readListBegin()
for _i269 in range(_size265):
_elem270 = SchedulingInputArchitecture()
_elem270.read(iprot)
self.dataset.append(_elem270)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.inputArches = []
(_etype274, _size271) = iprot.readListBegin()
for _i275 in range(_size271):
_elem276 = BinaryInputArchitecture()
_elem276.read(iprot)
self.inputArches.append(_elem276)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.historicalInfo = []
(_etype280, _size277) = iprot.readListBegin()
for _i281 in range(_size277):
_elem282 = MissionMeasurements()
_elem282.read(iprot)
self.historicalInfo.append(_elem282)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.username = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startGAScheduling_args')
if self.problem is not None:
oprot.writeFieldBegin('problem', TType.STRING, 1)
oprot.writeString(self.problem.encode('utf-8') if sys.version_info[0] == 2 else self.problem)
oprot.writeFieldEnd()
if self.dataset is not None:
oprot.writeFieldBegin('dataset', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.dataset))
for iter283 in self.dataset:
iter283.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.inputArches is not None:
oprot.writeFieldBegin('inputArches', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.inputArches))
for iter284 in self.inputArches:
iter284.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.historicalInfo is not None:
oprot.writeFieldBegin('historicalInfo', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.historicalInfo))
for iter285 in self.historicalInfo:
iter285.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 5)
oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startGAScheduling_args)
startGAScheduling_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'problem', 'UTF8', None, ), # 1
(2, TType.LIST, 'dataset', (TType.STRUCT, [SchedulingInputArchitecture, None], False), None, ), # 2
(3, TType.LIST, 'inputArches', (TType.STRUCT, [BinaryInputArchitecture, None], False), None, ), # 3
(4, TType.LIST, 'historicalInfo', (TType.STRUCT, [MissionMeasurements, None], False), None, ), # 4
(5, TType.STRING, 'username', 'UTF8', None, ), # 5
)
class startGAScheduling_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startGAScheduling_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startGAScheduling_result)
startGAScheduling_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
class getArchScienceInformationBinaryInput_args(object):
"""
Attributes:
- problem
- arch
"""
def __init__(self, problem=None, arch=None,):
self.problem = problem
self.arch = arch
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.problem = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == | |
#!/usr/bin/env python3
import argparse
import base64
import contextlib
import io
import json
import logging
import os
import sys
import tarfile
import time
import docker
from ..common import PatchStatus
from ..database import Db
l = logging.getLogger("patchbot")
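# bin_diff: counts byte positions that differ between the two buffers (pairwise over
# the shorter length) plus the difference in length. Derived directly from the code
# below, e.g. bin_diff(b"abc", b"abd") == 1 and bin_diff(b"abc", b"abcde") == 2.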
def bin_diff(byte_arr1, byte_arr2):
byte_diff = 0
for (x,y) in zip(byte_arr1, byte_arr2):
if x != y:
byte_diff += 1
return byte_diff + abs(len(byte_arr1) - len(byte_arr2))
# Need this for ooows to set the device parameter to /dev/kvm
DOCKER_DEVICE_OPTION = [os.environ["DOCKER_DEVICE_OPTION"]] if "DOCKER_DEVICE_OPTION" in os.environ else []
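# launch_container: context manager that runs `image` detached with the given limits,
# yields the container object, and on exit logs its output, kills it if it has not
# exited, and removes it. Note that if client.containers.run() raises, the generator
# returns before yielding, which @contextlib.contextmanager surfaces to the caller as
# a RuntimeError rather than a (None, None) pair.
#
# Usage sketch (illustrative image/command, assumes a reachable docker daemon):
#   client = docker.from_env()
#   with launch_container(client, client.api, "alpine:latest", command=["sleep", "30"]) as c:
#       print(c.name)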
@contextlib.contextmanager
def launch_container(client, docker_api, image, network_name=None, pull_latest=False, command=None, mem_limit=None, mem_reservation=None, environment=None, hostname=None):
if mem_limit is None: mem_limit = '512M'
if mem_reservation is None: mem_reservation = '512M'
l.debug(f"Going to run image {image}")
if pull_latest:
l.info(f"Getting the latest {image}")
client.images.pull(image)
# INSANE HACK TO SUPPORT A CHALLENGE, WHY DO WE DO THIS TO OURSELVES AT THE LAST MINUTE EVERY TIME
security_opt = []
if os.path.exists("/etc/default.json"):
with open("/etc/default.json", 'r') as f:
security_opt = [f"seccomp={f.read()}"]
try:
container = client.containers.run(image, command=command,
network=network_name,
detach=True,
mem_limit=mem_limit,
mem_reservation=mem_reservation,
environment=environment,
hostname=hostname,
devices=DOCKER_DEVICE_OPTION,
security_opt=security_opt,
)
except Exception:
l.exception(f"Error running {image}")
return None, None
l.debug(f"container {container.name} (image {image}) launched")
# Give it some time to come up [ TODO: necessary? wait for endpoint IP/port? ]
time.sleep(2)
try:
yield container
finally:
l.debug(f"stopping container {container.name} {image} ")
l.info(f"container {container.name} {image} output {container.logs(stderr=True)}")
container.reload()
l.debug(f"container {container.name} {image} status {container.status}")
if container.status != 'exited':
try:
# TODO: why does this happen?
l.debug(f"status != exited after container.reload(), killing container {container.name} {image}")
container.kill()
except Exception:
l.info(f"exception when killing container {container.name} {image}, likely nothing, so continuing.")
try:
container.remove()
except Exception:
l.exception(f"error removing container {container.name} image {image}, this is likely bad but going to carry on.")
def get_ip(docker_api, container_name):
result = docker_api.inspect_container(container_name)
if not result:
l.critical(f"Unable to inspect {container_name} {result}")
return None
networks = result['NetworkSettings']['Networks']
assert len(networks) == 1
ip = list(networks.values())[0]['IPAddress']
l.debug(f"{container_name} has ip address {ip}")
return ip
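# get_docker_network: returns the per-service network name "no-inet-service-<id>",
# creating it on first use as an internal (no internet) bridge on 10.231.<id>.0/24.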
def get_docker_network(client, service_id):
network_name = f"no-inet-service-{service_id}"
try:
l.debug(f"Trying to get the network {network_name}")
client.networks.get(network_id=network_name)
except Exception:
l.info(f"Network {network_name} doesn't exist, let's create it")
ipam_pool = docker.types.IPAMPool(
subnet=f"10.231.{service_id}.0/24",
)
ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
client.networks.create(network_name, driver='bridge', internal=True, ipam=ipam_config)
return network_name
def get_file_from_container_as_tar(container, file_location):
bits, stat = container.get_archive(file_location)
tar_archive = b""
for b in bits:
tar_archive += b
return tarfile.TarFile(fileobj=io.BytesIO(tar_archive))
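# drop_file_on_container: builds a single-member tar archive in memory with the given
# mode/uid/gid and streams it into the container via put_archive('/'), so path_to_file
# is interpreted relative to the container's root.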
def drop_file_on_container(container, path_to_file, file_contents, mode, uid, gid):
tarinfo = tarfile.TarInfo(name=path_to_file)
tarinfo.mode = mode
tarinfo.uid = uid
tarinfo.gid = gid
tarinfo.size = len(file_contents)
pw_tarstream = io.BytesIO()
pw_tar = tarfile.TarFile(fileobj=pw_tarstream, mode='w')
pw_tar.addfile(tarinfo, io.BytesIO(file_contents))
pw_tar.close()
pw_tarstream.seek(0)
l.info(f"dropping file {path_to_file} on container {container.name} with mode {oct(mode)} uid {uid} gid {gid}")
result = container.put_archive('/', pw_tarstream)
l.debug(f"put_archive result {result}")
return result
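# get_patch_byte_diff: extracts the current copy of the file from the container and
# returns (byte diff against new_file, TarInfo of the current file) so the caller can
# enforce the byte budget and reuse the original mode/uid/gid when patching.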
def get_patch_byte_diff(container, docker_location_to_patch, new_file):
l.info(f"Going to get the file {docker_location_to_patch}")
archive = get_file_from_container_as_tar(container, docker_location_to_patch)
l.info(f"got the following files {archive.getnames()}")
assert len(archive.getmembers()) == 1
base_to_patch = os.path.basename(docker_location_to_patch)
current_file_info = archive.getmember(base_to_patch)
assert current_file_info
current_file = archive.extractfile(current_file_info)
assert current_file
bytes_diff = bin_diff(current_file.read(), new_file)
return bytes_diff, current_file_info
def get_patch_tag(service_id, patch_id):
return f"service-{service_id}-patch-{patch_id}"
def patch_file_and_tag(container, patch_tag, docker_location_to_patch, new_file, mode, uid, gid):
result = drop_file_on_container(container, docker_location_to_patch, new_file, mode, uid, gid)
if not result:
l.error(f"Unable to create file {docker_location_to_patch}")
return None
result = container.commit(patch_tag, tag="latest")
l.info(f"tagged the patched version as {patch_tag} result {result}")
if not result:
l.critical(f"unable to commit the tag {patch_tag} result {result}")
return None
return True
def deploy_container(client, docker_api, previous, target):
l.info(f"deploying patched container to {target} from {previous}")
result = docker_api.tag(previous, target)
if not result:
l.critical(f"Unable to tag {previous} to {target}")
return None
l.debug(f"docker_api.tag result: {result}")
result = client.images.push(target)
if not result:
l.critical(f"Unable to push {target}")
return None
l.debug(f"client.images.push result: {result}")
l.info(f"deploying {previous} to {target} was successful")
return True
def get_public_metadata(container_output):
"""
Return any public metadata (lines prefixed with 'PUBLIC: ') from this container's log output, or None if there is none.
"""
PREFIX=b'PUBLIC: '
to_return = b""
for line in container_output.splitlines():
if line.startswith(PREFIX):
to_return += line[len(PREFIX):]
to_return += b"\n"
if to_return == b"":
return None
else:
return to_return.strip()
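# Example (derived from the code above):
#   get_public_metadata(b"starting\nPUBLIC: hello\n") == b"hello"
#   get_public_metadata(b"no public lines\n") is None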
class dummy_context_mgr():
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
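# dummy_context_mgr above is a no-op stand-in: it yields None and does not suppress
# exceptions. test_remote_interactions launches the patched service image on the
# service network, then runs each remote interaction script against its IP:port from
# the interaction container; it returns (True, None) on success or
# (PatchStatus.SLA_FAIL / PatchStatus.SLA_TIMEOUT, public metadata) on failure.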
def test_remote_interactions(client, docker_api, container_to_test, remote_interaction_container_name, network_name, remote_interaction_scripts, service_port, check_timeout, team_id, service_id=None, registry=None, **kwargs):
with dummy_context_mgr() as server_container: # In 2020 we used this to spawn the game-server container for Yanick's rhg service
prior_pull_latest = kwargs['pull_latest'] if 'pull_latest' in kwargs else None
kwargs['pull_latest'] = False
environment = {}
hostname = None
with launch_container(client, docker_api, container_to_test, network_name=network_name, environment=environment, hostname=hostname, **kwargs) as testing_container:
kwargs['pull_latest'] = prior_pull_latest
ip = get_ip(docker_api, testing_container.name)
for script in remote_interaction_scripts:
l.info(f"running remote interaction script {script} on {container_to_test} {ip}:{service_port}")
with launch_container(client, docker_api, remote_interaction_container_name, network_name="host", command=[script, str(ip), str(service_port)], environment={'TEAM_ID': team_id}, **kwargs) as interaction_container:
try:
result = interaction_container.wait(timeout=check_timeout)
l.info(f"Result from running remote interaction script {result}")
except Exception:
l.info(f"Got a timeout on SLA check {sys.exc_info()[1]}.")
l.info(f"stdout from interaction_container {remote_interaction_container_name} {interaction_container.logs(stdout=True)}")
return PatchStatus.SLA_TIMEOUT, get_public_metadata(interaction_container.logs())
exit_code = result['StatusCode']
container_output = interaction_container.logs(stdout=True)
l.info(f"stdout from interaction_container {remote_interaction_container_name} {container_output}")
if exit_code != 0:
l.info(f"Failed SLA check with exit code {exit_code}")
return PatchStatus.SLA_FAIL, get_public_metadata(container_output)
l.debug(f"passed SLA check for script {script}")
return True, None
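# test_local_changes: applies the same patched file to the local-interaction image,
# commits it under <patch_tag>-local-interaction, and runs each local script inside it;
# the return contract mirrors test_remote_interactions.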
def test_local_changes(client, docker_api, local_interaction_tag, local_interaction_container_name, docker_location_to_patch, new_file, new_file_mode, new_file_uid, new_file_gid, network_name, local_interaction_scripts, check_timeout, team_id, **kwargs):
# If there are no local interaction scripts, then our job here is done
l.info(f"Testing local interaction scripts for {local_interaction_tag} {local_interaction_container_name}")
if len(local_interaction_scripts) == 0:
l.warning(f"No local interaction scripts, behaving as if they succeeded.")
return True, None
l.debug(f"Going to run the original local container {local_interaction_container_name}")
with launch_container(client, docker_api, local_interaction_container_name, network_name=network_name, **kwargs) as original_local_container:
# need to patch the local interaction container
result = patch_file_and_tag(original_local_container, local_interaction_tag, docker_location_to_patch, new_file, new_file_mode, new_file_uid, new_file_gid)
if not result:
l.error(f"unable to patch and tag local container")
return None, None
l.info(f"Created a patched version {local_interaction_tag} of the local container {local_interaction_container_name}.")
# run each of the scripts
for script in local_interaction_scripts:
l.info(f"running local interaction script {script} on {local_interaction_tag}.")
prior_pull_latest = kwargs['pull_latest'] if 'pull_latest' in kwargs else None
kwargs['pull_latest'] = False
with launch_container(client, docker_api, local_interaction_tag, network_name="host", command=[script], environment={'TEAM_ID': team_id}, **kwargs) as local_container:
kwargs['pull_latest'] = prior_pull_latest
try:
result = local_container.wait(timeout=check_timeout)
l.info(f"Result from running local interaction script {result}")
except Exception:
l.info(f"Got a timeout on SLA check {sys.exc_info()[1]}.")
l.info(f"stdout from local_container {local_interaction_tag} {local_container.logs(stdout=True)}")
return PatchStatus.SLA_TIMEOUT, get_public_metadata(local_container.logs())
exit_code = result['StatusCode']
container_output = local_container.logs(stdout=True)
l.info(f"stdout from local_container {local_interaction_tag} {container_output}")
if exit_code != 0:
l.info(f"Failed SLA check with exit code {exit_code}")
return PatchStatus.SLA_FAIL, get_public_metadata(container_output)
l.debug(f"passed SLA check for script {script}")
return True, None
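# check_and_deploy_service: the full pipeline for one patch submission -
# 1) byte-diff the submitted file against the current one (reject if over max_bytes),
# 2) drop the file into the service image and commit it under a patch tag,
# 3) run local SLA checks, 4) run remote SLA checks,
# 5) optionally tag and push the patched image to the deployed name.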
def check_and_deploy_service(client, docker_api, container_name, deployed_container_name, remote_interaction_container_name, local_interaction_container_name, patch_id, service_id, service_port, remote_interaction_scripts, local_interaction_scripts, docker_location_to_patch, new_file, max_bytes, check_timeout, team_id=1, pull_latest=True, deploy_service=True, registry=None, **kwargs):
network_name = get_docker_network(client, service_id)
l.debug(f"Got network_name {network_name}")
with launch_container(client, docker_api, container_name, network_name, pull_latest=pull_latest, **kwargs) as container:
# Check the diff in bytes
bytes_diff, current_file_info = get_patch_byte_diff(container, docker_location_to_patch, new_file)
l.info(f"patched file resulted in {bytes_diff} with {max_bytes} total bytes to patch")
if bytes_diff > max_bytes:
l.info(f"Too many bytes diff")
return PatchStatus.TOO_MANY_BYTES, f"Had {bytes_diff} difference, only {max_bytes} allowed"
l.info(f"Going to create a patched version of the service.")
patch_tag = get_patch_tag(service_id, patch_id)
result = patch_file_and_tag(container, patch_tag, docker_location_to_patch, new_file, current_file_info.mode, current_file_info.uid, current_file_info.gid)
if not result:
l.error(f"unable to patch and tag")
return None, None
local_interaction_tag = f"{patch_tag}-local-interaction"
l.info(f"Going to test local interactions for {local_interaction_tag}")
result, metadata = test_local_changes(client, docker_api, local_interaction_tag, local_interaction_container_name, docker_location_to_patch, new_file, current_file_info.mode, current_file_info.uid, current_file_info.gid, network_name, local_interaction_scripts, check_timeout, team_id, pull_latest=pull_latest, **kwargs)
if not result == True:
return result, metadata
# Test the patched service
l.info(f"Going to test remote interactions for {patch_tag}")
result, metadata = test_remote_interactions(client, docker_api, patch_tag, remote_interaction_container_name, network_name, remote_interaction_scripts, service_port, check_timeout, team_id=team_id, service_id=service_id, registry=registry, pull_latest=pull_latest, **kwargs)
if not result == True:
return result, metadata
# Deploy the patched service
if deploy_service:
result = deploy_container(client, docker_api, patch_tag, deployed_container_name)
if not result:
l.critical("error deploying container")
return None, None
else:
l.info(f"Skipping deploying the service because deploy_service is {deploy_service}")
return PatchStatus.ACCEPTED, None
def get_service_docker_info(registry, service):
return f"{registry}{service['service_docker']}", f"{registry}{service['interaction_docker']}", f"{registry}{service['local_interaction_docker']}"
def test_patch(patch_id, dbapi=None, the_db=None, registry=None, update_db=True, deploy_service=True, **kwargs):
"""
Test the patch_id and use the given dbapi (which will be taken
from DATABASE_API in the environment if not given), timeout if any
check takes longer than check_timeout seconds. Registry will be used to push to.
Use kwargs for launch_container arguments such as mem_limit and mem_reservation.
"""
if not dbapi:
if 'DATABASE_API' in os.environ:
dbapi = os.environ['DATABASE_API']
if dbapi and the_db:
l.critical(f"cannot specify dbapi {dbapi} and the_db {the_db}")
return
if not registry:
if not 'DOCKER_REGISTRY' in os.environ:
l.critical(f"No registry given, can't do anything")
return
registry = os.environ['DOCKER_REGISTRY']
if not registry.endswith("/"):
l.warning(f"registry | |
"<< miljardiin[ >>];",
"(2000000000, 999999999999)": "<< miljardiin[ >>];",
"(1000000000000, 1999999999999)": "<< biljoonaan[ >>];",
"(2000000000000, 999999999999999999)": "<< biljoonaan[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-illative-plural": {
"0": "nolliin;",
"1": "yksiin;",
"2": "kaksiin;",
"3": "kolmiin;",
"4": "neljiin;",
"5": "viisiin;",
"6": "kuusiin;",
"7": "seitsemiin;",
"8": "kahdeksiin;",
"9": "yhdeksiin;",
"10": "kymmeniin;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmeniin[>>];",
"(100, 199)": "satoihin[>>];",
"(200, 999)": "<<satoihin[>>];",
"(1000, 1999)": "tuhansiin[>>];",
"(2000, 999999)": "<<tuhansiin[>>];",
"(1000000, 1999999)": "<< miljooniin[ >>];",
"(2000000, 999999999)": "<< miljooniin[ >>];",
"(1000000000, 1999999999)": "<< miljardeihin[ >>];",
"(2000000000, 999999999999)": "<< miljardeihin[ >>];",
"(1000000000000, 1999999999999)": "<< biljooniin[ >>];",
"(2000000000000, 999999999999999999)": "<< biljooniin[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-inessive": {
"0": "nollassa;",
"1": "yhdessä;",
"2": "kahdessa;",
"3": "kolmessa;",
"4": "neljässä;",
"5": "viidessä;",
"6": "kuudessa;",
"7": "seitsemässä;",
"8": "kahdeksassa;",
"9": "yhdeksässä;",
"10": "kymmenessä;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmenessä[>>];",
"(100, 199)": "sadassa[>>];",
"(200, 999)": "<<sadassa[>>];",
"(1000, 1999)": "tuhannessa[>>];",
"(2000, 999999)": "<<tuhannessa[>>];",
"(1000000, 1999999)": "<< miljoonassa[ >>];",
"(2000000, 999999999)": "<< miljoonassa[ >>];",
"(1000000000, 1999999999)": "<< miljardissa[ >>];",
"(2000000000, 999999999999)": "<< miljardissa[ >>];",
"(1000000000000, 1999999999999)": "<< biljoonassa[ >>];",
"(2000000000000, 999999999999999999)": "<< biljoonassa[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-inessive-plural": {
"0": "nollissa;",
"1": "yksissä;",
"2": "kaksissa;",
"3": "kolmissa;",
"4": "neljissä;",
"5": "viisissä;",
"6": "kuusissa;",
"7": "seitsemissä;",
"8": "kahdeksissa;",
"9": "yhdeksissä;",
"10": "kymmenissä;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmenissä[>>];",
"(100, 199)": "sadoissa[>>];",
"(200, 999)": "<<sadoissa[>>];",
"(1000, 1999)": "tuhansissa[>>];",
"(2000, 999999)": "<<tuhansissa[>>];",
"(1000000, 1999999)": "<< miljoonissa[ >>];",
"(2000000, 999999999)": "<< miljoonissa[ >>];",
"(1000000000, 1999999999)": "<< miljardeissa[ >>];",
"(2000000000, 999999999999)": "<< miljardeissa[ >>];",
"(1000000000000, 1999999999999)": "<< biljoonissa[ >>];",
"(2000000000000, 999999999999999999)": "<< biljoonissa[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-partitive": {
"0": "nollaa;",
"1": "yhtä;",
"2": "kahta;",
"3": "kolmea;",
"4": "neljää;",
"5": "viittä;",
"6": "kuutta;",
"7": "seitsemää;",
"8": "kahdeksaa;",
"9": "yhdeksää;",
"10": "kymmentä;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmentä[>>];",
"(100, 199)": "sataa[>>];",
"(200, 999)": "<<sataa[>>];",
"(1000, 1999)": "tuhatta[>>];",
"(2000, 999999)": "<<tuhatta[>>];",
"(1000000, 1999999)": "<< miljoonaa[ >>];",
"(2000000, 999999999)": "<< miljoonaa[ >>];",
"(1000000000, 1999999999)": "<< miljardia[ >>];",
"(2000000000, 999999999999)": "<< miljardia[ >>];",
"(1000000000000, 1999999999999)": "<< biljoonaa[ >>];",
"(2000000000000, 999999999999999999)": "<< biljoonaa[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-partitive-plural": {
"0": "nollia;",
"1": "yksiä;",
"2": "kaksia;",
"3": "kolmia;",
"4": "neljiä;",
"5": "viisiä;",
"6": "kuusia;",
"7": "seitsemiä;",
"8": "kahdeksia;",
"9": "yhdeksiä;",
"10": "kymmeniä;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmeniä[>>];",
"(100, 199)": "satoja[>>];",
"(200, 999)": "<<satoja[>>];",
"(1000, 1999)": "tuhansia[>>];",
"(2000, 999999)": "<<tuhansia[>>];",
"(1000000, 1999999)": "<< miljoonia[ >>];",
"(2000000, 999999999)": "<< miljoonia[ >>];",
"(1000000000, 1999999999)": "<< miljardeja[ >>];",
"(2000000000, 999999999999)": "<< miljardeja[ >>];",
"(1000000000000, 1999999999999)": "<< biljoonia[ >>];",
"(2000000000000, 999999999999999999)": "<< biljoonia[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-translative": {
"0": "nollaksi;",
"1": "yhdeksi;",
"2": "kahdeksi;",
"3": "kolmeksi;",
"4": "neljäksi;",
"5": "viideksi;",
"6": "kuudeksi;",
"7": "seitsemäksi;",
"8": "kahdeksaksi;",
"9": "yhdeksäksi;",
"10": "kymmeneksi;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmeneksi[>>];",
"(100, 199)": "sadaksi[>>];",
"(200, 999)": "<<sadaksi[>>];",
"(1000, 1999)": "tuhanneksi[>>];",
"(2000, 999999)": "<<tuhanneksi[>>];",
"(1000000, 1999999)": "<< miljoonaksi[ >>];",
"(2000000, 999999999)": "<< miljoonaksi[ >>];",
"(1000000000, 1999999999)": "<< miljardiksi[ >>];",
"(2000000000, 999999999999)": "<< miljardiksi[ >>];",
"(1000000000000, 1999999999999)": "<< biljoonaksi[ >>];",
"(2000000000000, 999999999999999999)": "<< biljoonaksi[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-translative-plural": {
"0": "nolliksi;",
"1": "yksiksi;",
"2": "kaksiksi;",
"3": "kolmiksi;",
"4": "neljiksi;",
"5": "viisiksi;",
"6": "kuusiksi;",
"7": "seitsemiksi;",
"8": "kahdeksiksi;",
"9": "yhdeksiksi;",
"10": "kymmeniksi;",
"(11, 19)": ">>toista;",
"(20, 99)": "<<kymmeniksi[>>];",
"(100, 199)": "sadoiksi[>>];",
"(200, 999)": "<<sadoiksi[>>];",
"(1000, 1999)": "tuhansiksi[>>];",
"(2000, 999999)": "<<tuhansiksi[>>];",
"(1000000, 1999999)": "<< miljooniksi[ >>];",
"(2000000, 999999999)": "<< miljooniksi[ >>];",
"(1000000000, 1999999999)": "<< miljardeiksi[ >>];",
"(2000000000, 999999999999)": "<< miljardeiksi[ >>];",
"(1000000000000, 1999999999999)": "<< biljooniksi[ >>];",
"(2000000000000, 999999999999999999)": "<< biljooniksi[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-numbering": {
"(0, 'inf')": "=%spellout-cardinal=;"
},
"%spellout-numbering-year": {
"(0, 'inf')": "=%spellout-numbering=;"
},
"%spellout-ordinal": {
"0": "nolla;",
"1": "ensimmäinen;",
"2": "toinen;",
"3": "kolmas;",
"4": "neljäs;",
"5": "viides;",
"6": "kuudes;",
"7": "seitsemäs;",
"8": "kahdeksas;",
"9": "yhdeksäs;",
"10": "kymmenes;",
"11": "yhdestoista;",
"12": "kahdestoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-larger<kymmenes[>>];",
"(100, 199)": "sadas[>>];",
"(200, 999)": "<%%spellout-ordinal-larger<sadas[>>];",
"(1000, 1999)": "tuhannes[>>];",
"(2000, 999999)": "<%%spellout-ordinal-larger<tuhannes[>>];",
"(1000000, 1999999)": "miljoonas[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-larger< miljoonas[>>];",
"(1000000000, 1999999999)": "miljardis[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-larger< miljardis[>>];",
"(1000000000000, 1999999999999)": "biljoonas[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-larger< biljoonas[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-ablative": {
"0": "nollannelta;",
"1": "ensimmäiseltä;",
"2": "toiselta;",
"3": "kolmannelta;",
"4": "neljänneltä;",
"5": "viidenneltä;",
"6": "kuudennelta;",
"7": "seitsemänneltä;",
"8": "kahdeksannelta;",
"9": "yhdeksänneltä;",
"10": "kymmenenneltä;",
"11": "yhdenneltätoista;",
"12": "kahdenneltatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-ablative-larger<kymmenenneltä[>>];",
"(100, 199)": "sadannelta[>>];",
"(200, 999)": "<%%spellout-ordinal-ablative-larger<sadannelta[>>];",
"(1000, 1999)": "tuhannennelta[>>];",
"(2000, 999999)": "<%%spellout-ordinal-ablative-larger<tuhannennelta[>>];",
"(1000000, 1999999)": "miljoonannelta[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-ablative-larger<miljoonannelta[>>];",
"(1000000000, 1999999999)": "miljardinnelta[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-ablative-larger<miljardinnelta[>>];",
"(1000000000000, 1999999999999)": "biljoonannelta[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-ablative-larger<biljoonannelta[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-ablative-plural": {
"0": "nollannilta;",
"1": "ensimmäisiltä;",
"2": "toisilta;",
"3": "kolmannilta;",
"4": "neljänniltä;",
"5": "viidenniltä;",
"6": "kuudennilta;",
"7": "seitsemänniltä;",
"8": "kahdeksannilta;",
"9": "yhdeksänniltä;",
"10": "kymmenenniltä;",
"11": "yhdenniltätoista;",
"12": "kahdenniltatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-ablative-plural-larger<kymmenenniltä[>>];",
"(100, 199)": "sadannilta[>>];",
"(200, 999)": "<%%spellout-ordinal-ablative-plural-larger<sadannilta[>>];",
"(1000, 1999)": "tuhannennilta[>>];",
"(2000, 999999)": "<%%spellout-ordinal-ablative-plural-larger<tuhannennilta[>>];",
"(1000000, 1999999)": "miljoonannilta[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-ablative-plural-larger<miljoonannilta[>>];",
"(1000000000, 1999999999)": "miljardinnilta[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-ablative-plural-larger<miljardinnilta[>>];",
"(1000000000000, 1999999999999)": "biljoonannilta[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-ablative-plural-larger<biljoonannilta[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-adessive": {
"0": "nollannella;",
"1": "ensimmäisellä;",
"2": "toisella;",
"3": "kolmannella;",
"4": "neljännellä;",
"5": "viidennellä;",
"6": "kuudennella;",
"7": "seitsemännellä;",
"8": "kahdeksannella;",
"9": "yhdeksännellä;",
"10": "kymmenennellä;",
"11": "yhdennellätoista;",
"12": "kahdennellatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-adessive-larger<kymmenennellä[>>];",
"(100, 199)": "sadannella[>>];",
"(200, 999)": "<%%spellout-ordinal-adessive-larger<sadannella[>>];",
"(1000, 1999)": "tuhannennella[>>];",
"(2000, 999999)": "<%%spellout-ordinal-adessive-larger<tuhannennella[>>];",
"(1000000, 1999999)": "miljoonannella[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-adessive-larger<miljoonannella[>>];",
"(1000000000, 1999999999)": "miljardinnella[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-adessive-larger<miljardinnella[>>];",
"(1000000000000, 1999999999999)": "biljoonannella[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-adessive-larger<biljoonannella[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-adessive-plural": {
"0": "nollannilla;",
"1": "ensimmäisillä;",
"2": "toisilla;",
"3": "kolmannilla;",
"4": "neljännillä;",
"5": "viidennillä;",
"6": "kuudennilla;",
"7": "seitsemännillä;",
"8": "kahdeksannilla;",
"9": "yhdeksännillä;",
"10": "kymmenennillä;",
"11": "yhdennillätoista;",
"12": "kahdennillatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-adessive-plural-larger<kymmenennillä[>>];",
"(100, 199)": "sadannilla[>>];",
"(200, 999)": "<%%spellout-ordinal-adessive-plural-larger<sadannilla[>>];",
"(1000, 1999)": "tuhannennilla[>>];",
"(2000, 999999)": "<%%spellout-ordinal-adessive-plural-larger<tuhannennilla[>>];",
"(1000000, 1999999)": "miljoonannilla[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-adessive-plural-larger<miljoonannilla[>>];",
"(1000000000, 1999999999)": "miljardinnilla[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-adessive-plural-larger<miljardinnilla[>>];",
"(1000000000000, 1999999999999)": "biljoonannilla[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-adessive-plural-larger<biljoonannilla[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-allative": {
"0": "nollannelle;",
"1": "ensimmäiselle;",
"2": "toiselle;",
"3": "kolmannelle;",
"4": "neljännelle;",
"5": "viidennelle;",
"6": "kuudennelle;",
"7": "seitsemännelle;",
"8": "kahdeksannelle;",
"9": "yhdeksännelle;",
"10": "kymmenennelle;",
"11": "yhdennelletoista;",
"12": "kahdennelletoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-allative-larger<kymmenennelle[>>];",
"(100, 199)": "sadannelle[>>];",
"(200, 999)": "<%%spellout-ordinal-allative-larger<sadannelle[>>];",
"(1000, 1999)": "tuhannennelle[>>];",
"(2000, 999999)": "<%%spellout-ordinal-allative-larger<tuhannennelle[>>];",
"(1000000, 1999999)": "miljoonannelle[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-allative-larger<miljoonannelle[>>];",
"(1000000000, 1999999999)": "miljardinnelle[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-allative-larger<miljardinnelle[>>];",
"(1000000000000, 1999999999999)": "biljoonannelle[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-allative-larger<biljoonannelle[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-allative-plural": {
"0": "nollannille;",
"1": "ensimmäisille;",
"2": "toisille;",
"3": "kolmannille;",
"4": "neljännille;",
"5": "viidennille;",
"6": "kuudennille;",
"7": "seitsemännille;",
"8": "kahdeksannille;",
"9": "yhdeksännille;",
"10": "kymmenennille;",
"11": "yhdennilletoista;",
"12": "kahdennilletoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-allative-plural-larger<kymmenennille[>>];",
"(100, 199)": "sadannille[>>];",
"(200, 999)": "<%%spellout-ordinal-allative-plural-larger<sadannille[>>];",
"(1000, 1999)": "tuhannennille[>>];",
"(2000, 999999)": "<%%spellout-ordinal-allative-plural-larger<tuhannennille[>>];",
"(1000000, 1999999)": "miljoonannille[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-allative-plural-larger<miljoonannille[>>];",
"(1000000000, 1999999999)": "miljardinnille[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-allative-plural-larger<miljardinnille[>>];",
"(1000000000000, 1999999999999)": "biljoonannille[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-allative-plural-larger<biljoonannille[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-elative": {
"0": "nollannesta;",
"1": "ensimmäisestä;",
"2": "toisesta;",
"3": "kolmannesta;",
"4": "neljännestä;",
"5": "viidennestä;",
"6": "kuudennesta;",
"7": "seitsemännestä;",
"8": "kahdeksannesta;",
"9": "yhdeksännestä;",
"10": "kymmenennestä;",
"11": "yhdennestätoista;",
"12": "kahdennestatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-elative-larger<kymmenennestä[>>];",
"(100, 199)": "sadannesta[>>];",
"(200, 999)": "<%%spellout-ordinal-elative-larger<sadannesta[>>];",
"(1000, 1999)": "tuhannennesta[>>];",
"(2000, 999999)": "<%%spellout-ordinal-elative-larger<tuhannennesta[>>];",
"(1000000, 1999999)": "miljoonannesta[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-elative-larger<miljoonannesta[>>];",
"(1000000000, 1999999999)": "miljardinnesta[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-elative-larger<miljardinnesta[>>];",
"(1000000000000, 1999999999999)": "biljoonannesta[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-elative-larger<biljoonannesta[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-elative-plural": {
"0": "nollansista;",
"1": "ensimmäisistä;",
"2": "toisista;",
"3": "kolmansista;",
"4": "neljänsistä;",
"5": "viidensistä;",
"6": "kuudensista;",
"7": "seitsemänsistä;",
"8": "kahdeksansista;",
"9": "yhdeksänsistä;",
"10": "kymmenensistä;",
"11": "yhdensistätoista;",
"12": "kahdensistatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-elative-plural-larger<kymmenensistä[>>];",
"(100, 199)": "sadansista[>>];",
"(200, 999)": "<%%spellout-ordinal-elative-plural-larger<sadansista[>>];",
"(1000, 1999)": "tuhannensista[>>];",
"(2000, 999999)": "<%%spellout-ordinal-elative-plural-larger<tuhannensista[>>];",
"(1000000, 1999999)": "miljoonansista[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-elative-plural-larger<miljoonansista[>>];",
"(1000000000, 1999999999)": "miljardinsista[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-elative-plural-larger<miljardinsista[>>];",
"(1000000000000, 1999999999999)": "biljoonansista[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-elative-plural-larger<biljoonansista[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-essive": {
"0": "nollana;",
"1": "ensimmäisenä;",
"2": "toisena;",
"3": "kolmantena;",
"4": "neljäntenä;",
"5": "viidentenä;",
"6": "kuudentena;",
"7": "seitsemäntenä;",
"8": "kahdeksantena;",
"9": "yhdeksäntenä;",
"10": "kymmenentenä;",
"11": "yhdentenätoista;",
"12": "kahdentenatoista;",
"(13, 19)": ">>toista;",
"(20, 99)": "<%%spellout-ordinal-essive-larger<kymmenentenä[>>];",
"(100, 199)": "sadantena[>>];",
"(200, 999)": "<%%spellout-ordinal-essive-larger<sadantena[>>];",
"(1000, 1999)": "tuhannentena[>>];",
"(2000, 999999)": "<%%spellout-ordinal-essive-larger<tuhannentena[>>];",
"(1000000, 1999999)": "miljoonantena[>>];",
"(2000000, 999999999)": "<%%spellout-ordinal-essive-larger<miljoonantena[>>];",
"(1000000000, 1999999999)": "miljardintena[>>];",
"(2000000000, 999999999999)": "<%%spellout-ordinal-essive-larger<miljardintena[>>];",
"(1000000000000, 1999999999999)": "biljoonantena[ >>];",
"(2000000000000, 999999999999999999)": "<%%spellout-ordinal-essive-larger<biljoonantena[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-ordinal-essive-plural": {
"0": "nollansina;",
"1": "ensimmäisinä;",
"2": "toisina;",
"3": "kolmansina;",
"4": | |
install pysam on your system. Please install LLVM compiler first."
doInstall=False
if doInstall:
# don't set static build libs on OSX, it seems to cause compile issues for jellyfish
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("python setup.py build_ext --inplace")
os.system("python setup.py build")
os.system("python setup.py install --home=%spython"%(utils.INITIAL_UTILS+os.sep))
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf pysam.tar.gz")
#WARNING: matplotlib causes install issues for multiple users
fail = 0
try:
import numpy
except ImportError:
fail = 1
if "numpy" in packagesToInstall:
dl = 'y'
elif fail:
print "numpy python modules not found, necessary for html report, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L http://downloads.sourceforge.net/project/numpy/NumPy/1.7.1/numpy-1.7.1.tar.gz -o ./numpy.tar.gz")
os.system("tar -C ./Utilities/python -xvf numpy.tar.gz")
os.system("mv ./Utilities/python/numpy-1.7.1 ./Utilities/python/numpy")
os.chdir("./Utilities/python/numpy")
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("python setup.py install --home=%spython"%(utils.INITIAL_UTILS+os.sep))
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf numpy.tar.gz")
if 1:
fail = 0
try:
import matplotlib
if (matplotlib.__version__ < "1.1.0"):
fail = 1
except ImportError:
fail = 1
if "matplotlib" in packagesToInstall:
dl = 'y'
elif fail:
print "Current matplot lib version is incompatible with metAMOS or matplotlib is not installed. Need version 1.1.0+, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L http://downloads.sourceforge.net/project/matplotlib/matplotlib/matplotlib-1.1.0/matplotlib-1.1.0.tar.gz -o ./matplotlib.tar.gz")
os.system("tar -C ./Utilities/python -xvf matplotlib.tar.gz")
os.system("mv ./Utilities/python/matplotlib-1.1.0 ./Utilities/python/matplotlib")
os.chdir("./Utilities/python/matplotlib")
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf matplotlib.tar.gz")
# now software
if not os.path.exists("./AMOS") or 0:
if "amos" in packagesToInstall:
dl = 'y'
else:
print "AMOS binaries not found, needed for all steps, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/amos-3.2-BETA-%s-%s.binaries.tar.gz -o ./amos-binaries.tar.gz"%(OSTYPE, MACHINETYPE))
os.system("tar -xvf amos-binaries.tar.gz")
os.system("rm -rf amos-binaries.tar.gz")
# descriptive perl module
stat = utils.getCommandOutput("perl -MStatistics::Descriptive -e 0 && echo $?", True)
if stat == "":
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/Statistics-Descriptive-3.0203.tar.gz -o stat.tar.gz")
os.system("tar -xvzf stat.tar.gz")
os.chdir("Statistics-Descriptive-3.0203")
os.system("perl Makefile.PL PREFIX=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathToCopy = utils.getCommandOutput("find Statistics-Descriptive-3.0203/build -type d -name \"Statistics\" |grep -v auto", False)
copyPerlLib(pathToCopy, "AMOS%s%s-%s%slib"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf stat.tar.gz")
os.system("rm -rf Statistics-Descriptive-3.0203")
if not os.path.exists("./Utilities/cpp%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "kraken" in packagesToInstall:
dl = 'y'
else:
print "Kraken not found, optional for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "kraken.tar.gz"
os.system("curl -L http://ccb.jhu.edu/software/kraken/dl/kraken-0.10.4-beta.tgz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv kraken-0.10.4-beta ./Utilities/cpp/%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./install_kraken.sh `pwd`/bin")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
os.system("rm -rf ./Utilities/DB/kraken")
if not os.path.exists("./Utilities/DB/kraken"):
if "kraken" in packagesToInstall:
dl = 'y'
else:
print "Kraken DB not found, required for Kraken, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
settings = utils.Settings(1, 1, "", "")
settings.OSTYPE = OSTYPE
mem = utils.getAvailableMemory(settings)
if (mem < 100) and not nodbs:
print "Insufficient memory to build full Kraken database. Requires at least 100GB of memory, using mini DB"
archive = "minikraken.tgz"
os.system("curl -L http://ccb.jhu.edu/software/kraken/dl/%s -o %s"%(archive, archive))
os.system("tar xvzf %s"%(archive))
os.system("mv minikraken_* ./Utilities/DB/kraken")
os.system("rm %s"%(archive))
elif not nodbs:
# first we need jellyfish which is used to build DB
# kraken needs jellyfish, if we don't find it build it and add to path
jellyfish = utils.getFromPath("jellyfish", "Jellyfish", False)
# check jellyfish version, kraken needs version 1
version=""
if jellyfish != "":
version = utils.getCommandOutput("%s/jellyfish --version |awk '{print substr($NF, 1, index($NF, \".\")-1)}'"%(jellyfish), False)
if int(version) > 1:
jellyfish=""
if jellyfish == "":
archive = "jellyfish.tar.gz"
os.system("curl -L http://www.cbcb.umd.edu/software/jellyfish/jellyfish-1.1.11.tar.gz -o %s"%(archive))
os.system("tar xvzf %s"%(archive))
os.system("mv jellyfish-1.1.11 ./Utilities/cpp%s%s-%s%s/jellyfish"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%s/jellyfish"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./configure --prefix=`pwd`")
os.system("make")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathUpdate = "%s/Utilities/cpp%s%s-%s%sjellyfish/bin/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
if "PATH" in os.environ:
pathUpdate = "%s%s%s"%(os.environ["PATH"], os.pathsep, pathUpdate)
os.environ["PATH"]=pathUpdate
os.chdir("./Utilities/cpp/%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./bin/kraken-build --standard --threads %d --db %s/Utilities/DB/kraken"%(multiprocessing.cpu_count() - 1, METAMOS_ROOT))
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./LAP"):
if "lap" in packagesToInstall:
dl = 'y'
else:
print "LAP tool not found, needed for multiple assembly pipeline, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://www.cbcb.umd.edu/~cmhill/files/lap_release_1.1.zip -o lap_release_1.1.zip")
os.system("unzip lap_release_1.1.zip")
os.system("mv ./lap_release_1.1 ./LAP")
os.system("rm -rf lap_release_1.1.zip")
if not os.path.exists("KronaTools") or 0:
if "kronatools" in packagesToInstall:
dl = 'y'
else:
print "KronaTools not found, needed for Postprocess, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
# TODO: KronaTools should be on the FTP site for robustness to URL changes
os.system("curl -L 'ftp://ftp.cbcb.umd.edu/pub/data/metamos/" + kronaTools + ".tar' -o %s.tar"%(kronaTools))
os.system("tar -xvf %s.tar"%(kronaTools))
os.system("rm -rf %s.tar"%(kronaTools))
os.system("mv %s KronaTools"%(kronaTools))
os.system("cd KronaTools && ./install.pl --prefix=.")
if not os.path.exists("KronaTools/taxonomy/taxonomy.tab") or 0:
if "kronatools" in packagesToInstall:
dl = 'y'
else:
print "KronaTools taxonomy data not found, needed for Postprocess, download now (will take around 20 minutes)?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
os.system("cd KronaTools && ./updateTaxonomy.sh")
os.chdir("%s"%(METAMOS_ROOT))
os.system("cat KronaTools/taxonomy/taxonomy.tab |awk -F \"\\t\" '{print $1\"\\t\"$NF}' > ./Utilities/DB/tax_key.tab")
if not os.path.exists("./FastQC"):
if "fastqc" in packagesToInstall:
dl = 'y'
else:
print "FastQC not found, optional for Preprocess, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "fastqc_v0.10.0.zip"
os.system("curl -L http://www.bioinformatics.babraham.ac.uk/projects/fastqc/%s -o %s" % (archive,archive))
os.system("unzip %s" % archive)
os.system("rm %s" % archive)
os.system("chmod a+rx FastQC/fastqc")
os.system("chmod -R a+rX FastQC/")
if not os.path.exists("./Utilities/DB/uniprot_sprot.fasta"):
if "uniprot" in packagesToInstall:
dl = 'y'
else:
print "Uniprot/Swissprot DB not found, optional for Functional Annotation, download now?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
archive = "uniprot.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/%s -o %s" %(archive, archive))
os.system("tar -C ./Utilities/DB/ -xvf %s" % archive)
os.system("rm %s"%archive)
# velvet
if not os.path.exists("./Utilities/cpp%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "velvet" in packagesToInstall:
dl = 'y'
else:
print "Velvet not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "velvet.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/velvet_1.2.10.tgz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv velvet_1.2.10 ./Utilities/cpp/%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make CATEGORIES=16 MAXKMERLENGTH=127 OPENMP=1 BUNDLEDZLIB=1")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
# velvet-sc
if not os.path.exists("./Utilities/cpp%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "velvet-sc" in packagesToInstall:
dl = 'y'
else:
print "Velvet-SC not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "velvet-sc.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/velvet-sc.tar.gz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv velvet-sc ./Utilities/cpp/%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make CATEGORIES=16 MAXKMERLENGTH=127 OPENMP=1")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
# metavelvet
if not os.path.exists("./Utilities/cpp%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "metavelvet" in packagesToInstall:
dl = 'y'
else:
print "MetaVelvet not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "MetaVelvet-1.2.02.tgz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/MetaVelvet-1.2.02.tgz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv MetaVelvet-1.2.02 ./Utilities/cpp/%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
if OSTYPE == "Darwin":
os.system("cp Utils/Utils.hh Utils/Utils.hh.orig")
os.system("cat Utils/Utils.hh.orig |awk '{if (match($0, \"#define MAX_STRING_LENGTH\")) { print \"#include <sys/types.h>\\n\"$0; } else { print $0; }}' > Utils/Utils.hh")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make CATEGORIES=16 MAXKMERLENGTH=127")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
if "viritas" in enabledWorkflows or manual:
if not os.path.exists("./Utilities/cpp%s%s-%s%strnascan"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "trnascan" in packagesToInstall:
dl = 'y'
else:
print "tRNAscan not found, optional for Annotate step, download now?"
dl = raw_input("Enter Y/N: | |
which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
advice = ( 'The pandda model of '+xtal+' was changed, but it was already refined! '
'This is most likely because this was done with an older version of XCE. '
'If you really want to export and refine this model, you need to open the database '
'with DBbrowser (sqlitebrowser.org); then change the RefinementOutcome field '
'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
select_dir_string_new_pannda=' '
for sample in samples_to_export:
self.Logfile.insert('changing directory to ' + os.path.join(self.initial_model_directory,sample))
os.chdir(os.path.join(self.initial_model_directory,sample))
self.Logfile.insert(sample + ': copying ' + os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
os.system('/bin/cp %s .' %os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
for old_event_map in glob.glob('*-BDC_*.ccp4'):
if not os.path.isdir('old_event_maps'):
os.mkdir('old_event_maps')
self.Logfile.warning(sample + ': moving ' + old_event_map + ' to old_event_maps folder')
os.system('/bin/mv %s old_event_maps' %old_event_map)
for event_map in glob.glob(os.path.join(self.panddas_directory,'processed_datasets',sample,'*-BDC_*.ccp4')):
self.Logfile.insert(sample + ': copying ' + event_map)
os.system('/bin/cp %s .' %event_map)
select_dir_string+="select_dir={0!s} ".format(sample)
select_dir_string_new_pannda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
return samples_to_export
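# run_pandda_export: background thread that copies pandda-modelled structures and event
# maps back into the project directory, records the new models and sites/events in the
# SQLite datasource, and starts an initial refmac refinement for each exported model.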
class run_pandda_export(QtCore.QThread):
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,update_datasource_only,which_models,pandda_params):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.update_datasource_only=update_datasource_only
self.which_models=which_models
self.already_exported_models=[]
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '' }
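# RefmacParams above holds the default refinement settings passed to
# XChemRefine.RunQuickRefine: 10 cycles, automatic weighting ('AUTO') and isotropic
# B-factors ('bref ISOT'); the input/output file slots are left empty here.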
def run(self):
# v1.3.8.2 - removed option to update database only
# if not self.update_datasource_only:
samples_to_export=self.export_models()
self.import_samples_into_datasouce(samples_to_export)
# if not self.update_datasource_only:
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in samples_to_export: self.Logfile.insert(xtal)
# sample_list=self.db.execute_statement("select CrystalName,CompoundCode from mainTable where RefinementOutcome='2 - PANDDA model';")
# for item in sample_list:
# xtal=str(item[0])
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb')):
self.Logfile.insert('running inital refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
try:
os.system('/bin/rm *-ensemble-model.pdb *restraints*')
except:
self.Logfile.error("Restraint files didn't exist to remove. Will try to continue")
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
Refine=XChemRefine.panddaRefine(self.initial_model_directory,xtal,compoundID,self.datasource)
os.symlink(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb'),xtal+'-ensemble-model.pdb')
Refine.RunQuickRefine(Serial,self.RefmacParams,self.external_software,self.xce_logfile,'pandda_refmac',None)
# elif xtal in os.path.join(self.panddas_directory,'processed_datasets',xtal,'modelled_structures',
# '{}-pandda-model.pdb'.format(xtal)):
# self.Logfile.insert('{}: cannot start refinement because {}'.format(xtal,xtal) +
# ' does not have a modelled structure. Check whether you expect this dataset to ' +
# ' have a modelled structure, compare pandda.inspect and datasource,'
# ' then tell XCHEMBB ')
else:
self.Logfile.error('%s: cannot find %s-ensemble-model.pdb; cannot start refinement...' %(xtal,xtal))
self.Logfile.error('Please check terminal window for any PanDDA related tracebacks')
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
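# import_samples_into_datasouce: flags every dataset found under processed_datasets/
# rejected_datasets in the database, then parses pandda_inspect_sites.csv and
# pandda_inspect_events.csv to fill the per-event entries (site name/comment, event
# map, initial model and mtz) for the samples that were exported.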
def import_samples_into_datasouce(self,samples_to_export):
# first make a note of all the datasets which were used in pandda directory
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'False',DimplePANDDApath='{0!s}' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
# do the same as before, but look for rejected datasets
try:
os.chdir(os.path.join(self.panddas_directory,'rejected_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'True',DimplePANDDApath='{0!s}',DimplePANDDAhit = 'False' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
except OSError:
pass
site_list = []
pandda_hit_list=[]
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_sites.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
self.Logfile.insert('reading pandda_inspect_sites.csv')
for i,line in enumerate(csv_dict):
self.Logfile.insert(str(line).replace('\n','').replace('\r',''))
site_index=line['site_idx']
name=line['Name'].replace("'","")
comment=line['Comment']
site_list.append([site_index,name,comment])
self.Logfile.insert('add to site_list: ' + str([site_index,name,comment]))
progress_step=1
for i,line in enumerate(open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))):
n_lines=i
if n_lines != 0:
progress_step=100/float(n_lines)
else:
progress_step=0
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('reading '+os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
for i,line in enumerate(csv_dict):
db_dict={}
sampleID=line['dtag']
if sampleID not in samples_to_export:
self.Logfile.warning('%s: not to be exported; will not add to panddaTable...' %sampleID)
continue
if sampleID not in pandda_hit_list:
pandda_hit_list.append(sampleID)
site_index=str(line['site_idx']).replace('.0','')
event_index=str(line['event_idx']).replace('.0','')
self.Logfile.insert(str(line))
self.Logfile.insert('reading {0!s} -> site {1!s} -> event {2!s}'.format(sampleID, site_index, event_index))
for entry in site_list:
if entry[0]==site_index:
site_name=entry[1]
site_comment=entry[2]
break
# check if EVENT map exists in project directory
event_map=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*ccp4')):
filename=file[file.rfind('/')+1:]
if filename.startswith(sampleID+'-event_'+event_index) and filename.endswith('map.native.ccp4'):
event_map=file
self.Logfile.insert('found respective event maps in {0!s}: {1!s}'.format(self.initial_model_directory, event_map))
break
# initial pandda model and mtz file
pandda_model=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*pdb')):
filename=file[file.rfind('/')+1:]
if filename.endswith('-ensemble-model.pdb'):
pandda_model=file
if sampleID not in self.already_exported_models:
self.already_exported_models.append(sampleID)
break
initial_mtz=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*mtz')):
filename=file[file.rfind('/')+1:]
if filename.endswith('pandda-input.mtz'):
initial_mtz=file
break
db_dict['CrystalName'] = sampleID
db_dict['PANDDApath'] = self.panddas_directory
db_dict['PANDDA_site_index'] = site_index
db_dict['PANDDA_site_name'] = site_name
db_dict['PANDDA_site_comment'] = site_comment
db_dict['PANDDA_site_event_index'] = event_index
db_dict['PANDDA_site_event_comment'] = line['Comment'].replace("'","")
db_dict['PANDDA_site_confidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_InspectConfidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_ligand_placed'] = line['Ligand Placed']
db_dict['PANDDA_site_viewed'] = line['Viewed']
db_dict['PANDDA_site_interesting'] = line['Interesting']
db_dict['PANDDA_site_z_peak'] = line['z_peak']
db_dict['PANDDA_site_x'] = line['x']
db_dict['PANDDA_site_y'] = line['y']
db_dict['PANDDA_site_z'] = line['z']
db_dict['PANDDA_site_ligand_id'] = ''
db_dict['PANDDA_site_event_map'] = event_map
db_dict['PANDDA_site_initial_model'] = pandda_model
db_dict['PANDDA_site_initial_mtz'] = initial_mtz
db_dict['PANDDA_site_spider_plot'] = ''
# find apo structures which were used
# XXX missing XXX
self.db.update_insert_site_event_panddaTable(sampleID,db_dict)
# this is necessary, otherwise RefinementOutcome will be reset for samples that are actually already in refinement
self.db.execute_statement("update panddaTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and RefinementOutcome is null".format(sampleID))
self.db.execute_statement("update mainTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and (RefinementOutcome is null or RefinementOutcome is '1 - Analysis Pending')".format(sampleID))
self.db.execute_statement("update mainTable set DimplePANDDAhit = 'True' where CrystalName is '{0!s}'".format(sampleID))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('done reading pandda_inspect_events.csv')
# finally find all samples which do not have a pandda hit
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
self.Logfile.insert('check which datasets are not interesting')
# DimplePANDDAhit
# for xtal in glob.glob('*'):
# if xtal not in pandda_hit_list:
# self.Logfile.insert(xtal+': not in interesting_datasets; updating database...')
# self.db.execute_statement("update mainTable set DimplePANDDAhit = 'False' where CrystalName is '{0!s}'".format(xtal))
def export_models(self):
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
elif self.which_models == 'selected':
for i in range(0, self.pandda_analyse_data_table.rowCount()):
if str(self.pandda_analyse_data_table.item(i, 0).text()) == sample:
if self.pandda_analyse_data_table.cellWidget(i, 1).isChecked():
self.Logfile.insert('Dataset selected by user -> exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
break
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.total_seconds() != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
'This is most likely because this was done with an older version of XCE. '
'If you really want to export and refine this model, you need to open the database '
'with DBbrowser (sqlitebrowser.org); then change the RefinementOutcome field '
'of the respective sample to "2 - PANDDA | |
#
# Copyright <NAME> 2009
#
"""
Code to implement a test harness.
"""
import corebio.seq_io.fasta_io, os, logging, hmm, cPickle, numpy as N, sys, numpy.random as R
from hmm.pssm import seq_to_numpy, numpy_to_seq
from optparse import OptionParser
from cookbook.dicts import DictOf
from itertools import imap, cycle
_logger=logging.getLogger(__name__)
def fragment_name(fragment):
if 'T00671' == fragment:
return 'p53'
if 'T00759' == fragment:
return 'Sp1'
if 'T99002' == fragment:
return 'GABP'
if 'T99003' == fragment:
return 'NRSF'
if 'T99005' == fragment:
return 'Stat5a'
if 'T99006' == fragment:
return 'Stat5b'
return fragment
def load_sequences(fasta):
"Load sequences."
_logger.info('Loading sequences: %s', fasta)
sequences = [
seq.remove('Nn')
for seq
in corebio.seq_io.fasta_io.iterseq(open(fasta, 'r'), corebio.seq.reduced_nucleic_alphabet)
]
return sequences
def is_unknown(x):
"@return: True iff x == 4."
return 4 == x
def partition(iterable, keyfunc=None):
"Partition an iterable into groups for which keyfunc returns the same value. Yields (key, begin, end) tuples."
if keyfunc is None:
keyfunc = lambda x: x
iterable = iter(iterable)
lastkey = object()
begin = None
for i, x in enumerate(iterable):
currkey = keyfunc(x)
if currkey != lastkey:
if begin is not None:
yield lastkey, begin, i
begin = i
lastkey = currkey
if begin is not None:
    yield lastkey, begin, i + 1  # end is exclusive, so the last run must include its final element
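# The helper below is an illustrative sketch added for clarity (it is not part of the
# original harness). It materialises the runs partition() yields for a small hand-made
# ords list, so the half-open [begin, end) convention relied on by shuffle_sequence()
# is easy to verify.
def _partition_example():
    "Return the runs partition() yields for a toy ords list; expected [(False, 0, 2), (True, 2, 4), (False, 4, 5)]."
    return list(partition([0, 1, 4, 4, 2], keyfunc=is_unknown))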
def shuffle_sequence(seq):
"@return: A shuffled version of the sequence leaving unknowns in place."
ords = N.array(seq.ords())
for key, begin, end in partition(ords, keyfunc=is_unknown):
if not key:
R.shuffle(ords[begin:end])
shuffled = seq.alphabet.chrs(ords)
shuffled.name = '%s (shuffled)' % seq.name
shuffled.description = '%s (shuffled)' % seq.description
return shuffled
def shorten_seq(to_shorten, sequence_to_match_length):
"Shorten a sequence to match the length of the other."
if len(to_shorten) < len(sequence_to_match_length):
raise RuntimeError('Not enough bases in sequence: %d < %d' % (len(to_shorten), len(sequence_to_match_length)))
ords = N.array(to_shorten.ords()[:len(sequence_to_match_length)])
ords[N.where(4==N.array(sequence_to_match_length.ords()))] = 4 # set unknown bases same as original sequence
result = sequence_to_match_length.alphabet.chrs(ords)
result.name = '%s (matched)' % sequence_to_match_length.name
result.description = '%s (matched)' % sequence_to_match_length.description
return result
def build_model_by_states(freqs, gaps, p_binding_site=0.001):
"""
Build a HMM representing the gapped PWM with the given frequencies and gaps. Can handle consecutive gaps
and gaps at beginning or end.
"""
if len(gaps) != len(freqs):
raise ValueError('Frequencies and gaps must be same length.')
K = len(gaps)
# normalise frequencies
freqs = (freqs.T / freqs.sum(axis=1)).T
# create model
model = hmm.ModelByStates(M=4, markov_order=0)
# add background state
bg = model.add_state()
bg.pi = model.add_parameter(1.)
uniform_param = model.add_parameter(.25)
for m in xrange(bg.M):
bg.b[m] = uniform_param
# add the binding site states in positive and negative directions
positive_states = [model.add_state() for i in xrange(K)]
negative_states = [model.add_state() for i in xrange(K)]
# connect background to initial binding site states
binding_param = model.add_parameter()
not_binding_param = model.add_parameter(1.-p_binding_site)
bg.add_successor(positive_states[0], binding_param)
bg.add_successor(negative_states[-1], binding_param)
bg.add_successor(bg, not_binding_param)
always_one_param = model.add_parameter(1.)
positive_states[-1].add_successor(bg, always_one_param)
negative_states[0].add_successor(bg, always_one_param)
# set up emissions
for freq, positive_state, negative_state in zip(freqs, positive_states, negative_states):
for b, f in enumerate(freq):
emission_param = model.add_parameter(f)
positive_state.b[b] = emission_param
negative_state.b[-b-1] = emission_param
# set up transitions
def setup_transitions(states, gaps):
for k in xrange(-1, K):
if -1 == k:
k_state = bg
p_skip = p_binding_site/2.
else:
k_state = states[k]
p_skip = 1.
for m in xrange(k+1, K):
gap_param = model.add_parameter(p_skip * gaps[m])
k_state.add_successor(states[m], gap_param)
p_skip *= (1. - gaps[m])
if 0. == p_skip:
break
if p_skip > 0.:
states[k].add_successor(bg, model.add_parameter(p_skip))
setup_transitions(positive_states, gaps)
setup_transitions(negative_states[::-1], gaps[::-1])
return model
def build_hmm_model(freqs, gaps, p_binding_site=.001):
"@return: A hmm.Model representing the gapped PWM defined by the arguments."
model_by_states = build_model_by_states(freqs, gaps, p_binding_site=p_binding_site)
model = hmm.as_model(model_by_states)
model.normalise()
return model
def make_classifier(model):
"""
Given a model, creates a classifier from it. A classifier is a function that is given a sequence and returns the threshold
above which the sequence would be considered a positive.
"""
def classifier(sequence):
"A classifier that takes a sequence and returns at what threshold it would be treated as positive."
LL, alpha, beta, c = model.forward_backward(sequence)
alphabeta = alpha * beta
gamma0 = alphabeta[:,0] / alphabeta.sum(axis=1)
gamma0[N.where(sequence.as_numpy()==4)] = 1. # make sure that where we have Ns aren't counted
# return how often we are not in state 0
return len(gamma0) - gamma0.sum()
return classifier
def test_hmm_forward_backward(model, seqs):
"""
Test a HMM on positive and negative sequences using forward-backward algorithm.
Counts how many bases are expected to be binding sites in each sequence.
"""
classifier = make_classifier(model)
scores = map(classifier, seqs)
scores.sort()
return scores
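# Illustrative sketch (added, not part of the original harness): a minimal end-to-end use
# of the helpers above with a hypothetical 3-position PWM. The frequency matrix, the gap
# probabilities and the name _example_score_with_gapped_pwm are made up for the example;
# only build_hmm_model() and test_hmm_forward_backward() come from this module, and the
# sequences passed in are assumed to be already preprocessed for the hmm module.
def _example_score_with_gapped_pwm(seqs):
    "Score preprocessed HMM sequences against a toy gapped PWM; returns sorted per-sequence binding-site base counts."
    freqs = N.array([[.7, .1, .1, .1],   # position 1 emission frequencies over the 4 bases
                     [.1, .7, .1, .1],   # position 2
                     [.1, .1, .7, .1]])  # position 3
    gaps = N.array([1., .5, 1.])         # position 2 is only present half the time
    model = build_hmm_model(freqs, gaps, p_binding_site=.001)
    return test_hmm_forward_backward(model, seqs)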
class TestHarness(object):
"A test harness."
def __init__(self, options):
"Construct."
self.options = options
"Options for the test harness."
# put defaults in if not specified
if not len(options.fragments):
self.options.fragments = default_fragments
if not len(options.backgrounds):
self.options.backgrounds = default_backgrounds
self.lazy_sequences = DictOf(load_sequences, take_key_as_arg=True)
"Reads sequences lazily."
self.lazy_sequences_for_hmm = DictOf(self.get_sequence_for_hmm, take_key_as_arg=True)
"Converts sequences to HMM format lazily."
def get_sequence_for_hmm(self, fasta):
"Get sequences converted to how HMM module likes them."
_logger.info('Converting sequences: %s', fasta)
sequences = self.lazy_sequences[fasta]
numpy_seqs = map(hmm.preprocess_sequence, imap(N.array, imap(corebio.seq.Seq.ords, sequences)))
return numpy_seqs
def tag(self, dataset):
"@return: A tag for the dataset."
if 2 == len(dataset):
return '%s-x%d' % dataset # positive data set
elif 3 == len(dataset):
return '%s-x%d-neg-%s' % dataset # negative data set
else:
raise RuntimeError('Unexpected length of data set arguments')
def fasta_filename(self, dataset):
"@return: The fasta filename for the data set."
return os.path.join(self.options.data_dir, '%s.fa' % self.tag(dataset))
def sequences(self, dataset):
"@return: The sequences for the dataset."
return self.lazy_sequences[self.fasta_filename(dataset)]
def hmm_sequences(self, dataset):
"@return: The HMM sequences for the dataset."
return self.lazy_sequences_for_hmm[self.fasta_filename(dataset)]
def results_filename(self, dataset, method):
"@return: The results filename."
return os.path.join(self.options.results_dir, '%s-%s.results' % (self.tag(dataset), method))
def results(self, dataset, method):
"@return: Load results from disk."
return cPickle.load(open(self.results_filename(dataset, method), 'rb'))
def run_method_on_dataset(self, dataset, method, model):
"Run the method on the dataset."
logging.info('Running %s on %s', method, dataset)
seqs = self.hmm_sequences(dataset)
scores = test_hmm_forward_backward(model, seqs)
filename = self.results_filename(dataset, method)
logging.info('First results are: %s', scores[:4])
logging.info('Writing results to %s', filename)
cPickle.dump(scores, open(filename, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
def all_datasets(self):
"@return: Yield all the datasets."
for fragment in self.options.fragments:
for fold in self.folds():
yield (fragment, fold)
for bg in self.options.backgrounds:
yield (fragment, fold, bg)
def folds(self):
"@return: The folds."
return range(1, self.options.num_folds+1)
def build_data_dir(self, original_dir, backgrounds):
"""
Create and populate a directory of fasta files for the test harness.
Link positive test fasta files to originals.
Read background fasta files in and create negative test sets of correct lengths.
"""
# make sure data directory exists
if not os.path.exists(self.options.data_dir):
os.makedirs(self.options.data_dir)
# create a dictionary of generators, one for each background
background_seqs = dict(
(bg, cycle(corebio.seq_io.fasta_io.iterseq(open(fasta), corebio.seq.reduced_nucleic_alphabet)))
for bg, fasta
in backgrounds.iteritems()
)
def get_output_file(dataset):
"Get output filename for dataset, removing existing file if it exists."
filename = self.fasta_filename(dataset)
if os.path.exists(filename):
os.remove(filename)
return filename
# for each fragment and fold
for fragment in self.options.fragments:
for fold in self.folds():
# link positive sequences to original sequences
dataset = (fragment, fold)
original_fasta = os.path.join(original_dir, '%strimRM-test-x%d.fa' % dataset)
os.symlink(original_fasta, get_output_file(dataset))
positive_seqs = self.sequences(dataset)
# get a set of negative sequences for each background and write to new file
for bg, bg_seqs in background_seqs.iteritems():
dataset = (fragment, fold, bg)
bg_seqs = corebio.seq.SeqList(
imap(shorten_seq, bg_seqs, positive_seqs),
name=None,
description=None,
alphabet=corebio.seq.reduced_nucleic_alphabet,
)
if len(bg_seqs) != len(positive_seqs):
raise RuntimeError('Not enough background negative sequences %s: %d != %d' % (bg, len(bg_seqs), len(positive_seqs)))
corebio.seq_io.fasta_io.write(open(get_output_file(dataset), 'w'), bg_seqs)
# create a negative dataset of shuffled versions of the positive dataset
dataset = (fragment, fold, 'shuffle')
f = open(get_output_file(dataset), 'w')
for seq in positive_seqs:
corebio.seq_io.fasta_io.writeseq(f, shuffle_sequence(seq))
f.close()
@staticmethod
def add_options(option_parser):
"Add test harness options to the parser."
option_parser.add_option(
'-d',
'--data-dir',
dest="data_dir",
default='.',
help="Where the sequences are stored."
)
option_parser.add_option(
'-r',
'--results-dir',
dest="results_dir",
default='.',
help="Where the test harness results are stored."
)
option_parser.add_option(
'-b',
dest="backgrounds",
default=[],
action='append',
help="The background data sets. If none specified defaults are used: %s" % default_backgrounds
)
option_parser.add_option(
'-f',
dest="fragments",
default=[],
action='append',
help="The fragments. If none specified defaults are used: %s" % default_fragments
)
option_parser.add_option(
'-n',
'--num-folds',
dest="num_folds",
type='int',
default=5,
help="How many folds in the cross validation."
)
default_fragments = [
'T00671',
'T00759',
'T99002',
'T99003',
'T99005',
'T99006',
]
default_backgrounds = [
'r1-back',
'r3-TSS',
'shuffle',
]
def choose_existing_dir(candidate_dirs):
"Given a list of candidate directories, chooses the first one that exists or returns None."
for dir in candidate_dirs:
if os.path.exists(dir):
return dir
return None
if '__main__' == __name__:
#
# Initialise the logging
#
logging.basicConfig(level=logging.INFO)
log_filename = 'create-test-harness-data.log'
file_handler = logging.FileHandler(log_filename)
file_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logging.getLogger('').addHandler(file_handler)
logging.info('Writing log to %s', | |
# repository: KriSun95/sunxspex
"""
The following code is for instrument specific classes each using their own methods to create and edit their `_loaded_spec_data` attributes.
Tips that I have been following:
* None of the instrument loaders should have public attributes; ie., all attributes should be preceded with `_`
* Only obvious and useful methods and setters should be public, all else preceded with `_`
"""
import warnings
from os import path as os_path
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from astropy.time import Time
from . import io
from . import nu_spec_code as nu_spec
from . import rhes_spec_code as rhes_spec
from . import stix_spec_code as stix_spec
__all__ = ["NustarLoader", "StixLoader", "RhessiLoader", "CustomLoader", "rebin_any_array"]
# Get a default class for the instrument specific loaders
# Once the instrument specific loaders inherit from this then all they really have to do is get the spectral
# data they want to fit into the correct dictionary form and assign it to `self._loaded_spec_data`.
class InstrumentBlueprint:
""" The blueprint class for an instruemnt to be given to the `DataLoader` class in data_loader.py.
The main aim of these classes is to:
(1) produce a `_loaded_spec_data` attribute with the instrument spectral data in the
form {"photon_channel_bins":Photon Space Bins (e.g., [keV,keV],[keV,keV],...]),
"photon_channel_mids":Photon Space Bin Mid-points (e.g., [keV,...]),
"photon_channel_binning":Photon Space Binwidths (e.g., [keV,...]),
"count_channel_bins":Count Space Bins (e.g., [keV,keV],[keV,keV],...]),
"count_channel_mids":Count Space Bin Mid-points (e.g., [keV,...]),
"count_channel_binning":Count Space Binwidths (e.g., [keV,...]),
"counts":counts (e.g., cts),
"count_error":Count Error for `counts`,
"count_rate":Count Rate (e.g., cts/keV/s),
"count_rate_error":Count Rate Error for `count_rate`,
"effective_exposure":Effective Exposure (e.g., s),
"srm":Spectral Response Matrix (e.g., cts/ph * cm^2),
"extras":{"any_extra_info":or_empty_dict}
};
(2) provide instrument specific methods such as time/spatial/spectral range selectors
and SRM rebinning methods that then update the `_loaded_spec_data` attribute
appropriately.
Instrument loader classes are expected to receive the PHA spectral file (`pha_file`) as the first
argument then other spectral information (`arf_file`, `rmf_file`, `srm_custom`,
`custom_channel_bins`). Obviously not all of these files need to be used and so just pass them through
as **kwargs.
The `DataLoader` class in data_loader.py then creates a dictionary attribute called `loaded_spec_data`
(note no underscore) that is then gettable by the user when spectral fitting with the `SunXspex` class
in fitter.py where the keys are each spectrum's ID (e.g., spectrum1, spectrum2, etc.).
This means that, while fitting STIX data with spectrum ID "spectrum1" for example, if the user wants
to change the time interval for the spectrum (e.g., with an instrument specific method time_range)
they can do this by `loaded_spec_data["spectrum1"].time_range(new_time_range)` which will update the
StixLoader `_loaded_spec_data` attribute located in loaded_spec_data["spectrum1"].
"""
_UNIVERSAL_DOC_ = """Parameters
----------
pha_file : string
The PHA file for the spectrum to be loaded.
arf_file, rmf_file : string
The ARF and RMF files associated with the PHA file(s). If none are given (e.g, with
NuSTAR data) it is assumed that these are in the same directory with same filename
as the PHA file(s) but with extensions '.arf' and '.rmf', respectively.
srm_file : string
The file that contains the spectral response matrix for the given spectrum.
srm_custom : 2d array
User defined spectral response matrix. This is accepted over the SRM created from any
ARF and RMF files given.
custom_channel_bins, custom_photon_bins : 2d array
User defined channel bins for the columns and rows of the SRM matrix.
E.g., custom_channel_bins=[[1,1.5],[1.5,2],...]
Attributes
----------
_construction_string : string
String to show how class was constructed.
_loaded_spec_data : dict
Loaded spectral data.
"""
def _rebin_rmf(self, matrix, old_count_bins=None, new_count_bins=None, old_photon_bins=None, new_photon_bins=None, axis="count"):
""" Rebins the photon and/or count channels of the redistribution matrix if needed.
This will rebin any 2d array by taking the mean across photon space (rows) and summing
across count space (columns).
If no effective area information from the instrument then this is passed straight
to `_rebin_srm`, if there is then the `_rebin_srm` should be overwritten.
Parameters
----------
matrix : 2d array
Redistribution matrix.
old_count_bins, new_count_bins : 1d arrays
The old count channel binning and the new binning to be for the redistribution matrix columns (sum columns).
old_photon_bins, new_photon_bins : 1d arrays
The old photon channel binning and the new binning to be for the redistribution matrix rows (average rows).
axis : string
Define what \'axis\' the binning should be applied to. E.g., \'photon\', \'count\', or \'photon_and_count\'.
Returns
-------
The rebinned 2d redistribution matrix.
"""
# across channel bins, we sum. across energy bins, we average
# appears to be >2x faster to average first then sum if needing to do both
if (axis == "photon") or (axis == "photon_and_count"):
# very slight difference to rbnrmf when binning across photon axis, <2% of entries have a ratio (my way/rbnrmf) >1 (up to 11)
# all come from where the original rmf has zeros originally so might be down to precision being worked in, can't expect the exact same numbers essentially
matrix = rebin_any_array(data=matrix, old_bins=old_photon_bins, new_bins=new_photon_bins, combine_by="mean")
if (axis == "count") or (axis == "photon_and_count"):
matrix = rebin_any_array(data=matrix.T, old_bins=old_count_bins, new_bins=new_count_bins, combine_by="sum").T # need to go along columns so .T then .T back
return matrix
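    # Hedged worked example (added for clarity, not part of the original class): rebinning
    # the count axis of a 2x4 matrix down to two count channels sums columns pairwise,
    #     [[1, 2, 3, 4],        [[ 3,  7],
    #      [5, 6, 7, 8]]   ->    [11, 15]]
    # whereas rebinning the photon axis (rows) would average the merged rows instead.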
def _channel_bin_info(self, axis):
""" Returns the old and new channel bins for the indicated axis (count axis, photon axis or both).
Parameters
----------
axis : string
Set to "count", "photon", or "photon_and_count" to return the old and new count
channel bins, photon channel bins, or both.
Returns
-------
Arrays of old_count_bins, new_count_bins, old_photon_bins, new_photon_bins or Nones.
"""
old_count_bins, new_count_bins, old_photon_bins, new_photon_bins = None, None, None, None
if (axis == "count") or (axis == "photon_and_count"):
old_count_bins = self._loaded_spec_data["extras"]["original_count_channel_bins"]
new_count_bins = self._loaded_spec_data["count_channel_bins"]
if (axis == "photon") or (axis == "photon_and_count"):
old_photon_bins = self._loaded_spec_data["extras"]["orignal_photon_channel_bins"]
new_photon_bins = self._loaded_spec_data["photon_channel_bins"]
return old_count_bins, new_count_bins, old_photon_bins, new_photon_bins
def _rebin_srm(self, axis="count"):
""" Rebins the photon and/or count channels of the spectral response matrix (SRM) if needed.
Note: If the instrument has a spatial aspect and effective information is present (e.g.,
NuSTAR from its ARF file) then this method should be overwritten in the instrument
specific loader in order to rebin the redistribution matrix and effective area separately
before re-construction the new SRM.
Parameters
----------
matrix : 2d array
Spectral response matrix.
old_count_bins, new_count_bins : 1d arrays
The old count channel binning and the new binning to be for the spectral response matrix columns (sum columns).
old_photon_bins, new_photon_bins : 1d arrays
The old photon channel binning and the new binning to be for the spectral response matrix rows (average rows).
axis : string
Define what \'axis\' the binning should be applied to. E.g., \'photon\', \'count\', or \'photon_and_count\'.
Returns
-------
The rebinned 2d spectral response matrix.
"""
old_count_bins, new_count_bins, old_photon_bins, new_photon_bins = self._channel_bin_info(axis)
matrix = self._loaded_spec_data["srm"]
return self._rebin_rmf(matrix, old_count_bins=old_count_bins, new_count_bins=new_count_bins, old_photon_bins=old_photon_bins, new_photon_bins=new_photon_bins, axis=axis)
def __getitem__(self, item):
"""Index the entries in `_loaded_spec_data`"""
return self._loaded_spec_data[item]
def __setitem__(self, item, new_value):
"""Allows entries in `_loaded_spec_data` to be changed."""
self._loaded_spec_data[item] = new_value
def __call__(self):
"""When the class is called (n=NustarLoader()->n()) then `_loaded_spec_data` is returned."""
return self._loaded_spec_data
def __repr__(self):
"""String representation of `_loaded_spec_data`."""
return str(self._loaded_spec_data)
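# Illustrative sketch (added, not part of the original module): a minimal
# `_loaded_spec_data`-style dictionary filled with made-up numbers, showing the field
# names and shape conventions a loader is expected to provide. The helper name
# _example_loaded_spec_data and all values are placeholders, not real instrument data.
def _example_loaded_spec_data():
    """Return a skeleton `_loaded_spec_data` dictionary with two energy channels."""
    photon_bins = np.array([[1.0, 1.5], [1.5, 2.0]])   # photon-space bins (keV)
    count_bins = np.array([[1.0, 1.5], [1.5, 2.0]])    # count-space bins (keV)
    counts = np.array([10.0, 20.0])                    # counts per channel
    binning = np.diff(count_bins).flatten()            # channel widths (keV)
    exposure = 100.0                                   # effective exposure (s)
    return {"photon_channel_bins": photon_bins,
            "photon_channel_mids": photon_bins.mean(axis=1),
            "photon_channel_binning": np.diff(photon_bins).flatten(),
            "count_channel_bins": count_bins,
            "count_channel_mids": count_bins.mean(axis=1),
            "count_channel_binning": binning,
            "counts": counts,
            "count_error": np.sqrt(counts),
            "count_rate": counts / binning / exposure,
            "count_rate_error": np.sqrt(counts) / binning / exposure,
            "effective_exposure": exposure,
            "srm": np.identity(2),                     # placeholder SRM (cts/ph * cm^2)
            "extras": {}}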
# Instrument specific data loaders
# As long as these loaders get the spectral data to fit into the correct dictionary form and assigned to self._loaded_spec_data then
# they should work but they can also overwrite the _rebin_srm(self, axis="count") method if the SRM rebinning is instrument specific.
# The benefit here is that the class can have other methods/properties/setters (like time selection for STIX/RHESSI;e.g.,
# .select_time(new_time)?) which can be accessed at the user level easily when fitting through the loaded_spec_data attribute
# (e.g., .loaded_spec_data["spectrum1"].select_time(new_time)).
class NustarLoader(InstrumentBlueprint):
"""
Loader specifically for NuSTAR spectral data.
NustarLoader Specifics
----------------------
Changes how the spectral response matrix (SRM) is rebinned. The NuSTAR SRM is constructed from
the effective areas (EFs) and redistribution matrix (RM) and so the EFs and RM are rebinned
separately then used to construct the rebinned SRM.
Superclass Override: _rebin_srm()
Attributes
----------
_construction_string : string
String to show how class was constructed.
_loaded_spec_data : dict
Instrument loaded spectral data.
"""
__doc__ += InstrumentBlueprint._UNIVERSAL_DOC_
def __init__(self, pha_file, arf_file=None, rmf_file=None, srm_custom=None, custom_channel_bins=None, custom_photon_bins=None, **kwargs):
"""Construct a string to show how the class was constructed (`_construction_string`) and set | |
# repository: temelkirci/Motion_Editor, file: venv/Lib/site-packages/astropy/units/core.py
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from ..utils.decorators import lazyproperty
from ..utils.exceptions import AstropyWarning
from ..utils.misc import isiterable, InheritDocstrings
from .utils import (is_effectively_unity, sanitize_scale, validate_power,
resolve_fractions)
from . import format as unit_format
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'dimensionless_unscaled', 'one']
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(
"Invalid equivalence entry {0}: {1!r}".format(i, equiv))
if not (funit is Unit(funit) and
(tunit is None or tunit is Unit(tunit)) and
callable(a) and
callable(b)):
raise ValueError(
"Invalid equivalence entry {0}: {1!r}".format(i, equiv))
normalized.append((funit, tunit, a, b))
return normalized
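# Illustrative note (added, not part of astropy): after normalization every entry has the
# 4-tuple form (from_unit, to_unit, forward_func, backward_func). A bare pair such as
# (u.pix, u.arcsec) is padded with identity functions in both directions, and a 3-tuple
# (funit, tunit, forward) reuses ``forward`` for the backward conversion as well.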
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[]):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {k: v.copy() for k, v in
init._by_physical_type.items()}
else:
self._reset_units()
self._reset_equivalencies()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if (st in self._registry and unit != self._registry[st]):
raise ValueError(
"Object with name {0!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them.".format(st))
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(
_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(
equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | |
# repository: jlevy44/JoshuaTree2, file: scaffolding_tool_bin/old_scripts/genomeScaffolding.py
import subprocess, os, sys
from collections import defaultdict, OrderedDict
import numpy as np
from multiprocessing import Pool, Queue, Process
from threading import Thread
import subprocess,shutil
from pybedtools import BedTool
from jcvi.formats import gff
from pyfaidx import Fasta
import time
"""python genomeScaffolding.py ReferenceBuild sampleBuild CDSProtID OldCDSGeneName protID1 weight1 protID2 weight2 ..."""
CDSgeneNaming = sys.argv[4]
CDSspecies = sys.argv[3]
args = sys.argv[5:]
root = os.getcwd()+'/'
weights = OrderedDict()
listSamplesv0 = [folder for folder in os.listdir('v0') if folder.endswith('v0')]
try:
ReferenceBuild = int(sys.argv[1])
except:
ReferenceBuild = 1
try:
sampleBuild = int(sys.argv[2])
except:
sampleBuild = 1
print args
print CDSgeneNaming
print CDSspecies
for i in np.arange(0,len(args),2):
try:
weights[args[i]]=int(args[i+1])
except:
print args
print weights
runCommand = lambda x: subprocess.call(x,shell=True)
binbash = "#!/bin/bash"
makeTrashFolder = 'mkdir oldFiles'
moduleLoads = """module load cufflinks/2.2.1
module load samtools/1.3.1
module load gmap
module load parallel/20150222
module load bedtools/2.25.0
module unload gcc
module load gcc/6.3.0
"""
def runCommands(q):
while not q.empty():
        command = q.get()  # fetch each command once; repeated q.get() calls would consume extra items
        print command
        try:
            runCommand(command)
        except:
            with open('Error.txt','a') as f:
                f.write(command+'\n')
q.task_done()
def buildReferences(reference): # essentially keys of weights
global root
global binbash, makeTrashFolder, moduleLoads
print reference
os.chdir('./referenceGenomes/'+reference)
#print os.getcwd()
#print os.listdir('.')
fastaOld = [fasta for fasta in os.listdir('.') if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
#Fasta(fastaOld)
#gff.load([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0])
writeCommands = [binbash,moduleLoads,makeTrashFolder,'samtools faidx %s'%fastaOld,
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],fastaOld,reference+'.cds'),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],reference+'.bed'),
'python %sreplacepath.py %s'%(root,reference+'.bed'),'mv %s %s ..'%(reference+'.bed',reference+'.cds')]
#binbash,makeTrashFolder,moduleLoads,
#print '\n'.join(writeCommands)
"""if __name__ == '__main__':
q = Queue(maxsize=0)
for command in writeCommands:
q.put(command)
runCommands(q)"""
"""for command in writeCommands:
print command
try:
runCommand(command)
except:
with open('Error.txt','a') as f:
f.write(command+'\n')"""
"""for i, command in writeCommands:
print command
if (i == 3 or i==4) and (reference + '.bed' not in os.listdir('..') or os.stat('../'+reference + '.bed').st_size == 0):
runCommand(command)
elif i == 2 and (reference + '.cds' not in os.listdir('..') or os.stat('../'+reference + '.cds').st_size == 0):
runCommand(command)
elif i not in range(2, 7):
runCommand(command)"""
with open('buildReference.sh','w') as f:
f.write('\n'.join(writeCommands))
subprocess.call(['nohup','sh','buildReference.sh'])
os.chdir(root)
#print ReferenceBuild
CDSOld = [fasta for fasta in os.listdir('./referenceGenomes/%s'%CDSspecies) if 'cds' in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
linkReferences = ['ln -s %s%s/%s.cds %s.cds\nln -s %s%s/%s.bed %s.bed'%(root,'referenceGenomes',ref,ref,root,'referenceGenomes',ref,ref) for ref in weights.keys()]
def buildSamplesv0(sample): #sample = Bdist_xxx_v0.fa
global root
global CDSspecies, CDSOld
global binbash, makeTrashFolder, moduleLoads
global CDSgeneNaming, linkReferences
print sample
os.chdir('v0/'+sample)
fastaNew = sample+'.fa'
geneNaming = sample.replace('_','') # -t is number of worker threads
runCommand('rm finishBuild.txt')
writeCommands = [binbash,moduleLoads,makeTrashFolder,'rm -r %s %s.gff3.db %s.chromosome *.iit %s.coords'%(geneNaming,geneNaming,geneNaming,geneNaming),
'samtools faidx %s' %fastaNew,
'gmap_build --dir=. -d %s %s' % (geneNaming,fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 6 %s > %s 2> %s' % (
geneNaming, '../../referenceGenomes/%s/'%CDSspecies + CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' %(root,geneNaming + '.gff3', CDSgeneNaming ,geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (geneNaming + '.gff3', sample + '.bed'),
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s' % (
geneNaming+'.gff3', fastaNew,sample + '.cds')]+linkReferences+['> finishBuild.txt']
#"""'python %sgff2CDSBed.py %s'%(root,geneNaming + '.gff3'),'sortBed -i %s.CDS.bed > %s.CDS2.bed'%(geneNaming,geneNaming),
#'python %sformatBed.py s %s v0 1'%(root,geneNaming+'.CDS2'),'bedtools getfasta -name -fi ./%s -bed %s.CDS2.bed -fo %s.cds'%(fastaNew,geneNaming,sample)
#]"""#'mv %s %s ..'%(sample+'.cds',sample+'.bed') binbash, moduleLoads, makeTrashFolder,
#'python -m jcvi.formats.gff load %s %s --feature=CDS --id_attribute=Name -o %s' % (geneNaming + '.gff3', fastaNew,sample + '.cds'),
#'mergeBed -c 4 -i %s.CDS2.bed > %s.CDS.bed'%(geneNaming,geneNaming)
#print writeCommands
#print os.getcwd()
#open('buildSample.sh', 'w').close()
"""if __name__ == '__main__':
q = Queue(maxsize=0)
for command in writeCommands:
q.put(command)
runCommands(q)"""
i=0
"""
for command in writeCommands:
#print i,command
#print i
if (i == 2 or i == 3 or i == 4) and (geneNaming + '.gff3' not in os.listdir('.') or os.stat(geneNaming + '.gff3').st_size ==0):
print(command)
runCommand(command)
elif i==5 and (sample + '.bed' not in os.listdir('.') or os.stat(sample + '.bed').st_size ==0):
print(command)
runCommand(command)
elif i == 6 and (sample + '.cds' not in os.listdir('.') or os.stat(sample + '.cds').st_size ==0):
print(command)
runCommand(command)
elif i not in range(2,7):
print(command)
runCommand(command)
i+=1
"""
with open('buildSample.sh', 'w') as f:
f.write('\n'.join(writeCommands))
#subprocess.call(['nohup', 'sh', 'buildSample.sh'])
runCommand('qsub -P plant-analysis.p -N %s -cwd -l high.c -pe pe_slots 16 -e %s %s' % (
'build'+sample.split('_')[1], 'ErrFile.txt', 'buildSample.sh'))
while True:
if os.path.isfile('finishBuild.txt'):
break
else:
time.sleep(10)
os.chdir(root)
"""try:
runCommand(command)
except:
with open('Error.txt','a') as f:
f.write(command+'\n')"""
"""with open('buildSample.sh','w') as f:
f.write('\n'.join(writeCommands))
try:
subprocess.call(['nohup','sh','buildSample.sh'])
except:
with open('output.txt', 'a') as f:
f.write('Error in %s'%sample)"""
"""writeCommands2 = [binbash, moduleLoads,'gmap_build --dir=. -d %s %s' % (geneNaming,fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 8 %s > %s 2> %s' % (
geneNaming, CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' % (root, geneNaming + '.gff3', CDSgeneNaming, geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (
geneNaming + '.gff3', sample + '.bed'),
'python -m jcvi.formats.gff bed --type=CDS --key=Name %s -o %s' % (
geneNaming + '.gff3', sample + '.CDS.bed'),
'bedtools getfasta -name -fi ./%s -bed %s.CDS.bed -fo %s.cds' % (
fastaNew, sample, sample)]
with open('buildSample.sh', 'w') as f:
f.write('\n'.join(writeCommands2))
subprocess.call(['nohup', 'sh', 'buildSample.sh'])"""
try:
os.mkdir('v1')
for folder in listSamplesv0:
os.mkdir('v1/%s'%folder.replace('v0','v1'))
os.mkdir('v1/%s/OldFiles'%folder.replace('v0','v1'))
except:
pass
buildCorrespondence = {folder:folder.replace('v0','v1') for folder in listSamplesv0}
listSamplesv1 = buildCorrespondence.values()
print listSamplesv1
def replaceGeneNames(sample,ref,count=0,nuc=0):
refGeneCount = 0
synmap = '%s.%s.lifted.anchors' % (sample, ref)
if nuc:
nucAdd = 'nuc'
synmap = 'nucMap.bed'
refbed = ref + '_nucSyn.bed'
sampbed = sample + '_nucSyn.bed'
a, b = 1, 0
else:
nucAdd = ''
refbed = ref + '.bed'
sampbed = sample + '.bed'
a, b = 0, 1
sampleProt = sample.split('_')[1]
with open(refbed,'r') as f:
refBedLines = f.readlines()
refBedOut = []
refGenes = defaultdict(list)
for line in refBedLines:
if line:
refGenes[line.split('\t')[3]] = ref+nucAdd+'_'+str(refGeneCount)
refBedOut.append(line.replace(line.split('\t')[3],ref+nucAdd+'_'+str(refGeneCount)))
refGeneCount+=1
#ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed'
#print refGenes
with open(sampbed,'r') as f:
sampBedLines = f.readlines()
sampBedOut = []
sampGenes = defaultdict(list)
for line in sampBedLines:
if line:
sampGenes[line.split('\t')[3]] = sampleProt+nucAdd+'_'+str(count)
sampBedOut.append(line.replace(line.split('\t')[3], sampleProt + nucAdd + '_' + str(count)))
count+=1
with open(synmap,'r') as f:
synRead = f.readlines()
synOut = []
for line in synRead:
if line and '###' not in line:
try:
genes = line.split('\t')
print genes
synOut.append(line.replace(genes[0],refGenes[genes[a]]).replace(genes[1],sampGenes[genes[b]]))
except:
with open('Err.txt','a') as f:
f.write(line+'\n')
"""
if nuc:
print sampBedOut[0:10]
print refBedOut[0:10]
print sampGenes.items()[0:10]
print refGenes.items()[0:10]
print synOut[0:10]
with open('nucMap.bed','r') as f:
print f.readlines()[0:10]
"""
if nuc == 0:
for writeTuple in [(ref+'_syn'+'.bed',refBedOut),(sample+'_%ssyn'%ref+'.bed',sampBedOut),(synmap,synOut)]:
with open(writeTuple[0],'w') as f:
f.writelines(writeTuple[1])
else:
for writeTuple in [(refbed,refBedOut),(sampbed,sampBedOut),(synmap,synOut)]:
with open(writeTuple[0],'w') as f:
f.writelines(writeTuple[1])
return count
def tiling2bed(tilingFile,ref,sample,sampBed):
with open(tilingFile,'r') as f:
tilingLines = f.read().split('\n')
genesDict = defaultdict(list)
with open(ref+'_nucSyn.bed','w') as f1, open(sample+'_nucSyn.bed','w') as f2:
for line in tilingLines:
if line:
lineList = line.split('\t')
int1 = sorted(map(int,lineList[0:2]))
int1[0] -= 1
int2 = sorted(map(int,lineList[2:4]))
int2[0] -= 1
f1.write('\t'.join([lineList[-2]]+map(str,int1)+['_'.join([lineList[-2]]+map(str,int1)),'0','+']) + '\n')
f2.write('\t'.join([lineList[-1]]+map(str,int2)+['_'.join([lineList[-1]]+map(str,int2)),'0','+']) + '\n')
genesDict['_'.join([lineList[-1]]+map(str,int2))] = '_'.join([lineList[-2]]+map(str,int1))
b = BedTool(sample+'_nucSyn.bed').subtract(BedTool(sampBed),A=True)
#print b.head()
#print genesDict.keys()[0:10]
origGenes = set(genesDict.keys())
#print str(b).split('\n')[0:10]
#print [ line.split('\t')[3] for line in str(b).split('\n') if line][0:10]
remainGenes = set([ line.split('\t')[3] for line in str(b).split('\n') if line])
#print list(remainGenes)[0:10]
BadGenes = list(origGenes - remainGenes)
#print BadGenes[0:10]
#print len(origGenes), len(remainGenes), len(BadGenes)
#exit()
for gene in BadGenes:
try:
del genesDict[gene]
except:
pass
with open('nucMap.bed','w') as f:
f.write('\n'.join('%s\t%s\t100'%item for item in genesDict.items() if item))
fastaNucOld = [fasta for fasta in os.listdir('./referenceGenomes/%s'%CDSspecies) if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
def generatev1(sample):
os.chdir('v0/%s'%sample)
print sample.replace('v0', 'v1')
global binbash, makeTrashFolder, moduleLoads, root, weights, fastaNucOld, CDSspecies
#print weights
print '\n'.join('%s %d'%(key,weights[key]) for key in weights.keys())#weights.keys()#'\n'.join('%s %d'%(key,weights[key]) for key in sorted(weights, key=weights.get, reverse=True).keys())
print 'hi'
"""if __name__ == '__main__':
p = Pool(None)
p.imap(pairwise, [(sample,ref) for ref in weights.keys()])"""
with open('weights.txt','w') as f:
f.write('\n'.join([weights.keys()[0]+' %d'%weights[weights.keys()[0]],'%snuc %d'%(CDSspecies,weights[CDSspecies]-1)]+['%s %d'%(key,weights[key]) for key in weights.keys()[1:]]))
nucCommands = [binbash,moduleLoads]+ ['nucmer -t 6 -p %s %s %s'%(CDSspecies+'nuc',root+'referenceGenomes/%s/'%CDSspecies+fastaNucOld,sample+'.fa'),
'delta-filter -m -q -i 85 -u 50 %snuc.delta > %snuc2.delta'%(CDSspecies,CDSspecies),'show-tiling -a %snuc2.delta > %snuc.tiling'%(CDSspecies,CDSspecies)]
commands1 = [binbash, moduleLoads]+['rm *.anchors *.last *.filtered *.prj']+\
['nohup python -m jcvi.compara.catalog ortholog %s %s\nmv %s %s'%(ref,sample,'%s.%s.lifted.anchors'%(ref,sample),'%s.%s.lifted.anchors'%(sample,ref)) for ref in weights.keys()]
commands2=[binbash, moduleLoads]+['rm multipleMapping.bed','\n'.join('python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %s'%('%s.%s.lifted.anchors'%(sample,ref),ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed','%s.synteny.bed'%(ref)) for ref in weights.keys()),
'python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %snuc.synteny.bed'%('nucMap.bed',CDSspecies+'_nucSyn.bed',sample+'_nucSyn.bed',CDSspecies),
'nohup python -m jcvi.assembly.allmaps mergebed %s -o %s'%(' '.join(['%s.synteny.bed'%(ref) for ref in (weights.keys() + [CDSspecies+'nuc'])]),'multipleMapping.bed')]
qsub=[binbash,moduleLoads]+['python -m jcvi.assembly.allmaps path --skipconcorde --cpus=32 --ngen=300 --npop=50 multipleMapping.bed | |
"b", "c"),
new_field_name="new_field",
expected={"a": {"b": {"c": [7, 3, 5]}, "new_field": [7, 3, 5]}}),
dict(
testcase_name="repeated_repeated_scalar_repeated2",
st=[{"a": [{"b": {"c": [[7, 3], [17]]}}, {"b": {"c": [[3, 13]]}}]},
{"a": [{"b": {"c": [[5, 15]]}}]}],
source_path=("a", "b", "c"),
new_field_name="new_field",
expected=[{"a": [{"b": {"c": [[7, 3], [17]]},
"new_field": [[7, 3], [17]]},
{"b": {"c": [[3, 13]]},
"new_field": [[3, 13]]}]},
{"a": [{"b": {"c": [[5, 15]]},
"new_field": [[5, 15]]}]}]),
dict(testcase_name="example_4_promote_of_labeled_vector",
st=[{"user_info": [{"gaia_id": {"vec": [0, 1, 2]}}]},
{"user_info": [{"gaia_id": {"vec": [3, 4, 5]}}]}],
source_path=("user_info", "gaia_id"),
new_field_name="user_info_gaia_id",
expected=[{"user_info": [{"gaia_id": {"vec": [0, 1, 2]}}],
"user_info_gaia_id": [{"vec": [0, 1, 2]}]},
{"user_info": [{"gaia_id": {"vec": [3, 4, 5]}}],
"user_info_gaia_id": [{"vec": [3, 4, 5]}]}]),
dict(
testcase_name="promote_structure",
st=[{"a": [{"aa": [{"b": {"c": 1}}, {"b": {"c": 8}}]}],},
{"a": [{"aa": [{"b": {"c": 12}}]}],}],
source_path=("a", "aa", "b"),
new_field_name="new_field",
expected=[{"a": [{"aa": [{"b": {"c": 1}}, {"b": {"c": 8}}],
"new_field": [{"c": 1}, {"c": 8}]}]},
{"a": [{"aa": [{"b": {"c": 12}}],
"new_field": [{"c": 12}]}]}])]) # pyformat: disable
def testPromote(self, st, source_path, new_field_name, expected):
st2 = StructuredTensor.from_pyval(st)
expected2 = StructuredTensor.from_pyval(expected)
result = st2.promote(source_path, new_field_name)
self.assertAllEqual(result, expected2)
def testPromoteDense(self):
st = StructuredTensor.from_fields(
{
"a":
StructuredTensor.from_fields(
{"b": [[[1, 11], [2, 12]], [[3, 13], [4, 14]]]},
shape=[2, 2, 2])
},
shape=[2])
result = st.promote(("a", "b"), "new_field")
self.assertEqual(st.rank, 1)
self.assertEqual(st.field_value("a").rank, 3)
self.assertAllEqual(
result.field_value("new_field"), [[1, 11, 2, 12], [3, 13, 4, 14]])
def testMergeDimsGeneric(self):
"""This is an example of a dense tensor being merged, when outer=rank.
Note that outer=rank is equivalent to outer=rank - 1. And yet, from the
perspective of promote, it is nice to be able to have this functionality
directly available, because sometimes the rank of the parent equals the
rank of the child.
Finally, note that merge_dims for Ragged and StructuredTensor would not
accept this as a valid argument.
Note: _merge_dims_generic is private, but these unit tests help to
discuss the proper API definition.
"""
t = array_ops.constant([[[1, 11], [2, 12]], [[3, 13], [4, 14]]])
t2 = structured_tensor._merge_dims_generic(t, 1, 3)
self.assertAllEqual(t2, [[1, 11, 2, 12], [3, 13, 4, 14]])
def testMergeDimsGenericNoop(self):
"""This is an example of a dense tensor being merged, when outer=inner.
Sometimes, when promoting, the parent and grandparent ranks are equal.
Finally, note that merge_dims for Ragged and StructuredTensor would not
accept this as a valid argument. This should be aligned.
"""
t = array_ops.constant([[[1, 11], [2, 12]], [[3, 13], [4, 14]]])
t2 = structured_tensor._merge_dims_generic(t, 2, 2)
self.assertAllEqual(t2, [[[1, 11], [2, 12]], [[3, 13], [4, 14]]])
def testRepr(self):
st = StructuredTensor.from_pyval({"a": 5, "b": {"c": [1, 2, 3]}})
if context.executing_eagerly():
expected = textwrap.dedent("""
<StructuredTensor(
fields={
"a": tf.Tensor(5, shape=(), dtype=int32),
"b": <StructuredTensor(
fields={
"c": tf.Tensor([1 2 3], shape=(3,), dtype=int32)},
shape=())>},
shape=())>""")[1:]
else:
expected = textwrap.dedent("""
<StructuredTensor(
fields={
"a": Tensor("Const:0", shape=(), dtype=int32),
"b": <StructuredTensor(
fields={
"c": Tensor("RaggedConstant/Const:0", shape=(3,), dtype=int32)},
shape=())>},
shape=())>""")[1:]
self.assertEqual(repr(st), expected)
def testPartitionOuterDimension2DDenseField(self):
struct = structured_tensor.StructuredTensor.from_fields(
fields={"r": array_ops.constant([[1, 2], [3, 4]])}, shape=[2])
result = struct.partition_outer_dimension(
row_partition.RowPartition.from_uniform_row_length(2, 2))
r = result.field_value("r")
self.assertAllEqual(r, [[[1, 2], [3, 4]]])
@parameterized.parameters([
# Simple example.
(
{"a": 12, "b": 23},
{"a": 7},
),
# New field.
(
{"a": 12},
{("b",): 13},
),
# Nested example.
(
{"a": 12, "b": {"c": 23}},
{("b", "c"): 7},
),
# Multiple updates.
(
{"a": 12, "b": {"c": 23}},
{"a": 3, ("b", "c"): 7},
),
# Deep updates.
(
{"a": 12, "b": {"c": 23, "d": {"e": 11}}},
{("b", "c"): 7, ("b", "d", "e"): 13},
),
# Multiple updates to the same substructure.
(
{"a": 12, "b": {"c": 23, "d": {"e": 11}}},
{("b", "c"): 7, ("b", "f"): 13},
),
# Scalar to non-scalar elements. Shape remains unchanged.
(
{"a": 5},
{"a": ragged_factory_ops.constant_value([[51, 52], [61, 62, 63]])},
),
# Non-scalar element to scalar.
(
{"c": {"a": [5, 3], "b": 2}},
{("c", "a"): 5},
),
# Rank-1 StructuredTensor: shape is preserved and an item is added.
(
[{"a": 5}, {"a": 6}],
{"a": [15, 16], "b": np.array([0.9, 1.1])},
),
# Non-scalar ragged elements, within a rank-2 StructuredTensor: elements
# rows (inner dimensions) are changed, but StructuredTensor shape
# (outer dimensions) are preserved.
(
[[{"a": [5]}], [{"a": [3, 4]}, {"a": [8]}]],
{"a": ragged_factory_ops.constant_value([[[50, 60]], [[30], []]])},
),
]) # pyformat: disable
def testWithUpdatesValues(self, pyval, updates):
st = StructuredTensor.from_pyval(pyval)
updated_st = st.with_updates(updates, validate=False)
for key, value in updates.items():
got = updated_st.field_value(key)
self.assertAllEqual(
value, got,
"Update failed: key={}, value={}, got={}".format(key, value, got))
def testWithUpdatesFunctions(self):
pyval = {"a": 12, "b": {"c": 23, "d": {"e": 11}}}
st = StructuredTensor.from_pyval(pyval)
st_updated = st.with_updates(
{
"a": lambda x: x + 1,
("b", "d", "e"): lambda x: x + 7
}, validate=True)
# Updated values.
self.assertAllEqual(st_updated.field_value("a"), 13)
self.assertAllEqual(st_updated.field_value(("b", "d", "e")), 18)
# Unchanged value.
self.assertAllEqual(st_updated.field_value(("b", "c")), 23)
def test_from_pyval_list_of_empty(self):
"""See b/183245576."""
st = structured_tensor.StructuredTensor.from_pyval([{}])
self.assertAllEqual([1], st.shape.as_list())
def test_from_pyval_list_of_empty_three(self):
"""See b/183245576."""
st = structured_tensor.StructuredTensor.from_pyval([{}, {}, {}])
self.assertAllEqual([3], st.shape.as_list())
self.assertEmpty(st.field_names())
def test_from_pyval_deep_list_of_empty(self):
"""See b/183245576."""
st = structured_tensor.StructuredTensor.from_pyval([[{
"a": {},
"b": [3, 4]
}, {
"a": {},
"b": [5]
}], [{
"a": {},
"b": [7, 8, 9]
}]])
self.assertAllEqual(2, st.rank)
self.assertEqual(2, st.shape[0])
self.assertEmpty(st.field_value("a").field_names())
def testWithUpdatesChecks(self):
pyval = {"a": 12, "b": {"c": 23, "d": {"e": 11}}}
st = StructuredTensor.from_pyval(pyval)
# Try to set non-existent sub-structure.
with self.assertRaisesRegex(
ValueError, r"cannot create new sub-field.*\('b', 'x'\).*is not set"):
st.with_updates({("b", "x", "e"): 5})
# Try to set with path to a non-sub-structure.
with self.assertRaisesRegex(
ValueError, r"cannot create new sub-field.*\('b', 'c'\).*is not a "
r"`StructuredTensor`"):
st.with_updates({("b", "c", "e"): 5})
# Try to apply function to non-existing value.
with self.assertRaisesRegex(
ValueError, r"cannot update.*\('b', 'd', 'x'\).*does not already "
r"exist"):
st.with_updates({("b", "d", "x"): lambda x: x + 1})
# Empty names not allowed.
with self.assertRaisesRegex(ValueError, r"does not allow empty names"):
st.with_updates({(): lambda x: x + 1})
with self.assertRaisesRegex(ValueError, r"does not allow empty names"):
st.with_updates({("b", ""): lambda x: x + 1})
# Parent and child nodes cannot be updated simultaneously.
with self.assertRaisesRegex(
ValueError, r"does not allow both parent and child nodes.*"
r"parent=\('b'.*child=\('b', 'd'"):
st.with_updates({("b", "d"): lambda x: x + 1, "a": 3, "b": 10})
# Invalid shape change.
with self.assertRaisesRegex(
ValueError,
r"`StructuredTensor.with_updates` failed for field \('c',\)"):
st_with_shape = StructuredTensor.from_pyval([[{
"c": {
"a": 5,
"b": 2
}
}], [{
"c": {
"a": 3,
"b": 1
}
}, {
"c": {
"a": 8,
"b": 18
}
}]])
st_with_shape.with_updates({("c", "a"): 3})
def testWithUpdatesDelete(self):
pyval = {"a": 12, "b": {"c": 23, "d": {"e": 11}}}
st = StructuredTensor.from_pyval(pyval)
updated_st = st.with_updates({("b", "c"): None}, validate=True)
self.assertNotIn("c", updated_st.field_value("b").field_names())
with self.assertRaisesRegex(ValueError,
r"cannot delete.*\('b', 'x'\).*not present"):
st.with_updates({("b", "x"): None}, validate=True)
with self.assertRaisesRegex(ValueError,
r"cannot delete.*\'x'.*not present"):
st.with_updates({"x": None}, validate=False)
# Test that nrows() and row_partitions are preserved after removal.
pyval = [[{"a": 1}, {"a": 2}], [{"a": 3}]]
st = StructuredTensor.from_pyval(pyval)
self.assertLen(st.row_partitions, 1)
self.assertAllEqual(st.nrows(), 2)
self.assertAllEqual(st.row_partitions[0].row_lengths(), [2, 1])
updated_st = st.with_updates({("a",): None}, validate=True)
self.assertLen(updated_st.row_partitions, 1)
self.assertAllEqual(updated_st.nrows(), 2)
self.assertAllEqual(updated_st.row_partitions[0].row_lengths(), [2, 1])
# Test that it works also for rank-1 and rank-0 empty results.
pyval = [{"a": 1}, {"a": 2}]
st = StructuredTensor.from_pyval(pyval)
self.assertEqual(st.rank, 1)
updated_st = st.with_updates({("a",): None}, validate=True)
self.assertEqual(updated_st.rank, 1)
# assertEqual won't work because nrows() returns a tensor, and
# assertEqual doesn't do the magic to convert them to numbers in a
# way that works in eager/non-eager mode.
self.assertAllEqual(updated_st.nrows(), 2)
pyval = {"a": [0, 1]}
st = StructuredTensor.from_pyval(pyval)
self.assertEqual(st.rank, 0)
updated_st = st.with_updates({("a",): None}, validate=True)
self.assertEqual(updated_st.rank, 0)
self.assertFalse(updated_st.row_partitions)
self.assertIsNone(updated_st.nrows())
def test_from_pyval_deep_row_partitions(self):
"""See b/179195750."""
st = structured_tensor.StructuredTensor.from_pyval([{
"foo": [{
"bar": [{
"baz": [b"FW"]
}]
}]
}])
st2 = st.field_value(("foo", "bar"))
self.assertLen(st2.row_partitions, st2.rank - 1)
def test_from_fields_deep_row_partitions(self):
"""Test a field with its own row_partition. See b/179195750."""
st = structured_tensor.StructuredTensor.from_pyval([[[{"baz": [b"FW"]}]]])
self.assertLen(st.row_partitions, st.rank - 1)
st2 = structured_tensor.StructuredTensor.from_fields(
fields={"bar": st}, shape=(None, None), validate=False)
st3 = st2.field_value("bar")
self.assertLen(st3.row_partitions, st3.rank - 1)
def test_structured_tensor_spec_shape_property(self):
spec = structured_tensor.StructuredTensorSpec([1, 2], {})
self.assertEqual(spec.shape.as_list(), [1, 2])
spec = structured_tensor.StructuredTensorSpec([None], {})
self.assertEqual(spec.shape.as_list(), [None])
def test_dynamic_ragged_shape_init_vector(self):
x = constant_op.constant([1, 2, 3, 4])
y = constant_op.constant([[1, 2], [3, 4], [5, 6], [7, 8]])
fields = {"x": x, "y": y}
nrows = constant_op.constant(4)
shape = tensor_shape.TensorShape((4,))
row_partitions = ()
rs = structured_tensor_dynamic._dynamic_ragged_shape_init(
fields, shape, nrows, row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape((4,))))
def test_dynamic_ragged_shape_init_scalar(self):
x = constant_op.constant([1, 2, 3, 4])
y = constant_op.constant([[1, 2], [3, 4], [5, 6], [7, 8]])
fields = {"x": x, "y": y}
nrows =
## Electron ID decisions stored in a ValueMap
process.PassingCicVeryLoose = cms.EDProducer("BtagGsfElectronSelector",
input = cms.InputTag( ELECTRON_COLL ),
selection = cms.InputTag('eidVeryLoose'),
cut = cms.double(14.5) ### 15== passing all iso,id,tip cuts
)
process.PassingCicLoose = process.PassingCicVeryLoose.clone()
process.PassingCicLoose.selection = cms.InputTag('eidLoose')
process.PassingCicMedium = process.PassingCicVeryLoose.clone()
process.PassingCicMedium.selection = cms.InputTag('eidMedium')
process.PassingCicTight = process.PassingCicVeryLoose.clone()
process.PassingCicTight.selection = cms.InputTag('eidTight')
process.PassingCicSuperTight = process.PassingCicVeryLoose.clone()
process.PassingCicSuperTight.selection = cms.InputTag('eidSuperTight')
process.PassingCicHyperTight1 = process.PassingCicVeryLoose.clone()
process.PassingCicHyperTight1.selection = cms.InputTag('eidHyperTight1')
process.PassingCicHyperTight2 = process.PassingCicVeryLoose.clone()
process.PassingCicHyperTight2.selection = cms.InputTag('eidHyperTight2')
process.PassingCicHyperTight3 = process.PassingCicVeryLoose.clone()
process.PassingCicHyperTight3.selection = cms.InputTag('eidHyperTight3')
process.PassingCicHyperTight4 = process.PassingCicVeryLoose.clone()
process.PassingCicHyperTight4.selection = cms.InputTag('eidHyperTight4')
## _____ _ __ __ _ _ _
## |_ _| __(_) __ _ __ _ ___ _ __ | \/ | __ _| |_ ___| |__ (_)_ __ __ _
## | || '__| |/ _` |/ _` |/ _ \ '__| | |\/| |/ _` | __/ __| '_ \| | '_ \ / _` |
## | || | | | (_| | (_| | __/ | | | | | (_| | || (__| | | | | | | | (_| |
## |_||_| |_|\__, |\__, |\___|_| |_| |_|\__,_|\__\___|_| |_|_|_| |_|\__, |
## |___/ |___/ |___/
##
# Trigger ##################
process.PassingHLT = cms.EDProducer("trgMatchedGsfElectronProducer",
InputProducer = cms.InputTag( ELECTRON_COLL ),
hltTags = cms.VInputTag(cms.InputTag(HLTPath,"", HLTProcessName)),
triggerEventTag = cms.untracked.InputTag("hltTriggerSummaryAOD","",HLTProcessName),
triggerResultsTag = cms.untracked.InputTag("TriggerResults","",HLTProcessName)
)
## _____ _ _ __ __
## | ____|_ _| |_ ___ _ __ _ __ __ _| | \ \ / /_ _ _ __ ___
## | _| \ \/ / __/ _ \ '__| '_ \ / _` | | \ \ / / _` | '__/ __|
## | |___ > <| || __/ | | | | | (_| | | \ V / (_| | | \__ \
## |_____/_/\_\\__\___|_| |_| |_|\__,_|_| \_/ \__,_|_| |___/
##
## Here we show how to use a module to compute an external variable
## process.load("JetMETCorrections.Configuration.DefaultJEC_cff")
## ak5PFResidual.useCondDB = False
process.superClusterDRToNearestJet = cms.EDProducer("DeltaRNearestJetComputer",
probes = cms.InputTag("goodSuperClusters"),
# ^^--- NOTA BENE: if probes are defined by ref, as in this case,
# this must be the full collection, not the subset by refs.
objects = cms.InputTag(JET_COLL),
objectSelection = cms.string(JET_CUTS + " && pt > 20.0"),
)
process.JetMultiplicityInSCEvents = cms.EDProducer("CandMultiplicityCounter",
probes = cms.InputTag("goodSuperClusters"),
objects = cms.InputTag(JET_COLL),
objectSelection = cms.string(JET_CUTS + " && pt > 20.0"),
)
process.SCConvRejVars = cms.EDProducer("ElectronConversionRejectionVars",
probes = cms.InputTag("goodSuperClusters")
)
process.GsfConvRejVars = process.SCConvRejVars.clone()
process.GsfConvRejVars.probes = cms.InputTag( ELECTRON_COLL )
process.PhotonDRToNearestJet = process.superClusterDRToNearestJet.clone()
process.PhotonDRToNearestJet.probes =cms.InputTag("goodPhotons")
process.JetMultiplicityInPhotonEvents = process.JetMultiplicityInSCEvents.clone()
process.JetMultiplicityInPhotonEvents.probes = cms.InputTag("goodPhotons")
process.PhotonConvRejVars = process.SCConvRejVars.clone()
process.PhotonConvRejVars.probes = cms.InputTag("goodPhotons")
process.GsfDRToNearestJet = process.superClusterDRToNearestJet.clone()
process.GsfDRToNearestJet.probes = cms.InputTag( ELECTRON_COLL )
process.JetMultiplicityInGsfEvents = process.JetMultiplicityInSCEvents.clone()
process.JetMultiplicityInGsfEvents.probes = cms.InputTag( ELECTRON_COLL )
process.ext_ToNearestJet_sequence = cms.Sequence(
#process.ak5PFResidual +
process.superClusterDRToNearestJet +
process.JetMultiplicityInSCEvents +
process.SCConvRejVars +
process.PhotonDRToNearestJet +
process.JetMultiplicityInPhotonEvents +
process.PhotonConvRejVars +
process.GsfDRToNearestJet +
process.JetMultiplicityInGsfEvents +
process.GsfConvRejVars
)
## _____ ____ __ _ _ _ _
## |_ _|_ _ __ _ | _ \ ___ / _(_)_ __ (_) |_(_) ___ _ __
## | |/ _` |/ _` | | | | |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \
## | | (_| | (_| | | |_| | __/ _| | | | | | |_| | (_) | | | |
## |_|\__,_|\__, | |____/ \___|_| |_|_| |_|_|\__|_|\___/|_| |_|
## |___/
##
process.Tag = process.PassingHLT.clone()
process.Tag.InputProducer = cms.InputTag( "PassingWP80" )
process.TagMatchedSuperClusterCandsClean = cms.EDProducer("ElectronMatchedCandidateProducer",
src = cms.InputTag("goodSuperClustersClean"),
ReferenceElectronCollection = cms.untracked.InputTag("Tag"),
deltaR = cms.untracked.double(0.3)
)
process.TagMatchedPhotonCands = process.TagMatchedSuperClusterCandsClean.clone()
process.TagMatchedPhotonCands.src = cms.InputTag("goodPhotons")
process.WP95MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.WP95MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP95")
process.WP90MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.WP90MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP90")
process.WP85MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.WP85MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP85")
process.WP80MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.WP80MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP80")
process.WP70MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.WP70MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP70")
process.WP60MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.WP60MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP60")
process.CicVeryLooseMatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicVeryLooseMatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicVeryLoose")
process.CicLooseMatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicLooseMatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicLoose")
process.CicMediumMatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicMediumMatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicMedium")
process.CicTightMatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicTightMatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicTight")
process.CicSuperTightMatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicSuperTightMatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicSuperTight")
process.CicHyperTight1MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicHyperTight1MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight1")
process.CicHyperTight2MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicHyperTight2MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight2")
process.CicHyperTight3MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicHyperTight3MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight3")
process.CicHyperTight4MatchedSuperClusterCandsClean = process.TagMatchedSuperClusterCandsClean.clone()
process.CicHyperTight4MatchedSuperClusterCandsClean.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight4")
process.WP95MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.WP95MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP95")
process.WP90MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.WP90MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP90")
process.WP85MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.WP85MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP85")
process.WP80MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.WP80MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP80")
process.WP70MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.WP70MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP70")
process.WP60MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.WP60MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingWP60")
process.CicVeryLooseMatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicVeryLooseMatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicVeryLoose")
process.CicLooseMatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicLooseMatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicLoose")
process.CicMediumMatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicMediumMatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicMedium")
process.CicTightMatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicTightMatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicTight")
process.CicSuperTightMatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicSuperTightMatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicSuperTight")
process.CicHyperTight1MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicHyperTight1MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight1")
process.CicHyperTight2MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicHyperTight2MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight2")
process.CicHyperTight3MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicHyperTight3MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight3")
process.CicHyperTight4MatchedPhotonCands = process.GsfMatchedPhotonCands.clone()
process.CicHyperTight4MatchedPhotonCands.ReferenceElectronCollection = cms.untracked.InputTag("PassingCicHyperTight4")
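## Optional refactoring sketch (illustration only, not part of the original configuration):
## the nearly identical matcher clones above could equivalently be generated in a loop with
## setattr(); the helper list "wp_names" below is hypothetical.
## wp_names = ["WP95", "WP90", "WP85", "WP80", "WP70", "WP60",
##             "CicVeryLoose", "CicLoose", "CicMedium", "CicTight", "CicSuperTight",
##             "CicHyperTight1", "CicHyperTight2", "CicHyperTight3", "CicHyperTight4"]
## for wp in wp_names:
##     scMatcher = process.TagMatchedSuperClusterCandsClean.clone()
##     scMatcher.ReferenceElectronCollection = cms.untracked.InputTag("Passing" + wp)
##     setattr(process, wp + "MatchedSuperClusterCandsClean", scMatcher)
##     phoMatcher = process.GsfMatchedPhotonCands.clone()
##     phoMatcher.ReferenceElectronCollection = cms.untracked.InputTag("Passing" + wp)
##     setattr(process, wp + "MatchedPhotonCands", phoMatcher)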
process.ele_sequence = cms.Sequence(
process.goodElectrons +
process.GsfMatchedSuperClusterCands +
process.GsfMatchedPhotonCands +
process.PassingWP95 +
process.PassingWP90 +
process.PassingWP85 +
process.PassingWP80 +
process.PassingWP70 +
process.PassingWP60 +
process.PassingCicVeryLoose +
process.PassingCicLoose +
process.PassingCicMedium +
process.PassingCicTight +
process.PassingCicSuperTight +
process.PassingCicHyperTight1 +
process.PassingCicHyperTight2 +
process.PassingCicHyperTight3 +
process.PassingCicHyperTight4 +
process.PassingHLT +
process.Tag +
process.TagMatchedSuperClusterCandsClean +
process.TagMatchedPhotonCands +
process.WP95MatchedSuperClusterCandsClean +
process.WP90MatchedSuperClusterCandsClean +
process.WP85MatchedSuperClusterCandsClean +
process.WP80MatchedSuperClusterCandsClean +
process.WP70MatchedSuperClusterCandsClean +
process.WP60MatchedSuperClusterCandsClean +
process.CicVeryLooseMatchedSuperClusterCandsClean +
process.CicLooseMatchedSuperClusterCandsClean +
process.CicMediumMatchedSuperClusterCandsClean +
process.CicTightMatchedSuperClusterCandsClean +
process.CicSuperTightMatchedSuperClusterCandsClean +
process.CicHyperTight1MatchedSuperClusterCandsClean +
process.CicHyperTight2MatchedSuperClusterCandsClean +
process.CicHyperTight3MatchedSuperClusterCandsClean +
process.CicHyperTight4MatchedSuperClusterCandsClean +
process.WP95MatchedPhotonCands +
process.WP90MatchedPhotonCands +
process.WP85MatchedPhotonCands +
process.WP80MatchedPhotonCands +
process.WP70MatchedPhotonCands +
process.WP60MatchedPhotonCands +
process.CicVeryLooseMatchedPhotonCands +
process.CicLooseMatchedPhotonCands +
process.CicMediumMatchedPhotonCands +
process.CicTightMatchedPhotonCands +
process.CicSuperTightMatchedPhotonCands +
process.CicHyperTight1MatchedPhotonCands +
process.CicHyperTight2MatchedPhotonCands +
process.CicHyperTight3MatchedPhotonCands +
process.CicHyperTight4MatchedPhotonCands
)
## _____ ___ ____ ____ _
## |_ _( _ ) | _ \ | _ \ __ _(_)_ __ ___
## | | / _ \/\ |_) | | |_) / _` | | '__/ __|
## | || (_> < __/ | __/ (_| | | | \__ \
## |_| \___/\/_| |_| \__,_|_|_| |___/
##
##
# Tag & probe selection ######
process.tagSC = cms.EDProducer("CandViewShallowCloneCombiner",
decay = cms.string("Tag goodSuperClustersClean"), # charge conjugate states are implied
checkCharge = cms.bool(False),
cut = cms.string("40 < mass < 1000"),
)
process.tagPhoton = process.tagSC.clone()
process.tagPhoton.decay = cms.string("Tag goodPhotons")
process.GsfGsf = process.tagSC.clone()
process.GsfGsf.decay = cms.string("goodElectrons goodElectrons")
process.tagGsf = process.tagSC.clone()
process.tagGsf.decay = cms.string("Tag goodElectrons")
process.tagWP95 = process.tagSC.clone()
process.tagWP95.decay = cms.string("Tag PassingWP95")
process.tagWP90 = process.tagSC.clone()
process.tagWP90.decay = cms.string("Tag PassingWP90")
process.tagWP85 = process.tagSC.clone()
process.tagWP85.decay = cms.string("Tag PassingWP85")
process.tagWP80 = process.tagSC.clone()
process.tagWP80.decay = cms.string("Tag PassingWP80")
process.tagWP70 = process.tagSC.clone()
process.tagWP70.decay = cms.string("Tag PassingWP70")
process.tagWP60 = process.tagSC.clone()
process.tagWP60.decay = cms.string("Tag PassingWP60")
process.tagCicVeryLoose = process.tagSC.clone()
process.tagCicVeryLoose.decay = cms.string("Tag PassingCicVeryLoose")
process.tagCicLoose = process.tagSC.clone()
process.tagCicLoose.decay = cms.string("Tag PassingCicLoose")
process.tagCicMedium = process.tagSC.clone()
process.tagCicMedium.decay = cms.string("Tag PassingCicMedium")
process.tagCicTight = process.tagSC.clone()
process.tagCicTight.decay = cms.string("Tag PassingCicTight")
process.tagCicSuperTight = process.tagSC.clone()
process.tagCicSuperTight.decay = cms.string("Tag PassingCicSuperTight")
process.tagCicHyperTight1 = process.tagSC.clone()
process.tagCicHyperTight1.decay = cms.string("Tag PassingCicHyperTight1")
process.tagCicHyperTight2 = process.tagSC.clone()
process.tagCicHyperTight2.decay = cms.string("Tag PassingCicHyperTight2")
process.tagCicHyperTight3 = process.tagSC.clone()
process.tagCicHyperTight3.decay = cms.string("Tag PassingCicHyperTight3")
process.tagCicHyperTight4 = process.tagSC.clone()
process.tagCicHyperTight4.decay = cms.string("Tag PassingCicHyperTight4")
process.elecMet = process.tagSC.clone()
process.elecMet.decay = cms.string("pfMet PassingWP90")
process.elecMet.cut = cms.string("mt > 0")
process.CSVarsTagGsf = cms.EDProducer("ColinsSoperVariablesComputer",
parentBoson = cms.InputTag("tagGsf")
)
process.CSVarsGsfGsf = process.CSVarsTagGsf.clone()
process.CSVarsGsfGsf.parentBoson = cms.InputTag("GsfGsf")
process.allTagsAndProbes = cms.Sequence(
process.tagSC +
process.tagPhoton +
process.tagGsf +
process.GsfGsf +
process.tagWP95 +
process.tagWP90 +
process.tagWP85 +
process.tagWP80 +
process.tagWP70 +
process.tagWP60 +
process.tagCicVeryLoose +
process.tagCicLoose +
process.tagCicMedium +
process.tagCicTight +
process.tagCicSuperTight +
process.tagCicHyperTight1 +
process.tagCicHyperTight2 +
process.tagCicHyperTight3 +
process.tagCicHyperTight4 +
process.elecMet +
process.CSVarsTagGsf +
process.CSVarsGsfGsf
)
## __ __ ____ __ __ _ _
## | \/ |/ ___| | \/ | __ _| |_ ___| |__ ___ ___
## | |\/| | | | |\/| |/ _` | __/ __| '_ \ / _ \/ __|
## | | | | |___ | | | | (_| | || (__| | | | __/\__ \
## |_| |_|\____| |_| |_|\__,_|\__\___|_| |_|\___||___/
##
process.McMatchTag = cms.EDProducer("MCTruthDeltaRMatcherNew",
matchPDGId = cms.vint32(11),
src = cms.InputTag("Tag"),
distMin = cms.double(0.3),
matched = cms.InputTag("genParticles"),
checkCharge = cms.bool(True)
)
process.McMatchSC = cms.EDProducer("MCTruthDeltaRMatcherNew",
matchPDGId = cms.vint32(11),
src = cms.InputTag("goodSuperClustersClean"),
distMin = cms.double(0.3),
matched = cms.InputTag("genParticles")
)
process.McMatchPhoton = process.McMatchSC.clone()
process.McMatchPhoton.src = cms.InputTag("goodPhotons")
process.McMatchGsf = process.McMatchTag.clone()
process.McMatchGsf.src = cms.InputTag("goodElectrons")
process.McMatchWP95 = process.McMatchTag.clone()
process.McMatchWP95.src = cms.InputTag("PassingWP95")
process.McMatchWP90 = process.McMatchTag.clone()
process.McMatchWP90.src = cms.InputTag("PassingWP90")
process.McMatchWP85 = process.McMatchTag.clone()
process.McMatchWP85.src = cms.InputTag("PassingWP85")
process.McMatchWP80 = process.McMatchTag.clone()
process.McMatchWP80.src = cms.InputTag("PassingWP80")
process.McMatchWP70 = process.McMatchTag.clone()
process.McMatchWP70.src = cms.InputTag("PassingWP70")
process.McMatchWP60 = process.McMatchTag.clone()
process.McMatchWP60.src = cms.InputTag("PassingWP60")
process.McMatchCicVeryLoose = process.McMatchTag.clone()
process.McMatchCicVeryLoose.src = cms.InputTag("PassingCicVeryLoose")
process.McMatchCicLoose = process.McMatchTag.clone()
process.McMatchCicLoose.src = cms.InputTag("PassingCicLoose")
process.McMatchCicMedium = process.McMatchTag.clone()
process.McMatchCicMedium.src = cms.InputTag("PassingCicMedium")
process.McMatchCicTight = process.McMatchTag.clone()
process.McMatchCicTight.src = cms.InputTag("PassingCicTight")
process.McMatchCicSuperTight = process.McMatchTag.clone()
process.McMatchCicSuperTight.src = cms.InputTag("PassingCicSuperTight")
process.McMatchCicHyperTight1 = process.McMatchTag.clone()
process.McMatchCicHyperTight1.src = cms.InputTag("PassingCicHyperTight1")
process.McMatchCicHyperTight2 = process.McMatchTag.clone()
process.McMatchCicHyperTight2.src = cms.InputTag("PassingCicHyperTight2")
process.McMatchCicHyperTight3 = process.McMatchTag.clone()
process.McMatchCicHyperTight3.src = cms.InputTag("PassingCicHyperTight3")
process.McMatchCicHyperTight4 = process.McMatchTag.clone()
process.McMatchCicHyperTight4.src = cms.InputTag("PassingCicHyperTight4")
process.mc_sequence = cms.Sequence(
process.McMatchTag +
process.McMatchSC +
process.McMatchPhoton +
process.McMatchGsf +
process.McMatchWP95 +
process.McMatchWP90 +
process.McMatchWP85 +
process.McMatchWP80 +
process.McMatchWP70 +
process.McMatchWP60 +
process.McMatchCicVeryLoose +
process.McMatchCicLoose +
process.McMatchCicMedium +
process.McMatchCicTight +
process.McMatchCicSuperTight +
process.McMatchCicHyperTight1 +
process.McMatchCicHyperTight2 +
process.McMatchCicHyperTight3 +
process.McMatchCicHyperTight4
)
############################################################################
## _____ _ _ ____ _ _ _ ____ ##
## |_ _|_ _ __ _( )_ __ ( ) _ \ _ __ ___ | |__ ___ | \ | |/ ___| ##
## | |/ _` |/ _` |/| '_ \|/| |_) | '__/ _ \| '_ \ / _ \ | \| | | _ ##
## | | (_| | (_| | | | | | | __/| | | (_) | |_) | __/ | |\ | |_| | ##
## |_|\__,_|\__, | |_| |_| |_| |_| \___/|_.__/ \___| |_| \_|\____| ##
## |___/ ##
## ##
############################################################################
def solve_cubic_wlc(a, b, c, selected_root):
    p = b - a * a / 3.0
    q = 2.0 * a * a * a / 27.0 - a * b / 3.0 + c
det = q * q / 4.0 + p * p * p / 27.0
# The model changes behaviour when the discriminant crosses zero. From that point on we need a different
# root resolution mechanism.
sol = np.zeros(det.shape)
mask = det >= 0
sqrt_det = np.sqrt(det[mask])
t1 = -q[mask] * 0.5 + sqrt_det
t2 = -q[mask] * 0.5 - sqrt_det
sol[mask] = np.cbrt(t1) + np.cbrt(t2)
sqrt_minus_p = np.sqrt(-p[np.logical_not(mask)])
q_masked = q[np.logical_not(mask)]
asin_argument = 3.0 * np.sqrt(3.0) * q_masked / (2.0 * sqrt_minus_p ** 3)
asin_argument = np.clip(asin_argument, -1.0, 1.0)
if selected_root == 0:
sol[np.logical_not(mask)] = (
2.0 / np.sqrt(3.0) * sqrt_minus_p * np.sin((1.0 / 3.0) * np.arcsin(asin_argument))
)
elif selected_root == 1:
sol[np.logical_not(mask)] = (
-2.0
/ np.sqrt(3.0)
* sqrt_minus_p
* np.sin((1.0 / 3.0) * np.arcsin(asin_argument) + np.pi / 3.0)
)
elif selected_root == 2:
sol[np.logical_not(mask)] = (
2.0
/ np.sqrt(3.0)
* sqrt_minus_p
* np.cos((1.0 / 3.0) * np.arcsin(asin_argument) + np.pi / 6.0)
)
else:
raise RuntimeError("Invalid root selected. Choose 0, 1 or 2.")
return sol - a / 3.0
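# Worked example (added for illustration; the helper name below is not part of the original
# module): for the cubic (x - 1)(x - 2)(x - 3) = x**3 - 6x**2 + 11x - 6 the discriminant "det"
# is negative, so the trigonometric branch above is used. selected_root = 0, 1, 2 return the
# roots 2, 1 and 3 respectively, i.e. selected_root = 2 picks the largest real root, which is
# the branch invWLC below selects.
def _solve_cubic_wlc_example():
    a = np.array([-6.0])
    b = np.array([11.0])
    c = np.array([-6.0])
    return [solve_cubic_wlc(a, b, c, root) for root in (0, 1, 2)]  # ~[array([2.]), array([1.]), array([3.])]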
def invWLC_equation(d, Lp, Lc, St, kT=4.11):
return solve_formatter(WLC_equation("f", Lp, Lc, St, kT), "f", d)
def invWLC_equation_tex(d, Lp, Lc, St, kT=4.11):
return solve_formatter_tex(WLC_equation_tex("f", Lp, Lc, St, kT), "f", d)
def invWLC(d, Lp, Lc, St, kT=4.11):
"""Inverted Odijk's Worm-like Chain model
References:
1. T. Odijk, Stiff Chains and Filaments under Tension, Macromolecules
28, 7016-7018 (1995).
2. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Stretching
DNA with optical tweezers., Biophysical journal 72, 1335-46 (1997).
Parameters
----------
d : array_like
extension [um]
Lp : float
persistence length [nm]
Lc : float
contour length [um]
St : float
stretching modulus [pN]
kT : float
Boltzmann's constant times temperature (default = 4.11 [pN nm]) [pN nm]
"""
#
# In short, this model was analytically solved, since it is basically a cubic equation. There are some issues
# with this inverted model. Its derivatives are not defined everywhere, as they contain divisions by zero in
# specific places due to the cube roots involved. This was worked around by preventing the denominators in these
# equations to go to zero by clamping them to a lower bound. This results in far better numerical behaviour than
# even the finite difference approximations.
#
# Define:
# alpha = (distance/Lc) - 1.0
# beta = 1.0 / St
# gamma = kT / Lp
#
# This allows us to obtain simple polynomial coefficients for the Odijk model. We divide the polynomial by the
# leading coefficient to make things simpler for us. This leads to the following equations:
#
# denom = beta ** 2.0
# a = - (2.0 * alpha * beta) / denom = - 2.0 * alpha / beta
# b = alpha * alpha / denom = (alpha * alpha) / (beta * beta)
# c = - 0.25 * gamma / denom = - gamma / (4 * beta * beta)
#
# We can see that parameterizing w.r.t. St is easier than using beta, so we define:
alpha = (d / Lc) - 1.0
gamma = kT / Lp
a = -2.0 * alpha * St
b = (alpha * alpha) * (St * St)
c = -0.25 * gamma * (St * St)
return solve_cubic_wlc(a, b, c, 2)
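# Quick sanity check (illustrative sketch, not part of the original module; the helper name and
# parameter values are made up, but use typical orders of magnitude for DNA): the force returned
# by invWLC must be a root of the cubic it was built from, so the residual of
# x**3 + a*x**2 + b*x + c evaluated at that force should vanish up to floating point error.
def _invwlc_residual_check():
    Lp, Lc, St, kT = 50.0, 16.0, 1500.0, 4.11   # persistence/contour length, modulus, kT (illustrative)
    d = np.linspace(0.1, 0.95, 20) * Lc          # extensions below the contour length [um]
    alpha = (d / Lc) - 1.0
    gamma = kT / Lp
    a = -2.0 * alpha * St
    b = (alpha * alpha) * (St * St)
    c = -0.25 * gamma * (St * St)
    f = invWLC(d, Lp, Lc, St, kT)
    return np.max(np.abs(f ** 3 + a * f ** 2 + b * f + c))  # expected to be ~0 (numerical noise)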
def calc_root1_invwlc(det, p, q, dp_da, dq_da, dq_db):
# Calculate the first root for det < 0
# Note that dp/dc = 0, dp_db = 1, dq_dc = 1
sqrt_det = np.sqrt(det)
term1 = np.abs(sqrt_det - 0.5 * q)  # Technically, the absolute value is not part of this solution,
term2 = np.abs(
    -sqrt_det - 0.5 * q
)  # but raising negative values to a fractional power can cause numerical issues, so we take it anyway.
t1 = term1 ** (2 / 3)
t2 = term2 ** (2 / 3)
# Derivatives of cube roots are not defined everywhere.
#
# The derivatives go over the bit where the cube roots are non-differentiable in the region of the model where it
# switches from entropic to enthalpic behaviour. Even the finite difference derivatives look terrible here.
#
# When we approach Lc, t2 tends to zero, which causes problems with the division later.
t1[abs(t1) < 1e-5] = 1e-5
t2[abs(t2) < 1e-5] = 1e-5
# When the discriminant goes to zero however, it means there are now repeated roots.
sqrt_det[abs(sqrt_det) < 1e-5] = 1e-5
# Compute all the elements required to evaluate the chain rule
dy_ddet = 1.0 / (6.0 * sqrt_det * t1) - 1.0 / (6.0 * sqrt_det * t2)
dy_dq = -1.0 / (6.0 * t1) - 1.0 / (6.0 * t2)
dy_da = -(1.0 / 3.0)
ddet_dp = p * p / 9.0
ddet_dq = 0.5 * q
# Total derivatives, terms multiplied by zero are omitted. Terms that are one are also omitted.
# dp_db = dq_dc = 1
total_dy_da = dy_ddet * ddet_dp * dp_da + dy_ddet * ddet_dq * dq_da + dy_dq * dq_da + dy_da
total_dy_db = dy_ddet * ddet_dp + dy_ddet * ddet_dq * dq_db + dy_dq * dq_db
total_dy_dc = dy_ddet * ddet_dq + dy_dq
return total_dy_da, total_dy_db, total_dy_dc
def calc_triple_root_invwlc(p, q, dp_da, dq_da, dq_db, root):
# If we define:
# sqmp = sqrt(-p)
# F = 3 * sqrt(3) * q / (2 * sqmp**3 )
#
# Then the solution is:
# 2 /sqrt(3) * sqmp * cos((1/3) * asin(F) + pi/6) - a / 3
#
# Note that dp/dc = 0, dp_db = 1, dq_dc = 1
# The rest of this file is simply applying the chain rule.
sqmp = np.sqrt(-p)
F = 3.0 * np.sqrt(3.0) * q / (2.0 * sqmp ** 3)
dF_dsqmp = -9 * np.sqrt(3) * q / (2 * sqmp ** 4)
dF_dq = 3.0 * np.sqrt(3) / (2.0 * sqmp ** 3)
dsqmp_dp = -1.0 / (2.0 * sqmp)
dy_da = -1.0 / 3.0
if root == 0:
arg = np.arcsin(F) / 3.0
dy_dsqmp = 2.0 * np.sqrt(3.0) * np.sin(arg) / 3.0
dy_dF = 2.0 * np.sqrt(3.0) * sqmp * np.cos(arg) / (9.0 * np.sqrt(1.0 - F ** 2))
elif root == 1:
arg = np.arcsin(F) / 3.0 + np.pi / 3.0
dy_dsqmp = -2.0 * np.sqrt(3.0) * np.sin(arg) / 3.0
dy_dF = -2.0 * np.sqrt(3.0) * sqmp * np.cos(arg) / (9.0 * np.sqrt(1.0 - F ** 2))
elif root == 2:
arg = np.arcsin(F) / 3.0 + np.pi / 6.0
dy_dsqmp = 2.0 * np.sqrt(3.0) * np.cos(arg) / 3.0
dy_dF = -2.0 * np.sqrt(3.0) * sqmp * np.sin(arg) / (9.0 * np.sqrt(1.0 - F * F))
# Total derivatives
total_dy_da = (
dy_dsqmp * dsqmp_dp * dp_da + dy_dF * (dF_dsqmp * dsqmp_dp * dp_da + dF_dq * dq_da) + dy_da
)
total_dy_db = dy_dsqmp * dsqmp_dp + dy_dF * (dF_dsqmp * dsqmp_dp + dF_dq * dq_db)
total_dy_dc = dy_dF * dF_dq
return total_dy_da, total_dy_db, total_dy_dc
def invwlc_root_derivatives(a, b, c, selected_root):
"""Calculate the root derivatives of a cubic polynomial with respect to the polynomial coefficients.
For a polynomial of the form:
x**3 + a * x**2 + b * x + c = 0
Note that this is not a general root-finding function, but tailored for use with the inverted WLC model. For det < 0
it returns the derivatives of the first root. For det > 0 it returns the derivatives of the third root.
Parameters
----------
a, b, c: array_like
Coefficients of the reduced cubic polynomial.
x**3 + a x**2 + b x + c = 0
selected_root: integer
which root to compute the derivative of
"""
p = b - a * a / 3.0
q = 2.0 * a * a * a / 27.0 - a * b / 3.0 + c
det = q * q / 4.0 + p * p * p / 27.0
if hd == 0:
cols = cols + ['zeroHdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_2:
cols = cols + ['below2Hdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_3:
cols = cols + ['below3Hdistance_genes']
writing_data = writing_data + [gene]
self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,cols] = writing_data
class extract_barcodes_NN_new():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, registered_counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df):
self.counts_df = registered_counts
self.analysis_parameters = analysis_parameters
self.experiment_config = experiment_config
self.codebook_df = codebook_df
self.logger = selected_logger()
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = self.counts_df.loc[0]['barcode_length']
self.registration_errors = Registration_errors()
self.stitching_channel = self.counts_df['stitching_channel'].iloc[0]
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
column_names.append('barcode_reference_dot_id')  # append() mutates the list in place and returns None, so don't reassign
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in the reference round)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
for k, v in groupby(idx_selected_coords_compare):
    group = list(v)  # materialise once: the groupby iterator can only be consumed a single time
    if len(group) > 3:
        print("key: '{}'--> group: {}".format(k, len(group)))
# ref_idx = indices[idx_selected_coords_compare].squeeze()
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
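# Illustration of the pairing logic in barcode_nn (comment only; the coordinates are made up):
#
#   reference = np.array([[10.0, 10.0], [50.0, 50.0]])              # reference-round coords
#   others    = np.array([[10.4,  9.8], [49.5, 50.2], [90.0, 90.0]])
#   nn = NearestNeighbors(n_neighbors=1, metric="euclidean").fit(reference)
#   dists, indices = nn.kneighbors(others, return_distance=True)
#   np.where(dists <= 3)[0]  ->  array([0, 1])
#
# With a 3 px extraction resolution the first two dots inherit the dot_id of their nearest
# reference dot as barcode_reference_dot_id, while (90.0, 90.0) has no partner and is returned
# in compare_df so it can act as a reference in a later round.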
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
self.barcoded_spec = data_models.barcode_analysis_df
if not self.counts_df[self.counts_df['dot_id'].isnull()].empty:
print('shitty FOV')
self.all_combine_df = pd.concat([self.counts_df,self.barcoded_spec],axis=1)
elif (min(self.counts_df['min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
self.counts_df['min_number_matching_dots_registration'] = registration_errors.registration_below_extraction_resolution
self.all_combine_df = pd.concat([self.counts_df,self.barcoded_spec],axis=1)
else:
self.counts_df = pd.concat([self.counts_df,self.barcoded_spec],axis=1)
self.fish_counts = self.counts_df.loc[self.counts_df.channel != self.stitching_channel,:]
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
self.index_df = pd.DataFrame(index=self.fish_counts.index,
columns = ['barcode_reference_dot_id','raw_barcodes'])
barcoded_df_list = []
self.barcoded_fov_df = pd.DataFrame()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.fish_counts, round_num, self.barcodes_extraction_resolution)
barcoded_df_list.append(barcoded_df)
self.fish_counts = compare_df
self.barcoded_fov_df = pd.concat(barcoded_df_list, ignore_index=True)
self.fish_counts['barcode_reference_dot_id'] = self.fish_counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.fish_counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
# Process the barcodes only if there are dots in the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
self.all_combine_df_list = []
self.all_combine_df = pd.DataFrame()
for idx, (name, group) in enumerate(self.grpd):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
group_df = group
group_df['raw_barcodes'] = barcode.tostring()
group_df['all_Hdistance_genes'] = gene
group_df['number_positive_bits'] = barcode.sum()
group_df['hamming_distance'] = hd
group_df['zeroHdistance_genes'] = np.nan
group_df['below2Hdistance_genes'] = np.nan
group_df['below3Hdistance_genes'] = np.nan
if hd == 0:
group_df['zeroHdistance_genes'] = gene
if hd < hd_2:
group_df['below2Hdistance_genes'] = gene
if hd < hd_3:
group_df['below3Hdistance_genes'] = gene
self.all_combine_df_list.append(group_df)
# self.all_combine_df = pd.concat(self.all_combine_df_list, axis=0,copy=False)
chunk_size = 500
self.all_combine_df = merge_with_concat((merge_with_concat(dfs) for dfs in chunk_dfs(self.all_combine_df_list, chunk_size)))
else:
self.all_combine_df = self.fish_counts # add the missing column and control error for stitching
def decoder_fun(registered_counts, analysis_parameters,experiment_info,codebook_df):
dc = extract_barcodes_NN_new(registered_counts, analysis_parameters,experiment_info,codebook_df)
dc.run_extraction()
return dc.all_combine_df
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
# chunk_size = 100
# self.all_combine_df = merge_with_concat((merge_with_concat(dfs) for dfs in chunk_dfs(self.all_combine_df_list, chunk_size)))
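# Small illustration (added for clarity; the helper name, gene labels and 16-bit codes are
# made up) of how the helpers above combine with a hamming nearest-neighbour search to decode
# barcodes. sklearn's "hamming" metric returns the *fraction* of mismatching bits, which is
# why the extraction code compares distances against 2 / barcode_length and 3 / barcode_length.
def _hamming_decode_example():
    codebook_array = np.array([
        [1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],   # hypothetical "GeneA"
        [0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0],   # hypothetical "GeneB"
    ], dtype=np.int8)
    nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
    nn_sklearn.fit(codebook_array)
    measured = codebook_array[0].copy()
    measured[4] = 0                                          # one dropped bit
    dists, index = nn_sklearn.kneighbors(measured.reshape(1, -1), return_distance=True)
    return index[0, 0], dists[0, 0]                          # -> (0, 0.0625), i.e. "GeneA" at 1/16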
def extract_barcodes_NN_fast(registered_counts_df, analysis_parameters:Dict,codebook_df):
"""
Function used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
logger = selected_logger()
barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
barcode_length = registered_counts_df.loc[0]['barcode_length']
registration_errors = Registration_errors()
stitching_channel = registered_counts_df['stitching_channel'].iloc[0]
fish_counts = registered_counts_df.loc[registered_counts_df.channel != stitching_channel,:]
stitching_channel_counts = registered_counts_df.loc[registered_counts_df.channel == stitching_channel,:]
fish_counts.dropna(subset=['dot_id'],inplace=True)
# Starting level for selection of dots
dropping_counts = fish_counts.copy(deep=True)
all_decoded_dots_list = []
if fish_counts['r_px_registered'].isnull().values.any():
all_decoded_dots_df = pd.DataFrame(columns = fish_counts.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return fish_counts, all_decoded_dots_df
else:
for ref_round_number in np.arange(1,barcode_length+1):
#ref_round_number = 1
reference_round_df = dropping_counts.loc[dropping_counts.round_num == ref_round_number,:]
# Step one (all dots not in the reference round)
compare_df = dropping_counts.loc[dropping_counts.round_num != ref_round_number,:]
if (not reference_round_df.empty) and (not compare_df.empty):
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_round_df[['r_px_registered','c_px_registered']])
dists, indices = nn.kneighbors(compare_df[['r_px_registered','c_px_registered']], return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_distances_below_resolution = np.where(dists <= barcodes_extraction_resolution)[0]
comp_idx = idx_distances_below_resolution
ref_idx = indices[comp_idx].flatten()
# Subset the dataframe according to the selected points
# The reference selected will have repeated points
comp_selected_df = compare_df.iloc[comp_idx]
ref_selected_df = reference_round_df.iloc[ref_idx]
# The size of ref_selected_df without duplicates may be smaller than reference_round_df if
# some of the dots in reference_round_df have no neighbours.
# Test approach where we get rid of the single dots.
comp_selected_df.loc[:,'barcode_reference_dot_id'] = ref_selected_df['dot_id'].values
ref_selected_df_no_duplicates = ref_selected_df.drop_duplicates()
ref_selected_df_no_duplicates.loc[:,'barcode_reference_dot_id'] = ref_selected_df_no_duplicates['dot_id'].values
barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates], axis=0,ignore_index=False)
barcoded_round_grouped = barcoded_round.groupby('barcode_reference_dot_id')
for brdi, grp in barcoded_round_grouped:
barcode = np.zeros([barcode_length],dtype=np.int8)
barcode[grp.round_num.values.astype(np.int8)-1] = 1
#hamming_dist, index_gene = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
#gene= codebook_df.loc[index_gene.reshape(index_gene.shape[0]),'Gene'].tolist()
barcode = barcode.tostring()
ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'raw_barcodes'] = barcode
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'decoded_gene_name'] = gene
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'hamming_distance'] = hamming_dist.flatten()[0]
#fish_counts.loc[grp.index,'barcode_reference_dot_id'] = brdi
#fish_counts.loc[grp.index,'raw_barcodes'] = barcode
#dists, index = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
all_decoded_dots_list.append(ref_selected_df_no_duplicates)
all_decoded_dots_df = pd.concat(all_decoded_dots_list,ignore_index=False)
compare_df = compare_df.drop(comp_selected_df.index)
dropping_counts = compare_df
codebook_df = convert_str_codebook(codebook_df,'Code')
codebook_array = make_codebook_array(codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
all_barcodes = np.vstack(all_decoded_dots_df.raw_barcodes.map(lambda x: np.frombuffer(x, np.int8)).values)
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
all_decoded_dots_df.loc[:,'decoded_genes'] = genes
all_decoded_dots_df.loc[:,'hamming_distance'] = dists_arr
all_decoded_dots_df.loc[:,'number_positive_bits'] = all_barcodes.sum(axis=1)
all_decoded_dots_df = pd.concat([all_decoded_dots_df,stitching_channel_counts])
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return barcoded_round, all_decoded_dots_df
# chunk_size = 100
# self.all_combine_df = merge_with_concat((merge_with_concat(dfs) for dfs in chunk_dfs(self.all_combine_df_list, chunk_size)))
# ---------------------------------------------------
class extract_barcodes():
"""
Class used to extract the barcodes from the registered
counts
Parameters:
-----------
counts: pandas dataframe
contains all the counts for a fov
pxl: int
size of the hood to search for positive
barcodes
"""
def __init__(self, counts, pxl:int,save=True):
self.counts = counts
self.pxl = pxl
self.save = save
self.logger = logging.getLogger(__name__)
@staticmethod
def combine_coords(counts, round_num, counts_type='registered'):
data_reference = counts.loc[counts['round_num'] == round_num]
if counts_type == 'registered':
r_px = data_reference.r_px_registered.to_list()
c_px = data_reference.c_px_registered.to_list()
elif counts_type == 'original':
r_px = data_reference.r_px_original.to_list()
c_px = data_reference.c_px_original.to_list()
coords = np.array(list(zip(r_px,c_px)))
position_idx = data_reference.index
return coords, position_idx
@staticmethod
def dots_hoods(coords,pxl):
r_tl = coords[:,0]-pxl
r_br = coords[:,0]+pxl
c_tl = coords[:,1]-pxl
c_tr = coords[:,1]+pxl
r_tl = r_tl[:,np.newaxis]
r_br = r_br[:,np.newaxis]
c_tl = c_tl[:,np.newaxis]
c_tr = c_tr[:,np.newaxis]
chunks_coords = np.hstack((r_tl,r_br,c_tl,c_tr))
chunks_coords = chunks_coords.astype(int)
return chunks_coords
@staticmethod
def barcode_detection(all_coords_image,chunk_coords,pxl):
selected_region = all_coords_image[:,chunk_coords[0]:chunk_coords[1]+1,chunk_coords[2]:chunk_coords[3]+1]
barcode = np.sum(selected_region, axis=(1,2))
return barcode, selected_region
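# Example of the hood logic above (comment only; numbers are made up): with pxl = 1 and a dot
# at (r, c) = (10, 20), dots_hoods returns chunk_coords = [9, 11, 19, 21]; barcode_detection
# then sums the 3x3 window around (10, 20) in every round, so barcode[k] > 0 means round k+1
# had at least one dot within 1 px of the query dot.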
def run_extraction(self):
# add check to make sure that the counts are there
column_names = ['r_px_original','c_px_original','dot_id','fov_num','round_num','dot_intensity_norm',
'dot_intensity_not','selected_thr' ,'channel','r_shift','c_shift','r_px_registered',
'c_px_registered','pixel_microns']
column_names_output = column_names.copy()
column_names_output.append('pxl_hood_size')
column_names_output.append('raw_barcodes')
self.barcodes_binary_images = {}
if len(self.counts['r_px_registered']) > 1:
r_num = int(self.counts['r_px_registered'].max())
c_num = int(self.counts['c_px_registered'].max())
rounds_num = len(self.counts['round_num'].unique())
all_coords_image = np.zeros([rounds_num,r_num+1,c_num+1],dtype=np.int8)
for round_num in np.arange(1,rounds_num+1):
coords, ref_position_idx = self.combine_coords(self.counts, round_num, counts_type='registered')
if np.any(np.isnan(coords)):
stop = True
break
else:
stop = False
coords = coords.astype(int)  # np.int is a deprecated alias of the builtin int
all_coords_image[round_num-1,coords[:,0],coords[:,1]] = 1
if stop:
self.logger.error(f'round {round_num} has NaN coordinates {coords}, no barcodes extracted')
self.output = pd.DataFrame(np.nan,index=[0],columns = column_names_output)
else:
self.output = pd.DataFrame(columns = column_names_output)
self.output['pxl_hood_size'] = np.nan
| |
# 02_neural_networks/2-6_sentiment_analysis/2-6_orig/network_v4.py
'''
NOTE: this is a reduced project file, it does not contain all the steps from the nanodegree lecture
'''
######### parameters
min_count = 100
polarity_cutoff = 0.2
####### Lesson: Curate a Dataset
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('../data/reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('../data/labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
'''
Note: The data in reviews.txt we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.
'''
print("number of reviews in dataset: ", len(reviews))
################################################
############# Project 6: Reducing Noise by Strategically Reducing the Vocabulary
################################################
'''
TODO: Improve SentimentNetwork's performance by reducing more noise in the vocabulary. Specifically, do the following:
Copy the SentimentNetwork class from the previous project into the following cell.
Modify pre_process_data:
Add two additional parameters: min_count and polarity_cutoff
Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
Andrew's solution only calculates a positive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
Change so words are only added to the vocabulary if they occur in the vocabulary more than min_count times.
Change so words are only added to the vocabulary if the absolute value of their positive-to-negative ratio is at least polarity_cutoff
Modify __init__:
Add the same two parameters (min_count and polarity_cutoff) and use them when you call pre_process_data
'''
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
# -Modify it according to the above instructions
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
# TODO Add the same two parameters
# (min_count and polarity_cutoff) and use them when you call pre_process_data
def __init__(self, reviews, labels, min_count, polarity_cutoff, hidden_nodes = 10, learning_rate = 0.1):
"""Create a SentimenNetwork with the given settings
Args:
reviews(list) - List of reviews used for training
labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
min_count(int) - Words are only added to the vocabulary if they occur more than min_count times
polarity_cutoff(float) - Words are only added to the vocabulary if the absolute value of their
positive-to-negative ratio is at least polarity_cutoff
hidden_nodes(int) - Number of nodes to create in the hidden layer
learning_rate(float) - Learning rate to use while training
"""
# Assign a seed to our random number generator to ensure we get
# reproducible results during development
np.random.seed(1)
# process the reviews and their associated labels so that everything
# is ready for training
self.pre_process_data(reviews, labels, min_count, polarity_cutoff)
# Build the network to have the number of hidden nodes and the learning rate that
# were passed into this initializer. Make the same number of input nodes as
# there are vocabulary words and create a single output node.
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
# TODO Add two additional parameters: min_count and polarity_cutoff
def pre_process_data(self, reviews, labels, min_count, polarity_cutoff):
# TODO Calculate the positive-to-negative ratios of words used in the reviews.
# (You can use code you've written elsewhere in the notebook, but we are
# moving it into the class like we did with other helper code earlier.)
#### NOTE: using these objects from above outside the class
from collections import Counter
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
# Loop over all the words in all the reviews and increment the counts in the appropriate counter objects
for i in range(len(reviews)):
for word in reviews[i].split(' '):
total_counts.update([word])
if labels[i] == 'POSITIVE':
positive_counts.update([word])
if labels[i] == 'NEGATIVE':
negative_counts.update([word])
# TODO Andrew's solution only calculates a positive-to-negative ratio for words that
# occur at least 50 times. This keeps the network from attributing too much
# sentiment to rarer words. You can choose to add this to your solution if you would like.
# Calculate the ratios of positive and negative uses of the most common words
# Convert ratios to logs
pos_neg_ratios = Counter()
for word, number in total_counts.items():
if number > min_count:
ratio = positive_counts[word] / float(negative_counts[word]+1)
pos_neg_ratios.update({word: ratio})
for word, number in pos_neg_ratios.items():
if number > 1:
pos_neg_ratios[word] = np.log(number)
else:
pos_neg_ratios[word] = -np.log(1 / (number + 0.01))  # use this word's ratio ("number"), not the stale "ratio" left over from the earlier loop
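# Worked numbers (added for clarity): a word seen twice as often in positive reviews
# (ratio 2.0) maps to log(2) ~= +0.69, while one seen twice as often in negative reviews
# (ratio 0.5) maps to -log(1/0.51) ~= -0.67, so the log transform is roughly symmetric
# around 0 for equally positive and negative words.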
# TODO Change so words are only added to the vocabulary if they occur
# more than min_count times.
# TODO Change so words are only added to the vocabulary if the absolute value
# of their positive-to-negative ratio is at least polarity_cutoff
review_vocab = set()
for rev in reviews:
for word in rev.split(' '):
if total_counts[word] > min_count and np.abs(pos_neg_ratios[word]) >= polarity_cutoff:
review_vocab.add(word)
# Convert the vocabulary set to a list so we can access words via indices
self.review_vocab = list(review_vocab)
label_vocab = set()
for l in labels:
label_vocab.add(l)
# Convert the label vocabulary set to a list so we can access labels via indices
self.label_vocab = list(label_vocab)
# Store the sizes of the review and label vocabularies.
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
# Create a dictionary of words in the vocabulary mapped to index positions
self.word2index = {}
for idx, word in enumerate(self.review_vocab):
self.word2index[word] = idx
# Create a dictionary of labels mapped to index positions
self.label2index = {}
for idx, label in enumerate(self.label_vocab):
self.label2index[label] = idx
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Store the number of nodes in input, hidden, and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Store the learning rate
self.learning_rate = learning_rate
# Initialize weights
self.weights_0_1 = None
self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
self.weights_1_2 = None
sigma = self.hidden_nodes**-0.5 # NOTE: recommended in the course
self.weights_1_2 = np.random.normal(0, sigma, (self.hidden_nodes, self.output_nodes))
# TODO: You no longer need a separate input layer, so remove any mention of self.layer_0
# TODO: You will be dealing with the old hidden layer more directly, so create self.layer_1,
# a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero
self.layer_1 = np.zeros((1, hidden_nodes))
def get_target_for_label(self,label):
if label == "POSITIVE":
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
# TODO Change the name of the input parameter training_reviews to training_reviews_raw.
# This will help with the next step.
def train(self, training_reviews_raw, training_labels):
# make sure we have a matching number of reviews and labels
assert(len(training_reviews_raw) == len(training_labels))
# TODO At the beginning of the function, you'll want to preprocess your reviews
# to convert them to a list of indices (from word2index) that are actually used
# in the review. This is equivalent to what you saw in the video when Andrew set
# specific indices to 1. Your code should create a local list variable named
# training_reviews that should contain a list for each review in training_reviews_raw.
# Those lists should contain the indices for words found in the review.
training_reviews = list()
for review_raw in training_reviews_raw:
indices = set()
for word in review_raw.split(' '):
idx = self.word2index.get(word, -1)
if idx != -1:
indices.add(idx) # ---> contains the indices of the input vector that are 1
training_reviews.append(indices)
# Keep track of correct predictions to display accuracy during training
correct_so_far = 0
# Remember when we started for printing time statistics
# Source repository: phistuck/chromium-dashboard
# TODO(ericbidelman): generate this file from
# http://src.chromium.org/viewvc/blink/trunk/Source/core/frame/UseCounter.cpp
CSS_PROPERTY_BUCKETS = {
# 1 was reserved for number of CSS Pages Visited
2: 'color',
3: 'direction',
4: 'display',
5: 'font',
6: 'font-family',
7: 'font-size',
8: 'font-style',
9: 'font-variant',
10: 'font-weight',
11: 'text-rendering',
12: 'webkit-font-feature-settings',
13: 'webkit-font-kerning',
14: 'webkit-font-smoothing',
15: 'webkit-font-variant-ligatures',
16: 'webkit-locale',
17: 'webkit-text-orientation',
18: 'webkit-writing-mode',
19: 'zoom',
20: 'line-height',
21: 'background',
22: 'background-attachment',
23: 'background-clip',
24: 'background-color',
25: 'background-image',
26: 'background-origin',
27: 'background-position',
28: 'background-position-x',
29: 'background-position-y',
30: 'background-repeat',
31: 'background-repeat-x',
32: 'background-repeat-y',
33: 'background-size',
34: 'border',
35: 'border-bottom',
36: 'border-bottom-color',
37: 'border-bottom-left-radius',
38: 'border-bottom-right-radius',
39: 'border-bottom-style',
40: 'border-bottom-width',
41: 'border-collapse',
42: 'border-color',
43: 'border-image',
44: 'border-image-outset',
45: 'border-image-repeat',
46: 'border-image-slice',
47: 'border-image-source',
48: 'border-image-width',
49: 'border-left',
50: 'border-left-color',
51: 'border-left-style',
52: 'border-left-width',
53: 'border-radius',
54: 'border-right',
55: 'border-right-color',
56: 'border-right-style',
57: 'border-right-width',
58: 'border-spacing',
59: 'border-style',
60: 'border-top',
61: 'border-top-color',
62: 'border-top-left-radius',
63: 'border-top-right-radius',
64: 'border-top-style',
65: 'border-top-width',
66: 'border-width',
67: 'bottom',
68: 'box-shadow',
69: 'box-sizing',
70: 'caption-side',
71: 'clear',
72: 'clip',
73: 'webkit-clip-path',
74: 'content',
75: 'counter-increment',
76: 'counter-reset',
77: 'cursor',
78: 'empty-cells',
79: 'float',
80: 'font-stretch',
81: 'height',
82: 'image-rendering',
83: 'left',
84: 'letter-spacing',
85: 'list-style',
86: 'list-style-image',
87: 'list-style-position',
88: 'list-style-type',
89: 'margin',
90: 'margin-bottom',
91: 'margin-left',
92: 'margin-right',
93: 'margin-top',
94: 'max-height',
95: 'max-width',
96: 'min-height',
97: 'min-width',
98: 'opacity',
99: 'orphans',
100: 'outline',
101: 'outline-color',
102: 'outline-offset',
103: 'outline-style',
104: 'outline-width',
105: 'overflow',
106: 'overflow-wrap',
107: 'overflow-x',
108: 'overflow-y',
109: 'padding',
110: 'padding-bottom',
111: 'padding-left',
112: 'padding-right',
113: 'padding-top',
114: 'page',
115: 'page-break-after',
116: 'page-break-before',
117: 'page-break-inside',
118: 'pointer-events',
119: 'position',
120: 'quotes',
121: 'resize',
122: 'right',
123: 'size',
124: 'src',
125: 'speak',
126: 'table-layout',
127: 'tab-size',
128: 'text-align',
129: 'text-decoration',
130: 'text-indent',
131: 'text-line-through',
132: 'text-line-through-color',
133: 'text-line-through-mode',
134: 'text-line-through-style',
135: 'text-line-through-width',
136: 'text-overflow',
137: 'text-overline',
138: 'text-overline-color',
139: 'text-overline-mode',
140: 'text-overline-style',
141: 'text-overline-width',
142: 'text-shadow',
143: 'text-transform',
144: 'text-underline',
145: 'text-underline-color',
146: 'text-underline-mode',
147: 'text-underline-style',
148: 'text-underline-width',
149: 'top',
150: 'transition',
151: 'transition-delay',
152: 'transition-duration',
153: 'transition-property',
154: 'transition-timing-function',
155: 'unicode-bidi',
156: 'unicode-range',
157: 'vertical-align',
158: 'visibility',
159: 'white-space',
160: 'widows',
161: 'width',
162: 'word-break',
163: 'word-spacing',
164: 'word-wrap',
165: 'z-index',
166: 'webkit-animation',
167: 'webkit-animation-delay',
168: 'webkit-animation-direction',
169: 'webkit-animation-duration',
170: 'webkit-animation-fill-mode',
171: 'webkit-animation-iteration-count',
172: 'webkit-animation-name',
173: 'webkit-animation-play-state',
174: 'webkit-animation-timing-function',
175: 'webkit-appearance',
176: 'webkit-aspect-ratio',
177: 'webkit-backface-visibility',
178: 'webkit-background-clip',
179: 'webkit-background-composite',
180: 'webkit-background-origin',
181: 'webkit-background-size',
182: 'webkit-border-after',
183: 'webkit-border-after-color',
184: 'webkit-border-after-style',
185: 'webkit-border-after-width',
186: 'webkit-border-before',
187: 'webkit-border-before-color',
188: 'webkit-border-before-style',
189: 'webkit-border-before-width',
190: 'webkit-border-end',
191: 'webkit-border-end-color',
192: 'webkit-border-end-style',
193: 'webkit-border-end-width',
194: 'webkit-border-fit',
195: 'webkit-border-horizontal-spacing',
196: 'webkit-border-image',
197: 'webkit-border-radius',
198: 'webkit-border-start',
199: 'webkit-border-start-color',
200: 'webkit-border-start-style',
201: 'webkit-border-start-width',
202: 'webkit-border-vertical-spacing',
203: 'webkit-box-align',
204: 'webkit-box-direction',
205: 'webkit-box-flex',
206: 'webkit-box-flex-group',
207: 'webkit-box-lines',
208: 'webkit-box-ordinal-group',
209: 'webkit-box-orient',
210: 'webkit-box-pack',
211: 'webkit-box-reflect',
212: 'webkit-box-shadow',
213: 'webkit-color-correction',
214: 'webkit-column-axis',
215: 'webkit-column-break-after',
216: 'webkit-column-break-before',
217: 'webkit-column-break-inside',
218: 'webkit-column-count',
219: 'webkit-column-gap',
220: 'webkit-column-progression',
221: 'webkit-column-rule',
222: 'webkit-column-rule-color',
223: 'webkit-column-rule-style',
224: 'webkit-column-rule-width',
225: 'webkit-column-span',
226: 'webkit-column-width',
227: 'webkit-columns',
228: 'webkit-box-decoration-break',
229: 'webkit-filter',
230: 'webkit-align-content',
231: 'webkit-align-items',
232: 'webkit-align-self',
233: 'webkit-flex',
234: 'webkit-flex-basis',
235: 'webkit-flex-direction',
236: 'webkit-flex-flow',
237: 'webkit-flex-grow',
238: 'webkit-flex-shrink',
239: 'webkit-flex-wrap',
240: 'webkit-justify-content',
241: 'webkit-font-size-delta',
242: 'webkit-grid-columns',
243: 'webkit-grid-rows',
244: 'webkit-grid-start',
245: 'webkit-grid-end',
246: 'webkit-grid-before',
247: 'webkit-grid-after',
248: 'webkit-grid-column',
249: 'webkit-grid-row',
250: 'webkit-grid-auto-flow',
251: 'webkit-highlight',
252: 'webkit-hyphenate-character',
253: 'webkit-hyphenate-limit-after',
254: 'webkit-hyphenate-limit-before',
255: 'webkit-hyphenate-limit-lines',
256: 'webkit-hyphens',
257: 'webkit-line-box-contain',
258: 'webkit-line-align',
259: 'webkit-line-break',
260: 'webkit-line-clamp',
261: 'webkit-line-grid',
262: 'webkit-line-snap',
263: 'webkit-logical-width',
264: 'webkit-logical-height',
265: 'webkit-margin-after-collapse',
266: 'webkit-margin-before-collapse',
267: 'webkit-margin-bottom-collapse',
268: 'webkit-margin-top-collapse',
269: 'webkit-margin-collapse',
270: 'webkit-margin-after',
271: 'webkit-margin-before',
272: 'webkit-margin-end',
273: 'webkit-margin-start',
274: 'webkit-marquee',
275: 'webkit-marquee-direction',
276: 'webkit-marquee-increment',
277: 'webkit-marquee-repetition',
278: 'webkit-marquee-speed',
279: 'webkit-marquee-style',
280: 'webkit-mask',
281: 'webkit-mask-box-image',
282: 'webkit-mask-box-image-outset',
283: 'webkit-mask-box-image-repeat',
284: 'webkit-mask-box-image-slice',
285: 'webkit-mask-box-image-source',
286: 'webkit-mask-box-image-width',
287: 'webkit-mask-clip',
288: 'webkit-mask-composite',
289: 'webkit-mask-image',
290: 'webkit-mask-origin',
291: 'webkit-mask-position',
292: 'webkit-mask-position-x',
293: 'webkit-mask-position-y',
294: 'webkit-mask-repeat',
295: 'webkit-mask-repeat-x',
296: 'webkit-mask-repeat-y',
297: 'webkit-mask-size',
298: 'webkit-max-logical-width',
299: 'webkit-max-logical-height',
300: 'webkit-min-logical-width',
301: 'webkit-min-logical-height',
302: 'webkit-nbsp-mode',
303: 'webkit-order',
304: 'webkit-padding-after',
305: 'webkit-padding-before',
306: 'webkit-padding-end',
307: 'webkit-padding-start',
308: 'webkit-perspective',
309: 'webkit-perspective-origin',
310: 'webkit-perspective-origin-x',
311: 'webkit-perspective-origin-y',
312: 'webkit-print-color-adjust',
313: 'webkit-rtl-ordering',
314: 'webkit-ruby-position',
315: 'webkit-text-combine',
316: 'webkit-text-decorations-in-effect',
317: 'webkit-text-emphasis',
318: 'webkit-text-emphasis-color',
319: 'webkit-text-emphasis-position',
320: 'webkit-text-emphasis-style',
321: 'webkit-text-fill-color',
322: 'webkit-text-security',
323: 'webkit-text-stroke',
324: 'webkit-text-stroke-color',
325: 'webkit-text-stroke-width',
326: 'webkit-transform',
327: 'webkit-transform-origin',
328: 'webkit-transform-origin-x',
329: 'webkit-transform-origin-y',
330: 'webkit-transform-origin-z',
331: 'webkit-transform-style',
332: 'webkit-transition',
333: 'webkit-transition-delay',
334: 'webkit-transition-duration',
335: 'webkit-transition-property',
336: 'webkit-transition-timing-function',
337: 'webkit-user-drag',
338: 'webkit-user-modify',
339: 'webkit-user-select',
340: 'webkit-flow-into',
341: 'webkit-flow-from',
342: 'webkit-region-overflow',
343: 'webkit-region-break-after',
344: 'webkit-region-break-before',
345: 'webkit-region-break-inside',
346: 'webkit-shape-inside',
347: 'webkit-shape-outside',
348: 'webkit-shape-margin',
349: 'webkit-shape-padding',
350: 'webkit-wrap-flow',
351: 'webkit-wrap-through',
352: 'webkit-wrap',
353: 'webkit-tap-highlight-color',
354: 'webkit-app-region',
355: 'clip-path',
356: 'clip-rule',
357: 'mask',
358: 'enable-background',
359: 'filter',
360: 'flood-color',
361: 'flood-opacity',
362: 'lighting-color',
363: 'stop-color',
364: 'stop-opacity',
365: 'color-interpolation',
366: 'color-interpolation-filters',
367: 'color-profile',
368: 'color-rendering',
369: 'fill',
370: 'fill-opacity',
371: 'fill-rule',
372: 'marker',
373: 'marker-end',
374: 'marker-mid',
375: 'marker-start',
376: 'mask-type',
377: 'shape-rendering',
378: 'stroke',
379: 'stroke-dasharray',
380: 'stroke-dashoffset',
381: 'stroke-linecap',
382: 'stroke-linejoin',
383: 'stroke-miterlimit',
384: 'stroke-opacity',
385: 'stroke-width',
386: 'alignment-baseline',
387: 'baseline-shift',
388: 'dominant-baseline',
389: 'glyph-orientation-horizontal',
390: 'glyph-orientation-vertical',
391: 'kerning',
392: 'text-anchor',
393: 'vector-effect',
394: 'writing-mode',
395: 'webkit-svg-shadow',
396: 'webkit-cursor-visibility',
397: 'image-orientation',
398: 'image-resolution',
399: 'webkit-blend-mode',
400: 'webkit-background-blend-mode',
401: 'webkit-text-decoration-line',
402: 'webkit-text-decoration-style',
403: 'webkit-text-decoration-color',
404: 'webkit-text-align-last',
405: 'webkit-text-underline-position',
406: 'max-zoom',
407: 'min-zoom',
408: 'orientation',
409: 'user-zoom',
410: 'webkit-dashboard-region',
411: 'webkit-overflow-scrolling',
412: 'webkit-app-region',
413: 'webkit-filter',
414: 'webkit-box-decoration-break',
415: 'webkit-tap-highlight-color',
416: 'buffered-rendering',
417: 'grid-auto-rows',
418: 'grid-auto-columns',
419: 'background-blend-mode',
420: 'mix-blend-mode',
421: 'touch-action',
422: 'grid-area',
423: 'grid-template-areas',
424: 'animation',
425: 'animation-delay',
426: 'animation-direction',
427: 'animation-duration',
428: 'animation-fill-mode',
429: 'animation-iteration-count',
430: 'animation-name',
431: 'animation-play-state',
432: 'animation-timing-function',
433: 'object-fit',
434: 'paint-order',
435: 'mask-source-type',
436: 'isolation',
437: 'object-position',
438: 'internal-callback',
439: 'shape-image-threshold',
440: 'column-fill',
441: 'text-justify',
442: 'touch-action-delay',
443: 'justify-self',
444: 'scroll-behavior',
445: 'will-change',
446: 'transform',
447: 'transform-origin',
448: 'transform-style',
449: 'perspective',
450: 'perspective-origin',
451: 'backface-visibility',
452: 'grid-template',
453: 'grid',
}
PAGE_VISITS_BUCKET_ID = 52 # corresponds to the property below.
# http://src.chromium.org/viewvc/blink/trunk/Source/core/frame/UseCounter.h
FEATUREOBSERVER_BUCKETS = {
0: 'PageDestruction',
1: 'LegacyNotifications',
2: 'MultipartMainResource',
3: 'PrefixedIndexedDB',
4: 'WorkerStart',
5: 'SharedWorkerStart',
6: 'LegacyWebAudio',
7: 'WebAudioStart',
9: 'UnprefixedIndexedDB',
10: 'OpenWebDatabase',
12: 'LegacyTextNotifications',
13: 'UnprefixedRequestAnimationFrame',
14: 'PrefixedRequestAnimationFrame',
15: 'ContentSecurityPolicy',
16: 'ContentSecurityPolicyReportOnly',
18: 'PrefixedTransitionEndEvent',
19: 'UnprefixedTransitionEndEvent',
20: 'PrefixedAndUnprefixedTransitionEndEvent',
21: 'AutoFocusAttribute',
23: 'DataListElement',
24: 'FormAttribute',
25: 'IncrementalAttribute',
26: 'InputTypeColor',
27: 'InputTypeDate',
28: 'InputTypeDateTime',
29: 'InputTypeDateTimeFallback',
30: 'InputTypeDateTimeLocal',
31: 'InputTypeEmail',
32: 'InputTypeMonth',
33: 'InputTypeNumber',
34: 'InputTypeRange',
35: 'InputTypeSearch',
36: 'InputTypeTel',
37: 'InputTypeTime',
38: 'InputTypeURL',
39: 'InputTypeWeek',
40: 'InputTypeWeekFallback',
41: 'ListAttribute',
42: 'MaxAttribute',
43: 'MinAttribute',
44: 'PatternAttribute',
45: 'PlaceholderAttribute',
46: 'PrecisionAttribute',
47: 'PrefixedDirectoryAttribute',
48: 'PrefixedSpeechAttribute',
49: 'RequiredAttribute',
50: 'ResultsAttribute',
51: 'StepAttribute',
52: 'PageVisits', # counts are divided by this number for actual %
53: 'HTMLMarqueeElement',
55: 'Reflection',
57: 'PrefixedStorageInfo',
58: 'XFrameOptions',
59: 'XFrameOptionsSameOrigin',
60: 'XFrameOptionsSameOriginWithBadAncestorChain',
61: 'DeprecatedFlexboxWebContent',
62: 'DeprecatedFlexboxChrome',
63: 'DeprecatedFlexboxChromeExtension',
65: 'UnprefixedPerformanceTimeline',
66: 'PrefixedPerformanceTimeline',
67: 'UnprefixedUserTiming',
69: 'WindowEvent',
70: 'ContentSecurityPolicyWithBaseElement',
71: 'PrefixedMediaAddKey',
72: 'PrefixedMediaGenerateKeyRequest',
74: 'DocumentClear',
76: 'SVGFontElement',
77: 'XMLDocument',
78: 'XSLProcessingInstruction',
79: 'XSLTProcessor',
80: 'SVGSwitchElement',
82: 'HTMLShadowElementOlderShadowRoot',
83: 'DocumentAll',
84: 'FormElement',
85: 'DemotedFormElement',
86: 'CaptureAttributeAsEnum',
87: 'ShadowDOMPrefixedPseudo',
88: 'ShadowDOMPrefixedCreateShadowRoot',
89: 'ShadowDOMPrefixedShadowRoot',
90: 'SVGAnimationElement',
91: 'KeyboardEventKeyLocation',
92: 'CaptureEvents',
93: 'ReleaseEvents',
94: 'CSSDisplayRunIn',
95: 'CSSDisplayCompact',
96: 'LineClamp',
97: 'SubFrameBeforeUnloadRegistered',
98: 'SubFrameBeforeUnloadFired',
99: 'CSSPseudoElementPrefixedDistributed',
100: 'TextReplaceWholeText',
101: 'PrefixedShadowRootConstructor',
102: 'ConsoleMarkTimeline',
103: 'CSSPseudoElementUserAgentCustomPseudo',
104: 'DocumentTypeEntities',
105: 'DocumentTypeInternalSubset',
106: 'DocumentTypeNotations',
107: 'ElementGetAttributeNode',
108: 'ElementSetAttributeNode',
109: 'ElementRemoveAttributeNode',
110: 'ElementGetAttributeNodeNS',
111: 'DocumentCreateAttribute',
112: 'DocumentCreateAttributeNS',
113: 'DocumentCreateCDATASection',
114: 'DocumentInputEncoding',
115: 'DocumentXMLEncoding',
116: 'DocumentXMLStandalone',
117: 'DocumentXMLVersion',
118: 'NodeIsSameNode',
119: 'NodeIsSupported',
120: 'NodeNamespaceURI',
122: 'NodeLocalName',
123: 'NavigatorProductSub',
124: 'NavigatorVendor',
125: 'NavigatorVendorSub',
126: 'FileError',
127: 'DocumentCharset',
128: 'PrefixedAnimationEndEvent',
129: 'UnprefixedAnimationEndEvent',
130: 'PrefixedAndUnprefixedAnimationEndEvent',
131: 'PrefixedAnimationStartEvent',
132: 'UnprefixedAnimationStartEvent',
133: 'PrefixedAndUnprefixedAnimationStartEvent',
134: 'PrefixedAnimationIterationEvent',
135: 'UnprefixedAnimationIterationEvent',
136: 'PrefixedAndUnprefixedAnimationIterationEvent',
137: 'EventReturnValue',
138: 'SVGSVGElement',
139: 'SVGAnimateColorElement',
140: 'InsertAdjacentText',
141: 'InsertAdjacentElement',
142: 'HasAttributes',
143: 'DOMSubtreeModifiedEvent',
144: 'DOMNodeInsertedEvent',
145: 'DOMNodeRemovedEvent',
146: 'DOMNodeRemovedFromDocumentEvent',
147: 'DOMNodeInsertedIntoDocumentEvent',
148: 'DOMCharacterDataModifiedEvent',
150: 'DocumentAllLegacyCall',
151: 'HTMLAppletElementLegacyCall',
152: 'HTMLEmbedElementLegacyCall',
153: 'HTMLObjectElementLegacyCall',
154: 'BeforeLoadEvent',
155: 'GetMatchedCSSRules',
156: 'SVGFontInCSS',
157: 'ScrollTopBodyNotQuirksMode',
158: 'ScrollLeftBodyNotQuirksMode',
160: 'AttributeOwnerElement',
162: 'AttributeSpecified',
163: 'BeforeLoadEventInIsolatedWorld',
164: 'PrefixedAudioDecodedByteCount',
165: 'PrefixedVideoDecodedByteCount',
166: 'PrefixedVideoSupportsFullscreen',
167: 'PrefixedVideoDisplayingFullscreen',
168: 'PrefixedVideoEnterFullscreen',
169: 'PrefixedVideoExitFullscreen',
170: 'PrefixedVideoEnterFullScreen',
171: 'PrefixedVideoExitFullScreen',
172: 'PrefixedVideoDecodedFrameCount',
173: 'PrefixedVideoDroppedFrameCount',
176: 'PrefixedElementRequestFullscreen',
177: 'PrefixedElementRequestFullScreen',
178: 'BarPropLocationbar',
179: 'BarPropMenubar',
180: 'BarPropPersonalbar',
181: 'BarPropScrollbars',
182: 'BarPropStatusbar',
183: 'BarPropToolbar',
184: 'InputTypeEmailMultiple',
value of the nematic order parameter (between 0 and 1), or number of neighbors
"""
# local nematic order parameter: average 1/2 (3 cos^2(theta) - 1), where theta = angle diff btw neighbors
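# Illustrative values (added for clarity, not in the original code): for a single neighbor
# pair, theta = 0 or pi gives (3*1 - 1)/2 = +1 (aligned), while theta = pi/2 gives
# (3*0 - 1)/2 = -0.5 (perpendicular), so a fully aligned neighborhood averages to 1.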
p1 = self.particles[type][layer][id]
pos1 = self.trajectory[time][type][layer][id]
if not pos1:
return 0
if pos1.nematic:
if returnnbhrs:
return pos1.nbhrs
else:
return pos1.nematic
typelist = [type]
if type in [0, 2]:
typelist = [0, 2]
r_min = 2.0*p1._radius*1.5  # e.g. 2*6*1.5 = 18 nm for a 6 nm radius (an earlier 1.3 factor gave 15.6 nm)
r_max = (p1._length + 2.0*p1._radius) * 1.1 # (14.5+12)*1.1 = 29.15 nm
nbhrs = 0.0
sum = 0.0
nbhrs_above_cutoff = 0.0
close_nbhrs = 0.0
for itype in typelist:
for p2 in self.particles[itype][layer]:
if p2:
if p2._id != id:
pos2 = self.trajectory[time][itype][layer][p2._id]
dx = self.PBC_diff(pos1.x, pos2.x, self.params.getWidth(time))
dy = self.PBC_diff(pos1.y, pos2.y, self.params.getHeight(time))
r = sqrt(dx*dx + dy*dy)
if r < r_max:# and r > r_min:
nbhrs = nbhrs + 1.0
theta = pos1.theta - pos2.theta
costheta = cos(theta)
val = (3.0*costheta*costheta - 1.0)/2.0
sum += val
if val > 0.9:
nbhrs_above_cutoff += 1.0
if r < r_min:
close_nbhrs += 1.0
if nbhrs == 0:
nematic = 0
else:
nematic = sum / nbhrs
self.trajectory[time][type][layer][id].nematic = nematic
self.trajectory[time][type][layer][id].nbhrs = nbhrs
if returnnbhrs:
return (nbhrs_above_cutoff/6.0)*(close_nbhrs/2.0)
else:
return nematic
def calculateHexatic(self, type, id, layer, time):
# use Lester's trig-free method for hexatic order parameter sum(exp(6 * i * theta))
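# Note (added for clarity): with c = cos(theta) = dx/r and s = sin(theta) = dy/r for each
# neighbor bond, the loop below accumulates the multiple-angle expansions
#   cos(6*theta) = c^6 - 15 c^4 s^2 + 15 c^2 s^4 - s^6
#   sin(6*theta) = 6 c^5 s - 20 c^3 s^3 + 6 c s^5
# so no trig calls are needed; |psi_6|^2 is then (realpart^2 + impart^2) / nbhrs^2.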
p1 = self.particles[type][layer][id]
pos1 = self.trajectory[time][type][layer][id]
if not pos1:
return 0
# if hasattr(pos1, "hexatic"):
# return pos1.hexatic
typelist = [type]
if type in [0, 2]:
typelist = [0, 2]
r_cut = p1._radius * 2.0 * 1.73 # ~ cos(30/180*pi) * 2
nbhrs = 0.0
realpart = 0.0
impart = 0.0
for itype in typelist:
for p2 in self.particles[itype][layer]:
if p2:
if p2._id != id:
pos2 = self.trajectory[time][itype][layer][p2._id]
dx = self.PBC_diff(pos1.x, pos2.x, self.params.getWidth(time))
dy = self.PBC_diff(pos1.y, pos2.y, self.params.getHeight(time))
r = sqrt(dx*dx + dy*dy)
if r < r_cut:
nbhrs = nbhrs + 1.0
c = dx / r
s = dy / r
realpart = realpart + pow(c, 6) - 15.0 * pow(c, 4) * pow(s, 2) + 15.0 * pow (c, 2) * pow(s, 4) - pow(s, 6)
impart = impart + 6.0 * pow(c, 5) * s - 20.0 * pow(c, 3) * pow(s, 3) + 6.0 * c * pow(s, 5)
if nbhrs < 2:
hexatic = 0
else:
hexatic = 1.0 / nbhrs / nbhrs * (realpart*realpart + impart*impart)
pos1.hexatic = hexatic
return hexatic
def buildETClusters(self, time, ilayer, rsqmax=2.0*2.0):
"""public: builds clusters using energy transfer criteria, stores info in self.cluster_sizes
@param self The object pointer
@param time Int for time
@param ilayer Int for layer id
@param rsqmax Float for maximum distance to be counted as energy transfer partners, default 4.0 nm
"""
# based on Carl's code
icluster = 0
nbhrs_to_check = [] # Carl's "reserve"
if time not in self.cluster_sizes:
self.cluster_sizes[time] = dict()
if ilayer not in self.cluster_sizes[time]:
self.cluster_sizes[time][ilayer] = []
maybetypes = [0, 1]
types = []
for itype in range(len(maybetypes)):
if len(self.trajectory[time][itype]) > 0:
types.append(maybetypes[itype])
unsorted_clusters =[]
# remove any remembered clusters
#for itype in types:
# for ipos in range(len(self.trajectory[time][itype][ilayer])):
# self.trajectory[time][itype][ilayer][ipos].already_clustered = False
# build cluster list
for itype in types:
for ipos in range(len(self.trajectory[time][itype][ilayer])):
if self.trajectory[time][itype][ilayer][ipos]:
if not self.trajectory[time][itype][ilayer][ipos].already_clustered:
self.trajectory[time][itype][ilayer][ipos].already_clustered = True
nbhrs_to_check.append([itype, ipos])
unsorted_clusters.append([0 for i in range(max(types)+1)])
# while there are nbhrs whose nbhrs haven't been checked yet...
while len(nbhrs_to_check) > 0:
# add reference particle to cluster
[itype1, ipos1] = nbhrs_to_check.pop()
self.trajectory[time][itype1][ilayer][ipos1].clusterID = icluster
unsorted_clusters[icluster][itype1] += 1
# loop to find neighbors
for itype2 in types:
for ipos2 in range(len(self.trajectory[time][itype2][ilayer])):
if self.trajectory[time][itype2][ilayer][ipos2]:
if not self.trajectory[time][itype2][ilayer][ipos2].already_clustered:
sep = self.getClosestApproach(time, ilayer, itype1, ipos1, itype2, ipos2)
if sep*sep < rsqmax:
# add actual neighbor to queue
nbhrs_to_check.append([itype2, ipos2])
self.trajectory[time][itype2][ilayer][ipos2].already_clustered = True
# finished this cluster, on to the next one
icluster += 1
# sort cluster list and re-id
sortedIDs = sorted([[val, i] for i, val in enumerate(unsorted_clusters)], reverse=True)
IDmap = []
for i, val in enumerate(unsorted_clusters):
newID = sortedIDs.index([val, i])
IDmap.append(newID)
for itype in types:
for ipos in range(len(self.trajectory[time][itype][ilayer])):
if self.trajectory[time][itype][ilayer][ipos]:
if self.trajectory[time][itype][ilayer][ipos].already_clustered:
self.trajectory[time][itype][ilayer][ipos].clusterID = IDmap[self.trajectory[time][itype][ilayer][ipos].clusterID]
self.cluster_sizes[time][ilayer] = [val for [val, i] in sortedIDs]
print "found", len(self.cluster_sizes[time][ilayer]), "clusters at time", time, "in layer", ilayer#, "with sizes:", self.cluster_sizes[time][ilayer]
def buildArrayClusters(self, time, ilayer):
"""public: builds clusters using PSII semi-crystalline array criteria, stores info in self.array_sizes
@param self The object pointer
@param time Int for time
@param ilayer Int for layer id
@retval largest_cluster Int for size of largest cluster
"""
# based on Carl's code
iarray = 0
nbhrs_to_check = [] # Carl's "reserve"
far_nbhrs_to_check = [] # secondary queue
if time not in self.array_sizes:
self.array_sizes[time] = dict()
if ilayer not in self.array_sizes[time]:
self.array_sizes[time][ilayer] = []
types = [1]
unsorted_arrays =[]
r_min = 12.0*1.5  # = 18 nm (2 * 6 nm radius, 1.5 factor; an earlier 1.3 factor gave 15.6 nm)
r_max = 26.5 * 1.1 # (14.5+12)*1.1 = 29.15 nm
perp_cutoff = 14.0
par_cutoff = 26.5
# remove any remembered arrays
#for itype in types:
# for ipos in range(len(self.trajectory[time][itype][ilayer])):
# self.trajectory[time][itype][ilayer][ipos].already_arrayed = False
# build array list
for itype in types:
for ipos in range(len(self.trajectory[time][itype][ilayer])):
#print "checking particle", ipos
if self.trajectory[time][itype][ilayer][ipos]:
if not self.trajectory[time][itype][ilayer][ipos].already_arrayed:
#print ipos, "not yet arrayed so checking for nbhrs"
self.trajectory[time][itype][ilayer][ipos].already_arrayed = True
nbhrs_to_check.append([itype, ipos])
unsorted_arrays.append([0 for i in range(max(types)+1)])
# while there are nbhrs whose nbhrs haven't been checked yet...
while len(nbhrs_to_check) > 0:
# add reference particle to array
[itype1, ipos1] = nbhrs_to_check.pop()
#print "checking particle", ipos1, "for nbhrs, started with", ipos
#if self.trajectory[time][itype1][ilayer][ipos1].already_arrayed:
#print "oops", ipos1, "is already in array", self.trajectory[time][itype1][ilayer][ipos1].arrayID
self.trajectory[time][itype1][ilayer][ipos1].arrayID = iarray
unsorted_arrays[iarray][itype1] += 1
#print iarray, unsorted_arrays[iarray]
# set up secondary nbhrs_to_check list, for ipos1 particle that isn't yet in linear array
far_nbhrs_to_check = []
# loop to find neighbors
for itype2 in types:
for ipos2 in range(len(self.trajectory[time][itype2][ilayer])):
if self.trajectory[time][itype2][ilayer][ipos2]:
if not self.trajectory[time][itype2][ilayer][ipos2].already_arrayed:
#print "checking particle", ipos2, "for nbhr to", ipos1
# get pairwise nematic value
pos1 = self.trajectory[time][itype1][ilayer][ipos1]
pos2 = self.trajectory[time][itype2][ilayer][ipos2]
theta = pos1.theta - pos2.theta
costheta = cos(theta)
val = (3.0*costheta*costheta - 1.0)/2.0
# if pairwise value above cutoff, check distance
if val > 0.9:
# get distance between centers
dx = self.PBC_diff(pos1.x, pos2.x, self.params.getWidth(time))
dy = self.PBC_diff(pos1.y, pos2.y, self.params.getHeight(time))
if True: # new method
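# (added note) decompose the center-to-center separation (dx, dy) into components
# along and across particle 1's long axis: proj = d . u_hat, par_dist = |proj|,
# perp_dist = sqrt(|d|^2 - par_dist^2); both must fall within the array cutoffs below.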
unit_vector = [cos(pos1.theta), sin(pos1.theta)]
proj = unit_vector[0]*dx + unit_vector[1]*dy
par_dist = abs(proj)
perp_dist = sqrt(dx*dx + dy*dy - par_dist*par_dist)
# print "proj", proj, "par", par_dist, "perp", perp_dist, "unit", unit_vector
if par_dist < par_cutoff:
if perp_dist < perp_cutoff:
#print "in cutoffs, adding", ipos2
# add neighbor to queue
nbhrs_to_check.append([itype2, ipos2])
self.trajectory[time][itype2][ilayer][ipos2].already_arrayed = True
self.trajectory[time][itype2][ilayer][ipos2].in_linear_array = True
else: # old method
r = sqrt(dx*dx + dy*dy)
if r < r_min:
# add close neighbor to close queue
nbhrs_to_check.append([itype2, ipos2])
self.trajectory[time][itype2][ilayer][ipos2].already_arrayed = True
self.trajectory[time][itype2][ilayer][ipos2].in_linear_array = True
# if this is ipos1's first close neighbor, recheck its far nbhrs
if not self.trajectory[time][itype1][ilayer][ipos1].in_linear_array:
self.trajectory[time][itype1][ilayer][ipos1].in_linear_array = True
nbhrs_to_check.extend(far_nbhrs_to_check)
far_nbhrs_to_check = []
elif r < r_max:
if self.trajectory[time][itype1][ilayer][ipos1].in_linear_array:
# add far neighbor to queue, if ipos1 particle is in linear array
nbhrs_to_check.append([itype2, ipos2])
self.trajectory[time][itype2][ilayer][ipos2].already_arrayed = True
else:
# add far nbhr to tmp queue
far_nbhrs_to_check.append([itype2, ipos2])
# finished this array, on to the next one
iarray += 1
# sort array list and re-id
sortedIDs = sorted([[val, i] for i, val in enumerate(unsorted_arrays)], reverse=True)
IDmap = []
for i, val in enumerate(unsorted_arrays):
newID = sortedIDs.index([val, i])
IDmap.append(newID)
for itype in types:
for ipos in range(len(self.trajectory[time][itype][ilayer])):
if self.trajectory[time][itype][ilayer][ipos]:
if self.trajectory[time][itype][ilayer][ipos].already_arrayed:
self.trajectory[time][itype][ilayer][ipos].arrayID = IDmap[self.trajectory[time][itype][ilayer][ipos].arrayID]
self.array_sizes[time][ilayer] = [val for [val, i] in sortedIDs]
total_ps = 0
for arr in self.array_sizes[time][ilayer]:
total_ps += arr[1]
print "found", len(self.array_sizes[time][ilayer]), "arrays at time", time, "in layer", ilayer, "with largest cluster:", self.array_sizes[time][ilayer][0], "and total PSII", total_ps
return self.array_sizes[time][ilayer][0]
def getClosestApproach(self, time, ilayer, itype1, ipos1, itype2, ipos2):
"""private: returns closest distance between *edges* of particles, not centers
@param self The object pointer
@param time Int for time
@param ilayer Int for layer id
@param itype1 Int for particle type
pattern.match(unicode_desc)
# Grapheme's unicode description is non-standard
if(not match_obj):
# Underscore, dash, hastag have special meaning
if(graph in ("_", "-", "#")):
graph_dict = {
'CHAR_TYPE': 'LINK',
'SYMBOL': graph,
'NAME': graph
}
# The grapheme is whitespace
elif(unicode_desc in ("ZERO WIDTH SPACE",
"ZERO WIDTH NON-JOINER",
"ZERO WIDTH JOINER",
"SPACE")):
# Ignore whitespace
continue
else:
graph_dict = {'SYMBOL': graph, 'NAME': 'NOT_FOUND'}
# Grapheme's unicode description is standard
else:
graph_dict = match_obj.groupdict()
graph_dict["SYMBOL"] = graph
# Add tags to dictionary (The first element of tags is actually
# the base grapheme, so we only check all tags after the first.
if(len(tags) > 1):
for i, t in enumerate(tags[1:]):
graph_dict["TAG" + str(i)] = unicodedata.name(t)
# Add grapheme unicode description dictionary to baseform list
baseform_transcription.append(graph_dict)
# Add baseform transcription to unicode transcription list
unicode_transcription.append(baseform_transcription)
return unicode_transcription
def encode(unicode_transcription, tag_percentage, log=False):
'''
Arguments:
unicode_transcription -- a list of words whose graphemes are
represented as a list of dictionaries whose
fields contain information about parsed
unicode descriptions.
tag_percentage -- percent of least frequent graphemes to tag
log -- optional printing
Outputs:
encoded_transcription -- baseforms mapped to the graphemeic
acoustic units
'''
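# Usage sketch (added for illustration; assumes `unicode_transcription` was produced by the
# unicode-description parser above and that the rarest ~10% of graphemes should be tagged):
#   encoded, table, grapheme_map = encode(unicode_transcription, tag_percentage=0.1)
#   encoded[0]   -> tab/space-separated graphemic-acoustic units for the first word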
# Constants
VOWELS = "AEIOU"
SKIP = "/()"
table = []
graphemes = []
encoded_transcription = []
# Accumulate grapheme statistics over corpus at some point. For now just
# use the lexicon word list. For estimating grapheme frequency this is
# probably sufficient since we have many words each with many
# graphemes. We do unfortunately have to assume that case does not matter.
# We do not count dashes, underscores, parentheses, etc. Just letters.
graph_list = []
for w in unicode_transcription:
for graph in w:
if graph["SYMBOL"] not in "()\/,-_#.":
graph_list.append(graph["SYMBOL"].lower())
graph2int = {v: k for k, v in enumerate(set(graph_list))}
int2graph = {v: k for k, v in graph2int.items()}
graph_list_int = [graph2int[g] for g in graph_list]
bin_edges = range(0, len(int2graph.keys()) + 1)
graph_counts = np.histogram(graph_list_int, bins=bin_edges)[0]/float(len(graph_list_int))
# Set count threshold to frequency that tags the bottom 10% of graphemes
bottom_idx = int(np.floor(tag_percentage * len(graph_counts)))
count_thresh = sorted(graph_counts)[bottom_idx]
graph_counts_dict = {}
for i, count in enumerate(graph_counts):
graph_counts_dict[int2graph[i]] = count
graph_counts = graph_counts_dict
# Print grapheme counts to histogram
if log:
graph_counts_sorted = sorted(graph_counts, reverse=True,
key=graph_counts.get)
if not os.path.exists(log):
os.makedirs(log)
with codecs.open(os.path.join(log, "grapheme_histogram.txt"), "w", "utf-8") as fp:
fp.write("Graphemes (Count Threshold = %.6f) (Tag Percentage "
"= %.2f)\n" % (count_thresh, tag_percentage))
for g in graph_counts_sorted:
weight = ("-" * int(np.ceil(500.0 * graph_counts[g])) +
" %.6f\n" % graph_counts[g])
fp.write("%s -" % (g) + weight)
# Find a new baseform for each word
for w in unicode_transcription:
word_transcription = ""
# Find a "pronunciation" for each grapheme in the word
for graph in w:
# Case 1: Check that the grapheme has a unicode description type
# ---------------------------------------------------------------
if("CHAR_TYPE" not in [k.strip() for k in graph.keys()]):
if(graph["SYMBOL"] == "."):
try:
graph["MAP0"] = "\t"
if word_transcription[-1] == " ":
word_transcription = word_transcription[:-1] + "\t"
except IndexError:
print("Word starting with . detected")
graph["MAP0"] = "."
word_transcription = ". "
elif(graph["SYMBOL"] not in SKIP):
graph["MAP0"] = graph["SYMBOL"].lower()
word_transcription += graph["MAP0"] + " "
# Case 2: Standard Grapheme
# ---------------------------------------------------------------
elif(graph["CHAR_TYPE"].strip() in
("LETTER", "VOWEL", "VOWEL SIGN", "SIGN")):
# Backoff diacritics
base_grapheme = graph["NAME"].strip().replace(" ", "-").lower()
graph["MAP0"] = _backoff_diacritics(graph["SYMBOL"].lower(),
base_grapheme,
graph_counts,
count_thresh)
# Add final space
word_transcription += graph["MAP0"] + " "
# Case 3: Syllable (Assume consonant vowel pattern)
# At some point we will make it (cvc), but for now
# this is basically just here for Amharic
# ----------------------------------------------------------------
elif(graph["CHAR_TYPE"].strip() == "SYLLABLE"):
# Multi-word description
if(len(graph["NAME"].strip().split(' ')) > 1):
g_name = graph["NAME"].strip().replace(" ", "-").lower()
graph["MAP0"] = g_name + "\t"
word_transcription += graph["MAP0"]
# Consonant Vowel Pattern
else:
cv_pattern = (r"(?P<CONSONANT>[^%s]*)(?P<VOWEL>[%s]+)" %
(VOWELS, VOWELS))
parsed_graph = re.match(cv_pattern, graph["NAME"])
if(not parsed_graph):
sys.exit("Syllable did not obey"
"consonant-vowel pattern.")
graph_dict = parsed_graph.groupdict()
# Get consonant if it exists
if("CONSONANT" in graph_dict.keys() and
graph_dict["CONSONANT"]):
graph["MAP0"] = graph_dict["CONSONANT"].lower()
word_transcription += graph["MAP0"] + " "
# Get vowel if it exists
if("VOWEL" in graph_dict.keys() and graph_dict["VOWEL"]):
graph["MAP1"] = graph_dict["VOWEL"].lower() + "\t"
word_transcription += graph["MAP1"]
# Case 4: Commonly occurring symbols
# ----------------------------------------------------------------
elif(graph["CHAR_TYPE"].strip() == "LINK"):
# Add tab for underscores (kaldi lexicon format)
if(graph["SYMBOL"] in ("_", "#")):
graph["MAP0"] = "\t"
if(len(word_transcription) >= 3 and
word_transcription[-2] == "\t"):
word_transcription = word_transcription[:-3] + "\t"
elif(len(word_transcription) >= 1):
word_transcription += "\t"
else:
sys.exit("Unknown rule for initial underscore")
elif(graph["SYMBOL"] == "-"):
graph["MAP0"] = "\t"
else:
sys.exit("Unknown linking symbol found.")
sys.exit(1)
# Update table of observed graphemes
if(graph["SYMBOL"] not in graphemes):
table.append(graph)
graphemes.append(graph["SYMBOL"])
# Append the newly transcribed word
encoded_transcription.append(word_transcription.strip())
# Create grapheme to graphemic-acoustic-unit map
grapheme_map = {}
for g_dict in table:
g_map = ""
map_number = 0
for g_field, g_val in sorted(g_dict.items()):
if(g_field == ("MAP" + str(map_number))):
g_map = g_map + g_val + " "
map_number = map_number + 1
grapheme_map[g_dict["SYMBOL"]] = g_map.strip(' ')
return encoded_transcription, table, grapheme_map
def _backoff_diacritics(grapheme, base_grapheme, graph_counts, count_thresh):
'''
Add diacritics as tags if the grapheme with diacritics occurs
infrequently. The grapheme built by successively peeling away
diacritics until a frequent grapheme in the lexicon is discovered.
This grapheme is then considered a distinct unit and all peeled off
diacritics are added as kaldi style tags
Arguments:
grapheme -- the raw grapheme to be processed
base_grapheme -- the grapheme with no combining marks
(see unicode normalization NFD for more details)
graph_counts -- A dictionary of all seen graphemes as keys with
counts as values
count_thresh -- The frequency threshold below which diacritics
should be peeled away
'''
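# Worked example (added for illustration; assumes the parsed base NAME is "e" and that the
# composed grapheme falls below the frequency threshold): a rare "e + combining acute accent"
# backs off to the frequent base "e" and returns "e_combiningacuteaccent", i.e. the peeled
# diacritic becomes a kaldi-style tag appended with "_".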
# Initialize variables before loop
new_grapheme = grapheme
removed = []
parts = unicodedata.normalize("NFD", new_grapheme)
# Find a backed-off (in terms of number of diacritics) grapheme with count
# above the frequency threshold (count_thresh)
while(len(parts) > 1 and
(graph_counts[new_grapheme] <= count_thresh)):
new_grapheme = unicodedata.normalize("NFC", parts[0:-1])
tag = unicodedata.name(parts[-1]).strip().replace(" ", "").lower()
removed.append(tag)
parts = unicodedata.normalize("NFD", new_grapheme)
# Collect all diactritics that will not be added as tags
split_tags = []
for p in parts[1:]:
split_tag = unicodedata.name(p).strip().replace(" ", "").lower()
split_tags.append(split_tag)
# Append non-tag diacritics to the base grapheme
base_grapheme = "".join([base_grapheme] + split_tags)
# Return the tagged grapheme
return "_".join([base_grapheme] + removed)
def apply_map(grapheme_map, baseforms):
'''
Apply the grapheme_map to the baseforms
Arguments:
grapheme_map -- dictionary storing mapping from grapheme to
graphemic-acoustic units
baseforms -- the words to which we want to apply the mappings
Outputs:
encoded_transcription -- See encode (function). It's the exact same
format.
'''
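# Usage sketch (added for illustration; hypothetical one-word input):
#   baseforms = [("cat", ["c", "a", "t"])]
#   apply_map(grapheme_map, baseforms)   ->   e.g. ["c a t"]
# Graphemes missing from grapheme_map are silently skipped (the KeyError branch below).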
encoded_transcription = []
for w, bf in baseforms:
word_transcription = ""
for graph in bf:
try:
if grapheme_map[graph][-1] == "\t":
word_transcription += grapheme_map[graph]
else:
word_transcription += grapheme_map[graph] + " "
except KeyError:
pass
encoded_transcription.append(word_transcription.strip())
return encoded_transcription
def write_table(table, outfile):
'''
Creates table of graphemes and fields of each grapheme's corresponding
unicode description.
Arguments:
table -- table to write
outfile -- name of the output lexicon debug file
'''
# Create output table name
#outfile = os.path.splitext(outfile)[0]
# Sort keys for convenience
table_sorted = sorted(table, key=lambda k: k["NAME"])
# Start writing to output
with codecs.open(outfile, "w", "utf-8") as fo:
# Get header names
header_names = sorted(set().union(*[list(d.keys()) for d in table]))
# Write headers
for h in header_names[:-1]:
fo.write("%s\t" % h)
fo.write("%s\n" % header_names[-1])
# Write values if present
for t in table_sorted:
for h in header_names[:-1]:
if(h in t.keys() and t[h]):
fo.write("%s\t" % t[h])
else:
fo.write("''\t")
if(header_names[-1] in t.keys() and t[header_names[-1]]):
fo.write("%s\n" % t[header_names[-1]])
else:
fo.write("''\n")
def write_map(grapheme_map, mapfile):
'''
Write out a file with the mapping from graphemes to
graphemic-acoustic units. The format is one grapheme per line
followed by a space and then the graphemic acoustic units to which
the grapheme was mapped. Compatible with utils/apply_map.pl
Arguments:
grapheme_map -- dictionary mapping graphemes to graphemic-acoustic
units as output by encode()
mapfile -- the path to which the grapheme map will be written
'''
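# Example output lines (illustrative only, following the format described above):
#   a a
#   é e_combiningacuteaccent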
with codecs.open(mapfile, 'w', encoding='utf-8') as f:
# (loop body reconstructed from the docstring above: one "<grapheme> <units>" pair per line)
for graph, units in grapheme_map.items():
    f.write("%s %s\n" % (graph, units))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import int
from builtins import range
from past.builtins import basestring
from builtins import object
from ctypes import c_char, c_char_p, c_ubyte, c_int, c_void_p
from ctypes import c_uint, c_uint32, c_uint64
from ctypes import Structure, Union
from ctypes import CDLL, POINTER, pointer
from ctypes import string_at, addressof
from datetime import datetime
import os
import sys
import time
import codecs
CODEC = 'utf8'
HEX = 'hex'
try:
codecs.decode('A0', 'hex')
except LookupError:
HEX = 'hex_codec'
if os.name == "posix" and sys.platform == "darwin":
try:
lib = CDLL('libtraildb.dylib')
except:
# is there a better way to figure out the path?
lib = CDLL('/usr/local/lib/libtraildb.dylib')
elif os.name == "posix" and "linux" in sys.platform:
lib = CDLL('libtraildb.so')
def api(fun, args, res=None):
fun.argtypes = args
fun.restype = res
tdb = c_void_p
tdb_cons = c_void_p
tdb_field = c_uint32
tdb_val = c_uint64
tdb_item = c_uint64
tdb_cursor = c_void_p
tdb_error = c_int
tdb_event_filter = c_void_p
tdb_multi_cursor = c_void_p
class tdb_event(Structure):
_fields_ = [("timestamp", c_uint64),
("num_items", c_uint64),
("items", POINTER(tdb_item))]
class tdb_multi_event(Structure):
_fields_ = [("db", tdb),
("tdb_event", POINTER(tdb_event)),
("cursor_idx", c_uint64)]
class tdb_opt_value(Union):
_fields_ = [("ptr", c_void_p),
("value", c_uint64)]
TDB_OPT_EVENT_FILTER = 101
api(lib.tdb_cons_init, [], tdb_cons)
api(lib.tdb_cons_open,
[tdb_cons, c_char_p, POINTER(c_char_p), c_uint64], tdb_error)
api(lib.tdb_cons_close, [tdb_cons])
api(lib.tdb_cons_add,
[tdb_cons, POINTER(c_ubyte), c_uint64,
POINTER(c_char_p), POINTER(c_uint64)],
tdb_error)
api(lib.tdb_cons_append, [tdb_cons, tdb], tdb_error)
api(lib.tdb_cons_finalize, [tdb_cons], tdb_error)
api(lib.tdb_init, [], tdb)
api(lib.tdb_open, [tdb, c_char_p], tdb_error)
api(lib.tdb_close, [tdb])
api(lib.tdb_lexicon_size, [tdb, tdb_field], tdb_error)
api(lib.tdb_get_field, [tdb, c_char_p], tdb_error)
api(lib.tdb_get_field_name, [tdb, tdb_field], c_char_p)
api(lib.tdb_get_item, [tdb, tdb_field, POINTER(c_char), c_uint64], tdb_item)
api(lib.tdb_get_value,
[tdb, tdb_field, tdb_val, POINTER(c_uint64)], POINTER(c_char))
api(lib.tdb_get_item_value,
[tdb, tdb_item, POINTER(c_uint64)], POINTER(c_char))
api(lib.tdb_get_uuid, [tdb, c_uint64], POINTER(c_ubyte))
api(lib.tdb_get_trail_id,
[tdb, POINTER(c_ubyte), POINTER(c_uint64)], tdb_error)
api(lib.tdb_error_str, [tdb_error], c_char_p)
api(lib.tdb_num_trails, [tdb], c_uint64)
api(lib.tdb_num_events, [tdb], c_uint64)
api(lib.tdb_num_fields, [tdb], c_uint64)
api(lib.tdb_min_timestamp, [tdb], c_uint64)
api(lib.tdb_max_timestamp, [tdb], c_uint64)
api(lib.tdb_version, [tdb], c_uint64)
api(lib.tdb_cursor_new, [tdb], tdb_cursor)
api(lib.tdb_cursor_free, [tdb])
api(lib.tdb_cursor_next, [tdb_cursor], POINTER(tdb_event))
api(lib.tdb_get_trail, [tdb_cursor, c_uint64], tdb_error)
api(lib.tdb_get_trail_length, [tdb_cursor], c_uint64)
api(lib.tdb_cursor_set_event_filter, [tdb_cursor, tdb_event_filter], tdb_error)
api(lib.tdb_multi_cursor_new, [POINTER(tdb_cursor), c_uint64], tdb_multi_cursor)
api(lib.tdb_multi_cursor_free, [tdb_multi_cursor])
api(lib.tdb_multi_cursor_reset, [tdb_multi_cursor])
api(lib.tdb_multi_cursor_next, [tdb_multi_cursor], POINTER(tdb_multi_event))
api(lib.tdb_multi_cursor_next_batch, [tdb_multi_cursor, POINTER(tdb_multi_event), c_uint64])
api(lib.tdb_event_filter_new, [], tdb_event_filter)
api(lib.tdb_event_filter_add_term, [tdb_event_filter, tdb_item, c_int], tdb_error)
api(lib.tdb_event_filter_add_time_range, [c_uint64, c_uint64], tdb_error)
api(lib.tdb_event_filter_new_clause, [tdb_event_filter], tdb_error)
api(lib.tdb_event_filter_new_match_none, [], tdb_event_filter)
api(lib.tdb_event_filter_new_match_all, [], tdb_event_filter)
api(lib.tdb_event_filter_free, [tdb_event_filter])
api(lib.tdb_set_opt, [tdb, c_uint, tdb_opt_value], tdb_error)
api(lib.tdb_set_trail_opt, [tdb, c_uint64, c_uint, tdb_opt_value], tdb_error)
def uuid_hex(uuid):
"""
:returns: Given a binary UUID, encodes it into hex.
"""
if isinstance(uuid, basestring):
return uuid
return codecs.encode(string_at(uuid, 16), HEX).decode(CODEC)
def uuid_raw(uuid):
"""
:returns: Given a hex UUID, encodes it into binary.
"""
if isinstance(uuid, basestring):
return (c_ubyte * 16).from_buffer_copy(codecs.decode(uuid, HEX))
return uuid
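# Round-trip note (added for illustration): uuid_raw('00000000000000000000000000000001')
# yields a 16-byte c_ubyte array, and uuid_hex() of that array returns the same
# 32-character hex string; each helper returns its input unchanged when it is already
# in the requested form.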
def nullterm(strs, size):
return '\x00'.join(strs) + (size - len(strs) + 1) * '\x00'
# Port of tdb_item_field and tdb_item_val in tdb_types.h. Cannot use
# them directly as they are inlined functions.
def tdb_item_is32(item):
return not (item & 128)
def tdb_item_field32(item):
return item & 127
def tdb_item_val32(item):
return (item >> 8) & 4294967295 # UINT32_MAX
def tdb_item_field(item):
"""Return field-part of an item."""
if tdb_item_is32(item):
return tdb_item_field32(item)
else:
return (item & 127) | (((item >> 8) & 127) << 7)
def tdb_item_val(item):
"""Return value-part of an item."""
if tdb_item_is32(item):
return tdb_item_val32(item)
else:
return item >> 16
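# Worked example (added for illustration): in the 32-bit item layout the value sits in the
# upper bytes and the field in the low 7 bits, so an item encoding field 3 with value 42 is
# (42 << 8) | 3 == 10755, giving:
#   tdb_item_field(10755) -> 3
#   tdb_item_val(10755)   -> 42
# Items with bit 7 set use the wider 64-bit layout handled by the else branches above.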
class TrailDBError(Exception):
"""This is the exception thrown when something fails with TrailDB."""
pass
class TrailDBConstructor(object):
"""Objects of this class are used to Construct new TrailDBs."""
def __init__(self, path, ofields=()):
"""Initialize a new TrailDB constructor.
:param path: TrailDB output path (without .tdb).
:param ofields: List of field (names) in this TrailDB.
.. code-block:: python
import traildb
cons = traildb.TrailDBConstructor('example', ['type', 'flavor'])
cons.add('00000000000000000000000000000001', 123, ['click', 'tasty'])
cons.add('00000000000000000000000000000002', 129, ['flash', 'sour'])
cons.finalize() # Don't forget to finalize, otherwise you won't get a full TrailDB.
"""
if not path:
raise TrailDBError("Path is required")
n = len(ofields)
if isinstance(path, str):
path = path.encode(CODEC)
ofield_names = (c_char_p * n)(*[name.encode(CODEC)
for name in ofields])
self._cons = lib.tdb_cons_init()
if lib.tdb_cons_open(self._cons, path, ofield_names, n) != 0:
raise TrailDBError("Cannot open constructor")
self.path = path
self.ofields = ofields
def __del__(self):
if hasattr(self, '_cons'):
lib.tdb_cons_close(self._cons)
def add(self, uuid, tstamp, values):
"""Add an event in TrailDB.
:param uuid: UUID of this event.
:param tstamp: Timestamp of this event (datetime or integer).
:param values: value of each field.
.. code-block:: python
cons.add('00000000000000000000000000000001', 123, ['click', 'tasty'])
"""
if isinstance(tstamp, datetime):
tstamp = int(time.mktime(tstamp.timetuple()))
n = len(self.ofields)
values = [v.encode(CODEC) if not isinstance(v, bytes)
else v for v in values]
value_array = (c_char_p * n)(*values)
value_lengths = (c_uint64 * n)(*[len(v) for v in values])
f = lib.tdb_cons_add(self._cons, uuid_raw(uuid), tstamp, value_array,
value_lengths)
if f:
raise TrailDBError("Too many values: %s" % values[f])
def append(self, db):
"""Merge an existing TrailDB in this TrailDB.
:param db: An instance of :py:class:`~traildb.TrailDB` you want to merge to this one.
"""
f = lib.tdb_cons_append(self._cons, db._db)
if f < 0:
raise TrailDBError("Wrong number of fields: %d" % db.num_fields)
if f > 0:
raise TrailDBError("Too many values: %s" % db.num_fields)
def finalize(self, decode=True):
"""Finalize this TrailDB. You cannot add new events in this TrailDB
after calling this function.
You need to finalize :py:class:`~traildb.TrailDBConstructor` or you
will not have an openable TrailDB later. Finalization is where all the
compression and preparation happen and is typically the most
resource-intensive part of TrailDB building.
:returns: Opened :py:class:`~traildb.TrailDB`:
"""
r = lib.tdb_cons_finalize(self._cons)
if r:
raise TrailDBError("Could not finalize (%d)" % r)
return TrailDB(self.path, decode)
class TrailDBCursor(object):
"""TrailDBCursor iterates over events of a trail.
Typically this class is not instantiated directly but it is
returned by TrailDB.trail() or TrailDB.cursor()
A cursor can be reused for different trails by calling
TrailDBCursor.get_trail(trail_id)
"""
def __init__(self,
cursor,
cls,
valuefun,
parsetime,
only_timestamp,
event_filter_obj):
self.cursor = cursor
self.valuefun = valuefun
self.parsetime = parsetime
self.cls = cls
self.only_timestamp = only_timestamp
if event_filter_obj:
self.event_filter_obj = event_filter_obj
if lib.tdb_cursor_set_event_filter(cursor, event_filter_obj.flt):
raise TrailDBError("cursor_set_event_filter failed")
else:
self.event_filter_obj = None
def __del__(self):
if self.cursor:
lib.tdb_cursor_free(self.cursor)
def __iter__(self):
return self
def __next__(self):
"""Return the next event in the trail."""
event = lib.tdb_cursor_next(self.cursor)
if not event:
raise StopIteration()
address = addressof(event.contents.items)
items = (tdb_item * event.contents.num_items).from_address(address)
timestamp = event.contents.timestamp
if self.parsetime:
timestamp = datetime.fromtimestamp(event.contents.timestamp)
if self.only_timestamp:
return timestamp
elif self.valuefun:
return self.cls(False, timestamp, *items)
else:
return self.cls(True, timestamp, *items)
def get_trail(self, trail_id):
if lib.tdb_get_trail(self.cursor, trail_id) != 0:
raise TrailDBError("Failed to initalize trail in cursor")
if self.event_filter_obj:
if lib.tdb_cursor_set_event_filter(self.cursor, self.event_filter_obj.flt):
raise TrailDBError("cursor_set_event_filter failed")
class TrailDBMultiCursor(object):
"""
TrailDBMultiCursor iterates over the events of multiple trails,
merged together into a single trail with events sorted in the ascending
time order. The trails can be from different traildbs.
To use, initialize and then set the cursors using the set_cursors method.
To reuse a multicursor, set new trails on the underlying cursors and then
call :py:meth:`~traildb.TrailDBMultiCursor.reset()`. If filtering, apply event filters to the underlying
cursors individually before setting them on the multicursor, or call reset after doing so
if already set.
"""
def __init__(self, parsetime, rawitems, only_timestamp):
"""
:param parsetime: If True, returns datetime objects instead of integer timestamps.
:param rawitems: Return raw integer items instead of stringified values. Using raw items is usually a bit more efficient than using string values.
:param only_timestamp: If True, only return timestamps, not event objects.
"""
self.parsetime = parsetime
self.rawitems = rawitems
self.only_timestamp = only_timestamp
self.multicursor = None
self._ready = False
def __del__(self):
if self.multicursor:
lib.tdb_multi_cursor_free(self.multicursor)
def __iter__(self):
return self
def __next__(self):
"""
return the next event in the combined trails, in ascending timestamp order
this will return tuples in the form of `(event, traildb)`, where the traildb
is the :py:class:`~traildb.TrailDB` the event belongs to. This can be used to
get the values if rawitems is used.
"""
if not self._ready:
raise TrailDBError("Multicursor not initialized, call set_cursors")
multi_event = lib.tdb_multi_cursor_next(self.multicursor)
if multi_event:
event = self.to_event(multi_event.contents)
else:
raise StopIteration()
return event
def to_event(self, multi_event):
event = multi_event.tdb_event
tdb_ptr = multi_event.db
timestamp = event.contents.timestamp
if self.parsetime:
timestamp = datetime.fromtimestamp(event.contents.timestamp)
if self.only_timestamp:
return timestamp
try:
traildb = self._traildbs[tdb_ptr]
except KeyError:
raise TrailDBError("TrailDBMultiCursor encountered a traildb that was not included in set_cursors")
address = addressof(event.contents.items)
items = (tdb_item * event.contents.num_items).from_address(address)
if self.rawitems:
return traildb._event_cls(True, timestamp, *items), traildb
else:
return traildb._event_cls(False, timestamp, *items), traildb
def set_cursors(self, cursors, traildbs):
"""
configure this multicursor to merge the specified cursors. This is required before use.
:param cursors: list of :py:class:`~traildb.TrailDBCursor` instances to merge
:param traildbs: list of :py:class:`~traildb.TrailDB` instances from which the cursors were created (only needs to be specified once, even if there are multiple cursors from the same TrailDB)
"""
n_cursors = len(cursors)
cursor_array = (tdb_cursor * n_cursors)(*[c.cursor for c in cursors])
# maintain references to these in Python so they won't get garbage collected
self._cursor_arr = cursor_array
self.cursors = cursors
self.multicursor = lib.tdb_multi_cursor_new(cursor_array, n_cursors)
interpreted as face-relative and each pair of entries
will be taken as a (face, triangle) pair to be considered for
intersection. Thus, the face-triangle pair (10, 0) means the first
triangle on face 10. If neither faceIds nor triIds is given, then
all face-triangles in the mesh will be considered.
The maxParam and testBothDirections flags can be used to control the
radius of the search around the raySource point.
The search proceeds by testing all applicable face-triangles looking
for intersections. If the accelParams parameter is given then the
mesh builds an intersection acceleration structure based on it. This
acceleration structure is used to speed up the intersection
operation, sometimes by a factor of several hundred over the non-
accelerated case. Once created, the acceleration structure is cached
and will be reused the next time this method (or anyIntersection()
or allIntersections()) is called with an identically-configured
MMeshIsectAccelParams object. If a different MMeshIsectAccelParams
object is used, then the acceleration structure will be deleted and
re-created according to the new settings. Once created, the
acceleration structure will persist until either the object is
destroyed (or rebuilt by a construction history operation), or the
freeCachedIntersectionAccelerator() method is called. The
cachedIntersectionAcceleratorInfo() and
globalIntersectionAcceleratorsInfo() methods provide useful
information about the resource usage of individual acceleration
structures, and of all such structures in the system.
If the ray hits the mesh, the details of the intersection point
will be returned as a tuple containing the following:
* hitPoint (MFloatPoint) - coordinates of the point hit, in the space specified by the caller.
* hitRayParam (float) - parametric distance along the ray to the point hit.
* hitFace (int) - ID of the face hit
* hitTriangle (int) - face-relative ID of the triangle hit
* hitBary1 (float) - first barycentric coordinate of the point hit. If the vertices of the hitTriangle are (v1, v2, v3) then the barycentric coordinates are such that the hitPoint = (*hitBary1)*v1 + (*hitBary2)*v2 + (1-*hitBary1-*hitBary2)*v3.
* hitBary2 (float) - second barycentric coordinate of the point hit.
If no point was hit then the arrays will all be empty.
"""
pass
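# A minimal usage sketch of the mesh-intersection methods documented here,
# not part of the stub itself. It assumes a running Maya session with the
# Python API 2.0 available and a mesh named 'pSphere1' in the scene; all
# names and numeric values are illustrative only.
#
#   import maya.api.OpenMaya as om
#   sel = om.MSelectionList()
#   sel.add('pSphere1')
#   fnMesh = om.MFnMesh(sel.getDagPath(0))
#   raySource = om.MFloatPoint(0.0, 10.0, 0.0)
#   rayDir = om.MFloatVector(0.0, -1.0, 0.0)
#   hit = fnMesh.closestIntersection(raySource, rayDir, om.MSpace.kWorld,
#                                    9999.0, False)
#   if hit:
#       hitPoint, hitRayParam, hitFace, hitTriangle, hitBary1, hitBary2 = hit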
def assignColor(*args, **kwargs):
"""
assignColor(faceId, vertexIndex, colorId, colorSet='') -> self
Assigns a color from a colorSet to a specified vertex of a face.
"""
pass
def assignColors(*args, **kwargs):
"""
assignColors(colorIds, colorSet='') -> self
Assigns colors to all of the mesh's face-vertices. The colorIds
sequence must contain an entry for every vertex of every face, in
face order, meaning that the entries for all the vertices of face 0
come first, followed by the entries for the vertices of face 1, etc.
"""
pass
def assignUV(*args, **kwargs):
"""
assignUV(faceId, vertexIndex, uvId, uvSet='') -> self
Assigns a UV coordinate from a uvSet to a specified vertex of a face.
"""
pass
def assignUVs(*args, **kwargs):
"""
assignUVs(uvCounts, uvIds, uvSet='') -> self
Assigns UV coordinates to the mesh's face-vertices.
uvCounts contains the number of UVs to assign for each of the mesh's
faces. That number must equal the number of vertices in the
corresponding face or be 0 to indicate that no UVs will be assigned
to that face.
"""
pass
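# Illustrative sketch only (the mesh, counts and uv-set name are assumptions):
# for a mesh made of two quads whose UVs were added with setUVs(), a call like
#
#   fnMesh.assignUVs([4, 4], [0, 1, 2, 3, 4, 5, 6, 7], uvSet='map1')
#
# assigns UV ids 0-3 to the vertices of face 0 and ids 4-7 to face 1.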
def booleanOp(*args, **kwargs):
"""
booleanOp(Boolean Operation constant, MFnMesh, MFnMesh) -> self
Replaces this mesh's geometry with the result of a boolean operation
on the two specified meshes.
"""
pass
def cachedIntersectionAcceleratorInfo(*args, **kwargs):
"""
cachedIntersectionAcceleratorInfo() -> string
Retrieves a string that describes the intersection acceleration
structure for this object, if any. The string will be of the
following form:
10x10x10 uniform grid, (build time 0.5s), (memory footprint 2000KB)
It describes the configuration of the cached intersection
accelerator, as well as how long it took to build it, and how much
memory it is currently occupying. If the mesh has no cached
intersection accelerator, the empty string is returned.
"""
pass
def cleanupEdgeSmoothing(*args, **kwargs):
"""
cleanupEdgeSmoothing() -> self
Updates the mesh after setEdgeSmoothing has been done. This should
be called only once, after all the desired edges have had their
smoothing set. If you don't call this method, the normals may not be
correct, and the object will look odd in shaded mode.
"""
pass
def clearBlindData(*args, **kwargs):
"""
clearBlindData(compType) -> self
clearBlindData(compType, blindDataId, compId=None, attr='') -> self
The first version deletes all blind data from all the mesh's
components of the given type (an MFn Type constant).
The second version deletes values of the specified blind data type
from the mesh's components of a given type. If a component ID is
provided then the data is only deleted from that component,
otherwise it is deleted from all of the mesh's components of the
specified type. If a blind data attribute name is provided then only
data for that attribute is deleted, otherwise data for all of the
blind data type's attributes is deleted.
"""
pass
def clearColors(*args, **kwargs):
"""
clearColors(colorSet='') -> self
Clears out all colors from a colorSet, and leaves behind an empty
colorset. This method should be used if it is needed to shrink the
actual size of the color set. In this case, the user should call
clearColors(), setColors() and then assignColors() to rebuild the
mapping info.
When called on mesh data, the colors are removed. When called on a
shape with no history, the colors are removed and the attributes are
set on the shape. When called on a shape with history, the
polyColorDel command is invoked and a polyColorDel node is created.
If no colorSet is specified the mesh's current color set will be used.
"""
pass
def clearUVs(*args, **kwargs):
"""
clearUVs(uvSet='') -> self
Clears out all uvs from a uvSet, and leaves behind an empty
uvset. This method should be used if it is needed to shrink the
actual size of the uv set. In this case, the user should call
clearUVs(), setUVs() and then assignUVs() to rebuild the
mapping info.
When called on mesh data, the uvs are removed. When called on a
shape with no history, the uvs are removed and the attributes are
set on the shape. When called on a shape with history, the
polyMapDel command is invoked and a polyMapDel node is created.
If no uvSet is specified the mesh's current uv set will be used.
"""
pass
def closestIntersection(*args, **kwargs):
"""
closestIntersection(raySource, rayDirection, space, maxParam,
testBothDirections, faceIds=None, triIds=None, idsSorted=False,
accelParams=None, tolerance=kIntersectTolerance)
-> (hitPoint, hitRayParam, hitFace, hitTriangle, hitBary1, hitBary2)
Finds the closest intersection of a ray starting at raySource and
travelling in rayDirection with the mesh.
If faceIds is specified, then only those faces will be considered
for intersection. If both faceIds and triIds are given, then the
triIds will be interpreted as face-relative and each pair of entries
will be taken as a (face, triangle) pair to be considered for
intersection. Thus, the face-triangle pair (10, 0) means the first
triangle on face 10. If neither faceIds nor triIds is given, then
all face-triangles in the mesh will be considered.
The maxParam and testBothDirections flags can be used to control the
radius of the search around the raySource point.
The search proceeds by testing all applicable face-triangles looking
for intersections. If the accelParams parameter is given then the
mesh builds an intersection acceleration structure based on it. This
acceleration structure is used to speed up the intersection
operation, sometimes by a factor of several hundred over the non-
accelerated case. Once created, the acceleration structure is cached
and will be reused the next time this method (or anyIntersection()
| |
class HTTPHeader(HelmYaml):
"""
:param name: The header field name
:param value: The header field value
"""
def __init__(self, name: str, value: str):
self.name = name
self.value = value
class HTTPGetAction(HelmYaml):
"""
:param path: Path to access on the HTTP server.
:param port: Name or number of the port to access on the container. Number must be \
in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param http_headers: Custom headers to set in the request. HTTP allows repeated \
headers.
:param host: Host name to connect to, defaults to the pod IP. You probably want to \
set "Host" in httpHeaders instead.
:param scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
def __init__(
self,
path: str,
port: Union[int, str],
http_headers: Optional[List[HTTPHeader]] = None,
host: Optional[str] = None,
scheme: Optional[str] = None,
):
self.httpHeaders = http_headers
self.path = path
self.port = port
self.host = host
self.scheme = scheme
class Handler(HelmYaml):
"""
:param exec: One and only one of the following should be specified. Exec specifies \
the action to take.
:param http_get: HTTPGet specifies the http request to perform.
:param tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks \
not yet supported
"""
def __init__(
self,
exec: Optional[ExecAction] = None,
http_get: Optional[HTTPGetAction] = None,
tcp_socket: Optional[TCPSocketAction] = None,
):
self.exec = exec
self.httpGet = http_get
self.tcpSocket = tcp_socket
class Lifecycle(HelmYaml):
"""
:param post_start: PostStart is called immediately after a container is created. If \
the handler fails, the container is terminated and restarted according to its \
restart policy. Other management of the container blocks until the hook \
completes. More info: \
https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks # noqa
:param pre_stop: PreStop is called immediately before a container is terminated due \
to an API request or management event such as liveness/startup probe failure, \
preemption, resource contention, etc. The handler is not called if the \
container crashes or exits. The reason for termination is passed to the \
handler. The Pod's termination grace period countdown begins before the \
PreStop hook is executed. Regardless of the outcome of the handler, the \
container will eventually terminate within the Pod's termination grace period. \
Other management of the container blocks until the hook completes or until the \
termination grace period is reached. More info: \
https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks # noqa
"""
def __init__(
self, post_start: Optional[Handler] = None, pre_stop: Optional[Handler] = None
):
self.postStart = post_start
self.preStop = pre_stop
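# Illustrative sketch only: a Lifecycle whose postStart hook performs an HTTP
# GET against the container, built from the classes defined above. The path
# and port are assumptions, not values mandated by this module.
def _example_lifecycle() -> Lifecycle:
    return Lifecycle(
        post_start=Handler(http_get=HTTPGetAction(path="/warmup", port=8080))
    )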
class ContainerPort(HelmYaml):
"""
:param container_port: Number of port to expose on the pod's IP address. This must \
be a valid port number, 0 < x < 65536.
:param host_ip: What host IP to bind the external port to.
:param host_port: Number of port to expose on the host. If specified, this must be \
a valid port number, 0 < x < 65536. If HostNetwork is specified, this must \
match ContainerPort. Most containers do not need this.
:param name: If specified, this must be an IANA_SVC_NAME and unique within the pod. \
Each named port in a pod must have a unique name. Name for the port that can \
be referred to by services.
:param protocol: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
def __init__(
self,
container_port: int,
host_ip: Optional[str] = None,
host_port: Optional[int] = None,
name: Optional[str] = None,
protocol: Optional[str] = None,
):
self.containerPort = container_port
self.hostIP = host_ip
self.hostPort = host_port
self.name = name
self.protocol = protocol
class VolumeMount(HelmYaml):
"""
:param name: This must match the Name of a Volume.
:param mount_path: Path within the container at which the volume should be mounted. \
Must not contain ':'.
:param mount_propagation: mountPropagation determines how mounts are propagated \
from the host to container and the other way around. When not set, \
MountPropagationNone is used. This field is beta in 1.10.
:param read_only: Mounted read-only if true, read-write otherwise (false or \
unspecified). Defaults to false.
:param sub_path: Path within the volume from which the container's volume should be \
mounted. Defaults to "" (volume's root).
:param sub_path_expr: Expanded path within the volume from which the container's \
volume should be mounted. Behaves similarly to SubPath but environment \
variable references $(VAR_NAME) are expanded using the container's \
environment. Defaults to "" (volume's root). SubPathExpr and SubPath are \
mutually exclusive.
"""
def __init__(
self,
name: str,
mount_path: str,
mount_propagation: Optional[str] = None,
read_only: Optional[bool] = None,
sub_path: Optional[str] = None,
sub_path_expr: Optional[str] = None,
):
self.name = name
self.mountPath = mount_path
self.mountPropagation = mount_propagation
self.readOnly = read_only
self.subPath = sub_path
self.subPathExpr = sub_path_expr
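# Illustrative sketch only (names and paths are assumptions): mount the volume
# named 'config' read-only at /etc/app. Note that sub_path and sub_path_expr
# are mutually exclusive, so only one of them is supplied here.
def _example_volume_mount() -> VolumeMount:
    return VolumeMount(
        name="config",
        mount_path="/etc/app",
        read_only=True,
        sub_path="settings.yaml",
    )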
class Probe(HelmYaml):
"""
:param exec: One and only one of the following should be specified. Exec specifies \
the action to take.
:param http_get: HTTPGet specifies the http request to perform.
:param initial_delay_seconds: Number of seconds after the container has started \
before liveness probes are initiated. More info: \
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa
:param period_seconds: How often (in seconds) to perform the probe. Default to 10 \
seconds. Minimum value is 1.
:param tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks \
not yet supported
:param failure_threshold: Minimum consecutive failures for the probe to be \
considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param success_threshold: Minimum consecutive successes for the probe to be \
considered successful after having failed. Defaults to 1. Must be 1 for \
liveness and startup. Minimum value is 1.
:param timeout_seconds: Number of seconds after which the probe times out. Defaults \
to 1 second. Minimum value is 1. More info: \
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa
"""
def __init__(
self,
exec: Optional[ExecAction] = None,
http_get: Optional[HTTPGetAction] = None,
initial_delay_seconds: Optional[int] = None,
period_seconds: Optional[int] = None,
tcp_socket: Optional[TCPSocketAction] = None,
failure_threshold: Optional[int] = None,
success_threshold: Optional[int] = None,
timeout_seconds: Optional[int] = None,
):
self.exec = exec
self.httpGet = http_get
self.initialDelaySeconds = initial_delay_seconds
self.periodSeconds = period_seconds
self.tcpSocket = tcp_socket
self.failureThreshold = failure_threshold
self.successThreshold = success_threshold
self.timeoutSeconds = timeout_seconds
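# Illustrative sketch only (endpoint, port and timings are assumptions): an
# HTTP liveness probe that issues GET /healthz on port 8080 with a custom
# header, built from the classes defined above.
def _example_liveness_probe() -> Probe:
    return Probe(
        http_get=HTTPGetAction(
            path="/healthz",
            port=8080,
            http_headers=[HTTPHeader(name="X-Probe", value="liveness")],
        ),
        initial_delay_seconds=5,
        period_seconds=10,
        failure_threshold=3,
    )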
class Container(HelmYaml):
"""
:param name: Name of the container specified as a DNS_LABEL. Each container in a \
pod must have a unique name (DNS_LABEL). Cannot be updated.
:param args: Arguments to the entrypoint. The docker image's CMD is used if this is \
not provided. Variable references $(VAR_NAME) are expanded using the \
container's environment. If a variable cannot be resolved, the reference in \
the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with \
a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, \
regardless of whether the variable exists or not. Cannot be updated. More \
info: \
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa
:param command: Entrypoint array. Not executed within a shell. The docker image's \
ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) \
are expanded using the container's environment. If a variable cannot be \
resolved, the reference in the input string will be unchanged. The $(VAR_NAME) \
syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references \
will never be expanded, regardless of whether the variable exists or not. \
Cannot be updated. More info: \
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa
:param env: List of environment variables to set in the container. Cannot be \
updated.
:param env_from: List of sources to populate environment variables in the \
container. The keys defined within a source must be a C_IDENTIFIER. All \
invalid keys will be reported as an event when the container is starting. When \
a key exists in multiple sources, the value associated with the last source \
will take precedence. Values defined by an Env with a duplicate key will take \
precedence. Cannot be updated.
:param image: Docker image name. More info: \
https://kubernetes.io/docs/concepts/containers/images This field is optional \
to allow | |
<filename>download/xml_pickle/xml_pickle-0.30.py
"""Store Python objects to (pickle-like) XML Documents
Note 0:
See http://gnosis.cx/publish/programming/xml_matters_1.txt
for a detailed discussion of this module.
Note 1:
The XML-SIG distribution is changed fairly frequently while
it is in beta versions. The changes in turn are extremely
likely to affect the functioning of [xml_pickle].
This version of [xml_pickle] is known to work with PyXML
0.6.1, and will probably continue to work with later betas
and also with the XML-SIG distribution included in Python
2.0. To download a current version of PyXML, go to:
http://download.sourceforge.net/pyxml/
Version 0.22 of [xml_pickle] was designed around PyXML 0.5.2.
If you use an older version of PyXML, you may download a
compatible version of [xml_pickle] at:
http://gnosis.cx/download/xml_pickle-0.22.py
And a known-to-be-compatible PyXML distribution at:
http://gnosis.cx/download/py_xml_04-21-00.exe
http://gnosis.cx/download/py_xml_04-21-00.zip
The first URL is the Windows self-installer, the latter is
simply an archive of those files to be unpacked under
$PYTHONPATH/xml.
Usage:
# By inheritence
from xml_pickle import XML_Pickler
class MyClass(XML_Pickler):
# create some behavior and attributes for MyClass...
o1 = MyClass()
xml_str = o1.dumps()
o2 = MyClass()
o2.loads(xml_str)
# With inline instantiation
from xml_pickle import XML_Pickler
o1 = DataClass()
# ...assign attribute values to o1...
xml_str = XML_Pickler(o1).dumps()
o2 = XML_Pickler().loads(xml_str)
Classes:
PyObject
XML_Pickler
Functions:
thing_from_dom(dom_node, container)
obj_from_node(node)
subnodes(node)
_attr_tag(...)
_item_tag(...)
_entry_tag(...)
_tag_completer(...)
_klass(...)
safe_eval(s)
safe_string(s)
unsafe_string(s)
"""
__version__ = "$Revision: 0.30 $"
__author__=["<NAME> (<EMAIL>)",]
__thanks_to__=["<NAME> (<EMAIL>)",
"<NAME> (<EMAIL>)",
"<NAME> (<EMAIL>)"]
__copyright__="""
This file is released to the public domain. I (dqm) would
appreciate it if you choose to keep derived works under terms
that promote freedom, but obviously am giving up any rights
to compel such.
"""
__history__="""
0.1 Initial version
0.22 Compatible with PyXML 0.52
0.30 Compatible with PyXML 0.61+
"""
from types import *
from xml.dom import ext
from xml.dom.ext.reader import Sax2
import cStringIO
XMLPicklingError = "xml_pickle.XMLPicklingError"
XMLUnpicklingError = "xml_pickle.XMLUnpicklingError"
class PyObject:
"""Placeholder template class"""
def __init__(self, __fakename__=None):
if __fakename__: self.__fakename__ = __fakename__
class XML_Pickler:
"""Framework for 'pickle to XML'"""
def __init__(self, py_obj=None):
if py_obj is not None:
if type(py_obj)<>InstanceType:
raise ValueError, \
"XML_Pickler must be initialized with Instance (or None)"
self.py_obj = py_obj
else:
self.py_obj = PyObject(self.__class__.__name__)
def __setattr__(self, name, value):
if name == 'py_obj':
self.__dict__[name] = value
else:
setattr(self.py_obj, name, value)
def __getattr__(self, name):
return getattr(self.py_obj, name)
def __delattr__(self, name):
del self.py_obj.__dict__[name]
def dump(self, fh):
# admittedly, our approach requires creating whole output XML in
# memory first, which could be large for a complex object. Maybe
# we'll make this more efficient later.
fh.write(self.dumps())
def load(self, fh):
return thing_from_dom(Sax2.FromXml(fh.read(), validate=0))
def dumps(self):
xml_str = '<?xml version="1.0"?>\n' +\
'<!DOCTYPE PyObject SYSTEM "PyObjects.dtd">\n'
xml_str = xml_str+'<PyObject class="%s">\n' % _klass(self.py_obj)
for name in dir(self.py_obj):
xml_str = xml_str+_attr_tag(name, getattr(self, name))
xml_str = xml_str+'</PyObject>'
return xml_str
def loads(self, xml_str):
fh = cStringIO.StringIO(xml_str)
obj = self.load(fh)
fh.close()
return obj
#-- support functions
def thing_from_dom(dom_node, container=None):
"""Converts an [xml_pickle] DOM tree to a "native" Python object"""
for node in subnodes(dom_node):
if node.nodeName == "PyObject":
# Add all the subnodes to PyObject container
container = thing_from_dom(node, obj_from_node(node))
elif node.nodeName == 'attr':
try:
node_type = node.attributes[('','type')].value
except:
print "node", node.attributes, repr(node.attributes)
print node.attributes.keys()
raise # WHAT?!
node_name = node.attributes[('', 'name')].value
if node_type == 'None':
setattr(container, node_name, None)
elif node_type == 'numeric':
node_val = safe_eval(node.attributes[('','value')].value)
setattr(container, node_name, node_val)
elif node_type == 'string':
node_val = node.attributes[('','value')].value
node_val = unsafe_string(node_val)
setattr(container, node_name, node_val)
elif node_type == 'list':
subcontainer = thing_from_dom(node, [])
setattr(container, node_name, subcontainer)
elif node_type == 'tuple':
subcontainer = thing_from_dom(node, []) # use list then convert
setattr(container, node_name, tuple(subcontainer))
elif node_type == 'dict':
subcontainer = thing_from_dom(node, {})
setattr(container, node_name, subcontainer)
elif node_type == 'PyObject':
subcontainer = thing_from_dom(node, obj_from_node(node))
setattr(container, node_name, subcontainer)
elif node.nodeName in ['item', 'key', 'val']:
# -- Odd behavior warning --
# The 'node_type' expression has an odd tendency to be a
# one-element tuple rather than a string. Doing the str()
# fixes things, but I'm not sure why!
# -- About key/val nodes --
# There *should not* be mutable types as keys, but to cover
# all cases, elif's are defined for mutable types. Furthermore,
# there should only ever be *one* item in any key/val list,
# but we again rely on other validation of the XML happening.
node_type = str(node.attributes[('','type')].value)
if node_type == 'numeric':
node_val = safe_eval(node.attributes[('','value')].value)
container.append(node_val)
elif node_type == 'string':
node_val = node.attributes[('','value')].value
node_val = unsafe_string(node_val)
container.append(node_val)
elif node_type == 'list':
subcontainer = thing_from_dom(node, [])
container.append(subcontainer)
elif node_type == 'tuple':
subcontainer = thing_from_dom(node, []) # use list then convert
container.append(tuple(subcontainer))
elif node_type == 'dict':
subcontainer = thing_from_dom(node, {})
container.append(subcontainer)
elif node_type == 'PyObject':
subcontainer = thing_from_dom(node, obj_from_node(node))
container.append(subcontainer)
elif node.nodeName == 'entry':
keyval = thing_from_dom(node, [])
key, val = keyval[0], keyval[1]
container[key] = val
else:
raise XMLUnpicklingError, \
"element %s is not in PyObjects.dtd" % node.nodeName
return container
def obj_from_node(node):
# Get classname of object (with fallback to 'PyObject')
try:
if node.attributes:
klass = node.attributes[('','class')].value
else:
klass = 'PyObject'
except KeyError: klass = 'PyObject'
# does the class exist, or should we create it?
try: safe_eval(klass)
except NameError:
exec ('class %s: pass' % klass)
return eval('%s()' % klass)
def subnodes(node):
return filter(lambda n: n.nodeName<>'#text', node.childNodes)
def _attr_tag(name, thing, level=0):
start_tag = ' '*level+('<attr name="%s" ' % name)
close_tag =' '*level+'</attr>\n'
if name == '__fakename__': return ''
else:
return _tag_completer(start_tag, thing, close_tag, level)
def _item_tag(thing, level=0):
start_tag = ' '*level+'<item '
close_tag =' '*level+'</item>\n'
return _tag_completer(start_tag, thing, close_tag, level)
def _entry_tag(key, val, level=0):
start_tag = ' '*level+'<entry>\n'
close_tag = ' '*level+'</entry>\n'
start_key = ' '*level+' <key '
close_key = ' '*level+' </key>\n'
key_block = _tag_completer(start_key, key, close_key, level+1)
start_val = ' '*level+' <val '
close_val = ' '*level+' </val>\n'
val_block = _tag_completer(start_val, val, close_val, level+1)
return (start_tag + key_block + val_block + close_tag)
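# For reference, a call such as _entry_tag('doors', 4) produces (illustrative
# key/value, whitespace shown literally):
#   <entry>
#    <key type="string" value="doors" />
#    <val type="numeric" value="4" />
#   </entry>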
def _tag_completer(start_tag, thing, close_tag, level=0):
tag_body = ''
if type(thing) == NoneType:
start_tag = start_tag+'type="None" />\n'
close_tag = ''
elif type(thing) in [IntType, LongType, FloatType, ComplexType]:
start_tag = start_tag+'type="numeric" value="%s" />\n' % `thing`
close_tag = ''
elif type(thing) in [StringType]:
thing = safe_string(thing)
start_tag = start_tag+'type="string" value="%s" />\n' % thing
close_tag = ''
elif type(thing) in [TupleType]:
start_tag = start_tag+'type="tuple">\n'
for item in thing:
tag_body = tag_body+_item_tag(item, level+1)
elif type(thing) in [ListType]:
start_tag = start_tag+'type="list">\n'
for item in thing:
tag_body = tag_body+_item_tag(item, level+1)
elif type(thing) in [DictType]:
start_tag = start_tag+'type="dict">\n'
for key, val in thing.items():
tag_body = tag_body+_entry_tag(key, val, level+1)
elif type(thing) in [InstanceType]:
start_tag = start_tag+'type="PyObject" class="%s">\n' % _klass(thing)
for name in dir(thing):
tag_body = tag_body+_attr_tag(name, getattr(thing, name), level+1)
else:
raise XMLPicklingError, "non-handled type %s." % type(thing)
return start_tag+tag_body+close_tag
def _klass(thing):
if type(thing)<>InstanceType:
raise ValueError, \
"non-Instance type %s passed to _klass()" % type(thing)
if hasattr(thing, '__fakename__'): return thing.__fakename__
else: return thing.__class__.__name__
def safe_eval(s):
if 0: # Condition for malicious string in eval() block
raise "SecurityError", \
"Malicious string '%s' should not be eval()'d" % s
else:
return eval(s)
def safe_string(s):
import string, re
# markup XML entities
s = string.replace(s, '&', '&amp;')
s = string.replace(s, '<', '&lt;')
s = string.replace(s, '>', '&gt;')
s = string.replace(s, '"', '&quot;')
s = string.replace(s, "'", '&apos;')
# for others, use Python style escapes
s = repr(s)
return s[1:-1] # without the extra single-quotes
def unsafe_string(s):
import string, re
# for Python escapes, exec the string
# (niggle w/ literalizing apostrophe)
s = string.replace(s, "'", r"\047")
exec "s='"+s+"'"
# XML entities (DOM does it for us)
return s
#-- Hand generated test object
test_xml = """<?xml version="1.0"?>
<!DOCTYPE PyObject SYSTEM "PyObjects.dtd">
<PyObject class="Automobile">
<attr name="doors" type="numeric" value="4" />
<attr name="make" type="string" value="Honda" />
<attr name="tow_hitch" type="None" />
<attr name="prev_owners" type="tuple">
<item type="string" value="<NAME>" />
<item type="tuple">
<item type="string" value="<NAME>" />
<item type="string" value="<NAME>" />
</item>
<item type="string" value="<NAME>" />
</attr>
<attr name="repairs" type="list">
<item type="string" value="June 1, 1999: Fixed radiator" />
<item type="PyObject" class="Swindle">
<attr name="date" type="string" value="July 1, 1999" />
<attr name="swindler" type="string" value="Ed's Auto" />
<attr name="purport" type="string" value="Fix A/C" />
</item>
</attr>
<attr name="options" type="dict">
<entry>
<key type="string" value="Cup Holders" />
<val type="numeric" value="4" />
</entry>
<entry>
<key type="string" value="Custom Wheels" />
<val type="string" value="Chrome Spoked" />
</entry>
</attr>
<attr name="engine" type="PyObject" class="Engine">
<attr name="cylinders" type="numeric" value="4" />
<attr name="manufacturer" | |
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('test.stone', text)])
self.assertEqual(
"Symbol 'S' already defined (test.stone:3).",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 6)
# Test name from two specs that are part of the same namespace
text1 = textwrap.dedent("""\
namespace test
struct S
f String
""")
text2 = textwrap.dedent("""\
namespace test
struct S
f String
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('test1.stone', text1), ('test2.stone', text2)])
self.assertEqual(
"Symbol 'S' already defined (test1.stone:3).",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 4)
def test_imported_namespaces(self):
text1 = textwrap.dedent("""\
namespace ns1
struct S1
f1 String
struct S2
f2 String
alias Iso8601 = Timestamp("%Y-%m-%dT%H:%M:%SZ")
""")
text2 = textwrap.dedent("""\
namespace ns2
import ns1
struct S3
f3 String
f4 ns1.Iso8601?
f5 ns1.S1?
example default
f3 = "hello"
f4 = "2015-05-12T15:50:38Z"
route r1(ns1.S1, ns1.S2, S3)
""")
api = specs_to_ir([('ns1.stone', text1), ('ns2.stone', text2)])
self.assertEqual(api.namespaces['ns2'].get_imported_namespaces(),
[api.namespaces['ns1']])
xs = api.namespaces['ns2'].get_route_io_data_types()
xs = sorted(xs, key=lambda x: x.name.lower())
self.assertEqual(len(xs), 3)
ns1 = api.namespaces['ns1']
ns2 = api.namespaces['ns2']
self.assertEqual(xs[0].namespace, ns1)
self.assertEqual(xs[1].namespace, ns1)
s3_dt = ns2.data_type_by_name['S3']
self.assertEqual(s3_dt.fields[2].data_type.data_type.namespace, ns1)
self.assertEqual(xs[2].name, 'S3')
def test_namespace_obj(self):
text = textwrap.dedent("""\
namespace ns1
struct S1
f1 String
struct S2
f2 String
s3 S3
struct S3
f3 String
struct S4
f4 String
alias A = S2
route r(S1, List(S4?)?, A)
""")
api = specs_to_ir([('ns1.stone', text)])
ns1 = api.namespaces['ns1']
# Check that all data types are defined
self.assertIn('S1', ns1.data_type_by_name)
self.assertIn('S2', ns1.data_type_by_name)
self.assertIn('S3', ns1.data_type_by_name)
self.assertIn('S4', ns1.data_type_by_name)
self.assertEqual(len(ns1.data_types), 4)
# Check that route is defined
self.assertIn('r', ns1.route_by_name)
self.assertEqual(len(ns1.routes), 1)
s1 = ns1.data_type_by_name['S1']
a = ns1.alias_by_name['A']
s3 = ns1.data_type_by_name['S3']
s4 = ns1.data_type_by_name['S4']
route_data_types = ns1.get_route_io_data_types()
self.assertIn(s1, route_data_types)
# Test that aliased reference is included
self.assertIn(a, route_data_types)
# Test that field type is not present
self.assertNotIn(s3, route_data_types)
# Check that type that is wrapped by a list and/or nullable is present
self.assertIn(s4, route_data_types)
def test_whitespace(self):
text = textwrap.dedent("""\
namespace test
struct S
f String
++++
g Int64
++++
example default
f = "hi"
++++++++
g = 3
route r(Void, S, Void)
""").replace('+', ' ')
specs_to_ir([('ns1.stone', text)])
text = textwrap.dedent("""\
namespace test
struct S
f String
++++
g Int64
++++
example default
f = "hi"
++++
++++++
g = 3
route r(Void, S, Void)
""").replace('+', ' ')
specs_to_ir([('ns1.stone', text)])
text = textwrap.dedent("""\
namespace test
# weirdly indented comment
struct S
# weirdly indented comment
f String
g Int64
example default
f = "hi"
# weirdly indented comment
g = 3
route r(Void, S, Void)
""")
specs_to_ir([('ns1.stone', text)])
def test_route_attrs_schema(self):
# Try to define route in stone_cfg
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 String
route r(Void, Void, Void)
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('stone_cfg.stone', stone_cfg_text)])
self.assertEqual(
'No routes can be defined in the stone_cfg namespace.',
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 6)
self.assertEqual(cm.exception.path, 'stone_cfg.stone')
# Try to set bad type for schema
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 String
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
attrs
f1 = 3
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
self.assertEqual(
'integer is not a valid string',
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 4)
self.assertEqual(cm.exception.path, 'test.stone')
# Try missing attribute for route
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 String
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
self.assertEqual(
"Route does not define attr key 'f1'.",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 2)
self.assertEqual(cm.exception.path, 'test.stone')
# Test missing attribute for route attribute with default
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 String = "yay"
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
""")
api = specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
ns1 = api.namespaces['test']
self.assertEquals(ns1.route_by_name['r1'].attrs['f1'], 'yay')
# Test missing attribute for route attribute with optional
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 String?
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
""")
api = specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
test = api.namespaces['test']
self.assertEquals(test.route_by_name['r1'].attrs['f1'], None)
# Test unknown route attributes
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 String?
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
attrs
f2 = 3
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
self.assertEqual(
"Route attribute 'f2' is not defined in 'stone_cfg.Route'.",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 4)
self.assertEqual(cm.exception.path, 'test.stone')
# Test no route attributes defined at all
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
attrs
f1 = 3
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('test.stone', test_text)])
self.assertEqual(
"Route attribute 'f1' is not defined in 'stone_cfg.Route'.",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 4)
self.assertEqual(cm.exception.path, 'test.stone')
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
struct Route
f1 Boolean
f2 Bytes
f3 Float64
f4 Int64
f5 String
f6 Timestamp("%Y-%m-%dT%H:%M:%SZ")
f7 S
f8 T
f9 S?
f10 T
f11 S?
alias S = String
alias T = String?
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
attrs
f1 = true
f2 = "asdf"
f3 = 3.2
f4 = 10
f5 = "Hello"
f6 = "2015-05-12T15:50:38Z"
f7 = "World"
f8 = "World"
f9 = "World"
""")
api = specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
test = api.namespaces['test']
attrs = test.route_by_name['r1'].attrs
self.assertEquals(attrs['f1'], True)
self.assertEquals(attrs['f2'], b'asdf')
self.assertEquals(attrs['f3'], 3.2)
self.assertEquals(attrs['f4'], 10)
self.assertEquals(attrs['f5'], 'Hello')
self.assertEquals(
attrs['f6'], datetime.datetime(2015, 5, 12, 15, 50, 38))
self.assertEquals(attrs['f7'], 'World')
self.assertEquals(attrs['f8'], 'World')
self.assertEquals(attrs['f9'], 'World')
self.assertEquals(attrs['f10'], None)
self.assertEquals(attrs['f11'], None)
# Try defining an attribute twice.
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
import test
struct Route
f1 String
""")
test_text = textwrap.dedent("""\
namespace test
route r1(Void, Void, Void)
attrs
f1 = "1"
f1 = "2"
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
self.assertEqual(
"Attribute 'f1' defined more than once.",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 6)
self.assertEqual(cm.exception.path, 'test.stone')
# Test union type
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
import test
struct Route
f1 test.U
""")
test_text = textwrap.dedent("""\
namespace test
union U
a
b
route r1(Void, Void, Void)
attrs
f1 = a
""")
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
# Try union type with bad attribute
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
import test
struct Route
f1 test.U
""")
test_text = textwrap.dedent("""\
namespace test
union U
a
b
route r1(Void, Void, Void)
attrs
f1 = 3
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
self.assertEqual(
"Expected union tag as value.",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 9)
self.assertEqual(cm.exception.path, 'test.stone')
# Try union type attribute with non-void tag set
stone_cfg_text = textwrap.dedent("""\
namespace stone_cfg
import test
struct Route
f1 test.U
""")
test_text = textwrap.dedent("""\
namespace test
union U
a
b String
route r1(Void, Void, Void)
attrs
f1 = b
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([
('stone_cfg.stone', stone_cfg_text), ('test.stone', test_text)])
self.assertEqual(
"invalid reference to non-void option 'b'",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 9)
self.assertEqual(cm.exception.path, 'test.stone')
def test_inline_type_def(self):
text = textwrap.dedent("""\
namespace test
struct Photo
dimensions Dimensions
"Dimensions for a photo."
struct
height UInt64
"Height of the photo."
width UInt64
"Width of the photo."
example default
height = 5
width = 10
location GpsCoordinates?
struct
latitude Float64
longitude Float64
example default
latitude = 37.23
longitude = 122.2
time_taken Int64
"The timestamp when the photo was taken."
example default
"A typical photo"
dimensions = default
location = default
time_taken = 100
union E
e1
e2 E2
"Test E2."
union
a
b
route r(Void, Photo, E)
""")
specs_to_ir([('ns1.stone', text)])
text = textwrap.dedent("""\
namespace test
struct T
g Int64
struct S
f T
"Dimensions for a photo or video."
struct
a String
b Int64
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('ns1.stone', text)])
self.assertEqual(
"Symbol 'T' already defined (ns1.stone:3).",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 9)
self.assertEqual(cm.exception.path, 'ns1.stone')
def test_annotations(self):
# Test non-existant annotation
text = textwrap.dedent("""\
namespace test
annotation NonExistant = NonExistant()
struct S
f String
@NonExistant
"Test field with two non-existant tag."
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('test.stone', text)])
self.assertEqual(
"Unknown Annotation type 'NonExistant'.",
cm.exception.msg)
self.assertEqual(cm.exception.lineno, 3)
# Test omission tag
text = textwrap.dedent("""\
namespace test
annotation InternalOnly = Omitted("internal")
struct S
f String
@InternalOnly
"Test field with one omitted tag."
""")
api = specs_to_ir([('test.stone', text)])
s = api.namespaces['test'].data_type_by_name['S']
self.assertEqual(s.all_fields[0].name, 'f')
self.assertEqual(s.all_fields[0].omitted_caller, 'internal')
# Test applying two omission tags to one field
text = textwrap.dedent("""\
namespace test
annotation InternalOnly = Omitted("internal")
annotation AlphaOnly = Omitted("alpha_only")
struct S
f String
@AlphaOnly
@InternalOnly
"Test field with two omitted tags."
""")
with self.assertRaises(InvalidSpec) as cm:
specs_to_ir([('test.stone', text)])
self.assertEqual(
"Omitted caller | |
'.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
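# Typical use (assuming the file ships in mpl-data/example, e.g. 'msft.csv'
# in the standard distribution):
#   fh = get_example_data('msft.csv')
#   data = fh.read()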
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
# NOTE: I don't know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
and renamed to new default rc file name "matplotlibrc"
(no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
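# Behaviour sketch (illustrative values): assigning through a deprecated key
# warns and stores under the new name, while unknown keys raise KeyError, e.g.
#   rcParams['text.fontsize'] = 10.0   # warns, actually sets 'font.size'
#   rcParams['no.such.key'] = 1        # raises KeyError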
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
| |
other services.
:param pulumi.Input[Union[str, 'MoveCost']] default_move_cost: Specifies the move cost for the service.
:param pulumi.Input[int] min_instance_count: MinInstanceCount is the minimum number of instances that must be up to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * InstanceCount) ). Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted into the number of nodes on which the instances are allowed to be placed according to the placement constraints on the service.
:param pulumi.Input[int] min_instance_percentage: MinInstancePercentage is the minimum percentage of InstanceCount that must be up to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * InstanceCount) ). Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first converted into the number of nodes on which the instances are allowed to be placed according to the placement constraints on the service.
:param pulumi.Input[str] placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "(NodeColor == blue)".
:param pulumi.Input[Sequence[pulumi.Input['ScalingPolicyArgs']]] scaling_policies: Scaling policies for this service.
:param pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricArgs']]] service_load_metrics: The service load metrics is given as an array of ServiceLoadMetric objects.
:param pulumi.Input[Union[str, 'ServicePackageActivationMode']] service_package_activation_mode: The activation Mode of the service package
:param pulumi.Input[Sequence[pulumi.Input[Union['ServicePlacementInvalidDomainPolicyArgs', 'ServicePlacementNonPartiallyPlaceServicePolicyArgs', 'ServicePlacementPreferPrimaryDomainPolicyArgs', 'ServicePlacementRequireDomainDistributionPolicyArgs', 'ServicePlacementRequiredDomainPolicyArgs']]]] service_placement_policies: A list that describes the placement policies for the service.
"""
pulumi.set(__self__, "instance_count", instance_count)
pulumi.set(__self__, "partition_description", partition_description)
pulumi.set(__self__, "service_kind", 'Stateless')
pulumi.set(__self__, "service_type_name", service_type_name)
if correlation_scheme is not None:
pulumi.set(__self__, "correlation_scheme", correlation_scheme)
if default_move_cost is not None:
pulumi.set(__self__, "default_move_cost", default_move_cost)
if min_instance_count is not None:
pulumi.set(__self__, "min_instance_count", min_instance_count)
if min_instance_percentage is not None:
pulumi.set(__self__, "min_instance_percentage", min_instance_percentage)
if placement_constraints is not None:
pulumi.set(__self__, "placement_constraints", placement_constraints)
if scaling_policies is not None:
pulumi.set(__self__, "scaling_policies", scaling_policies)
if service_load_metrics is not None:
pulumi.set(__self__, "service_load_metrics", service_load_metrics)
if service_package_activation_mode is not None:
pulumi.set(__self__, "service_package_activation_mode", service_package_activation_mode)
if service_placement_policies is not None:
pulumi.set(__self__, "service_placement_policies", service_placement_policies)
@property
@pulumi.getter(name="instanceCount")
def instance_count(self) -> pulumi.Input[int]:
"""
The instance count.
"""
return pulumi.get(self, "instance_count")
@instance_count.setter
def instance_count(self, value: pulumi.Input[int]):
pulumi.set(self, "instance_count", value)
@property
@pulumi.getter(name="partitionDescription")
def partition_description(self) -> pulumi.Input[Union['NamedPartitionSchemeArgs', 'SingletonPartitionSchemeArgs', 'UniformInt64RangePartitionSchemeArgs']]:
"""
Describes how the service is partitioned.
"""
return pulumi.get(self, "partition_description")
@partition_description.setter
def partition_description(self, value: pulumi.Input[Union['NamedPartitionSchemeArgs', 'SingletonPartitionSchemeArgs', 'UniformInt64RangePartitionSchemeArgs']]):
pulumi.set(self, "partition_description", value)
@property
@pulumi.getter(name="serviceKind")
def service_kind(self) -> pulumi.Input[str]:
"""
The kind of service (Stateless or Stateful).
Expected value is 'Stateless'.
"""
return pulumi.get(self, "service_kind")
@service_kind.setter
def service_kind(self, value: pulumi.Input[str]):
pulumi.set(self, "service_kind", value)
@property
@pulumi.getter(name="serviceTypeName")
def service_type_name(self) -> pulumi.Input[str]:
"""
The name of the service type
"""
return pulumi.get(self, "service_type_name")
@service_type_name.setter
def service_type_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_type_name", value)
@property
@pulumi.getter(name="correlationScheme")
def correlation_scheme(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationArgs']]]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "correlation_scheme")
@correlation_scheme.setter
def correlation_scheme(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationArgs']]]]):
pulumi.set(self, "correlation_scheme", value)
@property
@pulumi.getter(name="defaultMoveCost")
def default_move_cost(self) -> Optional[pulumi.Input[Union[str, 'MoveCost']]]:
"""
Specifies the move cost for the service.
"""
return pulumi.get(self, "default_move_cost")
@default_move_cost.setter
def default_move_cost(self, value: Optional[pulumi.Input[Union[str, 'MoveCost']]]):
pulumi.set(self, "default_move_cost", value)
@property
@pulumi.getter(name="minInstanceCount")
def min_instance_count(self) -> Optional[pulumi.Input[int]]:
"""
MinInstanceCount is the minimum number of instances that must be up to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * InstanceCount) ). Note, if InstanceCount is set to -1, during MinInstanceCount computation -1 is first converted into the number of nodes on which the instances are allowed to be placed according to the placement constraints on the service.
"""
return pulumi.get(self, "min_instance_count")
@min_instance_count.setter
def min_instance_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_instance_count", value)
@property
@pulumi.getter(name="minInstancePercentage")
def min_instance_percentage(self) -> Optional[pulumi.Input[int]]:
"""
MinInstancePercentage is the minimum percentage of InstanceCount that must be up to meet the EnsureAvailability safety check during operations like upgrade or deactivate node. The actual number that is used is max( MinInstanceCount, ceil( MinInstancePercentage/100.0 * InstanceCount) ). Note, if InstanceCount is set to -1, during MinInstancePercentage computation, -1 is first converted into the number of nodes on which the instances are allowed to be placed according to the placement constraints on the service.
"""
return pulumi.get(self, "min_instance_percentage")
@min_instance_percentage.setter
def min_instance_percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_instance_percentage", value)
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> Optional[pulumi.Input[str]]:
"""
The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "(NodeColor == blue)".
"""
return pulumi.get(self, "placement_constraints")
@placement_constraints.setter
def placement_constraints(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "placement_constraints", value)
@property
@pulumi.getter(name="scalingPolicies")
def scaling_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPolicyArgs']]]]:
"""
Scaling policies for this service.
"""
return pulumi.get(self, "scaling_policies")
@scaling_policies.setter
def scaling_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingPolicyArgs']]]]):
pulumi.set(self, "scaling_policies", value)
@property
@pulumi.getter(name="serviceLoadMetrics")
def service_load_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricArgs']]]]:
"""
The service load metrics is given as an array of ServiceLoadMetric objects.
"""
return pulumi.get(self, "service_load_metrics")
@service_load_metrics.setter
def service_load_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricArgs']]]]):
pulumi.set(self, "service_load_metrics", value)
@property
@pulumi.getter(name="servicePackageActivationMode")
def service_package_activation_mode(self) -> Optional[pulumi.Input[Union[str, 'ServicePackageActivationMode']]]:
"""
        The activation mode of the service package.
"""
return pulumi.get(self, "service_package_activation_mode")
@service_package_activation_mode.setter
def service_package_activation_mode(self, value: Optional[pulumi.Input[Union[str, 'ServicePackageActivationMode']]]):
pulumi.set(self, "service_package_activation_mode", value)
@property
@pulumi.getter(name="servicePlacementPolicies")
def service_placement_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union['ServicePlacementInvalidDomainPolicyArgs', 'ServicePlacementNonPartiallyPlaceServicePolicyArgs', 'ServicePlacementPreferPrimaryDomainPolicyArgs', 'ServicePlacementRequireDomainDistributionPolicyArgs', 'ServicePlacementRequiredDomainPolicyArgs']]]]]:
"""
        A list that describes the placement policies of the service.
"""
return pulumi.get(self, "service_placement_policies")
@service_placement_policies.setter
def service_placement_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ServicePlacementInvalidDomainPolicyArgs', 'ServicePlacementNonPartiallyPlaceServicePolicyArgs', 'ServicePlacementPreferPrimaryDomainPolicyArgs', 'ServicePlacementRequireDomainDistributionPolicyArgs', 'ServicePlacementRequiredDomainPolicyArgs']]]]]):
pulumi.set(self, "service_placement_policies", value)
@pulumi.input_type
class SubResourceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Azure resource identifier.
:param pulumi.Input[str] id: Azure resource identifier.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Azure resource identifier.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class UniformInt64RangePartitionSchemeArgs:
def __init__(__self__, *,
count: pulumi.Input[int],
high_key: pulumi.Input[float],
low_key: pulumi.Input[float],
partition_scheme: pulumi.Input[str]):
"""
Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions.
:param pulumi.Input[int] count: The number of partitions.
:param pulumi.Input[float] high_key: The upper bound of the partition key range that
should be split between the partition ‘Count’
:param pulumi.Input[float] low_key: The lower bound of the partition key range that
should be split between the partition ‘Count’
:param pulumi.Input[str] partition_scheme: Enumerates the ways that a service can be partitioned.
Expected value is 'UniformInt64Range'.
"""
pulumi.set(__self__, "count", count)
pulumi.set(__self__, "high_key", high_key)
pulumi.set(__self__, "low_key", low_key)
pulumi.set(__self__, "partition_scheme", 'UniformInt64Range')
@property
@pulumi.getter
def count(self) -> pulumi.Input[int]:
"""
The number of partitions.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: pulumi.Input[int]):
pulumi.set(self, "count", value)
@property
@pulumi.getter(name="highKey")
def high_key(self) -> pulumi.Input[float]:
"""
The upper bound of the partition key range that
should be split between the partition ‘Count’
"""
return pulumi.get(self, "high_key")
@high_key.setter
def high_key(self, value: pulumi.Input[float]):
pulumi.set(self, "high_key", value)
@property
@pulumi.getter(name="lowKey")
def low_key(self) -> pulumi.Input[float]:
"""
The lower bound of the partition key range that
should be split between the partition ‘Count’
"""
return pulumi.get(self, "low_key")
@low_key.setter
def low_key(self, value: pulumi.Input[float]):
pulumi.set(self, "low_key", value)
@property
@pulumi.getter(name="partitionScheme")
def partition_scheme(self) -> pulumi.Input[str]:
"""
Enumerates the ways that a service can be partitioned.
Expected value is 'UniformInt64Range'.
"""
return pulumi.get(self, "partition_scheme")
@partition_scheme.setter
def partition_scheme(self, value: pulumi.Input[str]):
pulumi.set(self, "partition_scheme", value)
@pulumi.input_type
class VMSSExtensionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
publisher: pulumi.Input[str],
type: pulumi.Input[str],
type_handler_version: pulumi.Input[str],
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
protected_settings: Optional[Any] = None,
provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
settings: Optional[Any] = None):
"""
Specifies set of extensions that should be installed onto the virtual machines.
:param pulumi.Input[str] name: The name of the extension.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
        :param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
#!/bin/sh
""":" .
exec python "$0" "$@"
"""
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import binascii
import datetime
import hashlib
import os
import json
import sys
sys.path.extend(["../../"])
from bbc1.core import bbc_app
from bbc1.core.bbc_config import DEFAULT_CORE_PORT
from bbc1.core import bbclib
from bbc1.core.message_key_types import KeyType
from bbc1.core.bbc_error import *
MAPPING_FILE = ".bbc_id_mappings"
PRIVATE_KEY = ".private_key"
PUBLIC_KEY = ".public_key"
domain_id = bbclib.get_new_id("file_proof_test_domain", include_timestamp=False)
asset_group_id = bbclib.get_new_id("file_proof_asset_group", include_timestamp=False)
user_name = "user_default"
user_id = bbclib.get_new_id(user_name, include_timestamp=False)
key_pair = None
def store_id_mappings(name, asset_group_id, transaction_id=None, asset_ids=None):
if transaction_id is None and asset_ids is None:
return
mapping = dict()
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
if os.path.exists(MAPPING_FILE):
with open(MAPPING_FILE, "r") as f:
mapping = json.load(f)
mapping.setdefault(asset_group_id_str, dict()).setdefault(name, dict())
if transaction_id is not None:
mapping[asset_group_id_str][name]['transaction_id'] = binascii.b2a_hex(transaction_id).decode()
if asset_ids is not None:
if isinstance(asset_ids, list):
entry = []
for ast in asset_ids:
                entry.append(binascii.b2a_hex(ast).decode())  # decode so json.dump can serialize the list
mapping[asset_group_id_str][name]['asset_id'] = entry
else:
mapping[asset_group_id_str][name]['asset_id'] = binascii.b2a_hex(asset_ids).decode()
with open(MAPPING_FILE, "w") as f:
json.dump(mapping, f, indent=4)
def remove_id_mappings(name, asset_group_id):
mapping = dict()
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
if os.path.exists(MAPPING_FILE):
with open(MAPPING_FILE, "r") as f:
mapping = json.load(f)
if asset_group_id_str in mapping:
mapping[asset_group_id_str].pop(name, None)
if len(mapping[asset_group_id_str].keys()) == 0:
del mapping[asset_group_id_str]
with open(MAPPING_FILE, "w") as f:
json.dump(mapping, f, indent=4)
def get_id_from_mappings(name, asset_group_id):
if not os.path.exists(MAPPING_FILE):
return None
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
with open(MAPPING_FILE, "r") as f:
mapping = json.load(f)
if mapping is None:
return None
if asset_group_id_str in mapping and name in mapping[asset_group_id_str]:
result = dict()
if 'transaction_id' in mapping[asset_group_id_str][name]:
result['transaction_id'] = binascii.a2b_hex(mapping[asset_group_id_str][name]['transaction_id'])
if 'asset_id' in mapping[asset_group_id_str][name]:
if isinstance(mapping[asset_group_id_str][name]['asset_id'], list):
entry = []
for ast in mapping[asset_group_id_str][name]['asset_id']:
entry.append(binascii.a2b_hex(ast))
result['asset_id'] = entry
else:
result['asset_id'] = binascii.a2b_hex(mapping[asset_group_id_str][name]['asset_id'])
return result
return None
def get_list_from_mappings(asset_group_id):
if not os.path.exists(MAPPING_FILE):
return None
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
with open(MAPPING_FILE, "r") as f:
mapping = json.load(f)
if mapping is None:
return None
if asset_group_id_str in mapping:
result = []
for name in mapping[asset_group_id_str]:
result.append(name)
return result
return None
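

# Illustrative sketch (not part of the original example): a purely local round
# trip through the mapping helpers above. The file name and ids are made-up
# values; calling this writes and then cleans up entries in MAPPING_FILE in
# the current directory.
def _example_mapping_roundtrip():
    fake_txid = bbclib.get_new_id("example_tx", include_timestamp=False)
    fake_asset_id = bbclib.get_new_id("example_asset", include_timestamp=False)
    store_id_mappings("sample.txt", asset_group_id,
                      transaction_id=fake_txid, asset_ids=fake_asset_id)
    info = get_id_from_mappings("sample.txt", asset_group_id)
    assert info["transaction_id"] == fake_txid
    assert info["asset_id"] == fake_asset_id
    remove_id_mappings("sample.txt", asset_group_id)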
def domain_setup():
tmpclient = bbc_app.BBcAppClient(port=DEFAULT_CORE_PORT, multiq=False, loglevel="all")
if os.path.exists("node_key.pem"):
tmpclient.set_node_key("node_key.pem")
tmpclient.domain_setup(domain_id)
tmpclient.callback.synchronize()
tmpclient.unregister_from_core()
print("Domain %s is created." % (binascii.b2a_hex(domain_id[:4]).decode()))
print("Setup is done.")
def setup_bbc_client():
bbc_app_client = bbc_app.BBcAppClient(port=DEFAULT_CORE_PORT, multiq=False, loglevel="all")
bbc_app_client.set_user_id(user_id)
bbc_app_client.set_domain_id(domain_id)
bbc_app_client.set_callback(bbc_app.Callback())
ret = bbc_app_client.register_to_core()
assert ret
return bbc_app_client
def require_receiver_info_for(filename):
print("Your name is [", user_name, "] and user_id is [", binascii.b2a_hex(user_id).decode(), "]")
print("Please enter the receiver user name for file %s." % filename)
receiver_name = input('>> ')
receiver_user_id = bbclib.get_new_id(receiver_name, include_timestamp=False)
return receiver_name, receiver_user_id
def search_reference_txid_from_mappings(filename):
reference_txid = None
file_info = get_id_from_mappings(os.path.basename(filename), asset_group_id)
if file_info:
reference_txid = file_info["transaction_id"]
return reference_txid
def send_signreq(receiver_name, receiver_user_id, ref_txids=None, file_data=None, bbc_app_client=None):
transaction = bbclib.make_transaction(relation_num=1, witness=True)
user_info_msg = "Ownership is transfered from %s to %s" % (user_name, receiver_name)
bbclib.add_relation_asset(transaction, relation_idx=0, asset_group_id=asset_group_id,
user_id=receiver_user_id, asset_body=user_info_msg, asset_file=file_data)
transaction.witness.add_witness(user_id)
transaction.witness.add_witness(receiver_user_id)
for i, ref_txid in enumerate(ref_txids):
bbc_app_client.search_transaction(ref_txid)
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("ERROR: ", response_data[KeyType.reason].decode())
sys.exit(0)
prev_tx, fmt_type = bbclib.deserialize(response_data[KeyType.transaction_data])
bbclib.add_relation_pointer(transaction, 0, ref_transaction_id=prev_tx.digest())
asset_id = transaction.relations[0].asset.asset_id
asset_files = {asset_id: file_data}
ret = bbc_app_client.gather_signatures(transaction, destinations=[receiver_user_id], asset_files=asset_files)
if not ret:
print("Failed to send sign request")
sys.exit(0)
return transaction
def wait_for_signs(transaction, bbc_app_client):
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("Rejected because ", response_data[KeyType.reason].decode(), "")
sys.exit(0)
result = response_data[KeyType.result]
transaction.witness.add_signature(user_id=result[1], signature=result[2])
sig_mine = transaction.sign(private_key=key_pair.private_key,
public_key=key_pair.public_key)
transaction.witness.add_signature(user_id=user_id, signature=sig_mine)
transaction.digest()
return transaction
def create_transaction_object_for_filedata(receiver_name, receiver_user_id, ref_txids=None, file_data=None,
bbc_app_client=None):
if ref_txids is None or ref_txids[0] is None:
ref_txids = []
transaction = send_signreq(receiver_name, receiver_user_id, ref_txids, file_data, bbc_app_client)
return wait_for_signs(transaction, bbc_app_client)
def insert_signed_transaction_to_bbc_core(transaction=None, bbc_app_client=None, file_name=None):
print("Insert the transaction into BBc-1")
ret = bbc_app_client.insert_transaction(transaction)
assert ret
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("ERROR: ", response_data[KeyType.reason].decode())
sys.exit(0)
def send_transaction_info_msg(bbc_app_client=None, transaction=None, file_name=None, receiver_user_id=None):
transaction_info = [os.path.basename(file_name), transaction.transaction_id]
bbc_app_client.send_message(transaction_info, receiver_user_id)
def wait_for_transaction_msg(bbc_app_client=None):
print("Your name is [", user_name, "] and user_id is [", binascii.b2a_hex(user_id).decode(), "]")
print("Waiting for file transfer.....")
response_data = bbc_app_client.callback.synchronize()
if KeyType.transaction_data not in response_data or KeyType.all_asset_files not in response_data:
print("**** Invalid message is received...")
print(response_data)
bbc_app_client.sendback_denial_of_sign(response_data[KeyType.source_user_id],
response_data[KeyType.transaction_id],
"Invalid message is received.")
sys.exit(1)
return response_data
def pick_valid_transaction_info(received_data=None, bbc_app_client=None):
transaction, fmt_type = bbclib.deserialize(received_data[KeyType.transaction_data])
asset_files = received_data[KeyType.all_asset_files]
asset_id = transaction.relations[0].asset.asset_id
if asset_id not in asset_files:
print("**** No valid file is received...")
print(received_data)
bbc_app_client.sendback_denial_of_sign(received_data[KeyType.source_user_id],
transaction.transaction_id,
"No valid file is received.")
sys.exit(1)
file_to_obtain = asset_files[asset_id]
file_digest = hashlib.sha256(file_to_obtain).digest()
print("--------------------------")
print("File digest written in the transaction data: ",
binascii.b2a_hex(transaction.relations[0].asset.asset_file_digest).decode())
print("File digest calculated from the received file:", binascii.b2a_hex(file_digest).decode())
print("--------------------------")
return transaction, received_data[KeyType.source_user_id]
def prompt_user_to_accept_the_file(bbc_app_client=None, source_id=None, transaction_id=None):
print("====> Do you want to accept the file?")
answer = input('(Y/N) >> ')
if answer != "Y":
bbc_app_client.sendback_denial_of_sign(source_id, transaction_id, "Denied to accept the file")
sys.exit(1)
def wait_for_file_info_msg(bbc_app_client=None):
print("Waiting for the message from the sender...")
response_data = bbc_app_client.callback.synchronize(timeout=10)
if response_data is None:
print("No final message received... Ask the sender about the filename and transaction_id")
sys.exit(0)
if KeyType.message not in response_data:
print("Received invalid message....")
sys.exit(0)
filename, transaction_id = response_data[KeyType.message]
print("--> file name is %s and the transaction_id is %s" % (filename.decode(),
binascii.b2a_hex(transaction_id).decode()))
return filename, transaction_id
def store_proc(file, txid=None):
with open(file, "rb") as fin:
data = fin.read()
bbc_app_client = setup_bbc_client()
store_transaction = bbclib.make_transaction(relation_num=1, witness=True)
user_info = "Owner is %s" % user_name
bbclib.add_relation_asset(store_transaction, relation_idx=0, asset_group_id=asset_group_id,
user_id=user_id, asset_body=user_info, asset_file=data)
store_transaction.witness.add_witness(user_id)
if txid:
bbc_app_client.search_transaction(txid)
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("ERROR: ", response_data[KeyType.reason].decode())
sys.exit(0)
prev_tx, fmt_type = bbclib.deserialize(response_data[KeyType.transaction_data])
bbclib.add_relation_pointer(transaction=store_transaction, relation_idx=0,
ref_transaction_id=prev_tx.transaction_id)
sig = store_transaction.sign(private_key=key_pair.private_key,
public_key=key_pair.public_key)
store_transaction.get_sig_index(user_id)
store_transaction.add_signature_object(user_id=user_id, signature=sig)
store_transaction.digest()
print(store_transaction)
ret = bbc_app_client.insert_transaction(store_transaction)
assert ret
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("ERROR: ", response_data[KeyType.reason].decode())
sys.exit(0)
store_id_mappings(os.path.basename(file), asset_group_id, transaction_id=response_data[KeyType.transaction_id],
asset_ids=store_transaction.relations[0].asset.asset_id)
def store_file(file):
fileinfo = get_id_from_mappings(os.path.basename(file), asset_group_id)
if fileinfo is not None:
print("the file already stored : %s" % os.path.basename(file))
sys.exit(0)
store_proc(file=file, txid=None)
print("file stored : %s" % os.path.basename(file))
print("done store %s" % file)
def get_file(file):
fileinfo = get_id_from_mappings(os.path.basename(file), asset_group_id)
if fileinfo is None:
print("Not exists in local mapping cache. So, asset_id is not known...")
sys.exit(1)
bbc_app_client = setup_bbc_client()
ret = bbc_app_client.search_transaction_with_condition(asset_group_id=asset_group_id, asset_id=fileinfo["asset_id"])
assert ret
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("ERROR: ", response_data[KeyType.reason].decode())
sys.exit(0)
get_transaction, fmt_type = bbclib.deserialize(response_data[KeyType.transactions][0])
if KeyType.all_asset_files in response_data:
asset_file_dict = response_data[KeyType.all_asset_files]
asset_id = get_transaction.relations[0].asset.asset_id
data = asset_file_dict[asset_id]
else:
data = get_transaction.relations[0].asset.asset_body
out_file_name = file
if os.path.exists(out_file_name):
current_datetime = datetime.datetime.now()
time_str = current_datetime.strftime('_%Y%m%d%H%M%S')
out_file_name += time_str
with open(out_file_name, "wb") as outfile:
outfile.write(data)
print("done get %s" % out_file_name)
def remove_file(file):
fileinfo = get_id_from_mappings(os.path.basename(file), asset_group_id)
if fileinfo is None:
print("File does not exist: %s" % os.path.basename(file))
sys.exit(0)
fileinfo = remove_id_mappings(os.path.basename(file), asset_group_id)
print("done remove %s" % file)
def list_file():
fileinfo = get_list_from_mappings(asset_group_id)
if fileinfo is None:
print("No files present in local mapping cache. So, asset_id is not known...")
sys.exit(1)
print("%s" % '\n'.join(fileinfo))
def update_file(file):
fileinfo = get_id_from_mappings(os.path.basename(file), asset_group_id)
if fileinfo is None:
print("Not exists in local mapping cache. So, transaction_id is not known...")
sys.exit(1)
transaction_id = fileinfo["transaction_id"]
# TODO consider whether to check existence of the transaction object
store_proc(file=file, txid=transaction_id)
print("done update %s" % os.path.basename(file))
def verify_file(file):
fileinfo = get_id_from_mappings(os.path.basename(file), asset_group_id)
if fileinfo is None:
print("Not exists in local mapping cache. So, asset_id is not known...")
sys.exit(1)
bbc_app_client = setup_bbc_client()
ret = bbc_app_client.search_transaction_with_condition(asset_group_id=asset_group_id, asset_id=fileinfo["asset_id"])
assert ret
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print("ERROR: ", response_data[KeyType.reason].decode())
sys.exit(0)
transaction, fmt_type = bbclib.deserialize(response_data[KeyType.transactions][0])
digest = transaction.digest()
ret = transaction.signatures[0].verify(digest)
if not ret:
print("Transaction data is invalid.")
sys.exit(1)
with open(file, "rb") as fin:
data = fin.read()
file_digest = hashlib.sha256(data).digest()
if file_digest == transaction.relations[0].asset.asset_file_digest:
print("%s is valid" % file)
else:
print("%s is invalid" % file)
print("done verify %s" % os.path.basename(file))
print("Content of the transaction:::")
print(transaction)
def create_keypair():
keypair = bbclib.KeyPair()
keypair.generate()
with open(PRIVATE_KEY, "wb") as fout:
fout.write(keypair.private_key)
with open(PUBLIC_KEY, "wb") as fout:
fout.write(keypair.public_key)
print("created private_key and public_key : %s, %s" % (PRIVATE_KEY, PUBLIC_KEY))
def enter_file_wait_mode():
bbc_app_client = setup_bbc_client()
recvdat = wait_for_transaction_msg(bbc_app_client=bbc_app_client)
transaction, source_id = pick_valid_transaction_info(received_data=recvdat,
bbc_app_client=bbc_app_client)
prompt_user_to_accept_the_file(bbc_app_client=bbc_app_client, source_id=source_id, transaction_id=transaction.transaction_id)
signature = transaction.sign(keypair=key_pair)
bbc_app_client.sendback_signature(source_id, transaction.transaction_id, -1, signature)
filename, transaction_id = wait_for_file_info_msg(bbc_app_client=bbc_app_client)
store_id_mappings(os.path.basename(filename.decode()), asset_group_id,
transaction_id=transaction_id, asset_ids=transaction.relations[0].asset.asset_id)
def enter_file_send_mode(filename):
receiver_name, receiver_user_id = require_receiver_info_for(filename)
with open(filename, "rb") as fin:
file_data = fin.read()
txid_for_reference = search_reference_txid_from_mappings(filename)
bbc_app_client = setup_bbc_client()
transfer_transaction = create_transaction_object_for_filedata(receiver_name,
receiver_user_id,
ref_txids=[txid_for_reference],
file_data=file_data,
bbc_app_client=bbc_app_client)
insert_signed_transaction_to_bbc_core(transaction=transfer_transaction,
bbc_app_client=bbc_app_client,
file_name=filename)
remove_id_mappings(os.path.basename(filename), asset_group_id)
send_transaction_info_msg(bbc_app_client=bbc_app_client,
transaction=transfer_transaction,
file_name=filename,
receiver_user_id=receiver_user_id)
print("Transfer is done.....")
def sys_check(args):
if args.command_type in ("store", "update", "verify") and \
not os.path.exists(args.target_file):
raise Exception("file not found : %s" | |
    # ...this code on Pangeo I needed to NOT flip the elevation values here and then switch the bounding box y value order
# Not entirely sure what's going on, but need to be aware of this!!
# print("Note: check for proper orientation of results depending on compute environment. Pangeo results were upside down.")
elev_copy = np.copy(np.flip(onedem.elevation.values, axis=flipax))
# flipax=[]
# elev_copy = np.copy(onedem.elevation.values)
# import matplotlib.pyplot as plt
# print(plt.imshow(elev_copy))
# generate a labeled array of potential iceberg features, excluding those that are too large or small
seglabeled_arr = raster_ops.labeled_from_segmentation(elev_copy, [3,10], resolution=res, min_area=min_area, flipax=[])
print("Got labeled raster of potential icebergs for an image")
# remove features whose borders are >50% no data values (i.e. the "iceberg" edge is really a DEM edge)
labeled_arr = raster_ops.border_filtering(seglabeled_arr, elev_copy, flipax=[]).astype(seglabeled_arr.dtype)
# apparently rasterio can't handle int64 inputs, which is what border_filtering returns
import matplotlib.pyplot as plt
print(plt.imshow(labeled_arr))
# create iceberg polygons
# somehow a < 1 pixel berg made it into this list... I'm doing a secondary filtering by area in the iceberg filter step for now
poss_bergs_list = list(poly[0]['coordinates'][0] for poly in rasterio.features.shapes(labeled_arr, transform=onedem.attrs['transform']))[:-1]
poss_bergs = [Polygon(poly) for poly in poss_bergs_list]
try:
del elev_copy
del seglabeled_arr
del labeled_arr
except NameError:
pass
return poss_bergs
def getexval(potvals, coord, val):
idx = (np.abs(potvals - val)).argmin()
nearval = potvals.isel({coord: idx}).item()
return nearval
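

# Illustrative sketch (not part of the original module): what getexval does
# with a toy DataArray. It assumes xarray is available in the environment; the
# coordinate values are arbitrary.
def _example_getexval():
    import xarray as xr
    da = xr.DataArray(np.arange(5), coords={"x": [0.0, 2.0, 4.0, 6.0, 8.0]}, dims="x")
    # returns the existing coordinate value nearest to 4.7, i.e. 4.0
    return getexval(da["x"], "x", 4.7)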
def filter_pot_bergs(poss_bergs, onedem, usedask):
"""
Test each potential iceberg for validity, and if valid compute the sea level adjustment and
get elevation pixel values for putting into the geodataframe.
Parameter
---------
poss_bergs : list of potential iceberg geometries
"""
fjord = onedem.attrs['fjord']
max_freebd = fjord_props.get_ice_thickness(fjord)/10.0
minfree = fjord_props.get_min_freeboard(fjord)
res = onedem.attrs['res'][0] #Note: the pixel area will be inaccurate if the resolution is not the same in x and y
try:
crs = onedem.attrs['crs']
except KeyError:
try:
crs = onedem.attrs['proj4']
except KeyError:
print("Your input DEM does not have a CRS attribute")
# for berg in poss_bergs:
# try: hold = Polygon(berg)
# except NotImplementedError:
# print(berg)
# Note: list of poss_bergs must be a list of shapely geometry types
# the previous version, which used Polygon(berg) for berg in poss_bergs in the next line,
# was a problem when a multipolygon got created after combining results from dask chunks
poss_gdf = gpd.GeoDataFrame({'origberg': poss_bergs}, geometry='origberg')
poss_gdf = poss_gdf.set_crs(crs)
print("Potential icebergs found: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
# remove empty or invalid geometries
poss_gdf = poss_gdf[~poss_gdf.origberg.is_empty]
poss_gdf = poss_gdf[poss_gdf.origberg.is_valid]
# print(len(poss_gdf))
# 10 pixel buffer
buffer = 10 * res
# create a negatively buffered berg outline to exclude border/water pixels
poss_gdf['berg'] = poss_gdf.origberg.buffer(-buffer)
# get the largest polygon from a multipolygon (if one was created during buffering)
def get_largest_from_multi(multipolygons):
bergs = []
for multipolygon in multipolygons:
subbergs = list(multipolygon)
area = []
for sb in subbergs:
sb = Polygon(sb)
area.append(sb.area)
# print(area)
idx = np.where(area == np.nanmax(area))[0]
berg = Polygon(subbergs[idx[0]])
bergs.append(berg)
return bergs
poss_multis = (poss_gdf.berg.geom_type == "MultiPolygon")
poss_gdf.loc[poss_multis, 'berg'] = get_largest_from_multi(poss_gdf[poss_multis].berg)
del poss_multis
# print(len(poss_gdf))
# remove holes, where present in buffered polygons
poss_ints = ([len(interior) > 0 for interior in poss_gdf.berg.interiors])
poss_gdf.loc[poss_ints, 'berg'] = [Polygon(list(getcoords.exterior.coords)) for getcoords in poss_gdf[poss_ints].berg]
del poss_ints
poss_gdf = poss_gdf[~poss_gdf.berg.is_empty]
poss_gdf = poss_gdf[poss_gdf.berg.is_valid]
# print("Potential icebergs after buffering and invalid, multi, and interior polygons removed: " + str(len(poss_gdf)))
# get the polygon complexity and remove if it's above the threshold
poss_gdf['complexity'] = [vector_ops.poly_complexity(oneberg) for oneberg in poss_gdf.berg]
if res == 2.0:
complexthresh = 0.07
elif res ==4.0:
complexthresh = 0.10
else:
complexthresh = 0.08
print("using a default complexity threshold value - add one for your resolution")
poss_gdf = poss_gdf[poss_gdf.complexity < complexthresh]
print("Potential icebergs after complex ones removed: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
poss_gdf = poss_gdf.reset_index().drop(columns=["index", "complexity"])
total_bounds = poss_gdf.total_bounds
try: onedem = onedem.rio.slice_xy(*total_bounds)
except NoDataInBounds:
coords = ('x','y','x','y')
exbound_box = []
for a, b in zip(total_bounds, coords):
exbound_box.append(getexval(onedem[b], b, a))
onedem = onedem.rio.slice_xy(*exbound_box)
# onedem = onedem.chunk({'x': 1024, 'y':1024})
# onedem = onedem.rio.clip_box(*total_bounds).chunk({'x': 1024, 'y':1024})
# rasterize the icebergs; get the buffered iceberg elevation values for computing draft
poss_gdf['bergkey'] = poss_gdf.index.astype(int)
poss_gdf["geometry"] = poss_gdf.berg
gdf_grid = make_geocube(vector_data=poss_gdf,
measurements=["bergkey"],
like=onedem,
fill=np.nan
)
# gdf_grid = gdf_grid.chunk({'x': 1024, 'y':1024}) #DevGoal: make this a variable
poss_gdf["freeboardmed"] = [0.0] * len(poss_gdf.index)
poss_gdf["elevs"] = '' # so that it's object type, not int, for a variable length array
for bkey in poss_gdf["bergkey"]:
bergdem = onedem.where(gdf_grid["bergkey"] == bkey, drop=True)
pxvals = bergdem["elevation"].values
pxvals = pxvals[~np.isnan(pxvals)]
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==bkey].index[0], "elevs"] = pxvals
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==bkey].index[0], "freeboardmed"] = np.nanmedian(pxvals)
del bergdem
del gdf_grid
# skip bergs that returned all nan elevation values (and thus a nan median value)
poss_gdf = poss_gdf[poss_gdf["freeboardmed"] != np.nan]
# print(len(poss_gdf))
# skip bergs that likely contain a lot of cloud (or otherwise unrealistic elevation) pixels
poss_gdf = poss_gdf[poss_gdf['freeboardmed'] < max_freebd] # units in meters, matching those of the DEM elevation
print("Potential icebergs after too-tall ones removed: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
# print(poss_gdf)
# get the regional elevation values and use to determine the sea level adjustment
def get_sl_poly(origberg):
"""
Create a polygon (with a hole) for getting pixels to use for the sea level adjustment
"""
# outer extent of ocean pixels used
outer = list(origberg.buffer(2*buffer).exterior.coords)
# inner extent of ocean pixels used
inner = list(origberg.buffer(buffer).exterior.coords)
return Polygon(outer, holes=[inner])
poss_gdf['sl_aroundberg'] = poss_gdf.origberg.apply(get_sl_poly)
def get_sl_adj(sl_aroundberg):
"""
Clip the polygon from the elevation DEM and get the pixel values.
Compute the sea level offset
"""
try:
slvals = onedem.elevation.rio.clip([sl_aroundberg], crs=onedem.attrs['crs']).values.flatten() #from_disk=True
except NoDataInBounds:
if sl_aroundberg.area < (res**2.0) * 10.0:
slvals = []
else:
try:
slvals = onedem.elevation.rio.clip([sl_aroundberg], crs=onedem.attrs['crs'], all_touched=True).values.flatten()
except NoDataInBounds:
print("Manually check this DEM for usability")
print(sl_aroundberg.area)
print((res**2.0) * 10.0)
print(onedem.elevation.rio.bounds(recalc=True))
print(sl_aroundberg.bounds)
sl_adj = np.nanmedian(slvals)
return sl_adj
# print(onedem)
onedem['elevation'] = onedem.elevation.rio.write_crs(onedem.attrs['crs'], inplace=True)
# NOTE: sea level adjustment (m) is relative to tidal height at the time of image acquisition, not 0 msl
poss_gdf["sl_adj"] = poss_gdf.sl_aroundberg.apply(get_sl_adj)
# check that the median freeboard elevation (pre-filtering) is at least x m above sea level
poss_gdf = poss_gdf[abs(poss_gdf.freeboardmed - poss_gdf.sl_adj) > minfree]
print("Potential icebergs after too small ones removed: " + str(len(poss_gdf)))
if len(poss_gdf) == 0:
return [], [], []
# apply the sea level adjustment to the elevation values
def decrease_offset_wrapper(gpdrow):
corrpxvals = icalcs.apply_decrease_offset(gpdrow["elevs"], gpdrow["sl_adj"])
# gpdrow["elevs"] = corrpxvals
return corrpxvals
poss_gdf["elevs"] = poss_gdf.apply(decrease_offset_wrapper, axis=1)
print("Final icebergs for estimating water depths: " + str(len(poss_gdf)))
return poss_gdf.berg, poss_gdf.elevs, poss_gdf.sl_adj
# Attempts to use dask to eliminate memory crashing issues; some had minor errors, but overall it
# was coarsening the data that proved most effective. This is also leftover from moving away from groupby
# if usedask == True:
# @dask.delayed
# def get_berg_px_vals(bkey, onedem, gdf_grid):
# pxvals = onedem.where(gdf_grid["bergkey"] == bkey, drop=True)["elevation"].values
# pxvals = pxvals[~np.isnan(pxvals)]
# return {key, pxvals}
# pxdict = {}
# print("using dask to iterate through the berg keys")
# bkey_delayeds = [d for d in it.chain.from_iterable(poss_gdf["bergkey"])]
# for bkey in bkey_delayeds:
# keypx_dict = get_berg_px_vals(bkey, onedem, gdf_grid)
# pxdict.update(keypx_dict)
# pxdict = dask.compute(*pxdict)
# print(pxdict)
# print(type(pxdict))
# for key, val in pxdict.items():
# poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "elevs"] = val
# poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "freeboardmed"] = np.nanmedian(val)
# del pxdict
'''
gdf_grid['elev'] = onedem.reset_coords(drop=True)["elevation"]
gdf_grid = gdf_grid.chunk({'x': 1024, 'y':1024}) #DevGoal: make this a variable
grouped = gdf_grid.drop("spatial_ref").groupby(gdf_grid.bergkey)
@dask.delayed
def get_berg_px_vals(key, vals):
pxvals = vals.elev.values
pxvals = pxvals[~np.isnan(pxvals)]
return {key: pxvals}
if usedask == True:
pxdict = {}
print("using dask to iterate through the groups")
group_delayeds = [d for d in it.chain.from_iterable(grouped.to_delayed())]
for key, vals in group_delayeds:
keypx_dict = get_berg_px_vals(key, vals)
pxdict.update(keypx_dict)
pxdict = dask.compute(*pxdict)
print(pxdict)
print(type(pxdict))
for key, val in pxdict.items():
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "elevs"] = val
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "freeboardmed"] = np.nanmedian(val)
del pxdict
else:
for key, vals in grouped:
pxvals = vals.elev.values
pxvals = pxvals[~np.isnan(pxvals)]
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "elevs"] = pxvals
poss_gdf.at[poss_gdf[poss_gdf["bergkey"]==key].index[0], "freeboardmed"] = np.nanmedian(pxvals)
del grouped
'''
    # NOTE: sea level adjustment
from __future__ import annotations
import argparse
import http.server
import json
import threading
from pathlib import Path
from typing import List, Optional, Any
import jinja2
import requests_cache
ENCODING = "utf-8"
URL = "url"
LEVELS = "levels"
CACHE = "cache"
URL_DEFAULT = "https://hub.zebr0.io"
LEVELS_DEFAULT = []
CACHE_DEFAULT = 300
CONFIGURATION_FILE_DEFAULT = Path("/etc/zebr0.conf")
class Client:
"""
Nested key-value system with built-in inheritance and templating, designed for configuration management and deployment.
This Client can connect to any key-value server that follows HTTP REST standards.
For now it only supports plain text responses, JSON support is in the works.
Nested keys and inheritance:
    To fully exploit the Client, you should define a structure in the naming of your keys, like "<project>/<environment>/<key>".
Then use the "levels" parameter of the constructor to point to a specific project and environment, like ["mattermost", "production"].
Finally, use the get() function to fetch a key and it will automatically look for the most specific value possible.
Note that you don't have to duplicate keys for each project and environment, as they can be inherited from their parent level.
Templating:
You can use the double-braces {{ }} in your values to benefit from the Jinja templating engine.
You can refer to the constructor parameters {{ url }} and {{ levels[x] }}, include the value from another key {{ "another-key" | get }} or the content of a file {{ "/path/to/the/file" | read }}.
Configuration file:
Client configuration can also be read from a JSON file, a simple dictionary with the "url", "levels" and "cache" keys.
The save_configuration() function can help you create one from an existing Client.
The suggested default path can be used for a system-wide configuration.
If provided, constructor parameters will always supersede the values from the configuration file, which in turn supersede the default values.
Note that the inheritance and templating mechanisms are performed by the client, to be as server-agnostic as possible.
:param url: URL of the key-value server, defaults to https://hub.zebr0.io
:param levels: levels of specialization (e.g. ["mattermost", "production"] for a <project>/<environment>/<key> structure), defaults to []
:param cache: in seconds, the duration of the cache of http responses, defaults to 300 seconds
:param configuration_file: path to the configuration file, defaults to /etc/zebr0.conf for a system-wide configuration
"""
def __init__(self, url: str = "", levels: Optional[List[str]] = None, cache: int = 0, configuration_file: Path = CONFIGURATION_FILE_DEFAULT) -> None:
# first set default values
self.url = URL_DEFAULT
self.levels = LEVELS_DEFAULT
self.cache = CACHE_DEFAULT
# then override with the configuration file if present
try:
configuration_string = configuration_file.read_text(ENCODING)
configuration = json.loads(configuration_string)
self.url = configuration.get(URL, URL_DEFAULT)
self.levels = configuration.get(LEVELS, LEVELS_DEFAULT)
self.cache = configuration.get(CACHE, CACHE_DEFAULT)
except OSError:
pass # configuration file not found, ignored
# finally override with the parameters if present
if url:
self.url = url
if levels:
self.levels = levels
if cache:
self.cache = cache
# templating setup
self.jinja_environment = jinja2.Environment(keep_trailing_newline=True)
self.jinja_environment.globals[URL] = self.url
self.jinja_environment.globals[LEVELS] = self.levels
self.jinja_environment.filters["get"] = self.get
self.jinja_environment.filters["read"] = read
# http requests setup
self.http_session = requests_cache.CachedSession(backend="memory", expire_after=cache)
def get(self, key: str, default: str = "", template: bool = True, strip: bool = True) -> str:
"""
Fetches the value of a provided key from the server.
Based on the levels defined in the Client, will return the first key found from the deepest level to the root level.
A default value can be provided to be returned if the key isn't found at any level.
:param key: key to look for
:param default: value to return if the key isn't found at any level, defaults to ""
        :param template: shall the value be processed by the templating engine? defaults to True
        :param strip: shall the value be stripped of leading and trailing white spaces? defaults to True
:return: the resulting value of the key
"""
# let's do this with a nice recursive function :)
def fetch(levels):
full_url = "/".join([self.url] + levels + [key])
response = self.http_session.get(full_url)
if response.ok:
return response.text # if the key is found, we return the value
elif levels:
return fetch(levels[:-1]) # if not, we try at the parent level
else:
return default # if we're at the top level, the key just doesn't exist, we return the default value
value = fetch(self.levels) # let's try at the deepest level first
value = self.jinja_environment.from_string(value).render() if template else value # templating
value = value.strip() if strip else value # stripping
return value
def save_configuration(self, configuration_file: Path = CONFIGURATION_FILE_DEFAULT) -> None:
"""
Saves the Client's configuration to a JSON file.
:param configuration_file: path to the configuration file, defaults to /etc/zebr0.conf for a system-wide configuration
"""
configuration = {URL: self.url, LEVELS: self.levels, CACHE: self.cache}
configuration_string = json.dumps(configuration)
configuration_file.write_text(configuration_string, ENCODING)
class TestServer:
"""
Rudimentary key-value HTTP server, for development or testing purposes only.
The keys and their values are stored in a dictionary, that can be defined either in the constructor or through the "data" attribute.
Access logs are also available through the "access_logs" attribute.
Basic usage:
>>> server = TestServer({"key": "value", ...})
>>> server.start()
>>> ...
>>> server.stop()
Or as a context manager, in which case the server will be started automatically, then stopped at the end of the "with" block:
>>> with TestServer() as server:
>>> server.data = {"key": "value", ...}
>>> ...
:param data: the keys and their values stored in a dictionary, defaults to an empty dictionary
:param address: the address the server will be listening to, defaults to 127.0.0.1
:param port: the port the server will be listening to, defaults to 8000
"""
def __init__(self, data: dict = None, address: str = "127.0.0.1", port: int = 8000) -> None:
self.data = data or {}
self.access_logs = []
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(zelf):
key = zelf.path[1:] # the key is the request's path, minus the leading "/"
value = self.data.get(key)
if value: # standard HTTP REST behavior
zelf.send_response(200)
zelf.end_headers()
zelf.wfile.write(str(value).encode(ENCODING))
else:
zelf.send_response(404)
zelf.end_headers()
self.access_logs.append(zelf.path)
self.server = http.server.ThreadingHTTPServer((address, port), RequestHandler)
def start(self) -> None:
""" Starts the server in a separate thread. """
threading.Thread(target=self.server.serve_forever).start()
def stop(self) -> None:
""" Stops the server. """
self.server.shutdown()
self.server.server_close()
def __enter__(self) -> TestServer:
""" When used as a context manager, starts the server at the beginning of the "with" block. """
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
""" When used as a context manager, stops the server at the end of the "with" block. """
self.stop()
def read(path: str, encoding: str = ENCODING) -> str:
"""
Filter for the Jinja templating engine, that allows to read a file's content.
:param path: path to the file
:param encoding: encoding of the file, defaults to "utf-8"
:return: the content of the file
"""
path = Path(path)
return path.read_text(encoding=encoding) if path.is_file() else ""
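

# Illustrative sketch (not part of the original module): how the nested-key
# lookup and inheritance described in the Client docstring play out against a
# local TestServer. The keys, levels, and port are made-up example values.
def _example_client_usage():
    data = {"db-host": "db.example.org",
            "mattermost/production/db-host": "db-prod.example.org"}
    with TestServer(data, port=8001):
        client = Client(url="http://127.0.0.1:8001",
                        levels=["mattermost", "production"], cache=1)
        # the deepest level wins, so this returns "db-prod.example.org"
        return client.get("db-host", default="localhost")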
def build_argument_parser(*args: Any, **kwargs: Any) -> argparse.ArgumentParser:
"""
Builds an ArgumentParser that zebr0 executables can use to share a common Client CLI syntax.
For some reason, subclassing argparse.ArgumentParser and adding the arguments in the constructor doesn't work well with subparsers.
A builder function does.
:param args: arguments of the ArgumentParser constructor
:param kwargs: keyword arguments of the ArgumentParser constructor
:return: the customized ArgumentParser
"""
argparser = argparse.ArgumentParser(*args, **kwargs)
argparser.add_argument("-u", "--url", help="URL of the key-value server, defaults to https://hub.zebr0.io", metavar="<url>")
argparser.add_argument("-l", "--levels", nargs="*", help='levels of specialization (e.g. "mattermost production" for a <project>/<environment>/<key> structure), defaults to ""', metavar="<level>")
argparser.add_argument("-c", "--cache", type=int, help="in seconds, the duration of the cache of http responses, defaults to 300 seconds", metavar="<duration>")
argparser.add_argument("-f", "--configuration-file", type=Path, default=CONFIGURATION_FILE_DEFAULT, help=f"path to the configuration file, defaults to {CONFIGURATION_FILE_DEFAULT} for a system-wide configuration", metavar="<path>")
return argparser
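

# Illustrative sketch (not part of the original module): how an executable
# could reuse build_argument_parser and hand the parsed values to a Client.
def _example_cli(argv=None):
    argparser = build_argument_parser(description="zebr0 demo")
    args = argparser.parse_args(argv)
    return Client(args.url, args.levels, args.cache, args.configuration_file)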
def main(args: Optional[List[str]] = None) -> None:
"""
usage: zebr0-setup [-h] [-u <url>] [-l [<level> [<level> ...]]] [-c <duration>] [-f <path>] [-t <key>]
Saves zebr0's configuration in a JSON file.
optional arguments:
-h, --help show this help message and exit
-u <url>, --url <url>
URL of the key-value server, defaults to https://hub.zebr0.io
-l [<level> [<level> ...]], --levels [<level> [<level> ...]]
levels of specialization (e.g. "mattermost production" for a <project>/<environment>/<key> structure), defaults | |
"""A module for all helper functions pertaining to files and parameters."""
import copy
import os
import pathlib
import datetime
import time
import re
import torch
import yaml
import numpy as np
import matplotlib.colors as mcolors
def write_parameters(param, dir, file="parameters.yaml"):
"""Write parameters to desired directory and file."""
# don't dump generated parameters (pop them from a copy)
param_original = copy.deepcopy(param)
if "generated" in param_original:
param_original.pop("generated")
# don't store "ReferenceNet" as method
param_original["experiments"]["methods"].remove("ReferenceNet")
with open(os.path.join(dir, file), "w+") as ymlfile:
yaml.dump(param_original, ymlfile)
def set_mosek_path():
"""Set the MOSEK licence file path."""
# TODO: do this differently...
os.environ["MOSEKLM_LICENSE_FILE"] = os.path.realpath("misc/mosek.lic")
def get_parameters(file, num_workers, id_worker):
"""Load parameters and generate additional parameters.
Args:
file (str): name of parameter file
num_workers (int): total number of workers
id_worker (int): id of this worker
Returns:
list: list of parameter dictionary with experiments to run.
"""
# retrieve array of parameter files
param_array = _generate_file_param(file)
# those should always be the same (doesn't make sense otherwise ...)
num_nets = param_array[0]["experiments"]["numNets"]
num_rep = param_array[0]["experiments"]["numRepetitions"]
methods = param_array[0]["experiments"]["methods"]
is_training = param_array[0]["experiments"]["mode"] == "train"
num_custom = len(param_array)
num_methods = len(methods)
# check for responsibilities.
if is_training:
responsibilities = _check_responsibilities(
num_custom, num_nets, 1, 1, num_workers, id_worker
)
else:
responsibilities = _check_responsibilities(
num_custom, num_nets, num_rep, num_methods, num_workers, id_worker
)
# check the parameters for soundness
for param, responsibility in zip(param_array, responsibilities):
# some asserts on the validity of the mode
mode = param["experiments"]["mode"]
assert (
mode == "cascade"
or mode == "retrain"
or mode == "train"
or mode == "cascade-rewind"
)
# also check that these parameters always align...
assert (
num_nets == param["experiments"]["numNets"]
and num_rep == param["experiments"]["numRepetitions"]
and methods == param["experiments"]["methods"]
and is_training == (mode == "train")
)
# generator-style function so that we generate remaining parameters on the
# fly dynamically
param_resp = zip(param_array, responsibilities)
for id_custom, (param, responsibility) in enumerate(param_resp):
# generate remaining set of parameters
param["generated"] = _generate_remaining_param(
param,
responsibility,
num_custom,
id_custom,
num_workers,
id_worker,
)
yield param
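

# Illustrative sketch (not from the original file): get_parameters is a
# generator, so a worker script would typically iterate over it. The parameter
# file name and the worker split below are made-up example values.
def _example_iterate_parameters():
    for param in get_parameters("cifar/resnet20.yaml", num_workers=2, id_worker=0):
        print(param["generated"]["netName"])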
def load_param_from_file(file):
"""Load the parameters from file w/ fixed parameters if desired."""
# parameter dictionary that will be built up
param = {}
def update_param_key_from_file(param_to_update):
"""Update param (sub-) dict according to file."""
if not (
isinstance(param_to_update, dict) and "file" in param_to_update
):
return
# delete file key-word from parameters
file_for_param = param_to_update["file"]
del param_to_update["file"]
# update with new parameters
# do it recursively so that we can also update file param there
param_current = copy.deepcopy(param_to_update)
param_to_update.update(load_param_from_file(file_for_param))
param_to_update.update(param_current)
# load the parameters.
with open(_get_full_param_path(file), "r") as ymlfile:
param.update(yaml.full_load(ymlfile))
# check if any configurations are specified using other param files
update_param_key_from_file(param)
# also recurse on dictionaries then, not on lists though...
for key in param:
update_param_key_from_file(param[key])
return param
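

# Illustrative note (not from the original file): the "file" indirection that
# update_param_key_from_file resolves lets a param file point a sub-dict at
# another yaml file and still override individual keys locally, e.g.
#
#   training:
#     file: training/cifar_default.yaml   # loaded first ...
#     numEpochs: 10                       # ... then overridden in place
#
# (file names here are made up for illustration)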
def _generate_file_param(file):
"""Generate the parameter dictionary from the yaml file.
We will fill up the fixed parameters with the fixed parameter files, but if
the actual param file overwrites some of them, these will be preferred.
Args:
file (str): relative path under root/param to param file
Returns:
dict: all parameters to load
"""
# load params from the file (first default, then update with provided)
param_original = load_param_from_file("default.yaml")
param_original.update(load_param_from_file(file))
# add reference method.
if "experiments" in param_original:
param_original["experiments"]["methods"].insert(0, "ReferenceNet")
# now build up the param array with multiple customizations.
param_array = []
# this is a param file that contains multiple parameters
if "customizations" in param_original:
# pop customization from params
customizations = param_original.pop("customizations")
# generate a list of customized parameters
for custom in customizations:
# get a copy of the vanilla parameters
param_custom = copy.deepcopy(param_original)
# recurse into subdictionaries and modify desired key
subdict = param_custom
for key in custom["key"][:-1]:
subdict = subdict[key]
subdict[custom["key"][-1]] = custom["value"]
# now append it to the array
param_array.append(param_custom)
else:
# simply put the one element in an array
param_array.append(param_original)
return param_array
def _generate_remaining_param(
param, responsibility, num_custom, id_custom, num_workers, id_worker
):
"""Auto-generate the remaining parameters that are required.
Args:
param (dict): parameters loaded from file
responsibility (np.array): responsibility mask for this worker
num_custom (int): number of customizations
id_custom (int): ID of this customization in range(num_custom)
num_workers (int): total number of workers to split
id_worker (int): ID of this worker in range(num_workers)
Returns:
dict: generated parameters
"""
generated = {}
# assign responsibility right away
generated["responsibility"] = responsibility
# also assign number of customizations right away
generated["numCustom"] = num_custom
generated["idCustom"] = id_custom
# different spacing options to determine keep ratios
all_ratios = []
for spacing_config in param["experiments"]["spacing"]:
all_ratios.extend(_get_keep_ratios(spacing_config))
# split and store
generated["keepRatios"], generated["compressRatios"] = _split_keep_ratios(
all_ratios,
param["experiments"]["retrainIterations"],
param["experiments"]["mode"],
)
# generate net name and store
generated["netName"] = "_".join(
[
param["network"]["name"],
param["network"]["dataset"],
f"e{param['training']['numEpochs']}",
]
)
# also store the number of workers and the id
generated["numWorkers"] = num_workers
generated["idWorker"] = id_worker
# check for test dataset
if "datasetTest" in param["experiments"]:
generated["datasetTest"] = param["experiments"]["datasetTest"]
else:
generated["datasetTest"] = param["network"]["dataset"]
# generate a markdown version of the param file
generated["paramMd"] = _generate_param_markdown(param)
# generate list of names and colors for these particular set of algorithms
generated["network_names"] = [
param["network_names"][key] if key in param["network_names"] else key
for key in param["experiments"]["methods"]
]
mcolor_list = list(mcolors.CSS4_COLORS.keys())
generated["network_colors"] = [
param["network_colors"][key]
if key in param["network_colors"]
else mcolor_list[hash(key) % len(mcolor_list)]
for key in param["experiments"]["methods"]
]
# generate training and retraining parameters from provided ones
generated["training"] = copy.deepcopy(param["training"])
generated["training"]["startEpoch"] = 0
generated["training"]["outputSize"] = param["network"]["outputSize"]
if "earlyStopEpoch" not in generated["training"]:
generated["training"]["earlyStopEpoch"] = generated["training"][
"numEpochs"
]
if "enableAMP" not in generated["training"]:
generated["training"]["enableAMP"] = True
if "testBatchSize" not in generated["training"]:
generated["training"]["testBatchSize"] = generated["training"][
"batchSize"
]
generated["retraining"] = copy.deepcopy(generated["training"])
generated["retraining"].update(param["retraining"])
# same metrics for training and re-training
generated["retraining"]["metricsTest"] = generated["training"][
"metricsTest"
]
# needed for rewind checkpoint
if "rewind" in param["experiments"]["mode"]:
generated["training"]["retrainStartEpoch"] = generated["retraining"][
"startEpoch"
]
else:
generated["training"]["retrainStartEpoch"] = -1 # invalid in this case
# check for number of available GPUs
generated["numAvailableGPUs"] = torch.cuda.device_count()
# get the results parent directory
parent_dir = os.path.join(
param["directories"]["results"],
param["network"]["dataset"].lower(),
param["network"]["name"],
)
parent_dir = os.path.realpath(parent_dir)
# generate the list of folders that have some parameter settings
results_dir_prev = _find_latest_results(param, generated, parent_dir)
generated["resultsDirPrev"] = results_dir_prev
# check if we re-purpose time tag now.
if results_dir_prev is None:
time_tag = datetime.datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S_%f")
else:
parent_dir, time_tag = os.path.split(results_dir_prev)
# now set the directories.
results_dir = os.path.join(parent_dir, time_tag)
# store them
generated["parentDir"] = parent_dir
generated["timeTag"] = time_tag
generated["resultsDir"] = results_dir
# also define sub directories
generated["stdoutDir"] = os.path.join(generated["resultsDir"], "stdout")
generated["logDir"] = os.path.join(generated["resultsDir"], "log")
generated["dataDir"] = os.path.join(generated["resultsDir"], "data")
generated["reportsDir"] = os.path.join(generated["resultsDir"], "reports")
generated["plotDir"] = os.path.join(
generated["resultsDir"], "plots", generated["datasetTest"]
)
# generate global tag for tensorboard
generated["globalTag"] = "_".join(
[
generated["netName"],
f"re{generated['retraining']['numEpochs']}",
param["experiments"]["mode"],
f"int{len(generated['keepRatios'])}",
]
)
# we can put trained networks inside results folder
if (
"rewind" in param["experiments"]["mode"]
or param["directories"]["trained_networks"] is None
):
generated["training"]["dir"] = os.path.join(
results_dir, "trained_networks"
)
else:
generated["training"]["dir"] = os.path.realpath(
param["directories"]["trained_networks"]
)
generated["retraining"]["dir"] = os.path.join(
results_dir, "retrained_networks"
)
# return the generated parameters
return generated
def _generate_param_markdown(param):
"""Generate a markdown compatible string from the parameters."""
text = yaml.dump(param)
text = re.sub("#.*?\n", "\n", text)
text = text.replace("\n", " \n")
return text
def _get_keep_ratios(spacing_config):
"""Get the keep ratios for a given spacing configuration."""
if spacing_config["type"] == "harmonic":
# numIntervals is the number of intervals to go down to 0.5
pow = np.log(2) / np.log(1 + spacing_config["numIntervals"])
num_iters = max(1, int(spacing_config["minVal"] ** (-1.0 / pow)))
keep_ratios = [(i + 1) ** -pow for i in range(1, num_iters + 1)]
elif spacing_config["type"] == "harmonic2":
# numIntervals is the number of intervals to go from max to min
# value as everywhere
r_min = spacing_config["minVal"]
r_max = spacing_config["maxVal"]
num_int = spacing_config["numIntervals"]
pow = np.log(r_max / r_min) / np.log(num_int)
keep_ratios = r_max * np.arange(1, num_int + 1) ** (-pow)
keep_ratios = keep_ratios.tolist()
elif spacing_config["type"] == "geometric":
keep_ratios = np.geomspace(
spacing_config["maxVal"],
spacing_config["minVal"],
spacing_config["numIntervals"],
).tolist()
elif spacing_config["type"] == "cubic":
r_min = spacing_config["minVal"]
r_max = spacing_config["maxVal"]
num_int = spacing_config["numIntervals"]
keep_ratios = (
r_min
+ (r_max - r_min) * (1 - np.arange(num_int) / (num_int - 1)) ** 3
)
keep_ratios = keep_ratios.tolist()
elif spacing_config["type"] == "linear":
keep_ratios = np.linspace(
spacing_config["maxVal"],
spacing_config["minVal"],
spacing_config["numIntervals"],
).tolist()
elif spacing_config["type"] == "manual":
keep_ratios = spacing_config["values"]
else:
        raise ValueError(
            "This spacing configuration is not implemented: "
            "{}".format(spacing_config["type"])
        )
    return keep_ratios
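

# Illustrative sketch (not from the original file): what a "geometric" spacing
# configuration produces with the helper above. The numbers are arbitrary
# example values (np.geomspace(0.8, 0.1, 4) gives [0.8, 0.4, 0.2, 0.1]).
def _example_geometric_ratios():
    spacing_config = {
        "type": "geometric",
        "maxVal": 0.8,
        "minVal": 0.1,
        "numIntervals": 4,
    }
    return _get_keep_ratios(spacing_config)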
any point before the SSH transport layer
has finished key exchange (ie, gotten to the point where we may attempt
to authenticate), the L{Deferred} returned by
L{SSHCommandClientEndpoint.connect} fires with a L{Failure} wrapping
the reason for the lost connection.
"""
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", b"dummy user",
self.hostname, self.port, knownHosts=self.knownHosts,
ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
d = endpoint.connect(factory)
transport = StringTransport()
factory = self.reactor.tcpClients[0][2]
client = factory.buildProtocol(None)
client.makeConnection(transport)
client.connectionLost(Failure(ConnectionDone()))
self.failureResultOf(d).trap(ConnectionDone)
def test_connectionCancelledBeforeSecure(self):
"""
If the connection is cancelled before the SSH transport layer has
finished key exchange (ie, gotten to the point where we may attempt to
authenticate), the L{Deferred} returned by
L{SSHCommandClientEndpoint.connect} fires with a L{Failure} wrapping
L{CancelledError} and the connection is aborted.
"""
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", b"dummy user",
self.hostname, self.port, knownHosts=self.knownHosts,
ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
d = endpoint.connect(factory)
transport = AbortableFakeTransport(None, isServer=False)
factory = self.reactor.tcpClients[0][2]
client = factory.buildProtocol(None)
client.makeConnection(transport)
d.cancel()
self.failureResultOf(d).trap(CancelledError)
self.assertTrue(transport.aborted)
# Make sure the connection closing doesn't result in unexpected
# behavior when due to cancellation:
client.connectionLost(Failure(ConnectionDone()))
def test_connectionCancelledBeforeConnected(self):
"""
If the connection is cancelled before it finishes connecting, the
connection attempt is stopped.
"""
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", b"dummy user",
self.hostname, self.port, knownHosts=self.knownHosts,
ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
d = endpoint.connect(factory)
d.cancel()
self.failureResultOf(d).trap(ConnectingCancelledError)
self.assertTrue(self.reactor.connectors[0].stoppedConnecting)
def test_passwordAuthenticationFailure(self):
"""
If the SSH server rejects the password presented during authentication,
the L{Deferred} returned by L{SSHCommandClientEndpoint.connect} fires
with a L{Failure} wrapping L{AuthenticationFailed}.
"""
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", b"dummy user",
self.hostname, self.port, password=b"<PASSWORD>",
knownHosts=self.knownHosts, ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
# For security, the server delays password authentication failure
# response. Advance the simulation clock so the client sees the
# failure.
self.reactor.advance(server.service.passwordDelay)
# Let the failure response traverse the "network"
pump.flush()
f = self.failureResultOf(connected)
f.trap(AuthenticationFailed)
# XXX Should assert something specific about the arguments of the
# exception
self.assertClientTransportState(client, False)
def setupKeyChecker(self, portal, users):
"""
Create an L{ISSHPrivateKey} checker which recognizes C{users} and add it
to C{portal}.
@param portal: A L{Portal} to which to add the checker.
@type portal: L{Portal}
@param users: The users and their keys the checker will recognize. Keys
are byte strings giving user names. Values are byte strings giving
OpenSSH-formatted private keys.
@type users: L{dict}
"""
mapping = dict([(k,[Key.fromString(v).public()])
for k, v in iteritems(users)])
checker = SSHPublicKeyChecker(InMemorySSHKeyDB(mapping))
portal.registerChecker(checker)
def test_publicKeyAuthenticationFailure(self):
"""
If the SSH server rejects the key pair presented during authentication,
the L{Deferred} returned by L{SSHCommandClientEndpoint.connect} fires
with a L{Failure} wrapping L{AuthenticationFailed}.
"""
badKey = Key.fromString(privateRSA_openssh)
self.setupKeyChecker(self.portal, {self.user: privateDSA_openssh})
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", self.user,
self.hostname, self.port, keys=[badKey],
knownHosts=self.knownHosts, ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
f = self.failureResultOf(connected)
f.trap(AuthenticationFailed)
# XXX Should assert something specific about the arguments of the
# exception
# Nothing useful can be done with the connection at this point, so the
# endpoint should close it.
self.assertTrue(client.transport.disconnecting)
def test_authenticationFallback(self):
"""
If the SSH server does not accept any of the specified SSH keys, the
specified password is tried.
"""
badKey = Key.fromString(privateRSA_openssh)
self.setupKeyChecker(self.portal, {self.user: privateDSA_openssh})
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", self.user, self.hostname, self.port,
keys=[badKey], password=b"<PASSWORD>", knownHosts=self.knownHosts,
ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
# Exercising fallback requires a failed authentication attempt. Allow
# one.
self.factory.attemptsBeforeDisconnect += 1
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
pump.pump()
# The server logs the channel open failure - this is expected.
errors = self.flushLoggedErrors(ConchError)
self.assertIn(
'unknown channel', (errors[0].value.data, errors[0].value.value))
self.assertEqual(1, len(errors))
# Now deal with the results on the endpoint side.
f = self.failureResultOf(connected)
f.trap(ConchError)
self.assertEqual(b'unknown channel', f.value.value)
# Nothing useful can be done with the connection at this point, so the
# endpoint should close it.
self.assertTrue(client.transport.disconnecting)
def test_publicKeyAuthentication(self):
"""
If L{SSHCommandClientEndpoint} is initialized with any private keys, it
will try to use them to authenticate with the SSH server.
"""
key = Key.fromString(privateDSA_openssh)
self.setupKeyChecker(self.portal, {self.user: privateDSA_openssh})
self.realm.channelLookup[b'session'] = WorkingExecSession
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", self.user, self.hostname, self.port,
keys=[key], knownHosts=self.knownHosts, ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
protocol = self.successResultOf(connected)
self.assertIsNotNone(protocol.transport)
def test_skipPasswordAuthentication(self):
"""
If the password is not specified, L{SSHCommandClientEndpoint} doesn't
try it as an authentication mechanism.
"""
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", self.user, self.hostname, self.port,
knownHosts=self.knownHosts, ui=FixedResponseUI(False))
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
pump.pump()
# Now deal with the results on the endpoint side.
f = self.failureResultOf(connected)
f.trap(AuthenticationFailed)
# Nothing useful can be done with the connection at this point, so the
# endpoint should close it.
self.assertTrue(client.transport.disconnecting)
def test_agentAuthentication(self):
"""
If L{SSHCommandClientEndpoint} is initialized with an
L{SSHAgentClient}, the agent is used to authenticate with the SSH
server. Once the connection with the SSH server has concluded, the
connection to the agent is disconnected.
"""
key = Key.fromString(privateRSA_openssh)
agentServer = SSHAgentServer()
agentServer.factory = Factory()
agentServer.factory.keys = {key.blob(): (key, b"")}
self.setupKeyChecker(self.portal, {self.user: privateRSA_openssh})
agentEndpoint = SingleUseMemoryEndpoint(agentServer)
endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", self.user, self.hostname, self.port,
knownHosts=self.knownHosts, ui=FixedResponseUI(False),
agentEndpoint=agentEndpoint)
self.realm.channelLookup[b'session'] = WorkingExecSession
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
# Let the agent client talk with the agent server and the ssh client
# talk with the ssh server.
for i in range(14):
agentEndpoint.pump.pump()
pump.pump()
protocol = self.successResultOf(connected)
self.assertIsNotNone(protocol.transport)
# Ensure the connection with the agent is cleaned up after the
# connection with the server is lost.
self.loseConnectionToServer(server, client, protocol, pump)
self.assertTrue(client.transport.disconnecting)
self.assertTrue(agentEndpoint.pump.clientIO.disconnecting)
def test_loseConnection(self):
"""
The transport connected to the protocol has a C{loseConnection} method
which causes the channel in which the command is running to close and
the overall connection to be closed.
"""
self.realm.channelLookup[b'session'] = WorkingExecSession
endpoint = self.create()
factory = Factory()
factory.protocol = Protocol
connected = endpoint.connect(factory)
server, client, pump = self.finishConnection()
protocol = self.successResultOf(connected)
self.loseConnectionToServer(server, client, protocol, pump)
# Nothing useful can be done with the connection at this point, so the
# endpoint should close it.
self.assertTrue(client.transport.disconnecting)
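# --- Illustrative sketch (added, not part of the original test module) ---
# Outside the in-memory test doubles above, SSHCommandClientEndpoint.newConnection is
# normally driven by a real reactor; the host, user and password below are placeholders,
# and the names used (Factory, Protocol, KnownHostsFile, FilePath, FixedResponseUI) are
# assumed to be the module-level imports already used by these tests.
def _exampleNewConnection(reactor):
    endpoint = SSHCommandClientEndpoint.newConnection(
        reactor, b"/bin/ls -l", b"someuser", b"ssh.example.com", 22,
        password=b"secret",
        knownHosts=KnownHostsFile(FilePath(b"known_hosts")),
        ui=FixedResponseUI(False))
    factory = Factory()
    factory.protocol = Protocol
    # Fires with the connected Protocol once the command channel is open.
    return endpoint.connect(factory)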
class ExistingConnectionTests(TestCase, SSHCommandClientEndpointTestsMixin):
"""
Tests for L{SSHCommandClientEndpoint} when using the C{existingConnection}
constructor.
"""
def setUp(self):
"""
Configure an SSH server with password authentication enabled for a
well-known (to the tests) account.
"""
SSHCommandClientEndpointTestsMixin.setUp(self)
knownHosts = KnownHostsFile(FilePath(self.mktemp()))
knownHosts.addHostKey(
self.hostname, self.factory.publicKeys[b'ssh-rsa'])
knownHosts.addHostKey(
networkString(self.serverAddress.host),
self.factory.publicKeys[b'ssh-rsa'])
self.endpoint = SSHCommandClientEndpoint.newConnection(
self.reactor, b"/bin/ls -l", self.user, self.hostname, self.port,
password=self.password, knownHosts=knownHosts,
ui=FixedResponseUI(False))
def create(self):
"""
Create and return a new L{SSHCommandClientEndpoint} using the
C{existingConnection} constructor.
"""
factory = Factory()
factory.protocol = Protocol
connected = self.endpoint.connect(factory)
# Please, let me in. This kinda sucks.
channelLookup = self.realm.channelLookup.copy()
try:
self.realm.channelLookup[b'session'] = WorkingExecSession
server, client, pump = self.connectedServerAndClient(
self.factory, self.reactor.tcpClients[0][2])
finally:
self.realm.channelLookup.clear()
self.realm.channelLookup.update(channelLookup)
self._server = server
self._client = client
self._pump = pump
protocol = self.successResultOf(connected)
connection = protocol.transport.conn
return SSHCommandClientEndpoint.existingConnection(
connection, b"/bin/ls -l")
def finishConnection(self):
"""
Give back the connection established in L{create} over which the new
command channel being tested will exchange data.
"""
# The connection was set up and the first command channel set up, but
# some more I/O needs to happen for the second command channel to be
# ready. Make that I/O happen before giving back the objects.
self._pump.pump()
self._pump.pump()
self._pump.pump()
self._pump.pump()
return self._server, self._client, self._pump
def assertClientTransportState(self, client, immediateClose):
"""
Assert that the transport for the given protocol is still connected.
L{SSHCommandClientEndpoint.existingConnection} re-uses an SSH connection
created by some other code, so other code is responsible for cleaning
it up.
"""
self.assertFalse(client.transport.disconnecting)
self.assertFalse(client.transport.aborted)
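# --- Illustrative sketch (added, not part of the original test module) ---
# The reuse pattern exercised by create() above: once one command channel is connected,
# the underlying SSH connection can serve further commands via existingConnection().
def _exampleExistingConnection(firstProtocol):
    # firstProtocol is the Protocol a previous endpoint.connect() call fired with.
    connection = firstProtocol.transport.conn
    return SSHCommandClientEndpoint.existingConnection(connection, b"/bin/cat /etc/motd")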
class ExistingConnectionHelperTests(TestCase):
"""
Tests for L{_ExistingConnectionHelper}.
"""
def test_interface(self):
"""
L{_ExistingConnectionHelper} implements L{_ISSHConnectionCreator}.
"""
self.assertTrue(
verifyClass(_ISSHConnectionCreator, _ExistingConnectionHelper))
def test_secureConnection(self):
"""
L{_ExistingConnectionHelper.secureConnection} returns a L{Deferred}
which fires with whatever object was fed to
L{_ExistingConnectionHelper.__init__}.
"""
result = object()
helper = _ExistingConnectionHelper(result)
self.assertIs(
result, self.successResultOf(helper.secureConnection()))
def test_cleanupConnectionNotImmediately(self):
"""
L{_ExistingConnectionHelper.cleanupConnection} does nothing to the
existing connection if called with C{immediate} set to C{False}.
"""
helper = _ExistingConnectionHelper(object())
# Bit hard to test
trying to create file, sha1 does not match. file:'+File['name'])
consoleShow( _T+'Process finished...'+_N, None)
def opt_install_obj ():
execute = consoleShow(_A+'Apply the includes for the used object'+_N, 'YesOrNo')
if execute:
installObjInFiles()
consoleShow( _T+'End of changes...'+_N, None)
def is_JSCript_file (full_file_data):
global patern_have_JSCript
FP = regularGo(patern_have_JSCript, True)
maths = FP.finditer(full_file_data)
for math in list(maths):
return True
return False
def installObjInFiles ():
global path_file, fileList, result_list, patern_line_break, patern_have_layout_link_ARR, patern_findtoMAKE_layout_link, patern_have_obj_include, patern_findtoMAKE_obj_include, patern_findtoMAKE_obj_include_force
# find all oLayout's
hourglass_START(-1)
FP = regularGo(patern_findtoMAKE_layout_link, True)
for Id, File in result_list.iteritems():
# not process and empty
if not File['process']:
continue
# not process and empty
hourglass()
#var def
full_file_data = open(path_file+File['path']+File['name']).read()
line_map = map_line_break(full_file_data)
#var def
maths = FP.finditer(full_file_data)
for math in list(maths):
#var def
line_position = find_line_position(math, line_map)
#var def
obj_name = math.group(1)
patern = patern_have_layout_link_ARR[0]+obj_name+patern_have_layout_link_ARR[1]
### print '\n*', patern_have_layout_link_ARR, '\n', patern, '\n', math.group()
if not existMathIn(full_file_data ,patern):
#var def
Start = math.start()
End = math.end()
line = get_line(full_file_data, line_map, line_position)
change = line+ 'Set '+obj_name+'.Seguranca = oSeg\n'
#var def
mDta = {'start': Start, 'end': End, 'start_g': Start, 'end_g': End, 'change': change}
result_list[Id]['maths']['data'][mDta['start']] = {'data': mDta, 'work_type': 'install_link'}
# find all install need's
FP = regularGo(patern_findtoMAKE_obj_include, False)
FP_unique = regularGo(patern_findtoMAKE_obj_include_unique, False)
FP_force = regularGo(patern_findtoMAKE_obj_include_force, False)
drop_all_includes()
white_list = correction_multiple_include()
for Id, File in result_list.iteritems():
# not process and empty and not in white list
if not File['process']:
continue
if File['maths']['data'] == {}:
continue
if Id not in white_list:
continue
# not process and empty and not in white list
hourglass()
if have_include_instaled(Id):
continue
#var def
not_install_in_first_mode = True
full_file_data = open(path_file+File['path']+File['name']).read()
line_map = map_line_break(full_file_data)
#var def
if not existMathIn(full_file_data, patern_have_obj_include):
#var def
change = '\n<!--#include virtual="/Classes/Seguranca.asp"-->\n<% Dim oSeg : Set oSeg = New Seguranca %>\n'
#var def
maths = FP.finditer(full_file_data)
for math in list(maths):
#var def
Start = math.start(1)
End = math.end(1)
#var def
not_install_in_first_mode = False
break
if not_install_in_first_mode:
maths = FP_unique.finditer(full_file_data)
for math in list(maths):
#var def
Start = math.start(1)
End = math.end(1)
#var def
not_install_in_first_mode = False
break
if not_install_in_first_mode:
maths = FP_force.finditer(full_file_data)
for math in list(maths):
#var def
Start = math.start(1)
End = math.end(1)
#var def
not_install_in_first_mode = False
break
if not not_install_in_first_mode:
mDta = {'start': Start, 'end': End, 'start_g': Start, 'end_g': End, 'change': change}
result_list[Id]['maths']['data'][mDta['start']] = {'data': mDta, 'work_type': 'install_include'}
result_list[Id]['maths']['include'] = mDta['start']
else:
mDta = {'start': 0, 'end': 0, 'start_g': 0, 'end_g': 0, 'change': change}
result_list[Id]['maths']['data'][-1] = {'data': mDta, 'work_type': 'install_include'}
result_list[Id]['maths']['include'] = -1
def existMathIn (file_data ,patern):
FP = regularGo(patern, False)
maths = FP.finditer(file_data)
for math in list(maths):
return True
return False
def makeChangesInFile (File):
global path_file, path_file_change
#var def
Maths = File['maths']['data']
filePath = path_file+File['path']+File['name']
#if file not exist
if not os.path.exists(filePath):
raise ValueError('\nERROR\nFile does not exist in the copy source.\nName: '+File['name'])
return
#if file not exist
file = open(filePath, 'r')
file_data = file.read()
file.close()
#var def
file_data = makeNewLine(file_data, Maths, [], False, True)
'''if File['maths']['include'] != None:
M = File['maths']['include']
change = result_list[Id]['maths']['data'][M]
file_data = change +file_data'''
create_path_if_not_exit(File['path'])
#var def
filePath = path_file_change+File['path']+File['name']
file = open(filePath, 'w')
file.write(file_data)
def create_path_if_not_exit(directory):
global path_file_change
if not os.path.exists(path_file_change+directory):
os.makedirs(path_file_change+directory)
def hourglass_START (limit):
global hourglass_pass
hourglass_pass['limit'] = float(limit)
hourglass_pass['status'] = 0
hourglass_pass['point'] = 0
os.system('cls')
if hourglass_pass['limit'] == -1:
sys.stdout.write('\n\nPlease wait...')
else:
print '\n\tCreating copies\n\tModified files: '+str(int(hourglass_pass['limit']))
sys.stdout.write('\n Please wait:\n')
def hourglass ():
global hourglass_pass
if hourglass_pass['limit'] == -1:
hourglass_pass['status'] += 1
sys.stdout.write('.')
if hourglass_pass['status'] > 388:
os.system('cls')
hourglass_pass['status'] = 0
sys.stdout.write('\n\nPlease wait...')
else:
hourglass_pass['status'] += 1
old_point = hourglass_pass['point']
hourglass_pass['point'] = hourglass_pass['status'] / hourglass_pass['limit'] * 40 #char lim 80
if old_point < int(hourglass_pass['point']):
sys.stdout.write('=')
def writeLogFile ():
global path_log, result_list
print "\n>>>Criando Log"
filePath = path_log +'/log.html'
f = open(filePath, 'w')
f.write( htmlCss() );
for kF, file in result_list.iteritems():
#var def
name = file['name']
LineWorks = file['line_works']
ocorrencia = 0
MathGroup = ''
#var def
for kW, LineData in LineWorks.iteritems():
#var def
position = LineData['position']
old_line = LineData['old']
Maths = LineData['maths']
MathLine = ''
#var def
for kM, math in Maths.iteritems():
if math['work_type'] not in ['find_recordset_dim', 'find_recordset_call']:
ocorrencia += 1
MathLine = MathLine + htmlDiv_Math(math['start'], math['end'])
if len(MathLine):
dtLine = makeNewLine(old_line, Maths, ['html'], False)
old_line_tag = dtLine['html']['old']
new_line_tag = dtLine['html']['new']
MathGroup = MathGroup + htmlDiv_Line(position, MathLine, old_line_tag, new_line_tag);
f.write( htmlDiv_File(name, ocorrencia, MathGroup) );
f.close()
webbrowser.open(path_log +'/log.html')
#FUNCTION
#########
######
#GRAFO
def GRAFO_find_Roots ():
global grafo, woodlist, rootlist, all_rootlist
woodlist = {}
rootlist = []
all_rootlist = {}
for N in grafo.keys():
for L in grafo[N]['links']:
woodlist[L] = 0
woodlist = woodlist.keys()
for N in grafo.keys():
if N not in woodlist:
if len(grafo[N]['links']) > 0:
all_rootlist[N] = 0
all_rootlist = all_rootlist.keys()
def GRAFO_easy_process ():
global grafo, woodlist, seedlist, rootlist, all_rootlist, Processed_Roots
Processed_Roots = []
# process roots
for R in all_rootlist:
if grafo[R]['active']:
Processed_Roots += [R]
grafo[R]['*'] = True
else:
rootlist = [R]
def GRAFO_recursive_setRoots (Root, N):
global grafo
grafo[Root]['groups'] += grafo[N]['links']
grafo[N]['roots'] += [Root]
for L in grafo[N]['links']:
GRAFO_recursive_setRoots(Root, L)
def GRAFO_setRoots_setGroups ():
global grafo, rootlist
for R in all_rootlist:
grafo[R]['groups'] = grafo[R]['links']
for L in grafo[R]['links']:
GRAFO_recursive_setRoots(R, L)
def GRAFO_find_and_process_seeds ():
global grafo, rootlist, seedlist, Processed_Roots
seedlist = {}
for S in grafo:
if grafo[S]['links'] == [] and grafo[S]['roots'] == []:
seedlist[S] = 0
grafo[S]['*'] = True
Processed_Roots += [S]
seedlist = seedlist.keys()
def GRAFO_add_group_roots (key, Roots):
global crosslist, crossgroup
for R in Roots:
if R not in crossgroup[key]:
crossgroup[key] += [R]
crosslist[R] = 0
def GRAFO_group_roots ():
global grafo, woodlist, rootlist, alonelist, crosslist, crossgroup
crossgroup = {}
alonelist = []
crosslist = {}
for W in woodlist:
hourglass() #*------!
thisRoots = grafo[W]['roots']
if len(thisRoots) > 0:
found = False
for k, group in crossgroup.iteritems():
for R in thisRoots:
if R in group:
GRAFO_add_group_roots(k, thisRoots)
found = True
break
if not found:
key = len(crossgroup)
crossgroup[key] = thisRoots
for R in thisRoots:
crosslist[R] = 0
crosslist = crosslist.keys()
for R in rootlist:
if R not in crosslist:
alonelist += [R]
crosslist = None
def GRAFO_recursive_setRanks (N, pt):
global grafo
grafo[N]['rank'] += 1+pt
pt = grafo[N]['rank']
for L in grafo[N]['links']:
GRAFO_recursive_setRanks(L, pt)
def GRAFO_rank_all ():
global grafo, woodlist, alonelist
for R in rootlist:
if R not in alonelist:
for L in grafo[R]['links']:
GRAFO_recursive_setRanks(L, 0)
for W in woodlist:
grafo[W]['rank'] *= len(grafo[W]['roots'])
def GRAFO_find_best (List):
first = True
best = None
for N in List:
if grafo[N]['active']:
if first:
best = N
score = grafo[N]['rank']
first = False
elif grafo[N]['rank'] < score:
best = N
score = grafo[N]['rank']
return best
def GRAFO_process_alone ():
global grafo, alonelist, Processed_Roots
for A in alonelist:
# NOTE: the original body referenced the undefined names `select` and `R` here
# (a NameError at runtime); the candidate list and the loop variable `A` below
# are a best guess at the intended behaviour.
best = GRAFO_find_best([A] + grafo[A]['groups'])
if best == None:
Processed_Roots += [A]
grafo[A]['*'] = True
def GRAFO_remove_black_nodes (List, Blacklist):
white = []
for N in List:
if N not in Blacklist:
white += [N]
return white
def GRAFO_feed_processed_Noods (thisRoots, Processed_Noods):
Noods = {}
for R in thisRoots:
Noods[R] = 0
for N in grafo[R]['groups']:
Noods[N] = 0
for N in Noods.keys():
Processed_Noods[N] = 0
return Processed_Noods
def GRAFO_process_cross ():
global grafo, crossgroup, Processed_Roots
Processed_Noods = {}
for pR in Processed_Roots:
Nodos = grafo[pR]['groups']
if Nodos != None:
for N in Nodos:
Processed_Noods[N] = 0
'''raw_input(Processed_Roots)
raw_input(Processed_Noods)'''
limit = 10
while limit > 0:
if len(crossgroup) > 0:
limit -= 1
remove = []
for k, group in crossgroup.iteritems():
hourglass() #*------!
select = []
for R in group:
if R not in Processed_Roots:
select += [R] + grafo[R]['groups']
select = GRAFO_remove_black_nodes(select, Processed_Noods.keys())
if len(select) == 0:
remove += [k]
continue
best = GRAFO_find_best(select)
if best == None:
remove += [k]
continue
else:
grafo[best]['*'] = True
Processed_Noods[best] = 0
thisRoots = grafo[best]['roots']
Processed_Roots += thisRoots
Processed_Noods = GRAFO_feed_processed_Noods(thisRoots, Processed_Noods)
#*-*-*-*-
'''print best, '\n<S>', select, '\n<R>', Processed_Roots, '\n<B>', Processed_Noods.keys()
raw_input()'''
#*-*-*-*-
for D in remove:
del crossgroup[D]
else:
break
def GRAFO_process ():
time = (datetime.now()).microsecond
GRAFO_find_Roots()
GRAFO_easy_process()
GRAFO_setRoots_setGroups()
GRAFO_find_and_process_seeds()
GRAFO_group_roots()
GRAFO_rank_all()
GRAFO_process_alone()
GRAFO_process_cross()
time = (datetime.now()).microsecond - time
#raw_input('END:'+str(time))
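# --- Illustrative toy example (added sketch, not part of the original script) ---
# The GRAFO_* helpers above operate on the module-level `grafo` dict; the per-node
# fields used here ('links', 'roots', 'groups', 'rank', 'active', '*') are inferred
# from the functions above. Assumes hourglass_START()/hourglass() and the module's
# datetime import are available, as elsewhere in this script.
def GRAFO_toy_example():
    global grafo
    def node(links, active=True):
        return {'links': links, 'roots': [], 'groups': [], 'rank': 0,
                'active': active, '*': False}
    grafo = {
        'a.asp': node(['b.asp', 'c.asp']),  # a root page linking to two others
        'b.asp': node([]),
        'c.asp': node(['b.asp']),
        'd.asp': node([]),                  # an isolated seed
    }
    hourglass_START(-1)
    GRAFO_process()
    # names of the nodes that were marked ('*') for processing
    return [n for n in grafo if grafo[n]['*']]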
def GRAFO_in ():
global grafo, GRAFO_index_list, result_list
grafo = {}
GRAFO_index_list = {}
for Id in result_list.keys():
# temporary fix
'''if result_list[Id]['path'] == 'WEB/popup/':
continue'''
# temporary fix
hourglass() #*------!
active = result_list[Id]['process']# and result_list[Id]['maths']['include'] != None:
if Id not in GRAFO_index_list.values():
newKey = len(GRAFO_index_list)
GRAFO_index_list[newKey] = Id
# \brief Gets whether the narrowband DDCs on the radio are tunable.
#
# \copydetails CyberRadioDriver::IRadio::isNbddcTunable()
@classmethod
def isNbddcTunable(cls):
return False if cls.nbddcType is None else cls.nbddcType.tunable
##
# \brief Gets whether the narrowband DDCs on the radio have selectable
# sources.
#
# \copydetails CyberRadioDriver::IRadio::isNbddcSelectableSource()
@classmethod
def isNbddcSelectableSource(cls):
return False if cls.nbddcType is None else cls.nbddcType.selectableSource
##
# \brief Gets the frequency offset range for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRange()
@classmethod
def getNbddcFrequencyRange(cls):
return (0.0,0.0) if cls.nbddcType is None else cls.nbddcType.frqRange
##
# \brief Gets the frequency offset resolution for narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRes()
@classmethod
def getNbddcFrequencyRes(cls):
return 0.0 if cls.nbddcType is None else cls.nbddcType.frqRes
##
# \brief Gets the allowed rate set for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcRateSet()
@classmethod
def getNbddcRateSet(cls, index=None):
return cls.getDdcRateSet(False, index)
##
# \brief Gets the allowed rate list for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcRateList()
@classmethod
def getNbddcRateList(cls, index=None):
return cls.getDdcRateList(False, index)
##
# \brief Gets the allowed bandwidth set for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcBwSet()
@classmethod
def getNbddcBwSet(cls, index=None):
return cls.getDdcBwSet(False, index)
##
# \brief Gets the allowed bandwidth list for the narrowband DDCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcBwList()
@classmethod
def getNbddcBwList(cls, index=None):
return cls.getDdcBwList(False, index)
##
# \brief Gets the number of FFT streams on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumFftStream()
@classmethod
def getNumFftStream(cls):
return len(cls.getFftStreamIndexRange())
##
# \brief Gets the index range for the FFT streams on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamIndexRange()
@classmethod
def getFftStreamIndexRange(cls):
return [] if cls.numFftStream == 0 else \
list(range(cls.fftStreamIndexBase, cls.fftStreamIndexBase + cls.numFftStream, 1))
##
# \brief Gets the allowed rate set for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamRateSet()
@classmethod
def getFftStreamRateSet(cls,):
return cls.fftStreamType.getDdcRateSet() if cls.fftStreamType is not None else {}
##
# \brief Gets the allowed rate list for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamRateList()
@classmethod
def getFftStreamRateList(cls,):
return cls.fftStreamType.getDdcRateList() if cls.fftStreamType is not None else []
##
# \brief Gets the allowed window set for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamWindowSet()
@classmethod
def getFftStreamWindowSet(cls,):
return cls.fftStreamType.getWindowSet() if cls.fftStreamType is not None else {}
##
# \brief Gets the allowed window list for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamWindowList()
@classmethod
def getFftStreamWindowList(cls,):
return sorted(cls.fftStreamType.getWindowSet().keys()) if cls.fftStreamType is not None else []
##
# \brief Gets the allowed size set for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamSizeSet()
@classmethod
def getFftStreamSizeSet(cls,):
return cls.fftStreamType.getSizeSet() if cls.fftStreamType is not None else {}
##
# \brief Gets the allowed size list for the FFTs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getFftStreamSizeList()
@classmethod
def getFftStreamSizeList(cls,):
return sorted(cls.fftStreamType.getSizeSet().keys()) if cls.fftStreamType is not None else []
##
# \brief Gets the ADC sample rate for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getAdcRate()
@classmethod
def getAdcRate(cls):
return cls.adcRate
##
# \brief Gets the VITA 49 header size for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaHeaderSize()
@classmethod
def getVitaHeaderSize(cls, payloadType=None):
return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).headerSizeWords
##
# \brief Gets the VITA 49 payload size for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaPayloadSize()
@classmethod
def getVitaPayloadSize(cls, payloadType=None):
return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).payloadSizeWords
##
# \brief Gets the VITA 49 tail size for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaTailSize()
@classmethod
def getVitaTailSize(cls, payloadType=None):
return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).tailSizeWords
##
# \brief Gets dictionary with information about VITA 49 framing.
#
# \copydetails CyberRadioDriver::IRadio::getVitaFrameInfoDict()
@classmethod
def getVitaFrameInfoDict(cls, payloadType=None):
return cls.ifSpecMap.get(payloadType, cls.ifSpec).getVitaFrameInfoDict()
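# --- Illustrative note (added, not from the original source) ---
# For a concrete radio class derived from this base (the class name below is only a
# placeholder), the total VITA 49 frame size in bytes is typically the sum of the
# values returned by the three getters above:
#     frame_bytes = (SomeRadioClass.getVitaHeaderSize()
#                    + SomeRadioClass.getVitaPayloadSize()
#                    + SomeRadioClass.getVitaTailSize())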
##
# \brief Gets whether data coming from the radio is byte-swapped with
# respect to the endianness of the host operating system.
#
# \copydetails CyberRadioDriver::IRadio::isByteswapped()
@classmethod
def isByteswapped(cls, payloadType=None):
return (cls.ifSpecMap.get(payloadType, cls.ifSpec).byteOrder != sys.byteorder)
##
# \brief Gets whether data coming from the radio has I and Q data swapped.
#
# \copydetails CyberRadioDriver::IRadio::isIqSwapped()
@classmethod
def isIqSwapped(cls, payloadType=None):
return cls.ifSpecMap.get(payloadType, cls.ifSpec).iqSwapped
##
# \brief Gets the byte order for data coming from the radio.
#
# \copydetails CyberRadioDriver::IRadio::getByteOrder()
@classmethod
def getByteOrder(cls, payloadType=None):
return cls.ifSpecMap.get(payloadType, cls.ifSpec).byteOrder
##
# \brief Gets the number of Gigabit Ethernet interfaces on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumGigE()
@classmethod
def getNumGigE(cls):
return len(cls.getGigEIndexRange())
##
# \brief Gets the index range for the Gigabit Ethernet interfaces on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getGigEIndexRange()
@classmethod
def getGigEIndexRange(cls):
return [] if cls.numGigE == 0 else \
list(range(cls.gigEIndexBase, cls.gigEIndexBase + cls.numGigE, 1))
##
# \brief Gets the number of destination IP address table entries available for
# each Gigabit Ethernet interface on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumGigEDipEntries()
@classmethod
def getNumGigEDipEntries(cls):
return len(cls.getGigEDipEntryIndexRange())
##
# \brief Gets the index range for the destination IP address table entries
# available for the Gigabit Ethernet interfaces on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getGigEDipEntryIndexRange()
@classmethod
def getGigEDipEntryIndexRange(cls):
return [] if cls.numGigE == 0 else \
list(range(cls.gigEDipEntryIndexBase, \
cls.gigEDipEntryIndexBase + cls.numGigEDipEntries, 1))
##
# \brief Gets the list of connection modes that the radio supports.
#
# \copydetails CyberRadioDriver::IRadio::getConnectionModeList()
@classmethod
def getConnectionModeList(cls):
return [] if cls.connectionModes is None else cls.connectionModes
##
# \brief Gets whether the radio supports a given connection mode.
#
# \copydetails CyberRadioDriver::IRadio::isConnectionModeSupported()
@classmethod
def isConnectionModeSupported(cls, mode):
return mode in cls.getConnectionModeList()
##
# \brief Gets the radio's default baud rate.
#
# \copydetails CyberRadioDriver::IRadio::getDefaultBaudrate()
@classmethod
def getDefaultBaudrate(cls):
return cls.defaultBaudrate
##
# \brief Gets the radio's default control port.
#
# \copydetails CyberRadioDriver::IRadio::getDefaultControlPort()
@classmethod
def getDefaultControlPort(cls):
return cls.defaultPort
##
# \brief Gets the allowed VITA enable options set for the radio.
#
# \copydetails CyberRadioDriver::IRadio::getVitaEnableOptionSet()
@classmethod
def getVitaEnableOptionSet(cls):
return {} if cls.vitaEnableOptions is None else cls.vitaEnableOptions
##
# \brief Gets the number of transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumTransmitters()
@classmethod
def getNumTransmitters(cls):
return len(cls.getTransmitterIndexRange())
##
# \brief Gets the index range for the transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterIndexRange()
@classmethod
def getTransmitterIndexRange(cls):
return [] if cls.numTxs == 0 else \
list(range(cls.txIndexBase, \
cls.txIndexBase + cls.numTxs, 1))
##
# \brief Gets the frequency range for the transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyRange()
@classmethod
def getTransmitterFrequencyRange(cls):
return (0.0,0.0) if cls.numTxs == 0 else cls.txType.frqRange
##
# \brief Gets the frequency resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyRes()
@classmethod
def getTransmitterFrequencyRes(cls):
return None if cls.numTxs == 0 else cls.txType.frqRes
##
# \brief Gets the frequency unit for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyUnit()
@classmethod
def getTransmitterFrequencyUnit(cls):
return None if cls.numTxs == 0 else cls.txType.frqUnits
##
# \brief Gets the attenuation range for the transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterAttenuationRange()
@classmethod
def getTransmitterAttenuationRange(cls):
return (0.0,0.0) if cls.numTxs == 0 else cls.txType.attRange
##
# \brief Gets the attenuation resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterAttenuationRes()
@classmethod
def getTransmitterAttenuationRes(cls):
return None if cls.numTxs == 0 else cls.txType.attRes
##
# \brief Gets whether transmitters on the radio support continuous-wave
# (CW) tone generation.
#
# \copydetails CyberRadioDriver::IRadio::transmitterSupportsCW()
@classmethod
def transmitterSupportsCW(cls):
return (cls.numTxs > 0 and issubclass(cls.txType.toneGenType,
components._cwToneGen))
##
# \brief Gets the number of CW tone generators for each transmitter.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWNum()
@classmethod
def getTransmitterCWNum(cls):
return len(cls.getTransmitterCWIndexRange())
##
# \brief Gets the CW tone generator index range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWIndexRange()
@classmethod
def getTransmitterCWIndexRange(cls):
return [] if not cls.transmitterSupportsCW() else \
list(range(cls.txType.toneGenIndexBase, \
cls.txType.toneGenIndexBase + cls.txType.numToneGen, 1))
##
# \brief Gets the CW tone generator frequency range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWFrequencyRange()
@classmethod
def getTransmitterCWFrequencyRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.frqRange
##
# \brief Gets the CW tone generator frequency resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWFrequencyRes()
@classmethod
def
print ' (c) NOT including this file ... ', iFilename, aTokens[iFilename], iBarcode, aTokens[iBarcode], iYes, aTokens[iYes], zPlat, otherInfo
includeFlag = 0
# if this is the SNP platform, we're only keeping tumor samples ...
# (which means they can be 01, 02, 06 ... I think anything that starts with a '0' basically)
if (includeFlag and (zPlat == "Genome_Wide_SNP_6")):
if (aTokens[iBarcode][13] != "0"):
print ' (d) NOT including this file ... ', iFilename, aTokens[iFilename], iBarcode, aTokens[iBarcode], iYes, aTokens[iYes], zPlat, otherInfo
includeFlag = 0
else:
print ' (d) YES including this file ... ', iFilename, aTokens[iFilename], iBarcode, aTokens[iBarcode], iYes, aTokens[iYes], zPlat, otherInfo
# sanity check that we don't have duplicate barcodes ...
if (includeFlag and (barcode in sdrfDict.keys())):
if (1):
print " WARNING ??? this key is already being used in this dictionary ??? ", barcode
print lineNum, barcode, archive, filename
print aTokens
includeFlag = 0
# sys.exit(-1)
# sanity check that the barcode looks like a TCGA barcode
if (includeFlag and (not barcode.startswith("TCGA-"))):
print " non-TCGA barcode ??? ", barcode, " --> NOT including "
# print lineNum, barcode, archive, filename
# print aTokens
includeFlag = 0
# sys.exit(-1)
if (includeFlag):
sdrfDict[barcode] = (archive, filename, otherInfo)
print ' YES including this file ... ', iFilename, aTokens[iFilename], iBarcode, aTokens[iBarcode], iYes, aTokens[iYes]
else:
if (aTokens[iFilename] != "->"):
print ' (e) NOT including this file ... ', iFilename, aTokens[iFilename], iBarcode, aTokens[iBarcode], iYes, aTokens[iYes]
print ' --> returning from getSDRFinfo ... %d ' % (len(sdrfDict))
if (len(sdrfDict) > 1):
keyList = sdrfDict.keys()
keyList.sort()
print keyList[0], sdrfDict[keyList[0]]
# now we want to build a list of archives and a list of files from the
# sdrfDict ...
archiveList = []
fileList = []
for aKey in sdrfDict.keys():
if (sdrfDict[aKey][0] not in archiveList):
archiveList += [sdrfDict[aKey][0]]
if (sdrfDict[aKey][1] not in fileList):
fileList += [sdrfDict[aKey][1]]
archiveList.sort()
fileList.sort()
print ' have %d archives and %d data files ' % (len(archiveList), len(fileList))
if (len(archiveList) == 0):
print " ERROR ??? why are there no archives to be processed ??? "
if (len(fileList) == 0):
print " ERROR ??? why are there no files to be processed ??? "
if (0):
print ' '
print ' list of archives : (%d) ' % (len(archiveList))
print archiveList
print ' '
print ' list of files : (%d) ' % (len(fileList))
# print fileList
sys.exit(-1)
return (sdrfDict, archiveList, fileList, zPlat)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def cleanString_OLD(aString):
ii = aString.find("'")
if (ii >= 0):
bString = aString[:ii] + aString[ii + 1:]
# print " in cleanString : <%s> <%s> " % ( aString, bString )
aString = bString
ii = aString.find('"')
if (ii >= 0):
bString = aString[:ii] + aString[ii + 1:]
# print " in cleanString : <%s> <%s> " % ( aString, bString )
aString = bString
return (aString)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def removeParentheticalStuff(aString):
# print " in removeParentheticalStuff ... ", aString
i1 = aString.find('(')
if (i1 >= 0):
i2 = aString.find(')', i1)
if (i2 >= 0):
aString = aString[:i1] + aString[i2 + 1:]
aString = removeParentheticalStuff(aString)
# print " returning from removeParentheticalStuff ... ", aString
return (aString)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def cleanString(aString):
# print " in cleanString <%s> " % aString
tString = removeParentheticalStuff(aString)
if (len(tString) > 1):
aString = tString
skipChars = ["(", ")", "'", "@", '"']
spaceChars = [" ", ".", "/", ":"]
okChars = ["-", "_", ","]
bString = ''
for ii in range(len(aString)):
if (aString[ii] in skipChars):
doNothing = 1
elif (aString[ii] in spaceChars):
if (bString[-1] != "_"):
bString += "_"
elif (aString[ii] in okChars):
bString += aString[ii]
else:
iChar = ord(aString[ii])
if ((iChar < ord('0')) or (iChar > ord('z'))):
# print " what character is this ??? ", iChar
doNothing = 1
elif ((iChar > ord('9')) and (iChar < ord('A'))):
# print " what character is this ??? ", iChar
doNothing = 1
elif ((iChar > ord('Z')) and (iChar < ord('a'))):
# print " what character is this ??? ", iChar
doNothing = 1
else:
bString += aString[ii]
# somewhat of a hack ;-)
if (bString == "stage_0"):
# print " Transforming <STAGE 0> to <TIS> "
bString = "tis"
if (bString.startswith("stage_")):
bString = bString[6:]
if (bString.startswith("grade_")):
bString = bString[6:]
try:
while (bString[-1] == "_"):
bString = bString[:-1]
except:
doNothing = 1
# print " returning bString <%s> " % bString
return (bString)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# the feature name is to be of the form:
# DTYPE:FTYPE:FNAME:CHR:START:STOP:STRAND:EXTRA
# for example:
# N:GEXP:BRCA2:chr13:31787617:31871809:+:U133A
def makeFeatureName(dType, fType, fName, chr='', start=-1, stop=-1, strand='', xName=''):
# start out with LOTS of sanity checking of these input values ...
if (dType != 'N' and dType != 'C' and dType != 'B'):
print " ERROR in makeFeatureName ... dType should be B or C or N ", dType
sys.exit(-1)
if (fType.find(":") > 0):
print " ERROR in makeFeatureName ... fType contains colon ???!!! ", fType
sys.exit(-1)
if (0):
if (fName.find(":") > 0):
print " WARNING in makeFeatureName ... fName contains colon ???!!! ", fName
ii = fName.find(":")
fName = fName[:ii] + fName[ii + 1:]
fType = cleanString(fType)
fName = cleanString(fName)
if (chr != ''):
try:
iChr = int(chr)
if (iChr < 0 or iChr > 22):
print " ERROR in makeFeatureName ... invalid chromosome ??? ", chr
sys.exit(-1)
except:
aChr = chr.upper()
if (aChr != 'X' and aChr != 'Y' and aChr != 'M'):
print " ERROR in makeFeatureName ... invalid chromosome ??? ", chr
sys.exit(-1)
if (strand != '+' and strand != '-' and strand != ''):
print " ERROR in makeFeatureName ... invalid strand ??? ", strand
sys.exit(-1)
if (fType == "RPPA"):
if (len(RPPAdict) == 0):
print " reading in RPPA annotation file ... "
fh = file( gidgetConfigVars['TCGAFMP_BIOINFORMATICS_REFERENCES'] + "/tcga_platform_genelists/MDA_antibody_annotation_2014_03_04.txt" )
for aLine in fh:
aLine = aLine.strip()
aLine = aLine.split('\t')
if (len(aLine) == 2):
if (aLine[0] != "Gene Name"):
if (aLine[1] not in RPPAdict.keys()):
RPPAdict[aLine[1]] = aLine[0]
fh.close()
# move the 'gene name' to the 'extra stuff', and then get the gene name
# from RPPAdict
xName = fName
fName = RPPAdict[xName]
print " mapped %s to %s " % (xName, fName)
# paste the first few pieces of information together ...
tmpName = dType + ":" + fType + ":" + fName + ":"
# add chromosome string
if (chr != ''):
tmpName += "chr" + chr
tmpName += ":"
# add (start) position
if (start >= 0):
tmpName += "%d" % start
tmpName += ":"
# add stop position
if (stop >= 0):
tmpName += "%d" % stop
tmpName += ":"
if (strand != ''):
tmpName += "%s" % strand
tmpName += ":"
if (xName != ''):
tmpName += "%s" % xName
# print " --> feature name <%s> " % tmpName
# double-check that there are no question marks ...
while (tmpName.find("?") >= 0):
ii = tmpName.find("?")
tmpName = tmpName[:ii] + tmpName[ii + 1:]
return (tmpName)
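# --- Illustrative example (added sketch, not part of the original script) ---
# Reproduces the feature-name format documented above makeFeatureName(); the gene and
# coordinates are the ones shown in that doc comment.
def exampleMakeFeatureName():
    # expected result: "N:GEXP:BRCA2:chr13:31787617:31871809:+:U133A"
    return makeFeatureName('N', 'GEXP', 'BRCA2', chr='13',
                           start=31787617, stop=31871809,
                           strand='+', xName='U133A')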
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def readOneDataFile(fName, geneList, zPlat, metaData):
print " "
try:
fh = file(fName)
print " in readOneDataFile ... ", zPlat, len(geneList)
print fName
except:
print " ERROR in readOneDataFile ... ", zPlat
print " failed to open input file !?!?! ", fName
sys.exit(-1)
try:
dType = dataTypeDict[zPlat][0]
fType = dataTypeDict[zPlat][1]
except:
print " ERROR in readOneDataFile ... ", zPlat
print dataTypeDict
sys.exit(-1)
# for the Agilent level-3 data, there are two header rows that look like this:
# Hybridization REF TCGA-13-0888-01A-01R-0406-07
# Composite Element REF log2 lowess normalized (cy5/cy3) collapsed by
# gene symbol
# for the Affy U133A data, there are also two header rows that look like this:
# Hybridization REF 5500024056197041909864.A11
# Composite Element REF Signal
# for the Illumina HumanMethylation27 data, there are also two header rows, but
# a few more columns:
# Hybridization REF TCGA-04-1655-01A-01D-0563-05 TCGA-04-1655-01A-01D-0563-05 TCGA-04-1655-01A-01D-0563-05 TCGA-04-1655-01A-01D-0563-05
# Composite Element REF Beta_Value Gene_Symbol Chromosome
# Genomic_Coordinate
# for the IlluminaGA RNASeq data, there is one header row and a total of 4 columns:
# gene raw_counts median_length_normalized RPKM
# for the Genome Wide
import time
import json
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn import metrics
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import cycle
import apr_constants
import common_functions
def plot_correlation_matrix(correlation_matrix, save_plot=True, file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
plt.figure(figsize=(12, 12))
sns.set(font_scale=1.4)
sns.heatmap(correlation_matrix, cmap='coolwarm', square=True)
plt.title('Correlation between different features', fontsize=apr_constants.TITLE_FONT_SIZE)
if save_plot:
print('Save Correlation Matrix')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'Correlation Matrix.jpg')
plt.show()
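# --- Illustrative usage sketch (added, not part of the original module) ---
# plot_correlation_matrix() expects a precomputed correlation matrix; `features_df` is
# a hypothetical DataFrame of extracted audio features.
def example_plot_correlation_matrix(features_df):
    correlation_matrix = features_df.corr()
    plot_correlation_matrix(correlation_matrix, save_plot=False,
                            file_name='example', save_path='./')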
def plot_pca(input_pca_data, save_plot=False, target_names=None, file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
if target_names is None:
target_names = []
plt.figure(figsize=(20, 10))
new_data = input_pca_data.copy()
genres = {i: target_names[i] for i in range(0, len(target_names))}
new_data.genre = [genres[int(item)] for item in new_data.genre]
sns.scatterplot(x='PC1', y='PC2', data=new_data, hue='genre', alpha=0.6, palette=apr_constants.COLORS_LIST, s=200)
plt.title('PCA on Genres', fontsize=apr_constants.TITLE_FONT_SIZE)
plt.xticks(fontsize=14)
plt.yticks(fontsize=10)
plt.xlabel('Principal Component 1', fontsize=22)
plt.ylabel('Principal Component 2', fontsize=22)
if save_plot:
print('Save PCA Plot')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'PCA Scatter Plot.jpg')
plt.show()
def plot_3d_pca(input_pca_data, save_plot=True, file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
# initialize figure and 3d projection for the PC3 data
fig = plt.figure(figsize=(15, 12))
ax = fig.add_subplot(111, projection='3d')
# assign x,y,z coordinates from PC1, PC2 & PC3
xs = input_pca_data['PC1']
ys = input_pca_data['PC2']
zs = input_pca_data['PC3']
# initialize scatter plot and label axes
plot = ax.scatter(xs, ys, zs, alpha=0.6, c=input_pca_data['genre'], cmap='magma', depthshade=True)
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=15)
ax.tick_params(axis='z', labelsize=15)
ax.set_xlabel('PC1', labelpad=25)
ax.set_ylabel('PC2', labelpad=25)
ax.set_zlabel('PC3', labelpad=15)
fig.colorbar(plot, shrink=0.6, aspect=9)
if save_plot:
print('Save 3D PCA Plot')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'PCA 3D Scatter Plot.jpg')
plt.show()
def plot_clusters(input_pca_data, centroids_value=None, labels=None, colors_list=None, genres_list=None, save_plot=True,
plot_centroids=True, file_name=apr_constants.DEFAULT_FILE_NAME, save_path=apr_constants.PROJECT_ROOT):
if genres_list is None:
genres_list = []
if colors_list is None:
colors_list = []
if labels is None:
labels = []
if centroids_value is None:
centroids_value = []
pca_1, pca_2, gen = input_pca_data['PC1'], input_pca_data['PC2'], input_pca_data['genre']
colors = {v: k for v, k in enumerate(colors_list)}
genres = {v: k for v, k in enumerate(genres_list)}
df = pd.DataFrame({'pca_1': pca_1, 'pca_2': pca_2, 'label': labels, 'genre': gen})
groups = df.groupby('label')
fig, ax = plt.subplots(figsize=(20, 13))
plt.style.use('fivethirtyeight')
markers = ['s', 'o', 'v', '<', '>', 'P', '*', 'h', 'd', '8']
for genre, group in groups:
ax.plot(group.pca_1, group.pca_2, marker=markers[genre], linestyle='', ms=10, color=colors[genre],
label=genres[genre], mec='none', alpha=0.2)
ax.set_aspect('auto')
ax.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
ax.tick_params(axis='y', which='both', left='off', top='off', labelleft='off')
if plot_centroids:
plt.plot(centroids_value[:, 0], centroids_value[:, 1], 'k*', ms=14)
ax.legend()
ax.set_title("Genres Music Clusters Results", fontsize=apr_constants.TITLE_FONT_SIZE)
if save_plot:
print('Save Clusters Plot')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'Clusters Plot.jpg')
plt.show()
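# --- Illustrative usage sketch (added, not part of the original module) ---
# How the imported KMeans ties into plot_clusters(); `pca_df` is assumed to be a
# DataFrame with 'PC1', 'PC2' and numeric 'genre' columns, and `genre_names` the
# matching list of label names (apr_constants.COLORS_LIST must cover len(genre_names)).
def example_plot_clusters(pca_df, genre_names):
    kmeans = KMeans(n_clusters=len(genre_names), random_state=0)
    labels = kmeans.fit_predict(pca_df[['PC1', 'PC2']])
    plot_clusters(pca_df, centroids_value=kmeans.cluster_centers_, labels=labels,
                  colors_list=apr_constants.COLORS_LIST, genres_list=genre_names,
                  save_plot=False)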
def plot_confusion_matrix_k_means(input_data, save_plot=True, labels=None, target_names=None,
file_name=apr_constants.DEFAULT_FILE_NAME, save_path=apr_constants.PROJECT_ROOT):
if target_names is None:
target_names = []
if labels is None:
labels = []
input_data['predicted_label'] = labels
data = metrics.confusion_matrix(input_data['genre'], input_data['predicted_label'])
df_cm = pd.DataFrame(data, columns=np.unique(target_names), index=np.unique(target_names))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize=(10, 10))
sns.set(font_scale=1.4)
heatmap = sns.heatmap(df_cm, cmap="Blues", annot=True, fmt='g', annot_kws={"size": 8}, square=True)
heatmap.set_xticklabels(heatmap.get_xticklabels(), rotation=45)
plt.title('CM for K-Means', fontsize=apr_constants.TITLE_FONT_SIZE)
if save_plot:
print('Save K-means Confusion Matrix')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'K-means Confusion Matrix Plot.jpg')
plt.show()
def plot_silhouette(input_data, min_clusters=2, max_clusters=5, save_plot=False,
file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
eval_data = input_data.copy()
silhouette_score_values = list()
execution_time_values = list()
number_of_clusters = range(min_clusters, max_clusters + 1)
for i in number_of_clusters:
start_time = time.time()
clusters = KMeans(i)
clusters.fit(eval_data)
cluster_labels = clusters.predict(eval_data)
execution_time = time.time() - start_time
execution_time_values.append(execution_time)
silhouette_score_values.append(
metrics.silhouette_score(eval_data, cluster_labels, metric='euclidean', sample_size=None,
random_state=None))
fig, ax1 = plt.subplots(figsize=(15, 10))
y_ax_ticks = np.arange(0, max(silhouette_score_values) + 1, 0.1)
x_ax_ticks = np.arange(min_clusters, max_clusters + 1, 1)
ax1.plot(number_of_clusters, silhouette_score_values)
ax1.plot(number_of_clusters, silhouette_score_values, 'bo')
ax1.set_title("Silhouette Score Values vs Numbers of Clusters", fontsize=22)
ax1.set_yticks(y_ax_ticks)
ax1.set_ylabel('Silhouette Score', fontsize=22)
ax1.set_xticks(x_ax_ticks)
ax1.set_xlabel('Number Of Clusters', fontsize=22)
ax1.grid(False)
ax2 = ax1.twinx()
y_ax2_ticks = np.arange(0, max(execution_time_values) + 1, 0.3)
ax2.plot(number_of_clusters, execution_time_values, 'orange', linestyle='dashed')
ax2.plot(number_of_clusters, execution_time_values, 'orange', marker="o", linestyle='dashed')
ax2.set_yticks(y_ax2_ticks)
ax2.set_ylabel('Fit Time (sec)', fontsize=22)
ax2.grid(False)
optimal_number_of_components = number_of_clusters[silhouette_score_values.index(max(silhouette_score_values))]
worst_number_of_components = number_of_clusters[silhouette_score_values.index(min(silhouette_score_values))]
# optimal_execution_time = number_of_clusters[execution_time_values.index(max(execution_time_values))]
# worst_execution_time = number_of_clusters[execution_time_values.index(min(execution_time_values))]
plt.rcParams.update({'font.size': 12})
for i in silhouette_score_values:
clr = 'black'
wgt = 'normal'
value_position = silhouette_score_values.index(i)
sil_y_offset_value = 0.006
if max(silhouette_score_values) == i:
wgt = 'bold'
clr = 'green'
ax1.annotate(str(round(i, 3)), xy=(value_position + 2, i + sil_y_offset_value), color=clr, weight=wgt)
elif min(silhouette_score_values) == i:
wgt = 'bold'
clr = 'red'
ax1.annotate(str(round(i, 3)), xy=(value_position + 2, i + sil_y_offset_value), color=clr, weight=wgt)
else:
ax1.annotate(str(round(i, 3)), xy=(value_position + 2, i + sil_y_offset_value), color=clr, weight=wgt)
for j in execution_time_values:
clr = 'black'
wgt = 'normal'
value_time_position = execution_time_values.index(j)
time_y_offset_value = 0.06
if max(execution_time_values) == j:
wgt = 'bold'
clr = 'red'
ax2.annotate(str(round(j, 3)), xy=(value_time_position + 2, j - time_y_offset_value), color=clr, weight=wgt)
elif min(execution_time_values) == j:
wgt = 'bold'
clr = 'green'
ax2.annotate(str(round(j, 3)), xy=(value_time_position + 2, j - time_y_offset_value), color=clr, weight=wgt)
else:
ax2.annotate(str(round(j, 3)), xy=(value_time_position + 2, j - time_y_offset_value), color=clr, weight=wgt)
ax1.vlines(x=optimal_number_of_components, ymin=0, ymax=max(silhouette_score_values), linewidth=2, color='green',
label='Max Value', linestyle='dashdot')
ax1.vlines(x=worst_number_of_components, ymin=0, ymax=min(silhouette_score_values), linewidth=2, color='red',
label='min Value', linestyle='dashdot')
# Adding legend
ax1.legend(loc='upper center', prop={'size': apr_constants.LEGEND_SIZE})
ax2.legend(['ExecutionTime'], loc='upper right', prop={'size': apr_constants.LEGEND_SIZE})
if save_plot:
print('Save Silhouette Plot')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'Clusters Silhouette Plot.jpg')
plt.show()
# print("Optimal number of components is:", Optimal_NumberOf_Components)
def plot_roc(y_test, y_score, classifier_name=apr_constants.DEFAULT_CLASSIFIER_NAME, save_plot=False, target_names=None,
file_name=apr_constants.DEFAULT_FILE_NAME, save_path=apr_constants.PROJECT_ROOT, type_learning='SL'):
if target_names is None:
target_names = []
genres = target_names
ordinal_position = []
for index in range(0, len(target_names)):
ordinal_position.append(index)
test_label = preprocessing.label_binarize(y_test, classes=ordinal_position)
if type_learning == 'SL':
y_label = y_score
elif type_learning == 'UL':
y_label = preprocessing.label_binarize(y_score, classes=ordinal_position)
n_classes = test_label.shape[1]
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(test_label[:, i], y_label[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
colors = cycle(apr_constants.ROC_COLOR_LIST)
plt.figure(figsize=(15, 10))
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=1.5,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(genres[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=1.5)
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate (FPR)', fontsize=24)
plt.ylabel('True Positive Rate (TPR)', fontsize=24)
plt.title('Receiver operating characteristic for ' + classifier_name.replace('_', ' ').upper(),
fontsize=apr_constants.TITLE_FONT_SIZE)
plt.legend(loc='lower right', prop={'size': apr_constants.LEGEND_SIZE})
if save_plot:
print('Save ROC Plot')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'ROC Plot.jpg')
plt.show()
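# --- Illustrative usage sketch (added, not part of the original module) ---
# For a supervised classifier (type_learning='SL'), y_score is expected to be the
# per-class score matrix, e.g. from predict_proba(); `clf` here is hypothetical.
def example_plot_roc(clf, x_test, y_test, genre_names):
    y_score = clf.predict_proba(x_test)
    plot_roc(y_test, y_score, classifier_name='example_classifier', save_plot=False,
             target_names=genre_names, type_learning='SL')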
def plot_classification_report(input_data, export_json=True, labels=None, target_names=None,
file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
if target_names is None:
target_names = []
if labels is None:
labels = []
input_data['predicted_label'] = labels
report_dic = False
if export_json:
report_dic = True
report = metrics.classification_report(input_data['genre'], input_data['predicted_label'],
target_names=target_names,
output_dict=report_dic)
# print('results_reports: ', report)
if export_json:
common_functions.check_create_directory(save_path + apr_constants.DATA)
with open(save_path + apr_constants.DATA + file_name + '_report_results.json', 'w') as res:
json.dump(report, res, indent=4)
plt.show()
def plot_bmp_bar(input_data, save_plot=False, target_names=None, file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
if target_names is None:
target_names = []
plt.figure(figsize=(15, 7))
new_data = input_data[['genre', 'tempo']].copy()
genres = {i: target_names[i] for i in range(0, len(target_names))}
new_data.genre = [genres[item] for item in new_data.genre]
sns.boxplot(x=new_data.genre, y=new_data.tempo, palette='husl')
plt.title('BPM Boxplot for Genres', fontsize=apr_constants.TITLE_FONT_SIZE)
plt.xticks(fontsize=14)
plt.yticks(fontsize=10)
plt.xlabel('Genre', fontsize=22)
plt.ylabel('BPM', fontsize=22)
if save_plot:
print('Save BPM Bar')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'BPM BoxPlot.jpg')
plt.show()
def plot_confusion_matrix(clf, x_test, y_test, classes, normalize='true',
classifier_name=apr_constants.DEFAULT_CLASSIFIER_NAME, save_plot=False,
file_name=apr_constants.DEFAULT_FILE_NAME, save_path=apr_constants.PROJECT_ROOT):
fig, ax = plt.subplots(figsize=(10, 10))
metrics.plot_confusion_matrix(clf, x_test, y_test, normalize=normalize, cmap=plt.cm.Blues, ax=ax,
display_labels=classes, values_format='.0f')
ax.set_title('CM for ' + classifier_name.replace('_', ' ').upper(), fontsize=apr_constants.TITLE_FONT_SIZE)
plt.xticks(rotation=45)
plt.grid(False)
if save_plot:
print('Save Confusion Matrix Plot')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'Confusion Matrix Plot.jpg')
plt.show()
def plot_predictions_compare(normalize_cm, y_test, y_pred, target_names=None,
classifier_name=apr_constants.DEFAULT_CLASSIFIER_NAME,
save_plot=False, file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
if target_names is None:
target_names = []
genres = target_names
calc_cm = metrics.confusion_matrix(y_test, y_pred, normalize=normalize_cm)
bar = pd.DataFrame(calc_cm, columns=genres, index=genres)
ax = bar.plot(kind='bar', figsize=(15, 15), fontsize=14, width=0.8)
plt.title('Musical Genres BarPlot Predictions For ' + classifier_name.replace('_', ' ').upper(),
fontsize=apr_constants.TITLE_FONT_SIZE)
plt.xlabel('Musical Genres', fontsize=22)
plt.xticks(rotation=45)
plt.ylabel('Number of Occurrences', fontsize=22)
for p in ax.patches:
if p.get_height() > 0:
if normalize_cm != 'true':
ax.annotate(format(p.get_height()), (p.get_x() + p.get_width() / 2, p.get_height()), ha='center',
va='center', size=15, xytext=(0, 8), textcoords='offset points', fontsize=8)
else:
ax.annotate(format(p.get_height(), '.3f') + '%',
(p.get_x() + (p.get_width() / 2) + 0.015, p.get_height() + 0.01), ha='center', va='center',
size=15, xytext=(0, 8), textcoords='offset points', fontsize=8, rotation=90)
if save_plot:
print('Save Plot Predictions Compare')
common_functions.check_create_directory(save_path)
plt.savefig(save_path + file_name + ' - ' + 'Predictions Compare Plot.jpg')
plt.show()
def plot_predictions_simple_compare(input_data, target_names=None,
save_plot=False, file_name=apr_constants.DEFAULT_FILE_NAME,
save_path=apr_constants.PROJECT_ROOT):
if target_names is None:
target_names = []
ax = input_data.plot(kind="bar", figsize=(15, 15), fontsize=14, width=0.6)
ax.set_xticklabels(target_names)
ax.legend(["Real Value", "Predict Value"])
plt.title('Simple BarPlot Predictions Evaluation', fontsize=26)
plt.xlabel('Musical Genres', fontsize=18)
plt.ylabel('Number of Occurrences', fontsize=18)
for p in ax.patches:
ax.annotate(format(p.get_height()),
(p.get_x() + (p.get_width() / 2) + 0.015, p.get_height() + 5), ha='center', va='center',
size=18, xytext=(0, 8), textcoords='offset points', fontsize=14, rotation=90)
if save_plot:
print('Save
import numpy as np
from pathlib import Path
import os
import h5py
import json
import pyHMT2D
from ..__common__ import gVerbose
import logging
class Calibrator(object):
"""Calibrator class to handle calibration process
A calibrator is constructed from its configuration file in JSON format. The configuration
specifies hydraulic model (e.g., Backwater-1D, SRH-2D, HEC-RAS, etc.), case (model dependent), an Objectives
object with a list of Objective objects, and optimizer.
"""
def __init__(self, config_json_file_name):
""" Calibrator class constructor
Parameters
----------
config_json_file_name : str
name of the JSON configuration file for calibrator
"""
# file name for the JSON configuration file
self.config_json_file_name = config_json_file_name
# model name, e.g., "SRH-2D", "HEC-RAS"
self.model_name = ''
# optimizer name, e.g., "scipy.optimize"
self.optimizer_name = ''
# configuration dictionary loaded from JSON
self.configuration = {}
# load the configuration from JSON
self.load_config_json()
# hydraulic model
self.hydraulic_model = None
# hydraulic data
self.hydraulic_data = None
# create hydraulic model and data
self.create_model_data()
# calibration parameters
self.calibration_parameters = pyHMT2D.Calibration.Parameters(self.configuration["calibration"]["calibration_parameters"])
# Objectives object
self.objectives = pyHMT2D.Calibration.Objectives(self.configuration["calibration"]["objectives"])
# Optimizer object
self.optimizer = None
# create the optimizer
self.create_optimizer()
# initialize a logger
self.logger = None
self.init_logger()
def load_config_json(self):
""" Load the JSON configuration file
Returns
-------
"""
if gVerbose: print("Load calibration configuration from file", self.config_json_file_name)
with open(self.config_json_file_name) as f:
self.configuration = json.load(f)
self.model_name = self.configuration["model"]
self.optimizer_name = self.configuration["calibration"]["optimizer"]
#print(self.configuration)
#print some information about the calibration configuration
if gVerbose: print("model =", self.configuration["model"])
#model specific configuration
if gVerbose: print("Configuration for",self.configuration["model"], ":")
if gVerbose: print(json.dumps(self.configuration[self.configuration["model"]], indent=4, sort_keys=False))
#calibration information
#calibration parameters
if gVerbose: print("There are total", len(self.configuration["calibration"]["calibration_parameters"]), "calibration parameters:")
if gVerbose: print(json.dumps(self.configuration["calibration"]["calibration_parameters"], indent=4, sort_keys=False))
#objectives
if gVerbose: print("There are total", len(self.configuration["calibration"]["objectives"]), "calibration objectives:")
if gVerbose: print(json.dumps(self.configuration["calibration"]["objectives"], indent=4, sort_keys=False))
#optimizer
if gVerbose: print("The selected optimizer is", self.configuration["calibration"]["optimizer"], "with the following setup:")
if gVerbose: print(json.dumps(self.configuration["calibration"][self.configuration["calibration"]["optimizer"]], indent=4, sort_keys=False))
def create_model_data(self):
""" Create the model and set up the case in preparation of calibration
Returns
-------
"""
if self.model_name == "Backwater-1D": # create a Backwater_1D_Model object and open the simulation case
self.hydraulic_model = pyHMT2D.Hydraulic_Models_Data.Backwater_1D_Model()
# load Backwater_1D_Data configuration data
self.hydraulic_data = pyHMT2D.Hydraulic_Models_Data.Backwater_1D_Data(self.config_json_file_name)
# set the simulation case in the Backwater_1D_Model object
self.hydraulic_model.set_simulation_case(self.hydraulic_data)
elif self.model_name == "SRH-2D": # create a SRH_2D_Model object and open the simulation case
version = self.configuration["SRH-2D"]["version"]
srh_pre_path = self.configuration["SRH-2D"]["srh_pre_path"]
srh_path = self.configuration["SRH-2D"]["srh_path"]
extra_dll_path = self.configuration["SRH-2D"]["extra_dll_path"]
# create a SRH-2D model instance
self.hydraulic_model = pyHMT2D.SRH_2D.SRH_2D_Model(version, srh_pre_path,
srh_path, extra_dll_path, faceless=False)
# initialize the SRH-2D model
self.hydraulic_model.init_model()
if gVerbose: print("Hydraulic model name: ", self.hydraulic_model.getName())
if gVerbose: print("Hydraulic model version: ", self.hydraulic_model.getVersion())
# open the simulation case
self.hydraulic_model.open_project(self.configuration["SRH-2D"]["case"])
self.hydraulic_data = self.hydraulic_model.get_simulation_case()
elif self.model_name == "HEC-RAS": # create a HEC_RAS_Model object and open the simulation case
version = self.configuration["HEC-RAS"]["version"]
# whether to run HEC-RAS faceless
if (self.configuration["HEC-RAS"]["faceless"] == "True"):
faceless = True
elif (self.configuration["HEC-RAS"]["faceless"] == "False"):
faceless = False
else:
raise Exception("faceless should be either True or False. Please check.")
# create a HEC-RAS model instance
self.hydraulic_model = pyHMT2D.RAS_2D.HEC_RAS_Model(version, faceless)
# initialize the HEC-RAS model
self.hydraulic_model.init_model()
print("Hydraulic model name: ", self.hydraulic_model.getName())
print("Hydraulic model version: ", self.hydraulic_model.getVersion())
# open the simulation case
#self.hydraulic_model.open_project(self.configuration["HEC-RAS"]["case"],
# self.configuration["HEC-RAS"]["terrainFileName"])
#self.hydraulic_data = self.hydraulic_model.get_simulation_case()
else:
raise Exception("The specified model: %s, is not supported", self.model_name)
def create_optimizer(self):
""" Create the optimizer
Returns
-------
"""
if self.optimizer_name == "scipy.optimize.local":
self.optimizer = pyHMT2D.Calibration.Optimizer_ScipyOptimizeLocal(self.configuration["calibration"]["scipy.optimize.local"])
elif self.optimizer_name == "scipy.optimize.global":
self.optimizer = pyHMT2D.Calibration.Optimizer_ScipyOptimizeGlobal(self.configuration["calibration"]["scipy.optimize.global"])
elif self.optimizer_name == "enumerator":
self.optimizer = pyHMT2D.Calibration.Optimizer_Enumerator(self.configuration["calibration"]["enumerator"])
else:
raise Exception("Specified optimizer is not supported.")
def init_logger(self):
""" Initialize a logger
Returns
-------
"""
# Create a custom logger
self.logger = logging.getLogger(__name__)
# Create handlers
c_handler = logging.StreamHandler()
f_handler = logging.FileHandler('calibration.log', mode='w')
# Create formatters and add it to handlers
c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)
# Add handlers to the logger
self.logger.addHandler(c_handler)
self.logger.addHandler(f_handler)
self.logger.setLevel(logging.INFO)
def calibrate(self):
""" Calibrate the model
Returns
-------
"""
print("Calibration starts ...")
# get the Manning's n calibration information
materialID_list, materialName_list, initial_guess_list, ManningN_min_list, ManningN_max_list = \
self.calibration_parameters.get_ManningN_Info_list()
if self.optimizer_name == "enumerator":
self.optimizer.minimize(self.func_to_minimize, args=(materialID_list, materialName_list,), callback=self.callback)
elif self.optimizer_name == "scipy.optimize.local":
import scipy.optimize as OP
# build bounds
ManningN_bounds = OP.Bounds(np.array(ManningN_min_list), np.array(ManningN_max_list))
# scipy.optimize.minimize's arguments are different for different categories of methods. Need to
# separate them.
# for methods where bounds are not allowed:
if self.optimizer.method == "Nelder-Mead":
result = OP.minimize(self.func_to_minimize, initial_guess_list, args=(materialID_list,
materialName_list,),
method=self.optimizer.method,
callback=self.callback,
options=self.optimizer.options
)
# for methods where bounds are allowed:
elif self.optimizer.method == "L-BFGS-B":
result = OP.minimize(self.func_to_minimize, initial_guess_list, args=(materialID_list,
materialName_list,),
method=self.optimizer.method,
jac=self.optimizer.jac,
hess=self.optimizer.hess,
tol=self.optimizer.tol,
bounds=ManningN_bounds,
options=self.optimizer.options
)
elif self.optimizer.method == "Powell":
result = OP.minimize(self.func_to_minimize, initial_guess_list, args=(materialID_list,
materialName_list,),
method=self.optimizer.method,
callback=self.callback,
bounds=ManningN_bounds,
options=self.optimizer.options
)
else:
raise Exception("The optimization method %s is not supported." % self.optimizer.method)
print(result)
elif self.optimizer_name == "scipy.optimize.global":
import scipy.optimize as OP
# build ranges (tuples)
ManningN_ranges = tuple(zip(ManningN_min_list, ManningN_max_list))
#get the "full_output" option
if self.configuration["calibration"]["scipy.optimize.global"]["full_output"] == "True":
full_output = True
elif self.configuration["calibration"]["scipy.optimize.global"]["full_output"] == "False":
full_output = False
else:
raise Exception("Optinmization parameter full_output can only be True or False. Please check.")
if self.optimizer.method == "brute":
result = OP.brute(self.func_to_minimize, ManningN_ranges, args=(materialID_list,
materialName_list,),
Ns=self.configuration["calibration"]["scipy.optimize.global"]["Ns"],
#finish=self.configuration["calibration"]["scipy.optimize.global"]["finish"],
finish=None, #hard-wired here to not allow "brute" to go beyond the range
full_output=full_output
)
# send information to logger for the record or restart
if full_output: #return both parameter values and function value
msg = "\nThe optimized parameters:: " + ", ".join(map(str, result[0])) \
+ "\nThe function value at optimized parameters: " + str(result[1])
else: #return only paramter values
msg = "\nThe optimized parameters:: " + ", ".join(map(str, result))
self.logger.info(msg)
else:
raise Exception("The optimization method %s is not supported." % self.optimizer.method)
else:
raise Exception("The optimizer name %s is not supported." % self.optimizer_name)
#write out the optimizer's calibration intermediate steps' parameter values and calibration errors
self.optimizer.write_optimization_results_to_csv()
#write out the simulation results at measurement locations (for postprocessing)
self.objectives.outputSimulationResultToCSV()
print("Calibration ended.")
def func_to_minimize(self, ManningNs, ManningN_MaterialIDs, ManningN_MaterialNames):
"""Function to minimize (the score, i.e., the cost function)
Returns
-------
"""
total_score = np.inf
if self.model_name == "Backwater-1D":
#set the Manning's n with the new values
self.hydraulic_model.get_simulation_case().modify_ManningsN(ManningN_MaterialIDs,
ManningNs,
ManningN_MaterialNames)
# run the Backwater_1D_Model model
self.hydraulic_model.run_model()
# output the result to VTK
vtkFileName = self.hydraulic_model.get_simulation_case().outputResultToVTK()
#calculate the total score of the calibration run for all specified objectives
#The score is calculated by comparing sampled result on VTK and measurement
self.objectives.calculate_total_score(vtkFileName)
if gVerbose: print("Total score = ", self.objectives.total_score)
total_score = self.objectives.total_score
elif self.model_name == "SRH-2D":
# set the Manning's n with the new values
self.hydraulic_data.srhhydro_obj.modify_ManningsN(ManningN_MaterialIDs,
ManningNs,
ManningN_MaterialNames)
srhhydro_filename = self.hydraulic_data.srhhydro_obj.srhhydro_filename
self.hydraulic_data.srhhydro_obj.write_to_file(srhhydro_filename)
# run SRH-2D Pre to preprocess the case
self.hydraulic_model.run_pre_model()
# run the SRH-2D model's current project
self.hydraulic_model.run_model()
# output the result to VTK
# read SRH-2D result in XMDF format (*.h5)
# Whether the XMDF result is nodal or cell center. In SRH-2D's ".srhhydro" file,
# the output option for "OutputFormat" can be manually changed before simulation.
# Options are "XMDF" (results at at nodes), "XMDFC" (results are at cell centers), etc.
# For example, "OutputFormat XMDFC EN". The following lines show that the SRH-2D simulation
# was run with "XMDFC" as output format (see the "XMDFC" part of the result file name) and thus
# we set "bNodal = False".
bNodal = False
self.hydraulic_data.readSRHXMDFFile(self.hydraulic_data.get_case_name() + "_XMDFC.h5", bNodal)
# export the SRH-2D result to VTK: lastTimeStep=True means we only want to deal with the last time step.
# See the code documentation of outputXMDFDataToVTK(...) for more options. It returns a list of vtk file names.
vtkFileNameList = self.hydraulic_data.outputXMDFDataToVTK(bNodal, lastTimeStep=True, dir='')
# calculate the total score of the calibration run for all specified objectives
# The score is calculated by comparing sampled result on VTK and measurement
self.objectives.calculate_total_score(vtkFileNameList[-1]) #only take the last vtk result file
if gVerbose: print("Total score = ", self.objectives.total_score)
total_score = self.objectives.total_score
elif self.model_name == "HEC-RAS":
# open the simulation case
self.hydraulic_model.open_project(self.configuration["HEC-RAS"]["case"],
self.configuration["HEC-RAS"]["terrainFileName"])
self.hydraulic_data = self.hydraulic_model.get_simulation_case()
# set the Manning's n with the new values
self.hydraulic_data.modify_ManningsN(ManningN_MaterialIDs,
ManningNs,
ManningN_MaterialNames)
# update the time stamp of the Manning's n GeoTiff file (to force HEC-RAS to re-compute the 2D flow area's
# property tables). (No need? The Manning's n modification above already updated the time stamp.)
if os.path.dirname(self.hydraulic_data.hdf_filename) == '':
fileBase = b''
else:
fileBase
# preprocessing/fap.py
# -*- coding: utf-8 -*-
import numpy as np
import scipy.signal
from scipy.signal import peak_widths
import matplotlib.pyplot as plt
import utils
import peakutils
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
def faps_slide_subplot(faps_feat_df,sbj,label=False):
if sbj != 'all':
faps_feat_df = faps_feat_df[faps_feat_df['sbj_idx']==sbj]
# prepare faps that will be plotted
faps = faps_feat_df['faps'].tolist()
if label:
labels = faps_feat_df['label'].tolist()
FAP_index = [i for i in range(19)]
# slide show
for i in range(len(faps)):
fig, axes = plt.subplots(nrows=10,ncols=2,sharex=True,figsize=(18,16))
for j, ax in enumerate(axes.flatten()):
if j == 19:
break
ax.plot(faps[i][:,j])
ax.set_ylabel(FAP_index[j])
if label:
fig.suptitle(str(labels[i]))
plt.show()
plt.waitforbuttonpress()
plt.close()
return
def faps_slide_plot(faps_feat_df,sbj,label=False,peak_plot=None,plot_sig=None):
if sbj != 'all':
faps_feat_df = faps_feat_df[faps_feat_df['sbj_idx']==sbj]
if plot_sig is not None:
faps_feat_df['faps'] = faps_feat_df['faps'].apply(lambda x:x[:,plot_sig])
# prepare faps that will be plotted
faps = faps_feat_df['faps'].tolist()
if peak_plot is not None:
peaks = faps_feat_df[peak_plot].tolist()
try:
p_selects = faps_feat_df['p_sel'].tolist()
p_lbs = faps_feat_df['p_lb'].tolist()
p_rbs = faps_feat_df['p_rb'].tolist()
except:
pass
if label:
labels = faps_feat_df['label'].tolist()
# slide show
for i in range(len(faps)):
plt.figure(figsize=(12,8))
try:
for col in range(faps[i].shape[1]):
plt.plot(faps[i][:,col])
except:
plt.plot(faps[i])
if peak_plot is not None:
if len(peaks[i])>0:
for p in peaks[i]:
plt.axvline(p,color='black',lw=1)
try:
plt.axvline(p_selects[i],color='black',lw=3)
plt.axvline(p_lbs[i],color='black',lw=3)
plt.axvline(p_rbs[i],color='black',lw=3)
except:
pass
if label:
plt.title(str(labels[i]))
# FAP_index = ['l_i_eyebrow_y','r_i_eyebrow_y','l_o_eyebrow_y','r_o_eyebrow_y',
# 'l_i_eyebrow_x','r_i_eyebrow_x','t_l_eyelid_y','t_r_eyelid_y',
# 'l_cheeck_y','r_cheeck_y','l_nose_x','r_nose_x',
# 'l_o_cornerlip_y','r_o_cornerlip_y','l_o_cornerlip_x','r_o_cornerlip_x',
# 'l_b_midlip_y','l_t_midlip_y','open_jaw']
if plot_sig is not None:
FAP_index = plot_sig
else:
FAP_index = [i for i in range(19)]
plt.legend(FAP_index)
plt.show()
plt.waitforbuttonpress()
plt.close()
return
def dir_vector_slide_plot(faps_df,sbj,label=False):
if sbj != 'all':
faps_df = faps_df[faps_df['sbj_idx']==sbj]
# prepare faps that will be plotted
au = faps_df['FAP'].tolist()
if label:
labels = faps_df['label'].tolist()
# slide show
i = 0
for row in range(len(au)):
plt.figure()
x = [i for i in range(19)]
plt.stem(x,au[row])
if label:
plt.title(str(labels[i]))
i += 1
plt.show()
plt.waitforbuttonpress()
plt.close()
return
def calm_detector(faps_df,thres=1,remove=True):
def mask_gen(row,thres):
fap = row['faps']
col = [i for i in range(19)]
# remove fap 6 and 7
col.remove(6)
col.remove(7)
fap = fap[:,col]
# absolute value
fap = np.absolute(fap)
# find peak for each traces
p_collect = []
for i in range(fap.shape[1]):
p = peakutils.indexes(fap[:,i],min_dist=10,thres=0)
if len(p) > 0:
p_mag = [fap[p_pos,i] for p_pos in p]
p_collect.append(np.max(p_mag))
if len(p_collect) > 0:
max_peak_avg = np.average(p_collect)
if max_peak_avg < thres :
row['calm_mask'] = True
else:
row['calm_mask'] = False
else:
row['calm_mask'] = True
return row
faps_df = faps_df.apply(mask_gen,thres=thres,axis=1)
if remove:
faps_df = faps_df[~faps_df['calm_mask']]
else:
faps_df = faps_df[faps_df['calm_mask']]
return faps_df.drop('calm_mask',axis=1)
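# Illustrative usage of calm_detector (assumes faps_df carries a 'faps' column of
# (T, 19) FAP traces, as the implementation above expects):
#
#     active_df = calm_detector(faps_df, thres=1, remove=True)   # drop "calm" samples
#     calm_only = calm_detector(faps_df, thres=1, remove=False)  # keep only "calm" samples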
def get_peak(faps_df,mode='peak',window_width=10,sliding_step=3,min_dist=10,thres=0.6):
def find_peak_cov(x,w):
# change shape to (19,100) from (100,19)
x = x.transpose()
L = x.shape[1]
# find each cov for each sliding window
diff_cov = []
for i in range(w,L-w,sliding_step):
x_w = x[:,i:i+w]
cov_m = np.cov(x_w)
# map the positive
pos = 0
neg = 0
for row in range(cov_m.shape[0]):
for col in range(row+1,cov_m.shape[1]):
# normalize covarience by this formula
# cov(x1,x2) / (std(x1)*std(x2))
cov_m[row,col] = cov_m[row,col]/(np.sqrt(cov_m[row,row])*np.sqrt(cov_m[col,col]))
if cov_m[row,col] >= 0:
pos = pos+cov_m[row,col]
else:
neg = neg+cov_m[row,col]
diff_val = abs(pos) - abs(neg)
diff_cov.append(diff_val)
# peak should be at the maximum different + size of window
peak_position = w + np.argmax(diff_cov)
return [peak_position]
def find_peak_peakutils(row,min_dist,thres):
# detect peak based on min_dist and threshold
x = row['faps']
col_sel = [i for i in range(19)]
col_sel.remove(6)
col_sel.remove(7)
col_sel.remove(14)
col_sel.remove(15)
x = x[:,col_sel]
x = np.abs(x)
x = np.sum(x,axis=1)
p = peakutils.indexes(x,min_dist=min_dist,thres=thres)
row['peak_pos'] = p
# select peak
p_width, p_height, p_lb, p_rb = peak_widths(x,p,rel_height=1)
# create array of peak properties
# each column is one peak, delete column that p_width is less than 7
p_prop_np = np.array([p_width,p_height,p_lb,p_rb,p])
col_del = []
for col in range(p_prop_np.shape[1]):
if p_prop_np[0,col] < 7:
col_del.append(col)
col_del.sort(reverse=True)
for c in col_del:
p_prop_np = np.delete(p_prop_np, c, axis=1)
# calculate p_width/p_height
if len(p_prop_np.tolist()[0]) > 0:
criteria = np.divide(p_prop_np[0],p_prop_np[1])
crit_idx = np.argmax(criteria)
p_sel = [p_prop_np[4,crit_idx]]
else:
p_sel = []
row['peak_sel'] = p_sel
# get AU
if len(p_sel)>0:
# get window length of p_width
x = row['faps']
c = p_sel[0]
L = 20
L = int(round(L/2,0))
pl = int(max(0,c-L))
pr = int(min(x.shape[0],c+L))
x_win = x[pl:pr,:]
FAP = []
for col in range(x_win.shape[1]):
trace = x_win[:,col]
trace_abs = np.absolute(trace)
p_trace = peakutils.indexes(trace_abs,thres=0.4,min_dist=10)
if len(p_trace) == 0:
FAP.append(0)
continue
else:
pp = p_trace[np.argmax([trace[i] for i in p_trace])]
slope_l = (trace[pp]-trace[0])/(pp)
slope_r = (trace[len(trace)-1]-trace[pp])/(19-pp)
if slope_l > 0 and slope_r < 0:
FAP.append(1)
elif slope_l < 0 and slope_r > 0:
FAP.append(-1)
else:
FAP.append(0)
else:
FAP = [0 for i in range(19)]
# create column AU
row['FAP'] = FAP
# convert it to AU
# AU1
if FAP[0] == 1 and FAP[1] == 1:
row['AU1'] = 1
else:
row['AU1'] = 0
# AU2
if FAP[2] == 1 and FAP[3] == 1:
row['AU2'] = 1
else:
row['AU2'] = 0
# AU4
if FAP[0] == -1 and FAP[1] == -1 and FAP[4] == -1 and FAP[5] == -1:
row['AU4'] = 1
else:
row['AU4'] = 0
# AU5
if FAP[6] == 1 and FAP[7] == 1 :
row['AU5'] = 1
else:
row['AU5'] = 0
# AU6
if FAP[6] == -1 and FAP[7] == -1 and FAP[8] == -1 and FAP[9] == -1:
row['AU6'] = 1
else:
row['AU6'] = 0
# AU9
if FAP[10] == -1 and FAP[11] == -1:
row['AU9'] = 1
else:
row['AU9'] = 0
# AU10
if FAP[12] == -1 and FAP[13] == -1:
row['AU10'] = 1
else:
row['AU10'] = 0
# AU12
if FAP[12] == -1 and FAP[13] == -1 and FAP[14] == 1 and FAP[15] == 1:
row['AU12'] = 1
else:
row['AU12'] = 0
# AU15
if FAP[12] == 1 and FAP[13] == 1:
row['AU15'] = 1
else:
row['AU15'] = 0
# AU16
if FAP[16] == -1 and FAP[18] == -1:
row['AU16'] = 1
else:
row['AU16'] = 0
# AU20
if FAP[14] == 1 and FAP[15] == 1 and FAP[16] == -1:
row['AU20'] = 1
else:
row['AU20'] = 0
# AU23
if FAP[14] == -1 and FAP[15] == -1:
row['AU23'] = 1
else:
row['AU23'] = 0
# AU26
if FAP[18] == 1 and FAP[16] == 1:
row['AU26'] = 1
else:
row['AU26'] = 0
return row
# apply faps_df['faps'] with find peak function
if mode == 'cov':
faps_df['peak_pos'] = faps_df['faps'].apply(find_peak_cov,w=window_width)
elif mode == 'peak':
# find peak
faps_df = faps_df.apply(find_peak_peakutils,min_dist=min_dist,thres=thres,axis=1)
return faps_df
def faps_preprocessing_samples(faps_df,smooth=True,fix_scaler='standard',aoi=None,sbj_num=88,fix_scaler_mode='sbj',sm_wid_len=10,center_mean=False):
# reserve test subject idx
sbj_idx = [sbj_num for i in range(faps_df.shape[0])]
faps_df['sbj_idx'] = sbj_idx
if aoi is not None:
faps_df['faps'] = faps_df['faps'].apply(lambda x:x[aoi[0]:aoi[1]])
# absolute all the signal
faps_df['faps'] = faps_df['faps'].apply(lambda x:np.absolute(x))
if smooth:
smoothed = []
for i in range(faps_df.shape[0]):
faps = np.array(faps_df.iloc[i]['faps'])
# faps = scipy.signal.savgol_filter(faps,window_length=15,polyorder=2,axis=1)
for col in range(faps.shape[1]):
faps[:,col] = scipy.signal.savgol_filter(faps[:,col],window_length=sm_wid_len,polyorder=2)
smoothed.append(faps)
faps_df['tmp'] = smoothed
faps_df = faps_df.drop('faps',axis=1)
faps_df = faps_df[['tmp','ori_idx','sbj_idx']]
faps_df.columns = ['faps','ori_idx','sbj_idx']
if fix_scaler is not None:
faps_block = faps_df['faps'].values
a_to_fit = faps_block[0]
for i in range(1,faps_df.shape[0]):
a_to_fit = np.concatenate([a_to_fit,faps_block[i]])
if fix_scaler == 'minmax':
sc = MinMaxScaler()
else:
sc = StandardScaler()
if fix_scaler_mode == 'sbj':
sc.fit(a_to_fit)
faps_df['faps'] = faps_df['faps'].apply(lambda x:sc.transform(x))
elif fix_scaler_mode == 'each':
faps_df['faps'] = faps_df['faps'].apply(lambda x:sc.fit_transform(x))
if center_mean:
faps_df['faps'] = faps_df['faps'].apply(lambda x:x-np.average(x))
# set type of array
faps_df['faps'] = faps_df['faps'].apply(lambda x:x.astype(np.float64))
return faps_df
def faps_preprocessing(faps_df,smooth=True,filter_miss=None,fix_scaler='standard',aoi=None,sm_wid_len=10,center_mean=False):
# reserve test subject idx
total_sbj = int((faps_df.index.max()+1)/70)
sbj_idx = [j for j in range(1,total_sbj+1) for i in range(70) ]
faps_df['sbj_idx'] = sbj_idx
if filter_miss is not None:
faps_df['miss_ratio'] = filter_miss
faps_df = faps_df[faps_df['miss_ratio'] <= 25]
faps_df = faps_df.drop('miss_ratio',axis=1)
if aoi is not None:
faps_df['faps'] = faps_df['faps'].apply(lambda x:x[aoi[0]:aoi[1]])
# absolute all the signal
faps_df['faps'] = faps_df['faps'].apply(lambda x:np.absolute(x))
if smooth:
smoothed = []
for i in range(faps_df.shape[0]):
faps = np.array(faps_df.iloc[i]['faps'])
# faps = scipy.signal.savgol_filter(faps,window_length=15,polyorder=2,axis=1)
for col in range(faps.shape[1]):
faps[:,col] = scipy.signal.savgol_filter(faps[:,col],window_length=sm_wid_len,polyorder=2)
smoothed.append(faps)
faps_df['tmp'] = smoothed
faps_df = faps_df.drop('faps',axis=1)
faps_df = faps_df[['tmp','ori_idx','sbj_idx']]
faps_df.columns = ['faps','ori_idx','sbj_idx']
if fix_scaler is not None:
output_df = pd.DataFrame()
for subject_idx in range(1,52):
faps_per_sbj = faps_df[faps_df['sbj_idx']==subject_idx]
faps_block = faps_per_sbj['faps'].values
a_to_fit = faps_block[0]
for i in range(1,faps_per_sbj.shape[0]):
a_to_fit = np.concatenate([a_to_fit,faps_block[i]])
if fix_scaler == 'minmax':
sc = MinMaxScaler()
else:
sc = StandardScaler()
sc.fit(a_to_fit)
tmp_df = faps_per_sbj.copy()
tmp_df['faps'] = faps_per_sbj['faps'].apply(lambda x:sc.transform(x))
output_df = output_df.append(tmp_df)
faps_df = output_df
"vacation_correction" only)
User is allowed to create contract_type (Create for "contract_type" only)
User is allowed to create leave_submission (Create for "leave_submission" only)
User is allowed to create vacation_correction (Create for "vacation_correction" only)
User is allowed to edit contract_type (Edit for "contract_type" only)
User is allowed to edit leave_submission (Edit for "leave_submission" only)
User is allowed to edit vacation_correction (Edit for "vacation_correction" only)
Role "issue_admin":
User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only)
User is allowed to access issue (View for "issue" only)
User is allowed to create area (Create for "area" only)
User is allowed to create category (Create for "category" only)
User is allowed to create doc_issue_status (Create for "doc_issue_status" only)
User is allowed to create ext_tracker (Create for "ext_tracker" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create keyword (Create for "keyword" only)
User is allowed to create kind (Create for "kind" only)
User is allowed to create msg_keyword (Create for "msg_keyword" only)
User is allowed to create safety_level (Create for "safety_level" only)
User is allowed to create severity (Create for "severity" only)
User is allowed to create status (Create for "status" only)
User is allowed to create status_transition (Create for "status_transition" only)
User is allowed to create test_level (Create for "test_level" only)
User is allowed to edit area (Edit for "area" only)
User is allowed to edit category (Edit for "category" only)
User is allowed to edit doc_issue_status (Edit for "doc_issue_status" only)
User is allowed to edit ext_tracker (Edit for "ext_tracker" only)
User is allowed to edit issue (Edit for "issue" only)
User is allowed to edit keyword (Edit for "keyword" only)
User is allowed to edit kind (Edit for "kind" only)
User is allowed to edit msg_keyword (Edit for "msg_keyword" only)
User is allowed to edit safety_level (Edit for "safety_level" only)
User is allowed to edit severity (Edit for "severity" only)
User is allowed to edit status (Edit for "status" only)
User is allowed to edit status_transition (Edit for "status_transition" only)
User is allowed to edit test_level (Edit for "test_level" only)
Role "it":
Create (Create for "user_contact" only)
User is allowed Edit on (Edit for "file": ('name', 'type') only)
User is allowed Edit on (Edit for "location": ('domain_part',) only)
User is allowed Edit on (Edit for "organisation": ('domain_part',) only)
User is allowed Edit on (Edit for "user": ('ad_domain', 'nickname', 'password', 'pictures', 'roles', 'timetracking_by', 'timezone', 'username') only)
User is allowed Edit on (Edit for "user": ('address', 'alternate_addresses', 'nickname', 'password', 'timezone', 'username') only)
User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only)
User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed to access domain_permission (View for "domain_permission" only)
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
User is allowed to create domain_permission (Create for "domain_permission" only)
User is allowed to create it_category (Create for "it_category" only)
User is allowed to create it_int_prio (Create for "it_int_prio" only)
User is allowed to create it_issue (Create for "it_issue" only)
User is allowed to create it_project (Create for "it_project" only)
User is allowed to create it_request_type (Create for "it_request_type" only)
User is allowed to create mailgroup (Create for "mailgroup" only)
User is allowed to edit domain_permission (Edit for "domain_permission" only)
User is allowed to edit it_category (Edit for "it_category" only)
User is allowed to edit it_int_prio (Edit for "it_int_prio" only)
User is allowed to edit it_issue (Edit for "it_issue" only)
User is allowed to edit it_project (Edit for "it_project" only)
User is allowed to edit it_request_type (Edit for "it_request_type" only)
User is allowed to edit mailgroup (Edit for "mailgroup" only)
User may manipulate user Roles through the web (Web Roles)
Role "itview":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "msgedit":
(Search for "msg": ('date', 'id') only)
User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only)
User is allowed to access ext_msg (View for "ext_msg" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
Role "msgsync":
(Search for "msg": ('date', 'id') only)
User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only)
User is allowed to access ext_msg (View for "ext_msg" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to create ext_msg (Create for "ext_msg" only)
User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only)
User is allowed to edit ext_msg (Edit for "ext_msg" only)
User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only)
Role "nosy":
User may get nosy messages for doc (Nosy for "doc" only)
User may get nosy messages for issue (Nosy for "issue" only)
User may get nosy messages for it_issue (Nosy for "it_issue" only)
User may get nosy messages for it_project (Nosy for "it_project" only)
User may get nosy messages for support (Nosy for "support" only)
Role "office":
(Restore for "room" only)
(Retire for "room" only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed to access user_contact (View for "user_contact" only)
User is allowed to create absence (Create for "absence" only)
User is allowed to create absence_type (Create for "absence_type" only)
User is allowed to create room (Create for "room" only)
User is allowed to create uc_type (Create for "uc_type" only)
User is allowed to edit absence (Edit for "absence" only)
User is allowed to edit absence_type (Edit for "absence_type" only)
User is allowed to edit room (Edit for "room" only)
User is allowed to edit uc_type (Edit for "uc_type" only)
Role "organisation":
User is allowed to access location (View for "location" only)
User is allowed to access org_location (View for "org_location" only)
User is allowed to access organisation (View for "organisation" only)
User is allowed to create location (Create for "location" only)
User is allowed to create org_location (Create for "org_location" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to edit location (Edit for "location" only)
User is allowed to edit org_location (Edit for "org_location" only)
User is allowed to edit organisation (Edit for "organisation" only)
Role "pgp":
Role "procurement":
(View for "sap_cc" only)
(View for "time_project" only)
User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'purchasing_agents', 'team_lead') only)
User is allowed Edit on (Edit for "time_project": ('group_lead', 'purchasing_agents', 'team_lead') only)
Role "project":
User is allowed Edit on (Edit for "time_project": ('cost_center', 'department', 'deputy', 'description', 'name', 'nosy', 'organisation', 'responsible', 'status') only)
User is allowed Edit on (Edit for "time_project": ('infosec_req', 'is_extern', 'max_hours', 'op_project', 'planned_effort', 'product_family', 'project_type', 'reporting_group', 'work_location') only)
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
User is allowed to create time_project (Create for "time_project" only)
User is allowed to create time_project_status (Create for "time_project_status" only)
User is allowed to create time_wp (Create for "time_wp" only)
User is allowed to create time_wp_group (Create for "time_wp_group" only)
User is allowed to edit time_project_status (Edit for "time_project_status" only)
User is allowed to edit time_wp (Edit for "time_wp" only)
User is allowed to edit time_wp_group (Edit for "time_wp_group" only)
Role "project_view":
User is allowed to access | |
'''
Classes for extracting "decodable features" from various types of neural signal sources.
Examples include spike rate estimation, LFP power, and EMG amplitude.
'''
import numpy as np
import time
from scipy.signal import butter, lfilter
import math
import os
import nitime.algorithms as tsa
from riglib.ripple.pyns import pyns
class FeatureExtractor(object):
'''
Parent of all feature extractors, used only for interfacing/type-checking.
Feature extractors are objects that get the data they need (e.g., spike timestamps, LFP voltages, etc.)
from the neural data source object and extract features from it
'''
@classmethod
def extract_from_file(cls, *args, **kwargs):
raise NotImplementedError
class DummyExtractor(FeatureExtractor):
'''
An extractor which does nothing. Used for tasks which are only pretending to be BMI tasks, e.g., visual feedback tasks
'''
feature_type = 'obs'
feature_dtype = [('obs', 'f8', (1,))]
def __call__(self, *args, **kwargs):
return dict(obs=np.array([[np.nan]]))
class BinnedSpikeCountsExtractor(FeatureExtractor):
'''
Extracts spike counts from spike timestamps separated into rectangular window.
This extractor is (currently) the main type of feature extractor in intracortical BMIs
'''
feature_type = 'spike_counts'
def __init__(self, source, n_subbins=1, units=[]):
'''
Constructor for BinnedSpikeCountsExtractor
Parameters
----------
source: DataSource instance
Source must implement a '.get()' function which returns the appropriate data
(appropriateness will change depending on the source)
n_subbins: int, optional, default=1
Number of bins into which to divide the observed spike counts
units: np.ndarray of shape (N, 2), optional, default=[]
Units which need spike binning. Each row of the array corresponds to (channel, unit). By default no units will be binned.
Returns
-------
BinnedSpikeCountsExtractor instance
'''
self.feature_dtype = [('spike_counts', 'u4', (len(units), n_subbins)), ('bin_edges', 'f8', 2)]
self.source = source
self.n_subbins = n_subbins
self.units = units
extractor_kwargs = dict()
extractor_kwargs['n_subbins'] = self.n_subbins
extractor_kwargs['units'] = self.units
self.extractor_kwargs = extractor_kwargs
self.last_get_spike_counts_time = 0
def set_n_subbins(self, n_subbins):
'''
Alter the # of subbins without changing the extractor kwargs of a decoder
Parameters
----------
n_subbins : int
Number of bins into which to divide the observed spike counts
Returns
-------
None
'''
self.n_subbins = n_subbins
self.extractor_kwargs['n_subbins'] = n_subbins
self.feature_dtype = [('spike_counts', 'u4', (len(self.units), n_subbins)), ('bin_edges', 'f8', 2)]
def get_spike_ts(self, *args, **kwargs):
'''
Get the spike timestamps from the neural data source. This function has no type checking,
i.e., it is assumed that the Extractor object was created with the proper source
Parameters
----------
None are needed (args and kwargs are ignored)
Returns
-------
numpy record array
Spike timestamps in the format of riglib.plexon.Spikes.dtype
'''
return self.source.get()
def get_bin_edges(self, ts):
'''
Determine the first and last spike timestamps to allow HDF files
created by the BMI to be semi-synchronized with the neural data file
Parameters
----------
ts : numpy record array
Must have field 'ts' of spike timestamps in seconds
Returns
-------
np.ndarray of shape (2,)
The smallest and largest timestamps corresponding to the current feature;
useful for rough synchronization of the BMI event loop with the neural recording system.
'''
if len(ts) == 0:
bin_edges = np.array([np.nan, np.nan])
else:
min_ind = np.argmin(ts['ts'])
max_ind = np.argmax(ts['ts'])
bin_edges = np.array([ts[min_ind]['ts'], ts[max_ind]['ts']])
return bin_edges
@classmethod
def bin_spikes(cls, ts, units, max_units_per_channel=13):
'''
Count up the number of BMI spikes in a list of spike timestamps.
Parameters
----------
ts : numpy record array
Must have field 'ts' of spike timestamps in seconds
units : np.ndarray of shape (N, 2)
Each row corresponds to the channel index (typically the electrode number) and
the unit index (an index to differentiate the possibly many units on the same electrode). These are
the units used in the BMI.
max_units_per_channel : int, optional, default=13
This int is used to map from a (channel, unit) index to a single 'unit_ind'
for faster binning of spike timestamps. Just set to a large number.
Returns
-------
np.ndarray of shape (N,)
Array of spike event counts, one entry for each of the N units.
'''
unit_inds = units[:,0]*max_units_per_channel + units[:,1]
edges = np.sort(np.hstack([unit_inds - 0.5, unit_inds + 0.5]))
spiking_unit_inds = ts['chan']*max_units_per_channel + ts['unit']
counts, _ = np.histogram(spiking_unit_inds, edges)
return counts[::2]
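# Worked example (illustrative, not from the original source): for
# units = np.array([[1, 1], [2, 1]]) and max_units_per_channel=13,
# unit_inds = [14, 27] and edges = [13.5, 14.5, 26.5, 27.5]. np.histogram then
# produces one bin centered on each unit plus a "gap" bin between consecutive
# units, and counts[::2] keeps only the per-unit bins (one count per unit,
# in ascending unit_inds order).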
def __call__(self, start_time, *args, **kwargs):
'''
Main function to retrieve new spike data and bin the counts
Parameters
----------
start_time : float
Absolute time from the task event loop. This is used only to subdivide
the spike timestamps into multiple bins, if desired (if the 'n_subbins' attribute is > 1)
*args, **kwargs : optional positional/keyword arguments
These are passed to the source, or ignored (not needed for this extractor).
Returns
-------
dict
Extracted features to be saved in the task.
'''
ts = self.get_spike_ts(*args, **kwargs)
if len(ts) == 0:
counts = np.zeros([len(self.units), self.n_subbins])
elif self.n_subbins > 1:
subbin_edges = np.linspace(self.last_get_spike_counts_time, start_time, self.n_subbins+1)
# Decrease the first subbin index to include any spikes that were
# delayed in getting to the task layer due to threading issues
# An acceptable delay is 1 sec or less. Realistically, most delays should be
# on the millisecond order
subbin_edges[0] -= 1
subbin_inds = np.digitize(ts['arrival_ts'], subbin_edges)
counts = np.vstack([self.bin_spikes(ts[subbin_inds == k], self.units) for k in range(1, self.n_subbins+1)]).T
else:
counts = self.bin_spikes(ts, self.units).reshape(-1, 1)
print("Units", self.units)
print("Counts:", counts)
counts = np.array(counts, dtype=np.uint32)
bin_edges = self.get_bin_edges(ts)
self.last_get_spike_counts_time = start_time
return dict(spike_counts=counts, bin_edges=bin_edges)
@classmethod
def extract_from_file(cls, files, neurows, binlen, units, extractor_kwargs, strobe_rate=60.0):
'''
Compute binned spike count features
Parameters
----------
files : dict
Data files used to train the decoder. Should contain exactly one type of neural data file (e.g., Plexon, Blackrock, TDT, Ripple)
neurows: np.ndarray of shape (T,)
Timestamps in the plexon time reference corresponding to bin boundaries
binlen: float
Length of time over which to sum spikes from the specified cells
units: np.ndarray of shape (N, 2)
List of units that the decoder will be trained on. The first column specifies the electrode number and the second specifies the unit on the electrode
extractor_kwargs: dict
Any additional parameters to be passed to the feature extractor. This function is agnostic to the actual extractor utilized
strobe_rate: float, optional, default=60.0
The rate (in Hz) at which the task sends the sync pulse to the neural recording file
Returns
-------
spike_counts : np.ndarray of shape (N, T)
Spike counts binned over the length of the datafile.
units : np.ndarray of shape (N, 2)
Each row corresponds to the channel index (typically the electrode number) and
the unit index (an index to differentiate the possibly many units on the same electrode). These are
the units used in the BMI.
extractor_kwargs : dict
Parameters used to instantiate the feature extractor, to be stored
along with the trained decoder so that the exact same feature extractor can be re-created at runtime.
'''
if 'plexon' in files:
from plexon import plexfile
plx = plexfile.openFile(str(files['plexon']))
# interpolate between the rows to 180 Hz
if binlen < 1./strobe_rate:
interp_rows = []
neurows = np.hstack([neurows[0] - 1./strobe_rate, neurows])
for r1, r2 in zip(neurows[:-1], neurows[1:]):
interp_rows += list(np.linspace(r1, r2, 4)[1:])
interp_rows = np.array(interp_rows)
else:
step = int(binlen/(1./strobe_rate)) # Downsample kinematic data according to decoder bin length (assumes non-overlapping bins)
interp_rows = neurows[::step]
print(('step: ', step))
from plexon import psth
spike_bin_fn = psth.SpikeBin(units, binlen)
spike_counts = np.array(list(plx.spikes.bin(interp_rows, spike_bin_fn)))
# discard units that never fired at all
discard_zero_units = extractor_kwargs.pop('discard_zero_units', True)
if discard_zero_units:
unit_inds, = np.nonzero(np.sum(spike_counts, axis=0))
units = units[unit_inds,:]
spike_counts = spike_counts[:, unit_inds]
extractor_kwargs['units'] = units
return spike_counts, units, extractor_kwargs
elif 'blackrock' in files:
nev_fname = [name for name in files['blackrock'] if '.nev' in name][0] # only one of them
nev_hdf_fname = [name for name in files['blackrock'] if '.nev' in name and name[-4:]=='.hdf']
nsx_fnames = [name for name in files['blackrock'] if '.ns' in name]
# interpolate between the rows to 180 Hz
if binlen < 1./strobe_rate:
interp_rows = []
neurows = np.hstack([neurows[0] - 1./strobe_rate, neurows])
for r1, r2 in zip(neurows[:-1], neurows[1:]):
interp_rows += list(np.linspace(r1, r2, 4)[1:])
interp_rows = np.array(interp_rows)
else:
step = int(binlen/(1./strobe_rate)) # Downsample kinematic data according to decoder bin length (assumes non-overlapping bins)
interp_rows = neurows[::step]
if len(nev_hdf_fname) == 0:
nev_hdf_fname =
the first actual value in the score tuples
# produced by the recommender (note that hybrid recommenders use the first
# position to indicate the algorithm number)
if recommender.is_hybrid():
start_index = 1
else:
start_index = 0
recommendations = recommender.recommend(100)
nose.tools.ok_(len(recommendations) > 0, "No recommendations were returned!")
former_top_product = recommendations[0][1]
old_strength = recommendations[0][0]
# Meta-test
boost_activity_type = None
in_boost = 1
for boost_activity_type, in_boost in self.session_context.in_boost_by_activity.items():
if in_boost != 1:
break
nose.tools.ok_(in_boost > 1, "Weak text fixture. There should be at least one in-boosted activity.")
activity = {"external_user_id": target,
"external_product_id": former_top_product,
"activity": boost_activity_type,
"created_at": self.session_context.get_present_date()}
tasks.update_summaries(self.session_context, activity)
session.refresh()
recommendations = recommender.recommend(100)
nose.tools.ok_(len(recommendations) > 0, "No recommendations were returned!")
new_strength = None
for rec in recommendations:
if rec[1] == former_top_product:
new_strength = rec[0]
break
nose.tools.ok_(new_strength is not None,
"The former top recommendation should have been recommended again.")
for i in range(start_index, len(new_strength)):
old_strength_value = old_strength[i]
new_strength_value = new_strength[i]
nose.tools.ok_(abs(new_strength_value / old_strength_value - in_boost) < tests.FLOAT_DELTA,
"Incorrect application of the activity in-boost")
self.db_proxy.increment_impression_summary(user_id=target,
product_id=former_top_product,
date=self.session_context.get_present_date(),
anonymous=False)
self.db_proxy.increment_impression_summary(user_id=target,
product_id=former_top_product,
date=self.session_context.get_present_date(),
anonymous=False)
history_decay = {'history_decay_function_name': 'exponential', 'history_decay_exponential_function_halflife': 2}
session = tests.init_session(user_id=target, custom_settings=history_decay, algorithm=self.algorithm)
recommender = session.get_recommender()
recommendations = recommender.recommend(100)
nose.tools.ok_(len(recommendations) > 0, "No recommendations were returned!")
new_strength = None
for rec in recommendations:
if rec[1] == former_top_product:
new_strength = rec[0]
break
nose.tools.ok_(new_strength is not None,
"The former top recommendation should have been recommended again.")
for i in range(start_index, len(new_strength)):
old_strength_value = old_strength[i]
new_strength_value = new_strength[i]
nose.tools.ok_(abs(new_strength_value / old_strength_value - in_boost / 2) < tests.FLOAT_DELTA,
"Incorrect application of the in-boost and history decay together")
def test_product_age_decay_exponential(self):
""" Tests the effect of applying a product age decay factor based on an exponential
function on recommendations. It applies to all recommendation heuristics.
"""
target = "u_tec_1"
id_twin_product_old = "p_tec_TWIN_OLD"
id_twin_product_new = "p_tec_TWIN_NEW"
# makes it so that the oldest twin is 2 days (the configured half life) older
old_date = self.session_context.get_present_date() - dt.timedelta(days=2)
new_date = self.session_context.get_present_date()
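# (Illustrative note, not from the original: assuming the exponential decay has the
# usual half-life form, strength ~ 2 ** (-product_age / halflife), a product exactly
# one half-life (2 days) older should end up with half the strength of its newer twin,
# which is what the ratio check at the end of this test expects.)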
twin_product_old = {"external_id": id_twin_product_old,
"language": "english",
"date": old_date,
"expiration_date": old_date + dt.timedelta(days=30),
"resources": {"title": "Whatever Gets You Through The Night"},
"full_content": """Begin. Technology. Technology. This is all we got. End.""",
"category": "Nonsense"}
twin_product_new = {"external_id": id_twin_product_new,
"language": "english",
"date": new_date,
"expiration_date": new_date + dt.timedelta(days=30),
"resources": {"title": "Whatever Gets You Through The Night"},
"full_content": """Begin. Technology. Technology. This is all we got. End.""",
"category": "Nonsense"}
self.db_proxy.insert_product(twin_product_old)
tasks.process_product(self.session_context, id_twin_product_old)
self.db_proxy.insert_product(twin_product_new)
tasks.process_product(self.session_context, id_twin_product_new)
# makes it so that all users consume (and have impressions on) the twins, except for the target user
users = self.db_proxy.fetch_all_user_ids()
for user in users:
if user != target:
activity = {"external_user_id": user,
"external_product_id": id_twin_product_old,
"activity": "buy",
"created_at": self.session_context.get_present_date()}
tasks.update_summaries(self.session_context, activity)
activity = {"external_user_id": user,
"external_product_id": id_twin_product_new,
"activity": "buy",
"created_at": self.session_context.get_present_date()}
tasks.update_summaries(self.session_context, activity)
if self.session_context.impressions_enabled:
is_anonymous = config.is_anonymous(user)
self.db_proxy.increment_impression_summary(user,
id_twin_product_old,
date=self.session_context.get_present_date(),
anonymous=is_anonymous)
self.db_proxy.increment_impression_summary(user,
id_twin_product_new,
date=self.session_context.get_present_date(),
anonymous=is_anonymous)
ut.generate_templates(self.session_context)
pt.generate_templates(self.session_context)
pttfidf.generate_templates(self.session_context) # Unfortunately we need to regenerate from scratch,
# otherwise the df's of the twins will be different.
custom_settings = {'product_age_decay_function_name': 'exponential',
'product_age_decay_exponential_function_halflife': 2,
'near_identical_filter_field': None, 'near_identical_filter_threshold': None}
# Disables near-identical filtering
session = tests.init_session(user_id=target, custom_settings=custom_settings, algorithm=self.algorithm)
session.refresh()
recommender = session.get_recommender()
# Determines the index of the first actual value in the score tuples
# produced by the recommender (note that hybrid recommenders use the first
# position to indicate the algorithm number)
if recommender.is_hybrid():
start_index = 1
else:
start_index = 0
recommendations = recommender.recommend(100)
nose.tools.ok_(len(recommendations) > 0, "No recommendations were returned!")
strength_old_twin = None
strength_new_twin = None
for rec in recommendations:
if rec[1] == id_twin_product_old:
strength_old_twin = rec[0]
if rec[1] == id_twin_product_new:
strength_new_twin = rec[0]
for i in range(start_index, len(strength_old_twin)):
old_strength_value = strength_old_twin[i]
new_strength_value = strength_new_twin[i]
nose.tools.ok_(abs(old_strength_value / new_strength_value - 0.5) < tests.FLOAT_DELTA,
"Incorrect application of the product age decay")
def test_recommend(self, test_recommendation_quality=True):
""" Tests whether meaningful recommendations were obtained.
"""
# pre-generates a session context and use it for all recommendation tests below
session = tests.init_session(algorithm=self.algorithm)
def generate_queries_for_category(category, user_count, product_count):
for i in range(1, user_count + 1):
target_user = 'u_{0}_{1}'.format(category, str(i))
result = {
'target_user': target_user,
'category': category,
'product_count': product_count
}
yield result
def recommend(target_user):
""" Returns recommendations for a certain user.
:param target_user: user to recommend
:return: list of recommendations
"""
# updates the session's user context
session.user_id = target_user
session.refresh()
recommender = session.get_recommender()
return recommender.recommend(self.n_recommendations)
def verify_recommendations(_query, _recommendations):
""" Verify that the recommendation was successful.
:param _query: query parameters
:param _recommendations: recommendation result set
"""
recent_activities = session.user_context.recent_activities
products_consumed = list({act["external_product_id"] for act in recent_activities})
n_products_consumed = len(products_consumed)
nose.tools.ok_(len(_recommendations) > 0, "No recommendations were retrieved")
if test_recommendation_quality:
for j in range(min(_query['product_count'] - n_products_consumed, len(_recommendations))):
nose.tools.eq_(_recommendations[j][1][:6], "p_{0}_".format(_query['category']),
"Questionable recommendations were obtained " +
"for user %s: %s" % (_query['target_user'], _recommendations))
queries = []
# Economia
queries += generate_queries_for_category('eco', dp.N_USR_ECONOMIA, dp.N_PROD_ECONOMIA)
# Esportes
queries += generate_queries_for_category('esp', dp.N_USR_ESPORTES, dp.N_PROD_ESPORTES)
# Música
queries += generate_queries_for_category('mus', dp.N_USR_MUSICA, dp.N_PROD_MUSICA)
# Tecnologia
queries += generate_queries_for_category('tec', dp.N_USR_TECNOLOGIA, dp.N_PROD_TECNOLOGIA)
# We did an experiment trying to parallelize the test recommendations, but there was no speedup because the
# overhead is too cumbersome.
n_workers = 1 # For some reason a thread pool with 1 worker is slightly faster than the nonconcurrent version
with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
future_to_query = {executor.submit(wrap(recommend), q['target_user']): q
for q in queries}
for future in concurrent.futures.as_completed(future_to_query):
query = future_to_query[future]
recommendations = future.result()
verify_recommendations(query, recommendations)
def test_multi_activities_blocking_vs_non_blocking(self):
""" Checks that blocking activities prevent items from being recommended,
and that non-blocking activities do not do so.
"""
# Economia
for i in range(1, dp.N_USR_ECONOMIA + 1):
target = "u_eco_" + str(i)
session = tests.init_session(user_id=target, algorithm=self.algorithm)
recommender = session.get_recommender()
recommendations = recommender.recommend(self.n_recommendations)
nose.tools.ok_(len(recommendations) > 0, "Empty recommendation.")
if len(recommendations) > 0:
top_product = recommendations[0][1]
else:
return
supported_activities = self.session_context.supported_activities
blocking_activities = self.session_context.blocking_activities
non_blocking_activities = list(set(supported_activities) - set(blocking_activities))
# Meta-tests
nose.tools.ok_(len(non_blocking_activities) > 0,
"Weak test fixture. There should be at least one non_blocking activity")
nose.tools.ok_(len(blocking_activities) > 0,
"Weak test fixture. There should be at least one blocking activity")
# Saves a non-blocking activity first
activity = {"external_user_id": target,
"external_product_id": top_product,
"activity": non_blocking_activities[0],
"created_at": self.session_context.get_present_date()}
tasks.update_summaries(self.session_context, activity)
session = tests.init_session(user_id=target, algorithm=self.algorithm)
recommender = session.get_recommender()
recommendations = recommender.recommend(self.n_recommendations)
recommended_products = [r[1] for r in recommendations]
nose.tools.ok_(top_product in recommended_products,
"A non-blocking activity should not prevent a product from being recommended")
# Saves a blocking activity first
activity = {"external_user_id": target,
"external_product_id": top_product,
"activity": blocking_activities[0],
"created_at": self.session_context.get_present_date()}
tasks.update_summaries(self.session_context, activity)
session = tests.init_session(user_id=target, algorithm=self.algorithm)
recommender = session.get_recommender()
recommendations = recommender.recommend(self.n_recommendations)
recommended_products = [r[1] for r in recommendations]
if self.session_context.filter_strategy == ctx.AFTER_SCORING:
nose.tools.ok_(top_product not in recommended_products,
"A blocking activity should prevent a product from being recommended")
def test_recommendation_slack(self):
target_user = "u_tec_1"
ttl = 3
custom_settings = {
'history_decay_function_name': 'step',
'history_decay_step_function_ttl': ttl
}
session = tests.init_session(user_id=target_user, custom_settings=custom_settings, algorithm=self.algorithm)
recommender = session.get_recommender()
# this first query just measures how many products we're able to fetch
max_results = len(recommender.recommend(1000))
# retrieves half of those products
initial_query = recommender.recommend(max_results // 2)
recommended_products = [result[1] for result in initial_query]
impressions_summary = self.db_proxy.fetch_impressions_summary(
user_ids=[target_user],
product_ids=[recommended_products],
group_by_product=False,
anonymous=False).get(target_user, {})
# bury the first recommendation set with impressions so that those products do not appear again
for product_id in recommended_products:
# generate enough impressions to bury that product
for i in range(ttl - impressions_summary.get(product_id, (0, None))[0]):
self.db_proxy.increment_impression_summary(user_id=target_user,
product_id=product_id,
date=self.session_context.get_present_date(),
anonymous=False)
session.refresh()
# this query should still return some products if the recommender is internally adding a slack when fetching
# products from the database
recommender = session.get_recommender()
second_query = recommender.recommend(max_results // 2)
nose.tools.ok_(len(second_query) > 0, "Recommender '{0}' did not pass the 'slack test'".format(self.algorithm))
def test_near_identical(self):
""" Tests that two products considered 'near-identical' are not recommended at the same time
(within the same page) when the filtering strategy is AFTER_SCORING.
"""
target = "u_tec_1"
id_twin_product_1 = "p_tec_TWIN_1"
id_twin_product_2 = "p_tec_TWIN_2"
date = self.session_context.get_present_date() - dt.timedelta(days=1)
twin_product_1 = {"external_id": id_twin_product_1,
"language": "english",
"date": date,
"expiration_date": date + dt.timedelta(days=30),
"resources": {"title": "Whatever Gets You Through The Night"},
"full_content": """Begin. Technology. Technology. This is all we got. End.""",
"category": "Nonsense"}
twin_product_2 = {"external_id": id_twin_product_2,
"language": "english",
"date": date,
"expiration_date": date + dt.timedelta(days=30),
"resources": {"title": "Whatever | |
import ast
import collections
import contextlib
import inspect
import numpy
from ..utils.list import EventedList
from ..utils.event import EmitterGroup, Event
class RunList(EventedList):
"""
A list of BlueskyRuns.
"""
__slots__ = ()
def __contains__(self, run):
uid = run.metadata["start"]["uid"]
for run_ in self:
if run_.metadata["start"]["uid"] == uid:
return True
else:
return False
def run_is_completed(run):
"True is Run is completed and no further updates are coming."
return run.metadata["stop"] is not None
def run_is_live(run):
"True if Run is 'live' (observable) based on a streaming source, not at rest."
return hasattr(run, "events")
def run_is_live_and_not_completed(run):
"True if Run is 'live' (observable) and not yet complete."
return run_is_live(run) and (not run_is_completed(run))
@contextlib.contextmanager
def lock_if_live(run):
"""
Lock to prevent new data from being added, if this is a 'live' BlueskyRun.
If it is a BlueskyRun backed by data at rest (i.e. from databroker) do
nothing.
"""
if run_is_live(run):
with run.write_lock:
yield
else:
yield
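# Hypothetical usage sketch (not part of the original module): lock_if_live is a
# no-op for runs at rest and takes run.write_lock only for live runs.
def _example_lock_if_live():
    class _AtRestRun:
        # Stand-in object: it has no 'events' attribute, so run_is_live() is False.
        metadata = {"start": {"uid": "xyz"}, "stop": {}}

    with lock_if_live(_AtRestRun()):
        pass  # nothing is locked; the body simply runs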
# Make numpy functions accessible as (for example) log, np.log, and numpy.log.
_base_namespace = {"numpy": numpy, "np": numpy}
_base_namespace.update({name: getattr(numpy, name) for name in numpy.__all__})
def construct_namespace(run, stream_names):
"""
    Put the contents of a run into a namespace to look up names in or ``eval`` expressions in.
This is used by the plot builders to support usages like
>>> model = Lines("-motor", ["log(det)"])
The words available in these expressions include:
* The ``BlueskyRun`` itself, as ``"run"``, from which any data or metadata
can be obtained
* All the streams, with the data accessible as items in a dict, as in
``"primary['It']"`` or ``"baseline['motor']"``
* The columns in the streams given by stream_names, as in ``"I0"``. If a
column name appears in multiple streams, the streams earlier in the list
get precedence.
* All functions in numpy. They can be spelled as ``log``, ``np.log``, or
``numpy.log``
In the event of name collisions, items higher in the list above will get
precedence.
Parameters
----------
run : BlueskyRun
stream_names : List[String]
Returns
-------
namespace : Dict
"""
namespace = dict(_base_namespace) # shallow copy
with lock_if_live(run):
# Add columns from streams in stream_names. Earlier entries will get
# precedence.
for stream_name in reversed(stream_names):
ds = run[stream_name].to_dask()
namespace.update({column: ds[column] for column in ds})
namespace.update({column: ds[column] for column in ds.coords})
namespace.update({stream_name: run[stream_name].to_dask() for stream_name in stream_names})
namespace.update({"run": run})
return namespace
class BadExpression(Exception):
pass
def call_or_eval(mapping, run, stream_names, namespace=None):
"""
Given a mix of callables and string expressions, call or eval them.
Parameters
----------
mapping : Dict[String, String | Callable]
Each item must be a stream name, field name, a valid Python
expression, or a callable. The signature of the callable may include
        any valid Python identifiers provided by :func:`construct_namespace`
        or the user-provided namespace parameter below. See examples.
run : BlueskyRun
stream_names : List[String]
namespace : Dict, optional
Returns
-------
results : Dict[String, Any]
Raises
------
ValueError
If input is not String or Callable
BadExpression
If input is String and eval(...) raises an error
Examples
--------
A function can have access to the whole BlueskyRun.
>>> def f(run):
... ds = run.primary.read()
... return (ds["a"] - ds["b"]) / (ds["a"] + ds["b"])
...
>>> call_or_eval({"x": f}, run, ["primary"])
But, it also provides a more "magical" option in support of brevity.
    The signature may include parameters with the names of streams or fields. The
names in the signature are significant and will determine what parameters
the function is called with.
>>> def f(a, b):
... return (a - b) / (a + b)
...
>>> call_or_eval({"x": f}, run, ["primary"])
Equivalently, as a lambda function:
>>> call_or_eval({"f": lambda a, b: (a - b) / (a + b)}, run, ["primary"])
"""
with lock_if_live(run):
namespace_ = construct_namespace(run, stream_names)
# Overlay user-provided namespace.
namespace_.update(namespace or {})
del namespace # Avoid conflating namespace and namespace_ below.
return {key: call_or_eval_one(item, namespace_) for key, item in mapping.items()}
def call_or_eval_one(item, namespace):
"""
Given a mix of callables and string expressions, call or eval them.
Parameters
----------
item : String | Callable
Each item must be a stream name, field name, a valid Python
expression, or a callable. The signature of the callable may include
        any valid Python identifiers provided in the namespace.
namespace : Dict
The namespace that the item is evaluated against.
Returns
-------
result : Any
Raises
------
ValueError
If input is not String or Callable
BadExpression
If input is String and eval(...) raises an error
"""
# If it is a callable, call it.
if callable(item):
# Inspect the callable's signature. For each parameter, find an
# item in our namespace with a matching name. This is similar
# to the "magic" of pytest fixtures.
parameters = inspect.signature(item).parameters
kwargs = {}
for name, parameter in parameters.items():
try:
kwargs[name] = namespace[name]
except KeyError:
if parameter.default is parameter.empty:
raise ValueError(f"Cannot find match for parameter {name}")
# Otherwise, it's an optional parameter, so skip it.
return item(**kwargs)
elif isinstance(item, str):
# If it is a key in our namespace, look it up.
try:
# This handles field or stream names that are not valid
# Python identifiers (e.g. ones with spaces in them).
return namespace[item]
except KeyError:
pass
# Check whether it is valid Python syntax.
try:
ast.parse(item)
except SyntaxError as err:
raise ValueError(f"Could find {item!r} in namespace or parse it as a Python expression.") from err
# Try to evaluate it as a Python expression in the namespace.
try:
return eval(item, namespace)
except Exception as err:
raise ValueError(f"Could find {item!r} in namespace or evaluate it.") from err
else:
raise ValueError(f"expected callable or string, received {item!r} of type {type(item).__name__}")
def auto_label(callable_or_expr):
"""
Given a callable or a string, extract a name for labeling axes.
Parameters
----------
callable_or_expr : String | Callable
Returns
-------
label : String
"""
if callable(callable_or_expr):
return getattr(callable_or_expr, "__name__", repr(callable_or_expr))
elif isinstance(callable_or_expr, str):
return callable_or_expr
else:
raise ValueError(
f"expected callable or string, received {callable_or_expr!r} of "
f"type {type(callable_or_expr).__name__}"
)
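# Hypothetical usage sketch (not part of the original module): auto_label uses
# __name__ for callables and echoes strings unchanged.
def _example_auto_label():
    def normalized(det, mon):
        return det / mon

    assert auto_label(normalized) == "normalized"
    assert auto_label("log(det)") == "log(det)"
    # Lambdas report __name__ == "<lambda>", so named functions give better labels.
    assert auto_label(lambda det: det) == "<lambda>"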
class RunManager:
"""
Keep a RunList with a maximum number of Runs, plus any 'pinned' Runs.
This is used internally as a helper class by Lines, Images, and others.
This tracks the relationship between Runs and Artists and ensures correct
cleanup when a Run is removed.
"""
def __init__(self, max_runs, needs_streams):
self._max_runs = int(max_runs)
self._needs_streams = tuple(needs_streams)
self.runs = RunList()
self._pinned = set()
# Maps Run (uid) to set of ArtistSpec.
self._runs_to_artists = collections.defaultdict(list)
self.runs.events.added.connect(self._on_run_added)
self.runs.events.removed.connect(self._on_run_removed)
self.events = EmitterGroup(source=self, run_ready=Event)
def add_run(self, run, *, pinned=False):
"""
Add a Run.
Parameters
----------
run : BlueskyRun
pinned : Boolean
If True, retain this Run until it is removed by the user.
"""
if pinned:
self._pinned.add(run.metadata["start"]["uid"])
self.runs.append(run)
def discard_run(self, run):
"""
        Discard a Run, whether pinned or unpinned.
If the Run is not present, this will return silently.
Parameters
----------
run : BlueskyRun
"""
if run in self.runs:
self.runs.remove(run)
def track_artist(self, artist, runs):
"""
Track an Artist.
This ensures it will be removed when the associated Run is removed.
Parameters
----------
artist : ArtistSpec
runs : List[BlueskyRun]
"""
        # TODO Someday we will need artists that represent data from *multiple*
        # runs, and then we will need to rethink the expected API of artist
        # (.run -> .runs?) and the cache management here. But that would be a
        # wide-reaching change, so we'll stay within the framework as it is
        # today.
if len(runs) != 1:
raise NotImplementedError("We current assume a 1:1 association of aritsts and runs.")
(run,) = runs
run_uid = run.metadata["start"]["uid"]
self._runs_to_artists[run_uid].append(artist)
def _cull_runs(self):
"Remove Runs from the beginning of self.runs to keep the length <= max_runs."
i = 0
while len(self.runs) > self.max_runs + len(self._pinned):
while self.runs[i].metadata["start"]["uid"] in self._pinned:
i += 1
self.runs.pop(i)
def _on_run_added(self, event):
"""
When a new Run is added, mark it as ready or listen for it to become ready.
By "ready" we mean, it has all the streams it needs to be drawn.
"""
self._cull_runs()
run = event.item
if run_is_live_and_not_completed(run):
# If the stream of interest is defined already, plot now.
if set(self.needs_streams).issubset(set(list(run))):
self.events.run_ready(run=run)
else:
# Otherwise, connect a callback to run when the stream of interest arrives.
run.events.new_stream.connect(self._on_new_stream)
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
from werkzeug.exceptions import Forbidden
sys.path.append("..")
from flask import g
from hackathon import Component, RequiredFeature
from hackathon.database import Team, UserTeamRel, User, Hackathon
from hackathon.hackathon_response import not_found, bad_request, precondition_failed, ok, forbidden
from hackathon.constants import TeamMemberStatus
__all__ = ["TeamManager"]
class TeamManager(Component):
"""Component to manage hackathon teams"""
user_manager = RequiredFeature("user_manager")
admin_manager = RequiredFeature("admin_manager")
template_manager = RequiredFeature("template_manager")
def get_user_by_teams(self, user_id):
teams = self.__get_user_teams(user_id)
team_list = map(lambda x: x._asdict(), teams)
return team_list
def get_team_by_name(self, hackathon_id, team_name):
""" get user's team basic information stored on table 'team' based on team name
:type hackathon_id: int
:param hackathon_id: id of hackathon related to the team
:type team_name: str | unicode
:param team_name: name of the team
:rtype: dict
:return: team's information as a dict if team is found otherwise not_found()
"""
team = self.__get_team_by_name(hackathon_id, team_name)
if team:
return team.dic()
else:
return not_found("no such team")
def get_team_members_by_user(self, hackathon_id, user_id):
"""Get team member list of specific user
:type hackathon_id: int
:param hackathon_id: hackathon id
:type user_id: int
:param user_id: id of user
:rtype: dict
:return: team's information and team's members list if team is found otherwise not_found()
"""
detail = {}
team = self.__get_team_by_user(user_id, hackathon_id)
if team:
detail["team"] = self.__get_team_by_id(team.id).dic()
detail["members"] = self.__get_team_members(team)
return detail
else:
return not_found("no such team's members")
def get_team_members_by_name(self, hackathon_id, team_name):
"""Get team member list of specific team_name
:type hackathon_id: int
:param hackathon_id: hackathon id
:type team_name: str|unicode
:param team_name: name of team
:rtype: dict
:return: team's information and team's members list if team is found otherwise not_found()
"""
detail = {}
team = self.__get_team_by_name(hackathon_id, team_name)
if team:
detail["team"] = self.__get_team_by_id(team.id).dic()
detail["members"] = self.__get_team_members(team)
return detail
else:
return not_found("no such team's members")
def get_hackathon_team_list(self, hackathon_id, name=None, number=None):
"""Get the team list of selected hackathon
:type hackathon_id: int
:param hackathon_id: hackathon id
:type name: str|unicode
:param name: name of team. optional
:type number: int
        :param number: optional limit on the number of teams to return
        :rtype: list
        :return: a list of teams for the selected hackathon, filtered by name and limited to `number`
"""
hackathon_team_list = self.db.find_all_objects_by(Team, hackathon_id=hackathon_id)
hackathon_team_list = map(lambda x: x.dic(), hackathon_team_list)
if name is not None:
hackathon_team_list = filter(lambda x: name in x["name"], hackathon_team_list)
if number is not None:
hackathon_team_list = hackathon_team_list[0:number]
return hackathon_team_list
def create_team(self, kwargs):
"""Create new team by given args.
        A user is only allowed to join or create one team, so if the user has already joined or created a team,
        creating a new team will fail.
:type kwargs: dict
:param kwargs: a dict of required information to create new team
:rtype: dict
:return: created team information
"""
user_team_rel = self.__get_team_by_user(g.user.id, g.hackathon.id)
if user_team_rel:
self.log.debug("fail to create team since user is already in some team.")
return precondition_failed("you must leave the current team first")
if "team_name" not in kwargs:
return bad_request("Please provide a team name")
# check team name to avoid duplicate name
team_name = kwargs["team_name"]
if self.__get_team_by_name(g.hackathon.id, team_name):
return precondition_failed("The team name is existed, please provide a new name")
team = Team(name=team_name,
description=kwargs.get("description"),
git_project=kwargs.get("git_project"),
logo=kwargs.get("logo"),
create_time=self.util.get_now(),
update_time=self.util.get_now(),
leader_id=g.user.id,
hackathon_id=g.hackathon.id)
self.db.add_object(team)
user_team_rel = UserTeamRel(join_time=self.util.get_now(),
update_time=self.util.get_now(),
status=TeamMemberStatus.Approved,
hackathon_id=g.hackathon.id,
user_id=g.user.id,
team_id=team.id)
self.db.add_object(user_team_rel)
return team.dic()
def update_team(self, kwargs):
"""Update existing team information
:type kwargs: dict
:param kwargs: a dict to store update information for team
:rtype: dict
:return: updated team information in a dict
"""
if "id" not in kwargs:
return bad_request("Please choose a team to update")
team = self.__get_team_by_id(kwargs["id"])
if not team:
return not_found("team not exists")
# avoid duplicate team with same names
if "team_name" in kwargs and kwargs["team_name"] != team.name:
if self.__get_team_by_name(g.hackathon.id, kwargs["team_name"]):
return precondition_failed("team with the same name exists already")
self.__validate_team_permission(g.hackathon.id, team, g.user)
self.db.update_object(team,
name=kwargs.get("team_name", team.name),
description=kwargs.get("description", team.description),
git_project=kwargs.get("git_project", team.git_project),
logo=kwargs.get("logo", team.logo),
update_time=self.util.get_now())
return team.dic()
def dismiss_team(self, hackathon_id, team_name):
"""Dismiss a team by team leader or hackathon admin
:type hackathon_id: int
:param hackathon_id: hackathon id
:type team_name: str|unicode
:param team_name: name of the team to dismiss
:rtype: bool
        :return: if the dismissal succeeds, return ok; if not, return bad request.
"""
team = self.__get_team_by_name(hackathon_id, team_name)
if not team:
return ok()
self.__validate_team_permission(hackathon_id, team, g.user)
# delete all team members first
self.db.delete_all_objects_by(UserTeamRel, team_id=team.id)
self.db.delete_object(team)
return ok()
def join_team(self, hackathon_id, team_name, user):
"""Join a team will create a record on user_team_rel table which status will be 0.
:type hackathon_id: int
:param hackathon_id: hackathon id
:type team_name: str | unicode
:param team_name: team name
:type user: User
:param user: the user to join a team
:rtype: dict
        :return: if the user has already joined a team or the team does not exist, return bad request; else
        return a dict with the join details.
"""
if self.db.find_first_object_by(UserTeamRel, hackathon_id=hackathon_id, user_id=g.user.id):
return precondition_failed("You have joined another team, please quit first.")
team = self.__get_team_by_name(hackathon_id, team_name)
if team:
candidate = UserTeamRel(join_time=self.util.get_now(),
update_time=self.util.get_now(),
status=TeamMemberStatus.Init,
hackathon_id=hackathon_id,
user_id=user.id,
team_id=team.id)
self.db.add_object(candidate)
return candidate.dic()
else:
return not_found("team not found !")
def update_team_member_status(self, hackathon_id, team_name, status, operator, candidate_id):
""" update user's status on selected team. if current user doesn't have permission, return bad request.
Else, update user's status
:type hackathon_id: int
:param hackathon_id: hackathon id
:type team_name: str|unicode
:param team_name: team name
:type status: int
:param status: the status of the team member, see TeamMemberStatus in constants.py
        :type operator: User
        :param operator: the currently logged-in user who tries to change the status of the team member
        :type candidate_id: int
        :param candidate_id: the id of the candidate who is waiting for approval
        :rtype: bool
        :return: if the update succeeds, return ok; if not, return bad request.
"""
team = self.__get_team_by_name(hackathon_id, team_name)
if not team:
return not_found("team not found")
self.__validate_team_permission(hackathon_id, team, operator)
candidate = self.db.find_first_object_by(UserTeamRel, hackathon_id=hackathon_id, user_id=candidate_id)
if status == TeamMemberStatus.Approved:
candidate.status = status
candidate.update_time = self.util.get_now()
self.db.commit()
return ok("approved")
if status == TeamMemberStatus.Denied:
self.db.delete_object(candidate)
return ok("Your request has been denied, please rejoin another team.")
def leave_team(self, hackathon_id, team_name):
"""Leave a team by user
:type hackathon_id: int
:param hid: hackathon id
:type team_name: str|unicode
:param team_name: team name
:rtype: bool
:return: if leave_team success, return ok. if not ,return bad request.
"""
team = self.__get_team_by_name(hackathon_id, team_name)
if not team:
            # if the team doesn't exist, do nothing
return ok()
# if user is not team leader
if team.leader_id != g.user.id:
self.db.delete_all_objects_by(UserTeamRel, hackathon_id=hackathon_id, user_id=g.user.id)
return ok("You have left the team")
# if user is team leader
if len(self.__get_team_members(team)) >= 2:
return precondition_failed("Please promote a new team leader, before leave team.")
else:
return self.dismiss_team(hackathon_id, team_name)
def kick(self, team_name, candidate_id):
""" team leader and admin kick some one from team
:type team_name: str|unicode
:param tname: team name
:type candidate_id: int
:param candidate_id: the candidate to kick off
:rtype: bool
:return: if kick success, return ok. if not ,return bad request
"""
team = self.__get_team_by_name(g.hackathon.id, team_name)
if not team:
            # if the team doesn't exist, do nothing
return ok()
self.__validate_team_permission(g.hackathon.id, team, g.user)
self.db.delete_all_objects_by(UserTeamRel, team_id=team.id, user_id=candidate_id)
return ok()
def promote_leader(self, hackathon_id, team_name, new_leader_id):
""" team leader promote some one from team to become team leader
:type hackathon_id: int
:param hackathon_id: hackathon id
:type team_name: str|unicode
:param team_name: team name
:type new_leader_id: int
        :param new_leader_id: id of user who will become new leader of the
"""
Python job scheduling for humans.
An in-process scheduler for periodic jobs that uses the builder pattern
for configuration. Schedule lets you run Python functions (or any other
callable) periodically at pre-determined intervals using a simple,
human-friendly syntax.
Inspired by <NAME>' article "Rethinking Cron" [1] and the
"clockwork" Ruby module [2][3].
Features:
- A simple to use API for scheduling jobs.
- Very lightweight and no external dependencies.
- Excellent test coverage.
- Works with Python 2.7 and 3.3
Usage:
>>> import schedule
>>> import time
>>> def job(message='stuff'):
>>> print("I'm working on:", message)
>>> schedule.every(10).minutes.do(job)
>>> schedule.every().hour.do(job, message='things')
>>> schedule.every().day.at("10:30").do(job)
>>> while True:
>>> schedule.run_pending()
>>> time.sleep(1)
[1] http://adam.heroku.com/past/2010/4/13/rethinking_cron/
[2] https://github.com/tomykaira/clockwork
[3] http://adam.heroku.com/past/2010/6/30/replace_cron_with_clockwork/
"""
import datetime
import functools
import logging
import random
import time
from dateutil import parser
from dateutil.tz import tzlocal
from .tz import tz_offsets
logger = logging.getLogger('schedule')
class Scheduler(object):
def __init__(self):
self.jobs = []
def run_pending(self):
"""Run all jobs that are scheduled to run.
Please note that it is *intended behavior that tick() does not
run missed jobs*. For example, if you've registered a job that
should run every minute and you only call tick() in one hour
increments then your job won't be run 60 times in between but
only once.
"""
runnable_jobs = (job for job in self.jobs if job.should_run)
for job in sorted(runnable_jobs):
job.run()
def run_all(self, delay_seconds=0):
"""Run all jobs regardless if they are scheduled to run or not.
        A delay of `delay_seconds` seconds is added between each job. This helps
distribute system load generated by the jobs more evenly
over time."""
        logger.info('Running *all* %i jobs with %is delay in between',
len(self.jobs), delay_seconds)
for job in self.jobs:
job.run()
time.sleep(delay_seconds)
def clear(self):
"""Deletes all scheduled jobs."""
del self.jobs[:]
def every(self, interval=1):
"""Schedule a new periodic job."""
job = Job(interval)
self.jobs.append(job)
return job
def on(self, *days):
"""Schedule a new job to run on specific weekdays.
See the docstring for `Job.on()`.
"""
job = self.every()
job.unit = 'days'
return job.on(*days)
@property
def next_run(self):
"""Datetime when the next job should run."""
if not self.jobs:
return None
return min(self.jobs).next_run
@property
def idle_seconds(self):
"""Number of seconds until `next_run`."""
return (self.next_run - datetime.datetime.now(tzlocal())
).total_seconds()
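# Hypothetical usage sketch (not part of the original module): run_pending() runs an
# overdue job once, not once per missed interval, as noted in its docstring. The
# backdating of next_run below is a test-style trick to simulate elapsed time.
def _example_missed_jobs():
    scheduler = Scheduler()
    counter = {'n': 0}

    def tick():
        counter['n'] += 1

    job = scheduler.every(1).minutes.do(tick)
    job.next_run -= datetime.timedelta(hours=1)  # pretend an hour has passed
    scheduler.run_pending()
    assert counter['n'] == 1  # ran exactly once, not 60 times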
class Job(object):
"""A periodic job as used by `Scheduler`."""
WEEKDAYS = {'sunday': 0, 'monday': 1, 'tuesday': 2, 'wednesday': 3,
'thursday': 4, 'friday': 5, 'saturday': 6}
def __init__(self, interval):
self.interval = interval # pause interval * unit between runs
self.job_func = None # the job job_func to run
self.unit = None # time units, e.g. 'minutes', 'hours', ...
self.at_time = None # optional time at which this job runs
self.between_times = ()
self.run_days = []
self.start_run = None # datetime after which this job will start
self.last_run = None # datetime of the last run
self.next_run = None # datetime of the next run
self.period = None # timedelta between runs, only valid for
def __lt__(self, other):
"""PeriodicJobs are sortable based on the scheduled time
they run next."""
return self.next_run < other.next_run
def __repr__(self):
fmt_dt = "%Y-%m-%d %H:%M:%S %Z"
fmt_t = "%H:%M:%S %Z"
def format_time(t):
return t.strftime(fmt_dt) if t else '[never]'
timestats = '(last run: %s, next run: %s)' % (
format_time(self.last_run), format_time(self.next_run))
job_func_name = self.job_func.__name__
args = [repr(x) for x in self.job_func.args]
kwargs = ['%s=%s' % (k, repr(v))
for k, v in self.job_func.keywords.items()]
call_repr = job_func_name + '(' + ', '.join(args + kwargs) + ')'
if self.run_days:
final_days = []
for day in self.run_days:
days_str = [k.title() for k, i in Job.WEEKDAYS.items()
for d in day if i == d]
final_days.append(' or '.join(days_str))
repr_str = 'Every %s' % ' and '.join(final_days)
else:
repr_str = 'Every %s %s' % (
self.interval,
self.unit[:-1] if self.interval == 1 else self.unit)
if self.between_times:
repr_str += ' between %s' % ' and '.join(
t.strftime(fmt_t).strip()
for t in self.between_times)
elif self.at_time:
repr_str += ' at %s' % self.at_time.strftime(fmt_t).strip()
if self.start_run:
repr_str += ' starting %s' % self.start_run.strftime(fmt_dt)
repr_str += ' do %s %s' % (call_repr, timestats)
return repr_str
@property
def second(self):
assert self.interval == 1
return self.seconds
@property
def seconds(self):
self.unit = 'seconds'
return self
@property
def minute(self):
assert self.interval == 1
return self.minutes
@property
def minutes(self):
self.unit = 'minutes'
return self
@property
def hour(self):
assert self.interval == 1
return self.hours
@property
def hours(self):
self.unit = 'hours'
return self
@property
def day(self):
assert self.interval == 1
return self.days
@property
def days(self):
self.unit = 'days'
return self
@property
def week(self):
assert self.interval == 1
return self.weeks
@property
def weeks(self):
self.unit = 'weeks'
return self
def on(self, *days):
"""Schedule the job to run on specific weekdays.
`days` can be a string (or sequence of strings) with the name of the
weekday (case insensitive), e.g. 'Monday', 'sunday', etc, or a starting
substring of the name of the weekday, e.g. 'tue', 'Sat', etc.
If you specify multiple days, e.g. ('mon', 'wed'), the job will run
every Monday and Wednesday.
You can also specify OR conditions by separating the day names with a
pipe, e.g. ('sun|mon', 'wed|thu'). In this case the job will run
every Sunday *or* Monday, and every Wednesday *or* Thursday.
"""
weeknums = []
for day in days:
day_or = set()
for d in day.split('|'):
for n, i in Job.WEEKDAYS.items():
if n.startswith(d.lower()):
day_or.add(i)
if day_or:
weeknums.append(day_or)
self.run_days = weeknums
return self
def at(self, time_str):
"""Schedule the job every day at a specific time.
Calling this is only valid for jobs scheduled to run every
N day(s).
"""
assert self.unit == 'days'
self.at_time = parser.parse(time_str, tzinfos=tz_offsets)
if not self.at_time.tzinfo:
self.at_time = self.at_time.replace(tzinfo=tzlocal())
return self
def between(self, time_str):
"""Schedule the job at a random time between two timestamps."""
times = []
for t in time_str.split('-'):
dt = parser.parse(t, tzinfos=tz_offsets)
if not dt.tzinfo:
dt = dt.replace(tzinfo=tzlocal())
times.append(dt)
self.between_times = tuple(times)
return self
def starting(self, date_str):
self.start_run = parser.parse(date_str, tzinfos=tz_offsets)
if not self.start_run.tzinfo:
self.start_run = self.start_run.replace(tzinfo=tzlocal())
return self
def do(self, job_func, *args, **kwargs):
"""Specifies the job_func that should be called every time the
job runs.
Any additional arguments are passed on to job_func when
the job runs.
"""
self.job_func = functools.partial(job_func, *args, **kwargs)
functools.update_wrapper(self.job_func, job_func)
self._schedule_next_run()
return self
@property
def should_run(self):
"""True if the job should be run now."""
return datetime.datetime.now(tzlocal()) >= self.next_run
def run(self):
"""Run the job and immediately reschedule it."""
logger.info('Running job %s', self)
self.job_func()
self.last_run = datetime.datetime.now(tzlocal())
self._schedule_next_run()
def _schedule_next_run(self):
"""Compute the instant when this job should run next."""
# Allow *, ** magic temporarily:
# pylint: disable=W0142
assert self.unit in ('seconds', 'minutes', 'hours', 'days', 'weeks')
starting = self.start_run or datetime.datetime.now(tzlocal())
self.period = datetime.timedelta(**{self.unit: self.interval})
self.next_run = starting + self.period
if self.run_days:
run_days = self.run_days[:]
if self.last_run:
starting = self.last_run
# Don't consider this day group if it has been run already
for day in self.run_days:
if self.last_run.isoweekday() in day:
run_days.remove(day)
days = set()
for day in run_days:
days.add(random.sample(day, 1)[0])
if not days:
days_delta = 0
else:
# Calculate the closest day from the starting date
delta_all = sorted([(i - starting.isoweekday()) % 7
for i in days])
days_delta = delta_all[0]
if (days_delta == 0 and self.last_run and
self.last_run.date() == starting.date()):
# Make sure the job doesn't run today twice
if self.unit == 'days':
days_delta = 7
elif self.unit == 'weeks':
days_delta = self.interval * 7
self.next_run = starting + datetime.timedelta(days=days_delta)
if self.between_times:
start, end = self.between_times
# Choose a random time between both timestamps
self.at_time = (start + datetime.timedelta(
seconds=random.randint(0, int(
(end - start).total_seconds()))))
if self.at_time:
self.next_run = self.next_run.replace(hour=self.at_time.hour,
minute=self.at_time.minute,
second=self.at_time.second,
microsecond=0,
tzinfo=self.at_time.tzinfo)
# If we are running for the first time, make sure we run
# at the specified time *today* as well
if (not self.last_run and not self.run_days and
self.at_time > datetime.datetime.now(tzlocal())):
self.next_run = self.next_run - datetime.timedelta(days=1)
logger.info('Scheduled job %s', self)
# The following methods are shortcuts for not having to
# create a Scheduler instance:
default_scheduler = Scheduler()
jobs = default_scheduler.jobs # todo: should this be a copy, e.g. jobs()?
def every(interval=1):
"""Schedule a new periodic job."""
return default_scheduler.every(interval)
def on(*days):
"""Schedule a new job to run on specific weekdays.
See the docstring for `Job.on()`.
"""
return default_scheduler.on(*days)
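# Hypothetical usage sketch (not part of the original module): weekday scheduling
# with Job.on(). Plain names run on every listed day; an 'a|b' group picks one of
# its days at random each time the job is (re)scheduled.
def _example_weekday_groups():
    def job():
        print("I'm working")

    on('mon', 'wed').do(job)           # every Monday and every Wednesday
    on('sun|mon', 'wed|thu').do(job)   # Sunday OR Monday, and Wednesday OR Thursday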
def run_pending():
"""Run all jobs that are | |
($lib.layer.get().pack())')
size = info.get('totalsize')
self.gt(size, 1)
# Verify we're showing actual disk usage and not just apparent
self.lt(size, 1000000000)
# Try to create an invalid layer
mesgs = await core.stormlist('$lib.layer.add(ldef=$lib.dict(lockmemory=(42)))')
# Create a new layer
newlayr = await core.callStorm('return($lib.layer.add().iden)')
self.isin(newlayr, core.layers)
# Ensure new layer is set to current model revision
newrev = await core.layers[newlayr].getModelVers()
self.eq(s_modelrev.maxvers, newrev)
# List the layers in the cortex
q = '''
for $layer in $lib.layer.list() {
$lib.print($layer.iden)
}
'''
idens = []
mesgs = await core.stormlist(q)
for mesg in mesgs:
if mesg[0] == 'print':
idens.append(mesg[1]['mesg'])
self.sorteq(idens, core.layers)
# Create a new layer with a name
q = f'$lib.print($lib.layer.add($lib.dict(name=foo)).iden)'
for mesg in await core.stormlist(q):
if mesg[0] == 'print':
namedlayer = mesg[1]['mesg']
self.eq(core.layers.get(namedlayer).layrinfo.get('name'), 'foo')
# Delete a layer
q = f'$lib.print($lib.layer.del({newlayr}))'
mesgs = await core.stormlist(q)
self.notin(newlayr, core.layers)
# Sad paths
q = f'$lib.layer.get(foo)'
with self.raises(s_exc.NoSuchIden):
await core.nodes(q)
q = f'$lib.layer.del(foo)'
with self.raises(s_exc.NoSuchIden):
await core.nodes(q)
q = f'$lib.layer.del({mainlayr})'
with self.raises(s_exc.LayerInUse):
await core.nodes(q)
# Test permissions
visi = await prox.addUser('visi')
await prox.setUserPasswd(visi['iden'], 'secret')
async with core.getLocalProxy(user='visi') as asvisi:
q = 'layer.get'
mesgs = await asvisi.storm(q).list()
self.stormIsInPrint(mainlayr, mesgs)
q = f'layer.get {mainlayr}'
mesgs = await asvisi.storm(q).list()
self.stormIsInPrint(mainlayr, mesgs)
q = 'layer.list'
idens = []
mesgs = await asvisi.storm(q).list()
for layr in core.layers.keys():
self.stormIsInPrint(layr, mesgs)
# Add requires 'add' permission
await self.agenraises(s_exc.AuthDeny, asvisi.eval('$lib.layer.add()'))
await prox.addUserRule(visi['iden'], (True, ('layer', 'add')))
layers = set(core.layers.keys())
q = 'layer.add --name "hehe haha"'
mesgs = await core.stormlist(q)
visilayr = list(set(core.layers.keys()) - layers)[0]
self.stormIsInPrint('(name: hehe haha)', mesgs)
self.isin(visilayr, core.layers)
# Del requires 'del' permission
await self.agenraises(s_exc.AuthDeny, asvisi.eval(f'$lib.layer.del({visilayr})'))
await prox.addUserRule(visi['iden'], (True, ('layer', 'del')))
q = f'layer.del {visilayr}'
mesgs = await asvisi.storm(q).list()
self.notin(visilayr, core.layers)
# Test add layer opts
layers = set(core.layers.keys())
q = f'layer.add --lockmemory --growsize 5000'
mesgs = await core.stormlist(q)
locklayr = list(set(core.layers.keys()) - layers)[0]
layr = core.getLayer(locklayr)
self.true(layr.lockmemory)
q = '''
for ($buid, $sode) in $lib.layer.get().getStorNodes() {
$lib.fire(layrdiff, sode=$sode)
}
'''
await core.addTagProp('risk', ('int', {}), ())
await core.nodes('[ it:dev:str=foo +#test:risk=50 ]')
gotn = [mesg[1] async for mesg in asvisi.storm(q) if mesg[0] == 'storm:fire']
fire = [mesg for mesg in gotn if mesg['data']['sode']['form'] == 'it:dev:str']
self.len(1, fire)
self.eq(fire[0]['data']['sode']['tagprops'], {'test': {'risk': (50, 9)}})
q = '''
$lib.print($lib.layer.get().pack())
$lib.fire(layrfire, layr=$lib.layer.get().pack())
'''
gotn = [mesg[1] async for mesg in asvisi.storm(q)]
fire = [mesg for mesg in gotn if mesg.get('type') == 'layrfire']
self.len(1, fire)
self.nn(fire[0]['data'].get('layr', None))
# formcounts for layers are exposed on the View object
await core.nodes('[(test:guid=(test,) :size=1138) (test:int=8675309)]')
counts = await core.callStorm('return( $lib.layer.get().getFormCounts() )')
self.eq(counts.get('test:int'), 2)
self.eq(counts.get('test:guid'), 1)
async def test_storm_lib_layer_upstream(self):
async with self.getTestCore() as core:
async with self.getTestCore() as core2:
await core2.nodes('[ inet:ipv4=1.2.3.4 ]')
url = core2.getLocalUrl('*/layer')
layriden = core2.view.layers[0].iden
offs = await core2.view.layers[0].getEditIndx()
layers = set(core.layers.keys())
q = f'layer.add --upstream {url}'
mesgs = await core.stormlist(q)
uplayr = list(set(core.layers.keys()) - layers)[0]
q = f'layer.set {uplayr} name "woot woot"'
mesgs = await core.stormlist(q)
self.stormIsInPrint('(name: woot woot)', mesgs)
layr = core.getLayer(uplayr)
evnt = await layr.waitUpstreamOffs(layriden, offs)
self.true(await asyncio.wait_for(evnt.wait(), timeout=6))
async def test_storm_lib_view(self):
async with self.getTestCoreAndProxy() as (core, prox):
derp = await core.auth.addUser('derp')
root = await core.auth.getUserByName('root')
await derp.addRule((True, ('view', 'add')))
await core.addTagProp('risk', ('int', {'min': 0, 'max': 100}), {'doc': 'risk score'})
await core.nodes('[test:int=12 +#tag.test +#tag.proptest:risk=20]')
# Get the main view
mainiden = await core.callStorm('return($lib.view.get().iden)')
altview = await core.callStorm('''
$layers = $lib.list()
for $layer in $lib.view.get().layers {
$layers.append($layer.iden)
}
return($lib.view.add($layers).iden)
''')
altlayr = await core.callStorm('return($lib.layer.add().iden)')
asderp = {'user': derp.iden, 'vars': {'altlayr': altlayr}}
with self.raises(s_exc.AuthDeny):
await core.callStorm(f'return($lib.view.add(($altlayr,)))', opts=asderp)
asderp = {'user': derp.iden, 'vars': {'altview': altview}}
with self.raises(s_exc.AuthDeny):
await core.callStorm(f'return($lib.view.get($altview).fork())', opts=asderp)
# Fork the main view
q = f'''
$view=$lib.view.get().fork()
return(($view.iden, $view.layers.index(0).iden))
'''
forkiden, forklayr = await core.callStorm(q)
self.isin(forkiden, core.views)
self.isin(forklayr, core.layers)
msgs = await core.stormlist(f'$v=$lib.view.get({forkiden}) $lib.print($lib.len($v))')
self.stormIsInErr('View does not have a length', msgs)
# Add a view
ldef = await core.addLayer()
newlayer = core.getLayer(ldef.get('iden'))
newiden = await core.callStorm(f'return($lib.view.add(({newlayer.iden},)).iden)')
self.nn(newiden)
self.isin(newiden, core.views)
# List the views in the cortex
q = '''
$views = $lib.list()
for $view in $lib.view.list() {
$views.append($view.iden)
}
return($views)
'''
idens = await core.callStorm(q)
self.sorteq(idens, core.views.keys())
# Delete the added view
q = f'$lib.view.del({newiden})'
await core.nodes(q)
self.notin(newiden, core.views)
# Fork the forked view
q = f'''
$forkview=$lib.view.get({forkiden}).fork()
return($forkview.pack().iden)
'''
childiden = await core.callStorm(q)
self.nn(childiden)
# Can't merge the first forked view if it has children
q = f'$lib.view.get({forkiden}).merge()'
await self.asyncraises(s_exc.CantMergeView, core.callStorm(q))
# Can't merge the child forked view if the parent is read only
core.views[childiden].parent.layers[0].readonly = True
q = f'$lib.view.get({childiden}).merge()'
await self.asyncraises(s_exc.ReadOnlyLayer, core.callStorm(q))
core.views[childiden].parent.layers[0].readonly = False
await core.nodes(q)
# Merge the forked view
q = f'$lib.view.get({childiden}).merge()'
await core.nodes(q)
# Remove the forked view
q = f'$lib.view.del({childiden})'
await core.nodes(q)
self.notin(childiden, core.views)
# Sad paths
await self.asyncraises(s_exc.NoSuchView, core.nodes('$lib.view.del(foo)'))
await self.asyncraises(s_exc.NoSuchView, core.nodes('$lib.view.get(foo)'))
await self.asyncraises(s_exc.CantMergeView, core.nodes(f'$lib.view.get().merge()'))
await self.asyncraises(s_exc.NoSuchLayer, core.nodes(f'view.add --layers {s_common.guid()}'))
await self.asyncraises(s_exc.SynErr, core.nodes('$lib.view.del($lib.view.get().iden)'))
# Check helper commands
# Get the main view
mesgs = await core.stormlist('view.get')
self.stormIsInPrint(mainiden, mesgs)
await core.stormlist('$lib.view.get().set(name, "test view")')
await core.stormlist('$lib.view.get().set(desc, "test view desc")')
await core.stormlist('$lib.layer.get().set(name, "test layer")')
await core.stormlist('$lib.layer.get().set(desc, "test layer desc")')
self.eq(await core.callStorm('return( $lib.view.get().get(name))'), 'test view')
self.eq(await core.callStorm('return( $lib.view.get().get(desc))'), 'test view desc')
self.eq(await core.callStorm('return( $lib.layer.get().get(name))'), 'test layer')
self.eq(await core.callStorm('return( $lib.layer.get().get(desc))'), 'test layer desc')
with self.raises(s_exc.BadOptValu):
await core.nodes('$lib.view.get().set(hehe, haha)')
with self.raises(s_exc.BadOptValu):
await core.nodes('$lib.layer.get().set(hehe, haha)')
async with core.getLocalProxy() as prox:
self.eq(core.view.iden, await prox.callStorm('return ($lib.view.get().get(iden))'))
q = 'return ($lib.view.get().layers.index(0).get(iden))'
self.eq(core.view.layers[0].iden, await prox.callStorm(q))
q = f'view.get {mainiden}'
mesgs = await core.stormlist(q)
self.stormIsInPrint(mainiden, mesgs)
self.stormIsInPrint('readonly: False', mesgs)
self.stormIsInPrint(core.view.layers[0].iden, mesgs)
# Fork the main view
views = set(core.views.keys())
q = f'view.fork {mainiden} --name lulz'
mesgs = await core.stormlist(q)
self.stormIsInPrint('(name: lulz)', mesgs)
helperfork = list(set(core.views.keys()) - views)[0]
self.isin(helperfork, core.views)
# Add a view
ldef = await core.addLayer()
newlayer2 = core.getLayer(ldef.get('iden'))
views = set(core.views.keys())
q = f'view.add --name "foo bar" --layers {newlayer.iden} {newlayer2.iden}'
mesgs = await core.stormlist(q)
self.stormIsInPrint('(name: foo bar)', mesgs)
helperadd = list(set(core.views.keys()) - views)[0]
# List the views in the cortex
q = 'view.list'
mesgs = await core.stormlist(q)
self.stormIsInPrint(f'Creator: {root.iden}', mesgs)
self.stormIsInPrint(f'readonly: False', mesgs)
for viden, v in core.views.items():
self.stormIsInPrint(viden, mesgs)
for layer in v.layers:
self.stormIsInPrint(layer.iden, mesgs)
# Delete the added view
q = f'view.del {helperadd}'
await core.nodes(q)
self.notin(helperadd, core.views)
# Merge the forked view
q = f'view.merge --delete {helperfork}'
await core.nodes(q)
self.notin(helperfork, core.views)
# Test permissions
visi = await prox.addUser('visi', passwd='<PASSWORD>')
async with core.getLocalProxy(user='visi') as asvisi:
await asvisi.eval('$lib.view.list()').list()
await asvisi.eval('$lib.view.get()').list()
# Add and Fork require 'add' permission
await self.agenraises(s_exc.AuthDeny, asvisi.eval(f'$lib.view.add(({newlayer.iden},))'))
await self.agenraises(s_exc.AuthDeny, asvisi.eval(f'$lib.view.get({mainiden}).fork()'))
await prox.addUserRule(visi['iden'], (True, ('view', 'add')))
await prox.addUserRule(visi['iden'], (True, ('layer', 'read')), gateiden=newlayer.iden)
q = f'''
$newview=$lib.view.add(({newlayer.iden},))
return($newview.pack().iden)
'''
addiden = await asvisi.callStorm(q)
self.isin(addiden, core.views)
q = f'''
$forkview=$lib.view.get({mainiden}).fork()
$lib.print($forkview.pack().iden)
'''
mesgs = await asvisi.storm(q).list()
for mesg in mesgs:
if mesg[0] == 'print':
forkediden = mesg[1]['mesg']
self.isin(forkediden, core.views)
# Owner can 'get' the forked view
q = f'$lib.view.get({forkediden})'
vals = await asvisi.storm(q).list()
self.len(2, vals)
# Del and Merge require 'del' permission unless performed by the owner
# Delete a view the user owns
q = f'$lib.view.del({addiden})'
await asvisi.storm(q).list()
self.notin(addiden, core.views)
forkview = core.getView(forkediden)
await alist(forkview.eval('[test:int=34 +#tag.test +#tag.proptest:risk=40]'))
await alist(forkview.eval('test:int=12 [-#tag.proptest:risk]'))
await alist(forkview.eval('test:int=12 | delnode'))
# Make a bunch of nodes so we chunk the permission check
for i in range(1000):
opts = {'vars': {'val': i + 1000}}
await self.agenlen(1, forkview.eval('[test:int=$val]', opts=opts))
# Merge the view forked by the user
# Will need perms for all the ops required to merge
q = f'$lib.view.get({forkediden}).merge()'
mesgs = await asvisi.storm(q).list()
await self.agenraises(s_exc.AuthDeny, asvisi.eval(q))
await prox.addUserRule(visi['iden'], (True, ('node', 'add',)))
await prox.addUserRule(visi['iden'], (True, ('node', 'del',)))
await prox.addUserRule(visi['iden'], (True, ('node', 'prop', 'set',)))
await prox.addUserRule(visi['iden'], (True, ('node', 'prop', 'del',)))
await prox.addUserRule(visi['iden'], (True, ('node', 'tag', 'add',)))
await prox.addUserRule(visi['iden'], (True, ('node', 'tag', 'del',)))
q = f'''
$view = $lib.view.get({forkediden})
$view.merge()
$lib.view.del($view.iden)
$lib.layer.del($view.layers.index(0).iden)
'''
await asvisi.callStorm(q)
self.notin(forkediden, core.views)
# Make some views not owned by the user
views = set(core.views.keys())
q = f'view.add --layers {newlayer.iden}'
mesgs = await core.stormlist(q)
self.stormIsInPrint('(name: unnamed)', mesgs)
rootadd = list(set(core.views.keys()) - views)[0]
self.isin(rootadd, core.views)
q = f'view.set {rootadd} name "lol lol"'
mesgs = await core.stormlist(q)
self.stormIsInPrint('(name: lol lol)', mesgs)
q = f'view.fork {mainiden}'
mesgs = await core.stormlist(q)
for mesg in mesgs:
if mesg[0] == 'print':
rootfork = mesg[1]['mesg'].split(' ')[-1]
self.isin(rootfork, core.views)
await self.agenraises(s_exc.AuthDeny, asvisi.eval(f'$lib.view.del({rootadd})'))
                await prox.addUserRule(visi['iden'], (True,
'''
CHypre (Complex Hypre)
CHypreVec : ParVector
CHypreMat : ParCSR
container object to support complex using
real value hypre
it should work with pure real or pure imaginary
case too.
it follows the method naming convention used
in scipy.sparse. However, since it inherits the list
object, __setitem__ can not be used for accessing
array elements. Use set_element, instead.
'''
import numpy as np
from numbers import Number
from scipy.sparse import csr_matrix, coo_matrix, lil_matrix
from mfem.common.parcsr_extra import *
# DO NOT IMPORT MPI at global scope, since some routines will be used
# in serial mode too.
try:
import mfem.par
MFEM_PAR = True
except BaseException:
MFEM_PAR = False
class CHypreVec(list):
def __init__(self, r=None, i=None, horizontal=False):
list.__init__(self, [None] * 2)
self._horizontal = horizontal
if isinstance(r, np.ndarray):
self[0] = ToHypreParVec(r)
else:
self[0] = r
if isinstance(i, np.ndarray):
self[1] = ToHypreParVec(i)
else:
self[1] = i
def __repr__(self):
if self[0] is not None:
part = self[0].GetPartitioningArray()
elif self[1] is not None:
part = self[1].GetPartitioningArray()
else:
return "CHypreVec (empty)"
return "CHypreVec" + str(self.shape) + \
"[" + str(part[1] - part[0]) + "]"
@property
def imag(self):
return self[1]
@imag.setter
def imag(self, value):
self[1] = value
@property
def real(self):
return self[0]
@real.setter
def real(self, value):
self[0] = value
@property
def shape(self):
if self[0] is not None:
size = self[0].GlobalSize()
elif self[1] is not None:
size = self[1].GlobalSize()
else:
size = 0.0
if self._horizontal:
return 1, size
else:
return size, 1
def isComplex(self):
return not (self[1] is None)
def GetPartitioningArray(self):
if self[0] is not None:
part = self[0].GetPartitioningArray()
#part[2] = self[0].GlobalSize()
elif self[1] is not None:
            part = self[1].GetPartitioningArray()
#part[2] = self[1].GlobalSize()
else:
raise ValueError("CHypreVec is empty")
return part
def __imul__(self, other):
if isinstance(other, CHypreVec):
assert False, "CHypreVec *= vector is not supported. Use dot"
elif np.iscomplexobj(other):
#other = complex(other)
i = other.imag
r = other.real
if self[0] is not None and self[1] is not None:
rr = self[0].GetDataArray() * r - self[1].GetDataArray() * i
ii = self[0].GetDataArray() * i + self[1].GetDataArray() * r
self[0] = ToHypreParVec(rr)
self[1] = ToHypreParVec(ii)
elif self[0] is not None:
if np.any(i != 0.):
self[1] = ToHypreParVec(i * self[0].GetDataArray())
if np.any(r != 0.):
tmp = self[0].GetDataArray()
tmp *= r
else:
self[0] = None
elif self[1] is not None:
if np.any(i != 0.):
self[0] = ToHypreParVec(-i * self[1].GetDataArray())
if np.any(r != 0.):
tmp = self[1].GetDataArray()
tmp *= r
else:
self[1] = None
else:
                pass  # both real and imaginary parts are None; nothing to do
else:
other = float(other)
if self[0] is not None:
self[0] *= other
if self[1] is not None:
self[1] *= other
return self
def __mul__(self, other):
if isinstance(other, CHypreVec):
assert False, "CHypreVec *= vector is not supported. Use dot"
elif np.iscomplexobj(other):
other = complex(other)
i = other.imag
r = other.real
else:
r = float(other)
i = 0.0
rdata = self[0].GetDataArray() if self[0] is not None else 0
idata = self[1].GetDataArray() if self[1] is not None else 0
rr = rdata * r - idata * i
ii = rdata * i + idata * r
# note: for the real part we keep it even if it is zero
        # so that it conserves vector size information
rr = ToHypreParVec(rr)
ii = ToHypreParVec(ii) if np.count_nonzero(ii) != 0 else None
return CHypreVec(rr, ii, horizontal=self._horizontal)
def __add__(self, other):
        assert self._horizontal == other._horizontal, "cannot add vertical and horizontal vectors"
if self[0] is not None and other[0] is not None:
data = self[0].GetDataArray() + other[0].GetDataArray()
r = ToHypreParVec(data)
elif self[0] is not None:
data = self[0].GetDataArray()
r = ToHypreParVec(data)
elif other[0] is not None:
data = other[0].GetDataArray()
r = ToHypreParVec(data)
else:
r = None
if self[1] is not None and other[1] is not None:
data = self[1].GetDataArray() + other[1].GetDataArray()
i = ToHypreParVec(data)
elif self[1] is not None:
data = self[1].GetDataArray()
i = ToHypreParVec(data)
elif other[1] is not None:
data = other[1].GetDataArray()
i = ToHypreParVec(data)
else:
i = None
return CHypreVec(r, i, horizontal=self._horizontal)
def __sub__(self, other):
        assert self._horizontal == other._horizontal, "cannot add vertical and horizontal vectors"
if self[0] is not None and other[0] is not None:
data = self[0].GetDataArray() - other[0].GetDataArray()
r = ToHypreParVec(data)
elif self[0] is not None:
data = self[0].GetDataArray()
r = ToHypreParVec(data)
elif other[0] is not None:
data = -other[0].GetDataArray()
r = ToHypreParVec(data)
else:
r = None
if self[1] is not None and other[1] is not None:
data = self[1].GetDataArray() - other[1].GetDataArray()
i = ToHypreParVec(data)
elif self[1] is not None:
data = self[1].GetDataArray()
i = ToHypreParVec(data)
elif other[1] is not None:
data = -other[1].GetDataArray()
i = ToHypreParVec(data)
else:
i = None
return CHypreVec(r, i, horizontal=self._horizontal)
def dot(self, other):
if isinstance(other, CHypreVec):
return InnerProductComplex(self, other)
elif (isinstance(other, CHypreMat) and
self._horizontal):
ret = other.transpose().dot(self)
ret._horizontal = True
return ret
else:
raise ValueError(
"CHypreVec::dot supports Vec*Vec (InnerProduct) and (Mat^t*Vec)^t ")
def get_elements(self, idx):
part = self.GetPartitioningArray()
idx = idx - part[0]
idx = idx[idx < part[1]-part[0]]
idx = idx[idx >= 0]
if len(idx) == 0:
return np.array([])
ret = 0.0
if self[0] is not None:
ret = ret + self[0].GetDataArray()[idx]
if self[1] is not None:
ret = ret + 1j*self[1].GetDataArray()[idx]
return ret
def set_elements(self, idx, value):
part = self.GetPartitioningArray()
idx = idx - part[0]
idx = idx[idx < part[1]-part[0]]
idx = idx[idx >= 0]
rvalue = value.real if np.iscomplexobj(value) else value
if len(idx) == 0:
return
if self[0] is not None:
self[0].GetDataArray()[idx] = rvalue
if np.iscomplexobj(value):
if self[1] is None:
i = self[0].GetDataArray()*0.0
self[1] = ToHypreParVec(i)
self[1].GetDataArray()[idx] = value.imag
def set_element(self, i, v):
part = self.GetPartitioningArray()
if part[0] <= i and i < part[1]:
v = complex(v)
if self[0] is not None:
self[0][int(i - part[0])] = v.real
if self[1] is not None:
self[1][int(i - part[0])] = v.imag
def get_element(self, i):
part = self.GetPartitioningArray()
if part[0] <= i and i < part[1]:
if self[0] is not None:
r = self[0][int(i - part[0])]
else:
r = 0
if self[1] is not None:
return r + 1j * self[1][int(i - part[0])]
else:
return r
def copy_element(self, tdof, vec):
for i in tdof:
v = vec.get_element(i)
self.set_element(i, v)
'''
def gather(self):
from mpi4py import MPI
myid = MPI.COMM_WORLD.rank
vecr = 0.0; veci = 0.0
if self[0] is not None:
vecr = gather_vector(self[0].GetDataArray(), MPI.DOUBLE)
if self[1] is not None:
veci = gather_vector(self[1].GetDataArray(), MPI.DOUBLE)
if myid == 0:
if self[0] is None:
return vecr
else:
return vecr + 1j*veci
'''
def get_squaremat_from_right(self):
'''
        square matrix which can be multiplied from the right of self.
'''
if not self._horizontal:
raise ValueError("Vector orientation is not right")
part = self.GetPartitioningArray()
width = self[1].GlobalSize()
return SquareCHypreMat(width, part, real=(self[1] is None))
def transpose(self):
self._horizontal = not self._horizontal
return self
def _do_reset(self, v, idx):
        # ownership is transferred to a new vector.
ownership = v.GetOwnership()
data = v.GetDataArray()
part = v.GetPartitioningArray()
for i in idx:
if i >= part[1]:
continue
if i < part[0]:
continue
data[i - part[0]] = 0
ret = ToHypreParVec(data)
ret.SetOwnership(ownership)
v.SetOwnership(0)
return ret
def resetCol(self, idx):
if self._horizontal:
if self[0] is not None:
self[0] = self._do_reset(self[0], idx)
if self[1] is not None:
self[1] = self._do_reset(self[1], idx)
else:
if 0 in idx:
self *= 0.0
def resetRow(self, idx):
if self._horizontal:
if 0 in idx:
self *= 0.0
else:
if self[0] is not None:
self[0] = self._do_reset(self[0], idx)
if self[1] is not None:
self[1] = self._do_reset(self[1], idx)
def _do_select(self, v, lidx):
        # ownership is transferred to a new vector.
ownership = v.GetOwnership()
data = v.GetDataArray()
data2 = data[lidx]
ret = ToHypreParVec(data2)
ret.SetOwnership(ownership)
v.SetOwnership(0)
return ret
def selectRows(self, idx):
'''
idx is global index
'''
if self._horizontal:
if not 0 in idx:
raise ValueError("VectorSize becomes zero")
return self
part = self.GetPartitioningArray()
idx = idx[idx >= part[0]]
idx = idx[idx < part[1]]
lidx = idx - part[0]
r = None
i = None
if not self._horizontal:
if self[0] is not None:
r = self._do_select(self[0], lidx)
if self[1] is not None:
i = self._do_select(self[1], lidx)
return CHypreVec(r, i, horizontal=self._horizontal)
def selectCols(self, idx):
'''
idx is global index
'''
if not self._horizontal:
if not 0 in idx:
                raise
zone
Parameters
-----------
p1 : np.array
p2 : np.array
tol :
al1 :
al2 :
quadsel : 0 all quadrant
2 1
3 4
Returns
-------
edgelist
"""
x = self.pt[0, :]
y = self.pt[1, :]
#
        # quadrant selection
#
if (quadsel == 0):
u0 = np.arange(self.Np)
if (quadsel == 1):
u0 = np.nonzero((y > p1[1]) & (x > p1[0]))[0]
if (quadsel == 2):
u0 = np.nonzero((y > p1[1]) & (x <= p1[0]))[0]
if (quadsel == 3):
u0 = np.nonzero((y <= p1[1]) & (x <= p1[0]))[0]
if (quadsel == 4):
u0 = np.nonzero((y <= p1[1]) & (x > p1[0]))[0]
x_u0 = x[u0]
y_u0 = y[u0]
#
        # Swap points so that p1[0] <= p2[0]
#
if (p1[0] > p2[0]):
pt = p2
p2 = p1
p1 = pt
#
# Box length
#
Dx = p2[0] - p1[0]
Dy = p2[1] - p1[1]
L = np.sqrt(Dx ** 2 + Dy ** 2)
#
# p1 p2
#
if ((abs(Dx) > finfo(float).eps) & (abs(Dy) > finfo(float).eps)):
a = Dy / Dx
b = p1[1] - a * p1[0]
b1 = p1[1] + p1[0] / a
b2 = p2[1] + p2[0] / a
delta_b = tol * L / abs(Dx)
delta_b1 = al1 * L * L / abs(Dy)
delta_b2 = al2 * L * L / abs(Dy)
u1 = np.nonzero(y_u0 < a * x_u0 + b + delta_b / 2.)[0]
x_u1 = x_u0[u1]
y_u1 = y_u0[u1]
u2 = np.nonzero(y_u1 > a * x_u1 + b - delta_b / 2.)[0]
x_u2 = x_u1[u2]
y_u2 = y_u1[u2]
if (a > 0):
u3 = np.nonzero(y_u2 > -x_u2 / a + b1 - delta_b1)[0]
x_u3 = x_u2[u3]
y_u3 = y_u2[u3]
u4 = np.nonzero(y_u3 < -x_u3 / a + b2 + delta_b2)[0]
else:
u3 = np.nonzero(y_u2 < -x_u2 / a + b1 + delta_b1)[0]
x_u3 = x_u2[u3]
y_u3 = y_u2[u3]
u4 = np.nonzero(y_u3 > -x_u3 / a + b2 - delta_b2)[0]
x_u4 = x_u3[u4]
y_u4 = y_u3[u4]
#
# p1 p2 vertical
#
if (abs(Dx) <= finfo(float).eps):
u1 = np.nonzero(x < p1[0] + tol / 2.)[0]
x_u1 = x[u1]
y_u1 = y[u1]
u2 = np.nonzero(x_u1 > p1[0] - tol / 2.)[0]
y_u2 = y[u2]
if (p1[1] > p2[1]):
u3 = np.nonzero(y_u2 < p1[1] + al1 * L)[0]
y_u3 = y[u3]
u4 = np.nonzero(y_u3 > p2[1] - al2 * L)[0]
else:
u3 = np.nonzero(y_u2 < p2[1] + al2 * L)[0]
y_u3 = y[u3]
u4 = np.nonzero(y_u3 > p1[1] - al1 * L)[0]
#
# p1 p2 horizontal
#
if (abs(Dy) <= finfo(float).eps):
u1 = np.nonzero(y < p1[1] + tol / 2.)[0]
y_u1 = y[u1]
u2 = np.nonzero(y_u1 > p1[1] - tol / 2.)[0]
x_u2 = x[u2]
            if (p1[1] > p2[1]):
u3 = np.nonzero(x_u2 < p1[0] + al1 * L)[0]
x_u3 = x[u3]
u4 = np.nonzero(x_u3 > p2[0] - al2 * L)[0]
else:
u3 = np.nonzero(x_u2 < p2[0] + al2 * L)[0]
x_u3 = x[u3]
                u4 = np.nonzero(x_u3 > p1[0] - al1 * L)[0]
nodelist = u0[u1[u2[u3[u4]]]]
edgelist = np.arange(self.Ns)
edgelist = self.find_edge_list(edgelist, nodelist)
return(edgelist)
def nd2seg(self, ndlist):
""" convert node list to edge list
Parameters
----------
ndlist : list or ndarray
node list
Returns
-------
seglist : ndarray
edge list
Notes
-----
previously nd2ed
"""
if isinstance(ndlist, np.ndarray):
ndlist = ndlist.tolist()
seglist = []
# for n in ndlist:
# seglist = seglist + self.Gs.adj[n].keys()
#l = map(lambda x: self.Gs.adj[x].keys(), ndlist)
l = [ list(dict(self.Gs.adj[x]).keys()) for x in ndlist ]
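# in Gs the neighbours of a point node are the segments attached to it,
# so the adjacency keys collected here are segment numbers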
seglist = []
for y in l :
seglist = seglist + y
#reduce(lambda x, y: x + y, l)
return(np.unique(np.array(seglist)))
def ed2nd(self, edlist):
""" convert edgelist to nodelist
Parameters
----------
edlist : list or ndarray
edge list
Returns
-------
ndlist : ndarray
node list
"""
if isinstance(edlist, np.ndarray):
edlist = edlist.tolist()
# list concatenation mechanism
ndlist = []
for e in edlist:
ndlist = ndlist + list(self.Gs.adj[e].keys())
return(np.unique(ndlist))
def get_zone(self, ax):
""" get point list and segment list in a rectangular zone
Parameters
----------
ax : list ot tuple
[xmin,xmax,ymin,ymax]
Returns
-------
ptlist,seglist
"""
xmin = ax[0]
xmax = ax[1]
ymin = ax[2]
ymax = ax[3]
ptlist = []
for n in self.Gs.node.keys():
if n < 0:
x = self.Gs.pos[n][0]
y = self.Gs.pos[n][1]
if ((x > xmin) & (x < xmax) & (y > ymin) & (y < ymax)):
ptlist.append(n)
seglist = self.nd2seg(ptlist)
return ptlist, seglist
def get_points(self, boxorpol , tol = 0.05):
""" get points list and segments list in a polygonal zone
Parameters
----------
boxorpol : list or tuple
[xmin,xmax,ymin,ymax]
or shapely Polygon
Returns
-------
(pt,ke) : points coordinates and index
pt : (2xn)
ke : (,n)
Notes
-----
This method returns all the existing Layout point inside a box zone or
the boundary of a polygon
"""
if type(boxorpol) == geu.Polygon:
N = len(boxorpol.vnodes)/2
eax = boxorpol.bounds
xmin = eax[0] - tol
xmax = eax[2] + tol
ymin = eax[1] - tol
ymax = eax[3] + tol
else:
xmin = boxorpol[0]
xmax = boxorpol[1]
ymin = boxorpol[2]
ymax = boxorpol[3]
#
# layout points
#
x = self.pt[0,:]
y = self.pt[1,:]
uxmin = (x>= xmin)
uymin = (y>= ymin)
uxmax = (x<= xmax)
uymax = (y<= ymax)
#
# k True when all conditions are True simultaneously
#
k = np.where(uxmin*uymin*uxmax*uymax==1)[0]
#pt = np.array(zip(x[k],y[k])).T
# pt (2 x N )
pt = np.vstack((x[k],y[k]))
ke = self.upnt[k]
# if(pt.shape[1]<N):
# plt.ion()
# fig,a=self.showG('s')
# a.plot(pt[0,:],pt[1,:],'or')
# a.plot(eax[0],eax[1],'or')
# plt.show()
# ux = ((x>=xmin).all() and (x<=xmax).all())
# uy = ((y>=ymin).all() and (y<=ymax).all())
return((pt,ke))
def angleonlink3(self, p1=np.array([0, 0, 1]), p2=np.array([10, 3, 1])):
""" return (seglist,angle) between p1 and p2
Parameters
----------
p1 : np.array (3 x N) or (3,)
p2 : np.array (3 x N) or (3,)
Returns
-------
data : structured array x N
'i' : index
's' : slab
'a' : angle (in radians)
Examples
--------
>>> from pylayers.gis.layout import *
>>> L = Layout('DLR2.lay')
>>> p1 = np.array([0,0,1])
>>> p2 = np.array([10,3,2])
>>> data = L.angleonlink3(p1,p2)
#array([(0, 141, 1.2793395519256592), (0, 62, 0.29145678877830505),
(0, 65, 0.29145678877830505)],
dtype=[('i', '<i8'), ('s', '<i8'), ('a', '<f4')])
See Also
--------
antprop.loss.Losst
"""
sh1 = np.shape(p1)
sh2 = np.shape(p2)
assert sh1[0] == 3
assert sh2[0] == 3
if (len(sh1) < 2) & (len(sh2) > 1):
p1 = np.outer(p1, np.ones(sh2[1]))
if (len(sh2) < 2) & (len(sh1) > 1):
p2 = np.outer(p2, np.ones(sh1[1]))
if (len(sh2) < 2) & (len(sh1) < 2):
p1 = np.outer(p1, np.ones(1))
p2 = np.outer(p2, np.ones(1))
# pdb.set_trace()
# 3 x N
u = p1 - p2
# 1 x N
nu = np.sqrt(np.sum(u * u, axis=0))
# 3 x N
un = u / nu[np.newaxis, :]
#
# warning : seglist contains the segment number in tahe not in Gs
#
seglist = np.unique(self.seginframe2(p1[0:2], p2[0:2])).astype(int)
#seglist = np.unique(self.seginframe(p1[0:2], p2[0:2]))
upos = np.nonzero(seglist >= 0)[0]
uneg = np.nonzero(seglist < 0)[0]
# nNLOS = len(uneg) + 1
# # retrieve the number of segments per link
# if nNLOS > 1:
# llink = np.hstack(
# (uneg[0], np.hstack((uneg[1:], array([len(seglist)]))) - uneg - 1))
# else:
# llink = np.array([len(seglist)])
# [(link id,number of seg),...]
# nl = zip(np.arange(nlink),llink)n
seglist = seglist[upos]
npta = self.tahe[0, seglist]
nphe = self.tahe[1, seglist]
Pta = self.pt[:, npta]
Phe = self.pt[:, nphe]
Nscreen = len(npta)
# get segment height bounds
zmin = np.array([self.Gs.node[x]['z'][0]
for x in self.tsg[seglist]])
zmax = np.array([self.Gs.node[x]['z'][1]
for x in self.tsg[seglist]])
# centroid of the screen
Pg = np.vstack(((Phe + Pta) / 2., (zmax + zmin) / 2.))
Ptahe = Phe - Pta
L1 = np.sqrt(np.sum(Ptahe * Ptahe, axis=0))
# 3 x Nscreen U1 is in plane xy
U1 = np.vstack((Ptahe / L1, np.zeros(Nscreen)))
L2 = zmax - zmin
U2 = np.array([0, 0, 1])[:, None] # 3 x 1 U2 is along z
#
# p1 : 3 x Ng
# p2 : 3 x Ng
# Pg : 3 x Nscreen
# U1 : 3 x Nscreen
#
-> edgedb.AsyncIOConnection:
conn_args = self.get_connect_args(**kwargs)
return await tconn.async_connect_test_client(**conn_args)
async def connect_test_protocol(self, **kwargs):
conn_args = self.get_connect_args(**kwargs)
conn = await test_protocol.new_connection(**conn_args)
await conn.connect()
return conn
class _EdgeDBServer:
proc: Optional[asyncio.Process]
def __init__(
self,
*,
bind_addrs: Tuple[str, ...] = ('localhost',),
bootstrap_command: Optional[str],
auto_shutdown: bool,
adjacent_to: Optional[edgedb.AsyncIOConnection],
max_allowed_connections: Optional[int],
compiler_pool_size: int,
debug: bool,
backend_dsn: Optional[str] = None,
data_dir: Optional[str] = None,
runstate_dir: Optional[str] = None,
reset_auth: Optional[bool] = None,
tenant_id: Optional[str] = None,
security: Optional[edgedb_args.ServerSecurityMode] = None,
default_auth_method: Optional[edgedb_args.ServerAuthMethod] = None,
binary_endpoint_security: Optional[
edgedb_args.ServerEndpointSecurityMode] = None,
http_endpoint_security: Optional[
edgedb_args.ServerEndpointSecurityMode] = None, # see __aexit__
enable_backend_adaptive_ha: bool = False,
tls_cert_file: Optional[os.PathLike] = None,
tls_key_file: Optional[os.PathLike] = None,
tls_cert_mode: edgedb_args.ServerTlsCertMode = (
edgedb_args.ServerTlsCertMode.SelfSigned),
env: Optional[Dict[str, str]] = None,
) -> None:
self.bind_addrs = bind_addrs
self.auto_shutdown = auto_shutdown
self.bootstrap_command = bootstrap_command
self.adjacent_to = adjacent_to
self.max_allowed_connections = max_allowed_connections
self.compiler_pool_size = compiler_pool_size
self.debug = debug
self.backend_dsn = backend_dsn
self.data_dir = data_dir
self.runstate_dir = runstate_dir
self.reset_auth = reset_auth
self.tenant_id = tenant_id
self.proc = None
self.data = None
self.security = security
self.default_auth_method = default_auth_method
self.binary_endpoint_security = binary_endpoint_security
self.http_endpoint_security = http_endpoint_security
self.enable_backend_adaptive_ha = enable_backend_adaptive_ha
self.tls_cert_file = tls_cert_file
self.tls_key_file = tls_key_file
self.tls_cert_mode = tls_cert_mode
self.env = env
async def wait_for_server_readiness(self, stream: asyncio.StreamReader):
while True:
line = await stream.readline()
if self.debug:
print(line.decode())
if not line:
raise RuntimeError("EdgeDB server terminated")
if line.startswith(b'READY='):
break
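# the READY= status line carries a JSON payload describing the running
# server (port, tls_cert_file, ...) which is returned to the caller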
_, _, dataline = line.decode().partition('=')
return json.loads(dataline)
async def kill_process(self, proc: asyncio.Process):
proc.terminate()
try:
await asyncio.wait_for(proc.wait(), timeout=20)
except TimeoutError:
proc.kill()
async def _shutdown(self, exc: Optional[Exception] = None):
if self.proc is None:
return
if self.proc.returncode is None:
if self.auto_shutdown and exc is None:
try:
await asyncio.wait_for(self.proc.wait(), timeout=60 * 5)
except TimeoutError:
self.proc.kill()
raise AssertionError(
'server did not auto-shutdown in 5 minutes')
else:
await self.kill_process(self.proc)
# asyncio, hello?
# Workaround SubprocessProtocol.__del__ weirdly
# complaining that loop is closed.
self.proc._transport.close()
self.proc = None
async def __aenter__(self):
status_r, status_w = socket.socketpair()
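# the write end of this socket pair is handed to the server process via
# --emit-server-status fd://..., and readiness lines are read back on status_r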
cmd = [
sys.executable, '-m', 'edb.server.main',
'--port', 'auto',
'--testmode',
'--emit-server-status', f'fd://{status_w.fileno()}',
'--compiler-pool-size', str(self.compiler_pool_size),
'--tls-cert-mode', str(self.tls_cert_mode),
]
for addr in self.bind_addrs:
cmd.extend(('--bind-address', addr))
reset_auth = self.reset_auth
cmd.extend(['--log-level', 'd' if self.debug else 's'])
if self.max_allowed_connections is not None:
cmd.extend([
'--max-backend-connections', str(self.max_allowed_connections),
])
if self.backend_dsn is not None:
cmd.extend([
'--backend-dsn', self.backend_dsn,
])
elif self.adjacent_to is not None:
settings = self.adjacent_to.get_settings()
pgaddr = settings.get('pgaddr')
if pgaddr is None:
raise RuntimeError('test requires devmode')
pgaddr = json.loads(pgaddr)
pgdsn = (
f'postgres:///?user={pgaddr["user"]}&port={pgaddr["port"]}'
f'&host={pgaddr["host"]}'
)
cmd += [
'--backend-dsn', pgdsn
]
elif self.data_dir:
cmd += ['--data-dir', self.data_dir]
else:
cmd += ['--temp-dir']
if reset_auth is None:
reset_auth = True
if not reset_auth:
password = None
bootstrap_command = ''
else:
password = secrets.token_urlsafe()
bootstrap_command = f"""\
ALTER ROLE edgedb {{
SET password := '{password}';
}};
"""
if self.bootstrap_command is not None:
bootstrap_command += self.bootstrap_command
if bootstrap_command:
cmd += ['--bootstrap-command', bootstrap_command]
if self.auto_shutdown:
cmd += ['--auto-shutdown-after', '0']
if self.runstate_dir:
cmd += ['--runstate-dir', self.runstate_dir]
if self.tenant_id:
cmd += ['--tenant-id', self.tenant_id]
if self.security:
cmd += ['--security', str(self.security)]
if self.default_auth_method:
cmd += ['--default-auth-method', str(self.default_auth_method)]
if self.binary_endpoint_security:
cmd += ['--binary-endpoint-security',
str(self.binary_endpoint_security)]
if self.http_endpoint_security:
cmd += ['--http-endpoint-security',
str(self.http_endpoint_security)]
if self.enable_backend_adaptive_ha:
cmd += ['--enable-backend-adaptive-ha']
if self.tls_cert_file:
cmd += ['--tls-cert-file', self.tls_cert_file]
if self.tls_key_file:
cmd += ['--tls-key-file', self.tls_key_file]
if self.debug:
print(
f'Starting EdgeDB cluster with the following params:\n'
f'{" ".join(shlex.quote(c) for c in cmd)}'
)
env = os.environ.copy()
if self.env:
env.update(self.env)
stat_reader, stat_writer = await asyncio.open_connection(sock=status_r)
self.proc: asyncio.Process = await asyncio.create_subprocess_exec(
*cmd,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
pass_fds=(status_w.fileno(),),
)
status_task = asyncio.create_task(
asyncio.wait_for(
self.wait_for_server_readiness(stat_reader),
timeout=240,
),
)
try:
_, pending = await asyncio.wait(
[
status_task,
asyncio.create_task(self.proc.wait()),
],
return_when=asyncio.FIRST_COMPLETED,
)
except (Exception, asyncio.CancelledError):
try:
await self._shutdown()
finally:
raise
finally:
stat_writer.close()
status_w.close()
if pending:
for task in pending:
if not task.done():
task.cancel()
await asyncio.wait(pending, timeout=10)
if self.proc.returncode is not None:
output = (await self.proc.stdout.read()).decode().strip()
raise edgedb_cluster.ClusterError(output)
else:
assert status_task.done()
data = status_task.result()
return _EdgeDBServerData(
host='127.0.0.1',
port=data['port'],
password=password,
server_data=data,
tls_cert_file=data['tls_cert_file'],
)
async def __aexit__(self, exc_type, exc, tb):
try:
if (
(
self.http_endpoint_security
is edgedb_args.ServerEndpointSecurityMode.Optional
)
and
self.data is not None
and not self.auto_shutdown
):
# It's a good idea to test most of the ad-hoc test clusters
# for any errors in background tasks, as such tests usually
# test the functionality that involves notifications and
# other async events.
metrics = _fetch_metrics('127.0.0.1', self.data['port'])
errors = _extract_background_errors(metrics)
if errors:
raise AssertionError(
'server terminated with unexpected ' +
'background errors\n\n' +
errors
)
finally:
await self._shutdown(exc)
def start_edgedb_server(
*,
bind_addrs: tuple[str, ...] = ('localhost',),
auto_shutdown: bool=False,
bootstrap_command: Optional[str]=None,
max_allowed_connections: Optional[int]=10,
compiler_pool_size: int=2,
adjacent_to: Optional[edgedb.AsyncIOConnection]=None,
debug: bool=False,
backend_dsn: Optional[str] = None,
runstate_dir: Optional[str] = None,
data_dir: Optional[str] = None,
reset_auth: Optional[bool] = None,
tenant_id: Optional[str] = None,
security: Optional[edgedb_args.ServerSecurityMode] = None,
default_auth_method: Optional[edgedb_args.ServerAuthMethod] = None,
binary_endpoint_security: Optional[
edgedb_args.ServerEndpointSecurityMode] = None,
http_endpoint_security: Optional[
edgedb_args.ServerEndpointSecurityMode] = None,
enable_backend_adaptive_ha: bool = False,
tls_cert_file: Optional[os.PathLike] = None,
tls_key_file: Optional[os.PathLike] = None,
tls_cert_mode: edgedb_args.ServerTlsCertMode = (
edgedb_args.ServerTlsCertMode.SelfSigned),
env: Optional[Dict[str, str]] = None,
):
if not devmode.is_in_dev_mode() and not runstate_dir:
if backend_dsn or adjacent_to:
# We don't want to implicitly "fix the issue" for the test author
print('WARNING: starting an EdgeDB server with the default '
'runstate_dir; the test is likely to fail or hang. '
'Consider specifying the runstate_dir parameter.')
if adjacent_to and data_dir:
raise RuntimeError(
'adjacent_to and data_dir options are mutually exclusive')
if backend_dsn and data_dir:
raise RuntimeError(
'backend_dsn and data_dir options are mutually exclusive')
if backend_dsn and adjacent_to:
raise RuntimeError(
'backend_dsn and adjacent_to options are mutually exclusive')
if not runstate_dir and data_dir:
runstate_dir = data_dir
return _EdgeDBServer(
bind_addrs=bind_addrs,
auto_shutdown=auto_shutdown,
bootstrap_command=bootstrap_command,
max_allowed_connections=max_allowed_connections,
adjacent_to=adjacent_to,
compiler_pool_size=compiler_pool_size,
debug=debug,
backend_dsn=backend_dsn,
tenant_id=tenant_id,
data_dir=data_dir,
runstate_dir=runstate_dir,
reset_auth=reset_auth,
security=security,
default_auth_method=default_auth_method,
binary_endpoint_security=binary_endpoint_security,
http_endpoint_security=http_endpoint_security,
enable_backend_adaptive_ha=enable_backend_adaptive_ha,
tls_cert_file=tls_cert_file,
tls_key_file=tls_key_file,
tls_cert_mode=tls_cert_mode,
env=env,
)
def get_cases_by_shard(cases, selected_shard, total_shards, verbosity, stats):
if total_shards <= 1:
return cases
selected_shard -= 1 # starting from 0
new_test_est = 0.1 # default estimate if test is not found in stats
new_setup_est = 1 # default estimate if setup is not found in stats
# For logging
total_tests = 0
selected_tests = 0
total_est = 0
selected_est = 0
# Priority queue of tests grouped by setup script ordered by estimated
# running time of the groups. Order of tests within cases is preserved.
tests_by_setup = []
# Priority queue of individual tests ordered by estimated running time.
tests_with_est = []
# Prepare the source heaps
setup_count = 0
for case, tests in cases.items():
setup_script = getattr(case, 'get_setup_script', lambda: None)()
if setup_script and tests:
tests_per_setup = []
est_per_setup = setup_est = stats.get(
'setup::' + case.get_database_name(), (new_setup_est, 0),
)[0]
for test in tests:
total_tests += 1
est = stats.get(str(test), (new_test_est, 0))[0]
est_per_setup += est
tests_per_setup.append((est, test))
heapq.heappush(
tests_by_setup,
(-est_per_setup, setup_count, setup_est, tests_per_setup),
)
setup_count += 1
total_est += est_per_setup
else:
for test in tests:
total_tests += 1
est = stats.get(str(test), (new_test_est, 0))[0]
total_est += est
heapq.heappush(tests_with_est, (-est, total_tests, test))
target_est = total_est / total_shards # target running time of one shard
shards_est = [(0, shard, set()) for shard in range(total_shards)]
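# each shard entry is (accumulated estimated time, shard index,
# set of setup ids already charged to that shard)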
cases = {} # output
setup_to_alloc = set(range(setup_count)) # tracks first run of each setup
# Assign per-setup tests first
while tests_by_setup:
remaining_est, setup_id, setup_est, tests = heapq.heappop(
tests_by_setup,
)
est_acc, current, setups = heapq.heappop(shards_est)
# Add setup time
if setup_id not in setups:
setups.add(setup_id)
est_acc += setup_est
if current == selected_shard:
selected_est += setup_est
if setup_id in setup_to_alloc:
setup_to_alloc.remove(setup_id)
else:
# This means one more setup for the overall test run
target_est += setup_est / total_shards
# Add as much tests from this group to current shard as possible
while tests:
est, test = tests.pop(0)
est_acc += est # est is a positive number
remaining_est += est # remaining_est is a negative number
if current == selected_shard:
# Add the test to the result
_add_test(cases, test)
selected_tests += 1
selected_est += est
if est_acc >= target_est and -remaining_est > setup_est * 2:
# The current shard is full and the remaining tests would take more
# time than their setup, so push them back onto the heap so that
# they can be assigned to another shard
heapq.heappush(
tests_by_setup,
(remaining_est, setup_id, setup_est, tests),
)
break
heapq.heappush(shards_est, (est_acc, current, setups))
#!/usr/bin/python
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# ltr.py - LISP EID Traceroute Client - Trace the encap/decap paths
#
# Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>
#
# -s: Optional source EID.
# <destination-EID>: required parameter [<iid>] in front is optional
#
# This application is run on an xTR, typically an ITR or RTR, where the
# encapsulator adds to the ltr message the RLOC the ITR is encapsulating
# to. Then the decapsulator will decapsulate and swap the source and
# destination addresses to return the packet to the source-EID (running the
# client program). If the ETR is not the EID, then the packet will be re-
# encapsulated, in which case more data is added to the ltr message.
#
# ltr messages run in UDP on port 2434 (4342 backwards) and are returned
# to the client program.
#
# The LISP-Trace message takes the following path:
#
# (1) ltr sends LISP-TRACE packet from its EID to the EID of the ETR on
# port 2434. It builds a type=9 packet with a nonce and an empty JSON field.
#
# (2) ITR will look up destination EID as part of forwarding logic and add
# RLOC information to LISP-Trace message. The message is encapsulated to
# the ETR.
#
# (3) The ETR (or RTR) will decap packet. It will add information to the LISP-
# packet. If it is the destination EID, it will send the LISP-Trace packet
# using itself as the source and the original source as the destination.
#
# (4) The local ITR will encapsulate the packet and add RLOC information to
# the LISP-Trace packet. It encapsulates the return packet to the ETR.
#
# (5) The ETR decapsulates the packet and sends it to the ltr client so the
# accumulated JSON data can be displayed for the user.
#
# This functionality works on a chain of encapsulating tunnels to give the
# user what RLOCs are used and the arrival time of the packet. It allows an
# ltr client to not only determine path and latency of the network, but if
# the encapsulation paths are symmetric or asymmetric.
#
# If there is an error along the path, the node detecting the error will return
# the LISP-Trace packet to the RLOC of the originating ITR.
#
# The JSON format of a LISP-Trace packet is an array of dictionary arrays.
# The array will typically have 2 elements, one from ltr source to destination
# EID and one for the return path. Each dictionary array is keyed with "seid",
# "deid", and "paths". The array "paths" is the node data that is appended
# at each encapsulation hop. Note example below:
#
# [
# { "se" : "[<iid>]<orig-eid>", "de" : "[<iid>]<dest-eid>", "paths" : a
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] },
#
# { "se" : "[<iid>]<dest-eid>", "de" : "[<iid>]<orig-eid>", "paths" :
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] }
# ]
#
# Environment variable LISP_LTR_PORT is used to determine if the connection to
# the LISP API is done with a particular port. And if the port has a minus
# sign in front of it, it will use http rather https to connect to the
# lispers.net API. Environment variables LISP_LTR_USER and LISP_LTR_PW are
# used when lispers.net API is running with a password on username root.
#
#------------------------------------------------------------------------------
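#
# Illustrative sketch (not part of the original client, kept commented out so
# it stays ahead of the __future__ import below): given the decoded JSON array
# documented above, print one line per recorded hop. The keys ("se", "de",
# "paths", "n", "sr", "dr", "hn") come from the format description; the helper
# name is hypothetical.
#
# def print_ltr_trace(json_text):
#     for leg in json.loads(json_text):
#         print("%s -> %s" % (leg["se"], leg["de"]))
#         for hop in leg["paths"]:
#             print("  %-3s %s -> %s (%s)" % (hop["n"], hop["sr"],
#                                             hop["dr"], hop.get("hn", "?")))
#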
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import hex
import sys
import struct
import random
import socket
import json
import time
import os
import binascii
from subprocess import getoutput

II1iII1i = "https"
oO0oIIII = 8080

II = os.getenv("LISP_LTR_PORT")
if (II != None):
    if (II[0] == "-"):
        II1iII1i = "http"
        II = II[1::]
    if (II.isdigit() == False):
        print("Invalid value for env variable LISP_LTR_PORT")
        exit(1)
    oO0oIIII = int(II)

OOo = os.getenv("LISP_LTR_USER")
Ii1IIii11 = os.getenv("LISP_LTR_PW")
if (OOo == None): OOo = "root"
if (Ii1IIii11 == None): Ii1IIii11 = ""

OOo000 = 2434

def oO(rloc, port):
    OO0OOooOoO0Oo = socket.htonl(0x90000000 + port)
    iiIIiIiIi = struct.pack("I", OO0OOooOoO0Oo)
    OooO0 = rloc.split(".")
    II11iiii1Ii = int(OooO0[0]) << 24
    II11iiii1Ii += int(OooO0[1]) << 16
    II11iiii1Ii += int(OooO0[2]) << 8
    II11iiii1Ii += int(OooO0[3])
    iiIIiIiIi += struct.pack("I", socket.htonl(II11iiii1Ii)
from tkinter import *
from tkinter import ttk
from pickle import dump, load
from PIL import Image, ImageTk
from cryptography.fernet import Fernet
import pyperclip as p
from random import randint
from os import remove, rename, popen
from time import ctime
import pyqrcode
import png
import webbrowser
def copied_display():
global img2
b2 = Button(
image=img2,
borderwidth=0,
highlightthickness=0,
relief="flat")
b2.place(
x=150, y=417,
width=161,
height=55)
window.after(1000, b2.destroy)
def error_display():
global img3
b3 = Button(
image=img3,
borderwidth=0,
highlightthickness=0,
relief="flat")
b3.place(
x=108, y=488,
width=261,
height=88)
window.after(1500, b3.destroy)
def pwd():
global qrcode1
password = ""
try:
l = int(entry0.get())
special = entry1.get()
characters = "<KEY>"
if l >= 4:
if special == 'No' or special == "N" or special == "no" or special == "n":
for i in range(l):
char = characters[randint(0, 61)]
password += char
p.copy(password)
copied_display()
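# encode the new password and the target website in a QR code, show it
# for 60 seconds, then delete the temporary PNG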
qrcode = pyqrcode.create(
"Password: {0}\nWebsite: {1}".format(password, entry2.get()))
qrcode.png("generated_qrcode.png", scale=4)
qrcode1 = PhotoImage(file="generated_qrcode.png")
qrcode_button = Button(
image=qrcode1,
borderwidth=0,
highlightthickness=0,
relief="flat")
qrcode_button.place(
x=130,
y=220)
remove("generated_qrcode.png")
window.after(60000, qrcode_button.destroy)
elif special == 'yes' or special == 'Yes' or special == "y" or special == 'Y':
special_characters = "!#$%&'()*+,-./:;<=>?@[]^_`{|}~"
for i in range(l):
choices = [True, False]
choice = choices[randint(0, 1)]
if choice == True:
char = special_characters[randint(0, 29)]
password += char
elif choice == False:
char = characters[randint(0, 61)]
password += char
p.copy(password)
copied_display()
qrcode = pyqrcode.create(
"Password: {0}\nWebsite: {1}".format(password, entry2.get()))
qrcode.png("generated_qrcode.png", scale=4)
qrcode1 = PhotoImage(file="generated_qrcode.png")
qrcode_button = Button(window,
image=qrcode1,
borderwidth=0,
highlightthickness=0,
relief="flat")
qrcode_button.place(
x=130,
y=220)
remove("generated_qrcode.png")
window.after(60000, qrcode_button.destroy)
else:
specify_error = Label(
window, text="Invalid Input", font=("Verdana", 10), fg='red')
specify_error.place(x=800, y=260)
if style.theme_use() == 'dark':
specify_error.configure(bg="#333333")
window.after(1000, specify_error.destroy)
else:
error_display()
except ValueError:
error_display()
if password:
f = open("Passwords.bin", "ab")
data = [ctime(), password, entry2.get()]
dump(data, f)
f.close()
def shw_pwds():
global tree
win = Toplevel()
win.title("Passwords")
win.iconbitmap('./resource/passwordbook.ico')
win.geometry("925x620")
if style.theme_use() == 'dark':
win.configure(background="#333333")
try:
with open("Passwords.bin", 'rb') as f:
try:
data = []
while True:
data.append(load(f))
except EOFError:
pass
except FileNotFoundError:
label5 = ttk.Label(win,
font=("Broadway", 48),
text="File Not Found")
label5.place(
y=250,
x=260)
win.after(500, win.destroy)
try:
if data != []:
tree = ttk.Treeview(win, column=(
"c1", "c2", "c3"), show='headings', height=20)
tree.column("# 1", anchor=CENTER)
tree.heading("# 1", text="Time")
tree.column("# 2", anchor=CENTER)
tree.heading("# 2", text="Password")
tree.column("#3", anchor=CENTER)
tree.heading("#3", text="Website")
except:
pass
try:
n = 1
for i in data:
tree.insert('', 'end', text=n, values=(
i[0], i[1], i[2]), tags=[i[1], i[2]])
n += 1
except UnboundLocalError:
pass
try:
def browse(event):
selected_item = tree.focus()
item = tree.item(selected_item)['values']
url = item[2]
webbrowser.open(url)
def ctrl_c(event):
global live_qrcode_img
selected_item = tree.focus()
item = tree.item(selected_item)['values']
p.copy(item[1])
live_qrcode = pyqrcode.create(
"Password: {0}\nWebsite: {1}".format(item[1], item[2]))
live_qrcode.png("live_generated_qrcode.png", scale=2)
live_qrcode_img = PhotoImage(file="live_generated_qrcode.png")
live_qrcode_label = Label(win,
image=live_qrcode_img)
live_qrcode_label.place(
x=770,
y=500)
remove("live_generated_qrcode.png")
win.after(60000, live_qrcode_label.destroy)
tree.place(
x=12,
y=15,
width=900)
tree.bind("<Triple-1>", browse)
tree.bind("<Double-1>", ctrl_c)
def update():
global entry1, entry2, label1, save_btn
try:
win.after(0, entry3.destroy)
win.after(0, label2.destroy)
win.after(0, search_in_btn.destroy)
win.after(0, reset_btn.destroy)
except:
pass
def save():
with open("Passwords.bin", 'rb') as f:
try:
data = []
while True:
data.append(load(f))
except EOFError:
pass
selected_item = tree.focus()
item = tree.item(selected_item)['values']
n = tree.item(selected_item)['text']
with open("Passwords.bin", "wb") as f:
for i in data:
if i[1] == item[1]:
i[1] = entry1.get()
i[2] = entry2.get()
dump(i, f)
tree.delete(tree.selection()[0])
tree.insert('', 'end', text=n,
values=(ctime(), i[1], i[2]))
elif i[1] != item[1]:
dump(i, f)
win.after(0, label1.destroy)
win.after(0, entry1.destroy)
win.after(0, entry2.destroy)
win.after(0, save_btn.destroy)
if tree.selection():
selected_item = tree.focus()
item = tree.item(selected_item)['values']
save_btn = ttk.Button(win, text="Save", command=save)
label1 = Label(win,
text=item[0])
if style.theme_use() == 'dark':
label1.configure(bg="#333333", fg='white')
entry1 = ttk.Entry(win,
width=43)
entry1.insert(0, item[1])
entry2 = ttk.Entry(win,
width=43)
entry2.insert(0, item[2])
label1.place(
y=500,
x=300)
entry1.place(
y=525,
x=300)
entry2.place(
y=560,
x=300)
save_btn.place(
y=560,
x=580)
else:
select_record_label = Label(
win, text="Select a record to update", font=("Verdana", 10), fg='red')
if style.theme_use() == 'dark':
select_record_label.configure(bg="#333333")
select_record_label.place(x=300, y=525)
win.after(1000, select_record_label.destroy)
def delete():
selected_item = tree.focus()
item = tree.item(selected_item)['values']
with open("Passwords.bin", "rb") as f:
a = []
try:
while True:
a.append(load(f))
except EOFError:
pass
with open("Edited.bin", "wb") as f:
for i in a:
if i != item:
dump(i, f)
remove("Passwords.bin")
rename("Edited.bin", "Passwords.bin")
tree.delete(tree.selection()[0])
def delete_all(event):
remove("key.key")
remove('password_file.key')
remove("Passwords.bin")
label4 = Label(win,
width=43,
text="Deleted Successfully",
font=("Verdana", 10), fg='red')
if style.theme_use() == 'dark':
label4.configure(bg="#333333", fg='white')
label4.place(
y=500,
x=250)
tree.delete(*tree.get_children())
win.update()
def search():
global entry3, label2, search_in_btn, reset_btn
try:
win.after(0, entry1.destroy)
win.after(0, entry2.destroy)
win.after(0, label1.destroy)
win.after(0, save_btn.destroy)
except:
pass
def reset():
global tree, up_data
tree.delete(*tree.get_children())
tree = ttk.Treeview(win, column=(
"c1", "c2", "c3"), show='headings', height=20)
tree.column("# 1", anchor=CENTER)
tree.heading("# 1", text="Time")
tree.column("# 2", anchor=CENTER)
tree.heading("# 2", text="Password")
tree.column("#3", anchor=CENTER)
tree.heading("#3", text="Website")
up_data = []
with open("Passwords.bin", 'rb') as f:
try:
while True:
up_data.append(load(f))
except EOFError:
pass
n = 1
for i in up_data:
tree.insert('', 'end', text=n, values=(
i[0], i[1], i[2]), tags=[i[1], i[2]])
n += 1
tree.place(
x=12,
y=15,
width=900)
tree.bind("<Triple-1>", browse)
tree.bind("<Double-1>", ctrl_c)
win.update()
win.after(1, reset_btn.destroy)
def search_in():
global tree
full_data = []
with open("Passwords.bin", 'rb') as f:
try:
while True:
full_data.append(load(f))
except EOFError:
pass
matching_data = []
for i in full_data:
if entry3.get() == i[1]:
matching_data.append(i)
elif entry3.get() == i[2]:
matching_data.append(i)
tree.delete(*tree.get_children())
tree = ttk.Treeview(win, column=(
"c1", "c2", "c3"), show='headings', height=20)
tree.column("# 1", anchor=CENTER)
tree.heading("# 1", text="Time")
tree.column("# 2", anchor=CENTER)
tree.heading("# 2", text="Password")
tree.column("#3", anchor=CENTER)
tree.heading("#3", text="Website")
n = 1
for i in matching_data:
tree.insert('', 'end', text=n, values=(
i[0], i[1], i[2]), tags=[i[1], i[2]])
n += 1
tree.place(
x=12,
y=15,
width=900)
tree.bind("<Triple-1>", browse)
tree.bind("<Double-1>", ctrl_c)
win.update()
win.after(1, entry3.destroy)
win.after(1, label2.destroy)
win.after(1, search_in_btn.destroy)
entry3 = ttk.Entry(win,
width=50)
entry3.place(
y=500,
x=230)
label2 = ttk.Label(win,
width=27,
text="Search by website or password")
label2.place(
y=525,
x=230)
search_in_btn = ttk.Button(win, text="Search", command=search_in)
search_in_btn.place(
x=450,
y=538)
reset_btn = ttk.Button(win, text="Reset", command=reset)
reset_btn.place(
x=680,
y=538)
update_btn = ttk.Button(win, text="Update", command=update)
update_btn.place(
x=40,
y=500)
del_btn = ttk.Button(win, text="Delete", command=delete)
del_btn.place(
x=130,
y=500)
del_all_btn = ttk.Button(win, text="Delete All")
del_all_btn.bind('<Double 1>', delete_all)
del_all_btn.place(
x=40,
y=540)
search_btn = ttk.Button(win, text="Search", command=search)
search_btn.place(
x=130,
y=540)
except UnboundLocalError:
win.destroy()
win.resizable(False, False)
win.mainloop()
def Current_User(decryption_key, b, c):
global open_eye, closed_eye, open_eye_bright, closed_eye_bright
def forgot(event):
def auth_forgot():
if question_answer.get() == c:
window4.destroy()
remove('password_file.key')
remove('key.key')
remove('security_question_file.key')
New_User()
else:
Invalid_Password_label_1 = Label(
window4, text="Invalid", fg='red')
if style.theme_use() == 'dark':
Invalid_Password_label_1.configure(bg="#333333")
Invalid_Password_label_1.place(x=50, y=80)
window4.after(500, Invalid_Password_label_1.destroy)
window1.destroy()
window4 = Toplevel()
window4.title("Forgot Password")
window4.iconbitmap('./resource/forgot.ico')
window4.geometry('290x120')
if style.theme_use() == 'dark':
window4.configure(background="#333333")
question_label = Label(
window4, text="Where did you attend your high school?")
question_label.place(x=10, y=10)
if style.theme_use() == 'dark':
question_label.configure(bg="#333333", fg="white")
question_answer = ttk.Entry(window4, show="*", width=40)
question_answer.place(x=10, y=40)
def change_eye_forgot_1(event):
eye_icon_1.configure(image=open_eye)
if style.theme_use() == "dark":
eye_icon_1.configure(image=open_eye_bright)
eye_icon_1.bind("<Button>", change_eye_forgot)
question_answer.configure(show="*")
def change_eye_forgot(event):
eye_icon_1.configure(image=closed_eye)
if style.theme_use() == 'dark':
eye_icon_1.configure(image=closed_eye_bright)
eye_icon_1.bind("<Button>", change_eye_forgot_1)
question_answer.configure(show="")
eye_icon_1 = Label(window4, image=open_eye)
if style.theme_use() == 'dark':
eye_icon_1.configure(image=open_eye_bright)
eye_icon_1.place(x=250, y=11)
eye_icon_1.bind("<Button>", change_eye_forgot)
if style.theme_use() == 'dark':
eye_icon_1.configure(bg="#333333")
Save_Button = ttk.Button(window4, text="Continue", command=auth_forgot)
Save_Button.place(x=170, y=80)
window4.resizable(False, False)
window4.mainloop()
window1 = Toplevel()
window1.title("Login")
window1.iconbitmap('./resource/login.ico')
window1.geometry("300x100")
if style.theme_use() == 'dark':
window1.configure(background="#333333")
Password_label = Label(window1, text="Password")
Password_label.place(x=20, y=15)
forgot_password_label = Label(window1, text='Forgot Password', fg='red')
forgot_password_label.place(x=50, y=50)
if style.theme_use() == 'dark':
Password_label.configure(bg="#333333", fg='white')
forgot_password_label.configure(bg="#333333")
Password = ttk.Entry(window1, show="*", width=26)
Password.place(x=84, y=10)
def change_eye_1(event):
eye_icon.configure(image=open_eye)
if style.theme_use() == 'dark':
eye_icon.configure(image=open_eye_bright)
eye_icon.bind("<Button>", change_eye)
Password.configure(show="*")
def change_eye(event):
eye_icon.configure(image=closed_eye)
if style.theme_use() == 'dark':
eye_icon.configure(image=closed_eye_bright)
eye_icon.bind("<Button>", change_eye_1)
Password.configure(show="")
eye_icon = Label(window1, image=open_eye)
if style.theme_use() == 'dark':
eye_icon.configure(image=open_eye_bright)
eye_icon.place(x=265, y=14)
eye_icon.bind("<Button>", change_eye)
if style.theme_use() == 'dark':
eye_icon.configure(bg="#333333")
forgot_password_label.bind("<Button>", forgot)
def invalid_password():
Invalid_Password_label = Label(
window1, text="Invalid Password", fg='red')
if style.theme_use() == 'dark':
Invalid_Password_label.configure(bg="#333333")
Invalid_Password_label.place(x=50, y=50)
window1.after(500, Invalid_Password_label.destroy)
def correct_password():
window1.destroy()
shw_pwds()
def authentication():
if Password.get() == decryption_key.decrypt(b).decode():
correct_password()
else:
invalid_password()
Continue_button = ttk.Button(
window1, width=10, text="Continue", command=authentication)
Continue_button.place(x=170, y=50)
window1.resizable(False, False)
window1.mainloop()
def New_User():
global Password, open_eye, closed_eye
window2 = Toplevel()
window2.title("Sign Up")
window2.geometry("290x200")
window2.iconbitmap('./resource/signup.ico')
if style.theme_use() == 'dark':
window2.configure(background="#333333")
def check_passwords():
if New_Password.get() == "" or Re_enter_Password.get() == "" or Security_question.get() == "":
invalid_input = Label(
window2, text="Entries can't\nbe empty", font=("Verdana", 10), fg='red')
if style.theme_use() == 'dark':
invalid_input.configure(bg="#333333")
invalid_input.place(x=20, y=150)
window2.after(500, invalid_input.destroy)
elif New_Password.get() == Re_enter_Password.get():
Password = New_Password.get()
key = Fernet.generate_key()
security_answer = Security_question.get()
key_file = open("key.key", 'wb')
encryption_key = Fernet(key)
e_password = encryption_key.encrypt(Password.encode())
dump(key, key_file)
key_file.close()
password_file = open("password_file.key", 'wb')
dump(e_password, password_file)
security_question_file = open("security_question_file.key", 'wb')
dump(security_answer, security_question_file)
password_file.close()
security_question_file.close()
window2.destroy()
shw_pwds()
elif New_Password.get() != Re_enter_Password.get():
no_matchLabel = Label(
window2, text="Passwords not\nmatching", font=('Verdana', 10), fg='red')
if style.theme_use() == 'dark':
no_matchLabel.configure(bg="#333333")
no_matchLabel.place(x=20, y=150)
window2.after(500, no_matchLabel.destroy)
Password_label = Label(window2, text="Password")
Password_label.place(x=20, y=15)
New_Password = ttk.Entry(window2, show="*", width=26)
New_Password.place(x=84, y=10)
| |
) -> List[DockerParameter]:
"""Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings
:param with_labels: Whether to build docker parameters with or without labels
:returns: A list of parameters to be added to docker run"""
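# for example, an (illustrative) entry {"key": "memory-swap", "value": "64m"}
# is rendered as "--memory-swap=64m" on the docker run command line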
parameters: List[DockerParameter] = [
{"key": "memory-swap", "value": self.get_mem_swap()},
{"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
{"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
]
if self.use_docker_disk_quota(system_paasta_config=system_paasta_config):
parameters.append(
{
"key": "storage-opt",
"value": f"size={int(self.get_disk() * 1024 * 1024)}",
}
)
if with_labels:
parameters.extend(
[
{"key": "label", "value": "paasta_service=%s" % self.service},
{"key": "label", "value": "paasta_instance=%s" % self.instance},
]
)
extra_docker_args = self.get_extra_docker_args()
if extra_docker_args:
for key, value in extra_docker_args.items():
parameters.extend([{"key": key, "value": value}])
parameters.extend(self.get_cap_add())
parameters.extend(self.get_docker_init())
parameters.extend(self.get_cap_drop())
return parameters
def use_docker_disk_quota(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> bool:
if system_paasta_config is None:
system_paasta_config = load_system_paasta_config()
return system_paasta_config.get_enforce_disk_quota()
def get_docker_init(self) -> Iterable[DockerParameter]:
return [{"key": "init", "value": "true"}]
def get_disk(self, default: float = 1024) -> float:
"""Gets the amount of disk space in MiB required from the service's configuration.
Defaults to 1024 (1GiB) if no value is specified in the config.
:returns: The amount of disk space specified by the config, 1024 MiB if not specified"""
disk = self.config_dict.get("disk", default)
return disk
def get_gpus(self) -> Optional[int]:
"""Gets the number of gpus required from the service's configuration.
Default to None if no value is specified in the config.
:returns: The number of gpus specified by the config, 0 if not specified"""
gpus = self.config_dict.get("gpus", None)
return gpus
def get_container_type(self) -> Optional[str]:
"""Get Mesos containerizer type.
Default to DOCKER if gpus are not used.
:returns: Mesos containerizer type, DOCKER or MESOS"""
if self.get_gpus() is not None:
container_type = "MESOS"
else:
container_type = "DOCKER"
return container_type
def get_cmd(self) -> Optional[Union[str, List[str]]]:
"""Get the docker cmd specified in the service's configuration.
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("cmd", None)
def get_instance_type(self) -> Optional[str]:
return getattr(self, "config_filename_prefix", None)
def get_env_dictionary(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
"""A dictionary of key/value pairs that represent environment variables
to be injected to the container environment"""
env = {
"PAASTA_SERVICE": self.service,
"PAASTA_INSTANCE": self.instance,
"PAASTA_CLUSTER": self.cluster,
"PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
"PAASTA_DOCKER_IMAGE": self.get_docker_image(),
"PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
"PAASTA_RESOURCE_MEM": str(self.get_mem()),
"PAASTA_RESOURCE_DISK": str(self.get_disk()),
}
if self.get_gpus() is not None:
env["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
try:
env["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(
self.get_docker_url(system_paasta_config=system_paasta_config)
)
except Exception:
pass
team = self.get_team()
if team:
env["PAASTA_MONITORING_TEAM"] = team
instance_type = self.get_instance_type()
if instance_type:
env["PAASTA_INSTANCE_TYPE"] = instance_type
user_env = self.config_dict.get("env", {})
env.update(user_env)
return {str(k): str(v) for (k, v) in env.items()}
def get_env(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
"""Basic get_env that simply returns the basic env, other classes
might need to override this getter for more implementation-specific
env getting"""
return self.get_env_dictionary(system_paasta_config=system_paasta_config)
def get_args(self) -> Optional[List[str]]:
"""Get the docker args specified in the service's configuration.
If not specified in the config and if cmd is not specified, defaults to an empty array.
If not specified in the config but cmd is specified, defaults to null.
If specified in the config and if cmd is also specified, throws an exception. Only one may be specified.
:param service_config: The service instance's configuration dictionary
:returns: An array of args specified in the config,
``[]`` if not specified and if cmd is not specified,
otherwise None if not specified but cmd is specified"""
if self.get_cmd() is None:
return self.config_dict.get("args", [])
else:
args = self.config_dict.get("args", None)
if args is None:
return args
else:
# TODO validation stuff like this should be moved into a check_*
raise InvalidInstanceConfig(
"Instance configuration can specify cmd or args, but not both."
)
def get_monitoring(self) -> MonitoringDict:
"""Get monitoring overrides defined for the given instance"""
return self.config_dict.get("monitoring", {})
def get_deploy_constraints(
self,
blacklist: DeployBlacklist,
whitelist: DeployWhitelist,
system_deploy_blacklist: DeployBlacklist,
system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Return the combination of deploy_blacklist and deploy_whitelist
as a list of constraints.
"""
return (
deploy_blacklist_to_constraints(blacklist)
+ deploy_whitelist_to_constraints(whitelist)
+ deploy_blacklist_to_constraints(system_deploy_blacklist)
+ deploy_whitelist_to_constraints(system_deploy_whitelist)
)
def get_deploy_blacklist(self) -> DeployBlacklist:
"""The deploy blacklist is a list of lists, where the lists indicate
which locations the service should not be deployed"""
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
def get_deploy_whitelist(self) -> DeployWhitelist:
"""The deploy whitelist is a tuple of (location_type, [allowed value, allowed value, ...]).
To have tasks scheduled on it, a host must be covered by the deploy whitelist (if present) and not excluded by
the deploy blacklist."""
return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_docker_image(self) -> str:
"""Get the docker image name (with tag) for a given service branch from
a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["docker_image"]
else:
return ""
def get_docker_url(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
"""Compose the docker url.
:returns: '<registry_uri>/<docker_image>'
"""
registry_uri = self.get_docker_registry(
system_paasta_config=system_paasta_config
)
docker_image = self.get_docker_image()
if not docker_image:
raise NoDockerImageError(
"Docker url not available because there is no docker_image"
)
docker_url = f"{registry_uri}/{docker_image}"
return docker_url
def get_desired_state(self) -> str:
"""Get the desired state (either 'start' or 'stop') for a given service
branch from a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["desired_state"]
else:
return "start"
def get_force_bounce(self) -> Optional[str]:
"""Get the force_bounce token for a given service branch from a generated
deployments.json file. This is a token that, when changed, indicates that
the instance should be recreated and bounced, even if no other
parameters have changed. This may be None or a string, generally a
timestamp.
"""
if self.branch_dict is not None:
return self.branch_dict["force_bounce"]
else:
return None
def check_cpus(self) -> Tuple[bool, str]:
cpus = self.get_cpus()
if cpus is not None:
if not isinstance(cpus, (float, int)):
return (
False,
'The specified cpus value "%s" is not a valid float or int.' % cpus,
)
return True, ""
def check_mem(self) -> Tuple[bool, str]:
mem = self.get_mem()
if mem is not None:
if not isinstance(mem, (float, int)):
return (
False,
'The specified mem value "%s" is not a valid float or int.' % mem,
)
return True, ""
def check_disk(self) -> Tuple[bool, str]:
disk = self.get_disk()
if disk is not None:
if not isinstance(disk, (float, int)):
return (
False,
'The specified disk value "%s" is not a valid float or int.' % disk,
)
return True, ""
def check_security(self) -> Tuple[bool, str]:
security = self.config_dict.get("security")
if security is None:
return True, ""
inbound_firewall = security.get("inbound_firewall")
outbound_firewall = security.get("outbound_firewall")
if inbound_firewall is None and outbound_firewall is None:
return True, ""
if inbound_firewall is not None and inbound_firewall not in (
"allow",
"reject",
):
return (
False,
'Unrecognized inbound_firewall value "%s"' % inbound_firewall,
)
if outbound_firewall is not None and outbound_firewall not in (
"block",
"monitor",
):
return (
False,
'Unrecognized outbound_firewall value "%s"' % outbound_firewall,
)
unknown_keys = set(security.keys()) - {
"inbound_firewall",
"outbound_firewall",
}
if unknown_keys:
return (
False,
'Unrecognized items in security dict of service config: "%s"'
% ",".join(unknown_keys),
)
return True, ""
def check_dependencies_reference(self) -> Tuple[bool, str]:
dependencies_reference = self.config_dict.get("dependencies_reference")
if dependencies_reference is None:
return True, ""
dependencies = self.config_dict.get("dependencies")
if dependencies is None:
return (
False,
'dependencies_reference "%s" declared but no dependencies found'
% dependencies_reference,
)
if dependencies_reference not in dependencies:
return (
False,
'dependencies_reference "%s" not found in dependencies dictionary'
% dependencies_reference,
)
return True, ""
def check(self, param: str) -> Tuple[bool, str]:
check_methods = {
"cpus": self.check_cpus,
"mem": self.check_mem,
"security": self.check_security,
"dependencies_reference": self.check_dependencies_reference,
"deploy_group": self.check_deploy_group,
}
check_method = check_methods.get(param)
if check_method is not None:
return check_method()
else:
return (
False,
'Your service config specifies "%s", an unsupported parameter.' % param,
)
def validate(self, params: Optional[List[str]] = None,) -> List[str]:
if params is None:
params = [
"cpus",
"mem",
"security",
"dependencies_reference",
"deploy_group",
]
error_msgs = []
for param in params:
check_passed, check_msg = self.check(param)
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def check_deploy_group(self) -> Tuple[bool, str]:
deploy_group = self.get_deploy_group()
if deploy_group is not None:
pipeline_deploy_groups = get_pipeline_deploy_groups(
service=self.service, soa_dir=self.soa_dir
)
if deploy_group not in pipeline_deploy_groups:
import matplotlib.pyplot as plt
import numpy as np
import os
from constants import OUTPUT_DIR, COLOURS_DARK_BLUE, COLOURS_DARK_BLUE_YELLOW, X_DARK, Z_DARK, BLUE
from data_io import read_matrix_data_and_idx_vals, read_mfpt_heuristic, read_fpt_and_params, read_varying_mean_sd_fpt_and_params
from firstpassage import fpt_histogram, exponential_scale_estimate, sample_exponential, simplex_heatmap, \
fp_state_zloc_hist, fp_zloc_times_joint
from plotting import plot_simplex2D
from trajectory_analysis import corner_to_flux
def subsample_data():
# TODO
fpt_data_subsampled = 0
return fpt_data_subsampled
def load_datadict(basedir="figures" + os.sep + "data_fpt"):
# form is {'BL_N100_xall': {'states': X, 'times': Y, 'params': Z},
# 'TR_N10k_icfp': {'states': X, 'times': Y, 'params': Z}, etc. }
datadict = {}
# TODO maybe convert to non loop form and just explicitly name subdirs and desired plot headers?
subdirs = [sd for sd in os.listdir(basedir) if os.path.isdir(os.path.join(basedir, sd))]
for subdir in subdirs:
fpt, fps, params = read_fpt_and_params(basedir + os.sep + subdir)
datadict[subdir] = {'times': fpt, 'states': fps, 'params': params}
return datadict
def figure_fpt_multihist(multi_fpt_list, labels, figname_mod="def", bin_linspace=80, fs=16, colours=COLOURS_DARK_BLUE_YELLOW,
figsize=(8,6), ec='k', lw=0.5, flag_norm=False, flag_xlog10=False, flag_ylog10=False,
flag_disjoint=False, flag_show=True, outdir=OUTPUT_DIR, years=False, save=True, ax=None, reframe=False):
# resize fpt lists if not all same size (to the min size)
fpt_lengths = [len(fpt) for fpt in multi_fpt_list]
ensemble_size = np.min(fpt_lengths)
if years:
multi_fpt_list = [np.array(arr) / 365.0 for arr in multi_fpt_list]
# cleanup data to same size
if sum(np.array(fpt_lengths) - ensemble_size) > 0:
print "Resizing multi_fpt_list elements:", fpt_lengths, "to the min size of:", ensemble_size
# TODO should randomize to not bias data
for idx in xrange(len(fpt_lengths)):
multi_fpt_list[idx] = multi_fpt_list[idx][:ensemble_size]
bins = np.linspace(np.min(multi_fpt_list), np.max(multi_fpt_list), bin_linspace)
# normalize
if flag_norm:
y_label = r'$P(\tau)$' #'Probability'
weights = np.ones_like(multi_fpt_list) / ensemble_size
else:
y_label = 'Frequency'
weights = np.ones_like(multi_fpt_list)
# prep fig before axes mod
if ax is None:
fig = plt.figure(figsize=figsize, dpi=120)
ax = plt.gca()
# mod axes (log)
if flag_xlog10:
#ax.set_xscale("log", nonposx='clip')
ax.set_xscale("log")
min_log = np.floor(np.min(np.log10(multi_fpt_list)))
max_log = np.ceil(np.max(np.log10(multi_fpt_list)))
bins = np.logspace(min_log, max_log, bin_linspace)
if flag_ylog10:
#ax.set_yscale("log", nonposx='clip')
ax.set_yscale("log")
if reframe:
ax.set_xlim(0.5, np.max(multi_fpt_list))
ax.set_ylim(0.0, 0.16)
# plot calls
if flag_disjoint:
ax.hist(multi_fpt_list, bins=bins, color=colours, label=labels, weights=weights.T, edgecolor=ec, linewidth=lw)
else:
for idx, fpt_list in enumerate(multi_fpt_list):
ax.hist(fpt_list, bins=bins, alpha=0.6, color=colours[idx], label=labels[idx],
weights=weights[idx,:])
ax.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, color=colours[idx],
label=None,weights=weights[idx,:], edgecolor=ec, linewidth=lw, fill=False)
# labels
#ax.set_title(r'$\tau$ histogram (%d samples)' % (ensemble_size), fontsize=fs)
if years:
label = r'$\tau$ (years)' # 'First-passage time (years)'
ax.set_xlabel(label, fontsize=fs)
else:
#ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
ax.set_xlabel(r'$\tau$', fontsize=fs)
ax.set_ylabel(y_label, fontsize=fs)
ax.legend(fontsize=fs-2) # loc='upper right'
ax.tick_params(labelsize=fs)
# plt.locator_params(axis='x', nbins=4)
# save and show
if save:
plt_save = 'fig_fpt_multihist_%s.pdf' % figname_mod
plt.savefig(outdir + os.sep + plt_save, bbox_inches='tight')
if flag_show:
plt.show()
return plt.gca()
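def demo_fpt_multihist():
    """Minimal usage sketch (not from the original analysis): overlay two
    synthetic exponential first-passage-time ensembles with the plotter above.
    The sample sizes, scales and labels are placeholders, and the default
    colour palette is assumed to provide at least two entries."""
    fpt_a = np.random.exponential(1e3, 500)
    fpt_b = np.random.exponential(5e3, 500)
    return figure_fpt_multihist([fpt_a, fpt_b], ['ensemble A', 'ensemble B'],
                                figname_mod="demo", flag_norm=True,
                                flag_xlog10=True, flag_show=False, save=False)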
def figure_mfpt_varying(mean_fpt_varying, sd_fpt_varying, param_vary_name, param_set, params, samplesize, SEM_flag=True,
show_flag=False, figname_mod="", outdir=OUTPUT_DIR, fs=16):
if SEM_flag:
sd_fpt_varying = sd_fpt_varying / np.sqrt(samplesize) # s.d. from CLT since sample mean is approx N(mu, sd**2/n)
plt.errorbar(param_set, mean_fpt_varying, yerr=sd_fpt_varying, label="sim")
plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
ax = plt.gca()
ax.set_xlabel(r'$%s$' % param_vary_name, fontsize=fs)
ax.set_ylabel(r'$\langle\tau\rangle$', fontsize=fs)
# log options
for i in xrange(len(mean_fpt_varying)):
print i, param_set[i], mean_fpt_varying[i], sd_fpt_varying[i]
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
#ax.set_xscale("log", nonposx='clip')
ax.set_xscale("log")
#ax.set_xlim([0.8*1e2, 1*1e7])
if flag_ylog10:
#ax.set_yscale("log", nonposx='clip')
ax.set_yscale("log")
#ax.set_ylim([0.8*1e2, 3*1e5])
# create table of params
plt_save = "mean_fpt_varying" + figname_mod
plt.savefig(outdir + os.sep + plt_save + '.pdf', bbox_inches='tight')
if show_flag:
plt.show()
return ax
def figure_mfpt_varying_dual(mean_fpt_varying, sd_fpt_varying, param_vary_name, param_set, params, samplesize, SEM_flag=True,
show_flag=False, figname_mod="", outdir=OUTPUT_DIR, fs=20, ax=None):
if SEM_flag:
sd_fpt_varying = sd_fpt_varying / np.sqrt(samplesize) # s.d. from CLT since sample mean is approx N(mu, sd**2/n)
if ax is None:
plt.figure()
ax = plt.gca()
#ax_dual = ax.twinx()
heuristic_x = np.logspace(np.log10(np.min(param_set)), np.log10(np.max(param_set)), 100)
heuristic_y = [corner_to_flux('BL', params.mod_copy({param_vary_name: p})) for p in heuristic_x]
ax.plot(param_set, mean_fpt_varying, '--', marker='o', color='k', label=r'$\langle\tau\rangle$ (simulation)')
ax.plot(param_set, sd_fpt_varying, '-.', marker='^', color='r', label=r'$\delta\tau$ (simulation)')
ax.plot(heuristic_x, heuristic_y, 'k', label=r"Flux to $\hat z$ from FP")
#plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
ax.set_xlabel(r'$%s$' % param_vary_name, fontsize=fs)
ax.set_ylabel(r'$\tau$', fontsize=fs)
#ax.set_ylabel(r'$\langle\tau\rangle$', fontsize=fs)
#ax.set_ylabel(r'$\delta\tau$', fontsize=fs)
plt.xticks(fontsize=fs-2)
plt.yticks(fontsize=fs-2)
# hacky fix to sharey
#print "HACKY"
#print ax.get_ylim()
#ax_dual.set_ylim(ax.get_ylim())
# log options
for i in xrange(len(mean_fpt_varying)):
print i, param_set[i], mean_fpt_varying[i], sd_fpt_varying[i]
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
#ax.set_xscale("log", nonposx='clip')
ax.set_xscale("log")
#ax_dual.set_xscale("log", nonposx='clip')
#ax.set_xlim([0.8*1e2, 1*1e7])
if flag_ylog10:
#ax.set_yscale("log", nonposx='clip')
ax.set_yscale("log")
#ax_dual.set_yscale("log", nonposx='clip')
#ax.set_ylim([0.8*1e2, 3*1e5])
# create table of params
ax.legend(fontsize=fs-2)
plt_save = "mean_fpt_varying_dual" + figname_mod
plt.savefig(outdir + os.sep + plt_save + '.pdf', bbox_inches='tight')
if show_flag:
plt.show()
return ax
def figure_mfpt_varying_composite(means, sds, param_vary_name, param_set, params,
show_flag=False, figname_mod="", outdir=OUTPUT_DIR, fs=20, ax=None):
if ax is None:
plt.figure(figsize=(5,4))
ax = plt.gca()
num_sets = 6
assert num_sets == len(means)
colours = [X_DARK, Z_DARK, '#ffd966', BLUE, 'pink', 'brown'] #['black', 'red', 'green', 'blue']
#TODO markers =
labels = [r"$b=0.8$, $c=0.9$, $\gamma=1$", r"$b=0.8$, $c=0.9$, $\gamma=4$", r"$b=0.8$, $c=0.9$, $\gamma=100$",
r"$b=1.2$, $c=1.1$, $\gamma=1$", r"$b=1.2$, $c=1.1$, $\gamma=4$", r"$b=1.2$, $c=1.1$, $\gamma=100$", ]
region_labels = [r"$b=0.8$, $c=0.9$, $\gamma=1$", r"$b=0.8$, $c=0.9$, $\gamma=4$", r"$b=0.8$, $c=0.9$, $\gamma=100$",
r"$b=1.2$, $c=1.1$, $\gamma=1$", r"$b=1.2$, $c=1.1$, $\gamma=4$", r"$b=1.2$, $c=1.1$, $\gamma=100$"]
corners = ['BL1g', 'BL', 'BL100g', 'TR1g', 'TR', 'TR100g']#, 'TR1g', 'TR']
heuristic_x = np.logspace(np.log10(np.min(param_set)), np.log10(np.max(param_set)), 100)
for idx in xrange(num_sets):
print idx, len(param_set), len(means[idx]), len(sds[idx])
ax.plot(param_set, means[idx], '.-', marker='o', markeredgecolor='k', color=colours[idx], label=r'%s: $\langle\tau\rangle$' % region_labels[idx], zorder=3)
ax.plot(param_set, sds[idx], '-.', marker='^', markeredgecolor='k', color=colours[idx], label=r'%s: $\delta\tau$' % region_labels[idx], zorder=2)
#ax.plot(param_set, sds[idx], '-.', marker='^', markeredgecolor='k', color=colours[idx], zorder=2)
heuristic_y = [corner_to_flux(corners[idx], params.mod_copy({param_vary_name: p})) for p in heuristic_x]
ax.plot(heuristic_x, heuristic_y, '--k', zorder=1) #, label=r"Flux to $\hat z$ from FP (Region %s)" % region_labels[idx])
#plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
ax.set_xlabel(r'$%s$' % param_vary_name, fontsize=fs)
ax.set_ylabel(r'$\tau$', fontsize=fs)
#ax.set_ylabel(r'$\langle\tau\rangle$', fontsize=fs)
#ax.set_ylabel(r'$\delta\tau$', fontsize=fs)
plt.xticks(fontsize=fs-2)
plt.yticks(fontsize=fs-2)
# log options
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
#ax.set_xscale("log", nonposx='clip')
ax.set_xscale("log")
#ax_dual.set_xscale("log", nonposx='clip')
ax.set_xlim([np.min(param_set)*0.9, 1.5*1e4])
if flag_ylog10:
#ax.set_yscale("log", nonposx='clip')
ax.set_yscale("log")
#ax_dual.set_yscale("log", nonposx='clip')
ax.set_ylim([6*1e-1, 3*1e6])
ax.legend(fontsize=fs-6, ncol=2, loc='upper right')
plt_save = "mean_fpt_varying_composite" + figname_mod
plt.savefig(outdir + os.sep + plt_save + '.pdf', bbox_inches='tight')
if show_flag:
plt.show()
return ax
def figure_mfpt_varying_collapsed(means, sds, param_vary_name, param_set, params,
show_flag=False, figname_mod="", outdir=OUTPUT_DIR, fs=20, ax=None):
if ax is None:
plt.figure()
ax = plt.gca()
num_sets = 6
assert num_sets == len(means)
colours = [X_DARK, Z_DARK, '#ffd966', BLUE, 'pink', 'brown'] #['black', 'red', 'green', 'blue']
region_labels = ['BLg1', 'BLg4', 'BLg100', 'TRg1', 'TRg4', 'TRg100']#, 'TRg1', 'TRg4']
corners = ['BL1g', 'BL', 'BL100g', 'TR1g', 'TR', 'TR100g']#, 'TR1g', 'TR']
for idx in xrange(num_sets):
print idx, len(param_set), len(means[idx]), len(sds[idx])
means_scaled = [means[idx][i] / corner_to_flux(corners[idx], params.mod_copy({param_vary_name: param_set[i]})) for i in xrange(len(means[idx]))]
ax.plot(param_set, means_scaled, '.-', marker='o', markeredgecolor='k', color=colours[idx], label=r'Region %s' % region_labels[idx], zorder=3)
#ax.plot(param_set, sd_scaled, '-.', marker='^', markeredgecolor='k', color=colours[idx], label=r'$\delta\tau$ (Region %s)' % region_labels[idx], zorder=2)
#ax.plot(heuristic_x, heuristic_y, '--k', zorder=1) #, label=r"Flux to $\hat z$ from FP (Region %s)" % region_labels[idx])
#plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
plt.axhline(1.0, color='k', ls='--', lw=1.0)
ax.set_xlabel(r'$%s$' % param_vary_name, fontsize=fs)
ax.set_ylabel(r'$\mu z^{\ast} \langle\tau\rangle_{sim}$', fontsize=fs)
#ax.set_ylabel(r'$\langle\tau\rangle$', fontsize=fs)
#ax.set_ylabel(r'$\delta\tau$', fontsize=fs)
plt.xticks(fontsize=fs-2)
plt.yticks(fontsize=fs-2)
# log options
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
#ax.set_xscale("log", nonposx='clip')
ax.set_xscale("log")
#ax_dual.set_xscale("log", nonposx='clip')
ax.set_xlim([np.min(param_set)*0.9, 1.5*1e4])
if flag_ylog10:
#ax.set_yscale("log", nonposx='clip')
ax.set_yscale("log")
#ax_dual.set_yscale("log", nonposx='clip')
#ax.set_ylim([0.0, 10])
ax.legend(fontsize=fs-6)
plt_save = "mean_fpt_varying_collapsed" + figname_mod
plt.savefig(outdir + os.sep + plt_save + '.pdf', bbox_inches='tight')
if show_flag:
plt.show()
return ax
if __name__ == "__main__":
multihist = False
simplex_and_zdist = False
only_fp_zloc_times_joint = False
composite_simplex_zdist = False
composite_hist_simplex_zdist = False
inspect_fpt_flux = False
mfpt_single = False
mfpt_composite = False
mfpt_details = False
mfpt_composite_TR = True
mfpt_composite_BL = True
basedir = "data"
if any([multihist, only_fp_zloc_times_joint, simplex_and_zdist, composite_simplex_zdist, composite_hist_simplex_zdist, inspect_fpt_flux]):
dbdir = basedir + os.sep + "fpt"
datdict = load_datadict(basedir=dbdir)
"""
title = "N100_xall"
keys = ['BR_%s' % title, 'TR_%s' % title, 'BL_%s' % title, 'TL_%s' % title]
#labels = ["b=0.80, c=1.10 (Region II)", "b=1.20, c=1.10 (Region III)", "b=0.80, c=0.90 (Region IV)", "b=1.20, c=0.90 (Region I)"]
labels = [r"(II) $b=0.8$, $c=1.1$", r"(III) $b=1.2$, $c=1.1$", r"(IV) $b=0.8$, $c=0.9$", r"(I) $b=1.2$, $c=0.9$"]
corners = ['BR', 'TR', 'BL', 'TL']
num_hists = len(keys)
"""
"""
title = "N100_xall"
keys = ['BL_%s_g1' % title, 'BL_%s_g4' % title, 'BL_%s_g100' % title,
'TR_%s_g1' % title, 'TR_%s_g4' % title, 'TR_%s_g100' % title]
#labels = ["b=0.80, c=1.10 (Region II)", "b=1.20, c=1.10 (Region III)", "b=0.80, c=0.90 (Region IV)", "b=1.20, c=0.90 (Region I)"]
labels = [r"$b=0.8$, $c=0.9$, $\gamma=1$", r"$b=0.8$, $c=0.9$, $\gamma=4$", r"$b=0.8$, $c=0.9$, $\gamma=100$",
r"$b=1.2$, $c=1.1$, $\gamma=1$", r"$b=1.2$, $c=1.1$, $\gamma=4$", r"$b=1.2$, $c=1.1$, $\gamma=100$",]
corners = ['BL1g', 'BL4g', 'BL100g', 'TR1g', 'TR4g', 'TR100g']
num_hists = len(keys)
"""
title = "N10k_xall"
keys = ['TR_%s_g1' % title]
# labels = ["b=0.80, c=1.10 (Region II)", "b=1.20, c=1.10 | |
TOOLBAR_BOTTOM = 2
class ButtonsWidget(QWidget):
buttons_mode = ButtonsMode.INTERNAL
qt_css_class = "ButtonsWidget"
qt_css_extra = ""
def __init__(self) -> None:
super().__init__()
self.buttons: List[QAbstractButton] = []
def resizeButtons(self) -> None:
frame_width = self.style().pixelMetric(QStyle.PixelMetric.PM_DefaultFrameWidth)
if self.buttons_mode == ButtonsMode.INTERNAL:
x = self.rect().right() - frame_width
y = self.rect().top() + frame_width
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y)
elif self.buttons_mode == ButtonsMode.TOOLBAR_RIGHT:
x = self.rect().right() - frame_width
y = self.rect().top() - frame_width
for i, button in enumerate(self.buttons):
sz = button.sizeHint()
if i > 0:
y += sz.height()
button.move(x - sz.width(), y)
elif self.buttons_mode == ButtonsMode.TOOLBAR_BOTTOM:
x = self.rect().left() - frame_width
y = self.rect().bottom() + frame_width
for i, button in enumerate(self.buttons):
sz = button.sizeHint()
if i > 0:
x += sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name: str, on_click: Callable[..., Any], tooltip: str,
insert: bool=False) -> QToolButton:
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
# Horizontal buttons are inside the edit widget and do not have borders.
if self.buttons_mode == ButtonsMode.INTERNAL:
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} "
"pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
button.clicked.connect(on_click)
if insert:
self.buttons.insert(0, button)
else:
self.buttons.append(button)
# Vertical buttons are integrated into the widget, within a margin that moves the edge
# of the edit widget over to make space.
frame_width = self.style().pixelMetric(QStyle.PixelMetric.PM_DefaultFrameWidth)
if self.buttons_mode == ButtonsMode.TOOLBAR_RIGHT:
self.button_padding = max(button.sizeHint().width() for button in self.buttons) + 4
self.setStyleSheet(self.qt_css_class +
" { margin-right: "+ str(self.button_padding) +"px; }"+
self.qt_css_extra)
elif self.buttons_mode == ButtonsMode.TOOLBAR_BOTTOM:
self.button_padding = max(button.sizeHint().height() for button in self.buttons) + \
frame_width
self.setStyleSheet(
self.qt_css_class +" { margin-bottom: "+ str(self.button_padding) +"px; }"+
self.qt_css_extra)
return button
def addCopyButton(self, tooltipText: Optional[str]=None) -> QAbstractButton:
if tooltipText is None:
tooltipText = _("Copy to clipboard")
return self.addButton("icons8-copy-to-clipboard-32.png", self._on_copy,
tooltipText)
def _on_copy(self) -> None:
get_app_state_qt().app_qt.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
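# Hypothetical usage sketch (the icon file and handler below are illustrative placeholders,
# not taken from this module): subclasses such as ButtonsLineEdit below get inline tool
# buttons that resizeButtons() lays out according to buttons_mode.
#
#   edit = ButtonsLineEdit("payment URI")
#   edit.addCopyButton()                                    # copies edit.text() to the clipboard
#   edit.addButton("example-icon.png", on_scan, _("Scan"))  # icon name and handler are placeholders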
class ButtonsLineEdit(KeyEventLineEdit, ButtonsWidget):
qt_css_class = "QLineEdit"
def __init__(self, text: str='') -> None:
KeyEventLineEdit.__init__(self, None, text, {Qt.Key.Key_Return, Qt.Key.Key_Enter})
self.buttons: List[QAbstractButton] = []
def resizeEvent(self, event: QResizeEvent) -> None:
QLineEdit.resizeEvent(self, event)
self.resizeButtons()
buttons_width = 0
for button in self.buttons:
buttons_width += button.size().width()
self.setTextMargins(0, 0, buttons_width, 0)
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
qt_css_class = "QPlainTextEdit"
def __init__(self, text: Optional[str]=None) -> None:
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons: List[QAbstractButton] = []
def resizeEvent(self, event: QResizeEvent) -> None:
QPlainTextEdit.resizeEvent(self, event)
self.resizeButtons()
class ButtonsTableWidget(QTableWidget, ButtonsWidget):
buttons_mode = ButtonsMode.TOOLBAR_BOTTOM
qt_css_class = "QTableWidget"
def __init__(self, parent: Optional[QWidget]=None,
buttons_mode: ButtonsMode=ButtonsMode.TOOLBAR_RIGHT) -> None:
self.buttons_mode = buttons_mode
QTableWidget.__init__(self, parent)
self.buttons: List[QAbstractButton] = []
def resizeEvent(self, event: QResizeEvent) -> None:
QTableWidget.resizeEvent(self, event)
self.resizeButtons()
class ColorSchemeItem:
def __init__(self, fg_color: str, bg_color: str) -> None:
self.colors = (fg_color, bg_color)
def _get_color(self, background: bool) -> str:
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background: bool=False, class_name: str="QWidget", id_name: str="") \
-> str:
css_prefix = "background-" if background else ""
color = self._get_color(background)
key_name = class_name
if id_name:
key_name += "#"+ id_name
return "{} {{ {}color:{}; }}".format(key_name, css_prefix, color)
def as_color(self, background: bool=False) -> QColor:
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
DEFAULT = ColorSchemeItem("black", "white")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
GREEN = ColorSchemeItem("#117c11", "#8af296")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
YELLOW = ColorSchemeItem("yellow", "yellow")
@staticmethod
def has_dark_background(widget: QWidget) -> bool:
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget: QWidget) -> None:
if ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
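# Illustrative sketch (not part of the original file): ColorSchemeItem picks the
# foreground/background pair based on ColorScheme.dark_scheme, so the same constant stays
# legible on both light and dark palettes.
#
#   ColorScheme.update_from_widget(window)                  # detect a dark palette once
#   label.setStyleSheet(ColorScheme.GREEN.as_stylesheet())  # text colour only
#   css = ColorScheme.RED.as_stylesheet(background=True, class_name="QFrame")
#   colour = ColorScheme.BLUE.as_color(background=True)     # a QColor for painting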
class SortableTreeWidgetItem(QTreeWidgetItem):
DataRole = Qt.ItemDataRole.UserRole + 1
def __lt__(self, other: object) -> bool:
assert isinstance(other, QTreeWidgetItem)
column = self.treeWidget().sortColumn()
self_data = self.data(column, self.DataRole)
other_data = other.data(column, self.DataRole)
if None not in (self_data, other_data):
# We have set custom data to sort by
return cast(bool, self_data < other_data)
try:
# Is the value something numeric?
self_text = self.text(column).replace(',', '')
other_text = other.text(column).replace(',', '')
return float(self_text) < float(other_text)
except ValueError:
# If not, we will just do string comparison
return self.text(column) < other.text(column)
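# Hypothetical usage sketch (the values below are made up): to sort a column numerically
# while displaying formatted text, store the raw value under DataRole; __lt__ above prefers
# it over the displayed string.
#
#   item = SortableTreeWidgetItem(["1,234.56"])
#   item.setData(0, SortableTreeWidgetItem.DataRole, 1234.56)
#   tree.addTopLevelItem(item)   # sorting column 0 now compares 1234.56, not the text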
def update_fixed_tree_height(tree: QTreeWidget, maximum_height: Optional[int]=None) -> None:
# We can't always rely on the manually set maximum height sticking.
# It's possible the setting of the fixed height explicitly replaces it.
if maximum_height is None:
maximum_height = tree.maximumHeight()
tree_model = tree.model()
cell_index = tree_model.index(0, 1)
row_height = tree.rowHeight(cell_index)
if row_height == 0:
row_height = tree.header().height()
row_count = tree_model.rowCount()
table_height = row_height * row_count
if maximum_height > 5:
table_height = min(table_height, maximum_height)
    if tree.header().isVisible():
table_height += tree.header().height() + 2
tree.setFixedHeight(table_height)
def protected(func: D1) -> D1:
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self: "ElectrumWindow", *args: Any, **kwargs: Any) -> Any:
main_window = self
if 'main_window' in kwargs:
main_window = kwargs['main_window']
elif 'wallet_id' in kwargs:
main_window2 = app_state.app_qt.get_wallet_window_by_id(kwargs['wallet_id'])
assert main_window2 is not None
main_window = main_window2
parent = main_window.top_level_window()
password: Optional[str] = None
while True:
password = main_window.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
main_window._wallet.check_password(password)
break
except Exception as e:
main_window.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return cast(D1, request_password)
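# Hypothetical usage sketch (the method below is illustrative, not part of this module):
# @protected prompts for and verifies the wallet password, then passes it to the wrapped
# method as the 'password' keyword argument.
#
#   class ExampleWindow(ElectrumWindow):
#       @protected
#       def do_sensitive_action(self, password: Optional[str]=None) -> None:
#           ...  # 'password' arrives via the wrapper; see the docstring above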
def icon_path(icon_basename: str) -> str:
return resource_path('icons', icon_basename)
def read_qt_ui(ui_name: str) -> QWidget:
# NOTE(typing) This is not typed by PyQt5-stubs.
return cast(QWidget, loadUi(resource_path("ui", ui_name))) # type: ignore
@lru_cache()
def read_QIcon(icon_basename: str) -> QIcon:
return QIcon(icon_path(icon_basename))
def get_source_index(model_index: QModelIndex, klass: Any) -> QModelIndex:
model = model_index.model()
while model is not None and not isinstance(model, klass):
model_index = model.mapToSource(model_index)
model = model_index.model()
return model_index
def get_default_language() -> str:
name = QLocale.system().name()
return name if name in languages else 'en_UK'
def can_show_in_file_explorer() -> bool:
return sys.platform in ('win32', 'darwin')
def show_in_file_explorer(path: str) -> bool:
# https://stackoverflow.com/a/46019091/11881963
if sys.platform == 'win32':
args = []
if not os.path.isdir(path):
args.append('/select,')
args.append(QDir.toNativeSeparators(path))
QProcess.startDetached('explorer', args)
return True
elif sys.platform == 'darwin':
args = [
'-e', 'tell application "Finder"',
'-e', 'activate',
'-e', 'select POSIX file "%s"' % path,
'-e', 'end tell',
'-e', 'return',
]
QProcess.execute('/usr/bin/osascript', args)
return True
return False
def create_new_wallet(parent: QWidget, initial_dirpath: str) -> Optional[str]:
create_filepath, __ = QFileDialog.getSaveFileName(parent, _("Enter a new wallet file name"),
initial_dirpath)
if not create_filepath:
return None
# QFileDialog.getSaveFileName uses forward slashes for "easier pathing".. correct this.
create_filepath = os.path.normpath(create_filepath)
if os.path.exists(create_filepath):
MessageBox.show_error(_("Overwriting existing files not supported at this time."))
return None
dirpath, filename = os.path.split(create_filepath)
if not create_filepath.endswith(DATABASE_EXT):
if os.path.exists(create_filepath + DATABASE_EXT):
MessageBox.show_error(_("The file name '{}' is already in use.").format(filename))
return None
if not dirpath or not os.path.isdir(dirpath) or not os.access(dirpath, os.R_OK | os.W_OK):
MessageBox.show_error(_("The selected directory is not accessible."))
return None
name_edit = QLabel(filename)
fields = [
(QLabel(_("Wallet") +":"), name_edit),
]
from .password_dialog import ChangePasswordDialog, PasswordAction
from .wallet_wizard import PASSWORD_NEW_TEXT
# NOTE(typing) The signature matches for `fields` but the type checker gives a false positive.
d = ChangePasswordDialog(parent, PASSWORD_NEW_TEXT, _("Create New Wallet"),
fields, # type: ignore
kind=PasswordAction.NEW)
success, _old_password, new_password = d.run()
if not success or not cast(str, new_password).strip():
return None
assert new_password is not None
from electrumsv.storage import WalletStorage
storage = WalletStorage.create(create_filepath, new_password)
# This path is guaranteed to be the full file path with file extension.
wallet_path = storage.get_path()
storage.close()
    # Store the credential because we will most likely open the wallet immediately and do not
    # want to prompt for the password again right after the user has just specified it.
app_state.credentials.set_wallet_password(wallet_path, new_password,
CredentialPolicyFlag.FLUSH_AFTER_WALLET_LOAD | CredentialPolicyFlag.IS_BEING_ADDED)
return create_filepath
class FormSeparatorLine(QFrame):
def __init__(self) -> None:
super().__init__()
self.setObjectName("FormSeparatorLine")
self.setFrameShape(QFrame.HLine)
self.setFixedHeight(1)
FieldType = Union[QWidget, QLayout]
class FormSectionWidget(QWidget):
"""
A standardised look for forms whether informational or user editable.
In the longer term it might be worth looking at whether the standard Qt FormLayout
can be used to do something that looks the same with less custom code to achieve it.
"""
show_help_label: bool = True
# minimum_label_width: int = 80
_frame_layout: QFormLayout
def __init__(self, parent: Optional[QWidget]=None,
minimum_label_width: Optional[int]=None) -> None:
super().__init__(parent)
frame = self._frame = QFrame()
frame.setObjectName("FormFrame")
self.clear(have_layout=False)
frame.setLayout(self._frame_layout)
self.setStyleSheet("""
#FormSeparatorLine {
border: 1px solid #E3E2E2;
}
#FormSectionLabel {
color: #444444;
}
#FormFrame {
background-color: #F2F2F2;
border: 1px solid #E3E2E2;
}
""")
vbox = QVBoxLayout()
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(frame)
self.setLayout(vbox)
def create_title(self, title_text: str) -> QLabel:
label = QLabel(title_text)
label.setObjectName("FormSectionTitle")
label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
return label
def add_title(self, title_text: str) -> None:
label | |
for el in remove_list:
equal_imm_blocks_present_for_migration.remove(el)
# iterate through the blocks and find the best one
for block_to_migrate_to in equal_imm_blocks_present_for_migration:
# skip yourself
if block_to_migrate_to == hot_blck_synced:
continue
block_metric_attr = self.get_block_attr(selected_metric) # metric to pay attention to
# iterate and found blocks that are at least as good as the current block
if getattr(block_to_migrate_to, block_metric_attr) == getattr(hot_blck_synced, block_metric_attr):
# blocks have similar attr value
if (selected_metric == "power" and selected_dir == -1) or \
(selected_metric == "latency" and selected_dir == 1) or (selected_metric == "area"):
                    # if we want to slow down (relax latency, improve power), look for a parallel task on the other block
block_to_mig_to_parallelism_exist, parallelism_type = self.check_if_task_can_run_with_any_other_task_in_parallel(sim_dp,
task,
block_to_migrate_to)
if (selected_metric == "area" and selected_dir == -1):
                        # no parallelism possibly allows for the other memory to shrink
if not block_to_mig_to_parallelism_exist:
results_block.append(block_to_migrate_to)
parallelism_type = ["serialism"]
else:
if block_to_mig_to_parallelism_exist:
results_block.append(block_to_migrate_to)
parallelism_type = ["serialism"]
else:
# if we want to accelerate (improve latency, get more power), look for parallel task on the same block
if current_block_parallelism_exist:
results_block.append(block_to_migrate_to)
elif inequality_dir*getattr(block_to_migrate_to, block_metric_attr) > inequality_dir*getattr(hot_blck_synced, block_metric_attr):
results_block.append(block_to_migrate_to)
break
# if no block found, just load the results_block with current block
if len(results_block) == 0:
results_block = [hot_blck_synced]
found_block_to_mig_to = False
else:
found_block_to_mig_to = True
        # pick at random to try random scenarios. At the moment, only equal and immediately better blocks are considered
random.seed(datetime.now().microsecond)
result_block = random.choice(results_block)
selection_mode = "batch"
if found_block_to_mig_to:
if getattr(result_block, block_metric_attr) == getattr(hot_blck_synced, block_metric_attr):
selection_mode = "batch"
else:
selection_mode = "single"
return result_block, found_block_to_mig_to, selection_mode, parallelism_type, locality_type
def is_system_ic(self, ex_dp, sim_dp, blck):
if not sim_dp.dp_stats.fits_budget(1):
return False
elif sim_dp.dp_stats.fits_budget(1) and not self.dram_feasibility_check_pass(ex_dp):
return False
else:
for block in ex_dp.get_hardware_graph().get_blocks():
neighs = block.get_neighs()
if any(el for el in neighs if el.subtype == "dram"):
if block == blck:
return True
return False
def bus_has_pe_mem_topology_for_split(self, ex_dp, sim_dp, ref_task, block):
if not block.type == "ic" or ref_task.is_task_dummy():
return False
found_pe_block = False
found_mem_block = False
migrant_tasks = self.dh.find_parallel_tasks_of_task_in_block(ex_dp, sim_dp, ref_task, block)[0]
migrant_tasks_names = [el.get_name() for el in migrant_tasks]
mem_neighs = [el for el in block.get_neighs() if el.type == "mem"]
pe_neighs = [el for el in block.get_neighs() if el.type == "pe"]
for neigh in pe_neighs:
neigh_tasks = [el.get_name() for el in neigh.get_tasks_of_block_by_dir("loop_back")]
# if no overlap skip
if len(list(set(migrant_tasks_names) - set(neigh_tasks) )) == len(migrant_tasks_names):
continue
else:
found_pe_block = True
break
for neigh in mem_neighs:
neigh_tasks = [el.get_name() for el in neigh.get_tasks_of_block_by_dir("write")]
# if no overlap skip
if len(list(set(migrant_tasks_names) - set(neigh_tasks) )) == len(migrant_tasks_names):
continue
else:
found_mem_block = True
break
if found_pe_block and found_mem_block :
return True
else:
return False
def get_feasible_transformations(self, ex_dp, sim_dp, hot_blck_synced, selected_metric, selected_krnl, sorted_metric_dir):
# if this knob is set, we randomly pick a transformation
        # This is to illustrate the architectural awareness of FARSI
if config.transformation_selection_mode == "random":
all_transformations = config.all_available_transformations
return all_transformations
# pick a transformation smartly
imm_block = self.dh.get_immediate_block_multi_metric(hot_blck_synced, selected_metric, sorted_metric_dir, hot_blck_synced.get_tasks_of_block())
task = ex_dp.get_hardware_graph().get_task_graph().get_task_by_name(selected_krnl.get_task_name())
feasible_transformations = set(config.metric_trans_dict[selected_metric])
# find the block that is at least as good as the block (for migration)
# if can't find any, we return the same block
selected_metric = list(sorted_metric_dir.keys())[-1]
selected_dir = sorted_metric_dir[selected_metric]
equal_imm_block_present_for_migration, found_blck_to_mig_to, selection_mode, parallelism_type, locality_type = self.select_block_to_migrate_to(ex_dp, sim_dp, hot_blck_synced,
selected_metric, sorted_metric_dir, selected_krnl)
hot_block_type = hot_blck_synced.type
hot_block_subtype = hot_blck_synced.subtype
parallelism_exist, parallelism_type = self.check_if_task_can_run_with_any_other_task_in_parallel(sim_dp, task, hot_blck_synced)
other_block_parallelism_exist = False
all_transformations = config.metric_trans_dict[selected_metric]
can_improve_locality = self.can_improve_locality(ex_dp, hot_blck_synced, task)
can_improve_routing = self.can_improve_routing(ex_dp, sim_dp, hot_blck_synced, task)
bus_has_pe_mem_topology_for_split = self.bus_has_pe_mem_topology_for_split(ex_dp, sim_dp, task,hot_blck_synced)
# ------------------------
# based on parallelism, generate feasible transformations
# ------------------------
if parallelism_exist:
if selected_metric == "latency":
if selected_dir == -1:
if hot_block_type == "pe":
                        feasible_transformations = ["migrate", "split"] # only for PE since we want to be low cost, for IC/MEM cost does not increase if you customize
else:
if hot_block_type == "ic":
mem_neighs = [el for el in hot_blck_synced.get_neighs() if el.type == "mem"]
pe_neighs = [el for el in hot_blck_synced.get_neighs() if el.type == "pe"]
if len(mem_neighs) <= 1 or len(pe_neighs) <= 1 or not bus_has_pe_mem_topology_for_split:
feasible_transformations = ["swap", "split_swap"] # ", "swap", "split_swap"]
else:
feasible_transformations = ["migrate", "split"] # ", "swap", "split_swap"]
else:
feasible_transformations = ["migrate", "split"] #", "swap", "split_swap"]
else:
# we can do better by comparing the advantage disadvantage of migrating
# (Advantage: slowing down by serialization, and disadvantage: accelerating by parallelization)
feasible_transformations = ["swap"]
if selected_metric == "power":
if selected_dir == -1:
# we can do better by comparing the advantage disadvantage of migrating
# (Advantage: slowing down by serialization, and disadvantage: accelerating by parallelization)
feasible_transformations = ["swap", "split_swap"]
else:
feasible_transformations = all_transformations
if selected_metric == "area":
if selected_dir == -1:
if hot_block_subtype == "pe":
feasible_transformations = ["migrate", "swap"]
else:
feasible_transformations = ["migrate", "swap", "split_swap"]
else:
feasible_transformations = all_transformations
elif not parallelism_exist:
if selected_metric == "latency":
if selected_dir == -1:
feasible_transformations = ["swap", "split_swap"]
else:
feasible_transformations = ["swap", "migrate"]
if selected_metric == "power":
if selected_dir == -1:
feasible_transformations = ["migrate", "swap", "split_swap"]
if selected_metric == "area":
if selected_dir == -1:
feasible_transformations = ["migrate", "swap","split_swap"]
else:
feasible_transformations = ["migrate", "swap", "split"]
# ------------------------
# based on locality, generate feasible transformations
# ------------------------
if can_improve_locality and ('transfer' in config.all_available_transformations):
# locality not gonna improve area with the current set up
if not selected_metric == "area" and selected_dir == -1:
feasible_transformations.append("transfer")
#------------------------
# there is a on opportunity for routing
#------------------------
if can_improve_routing and ('routing' in config.all_available_transformations):
transformation_list = list(feasible_transformations)
transformation_list.append('routing')
feasible_transformations = set(transformation_list)
#------------------------
# post processing of the destination blocks to eliminate transformations
#------------------------
# filter migrate
if not found_blck_to_mig_to:
# if can't find a block that is at least as good as the current block, can't migrate
feasible_transformations = set(list(set(feasible_transformations) - set(['migrate'])))
# filter split
number_of_task_on_block = 0
if hot_blck_synced.type == "pe":
number_of_task_on_block = len(hot_blck_synced.get_tasks_of_block())
else:
number_of_task_on_block = len(hot_blck_synced.get_tasks_of_block_by_dir("write"))
if number_of_task_on_block == 1: # can't split an accelerator
feasible_transformations = set(list(set(feasible_transformations) - set(['split', 'split_swap'] )))
# filter swap
block_metric_attr = self.get_block_attr(selected_metric) # metric to pay attention to
if getattr(imm_block, block_metric_attr) == getattr(hot_blck_synced, block_metric_attr):
#if imm_block.get_generic_instance_name() == hot_blck_synced.get_generic_instance_name():
# if can't swap improve, get rid of swap
feasible_transformations = set(list(set(feasible_transformations) - set(['swap'])))
# for IC's we don't use migrate
if hot_blck_synced.type in ["ic"]:
# we don't cover migrate for ICs at the moment
# TODO: add this feature later
feasible_transformations = set(list(set(feasible_transformations) - set(['migrate', 'split_swap'])))
# if no valid transformation left, issue the identity transformation (where nothing changes and a simple copying is done)
if len(list(set(feasible_transformations))) == 0:
feasible_transformations = ["identity"]
return feasible_transformations
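    # Descriptive note (added for readability, no behavioural change): the candidate set above
    # is built in three passes -- (1) the hot task's parallelism plus the selected
    # metric/direction picks a base set (e.g. with a parallel task and a latency goal a PE
    # favours migrate/split, while without parallelism the fallback is swap/split_swap),
    # (2) locality and routing opportunities append "transfer"/"routing", and (3) the
    # post-filters drop migrate when no equal-or-better destination exists, split/split_swap
    # when only one task maps to the block, swap when the immediate better block is no real
    # improvement, and migrate/split_swap for ICs, falling back to "identity" if nothing survives.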
def get_transformation_design_space_size(self, move_to_apply, ex_dp, sim_dp, block_of_interest, selected_metric, sorted_metric_dir):
# if this knob is set, we randomly pick a transformation
        # This is to illustrate the architectural awareness of FARSI
imm_block = self.dh.get_immediate_block_multi_metric(block_of_interest, selected_metric, sorted_metric_dir, block_of_interest.get_tasks_of_block())
        task = (block_of_interest.get_tasks_of_block())[0] # any task will do
feasible_transformations = set(config.metric_trans_dict[selected_metric])
# find the block that is at least as good as the block (for migration)
# if can't find any, we return the same block
selected_metric = list(sorted_metric_dir.keys())[-1]
selected_dir = sorted_metric_dir[selected_metric]
equal_imm_blocks_present_for_migration = self.dh.get_equal_immediate_blocks_present(ex_dp, block_of_interest,
selected_metric, selected_dir, [task])
if len(equal_imm_blocks_present_for_migration) == 1 and equal_imm_blocks_present_for_migration[0] == block_of_interest:
equal_imm_blocks_present_for_migration = []
buses = [el for el in ex_dp.get_blocks() if el.type == "ic"]
mems = [el for el in ex_dp.get_blocks() if el.type == "mem"]
srams = [el for el in ex_dp.get_blocks() if el.type == "sram"]
drams = [el for el in ex_dp.get_blocks() if el.type == "dram"]
pes = [el for el in ex_dp.get_blocks() if el.type == "pe"]
ips = [el for el in ex_dp.get_blocks() if el.subtype == "ip"]
gpps = [el for el in ex_dp.get_blocks() if el.subtype == "gpp"]
# per block
# for PEs
if block_of_interest.subtype == "gpp":
number_of_task_on_block = len(block_of_interest.get_tasks_of_block())
move_to_apply.design_space_size["hardening"] += number_of_task_on_block + 1# +1 for swap, the rest is for split_swap
move_to_apply.design_space_size["pe_allocation"] += (number_of_task_on_block + 1) # +1 is for split, the rest os for split_swap
elif block_of_interest.subtype == "ip":
move_to_apply.design_space_size["softening"] += 1
# for all
for mode in ["frequency_modulation", "bus_width_modulation", "loop_iteration_modulation", "allocation"]:
if not block_of_interest.type =="pe":
if mode == "loop_iteration_modulation":
continue
value = self.dh.get_all_compatible_blocks_of_certain_char(ex_dp, block_of_interest,
selected_metric, selected_dir, | |
self.draw_groupid()
"""
    Save the current results, then open the next image and load its detection info.
"""
def open_next(self, event):
if self.cur < len(self.imageList) - 1:
self.save()
self.clear_bbox()
self.cur += 1
self.load_image(self.imageDirPathBuffer + '/' + self.imageList[self.cur], event)
else:
self.save()
            messagebox.showinfo(title='',message='the last picture') # info message dialog
return
self.processingLabel.config(text=" ")
self.processingLabel.update_idletasks()
"""
    Save the current results, then open the previous image and load its detection info.
"""
def open_previous(self, event):
if self.cur > 0:
self.save()
self.clear_bbox()
self.cur -= 1
self.load_image(self.imageDirPathBuffer + '/' + self.imageList[self.cur], event)
else:
messagebox.showinfo(title='',message='the first picture')
return
self.processingLabel.config(text=" ")
self.processingLabel.update_idletasks()
"""
    Save the results.
"""
def save(self):
for idx, item in enumerate(self.bboxList):
choosebyFrameandTrack = self.img_message[(self.img_message.frame_id == int(self.frameId)) &
(self.img_message.track_id == int(self.boxtrackid[self.bboxIdList[idx]]))]
            # if this bounding box does not exist in the dataframe yet
if choosebyFrameandTrack.empty:
trackid = int(self.boxtrackid[self.bboxIdList[idx]])
                # take the current frame id's rows from the dataframe
                choosebyFrame = self.img_message[(self.img_message.frame_id == int(self.frameId))] # first filter img_message by frame id
                # if this frame has no bounding boxes yet, find where the current frame id fits (between smaller and larger ids) and insert the new row there
if choosebyFrame.empty:
                    chooseBigFrameId = self.img_message[self.img_message.frame_id > int(self.frameId)] # select all rows with a frame id larger than the current one
                    # if there is no row with a larger frame id, this frame goes at the end, so append the new row last
if chooseBigFrameId.empty:
                        # time = self.img_message.time.tolist()[0] # get the time
                        # lastframeid = self.img_message.frame_id.tolist()[0] # get the frame id of the last row
# curframetime= time + 39*(int(self.frameId)-lastframeid)
x = list(self.bboxList[idx])[0] * self.rate
y = list(self.bboxList[idx])[1] * self.rate
w = (list(self.bboxList[idx])[2] - list(self.bboxList[idx])[0]) * self.rate
h = (list(self.bboxList[idx])[3] - list(self.bboxList[idx])[1]) * self.rate
# get_groupid = self.newgroupidentry.get()
# if get_groupid:
# groupid = int(get_groupid)
# else:
# groupid = None
insertRow = pd.DataFrame([[int(self.frameId), trackid, x , y, w, h, 0]],
columns = ['frame_id', 'track_id', 'x', 'y', 'w', 'h', 'group_id'])
self.img_message = self.img_message.append(insertRow, ignore_index=True)
                    # there are rows with a larger frame id, i.e. this frame is at the start or in the middle, so insert it in order
else:
                        bigerIdrow = int(chooseBigFrameId.index.tolist()[0]) # get the row number
                        # time = chooseBigFrameId.time.tolist()[0] # get the time
                        # nextframeid = chooseBigFrameId.frame_id.tolist()[0] # get the frame id
# curframetime = time - 39*(nextframeid - int(self.frameId))
x = list(self.bboxList[idx])[0] * self.rate
y = list(self.bboxList[idx])[1] * self.rate
w = (list(self.bboxList[idx])[2] - list(self.bboxList[idx])[0]) * self.rate
h = (list(self.bboxList[idx])[3] - list(self.bboxList[idx])[1]) * self.rate
# get_groupid = self.newgroupidentry.get()
# if get_groupid:
# groupid = int(get_groupid)
# else:
# groupid = None
insertRow = pd.DataFrame([[int(self.frameId), trackid, x , y, w, h, 0]],
columns = ['frame_id', 'track_id', 'x', 'y', 'w', 'h', 'group_id'])
above = self.img_message.loc[:bigerIdrow-1]
below = self.img_message.loc[bigerIdrow:]
self.img_message = above.append(insertRow, ignore_index=True).append(below, ignore_index=True)
                # if this frame already has bounding boxes, find the row with the largest track id in this frame and insert the new row right after it
else:
                    # find the row number of the row with the largest track id in this frame, then put the new detection box after that row ---------------------------------------
                    maxIdOfTheFrame = choosebyFrame.loc[:,"track_id"].max() # find the largest track id
                    choosebyFrameandMaxid = choosebyFrame[(choosebyFrame.track_id == maxIdOfTheFrame)] # select the row holding that largest track id
                    row = int(choosebyFrameandMaxid.index.tolist()[0]) # get the row number
                    # time = choosebyFrame.time.tolist()[0] # all boxes within one frame share the same time
# ---------------------------------------------------------------------------------------------------
x = list(self.bboxList[idx])[0] * self.rate
y = list(self.bboxList[idx])[1] * self.rate
w = (list(self.bboxList[idx])[2] - list(self.bboxList[idx])[0]) * self.rate
h = (list(self.bboxList[idx])[3] - list(self.bboxList[idx])[1]) * self.rate
# get_groupid = self.newgroupidentry.get()
# if get_groupid:
# groupid = int(get_groupid)
# else:
# groupid = None
insertRow = pd.DataFrame([[int(self.frameId), trackid, x , y, w, h, 0]],
columns = ['frame_id','track_id','x','y','w','h','group_id'])
above = self.img_message.loc[:row]
below = self.img_message.loc[row+1:]
self.img_message = above.append(insertRow,ignore_index=True).append(below,ignore_index=True)
            # if this bounding box already exists in the dataframe
else:
rowNumber = int(choosebyFrameandTrack.index.tolist()[0])
                self.img_message.iloc[[rowNumber], [2]] = list(self.bboxList[idx])[0] * self.rate # top-left x
                self.img_message.iloc[[rowNumber], [3]] = list(self.bboxList[idx])[1] * self.rate # top-left y
                self.img_message.iloc[[rowNumber], [4]] = (list(self.bboxList[idx])[2] - list(self.bboxList[idx])[0]) * self.rate # box width
                self.img_message.iloc[[rowNumber], [5]] = (list(self.bboxList[idx])[3] - list(self.bboxList[idx])[1]) * self.rate # box height
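    # Worked example (illustrative, added): coordinates are stored back in original-image
    # pixels, so with self.rate = 2.0 a canvas box from (10, 20) to (60, 100) is saved as
    # x = 20, y = 40, w = 100, h = 160 in img_message.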
"""
    Draw a single box around the detection boxes that belong to the same group.
"""
def draw_groupid(self):
self.objectListgroup.delete(0, len(self.bboxIdgroupList))
self.canvas.delete(self.bboxIdgroup)
self.bboxIdgroupList = []
if self.img_message.empty:
return
choosebyFrameId = self.img_message[(self.img_message.frame_id == int(self.frameId))]
list_choosebyFrameId = choosebyFrameId.index.tolist()
groupid = []
for i in list_choosebyFrameId:
temp = ((self.img_message.iloc[[i],[6]]).values[0])[0]
if temp!=0 and temp not in groupid:
groupid.append(temp)
for i in groupid:
choosebyFrameandGroup = self.img_message[(self.img_message.frame_id == int(self.frameId)) &
(self.img_message.group_id == int(i))]
if len(choosebyFrameandGroup) == 1:
continue
x1 = int(((choosebyFrameandGroup.iloc[[0],[2]]).values[0])[0] / self.rate)
y1 = int(((choosebyFrameandGroup.iloc[[0],[3]]).values[0])[0] / self.rate)
w1 = int(((choosebyFrameandGroup.iloc[[0],[4]]).values[0])[0] / self.rate)
h1 = int(((choosebyFrameandGroup.iloc[[0],[5]]).values[0])[0] / self.rate)
x2 = x1 + w1
y2 = y1 + h1
strtrackid = ''
for j in range(len(choosebyFrameandGroup)):
strtrackid = strtrackid + ' ' + str(((choosebyFrameandGroup.iloc[[j],[1]]).values[0])[0])
if (int(((choosebyFrameandGroup.iloc[[j],[2]]).values[0])[0] / self.rate) < x1):
x1 = int(((choosebyFrameandGroup.iloc[[j],[2]]).values[0])[0] / self.rate)
if (int(((choosebyFrameandGroup.iloc[[j],[3]]).values[0])[0] / self.rate) < y1):
y1 = int(((choosebyFrameandGroup.iloc[[j],[3]]).values[0])[0] / self.rate)
if (int(((choosebyFrameandGroup.iloc[[j],[2]]).values[0])[0] / self.rate)+int(((choosebyFrameandGroup.iloc[[j],[4]]).values[0])[0] / self.rate) > x2):
x2 = int(((choosebyFrameandGroup.iloc[[j],[2]]).values[0])[0] / self.rate)+int(((choosebyFrameandGroup.iloc[[j],[4]]).values[0])[0] / self.rate)
if (int(((choosebyFrameandGroup.iloc[[j],[3]]).values[0])[0] / self.rate)+int(((choosebyFrameandGroup.iloc[[j],[5]]).values[0])[0] / self.rate) > y2):
y2 = int(((choosebyFrameandGroup.iloc[[j],[3]]).values[0])[0] / self.rate)+int(((choosebyFrameandGroup.iloc[[j],[5]]).values[0])[0] / self.rate)
self.bboxIdgroup = self.canvas.create_rectangle(x1-5, y1-5, x2+5, y2+5, width=2, outline='yellow')
self.bboxIdgroupList.append(self.bboxIdgroup)
self.objectListgroup.insert(END, strtrackid + ': ' + str(i))
self.objectListgroup.itemconfig(len(self.bboxIdgroupList) - 1, fg='red')
self.boxgroupid[self.bboxIdgroup] = int(i)
# print(self.bboxIdgroupList)
"""
    Read the track ids to group and the group id from the entry boxes, update img_message, and draw the group box on the image.
"""
def button_groupid(self):
entryTrackId = self.entryTrackId.get()
entryGroupId = self.entryGroupId.get()
if entryTrackId and entryGroupId:
list_trackid = entryTrackId.split()
for i in list_trackid:
# choosebyTrack = self.img_message[(self.img_message.track_id == int(i))]
# list_choosebyTrack = choosebyTrack.index.tolist()
choosebyTrackFrame = self.img_message[(self.img_message.track_id == int(i)) &
(self.img_message.frame_id == int(self.frameId))]
list_choosebyTrack = choosebyTrackFrame.index.tolist()
for index in list_choosebyTrack:
self.img_message.iloc[[index],[6]] = int(entryGroupId)
self.draw_groupid()
else:
messagebox.showinfo(title='',message='TrackId & GroupId\ncannot be empty')
return
# print(self.img_message)
"""
    Get the click position when the mouse clicks on the image.
"""
def mouse_click(self, event):
# Check if Updating BBox
        # find_enclosed(x1, y1, x2, y2) -- returns the IDs of all canvas objects completely enclosed by the given rectangle
if self.canvas.find_enclosed(event.x - 5, event.y - 5, event.x + 5, event.y + 5):
self.EDIT = True
self.editPointId = int(self.canvas.find_enclosed(event.x - 5, event.y - 5, event.x + 5, event.y + 5)[0])
else:
self.EDIT = False
# Set the initial point
if self.EDIT:
idx = self.bboxPointList.index(self.editPointId)
self.editbboxId = self.bboxIdList[math.floor(idx/4.0)]
self.bboxId = self.editbboxId
pidx = self.bboxIdList.index(self.editbboxId)
pidx = pidx * 4
self.o1 = self.bboxPointList[pidx]
self.o2 = self.bboxPointList[pidx + 1]
self.o3 = self.bboxPointList[pidx + 2]
self.o4 = self.bboxPointList[pidx + 3]
a = 0
b = 0
c = 0
d = 0
if self.editPointId == self.o1:
a, b, c, d = self.canvas.coords(self.o3)
elif self.editPointId == self.o2:
a, b, c, d = self.canvas.coords(self.o4)
elif self.editPointId == self.o3:
a, b, c, d = self.canvas.coords(self.o1)
elif self.editPointId == self.o4:
a, b, c, d = self.canvas.coords(self.o2)
self.STATE['x'], self.STATE['y'] = int((a+c)/2), int((b+d)/2)
else:
self.STATE['x'], self.STATE['y'] = event.x, event.y
"""
    Mouse drag event: draw the rectangle box.
"""
def mouse_drag(self, event):
self.mouse_move(event)
# print(self.bboxId)
track_id = 0
if self.bboxId:
self.currBboxColor = self.canvas.itemcget(self.bboxId, "outline")
self.canvas.delete(self.bboxId)
self.canvas.delete(self.o1)
self.canvas.delete(self.o2)
self.canvas.delete(self.o3)
self.canvas.delete(self.o4)
track_id = self.boxtrackid[self.bboxId]
self.canvas.delete("box"+str(self.boxtrackid[self.bboxId]))
del self.boxtrackid[self.bboxId]
if self.EDIT:
self.bboxId = self.canvas.create_rectangle(self.STATE['x'], self.STATE['y'],
event.x, event.y,
width=2,
outline=self.currBboxColor)
self.boxtrackid[self.bboxId] = track_id
else:
self.currBboxColor = config.COLORS[len(self.bboxList) % len(config.COLORS)]
self.bboxId = self.canvas.create_rectangle(self.STATE['x'], self.STATE['y'],
event.x, event.y,
width=2,
outline=self.currBboxColor)
self.boxtrackid[self.bboxId] = track_id
"""
    Show a magnified view of the area under the mouse on the left side of the window.
"""
def mouse_move(self, event):
self.disp.config(text='x: %d, y: %d' % (event.x*self.rate, event.y*self.rate))
self.zoom_view(event) # 放大局部
if self.tkimg:
# Horizontal and Vertical Line for precision
if self.hl:
self.canvas.delete(self.hl)
self.hl = self.canvas.create_line(0, event.y, self.tkimg.width(), event.y, width=2)
if self.vl:
self.canvas.delete(self.vl)
self.vl = self.canvas.create_line(event.x, 0, event.x, self.tkimg.height(), width=2)
"""
    On mouse release: if this rectangle already exists in the image, update its corner positions; if it is a new rectangle, show it in the list at the top right.
"""
def mouse_release(self, event):
try:
labelidx = self.labelListBox.curselection()
self.currLabel = self.labelListBox.get(labelidx)
except:
pass
if self.EDIT:
self.update_bbox()
self.EDIT = False
# print(self.STATE['x'],self.STATE['y'],event.x,event.y)
x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)
y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)
var = ''
if x1 != x2:
            # if it is not just a point, i.e. an actual rectangle was drawn,
            # store the coordinates of its top-left and bottom-right corners
self.bboxList.append((x1, y1, x2, y2))
o1 = self.canvas.create_oval(x1 - 3, y1 - 3, x1 + 3, y1 + 3, fill="red")
o2 = self.canvas.create_oval(x2 - 3, y1 - 3, x2 + 3, y1 + 3, fill="red")
o3 = self.canvas.create_oval(x2 - 3, y2 - 3, x2 + 3, y2 + 3, fill="red")
o4 = self.canvas.create_oval(x1 - 3, y2 - 3, x1 + 3, y2 + 3, fill="red")
self.bboxPointList.append(o1)
self.bboxPointList.append(o2)
self.bboxPointList.append(o3)
self.bboxPointList.append(o4)
self.bboxIdList.append(self.bboxId)
self.objectLabelList.append(str(self.currLabel))
if self.boxtrackid.__contains__(self.bboxId) and self.boxtrackid[self.bboxId]!=0 :
                # if this bbox id already exists in the boxtrackid dict,
                # draw the track id at the top-left corner of the box
var = str(self.boxtrackid[self.bboxId])
self.canvas.create_text(x1,y1-14,text = var,fill='red',font=('Arial',18),tags = "box" + var)
else:
                # if this bbox id is not in the boxtrackid dict,
                # get a new id from the newtrackid entry widget
newtrackid = self.newtrackid.get()
if newtrackid:
                    # if this id is not empty,
                    # store the bbox id and its track id in the boxtrackid dict
                    # and draw the track id at the top-left corner
self.boxtrackid[self.bboxId]=newtrackid
var = str(newtrackid)
self.canvas.create_text(x1,y1-14,text = var,fill='red',font=('Arial',18),tags = "box" + var)
else:
                    # if the id obtained is empty
                    # (this situation should not happen),
                    # delete the rectangle, track id, etc. that were already drawn, and return
self.boxtrackid[self.bboxId]= newtrackid
self.bboxId = None
self.objectListBox.insert(END, '(%d, %d) -> (%d, %d)' % (x1*self.rate, y1*self.rate, x2*self.rate, y2*self.rate) + ': ' + var)
self.newtrackid.delete(0,END)
self.objectListBox.itemconfig(len(self.bboxIdList) - 1, fg=self.currBboxColor)
self.currLabel = None
self.del_bbox()
self.save()
# print(self.boxtrackid)
return
        # if the bbox id exists or the new id is not empty, store the data and show it in the listbox at the top right
self.bboxId = None
self.objectListBox.insert(END, '(%d, %d) -> (%d, %d)' % (x1*self.rate, y1*self.rate, x2*self.rate, y2*self.rate) + ': ' + var)
self.newtrackid.delete(0,END)
self.objectListBox.itemconfig(len(self.bboxIdList) - 1, fg=self.currBboxColor)
self.currLabel = None
self.save()
self.draw_groupid()
# print(self.boxtrackid)
def zoom_view(self, event):
try:
if self.zoomImgId:
self.zoomcanvas.delete(self.zoomImgId)
self.zoomImg = self.img.copy()
self.zoomImgCrop = self.zoomImg.crop(((event.x - 25), (event.y - 25), (event.x + 25), (event.y + 25)))
self.zoomImgCrop = self.zoomImgCrop.resize((150, 150))
self.tkZoomImg = ImageTk.PhotoImage(self.zoomImgCrop)
self.zoomImgId = self.zoomcanvas.create_image(0, 0, image=self.tkZoomImg, anchor=NW)
hl = self.zoomcanvas.create_line(0, 75, 150, 75, width=2)
vl = self.zoomcanvas.create_line(75, 0, 75, 150, width=2)
except:
pass
def update_bbox(self):
idx = self.bboxIdList.index(self.editbboxId)
self.bboxIdList.pop(idx)
self.bboxList.pop(idx)
self.objectListBox.delete(idx)
self.currLabel = self.objectLabelList[idx]
self.objectLabelList.pop(idx)
idx = idx*4
self.canvas.delete(self.bboxPointList[idx])
self.canvas.delete(self.bboxPointList[idx+1])
self.canvas.delete(self.bboxPointList[idx+2])
self.canvas.delete(self.bboxPointList[idx+3])
self.bboxPointList.pop(idx)
self.bboxPointList.pop(idx)
self.bboxPointList.pop(idx)
self.bboxPointList.pop(idx)
def cancel_bbox(self, event):
if self.STATE['click'] == 1:
if self.bboxId:
self.canvas.delete(self.bboxId)
self.bboxId = None
self.STATE['click'] = 0
"""
| |
map_root['id'], map_version_id, layer_id], GetXmlFeatures)
except (SyntaxError, urlfetch.DownloadError):
pass
return features
def SetDistanceOnFeatures(features, center):
for f in features:
f.distance = EarthDistance(center, f.location)
def FilterFeatures(features, radius, max_count):
# TODO(kpy): A top-k selection algorithm could be faster than O(n log n)
# sort. It seems likely to me that the gain would be small enough that it's
# not worth the code complexity, but it wouldn't hurt to check my hunch.
features.sort() # sorts by distance; see Feature.__cmp__
features[:] = [f for f in features[:max_count] if f.distance < radius]
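# A minimal sketch of the top-k selection mentioned in the TODO inside FilterFeatures above
# (added for illustration; not part of the original module and not called anywhere).
def FilterFeaturesTopK(features, radius, max_count):
  """Selects the max_count nearest features within radius using a heap instead of a full
  sort: heapq.nsmallest is O(n log k) rather than O(n log n). Relies on Feature.__cmp__
  ordering by distance, like FilterFeatures."""
  import heapq
  features[:] = [f for f in heapq.nsmallest(max_count, features) if f.distance < radius]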
def GetFilteredFeatures(map_root, map_version_id, topic_id, request,
center, radius, max_count):
"""Gets a list of the Feature objects for a topic within the given circle."""
def GetFromDatastore():
features = GetFeatures(map_root, map_version_id, topic_id, request, center)
if center:
SetDistanceOnFeatures(features, center)
FilterFeatures(features, radius, max_count)
# For the features that were selected for display, fetch additional details
# that we avoid retrieving for unfiltered results due to latency concerns
SetDetailsOnFilteredFeatures(features)
return features
return FILTERED_FEATURES_CACHE.Get(
[map_root['id'], map_version_id, topic_id,
center and RoundGeoPt(center), radius, max_count], GetFromDatastore)
def SetDetailsOnFilteredFeatures(features):
# TODO(user): consider fetching details for each feature in parallel
for f in features:
if f.layer_type == maproot.LayerType.GOOGLE_PLACES:
place_details = GetGooglePlaceDetails(f.gplace_id)
f.description_html = GetGooglePlaceDescriptionHtml(place_details)
f.html_attrs = GetGooglePlaceHtmlAttributions(place_details)
def GetAnswersAndReports(map_id, topic_id, location, radius):
"""Gets information on recent crowd reports for a given topic and location.
Args:
map_id: The map ID.
topic_id: The topic ID.
location: The location to search near, as an ndb.GeoPt.
radius: Radius in metres.
Returns:
A 3-tuple (latest_answers, answer_times, report_dicts) where latest_answers
is a dictionary {qid: latest_answer} containing the latest answer for each
question; answer_times is a dictionary {qid: effective_time} giving the
effective time of the latest answer for each question; and report_dicts is
an array of dictionaries representing the latest 10 crowd reports. Each
dictionary in report_dicts has the answers for a report keyed by qid, as
well as two special keys: '_effective' for the effective time and '_id'
for the report ID.
"""
full_topic_id = map_id + '.' + topic_id
answers, answer_times, report_dicts = {}, {}, []
now = datetime.datetime.utcnow()
# Assume that all the most recently effective still-relevant answers are
# contained among the 100 most recently updated CrowdReport entities.
for report in model.CrowdReport.GetByLocation(
location, {full_topic_id: radius}, 100, hidden=False):
if now - report.effective < MAX_ANSWER_AGE:
report_dict = {}
# The report's overall comment is stored under the special qid '_text'.
for question_id, answer in report.answers.items() + [
(full_topic_id + '._text', report.text)]:
tid, qid = question_id.rsplit('.', 1)
if tid == full_topic_id:
report_dict[qid] = answer
if answer or answer == 0: # non-empty answer
if qid not in answer_times or report.effective > answer_times[qid]:
answers[qid] = answer
answer_times[qid] = report.effective
report_dicts.append(
dict(report_dict, _effective=report.effective, _id=report.id))
report_dicts.sort(key=lambda report_dict: report_dict['_effective'])
report_dicts.reverse()
return answers, answer_times, report_dicts[:REPORTS_PER_FEATURE]
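# Illustrative example of the return value (values are invented): for a topic with a
# CHOICE question 'q1', the call might yield
#   answers      == {'q1': 'yes', '_text': 'Shelter still has room'}
#   answer_times == {'q1': <datetime of the newest 'q1' answer>, '_text': <datetime>}
#   report_dicts == [{'q1': 'yes', '_text': '...', '_effective': <datetime>, '_id': '...'}, ...]
# with report_dicts ordered newest-first and truncated to REPORTS_PER_FEATURE entries.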
def GetLegibleTextColor(background_color):
"""Decides whether text should be black or white over a given color."""
rgb = background_color.strip('#')
if len(rgb) == 3:
rgb = rgb[0]*2 + rgb[1]*2 + rgb[2]*2
red, green, blue = int(rgb[0:2], 16), int(rgb[2:4], 16), int(rgb[4:6], 16)
luminance = red * 0.299 + green * 0.587 + blue * 0.114
return luminance > 128 and '#000' or '#fff'
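# Worked example (illustrative): for background_color '#ffd966' the weighted luminance is
# 255*0.299 + 217*0.587 + 102*0.114 ~= 76.2 + 127.4 + 11.6 ~= 215, which exceeds 128, so
# GetLegibleTextColor('#ffd966') returns '#000' (black text on a light background).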
def GetFeatureAttributions(features):
"""Builds a list of all unique html attributions for given features."""
# Skip all the duplicates when joining attributions into one list
html_attrs = set()
for f in features:
for attr in f.html_attrs:
html_attrs.add(attr)
return list(html_attrs)
def SetAnswersAndReportsOnFeatures(features, map_root, topic_id, qids):
"""Sets 'status_color', 'answers', and 'answer_text' on the given Features."""
map_id = map_root.get('id') or ''
topic = GetTopic(map_root, topic_id) or {}
radius = topic.get('cluster_radius', 100)
questions_by_id = {q['id']: q for q in topic.get('questions', [])}
choices_by_id = {(q['id'], c['id']): c
for q in topic.get('questions', [])
for c in q.get('choices', [])}
def FormatAnswers(answers):
"""Formats a set of answers into a text summary."""
answer_texts = []
for qid in qids:
question = questions_by_id.get(qid)
answer = answers.get(qid)
prefix = question and question.get('title') or ''
prefix += ': ' if prefix else ''
if question:
if question.get('type') == 'CHOICE':
choice = choices_by_id.get((qid, answer))
if choice:
label = choice.get('label') or prefix + choice.get('title', '')
answer_texts.append(label + '.')
elif answer or answer == 0:
answer_texts.append(prefix + str(answer) + '.')
return ' '.join(answer_texts)
def GetStatusColor(answers):
"""Determines the indicator color from the answer to the first question."""
qid = qids and qids[0] or ''
first_question = questions_by_id.get(qid, {})
if first_question.get('type') == 'CHOICE':
choice = choices_by_id.get((qid, answers.get(qid)))
return choice and choice.get('color')
if topic.get('crowd_enabled') and qids:
for f in features:
# Even though we use the radius to get the latest answers, the cache key
# omits radius so that InvalidateReportCache can quickly delete cache
# entries without fetching from the datastore. So, when a cluster radius
# is changed and its map is republished, affected entries in the answer
# cache will be stale until they expire. This seems like a good tradeoff
# because (a) changing a cluster radius in a published map is rare (less
# than once per map); (b) the answer cache has a short TTL (15 s); and
# (c) posting crowd reports is frequent (many times per day).
answers, answer_times, report_dicts = REPORT_CACHE.Get(
[map_id, topic_id, RoundGeoPt(f.location)],
lambda: GetAnswersAndReports(map_id, topic_id, f.location, radius))
f.answers = answers
f.answer_text = FormatAnswers(answers)
if answer_times:
# Convert datetimes to string descriptions like "5 min ago".
f.answer_time = utils.ShortAge(max(answer_times.values()))
f.answer_source = 'Crisis Map user'
f.status_color = GetStatusColor(answers)
# Include a few recent reports.
f.reports = [{'answer_summary': FormatAnswers(report),
'effective': utils.ShortAge(report['_effective']),
'id': report['_id'],
'text': '_text' in qids and report['_text'] or '',
'status_color': GetStatusColor(report)}
for report in report_dicts]
def InvalidateReportCache(full_topic_ids, location):
"""Deletes cached answers affected by a new report at a given location."""
for full_topic_id in full_topic_ids:
if '.' in full_topic_id:
map_id, topic_id = full_topic_id.split('.')
REPORT_CACHE.Delete([map_id, topic_id, RoundGeoPt(location)])
def RemoveParamsFromUrl(url, *params):
"""Removes specified query parameters from a given URL."""
base, query = (url.split('?') + [''])[:2]
pairs = cgi.parse_qsl(query)
query = urllib.urlencode([(k, v) for k, v in pairs if k not in params])
# Returned URL always ends in ? or a query parameter, so that it's always
# safe to add a parameter by appending '&name=value'.
return base + '?' + query
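# Worked example (illustrative): RemoveParamsFromUrl('http://host/card?n=5&ll=1,2&r=8', 'll', 'n')
# returns 'http://host/card?r=8'; because the result always ends in '?' or a parameter,
# callers can safely append '&name=value'.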
def GetGeoJson(features, include_descriptions):
"""Converts a list of Feature instances to a GeoJSON object."""
return {
'type': 'FeatureCollection',
'features': [{
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [f.location.lon, f.location.lat]
},
'properties': {
'name': f.name,
'description_html':
f.description_html if include_descriptions else None,
'distance': f.distance,
'distance_mi': RoundDistance(f.distance_mi),
'distance_km': RoundDistance(f.distance_km),
'layer_id': f.layer_id,
'status_color': f.status_color,
'answer_text': f.answer_text,
'answer_time': f.answer_time,
'answer_source': f.answer_source,
'answers': f.answers,
'reports': f.reports
}
} for f in features]
}
def RoundDistance(distance):
"""Round distances above 10 (mi/km) to the closest integer."""
return math.ceil(distance) if distance > 10 else distance
def RenderFooter(items, html_attrs=None):
"""Renders the card footer as HTML and appends source attributions at the end.
Args:
items: A sequence of items, where each item is (a) a string or (b) a pair
[url, text] to be rendered as a hyperlink that opens in a new window.
html_attrs: A list of html strings with links to attributions.
Returns:
A Unicode string of safe HTML containing only text and <a> tags.
"""
results = []
for item in items:
if isinstance(item, (str, unicode)):
results.append(kmlify.HtmlEscape(item))
elif len(item) == 2:
url, text = item
scheme, _, _, _, _ = urlparse.urlsplit(url)
if scheme in ['http', 'https']: # accept only safe schemes
results.append('<a href="%s" target="_blank">%s</a>' % (
kmlify.HtmlEscape(url), kmlify.HtmlEscape(text)))
for html_attr in html_attrs or []:
# Attributions html comes from Google Places API or built on the fly
# inside card.py, so no sanitization here
results.append(html_attr)
return ' '.join(results)
class CardBase(base_handler.BaseHandler):
"""Card rendering code common to all the card handlers below.
For all these card handlers, the map and topic are determined from the
URL path (the map is specified by ID or label, and the topic ID is either
explicit in the path or assumed to be the first existing topic for the map).
Use these query parameters to customize the resulting card:
- n: Maximum number of items to show.
- ll: Geolocation of the center point to search near (in lat,lon format).
- | |
import json
import struct
import os
import sys
import base64
import math
import maya.cmds
import maya.OpenMaya as OpenMaya
import shutil
import time
try:
from PySide.QtGui import QImage, QColor, qRed, qGreen, qBlue, QImageWriter
except ImportError:
from PySide2.QtGui import QImage, QColor, qRed, qGreen, qBlue, QImageWriter
# TODO don't export hidden nodes?
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print '%r (%r, %r) %2.2f sec' % \
(method.__name__, args, kw, te-ts)
return result
return timed
class ResourceFormats(object):
EMBEDDED = 'embedded'
SOURCE = 'source'
BIN = 'bin'
class AnimOptions(object):
NONE = 'none'
KEYED = 'keyed'
BAKED = 'baked'
class ClassPropertyDescriptor(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
raise AttributeError("can't set attribute")
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
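# Hypothetical usage sketch (the class below is illustrative): classproperty exposes a
# computed, read-only value on the class itself, which is how ExportSettings below derives
# out_bin/out_basename/out_dir from out_file without needing an instance.
#
# class Example(object):
#     prefix = 'scene'
#     @classproperty
#     def bin_name(cls):
#         return cls.prefix + '.bin'
#
# Example.bin_name   # -> 'scene.bin', accessed on the class, no instance required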
class ExportSettings(object):
file_format = 'gltf'
resource_format = 'bin'
anim = 'keyed'
vflip=True
out_file = ''
_out_dir = ''
_out_basename = ''
@classmethod
def set_defaults(cls):
cls.file_format = 'glb'
cls.resource_format = 'bin'
cls.anim = 'keyed'
cls.vflip=True
cls.out_file = ''
@classproperty
def out_bin(cls):
return cls.out_basename + '.bin'
@classproperty
def out_basename(cls):
base, ext = os.path.splitext(cls.out_file)
cls._out_basename = os.path.basename(base)
return cls._out_basename
@classproperty
def out_dir(cls):
cls._out_dir = os.path.dirname(cls.out_file)
return cls._out_dir
class GLTFExporter(object):
# TODO: Add VFlip option
def __init__(self, file_path, resource_format='bin', anim='keyed', vflip=True):
self.output = {
"asset": {
"version": "2.0",
"generator": "maya-glTFExport",
}
}
ExportSettings.set_defaults()
Scene.set_defaults()
Node.set_defaults()
Mesh.set_defaults()
Material.set_defaults()
Camera.set_defaults()
Animation.set_defaults()
Image.set_defaults()
Texture.set_defaults()
Buffer.set_defaults()
BufferView.set_defaults()
Accessor.set_defaults()
ExportSettings.out_file = file_path
ExportSettings.resource_format = resource_format
ExportSettings.anim = anim
ExportSettings.vflip = vflip
def run(self):
if not ExportSettings.out_file:
ExportSettings.out_file = maya.cmds.fileDialog2(caption="Specify a name for the file to export.",
fileMode=0)[0]
basename, ext = os.path.splitext(ExportSettings.out_file)
if not ext in ['.glb', '.gltf']:
raise Exception("Output file must have gltf or glb extension.")
ExportSettings.file_format = ext[1:]
if not os.path.exists(ExportSettings.out_dir):
os.makedirs(ExportSettings.out_dir)
# TODO: validate file_path and type
scene = Scene()
# we only support exporting single scenes,
# so the first scene is the active scene
self.output['scene'] = 0
if Scene.instances:
self.output['scenes'] = Scene.instances
if Node.instances:
self.output['nodes'] = Node.instances
if Mesh.instances:
self.output['meshes'] = Mesh.instances
if Camera.instances:
self.output['cameras'] = Camera.instances
if Material.instances:
self.output['materials'] = Material.instances
if Image.instances:
self.output['images'] = Image.instances
if Texture.instances:
self.output['textures'] = Texture.instances
if Animation.instances and Animation.instances[0].channels:
self.output['animations'] = Animation.instances
if Buffer.instances:
self.output['buffers'] = Buffer.instances
if BufferView.instances:
self.output['bufferViews'] = BufferView.instances
if Accessor.instances:
self.output['accessors'] = Accessor.instances
if not Scene.instances[0].nodes:
raise RuntimeError('Scene is empty. No file will be exported.')
if ExportSettings.file_format == 'glb':
json_str = json.dumps(self.output, sort_keys=True, separators=(',', ':'), cls=GLTFEncoder)
json_bin = bytearray(json_str.encode(encoding='UTF-8'))
# 4-byte-aligned
aligned_len = (len(json_bin) + 3) & ~3
for i in range(aligned_len - len(json_bin)):
json_bin.extend(b' ')
bin_out = bytearray()
file_length = 12 + 8 + len(json_bin)
if Buffer.instances:
buffer = Buffer.instances[0]
file_length += 8 + len(buffer)
# Magic number
bin_out.extend(struct.pack('<I', 0x46546C67)) # glTF in binary
bin_out.extend(struct.pack('<I', 2)) # version number
bin_out.extend(struct.pack('<I', file_length))
bin_out.extend(struct.pack('<I', len(json_bin)))
bin_out.extend(struct.pack('<I', 0x4E4F534A)) # JSON in binary
bin_out += json_bin
if Buffer.instances:
bin_out.extend(struct.pack('<I', len(buffer)))
bin_out.extend(struct.pack('<I', 0x004E4942)) # BIN in binary
bin_out += buffer.byte_str
with open(ExportSettings.out_file, 'wb') as outfile:
outfile.write(bin_out)
else:
with open(ExportSettings.out_file, 'w') as outfile:
json.dump(self.output, outfile, cls=GLTFEncoder)
if (ExportSettings.resource_format == ResourceFormats.BIN
and Buffer.instances):
buffer = Buffer.instances[0]
with open(ExportSettings.out_dir + "/" + buffer.uri, 'wb') as outfile:
outfile.write(buffer.byte_str)
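# Hedged helper sketch (not used by the exporter above): read back the 12-byte
# GLB header that run() writes, to sanity-check the magic number, version and
# declared total length; 0x46546C67 is the ASCII magic "glTF".
def _read_glb_header(path):
    with open(path, 'rb') as f:
        magic, version, length = struct.unpack('<III', f.read(12))
    return magic == 0x46546C67, version, length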
def export(file_path=None, resource_format='bin', anim='keyed', vflip=True, selection=False):
GLTFExporter(file_path, resource_format, anim, vflip).run()
class GLTFEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ExportItem):
return obj.to_json()
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
'''
for key in obj.keys():
if key not in ignored_keys:
obj[key] = [item.to_json() for item in obj[key]]
return obj
'''
class ExportItem(object):
def __init__(self, name=None):
self.name = name
class Scene(ExportItem):
'''Needs to add itself to scenes'''
instances = []
maya_nodes = None
@classmethod
def set_defaults(cls):
cls.instances = []
def __init__(self, name="defaultScene", maya_nodes=None):
super(Scene, self).__init__(name=name)
self.index = len(Scene.instances)
Scene.instances.append(self)
anim = None
if not ExportSettings.anim == AnimOptions.NONE:
anim = Animation('defaultAnimation')
self.nodes = []
if maya_nodes:
self.maya_nodes = maya_nodes
else:
self.maya_nodes = maya.cmds.ls(assemblies=True, long=True)
for transform in self.maya_nodes:
if transform not in Camera.default_cameras:
self.nodes.append(Node(transform, anim))
def to_json(self):
scene_def = {"name":self.name, "nodes":[node.index for node in self.nodes]}
return scene_def
class Node(ExportItem):
'''Needs to add itself to nodes list, possibly node children, and possibly scene'''
instances = []
maya_node = None
matrix = None
translation = None
rotation = None
scale = None
camera = None
mesh = None
@classmethod
def set_defaults(cls):
cls.instances = []
def __init__(self, maya_node, anim=None):
self.maya_node = maya_node
name = maya.cmds.ls(maya_node, shortNames=True)[0]
super(Node, self).__init__(name=name)
self.index = len(Node.instances)
Node.instances.append(self)
self.children = []
#self.matrix = maya.cmds.xform(self.maya_node, query=True, matrix=True)
self.translation = maya.cmds.getAttr(self.maya_node+'.translate')[0]
self.rotation = self._get_rotation_quaternion()
self.scale = maya.cmds.getAttr(self.maya_node+'.scale')[0]
if anim:
self._get_animation(anim)
maya_children = maya.cmds.listRelatives(self.maya_node, children=True, fullPath=True)
if maya_children:
for child in maya_children:
childType = maya.cmds.objectType(child)
if childType == 'mesh':
mesh = Mesh(child)
self.mesh = mesh
elif childType == 'camera':
if maya.cmds.camera(child, query=True, orthographic=True):
cam = OrthographicCamera(child)
else:
cam = PerspectiveCamera(child)
self.camera = cam
elif childType == 'transform':
node = Node(child, anim)
self.children.append(node)
def _get_animation(self, anim):
if maya.cmds.keyframe(self.maya_node, attribute='translate', query=True, keyframeCount=True):
translation_channel = AnimationChannel(self, 'translation')
anim.add_channel(translation_channel)
anim.add_sampler(translation_channel.sampler)
if maya.cmds.keyframe(self.maya_node, attribute='rotate', query=True, keyframeCount=True):
rotation_channel = AnimationChannel(self, 'rotation')
anim.add_channel(rotation_channel)
anim.add_sampler(rotation_channel.sampler)
if maya.cmds.keyframe(self.maya_node, attribute='scale', query=True, keyframeCount=True):
scale_channel = AnimationChannel(self, 'scale')
anim.add_channel(scale_channel)
anim.add_sampler(scale_channel.sampler)
def _get_rotation_quaternion(self):
obj=OpenMaya.MObject()
        # make an MSelectionList object
sel_list=OpenMaya.MSelectionList()
        # add the transform node to it; this could also come from a function
        # argument or from the user's current selection
sel_list.add(self.maya_node)
#fill in the MObject
sel_list.getDependNode(0,obj)
        # check if it's a transform
if (obj.hasFn(OpenMaya.MFn.kTransform)):
quat = OpenMaya.MQuaternion()
            # then we can attach it to a transform function set (Fn);
            # an Fn is basically the collection of functions for a given object type
xform=OpenMaya.MFnTransform(obj)
xform.getRotation(quat)
            # glTF requires a normalized quaternion
quat.normalizeIt()
py_quat = [quat[x] for x in range(4)]
return py_quat
def to_json(self):
node_def = {}
if self.matrix:
node_def['matrix'] = self.matrix
if self.translation:
node_def['translation'] = self.translation
if self.rotation:
node_def['rotation'] = self.rotation
if self.scale:
node_def['scale'] = self.scale
if self.children:
node_def['children'] = [child.index for child in self.children]
if self.mesh:
node_def['mesh'] = self.mesh.index
if self.camera:
node_def['camera'] = self.camera.index
return node_def
class Mesh(ExportItem):
    '''Needs to add itself to a node and its accessors to the meshes list'''
instances = []
maya_node = None
material = None
indices_accessor = None
position_accessor = None
normal_accessor = None
texcoord0_accessor = None
@classmethod
def set_defaults(cls):
cls.instances = []
def __init__(self, maya_node):
self.maya_node = maya_node
name = maya.cmds.ls(maya_node, shortNames=True)[0]
super(Mesh, self).__init__(name=name)
self.index = len(Mesh.instances)
Mesh.instances.append(self)
self._getMeshData()
self._getMaterial()
def to_json(self):
mesh_def = {"primitives" : [ {
"mode": 4,
"attributes" : {
"POSITION" : self.position_accessor.index,
"NORMAL": self.normal_accessor.index ,
"TEXCOORD_0": self.texcoord0_accessor.index
},
"indices" : self.indices_accessor.index,
"material" : self.material.index
} ]
}
return mesh_def
def _getMaterial(self):
shadingGrps = maya.cmds.listConnections(self.maya_node,type='shadingEngine')
        # glTF only allows one material per mesh, so we'll just grab the first one.
shader = maya.cmds.ls(maya.cmds.listConnections(shadingGrps),materials=True)[0]
self.material = Material(shader)
@timeit
def _getMeshData(self):
maya.cmds.select(self.maya_node)
selList = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList(selList)
meshPath = OpenMaya.MDagPath()
selList.getDagPath(0, meshPath)
meshIt = OpenMaya.MItMeshPolygon(meshPath)
meshFn = OpenMaya.MFnMesh(meshPath)
do_color = False
if meshFn.numColorSets():
do_color=True
print "doing color"
indices = []
positions = [None]*meshFn.numVertices()
normals = [None]*meshFn.numVertices()
all_colors = [None]*meshFn.numVertices()
uvs = [None]*meshFn.numVertices()
ids = OpenMaya.MIntArray()
tracker = {}
points = OpenMaya.MPointArray()
if do_color:
vertexColorList = OpenMaya.MColorArray()
meshFn.getFaceVertexColors(vertexColorList)
#meshIt.hasUVs()
normal = OpenMaya.MVector()
face_verts = OpenMaya.MIntArray()
polyNormals = OpenMaya.MFloatVectorArray()
meshFn.getNormals(polyNormals)
uv_util = OpenMaya.MScriptUtil()
uv_util.createFromList([0,0], 2 )
uv_ptr = uv_util.asFloat2Ptr()
bbox = BoundingBox()
while not meshIt.isDone():
meshIt.getTriangles(points, ids)
#meshIt.getUVs(u_list, v_list)
meshIt.getVertices(face_verts)
for x in range(0, ids.length()):
indices.append(ids[x])
pos = (points[x].x, points[x].y, points[x].z)
local_vert_id = getFaceVertexIndex(face_verts, ids[x])
#print "local vert:", local_vert_id
norm_id = meshIt.normalIndex(local_vert_id)
#meshIt.getNormal(local_vert_id, normal)
normal = polyNormals[norm_id]
norm = (normal.x, normal.y, normal.z)
#print norm
meshIt.getUV(local_vert_id, uv_ptr, meshFn.currentUVSetName())
u = uv_util.getFloat2ArrayItem( uv_ptr, 0, 0 )
v = uv_util.getFloat2ArrayItem( uv_ptr, 0, 1 )
# flip V for openGL
                # This fails if the UV is exactly on the border (e.g. (0.5, 1))
# but we really don't know what udim it's in for that case.
if ExportSettings.vflip:
v = int(v) + (1 - (v % 1))
                uv = (u, v)
# Repository: daukantas/REACT-CODE-_ANALYZING-
# -*- coding: utf-8 -*-
import numpy as np
from scipy.sparse import csr_matrix
import networkx as nx
from scipy.optimize import brentq,bisect
from sys import version_info
from scipy import __version__ as vers_scipy
from warnings import warn
class NetworkFormatError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ThresholdError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def text_to_net (fname, **kwargs):
if 'separator' in kwargs:
separator = kwargs['separator']
else:
separator = '\t'
if 'directed' in kwargs:
directed = kwargs['directed']
else:
directed = False
# Read file
raw = open(fname).readlines()
# Assess whether the graph is weighted
if len(raw[0].strip().split(separator)) > 3:
isw = True
else:
isw = False
# Go through the file to compute the number of nodes and the period.
snodes = set()
stimes = set()
for line in raw:
azz = line.strip().split(separator)
stimes.add(int(azz[-1]))
snodes.add(azz[0])
snodes.add(azz[1])
N = len(snodes)
T = max(stimes) + 1
# Create empty graph objects.
if directed:
lG = [nx.DiGraph() for t in range(T)]
else:
lG = [nx.Graph() for t in range(T)]
# Fill with edges
if isw:
for line in raw:
azz = line.strip().split(separator)
x, y, t = azz[0], azz[1], int(azz[-1])
lG[t].add_edge(x, y, weight = int(azz[2]))
else:
for line in raw:
azz = line.strip().split(separator)
x,y,t = azz[0],azz[1],int(azz[-1])
lG[t].add_edge(x,y)
# Fill with all nodes
snodes = list(snodes)
for G in lG:
G.add_nodes_from(snodes)
# Return list of graphs and number of nodes
return (lG, N, T)
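# Hedged usage sketch for text_to_net (made-up data): each line of the input is
# "node<sep>node[<sep>weight]<sep>time", so the three edges below give N = 3
# nodes and T = 3 snapshots.
def _text_to_net_example():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.tsv', delete=False)
    tmp.write('a\tb\t0\nb\tc\t1\na\tc\t2\n')
    tmp.close()
    lG, N, T = text_to_net(tmp.name, separator='\t', directed=False)
    return N, T  # expected: (3, 3)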
# From networkx Graphs, create adjacency matrices as sparse CSR objects.
def graph_to_csr (lG,dtype):
lAs = []
# Get nodelist from first graph, since they all contain all nodes.
lnodes = set(lG[0].nodes())
for G in lG:
A = nx.adjacency_matrix(G, nodelist=lnodes)
lAs.append(csr_matrix(A, dtype=dtype))
return lAs
# class for handling the temporal network
class tnet:
# Class constructor. Additional optional keywords: directed (bool), separator (str).
def __init__ (self, myn, period=None, dtype='float64', **kwargs ):
self.lG = None
        self.T = period
self.N = None
self.lA = None
self.dtype = np.float64
self.weighted = None
# If dtype is different from 'float64', then np.float128 is set
if dtype != 'float64':
self.dtype = np.float128
# if: Path to file.
if type(myn) == str:
self.lG, self.N, buT = text_to_net(myn, **kwargs )
if self.T == None:
self.T = buT
else:
assert self.T <= buT, 'Specified period is longer than dataset.'
# else: list of graphs
else:
if not ( str(type(myn[0])) == "<class 'networkx.classes.graph.Graph'>" or str(type(myn[0])) == "<class 'networkx.classes.digraph.DiGraph'>" ): # networkx graph
raise NetworkFormatError('Unsupported format: could not find neither networkx Graph nor DiGraph objects.')
self.lG = myn
if self.T == None:
self.T = len(self.lG)
else:
assert self.T <= len(self.lG), 'Specified period is longer than dataset.'
# Fill all graphs with all nodes
snodes = set()
for G in self.lG:
snodes |= set(G.nodes())
if self.N == None:
self.N = len(snodes)
snodes = list(snodes)
for G in self.lG:
G.add_nodes_from(snodes)
# Check if weighted.
ct = 0
while len(self.lG[ct].edges()) == 0:
ct += 1
if 'weight' in self.lG[ct].edges(data=True)[0][2] :
self.weighted = True
else:
self.weighted = False
def getMatrices (self):
if self.lA == None:
self.lA = graph_to_csr(self.lG, self.dtype)
return self.lA
def __str__ (self):
spoutp = 'N = %d; T = %d\n' % (self.N,self.T)
spoutp += 'data type : %s\n' % str(self.dtype)
# Directed.
if str(type(self.lG[0])) == "<class 'networkx.classes.graph.Graph'>":
spu = 'False'
else:
spu = 'True'
spoutp += 'directed : ' + spu + '\n'
# Weighted.
t = 0
while len(self.lG[t].edges()) == 0:
t += 1
if self.weighted:
spu = 'True'
else:
spu = 'False'
spoutp += 'weighted : ' + spu + '\n'
#
# Whether matrices are loaded.
if self.lA == None:
spu = 'not loaded'
else:
spu = 'loaded'
spoutp += 'adjacency matrices : ' + spu + '\n'
return spoutp
def __repr__ (self):
return self.__str__()
# Now functions related to threshold computation.
# Compute the spectral radius using the modified power method.
def power_spectral_radius(ladda, mu, lA, N, T, valumax=1000, stabint=10, tolerance=0.00001):
rootT = 1.0 / float(T)
# Initialize
leval = []
v0 = 0.9*np.random.random(N) + 0.1
v = v0.copy()
vold = v.copy()
interrupt = False # When convergence is reached, it becomes True.
itmax = T * valumax
for k in range(itmax):
# Perform iteration:
v = ladda*lA[k%T].dot(v) + (1.-mu)*v
# Whether period is completed:
if k%T == T-1:
autoval = np.dot(vold,v)
leval.append(autoval**rootT)
# Check convergence
if len(leval) >= stabint:
fluct = ( max(leval[-stabint:]) - min(leval[-stabint:]) ) / np.mean(leval[-stabint:])
else:
fluct = 1. + tolerance
if fluct < tolerance:
interrupt = True
break
mnorm = np.linalg.norm(v)
v = v / mnorm
vold = v.copy()
# If never interrupted, check now convergence.
if not interrupt:
fluct = ( max(leval[-stabint:]) - min(leval[-stabint:]) ) / np.mean(leval[-stabint:])
if fluct >= tolerance:
raise ThresholdError, 'Power method did not converge.'
return leval[-1] - 1.
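# Hedged reading of the value returned above: each full period multiplies the
# vector by prod_t [ladda*A_t + (1-mu)*I], leval[-1] approximates the T-th root
# of that propagator's spectral radius, and the function returns it minus 1, so
# the epidemic threshold is the ladda at which this quantity crosses zero.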
# Compute the spectral radius using the modified power method, implementing transmission on weighted networks.
def power_spectral_radius_weighted(ladda, mu, lA, N, T, valumax=1000, stabint=10, tolerance=0.0001):
loglad = np.log(1.-ladda)
rootT = 1.0 / float(T)
# Initialize
leval = []
v0 = 0.9*np.random.random(N) + 0.1
v = v0.copy()
vold = v.copy()
interrupt = False # When convergence is reached, it becomes True.
#
itmax = T * valumax
for k in range(itmax):
# Perform iteration. Meaning of function expm1: -(loglad*lA[k%T]).expm1() = 1-(1-ladda)^Aij
v = -(loglad*lA[k%T]).expm1().dot(v) + (1.-mu)*v
# Whether period is completed
if k%T == T-1:
autoval = np.dot(vold,v)
leval.append( autoval**rootT )
# Check convergence
if len(leval)>=stabint:
fluct = (max( leval[-stabint:]) - min(leval[-stabint:]) ) / np.mean(leval[-stabint:])
else:
fluct = 1. + tolerance
if fluct < tolerance:
interrupt = True
break
mnorm = np.linalg.norm(v)
v = v / mnorm
vold = v.copy()
# If never interrupted, check now convergence.
if not interrupt:
fluct = ( max(leval[-stabint:]) - min(leval[-stabint:]) ) / np.mean(leval[-stabint:])
if fluct >= tolerance:
raise ThresholdError,'Power method did not converge.'
return leval[-1] - 1.
# Function for computing the threshold. Additional optional keywords: weighted (bool), findroot (='brentq' or ='bisect'), xtol, rtol (for the last two, see scipy brentq, bisect documentation)
def find_threshold (mu, R, vmin=0.001, vmax=0.999, maxiter=50, **kwargs):
if 'weighted' in kwargs:
weighted = kwargs['weighted']
else:
weighted = R.weighted
if 'rootFinder' in kwargs:
findroot = kwargs['rootFinder']
else:
findroot = 'brentq'
if findroot == 'brentq':
rootFinder = brentq
elif findroot == 'bisect':
rootFinder = bisect
else:
raise ThresholdError,'method for root finding '+findroot+' is not supported.'
kwargs2 = {}
if 'xtol' in kwargs:
kwargs2['xtol'] = kwargs['xtol']
if 'rtol' in kwargs:
kwargs2['rtol'] = kwargs['rtol']
try:
if weighted:
result, rr = rootFinder(power_spectral_radius_weighted, vmin, vmax, args=(mu, R.getMatrices(), R.N, R.T), maxiter=maxiter, full_output=True, disp=False, **kwargs2)
else:
result, rr = rootFinder(power_spectral_radius, vmin, vmax, args=(mu, R.getMatrices(), R.N, R.T), maxiter=maxiter, full_output=True, disp=False, **kwargs2)
except ThresholdError, err_string:
print err_string
return np.nan
except ValueError:
if weighted:
srerr = power_spectral_radius_weighted(vmax, mu, R.getMatrices(), R.N, R.T)
else:
srerr = power_spectral_radius(vmax, mu, R.getMatrices(), R.N, R.T)
spuz = 'ValueError: Interval may not contain zeros (or other ValueError). vmax = %.5f, spectral radius in vmax = %.5f' % (vmax, srerr)
print spuz
return np.nan
else:
if not rr.converged:
print 'Optimization did not converge.'
return np.nan
return result
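# Hedged usage sketch (made-up file name and parameters):
#   R = tnet('edges.tsv', separator='\t')   # build the temporal network
#   lc = find_threshold(0.5, R)             # critical lambda for mu = 0.5
# find_threshold returns np.nan if the root finder fails or if the interval
# [vmin, vmax] does not bracket a sign change.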
# CHECK VERSIONS
vers_python0 = '2.7.9'
vers_numpy0 = '1.9.2'
vers_scipy0 = '0.15.1'
vers_netx0 = '1.9.1'
vers_python = '%s.%s.%s' % version_info[:3]
vers_numpy = np.__version__
vers_netx = nx.__version__
if vers_python != vers_python0:
sp = 'This program has been tested for Python %s. Yours is version %s.' % (vers_python0, vers_python)
warn(sp)
if vers_numpy != vers_numpy0:
sp = 'This program has been tested for numpy %s. Yours is version %s. It is likely to work anyway.' % (vers_numpy0, vers_numpy)
warn(sp)
if vers_scipy != vers_scipy0:
sp = 'This program has been tested for scipy %s. Yours is version %s. It is likely to work anyway.' % (vers_scipy0, vers_scipy)
warn(sp)
if vers_netx != vers_netx0:
    sp = 'This program has been tested for networkx %s. Yours is version %s. It is likely to work anyway.' % (vers_netx0, vers_netx)
    warn(sp)
# File: Python/test/fdm.py (repository: yrtf/QuantLib-SWIG)
"""
Copyright (C) 2020 <NAME>
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<<EMAIL>>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
import math
import unittest
import QuantLib as ql
class FdmTest(unittest.TestCase):
def setUp(self):
self.todaysDate = ql.Date(15, ql.May, 2019)
ql.Settings.instance().evaluationDate = self.todaysDate
def tearDown(self):
ql.Settings.instance().evaluationDate = ql.Date()
def test1dMesher(self):
"""Testing one dimensional mesher"""
m = ql.Concentrating1dMesher(0, 1, 10)
self.assertEqual(m.size(), 10)
for i in range(0,10):
self.assertAlmostEqual(m.location(i), i/9.0, 14)
m = ql.Concentrating1dMesher(0, 1, 10,
[ql.Concentrating1dMesherPoint(0.75, 0.01,False),
ql.Concentrating1dMesherPoint(0.5, 0.01, True)])
self.assertEqual(m.size(), 10)
self.assertAlmostEqual(m.location(0), 0.0, 14)
self.assertAlmostEqual(m.location(9), 1.0, 14)
p = list(x for x in m.locations() if ql.close_enough(x, 0.5))
self.assertEqual(len(p), 1)
p = list(x for x in m.locations() if ql.close_enough(x, 0.75))
self.assertEqual(len(p), 0)
m = ql.Predefined1dMesher([0,2,4])
self.assertEqual(m.size(), 3)
self.assertEqual(m.location(0), 0)
self.assertEqual(m.location(1), 2)
self.assertEqual(m.location(2), 4)
def testFdmLinearOpIterator(self):
"""Testing iterators for linear operators"""
dim = [2,2,3]
pos = [0,0,0]
idx = 0
opIter = ql.FdmLinearOpIterator(dim, pos, idx)
self.assertEqual(opIter.index(), 0)
opIter.increment()
self.assertEqual(opIter.index(), 1)
self.assertEqual(opIter.coordinates(), (1, 0, 0))
opIter.increment()
self.assertEqual(opIter.coordinates(), (0, 1, 0))
opIter2 = ql.FdmLinearOpIterator(dim, pos, idx)
self.assertEqual(opIter.notEqual(opIter2), True)
self.assertEqual(opIter.notEqual(opIter), False)
def testFdmLinearOpLayout(self):
"""Testing memory layout for linear operators"""
dim = [2,2,3]
m = ql.FdmLinearOpLayout(dim)
self.assertEqual(m.size(), 2*2*3)
self.assertEqual(m.dim(), (2, 2, 3))
self.assertEqual(m.spacing(), (1, 2, 4))
self.assertEqual(m.index((0,1,2)), 10)
self.assertEqual(m.neighbourhood(m.begin(), 0, 1), 1)
self.assertEqual(m.neighbourhood(m.begin(), 2, 2), 8)
self.assertEqual(m.neighbourhood(m.begin(), 0, 1, 2, 2), 9)
n = m.iter_neighbourhood(m.begin(), 0, 1)
opIter = m.begin()
opIter.increment()
self.assertEqual(opIter.notEqual(n), False)
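    # Hedged worked check of the layout arithmetic above: with dim = (2, 2, 3)
    # the spacings are (1, 2, 4), so coordinates (i, j, k) map to i*1 + j*2 + k*4;
    # e.g. (0, 1, 2) -> 0 + 2 + 8 = 10, matching the m.index((0, 1, 2)) assertion.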
def testFdmMesherComposite(self):
"""Testing mesher composites"""
m1 = ql.Concentrating1dMesher(0, 1, 2)
m2 = ql.Uniform1dMesher(0, 2, 3)
m = ql.FdmMesherComposite(m1, m2)
self.assertEqual(len(m.getFdm1dMeshers()), 2)
locations = m.locations(0)
self.assertEqual(len(locations), 6)
self.assertEqual(list(map(lambda x: int(x+0.5), locations)), [0, 1, 0, 1, 0, 1])
locations = m.locations(1)
self.assertEqual(list(map(lambda x: int(x+0.5), locations)), [0, 0, 1, 1, 2, 2])
def testFdmLinearOpComposite(self):
"""Testing linear operator composites"""
class Foo:
t1 = 0.0
t2 = 0.0
@classmethod
def size(self):
return 42
def setTime(self, t1, t2):
self.t1 = t1
self.t2 = t2
@classmethod
def apply(self, r):
return 2*r
@classmethod
def apply_mixed(self, r):
return 3*r
@classmethod
def apply_direction(self, direction , r):
return direction*r
@classmethod
def solve_splitting(self, direction , r, s):
return direction*s*r
@classmethod
def preconditioner(self, r, s):
return s*r
foo = Foo()
c = ql.FdmLinearOpCompositeProxy(foo)
self.assertEqual(c.size(), foo.size())
c.setTime(1.0, 2.0)
self.assertAlmostEqual(foo.t1, 1.0, 14)
self.assertAlmostEqual(foo.t2, 2.0, 14)
r = ql.Array([1,2,3,4])
self.assertEqual(list(c.apply(r)), list(2*r))
self.assertEqual(list(c.apply_mixed(r)), list(3*r))
self.assertEqual(list(c.apply_direction(7, r)), list(7*r))
s = list(c.solve_splitting(7, r, 0.5))
self.assertEqual(len(s), len(r))
for i, x in enumerate(s):
self.assertAlmostEqual(x, 3.5*r[i], 14)
self.assertEqual(list(c.preconditioner(r, 4)), list(4*r))
class Bar:
@classmethod
def apply(self, r):
return 1
def apply_mixed(self, r):
pass
with self.assertRaises(RuntimeError):
ql.FdmLinearOpCompositeProxy(Bar()).apply(r)
with self.assertRaises(RuntimeError):
ql.FdmLinearOpCompositeProxy(Bar()).apply_mixed(r)
def testFdmBlackScholesOp(self):
"""Testing linear Black-Scholes operator"""
todaysDate = ql.Date(1, ql.January, 2020)
ql.Settings.instance().evaluationDate = todaysDate
dc = ql.Actual365Fixed()
settlementDate = todaysDate + 2
riskFreeRate = ql.FlatForward(settlementDate, 0.05, dc)
exercise = ql.EuropeanExercise(ql.Date(27, ql.December, 2020))
maturity = dc.yearFraction(todaysDate, exercise.lastDate())
strike = 110.0
payoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)
underlying = ql.SimpleQuote(100.0)
volatility = ql.BlackConstantVol(settlementDate, ql.TARGET(), 0.10, dc)
dividendYield = ql.FlatForward(settlementDate, 0.05, dc)
process = ql.BlackScholesMertonProcess(
ql.QuoteHandle(underlying),
ql.YieldTermStructureHandle(dividendYield),
ql.YieldTermStructureHandle(riskFreeRate),
ql.BlackVolTermStructureHandle(volatility)
)
mesher = ql.FdmMesherComposite(
ql.FdmBlackScholesMesher(10, process, maturity, strike))
op = ql.FdmBlackScholesOp(mesher, process, strike)
self.assertEqual(op.size(), 1)
op.setTime(0, 0.1)
c = list(map(lambda x: payoff(math.exp(x)), mesher.locations(0)))
p = op.apply(c)
e = [ 0.0, 0.0, 0.0, 0.0, 0.0,
3.18353, 0.755402, -1.30583, -2.19881, -4.0271 ]
for i, x in enumerate(e):
self.assertAlmostEqual(x, p[i], 5)
def testFdmFirstOrderOperator(self):
"""Testing first order operator"""
mesher = ql.Uniform1dMesher(0.0, math.pi, 1000)
op = ql.FirstDerivativeOp(0, ql.FdmMesherComposite(mesher))
l = mesher.locations()
x = list(map(math.sin, l))
y = op.apply(x)
for u, v in zip(l, y):
self.assertAlmostEqual(v, math.cos(u), 4)
def testFdmSecondOrderOperator(self):
"""Testing second order operator"""
mesher = ql.Uniform1dMesher(0.0, math.pi, 1000)
op = ql.SecondDerivativeOp(0, ql.FdmMesherComposite(mesher))
x = list(map(math.sin, mesher.locations()))
y = op.apply(x)
for u, v in zip(x, y):
self.assertAlmostEqual(v, -u, 4)
def testFdmBoundaryCondition(self):
"""Testing Dirichlet Boundary conditions"""
m = ql.FdmMesherComposite(
ql.Uniform1dMesher(0.0, 1.0, 5))
b = ql.FdmDirichletBoundary(
m, math.pi, 0, ql.FdmBoundaryCondition.Upper)
x = ql.Array(len(m.locations(0)), 0.0)
b.applyAfterApplying(x)
self.assertEqual(list(x), [0,0,0,0, math.pi])
s = ql.FdmBoundaryConditionSet()
s.push_back(b)
self.assertEqual(len(s), 1)
def testFdmStepConditionCallBack(self):
"""Testing step condition call back function"""
class Foo:
@classmethod
def applyTo(self, a, t):
for i in range(5):
a[i] = t+1.0
m = ql.FdmStepConditionProxy(Foo())
x = ql.Array(5)
m.applyTo(x, 2.0)
self.assertEqual(len(x), 5)
self.assertEqual(list(x), [3.0, 3.0, 3.0, 3.0, 3.0])
def testFdmInnerValueCalculatorCallBack(self):
"""Testing inner value call back function"""
class Foo:
@classmethod
def innerValue(self, opIter, t):
return opIter.index() + t
@classmethod
def avgInnerValue(self, opIter, t):
return opIter.index() + 2*t
m = ql.FdmInnerValueCalculatorProxy(Foo())
dim = [2,2,3]
pos = [0,0,0]
opIter = ql.FdmLinearOpIterator(dim, pos, 0)
while (opIter.index() < 2*2*3):
idx = opIter.index()
self.assertEqual(m.innerValue(opIter, 2.0), idx + 2.0)
self.assertEqual(m.avgInnerValue(opIter, 2.0), idx + 4.0)
opIter.increment()
def testFdmLogInnerValueCalculator(self):
"""Testing log inner value calculator"""
m = ql.FdmMesherComposite(
ql.Uniform1dMesher(math.log(50), math.log(150), 11))
p = ql.PlainVanillaPayoff(ql.Option.Call, 100)
v = ql.FdmLogInnerValue(p, m, 0)
opIter = m.layout().begin()
while opIter.notEqual(m.layout().end()):
x = math.exp(m.location(opIter, 0));
self.assertAlmostEqual(p(x), v.innerValue(opIter, 1.0), 14)
opIter.increment()
def testAmericanOptionPricing(self):
"""Testing Black-Scholes and Heston American Option pricing"""
xSteps = 100
tSteps = 25
dampingSteps = 0
todaysDate = ql.Date(15, ql.January, 2020)
ql.Settings.instance().evaluationDate = todaysDate
dc = ql.Actual365Fixed()
riskFreeRate = ql.YieldTermStructureHandle(
ql.FlatForward(todaysDate, 0.06, dc))
dividendYield = ql.YieldTermStructureHandle(
ql.FlatForward(todaysDate, 0.02, dc))
strike = 110.0
payoff = ql.PlainVanillaPayoff(ql.Option.Put, strike)
maturityDate = todaysDate + ql.Period(1, ql.Years)
maturity = dc.yearFraction(todaysDate, maturityDate)
exercise = ql.AmericanExercise(todaysDate, maturityDate)
spot = ql.QuoteHandle(ql.SimpleQuote(100.0))
volatility = ql.BlackConstantVol(todaysDate, ql.TARGET(), 0.20, dc)
process = ql.BlackScholesMertonProcess(
spot, dividendYield, riskFreeRate,
ql.BlackVolTermStructureHandle(volatility)
)
option = ql.VanillaOption(payoff, exercise)
option.setPricingEngine(ql.FdBlackScholesVanillaEngine.make(
process, xGrid = xSteps, tGrid = tSteps,
dampingSteps = dampingSteps)
)
expected = option.NPV()
equityMesher = ql.FdmBlackScholesMesher(
xSteps, process, maturity,
strike, cPoint = (strike, 0.1)
)
mesher = ql.FdmMesherComposite(equityMesher)
op = ql.FdmBlackScholesOp(mesher, process, strike)
innerValueCalculator = ql.FdmLogInnerValue(payoff, mesher, 0)
x = []
rhs = []
layout = mesher.layout()
opIter = layout.begin()
while (opIter.notEqual(layout.end())):
x.append(mesher.location(opIter, 0))
rhs.append(innerValueCalculator.avgInnerValue(opIter, maturity))
opIter.increment()
rhs = ql.Array(rhs)
bcSet = ql.FdmBoundaryConditionSet()
stepCondition = ql.FdmStepConditionComposite.vanillaComposite(
ql.DividendSchedule(), exercise, mesher,
innerValueCalculator, todaysDate, dc
)
# only to test an Operator defined in python
class OperatorProxy:
def __init__(self, op):
self.op = op
def size(self):
return self.op.size()
def setTime(self, t1, t2):
return self.op.setTime(t1, t2)
def apply(self, r):
return self.op.apply(r)
def apply_direction(self, i, r):
return self.op.apply_direction(i, r)
def solve_splitting(self, i, r, s):
return self.op.solve_splitting(i, r, s)
proxyOp = ql.FdmLinearOpCompositeProxy(OperatorProxy(op))
solver = ql.FdmBackwardSolver(
proxyOp, bcSet, stepCondition, ql.FdmSchemeDesc.Douglas()
)
solver.rollback(rhs, maturity, 0.0, tSteps, dampingSteps)
spline = ql.CubicNaturalSpline(x, rhs);
logS = math.log(spot.value())
calculated = spline(logS)
self.assertAlmostEqual(calculated, expected, 1)
solverDesc = ql.FdmSolverDesc(
mesher, bcSet, stepCondition, innerValueCalculator,
maturity, tSteps, dampingSteps)
calculated = ql.Fdm1DimSolver(
solverDesc, ql.FdmSchemeDesc.Douglas(), op).interpolateAt(logS)
self.assertAlmostEqual(calculated, expected, 2)
v0 = 0.4*0.4
kappa = 1.0
theta = v0
sigma = 1e-4
rho = 0.0
hestonProcess = ql.HestonProcess(
riskFreeRate, dividendYield,
spot, v0, kappa, theta, sigma, rho)
leverageFct = ql.LocalVolSurface(
ql.BlackVolTermStructureHandle(
ql.BlackConstantVol(todaysDate, ql.TARGET(), 0.50, dc)),
riskFreeRate,
dividendYield,
spot.value()
)
vSteps = 3
vMesher = ql.FdmHestonLocalVolatilityVarianceMesher(
vSteps, hestonProcess, leverageFct, maturity)
avgVolaEstimate = vMesher.volaEstimate()
self.assertAlmostEqual(avgVolaEstimate, 0.2, 5)
mesher = ql.FdmMesherComposite(equityMesher, vMesher)
innerValueCalculator = ql.FdmLogInnerValue(payoff, mesher, 0)
stepCondition = ql.FdmStepConditionComposite.vanillaComposite(
ql.DividendSchedule(), exercise, mesher,
innerValueCalculator, todaysDate, dc
)
solverDesc = ql.FdmSolverDesc(
mesher, bcSet, stepCondition, innerValueCalculator,
maturity, tSteps, dampingSteps)
calculated = ql.FdmHestonSolver(
hestonProcess, solverDesc, leverageFct = leverageFct).valueAt(
spot.value(), 0.16)
self.assertAlmostEqual(calculated, expected, 1)
def testBSMRNDCalculator(self):
"""Testing Black-Scholes risk neutral density calculator"""
dc = ql.Actual365Fixed()
todaysDate = ql.Date(15, ql.January, 2020)
r = 0.0
q = 0.0
vol = 0.2
s0 = 100
process = ql.BlackScholesMertonProcess(
ql.QuoteHandle(ql.SimpleQuote(s0)),
ql.YieldTermStructureHandle(
ql.FlatForward(todaysDate, q, dc)),
ql.YieldTermStructureHandle(
ql.FlatForward(todaysDate, r, dc)),
ql.BlackVolTermStructureHandle(
ql.BlackConstantVol(todaysDate, ql.TARGET(), vol, dc))
)
rnd = ql.BSMRNDCalculator(process)
t = 1.2
x = math.log(80.0)
mu = math.log(s0) + (r-q-0.5*vol*vol)*t
calculated = rnd.pdf(x, t)
stdev = vol * math.sqrt(t)
expected = (1.0/(math.sqrt(2*math.pi)*stdev) *
math.exp( -0.5*math.pow((x-mu)/stdev, 2.0) ))
self.assertAlmostEqual(calculated, expected, 8)
def testOrnsteinUhlenbeckVsBachelier(self):
"""Testing Fdm Ornstein-Uhlenbeck pricing"""
todaysDate = ql.Date(15, ql.January, 2020)
ql.Settings.instance().evaluationDate = todaysDate
dc = ql.Actual365Fixed()
rTS = ql.FlatForward(todaysDate, 0.06, dc)
strike = 110.0
payoff = ql.PlainVanillaPayoff(ql.Option.Put, strike)
maturityDate = todaysDate + ql.Period(2, ql.Years)
exercise = ql.EuropeanExercise(maturityDate)
        option = ql.VanillaOption(payoff, exercise)
import os
import glob
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from PIL import Image
from skimage import io
import torch.optim as optim
from torchvision import models
import torch.nn.functional as F
import pydensecrf.densecrf as dcrf
from torchvision import transforms
from alisuretool.Tools import Tools
import torch.backends.cudnn as cudnn
from multiprocessing.pool import Pool
from pydensecrf.utils import unary_from_softmax
from torch.utils.data import DataLoader, Dataset
from torchvision.models.resnet import BasicBlock as ResBlock
#######################################################################################################################
# 1 Data
class FixedResized(object):
def __init__(self, img_w=300, img_h=300):
self.img_w, self.img_h = img_w, img_h
pass
def __call__(self, img, label, image_crf=None):
img = img.resize((self.img_w, self.img_h))
label = label.resize((self.img_w, self.img_h))
if image_crf is not None:
image_crf = image_crf.resize((self.img_w, self.img_h))
return img, label, image_crf
pass
class RandomHorizontalFlip(transforms.RandomHorizontalFlip):
def __call__(self, img, label, image_crf=None):
if random.random() < self.p:
img = transforms.functional.hflip(img)
label = transforms.functional.hflip(label)
if image_crf is not None:
image_crf = transforms.functional.hflip(image_crf)
pass
return img, label, image_crf
pass
class ToTensor(transforms.ToTensor):
def __call__(self, img, label, image_crf=None):
img = super().__call__(img)
label = super().__call__(label)
if image_crf is not None:
image_crf = super().__call__(image_crf)
return img, label, image_crf
pass
class Normalize(transforms.Normalize):
def __call__(self, img, label, image_crf=None):
img = super().__call__(img)
return img, label, image_crf
pass
class Compose(transforms.Compose):
def __call__(self, img, label, image_crf=None):
for t in self.transforms:
img, label, image_crf = t(img, label, image_crf)
return img, label, image_crf
pass
class DatasetUSOD(Dataset):
def __init__(self, img_name_list, lab_name_list, size_train=224, is_filter=False):
self.is_filter = is_filter
self.image_name_list = img_name_list
self.label_name_list = lab_name_list
self.transform = Compose([FixedResized(size_train, size_train), RandomHorizontalFlip(), ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
pass
def __len__(self):
return len(self.image_name_list)
def __getitem__(self, idx):
image = Image.open(self.image_name_list[idx]).convert("RGB")
label = Image.open(self.label_name_list[idx]).convert("L")
image, label, image_for_crf = self.transform(image, label, image)
if self.is_filter:
num = np.sum(np.asarray(label))
num_all = label.shape[1] * label.shape[2]
ratio = num / num_all
if ratio < 0.01 or ratio > 0.8:
Tools.print("{} {:.4f} {}".format(idx, ratio, self.image_name_list[idx]))
image, label, image_for_crf = self.__getitem__(np.random.randint(0, self.__len__()))
pass
return image, label, image_for_crf
pass
class DatasetEvalUSOD(Dataset):
def __init__(self, img_name_list, lab_name_list, size_test=256):
self.image_name_list = img_name_list
self.label_name_list = lab_name_list
self.transform = Compose([FixedResized(size_test, size_test), ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
pass
def __len__(self):
return len(self.image_name_list)
def __getitem__(self, idx):
image = Image.open(self.image_name_list[idx]).convert("RGB")
label = Image.open(self.label_name_list[idx]).convert("L")
image, label, image_for_crf = self.transform(image, label, image)
return image, label, image_for_crf
@staticmethod
def eval_mae(y_pred, y):
return np.abs(y_pred - y).mean()
@staticmethod
def eval_pr(y_pred, y, th_num):
prec, recall = np.zeros(shape=(th_num,)), np.zeros(shape=(th_num,))
th_list = np.linspace(0, 1 - 1e-10, th_num)
for i in range(th_num):
y_temp = y_pred >= th_list[i]
tp = (y_temp * y).sum()
prec[i], recall[i] = tp / (y_temp.sum() + 1e-20), tp / y.sum()
pass
return prec, recall
pass
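# Hedged sanity-check sketch for the metrics above (toy arrays, not real data):
def _metrics_example():
    y_pred = np.array([0.9, 0.2, 0.7, 0.1])
    y = np.array([1.0, 0.0, 1.0, 0.0])
    mae = DatasetEvalUSOD.eval_mae(y_pred, y)             # 0.175 here
    prec, recall = DatasetEvalUSOD.eval_pr(y_pred, y, th_num=10)
    return mae, prec, recall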
#######################################################################################################################
# 2 Model
class ConvBlock(nn.Module):
def __init__(self, cin, cout, stride=1, has_relu=True):
super(ConvBlock, self).__init__()
self.has_relu = has_relu
self.conv = nn.Conv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn = nn.BatchNorm2d(cout)
self.relu = nn.ReLU(inplace=True)
pass
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
if self.has_relu:
out = self.relu(out)
return out
pass
class BASNet(nn.Module):
def __init__(self):
super(BASNet, self).__init__()
resnet = models.resnet18(pretrained=False)
# -------------Encoder--------------
self.encoder0 = ConvBlock(3, 64, has_relu=True) # 64 * 224 * 224
self.encoder1 = resnet.layer1 # 64 * 224 * 224
self.encoder2 = resnet.layer2 # 128 * 112 * 112
self.encoder3 = resnet.layer3 # 256 * 56 * 56
self.encoder4 = resnet.layer4 # 512 * 28 * 28
# -------------Decoder-------------
self.decoder_1_b1 = ResBlock(512, 512) # 28
self.decoder_1_b2 = ResBlock(512, 512) # 28
self.decoder_1_b3 = ResBlock(512, 512) # 28
self.decoder_1_c = ConvBlock(512, 256, has_relu=True) # 28
self.decoder_2_b1 = ResBlock(256, 256) # 56
self.decoder_2_b2 = ResBlock(256, 256) # 56
self.decoder_2_b3 = ResBlock(256, 256) # 56
self.decoder_2_c = ConvBlock(256, 128, has_relu=True) # 56
self.decoder_3_b1 = ResBlock(128, 128) # 112
self.decoder_3_b2 = ResBlock(128, 128) # 112
self.decoder_3_b3 = ResBlock(128, 128) # 112
self.decoder_3_out = nn.Conv2d(128, 1, 3, padding=1, bias=False) # 112
pass
def forward(self, x):
# -------------Encoder-------------
e0 = self.encoder0(x) # 64 * 224 * 224
e1 = self.encoder1(e0) # 64 * 224 * 224
e2 = self.encoder2(e1) # 128 * 112 * 112
e3 = self.encoder3(e2) # 256 * 56 * 56
e4 = self.encoder4(e3) # 512 * 28 * 28
# -------------Decoder-------------
d1 = self.decoder_1_b3(self.decoder_1_b2(self.decoder_1_b1(e4))) # 512 * 28 * 28
d1_d2 = self._up_to_target(self.decoder_1_c(d1), e3) + e3 # 512 * 56 * 56
d2 = self.decoder_2_b3(self.decoder_2_b2(self.decoder_2_b1(d1_d2))) # 256 * 56 * 56
d2_d3 = self._up_to_target(self.decoder_2_c(d2), e2) + e2 # 128 * 112 * 112
d3 = self.decoder_3_b3(self.decoder_3_b2(self.decoder_3_b1(d2_d3))) # 128 * 112 * 112
d3_out = self.decoder_3_out(d3) # 1 * 112 * 112
        d3_out_sigmoid = torch.sigmoid(d3_out)  # 1 * 112 * 112  # small (low-res) output
d3_out_up = self._up_to_target(d3_out, x) # 1 * 224 * 224
        d3_out_up_sigmoid = torch.sigmoid(d3_out_up)  # 1 * 224 * 224  # large (full-res) output
return_result = {"out": d3_out, "out_sigmoid": d3_out_sigmoid,
"out_up": d3_out_up, "out_up_sigmoid": d3_out_up_sigmoid}
return return_result
@staticmethod
def _up_to_target(source, target):
if source.size()[2] != target.size()[2] or source.size()[3] != target.size()[3]:
source = torch.nn.functional.interpolate(
source, size=[target.size()[2], target.size()[3]], mode='bilinear', align_corners=False)
pass
return source
pass
#######################################################################################################################
# 3 Runner
class BASRunner(object):
def __init__(self, batch_size=8, size_train=224, size_test=256, is_un=True, is_filter=False,
data_dir='/mnt/4T/Data/SOD/DUTS/DUTS-TR', tra_image_dir='DUTS-TR-Image',
tra_label_dir="../BASNetTemp/cam/CAM_123_224_256", tra_label_name='cam_up_norm_C123',
model_dir="./saved_models/my_train_mic_only"):
self.batch_size = batch_size
self.size_train = size_train
self.size_test = size_test
# Dataset
self.model_dir = model_dir
self.data_dir = data_dir
self.tra_img_name_list, self.tra_lbl_name_list = self.get_tra_img_label_name(
tra_image_dir, tra_label_dir, tra_label_name, is_un=is_un)
self.dataset_sod = DatasetUSOD(img_name_list=self.tra_img_name_list, is_filter=is_filter,
lab_name_list=self.tra_lbl_name_list, size_train=self.size_train)
self.data_loader_sod = DataLoader(self.dataset_sod, self.batch_size, shuffle=True, num_workers=8)
self.data_batch_num = len(self.data_loader_sod)
# Model
self.net = BASNet()
self.net = nn.DataParallel(self.net).cuda()
cudnn.benchmark = True
# Loss and optimizer
self.bce_loss = nn.BCELoss().cuda()
self.learning_rate = [[0, 0.001], [30, 0.0001], [40, 0.00001]]
self.optimizer = optim.Adam(self.net.parameters(), lr=self.learning_rate[0][1],
betas=(0.9, 0.999), weight_decay=0)
pass
def _adjust_learning_rate(self, epoch):
for param_group in self.optimizer.param_groups:
for lr in self.learning_rate:
if epoch == lr[0]:
learning_rate = lr[1]
param_group['lr'] = learning_rate
pass
pass
def load_model(self, model_file_name):
checkpoint = torch.load(model_file_name)
# checkpoint = {key: checkpoint[key] for key in checkpoint.keys() if "_c1." not in key}
self.net.load_state_dict(checkpoint, strict=False)
Tools.print("restore from {}".format(model_file_name))
pass
def get_tra_img_label_name(self, tra_image_dir, tra_label_dir, tra_label_name, is_un=True):
tra_img_name_list = glob.glob(os.path.join(self.data_dir, tra_image_dir, '*.jpg'))
if is_un:
tra_lbl_name_list = [os.path.join(tra_label_dir, '{}_{}.bmp'.format(os.path.splitext(
os.path.basename(img_path))[0], tra_label_name)) for img_path in tra_img_name_list]
else:
tra_lbl_name_list = [os.path.join(self.data_dir, tra_label_dir, '{}.png'.format(
os.path.splitext(os.path.basename(img_path))[0])) for img_path in tra_img_name_list]
pass
Tools.print("train images: {}".format(len(tra_img_name_list)))
Tools.print("train labels: {}".format(len(tra_lbl_name_list)))
return tra_img_name_list, tra_lbl_name_list
def all_loss_fusion(self, sod_output, sod_label, ignore_label=255.0):
positions = sod_label.view(-1, 1) != ignore_label
loss_bce = self.bce_loss(sod_output.view(-1, 1)[positions], sod_label.view(-1, 1)[positions])
return loss_bce
def train(self, epoch_num=200, start_epoch=0, save_epoch_freq=10):
all_loss = 0
for epoch in range(start_epoch, epoch_num+1):
Tools.print()
self._adjust_learning_rate(epoch)
Tools.print('Epoch:{:03d}, lr={:.5f}'.format(epoch, self.optimizer.param_groups[0]['lr']))
###########################################################################
            # 1 Train the model
all_loss = 0.0
Tools.print()
self.net.train()
for i, (inputs, targets, image_for_crf) in tqdm(enumerate(self.data_loader_sod), total=self.data_batch_num):
inputs = inputs.type(torch.FloatTensor).cuda()
targets = targets.type(torch.FloatTensor).cuda()
self.optimizer.zero_grad()
return_m = self.net(inputs)
sod_output = return_m["out_up_sigmoid"]
loss = self.all_loss_fusion(sod_output, targets)
loss.backward()
self.optimizer.step()
all_loss += loss.item()
pass
Tools.print("[E:{:3d}/{:3d}] loss:{:.3f}".format(epoch, epoch_num, all_loss/self.data_batch_num))
###########################################################################
            # 2 Save the model
if epoch % save_epoch_freq == 0:
save_file_name = Tools.new_dir(os.path.join(
self.model_dir, "{}_train_{:.3f}.pth".format(epoch, all_loss/self.data_batch_num)))
torch.save(self.net.state_dict(), save_file_name)
Tools.print()
Tools.print("Save Model to {}".format(save_file_name))
Tools.print()
###########################################################################
            # 3 Evaluate the model
# self.eval(self.net, epoch=epoch, is_test=True, batch_size=self.batch_size, size_test=self.size_test)
self.eval(self.net, epoch=epoch, is_test=False, batch_size=self.batch_size, size_test=self.size_test)
###########################################################################
pass
###########################################################################
###########################################################################
        # 3 Evaluate the model
self.eval(self.net, epoch=epoch, is_test=True, batch_size=self.batch_size, size_test=self.size_test)
###########################################################################
pass
# Final Save
save_file_name = Tools.new_dir(os.path.join(
self.model_dir, "{}_train_{:.3f}.pth".format(epoch_num, all_loss/self.data_batch_num)))
torch.save(self.net.state_dict(), save_file_name)
Tools.print()
Tools.print("Save Model to {}".format(save_file_name))
Tools.print()
pass
@staticmethod
def eval(net, epoch=0, is_test=True, size_test=256, batch_size=16, th_num=100, beta_2=0.3):
which = "TE" if is_test else "TR"
data_dir = '/media/ubuntu/4T/ALISURE/Data/DUTS/DUTS-{}'.format(which)
image_dir, label_dir = 'DUTS-{}-Image'.format(which), 'DUTS-{}-Mask'.format(which)
        # Data
img_name_list = glob.glob(os.path.join(data_dir, image_dir, '*.jpg'))
lbl_name_list = [os.path.join(data_dir, label_dir, '{}.png'.format(
os.path.splitext(os.path.basename(img_path))[0])) for img_path in img_name_list]
dataset_eval_sod = DatasetEvalUSOD(img_name_list=img_name_list,
lab_name_list=lbl_name_list, size_test=size_test)
data_loader_eval_sod = DataLoader(dataset_eval_sod, batch_size, shuffle=False, num_workers=24)
        # Run the evaluation
avg_mae = 0.0
avg_prec = np.zeros(shape=(th_num,)) + 1e-6
avg_recall = np.zeros(shape=(th_num,)) + 1e-6
net.eval()
with torch.no_grad():
for i, (inputs, labels, _) in tqdm(enumerate(data_loader_eval_sod), total=len(data_loader_eval_sod)):
inputs = inputs.type(torch.FloatTensor)
inputs = inputs.cuda() if torch.cuda.is_available() else inputs
now_label = labels.squeeze().data.numpy()
return_m = net(inputs)
now_pred = return_m["out_up_sigmoid"].squeeze().cpu().data.numpy()
mae = dataset_eval_sod.eval_mae(now_pred, now_label)
prec, recall = dataset_eval_sod.eval_pr(now_pred, now_label, th_num)
avg_mae += mae
avg_prec += prec
avg_recall += recall
pass
pass
        # Results
avg_mae = avg_mae / len(data_loader_eval_sod)
avg_prec = avg_prec / len(data_loader_eval_sod)
avg_recall = avg_recall / len(data_loader_eval_sod)
score = (1 + beta_2) * avg_prec * avg_recall / (beta_2 * avg_prec + avg_recall)
score[score != score] = 0
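        # Hedged note: this is the F-beta measure commonly used for saliency
        # detection (beta^2 = 0.3); the line above zeroes NaNs that occur when
        # prec + recall == 0, and score.max() below reports the maximum
        # F-measure over all th_num thresholds.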
Tools.print("{} {} avg mae={} score={}".format("Test" if is_test else "Train", epoch, avg_mae, score.max()))
pass
pass
#######################################################################################################################
# 4 Main
"""
2020-07-13 00:08:59 [E: 64/200] loss:0.026
2020-07-13 00:11:42 Test 64 avg mae=0.06687459443943410 score=0.8030195696294923
2020-07-13 09:57:32 Train 190 avg mae=0.02006155672962919 score=0.9667652002840796
2020-07-13 14:27:31 [E: 37/ 50] loss:0.057
2020-07-13 14:30:10 Test 37 avg mae=0.07661225112855055 score=0.7964876461003649
2020-07-13 15:46:29 Train 50 avg mae=0.03753526809089112 score=0.949905201516531
CAM_123_224_256_AVG_1
"""
# File: code/train_models.py
dev = False
## command-line args
import argparse
parser = argparse.ArgumentParser(description='Train relationship description models on Visual Genome.')
parser.add_argument('-r', dest='resume', type=int)
parser.add_argument('-e', dest='epochs', type=int, default=10)
parser.add_argument('-g', dest='gpu', type=int, default=0)
parser.add_argument('-b', dest='batch_size', type=int, default=128)
parser.add_argument('-d', dest='development', action='store_true')
parser.add_argument('-t', dest='target_dir', default='saved_models')
parser.add_argument('-l', dest='models_list', default='modes.txt')
args = parser.parse_args()
resume = args.resume
epochs = args.epochs
batch_size = args.batch_size
dev = args.development
dir_path = args.target_dir
models_list = args.models_list
from pathlib import Path
## library
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
import numpy as np
import json
from collections import Counter
import gc
from time import time
#from matplotlib import pyplot as plt
## load from files
base_url = 'https://cs.stanford.edu/people/rak248/'
print('loading image profiles')
s = time()
images = {
item['image_id']: {
'width' : item['width'],
'height' : item['height'],
'path' : item['url'].replace(base_url, 'visual_genome/data/')
}
for item in json.load(open('visual_genome/data/image_data.json'))
}
e = time()
print('image names loaded {0:0.2f}s'.format(e-s))
# read from file
print('loading relationships corpus')
s = time()
rels_from_file = json.load(open('visual_genome/data/relationships.json'))
e = time()
print('relationships loaded {0:0.2f}s'.format(e-s))
# name/names correction for reading content of nodes in the dataset
def name_extract(x):
if 'names' in x and len(x['names']):
name = x['names'][0]
elif 'name' in x:
name = x['name']
else:
name = ''
return name.strip().lower()
print('preprocessing relationships with image profiles')
s = time()
# deduplicate relationships by keying on (image, subject, predicate, object)
triplets_key_values = {
(
rels_in_image['image_id'],
name_extract(rel['subject']),
rel['predicate'].lower().strip(),
name_extract(rel['object']),
): (
rels_in_image['image_id'],
(name_extract(rel['subject']), rel['subject']['object_id'], (rel['subject']['x'],rel['subject']['y'],rel['subject']['w'],rel['subject']['h'])),
rel['predicate'].lower().strip(),
(name_extract(rel['object']), rel['object']['object_id'], (rel['object']['x'], rel['object']['y'], rel['object']['w'], rel['object']['h'])),
)
for rels_in_image in rels_from_file
for rel in rels_in_image['relationships']
}
triplets = list(triplets_key_values.values())
del triplets_key_values
e = time()
print('preprocessed relationships {0:0.2f}s'.format(e-s))
print('loading image filters for training')
s = time()
image_ids = list(np.load('visual_genome/data/relationships/image_ids.npy'))
if dev:
image_ids = image_ids[:batch_size]
# only use the filtered ones
filtered_image_ids = set(list(np.load('visual_genome/data/relationships/image_ids_train.npy')))
e = time()
print('loaded image filters for training {0:0.2f}s'.format(e-s))
print('filtering triplets based on images')
s = time()
triplets = [
item
for item in triplets
#if item[0] in image_ids
if item[0] in filtered_image_ids
]
gc.collect()
e = time()
print('filtered triplets based on images {0:0.2f}s'.format(e-s))
print('creating filters for bboxes')
s = time()
filtered_obj_ids = set([
obj_id
for item in triplets
for obj_id in [item[1][1], item[3][1]]
])
e = time()
print('created filters for bboxes {0:0.2f}s'.format(e-s))
print('loading images')
chunck_size = 10000
img_visual_features = []
for l in range(0, len(image_ids), chunck_size):
s = time()
vfs = np.load('visual_genome/data/relationships/image_resnet50_features_['+str(l)+'].npy', allow_pickle=True)
img_visual_features += [
(iid, vf)
for iid, vf in list(zip(image_ids[l:l+chunck_size], vfs))
if iid in filtered_image_ids
if type(vf) != int
]
e = time()
print('{0} total files are loaded after filtering {1} in {2:0.2f}s'.format(len(img_visual_features), len(vfs), e-s))
del vfs
img_visual_features = dict(img_visual_features)
object_ids = list(np.load('visual_genome/data/relationships/object_ids.npy', allow_pickle=True))
print('loading bboxes')
chunck_size = 100000
visual_features = []
for l in range(0, len(object_ids), chunck_size):
s = time()
vfs = np.load('visual_genome/data/relationships/objects_resnet50_features_['+str(l)+'].npy', allow_pickle=True)
visual_features += [
(iid, vf)
for iid, vf in zip(object_ids[l:l+chunck_size], vfs)
if iid in filtered_obj_ids
if type(vf) != int
]
e = time()
print('{0} total files are loaded after filtering {1} in {2:0.2f}s'.format(len(visual_features), len(vfs), e-s))
del vfs
visual_features = dict(visual_features)
print('removing the triplets with missing pre-processed data')
s = time()
# clean the data from examples in which there is no saved vectors for them!
triplets = [
item
for item in triplets
if item[0] in img_visual_features
if type(img_visual_features[item[0]]) != int
if item[1][1] in visual_features
if type(visual_features[item[1][1]]) != int
if item[3][1] in visual_features
if type(visual_features[item[3][1]]) != int
]
e = time()
print('removed the triplets with missing pre-processed data {0:0.2f}s'.format(e-s))
#vocab = Counter([w.strip() for _,(sbj,_,_),pred,(obj,_,_) in triplets for w in ' '.join([sbj,pred,obj]).split(' ')])
#np.save('visual_genome/data/relationships/vocab_caption.npy', vocab)
vocab = np.load('visual_genome/data/relationships/vocab_caption.npy', allow_pickle=True)[None][0]
word2ix = {w:i for i,w in enumerate(['<0>', '<s>']+list(vocab))}
ix2word = {i:w for w,i in word2ix.items()}
word2onehot = lambda w: np.array([0.]*word2ix[w] + [1.] + [0.]*(len(word2ix)-word2ix[w]-1))
max_len = 16 #max(len(' '.join([sbj,pred,obj]).split(' ')) for _,(sbj,_,_),pred,(obj,_,_) in triplets)
np.save('triplets_train.npy', triplets)
print('# vocab_size:', len(vocab))
print('# images:', len(img_visual_features))
print('# bounding boxes:', len(visual_features))
print('# expressions:', len(triplets))
# keras
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Flatten, AveragePooling2D
from keras.layers import Dense, LSTM, Embedding, Masking
from keras.layers import Input, Lambda, RepeatVector, Reshape, Dropout
from keras.layers import TimeDistributed
from keras.optimizers import Adam
from keras.metrics import sparse_top_k_categorical_accuracy, sparse_categorical_accuracy
from keras.callbacks import EarlyStopping
from keras import backend as K
def item2features(item):
img_id,(sbj,object_id1,sbj_bbx),pred,(obj,object_id2,obj_bbx) = item
# visual features
vf0 = img_visual_features[img_id]
vf1 = visual_features[object_id1]
vf2 = visual_features[object_id2]
# spatial features
# based on VisKE
# area of each bbox:
a1 = sbj_bbx[2] * sbj_bbx[3]
a2 = obj_bbx[2] * obj_bbx[3]
# overlap width:
if obj_bbx[0] <= sbj_bbx[0] <= obj_bbx[0]+obj_bbx[2] <= sbj_bbx[0] + sbj_bbx[2]:
# overlap
w = (obj_bbx[0]+obj_bbx[2]) - (sbj_bbx[0])
elif obj_bbx[0] <= sbj_bbx[0] <= sbj_bbx[0] + sbj_bbx[2] <= obj_bbx[0]+obj_bbx[2]:
# obj contains sbj
w = sbj_bbx[2]
elif sbj_bbx[0] <= obj_bbx[0] <= sbj_bbx[0] + sbj_bbx[2] <= obj_bbx[0]+obj_bbx[2]:
# overlaps
w = (sbj_bbx[0]+sbj_bbx[2]) - (obj_bbx[0])
elif sbj_bbx[0] <= obj_bbx[0] <= obj_bbx[0]+obj_bbx[2] <= sbj_bbx[0] + sbj_bbx[2]:
# subj contains obj
w = obj_bbx[2]
else:
w = 0
# overlap height:
if obj_bbx[1] <= sbj_bbx[1] <= obj_bbx[1]+obj_bbx[3] <= sbj_bbx[1] + sbj_bbx[3]:
# overlap
h = (obj_bbx[1]+obj_bbx[3]) - (sbj_bbx[1])
elif obj_bbx[1] <= sbj_bbx[1] <= sbj_bbx[1] + sbj_bbx[3] <= obj_bbx[1]+obj_bbx[3]:
# obj contains sbj
h = sbj_bbx[3]
elif sbj_bbx[1] <= obj_bbx[1] <= sbj_bbx[1] + sbj_bbx[3] <= obj_bbx[1]+obj_bbx[3]:
# overlaps
h = (sbj_bbx[1]+sbj_bbx[3]) - (obj_bbx[1])
elif sbj_bbx[1] <= obj_bbx[1] <= obj_bbx[1]+obj_bbx[3] <= sbj_bbx[1] + sbj_bbx[3]:
# subj contains obj
h = obj_bbx[3]
else:
h = 0
# overlap area
overlap_a = w * h
# dx; dy; ov; ov1; ov2; h1;w1; h2;w2; a1; a2
sf1 = [
#obj_bbx[0] - sbj_bbx[0], # dx = x2 - x1 #change in corners
#obj_bbx[1] - sbj_bbx[1], # dy = y2 - y1 #change in corners
obj_bbx[0] - sbj_bbx[0] + (obj_bbx[2] - sbj_bbx[2])/2, # dx = x2 - x1 + (w2 - w1)/2 #change in centers
obj_bbx[1] - sbj_bbx[1] + (obj_bbx[3] - sbj_bbx[3])/2, # dy = y2 - y1 + (h2 - h1)/2 #change in centers
0 if (a1+a2) == 0 else overlap_a/(a1+a2), # ov
0 if a1 == 0 else overlap_a/a1, # ov1
0 if a2 == 0 else overlap_a/a2, # ov2
sbj_bbx[3], # h1
sbj_bbx[2], # w1
obj_bbx[3], # h2
obj_bbx[2], # w2
a1, # a1
a2, # a2
]
# spatial template (two attention masks)
x1, y1, w1, h1 = sbj_bbx
x2, y2, w2, h2 = obj_bbx
mask = np.zeros([2,7,7])
mask[0, int(y1*7):int((y1+h1)*7), int(x1*7):int((x1+w1)*7)] = 1 # mask bbox 1
mask[1, int(y2*7):int((y2+h2)*7), int(x2*7):int((x2+w2)*7)] = 1 # mask bbox 2
sf2 = mask.flatten()
# sentence encoding
sent = ' '.join([sbj,pred,obj]).split(' ')
sent = [word2ix['<s>']]+[word2ix[w] for w in sent]+[word2ix['<0>']]*(1+max_len-len(sent))
return vf0, sf1, sf2, vf1, vf2, sent
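# Hedged toy check of the VisKE-style overlap geometry used above (made-up
# boxes in (x, y, w, h) form): the object box sits inside the subject box, so
# the overlap equals the object area and ov2 comes out as 1.0.
def _spatial_overlap_example():
    sbj_bbx, obj_bbx = (0, 0, 10, 10), (2, 2, 4, 4)
    a1 = sbj_bbx[2] * sbj_bbx[3]      # 100
    a2 = obj_bbx[2] * obj_bbx[3]      # 16
    w, h = obj_bbx[2], obj_bbx[3]     # "subject contains object" branches above
    overlap_a = w * h                 # 16
    return overlap_a / float(a1 + a2), overlap_a / float(a1), overlap_a / float(a2)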
# This is memory intensive; the same preprocessing could instead be done inside the generator loop.
print('preparing data')
s = time()
prepared_data = [
item2features(item)
for item in triplets
]
e = time()
del visual_features
del img_visual_features
del triplets
gc.collect()
print('prepared data {0:0.2f}s'.format(e-s))
def reverse_viske(sf):
return [-sf[0], -sf[1], sf[2], sf[4], sf[3], sf[7], sf[8], sf[5], sf[6], sf[10], sf[9], ]
def reverse_mask(sf):
return np.concatenate([sf[49:], sf[:49]])
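# Quick sanity sketch for the helper above (values are arbitrary): reverse_mask
# simply swaps the two flattened 7x7 attention masks, so applying it twice is
# the identity.
_m = np.arange(98)
assert np.array_equal(reverse_mask(_m)[:49], _m[49:])
assert np.array_equal(reverse_mask(reverse_mask(_m)), _m)
del _m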
def generator_features_description(batch_size=32, split=(0.,1.), all_data = prepared_data, mode='bbox'):
while True:
gc.collect()
# shuffle
_all_data = all_data[int(len(all_data)*split[0]):int(len(all_data)*split[1])]
np.random.shuffle(_all_data)
# start
X_vfs = []
X_sfs = []
X_objs = []
X_sents = []
for item in _all_data:
#vf0, sf1, sf2, vf1, vf2, sent = item2features(item)
vf0, sf1, sf2, vf1, vf2, sent = item
# add to the batch
X_vfs.append(vf0)
X_sents.append(sent)
# list of objects
l = [vf1, vf2]
# if it needs to be shuffled
if mode[-2:] == '-r':
#np.random.shuffle(l)
if np.random.random() > 0.5:
l = [vf2, vf1]
sf1 = reverse_viske(sf1)
sf2 = reverse_mask(sf2)
# two types of spatial features:
if mode[:4] == 'bbox' or mode[-4:] == 'bbox':
X_sfs.append(sf1)
elif mode[:9] == 'attention':
X_sfs.append(sf2)
# two visual features from two bounding boxes
if mode[:4] == 'bbox' or mode[:9] == 'attention' or mode[:8] == 'implicit':
X_objs.append(l)
# add to flush the batch if needed
if len(X_sents) == batch_size:
sents = np.array(X_sents)
if mode[:4] == 'bbox' or mode[:9] == 'attention':
yield ([np.array(X_vfs), np.array(X_sfs), np.array(X_objs), sents[:, :-1]], np.expand_dims(sents[:, 1:], 2))
elif mode[:8] == 'implicit':
yield ([np.array(X_vfs), np.array(X_objs), sents[:, :-1]], np.expand_dims(sents[:, 1:], 2))
elif mode == 'spatial_adaptive-bbox':
yield ([np.array(X_vfs), np.array(X_sfs), sents[:, :-1]], np.expand_dims(sents[:, 1:], 2))
elif mode[:7] == 'no-beta' or mode == 'spatial_adaptive' or mode == 'spatial_adaptive-attention':
yield ([np.array(X_vfs), sents[:, :-1]], np.expand_dims(sents[:, 1:], 2))
X_vfs = []
X_sfs = []
X_objs = []
X_sents = []
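def _train_sketch(mode='bbox', batch_size=32, epochs=20):
    # Illustrative sketch only (hypothetical hyper-parameters): wire the generator
    # above to a model from build_model below. It assumes build_model returns a
    # compiled Keras model for sparse targets, which is not shown in this excerpt.
    train_gen = generator_features_description(batch_size=batch_size, split=(0.0, 0.9), mode=mode)
    val_gen = generator_features_description(batch_size=batch_size, split=(0.9, 1.0), mode=mode)
    model = build_model(mode=mode)
    steps = max(1, int(0.9 * len(prepared_data)) // batch_size)
    model.fit_generator(train_gen, steps_per_epoch=steps,
                        validation_data=val_gen, validation_steps=10,
                        callbacks=[EarlyStopping(patience=3)], epochs=epochs)
    return model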
def build_model(mode='bbox'):
print('mode:', mode)
unit_size = 200
dropout_rate = 0.5
regions_size = 7 * 7
beta_size = 2 + 1 + 1 # 2 objects + 1 sentential + 1 spatial
delayed_sent = Input(shape=[max_len+1])
sf_size = 11 # dx; dy; ov; ov1; ov2; h1; w1; h2; w2; a1; a2 (from VisKE)
beta_feature_size = 2*(beta_size-1)*unit_size
if mode[:9] == 'attention':
sf_size = 49*2 | |
<filename>setup/gcp_setup_runner.py
#!/usr/bin/env python
#
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gcloud setup runner."""
from __future__ import print_function
import logging
import os
import re
import subprocess
import six
from acloud import errors
from acloud.internal.lib import utils
from acloud.public import config
from acloud.setup import base_task_runner
from acloud.setup import google_sdk
logger = logging.getLogger(__name__)
# APIs that need to be enabled for GCP project.
_ANDROID_BUILD_SERVICE = "androidbuildinternal.googleapis.com"
_ANDROID_BUILD_MSG = (
"This service (%s) help to download images from Android Build. If it isn't "
"enabled, acloud only supports local images to create AVD."
% _ANDROID_BUILD_SERVICE)
_COMPUTE_ENGINE_SERVICE = "compute.googleapis.com"
_COMPUTE_ENGINE_MSG = (
"This service (%s) help to create instance in google cloud platform. If it "
"isn't enabled, acloud can't work anymore." % _COMPUTE_ENGINE_SERVICE)
_OPEN_SERVICE_FAILED_MSG = (
"\n[Open Service Failed]\n"
"Service name: %(service_name)s\n"
"%(service_msg)s\n")
_BUILD_SERVICE_ACCOUNT = "<EMAIL>"
_BILLING_ENABLE_MSG = "billingEnabled: true"
_DEFAULT_SSH_FOLDER = os.path.expanduser("~/.ssh")
_DEFAULT_SSH_KEY = "acloud_rsa"
_DEFAULT_SSH_PRIVATE_KEY = os.path.join(_DEFAULT_SSH_FOLDER,
_DEFAULT_SSH_KEY)
_DEFAULT_SSH_PUBLIC_KEY = os.path.join(_DEFAULT_SSH_FOLDER,
_DEFAULT_SSH_KEY + ".pub")
_ENV_CLOUDSDK_PYTHON = "CLOUDSDK_PYTHON"
_GCLOUD_COMPONENT_ALPHA = "alpha"
# Regular expression to get project/zone information.
_PROJECT_RE = re.compile(r"^project = (?P<project>.+)")
_ZONE_RE = re.compile(r"^zone = (?P<zone>.+)")
def UpdateConfigFile(config_path, item, value):
"""Update config data.
Case A: config file contain this item.
In config, "project = A_project". New value is B_project
Set config "project = B_project".
Case B: config file didn't contain this item.
New value is B_project.
Setup config as "project = B_project".
Args:
config_path: String, acloud config path.
item: String, item name in config file. EX: project, zone
value: String, value of item in config file.
TODO(111574698): Refactor this to minimize writes to the config file.
TODO(111574698): Use proto method to update config.
"""
write_lines = []
find_item = False
write_line = item + ": \"" + value + "\"\n"
if os.path.isfile(config_path):
with open(config_path, "r") as cfg_file:
for read_line in cfg_file.readlines():
if read_line.startswith(item + ":"):
find_item = True
write_lines.append(write_line)
else:
write_lines.append(read_line)
if not find_item:
write_lines.append(write_line)
with open(config_path, "w") as cfg_file:
cfg_file.writelines(write_lines)
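def _ExampleUpdateConfigFile():
    """Illustrative sketch (not part of acloud): demonstrate UpdateConfigFile.
    Uses a temporary file and made-up values purely for demonstration.
    Returns:
        String, the resulting config file contents.
    """
    import tempfile
    config_path = os.path.join(tempfile.mkdtemp(), "acloud.config")
    UpdateConfigFile(config_path, "project", "A_project")
    # The file now contains: project: "A_project"
    UpdateConfigFile(config_path, "project", "B_project")
    # The existing line is rewritten in place: project: "B_project"
    with open(config_path, "r") as cfg_file:
        return cfg_file.read()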
def SetupSSHKeys(config_path, private_key_path, public_key_path):
"""Setup the pair of the ssh key for acloud.config.
User can use the default path: "~/.ssh/acloud_rsa".
Args:
config_path: String, acloud config path.
private_key_path: Path to the private key file.
e.g. ~/.ssh/acloud_rsa
public_key_path: Path to the public key file.
e.g. ~/.ssh/acloud_rsa.pub
"""
private_key_path = os.path.expanduser(private_key_path)
if (private_key_path == "" or public_key_path == ""
or private_key_path == _DEFAULT_SSH_PRIVATE_KEY):
utils.CreateSshKeyPairIfNotExist(_DEFAULT_SSH_PRIVATE_KEY,
_DEFAULT_SSH_PUBLIC_KEY)
UpdateConfigFile(config_path, "ssh_private_key_path",
_DEFAULT_SSH_PRIVATE_KEY)
UpdateConfigFile(config_path, "ssh_public_key_path",
_DEFAULT_SSH_PUBLIC_KEY)
def _InputIsEmpty(input_string):
"""Check input string is empty.
Tool requests user to input client ID & client secret.
This basic check can detect user input is empty.
Args:
input_string: String, user input string.
Returns:
Boolean: True if input is empty, False otherwise.
"""
if input_string is None:
return True
if input_string == "":
print("Please enter a non-empty value.")
return True
return False
class GoogleSDKBins(object):
"""Class to run tools in the Google SDK."""
def __init__(self, google_sdk_folder):
"""GoogleSDKBins initialize.
Args:
google_sdk_folder: String, google sdk path.
"""
self.gcloud_command_path = os.path.join(google_sdk_folder, "gcloud")
self.gsutil_command_path = os.path.join(google_sdk_folder, "gsutil")
# TODO(137195528): Remove python2 environment after acloud support python3.
self._env = os.environ.copy()
self._env[_ENV_CLOUDSDK_PYTHON] = "python2"
def RunGcloud(self, cmd, **kwargs):
"""Run gcloud command.
Args:
cmd: String list, command strings.
                Ex: ["config"], then this function calls "gcloud config".
**kwargs: dictionary of keyword based args to pass to func.
Returns:
            String, the output message after executing the gcloud command.
"""
return subprocess.check_output([self.gcloud_command_path] + cmd,
env=self._env, **kwargs)
def RunGsutil(self, cmd, **kwargs):
"""Run gsutil command.
Args:
cmd : String list, command strings.
                Ex: ["list"], then this function calls "gsutil list".
**kwargs: dictionary of keyword based args to pass to func.
Returns:
            String, the output message after executing the gsutil command.
"""
return subprocess.check_output([self.gsutil_command_path] + cmd,
env=self._env, **kwargs)
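# Illustrative usage sketch (not part of acloud; the SDK folder below is a
# made-up example path):
#
#     runner = GoogleSDKBins("/path/to/google-cloud-sdk/bin")
#     runner.RunGcloud(["config", "list"])   # runs "gcloud config list"
#     runner.RunGsutil(["ls"])               # runs "gsutil ls"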
class GoogleAPIService(object):
"""Class to enable api service in the gcp project."""
def __init__(self, service_name, error_msg, required=False):
"""GoogleAPIService initialize.
Args:
service_name: String, name of api service.
error_msg: String, show messages if api service enable failed.
            required: Boolean, True if the service must be enabled for acloud.
"""
self._name = service_name
self._error_msg = error_msg
self._required = required
def EnableService(self, gcloud_runner):
"""Enable api service.
Args:
gcloud_runner: A GcloudRunner class to run "gcloud" command.
"""
try:
gcloud_runner.RunGcloud(["services", "enable", self._name],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
self.ShowFailMessages(error.output)
def ShowFailMessages(self, error):
"""Show fail messages.
Show the fail messages to hint users the impact if the api service
isn't enabled.
Args:
error: String of error message when opening api service failed.
"""
msg_color = (utils.TextColors.FAIL if self._required else
utils.TextColors.WARNING)
utils.PrintColorString(
error + _OPEN_SERVICE_FAILED_MSG % {
"service_name": self._name,
"service_msg": self._error_msg}
, msg_color)
@property
def name(self):
"""Return name."""
return self._name
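# Illustrative usage sketch (not part of acloud): enabling the Android Build
# API with a GoogleSDKBins runner would look like
#
#     service = GoogleAPIService(_ANDROID_BUILD_SERVICE, _ANDROID_BUILD_MSG)
#     service.EnableService(google_sdk_runner)
#
# where google_sdk_runner is a GoogleSDKBins instance; a failure only prints a
# warning because the service is not marked as required.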
class GcpTaskRunner(base_task_runner.BaseTaskRunner):
"""Runner to setup google cloud user information."""
WELCOME_MESSAGE_TITLE = "Setup google cloud user information"
WELCOME_MESSAGE = (
"This step will walk you through gcloud SDK installation."
"Then configure gcloud user information."
"Finally enable some gcloud API services.")
def __init__(self, config_path):
"""Initialize parameters.
Load config file to get current values.
Args:
config_path: String, acloud config path.
"""
# pylint: disable=invalid-name
config_mgr = config.AcloudConfigManager(config_path)
cfg = config_mgr.Load()
self.config_path = config_mgr.user_config_path
self.project = cfg.project
self.zone = cfg.zone
self.ssh_private_key_path = cfg.ssh_private_key_path
self.ssh_public_key_path = cfg.ssh_public_key_path
self.stable_host_image_name = cfg.stable_host_image_name
self.client_id = cfg.client_id
self.client_secret = cfg.client_secret
self.service_account_name = cfg.service_account_name
self.service_account_private_key_path = cfg.service_account_private_key_path
self.service_account_json_private_key_path = cfg.service_account_json_private_key_path
def ShouldRun(self):
"""Check if we actually need to run GCP setup.
We'll only do the gcp setup if certain fields in the cfg are empty.
Returns:
            True if required config fields are empty, False otherwise.
"""
# We need to ensure the config has the proper auth-related fields set,
# so config requires just 1 of the following:
# 1. client id/secret
# 2. service account name/private key path
# 3. service account json private key path
if ((not self.client_id or not self.client_secret)
and (not self.service_account_name or not self.service_account_private_key_path)
and not self.service_account_json_private_key_path):
return True
# If a project isn't set, then we need to run setup.
return not self.project
def _Run(self):
"""Run GCP setup task."""
self._SetupGcloudInfo()
SetupSSHKeys(self.config_path, self.ssh_private_key_path,
self.ssh_public_key_path)
def _SetupGcloudInfo(self):
"""Setup Gcloud user information.
1. Setup Gcloud SDK tools.
2. Setup Gcloud project.
a. Setup Gcloud project and zone.
b. Setup Client ID and Client secret.
c. Setup Google Cloud Storage bucket.
3. Enable Gcloud API services.
"""
google_sdk_init = google_sdk.GoogleSDK()
try:
google_sdk_runner = GoogleSDKBins(google_sdk_init.GetSDKBinPath())
google_sdk_init.InstallGcloudComponent(google_sdk_runner,
_GCLOUD_COMPONENT_ALPHA)
self._SetupProject(google_sdk_runner)
self._EnableGcloudServices(google_sdk_runner)
self._CreateStableHostImage()
finally:
google_sdk_init.CleanUp()
def _CreateStableHostImage(self):
"""Create the stable host image."""
# Write default stable_host_image_name with dummy value.
# TODO(113091773): An additional step to create the host image.
if not self.stable_host_image_name:
UpdateConfigFile(self.config_path, "stable_host_image_name", "")
def _NeedProjectSetup(self):
"""Confirm project setup should run or not.
If the project settings (project name and zone) are blank (either one),
we'll run the project setup flow. If they are set, we'll check with
the user if they want to update them.
Returns:
Boolean: True if we need to setup the project, False otherwise.
"""
user_question = (
"Your default Project/Zone settings are:\n"
"project:[%s]\n"
"zone:[%s]\n"
"Would you like to update them?[y/N]: \n") % (self.project, self.zone)
if not self.project or not self.zone:
logger.info("Project or zone is empty. Start to run setup process.")
return True
return utils.GetUserAnswerYes(user_question)
def _NeedClientIDSetup(self, project_changed):
"""Confirm client setup should run or not.
If project changed, client ID must also have to change.
So tool will force to run setup function.
If client ID or client secret is empty, tool force to run setup function.
If project didn't change and config hold user client ID/secret, tool
would skip client ID setup.
Args:
            project_changed: Boolean, True if the project changed.
Returns:
            Boolean: True if the setup function should run.
"""
if project_changed:
logger.info("Your project changed. Start to run setup process.")
return True
elif not self.client_id or not self.client_secret:
logger.info("Client ID or client secret is empty. Start to run setup process.")
return True
logger.info("Project was unchanged and client ID didn't need to changed.")
return False
def _SetupProject(self, gcloud_runner):
"""Setup gcloud project information.
| |
<reponame>jfigui/pyrad
"""
pyrad.graph.plots_vol
=====================
Functions to plot radar volume data
.. autosummary::
:toctree: generated/
plot_ray
plot_ppi
plot_ppi_map
plot_rhi
plot_bscope
plot_time_range
plot_fixed_rng
plot_fixed_rng_span
plot_fixed_rng_sun
plot_cappi
plot_traj
plot_rhi_contour
plot_ppi_contour
plot_roi_contour
plot_rhi_profile
plot_along_coord
plot_field_coverage
"""
from warnings import warn
import numpy as np
from netCDF4 import num2date
try:
import cartopy
from cartopy.io.img_tiles import Stamen
_CARTOPY_AVAILABLE = True
except ImportError:
_CARTOPY_AVAILABLE = False
try:
import shapely
_SHAPELY_AVAILABLE = True
except ImportError:
warn('shapely not available')
_SHAPELY_AVAILABLE = False
import matplotlib as mpl
mpl.use('Agg')
# Increase a bit font size
mpl.rcParams.update({'font.size': 16})
mpl.rcParams.update({'font.family': "sans-serif"})
import matplotlib.pyplot as plt
import pyart
from .plots_aux import get_colobar_label, get_norm, generate_fixed_rng_title
from .plots_aux import generate_fixed_rng_span_title
from .plots_aux import generate_complex_range_Doppler_title
from .plots import plot_quantiles, plot_histogram, _plot_time_range, _plot_sunscan
from ..util.radar_utils import compute_quantiles_sweep, find_ang_index
from ..util.radar_utils import compute_histogram_sweep
def plot_ray(radar, field_name, ind_ray, prdcfg, fname_list, titl=None,
vmin=None, vmax=None, save_fig=True):
"""
plots a ray
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_ray : int
ray index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
titl : str
Plot title
vmin, vmax : float
min and max values of the y axis
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handles of the figure and axes
"""
rng_km = radar.range['data']/1000.
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
xsize = prdcfg['ppiImageConfig']['xsize']
ysize = prdcfg['ppiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
if titl is None:
titl = generate_complex_range_Doppler_title(
radar, field_name, ind_ray)
labely = get_colobar_label(radar.fields[field_name], field_name)
ax = fig.add_subplot(111)
ax.plot(rng_km, radar.fields[field_name]['data'][ind_ray, :], marker='x')
ax.set_title(titl)
ax.set_xlabel('Range (km)')
ax.set_ylabel(labely)
ax.set_ylim(bottom=vmin, top=vmax)
ax.set_xlim([rng_km[0], rng_km[-1]])
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
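def _example_plot_first_ray(radar, fname='ray.png'):
    """
    Illustrative sketch (not part of pyrad): plot the first ray of a field.
    Assumes radar is a Py-ART Radar object containing a 'reflectivity' field;
    only the image-size keys actually read by plot_ray are filled in prdcfg.
    """
    prdcfg = {'ppiImageConfig': {'xsize': 10, 'ysize': 8, 'dpi': 72}}
    return plot_ray(radar, 'reflectivity', 0, prdcfg, [fname])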
def plot_ppi(radar, field_name, ind_el, prdcfg, fname_list, plot_type='PPI',
titl=None, vmin=None, vmax=None, step=None, quantiles=None,
save_fig=True):
"""
plots a PPI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_el : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
plot_type : str
type of plot (PPI, QUANTILES or HISTOGRAM)
titl : str
Plot title
vmin, vmax : float
The minimum and maximum value. If None the scale is going to be
obtained from the Py-ART config file.
step : float
step for histogram plotting
quantiles : float array
quantiles to plot
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handles of the figure and axes
"""
if plot_type == 'PPI':
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
norm = None
ticks = None
ticklabs = None
if vmin is None or vmax is None:
norm, ticks, ticklabs = get_norm(field_name)
vmin = None
vmax = None
xsize = prdcfg['ppiImageConfig']['xsize']
ysize = prdcfg['ppiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
display = pyart.graph.RadarDisplay(radar)
display.plot_ppi(
field_name, title=titl, sweep=ind_el, norm=norm, ticks=ticks,
vmin=vmin, vmax=vmax, ticklabs=ticklabs, fig=fig, ax=ax)
display.set_limits(
ylim=[prdcfg['ppiImageConfig']['ymin'],
prdcfg['ppiImageConfig']['ymax']],
xlim=[prdcfg['ppiImageConfig']['xmin'],
prdcfg['ppiImageConfig']['xmax']], ax=ax)
if 'rngRing' in prdcfg['ppiImageConfig']:
if prdcfg['ppiImageConfig']['rngRing'] > 0:
display.plot_range_rings(np.arange(
0., radar.range['data'][-1]/1000.,
prdcfg['ppiImageConfig']['rngRing']), ax=ax)
display.plot_cross_hair(5., ax=ax)
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
if plot_type == 'QUANTILES':
quantiles, values = compute_quantiles_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_el],
radar.sweep_end_ray_index['data'][ind_el], quantiles=quantiles)
titl = pyart.graph.common.generate_title(radar, field_name, ind_el)
labely = get_colobar_label(radar.fields[field_name], field_name)
plot_quantiles(quantiles, values, fname_list, labelx='quantile',
labely=labely, titl=titl)
elif plot_type == 'HISTOGRAM':
bins, values = compute_histogram_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_el],
radar.sweep_end_ray_index['data'][ind_el], field_name, step=step)
titl = pyart.graph.common.generate_title(radar, field_name, ind_el)
labelx = get_colobar_label(radar.fields[field_name], field_name)
plot_histogram(bins, values, fname_list, labelx=labelx,
labely='Number of Samples', titl=titl)
else:
warn('Unknown plot type '+plot_type)
return fname_list
def plot_ppi_map(radar, field_name, ind_el, prdcfg, fname_list,
save_fig=True):
"""
plots a PPI on a geographic map
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_el : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax, display : tuple
        list of names of the saved plots or handles of the figure and axes
"""
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
norm, ticks, ticklabs = get_norm(field_name)
xsize = prdcfg['ppiMapImageConfig']['xsize']
ysize = prdcfg['ppiMapImageConfig']['ysize']
lonstep = prdcfg['ppiMapImageConfig'].get('lonstep', 0.5)
latstep = prdcfg['ppiMapImageConfig'].get('latstep', 0.5)
min_lon = prdcfg['ppiMapImageConfig'].get('lonmin', 2.5)
max_lon = prdcfg['ppiMapImageConfig'].get('lonmax', 12.5)
min_lat = prdcfg['ppiMapImageConfig'].get('latmin', 43.5)
max_lat = prdcfg['ppiMapImageConfig'].get('latmax', 49.5)
resolution = prdcfg['ppiMapImageConfig'].get('mapres', '110m')
if resolution not in ('110m', '50m', '10m'):
warn('Unknown map resolution: '+resolution)
resolution = '110m'
background_zoom = prdcfg['ppiMapImageConfig'].get('background_zoom', 8)
lon_lines = np.arange(np.floor(min_lon), np.ceil(max_lon)+1, lonstep)
lat_lines = np.arange(np.floor(min_lat), np.ceil(max_lat)+1, latstep)
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111)
display_map = pyart.graph.RadarMapDisplay(radar)
display_map.plot_ppi_map(
field_name, sweep=ind_el, norm=norm, ticks=ticks, ticklabs=ticklabs,
min_lon=min_lon, max_lon=max_lon, min_lat=min_lat, max_lat=max_lat,
resolution=resolution, background_zoom=background_zoom,
lat_lines=lat_lines, lon_lines=lon_lines,
maps_list=prdcfg['ppiMapImageConfig']['maps'], ax=ax, fig=fig,
colorbar_flag=True, alpha=1)
ax = display_map.ax
if 'rngRing' in prdcfg['ppiMapImageConfig']:
if prdcfg['ppiMapImageConfig']['rngRing'] > 0:
rng_rings = np.arange(
0., radar.range['data'][-1]/1000.,
prdcfg['ppiMapImageConfig']['rngRing'])
for rng_ring in rng_rings:
display_map.plot_range_ring(rng_ring, ax=ax)
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax, display_map)
def plot_rhi(radar, field_name, ind_az, prdcfg, fname_list, plot_type='RHI',
titl=None, vmin=None, vmax=None, step=None, quantiles=None,
save_fig=True):
"""
plots an RHI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_az : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
plot_type : str
type of plot (PPI, QUANTILES or HISTOGRAM)
titl : str
Plot title
vmin, vmax : float
The minimum and maximum value. If None the scale is going to be
obtained from the Py-ART config file.
step : float
step for histogram plotting
quantiles : float array
quantiles to plot
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handles of the figure and axes
"""
if plot_type == 'RHI':
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
norm = None
ticks = None
ticklabs = None
if vmin is None or vmax is None:
norm, ticks, ticklabs = get_norm(field_name)
vmin = None
vmax = None
xsize = prdcfg['rhiImageConfig']['xsize']
ysize = prdcfg['rhiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
display = pyart.graph.RadarDisplay(radar)
display.plot_rhi(
field_name, title=titl, sweep=ind_az, norm=norm, ticks=ticks,
ticklabs=ticklabs, vmin=vmin, vmax=vmax,
colorbar_orient='horizontal', reverse_xaxis=False, fig=fig, ax=ax)
display.set_limits(
ylim=[prdcfg['rhiImageConfig']['ymin'],
prdcfg['rhiImageConfig']['ymax']],
xlim=[prdcfg['rhiImageConfig']['xmin'],
prdcfg['rhiImageConfig']['xmax']],
ax=ax)
display.plot_cross_hair(5., ax=ax)
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
if plot_type == 'QUANTILES':
quantiles, values = compute_quantiles_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_az],
radar.sweep_end_ray_index['data'][ind_az], quantiles=quantiles)
if titl is None:
titl = pyart.graph.common.generate_title(
radar, field_name, ind_az)
labely = get_colobar_label(radar.fields[field_name], field_name)
plot_quantiles(quantiles, values, fname_list, labelx='quantile',
labely=labely, titl=titl)
elif plot_type == 'HISTOGRAM':
bins, values = compute_histogram_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_az],
radar.sweep_end_ray_index['data'][ind_az], field_name, step=step)
if titl is None:
titl = pyart.graph.common.generate_title(
radar, field_name, ind_az)
labelx = get_colobar_label(radar.fields[field_name], field_name)
plot_histogram(bins, values, fname_list, labelx=labelx,
labely='Number of Samples', titl=titl)
else:
warn('Unknown plot type '+plot_type)
return fname_list
def plot_bscope(radar, field_name, ind_sweep, prdcfg, fname_list,
vmin=None, vmax=None, ray_dim='ang', xaxis_rng=True):
"""
plots a | |
"""Testing creation and manipulation of DataFrameSchema objects."""
# pylint: disable=too-many-lines,redefined-outer-name
import copy
from functools import partial
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
import pandas as pd
import pytest
from pandera import (
STRING,
Bool,
Category,
Check,
Column,
DataFrameSchema,
DateTime,
Float,
Index,
Int,
MultiIndex,
Object,
PandasDtype,
SeriesSchema,
String,
Timedelta,
errors,
)
from pandera.dtypes import LEGACY_PANDAS
from pandera.schemas import SeriesSchemaBase
from .test_dtypes import TESTABLE_DTYPES
def test_dataframe_schema() -> None:
"""Tests the Checking of a DataFrame that has a wide variety of types and
conditions. Tests include: when the Schema works, when a column is dropped,
and when a columns values change its type.
"""
schema = DataFrameSchema(
{
"a": Column(Int, Check(lambda x: x > 0, element_wise=True)),
"b": Column(
Float, Check(lambda x: 0 <= x <= 10, element_wise=True)
),
"c": Column(String, Check(lambda x: set(x) == {"x", "y", "z"})),
"d": Column(Bool, Check(lambda x: x.mean() > 0.5)),
"e": Column(
Category, Check(lambda x: set(x) == {"c1", "c2", "c3"})
),
"f": Column(Object, Check(lambda x: x.isin([(1,), (2,), (3,)]))),
"g": Column(
DateTime,
Check(
lambda x: x >= pd.Timestamp("2015-01-01"),
element_wise=True,
),
),
"i": Column(
Timedelta,
Check(
lambda x: x < pd.Timedelta(10, unit="D"), element_wise=True
),
),
}
)
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.5, 9.9],
"c": ["z", "y", "x"],
"d": [True, True, False],
"e": pd.Series(["c2", "c1", "c3"], dtype="category"),
"f": [(3,), (2,), (1,)],
"g": [
pd.Timestamp("2015-02-01"),
pd.Timestamp("2015-02-02"),
pd.Timestamp("2015-02-03"),
],
"i": [
pd.Timedelta(1, unit="D"),
pd.Timedelta(5, unit="D"),
pd.Timedelta(9, unit="D"),
],
}
)
assert isinstance(schema.validate(df), pd.DataFrame)
# error case
with pytest.raises(errors.SchemaError):
schema.validate(df.drop("a", axis=1))
with pytest.raises(errors.SchemaError):
schema.validate(df.assign(a=[-1, -2, -1]))
    # check that if 'a' is converted to float while the schema says int, a
    # schema error is thrown
with pytest.raises(errors.SchemaError):
schema.validate(df.assign(a=[1.7, 2.3, 3.1]))
def test_dataframe_schema_equality() -> None:
"""Test DataframeSchema equality."""
schema = DataFrameSchema({"a": Column(Int)})
assert schema == copy.copy(schema)
assert schema != "schema"
assert DataFrameSchema(coerce=True) != DataFrameSchema(coerce=False)
assert schema != schema.update_column("a", pandas_dtype=Float)
assert schema != schema.update_column("a", checks=Check.eq(1))
def test_dataframe_schema_strict() -> None:
"""
Checks if strict=True whether a schema error is raised because 'a' is
not present in the dataframe.
"""
schema = DataFrameSchema(
{"a": Column(Int, nullable=True), "b": Column(Int, nullable=True)},
strict=True,
)
df = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
assert isinstance(schema.validate(df.loc[:, ["a", "b"]]), pd.DataFrame)
with pytest.raises(errors.SchemaError):
schema.validate(df)
schema.strict = "filter"
assert isinstance(schema.validate(df), pd.DataFrame)
assert list(schema.validate(df).columns) == ["a", "b"]
with pytest.raises(errors.SchemaInitError):
DataFrameSchema(
{"a": Column(Int, nullable=True), "b": Column(Int, nullable=True)},
strict="foobar",
)
with pytest.raises(errors.SchemaError):
schema.validate(df.loc[:, ["a"]])
with pytest.raises(errors.SchemaError):
schema.validate(df.loc[:, ["a", "c"]])
def test_dataframe_schema_strict_regex() -> None:
"""Test that strict dataframe schema checks for regex matches."""
schema = DataFrameSchema(
{"foo_*": Column(Int, regex=True)},
strict=True,
)
df = pd.DataFrame({"foo_%d" % i: range(10) for i in range(5)})
assert isinstance(schema.validate(df), pd.DataFrame)
# Raise a SchemaError if schema is strict and a regex pattern yields
# no matches
with pytest.raises(errors.SchemaError):
schema.validate(
pd.DataFrame({"bar_%d" % i: range(10) for i in range(5)})
)
def test_dataframe_pandas_dtype_coerce() -> None:
"""
Test that pandas dtype specified at the dataframe level overrides
column data types.
"""
schema = DataFrameSchema(
columns={f"column_{i}": Column(float) for i in range(5)},
pandas_dtype=int,
coerce=True,
)
df = pd.DataFrame({f"column_{i}": range(10) for i in range(5)}).astype(
float
)
assert (schema(df).dtypes == Int.str_alias).all()
    # test that the pandas_dtype in columns is preserved
for col in schema.columns.values():
assert col.pandas_dtype is float
    # raises SchemaErrors if the dataframe can't be coerced
with pytest.raises(errors.SchemaErrors):
schema.coerce_dtype(pd.DataFrame({"foo": list("abcdef")}))
# raises SchemaErrors on lazy validation
with pytest.raises(errors.SchemaErrors):
schema(pd.DataFrame({"foo": list("abcdef")}), lazy=True)
# test that original dataframe dtypes are preserved
assert (df.dtypes == Float.str_alias).all()
# test case where pandas_dtype is string
schema.pandas_dtype = str
assert (schema(df).dtypes == "object").all()
schema.pandas_dtype = PandasDtype.String
assert (schema(df).dtypes == "object").all()
# raises ValueError if _coerce_dtype is called when pandas_dtype is None
schema.pandas_dtype = None
with pytest.raises(ValueError):
schema._coerce_dtype(df)
# test setting coerce as false at the dataframe level no longer coerces
# columns to int
schema.coerce = False
assert (schema(df).dtypes == "float64").all()
def test_dataframe_coerce_regex() -> None:
"""Test dataframe pandas dtype coercion for regex columns"""
schema = DataFrameSchema(
columns={"column_": Column(float, regex=True, required=False)},
pandas_dtype=int,
coerce=True,
)
no_match_df = pd.DataFrame({"foo": [1, 2, 3]})
match_valid_df = pd.DataFrame(
{
"column_1": [1, 2, 3],
"column_2": ["1", "2", "3"],
}
)
schema(no_match_df)
schema(match_valid_df)
# if the regex column is required, no matches should raise an error
schema_required = schema.update_column("column_", required=True)
with pytest.raises(
errors.SchemaError, match="Column regex name='column_' did not match"
):
schema_required(no_match_df)
def test_dataframe_reset_column_name() -> None:
"""Test resetting column name at DataFrameSchema init on named column."""
with pytest.warns(UserWarning):
DataFrameSchema(columns={"new_name": Column(name="old_name")})
@pytest.mark.parametrize(
"columns,index",
[
(
{
"a": Column(Int, required=False),
"b": Column(Int, required=False),
},
None,
),
(
None,
MultiIndex(
indexes=[Index(Int, name="a"), Index(Int, name="b")],
),
),
],
)
def test_ordered_dataframe(
columns: Dict[str, Column], index: MultiIndex
) -> None:
"""Test that columns are ordered."""
schema = DataFrameSchema(columns=columns, index=index, ordered=True)
df = pd.DataFrame(
data=[[1, 2, 3]],
columns=["a", "a", "b"],
index=pd.MultiIndex.from_arrays(
[[1], [2], [3]], names=["a", "a", "b"]
),
)
assert isinstance(schema.validate(df), pd.DataFrame)
# test optional column
df = pd.DataFrame(
data=[[1]],
columns=["b"],
index=pd.MultiIndex.from_arrays([[1], [2]], names=["a", "b"]),
)
assert isinstance(schema.validate(df), pd.DataFrame)
df = pd.DataFrame(
data=[[1, 2]],
columns=["b", "a"],
index=pd.MultiIndex.from_arrays([[1], [2]], names=["b", "a"]),
)
with pytest.raises(
errors.SchemaErrors, match="A total of 2 schema errors"
):
schema.validate(df, lazy=True)
# test out-of-order duplicates
df = pd.DataFrame(
data=[[1, 2, 3, 4]],
columns=["a", "b", "c", "a"],
index=pd.MultiIndex.from_arrays(
[[1], [2], [3], [4]], names=["a", "b", "c", "a"]
),
)
with pytest.raises(
errors.SchemaErrors, match="A total of 1 schema errors"
):
schema.validate(df, lazy=True)
def test_series_schema() -> None:
"""Tests that a SeriesSchema Check behaves as expected for integers and
strings. Tests error cases for types, duplicates, name errors, and issues
around float and integer handling of nulls"""
SeriesSchema("int").validate(pd.Series([1, 2, 3]))
int_schema = SeriesSchema(
Int, Check(lambda x: 0 <= x <= 100, element_wise=True)
)
assert isinstance(
int_schema.validate(pd.Series([0, 30, 50, 100])), pd.Series
)
str_schema = SeriesSchema(
String,
Check(lambda s: s.isin(["foo", "bar", "baz"])),
nullable=True,
coerce=True,
)
assert isinstance(
str_schema.validate(pd.Series(["foo", "bar", "baz", None])), pd.Series
)
assert isinstance(
str_schema.validate(pd.Series(["foo", "bar", "baz", np.nan])),
pd.Series,
)
# error cases
for data in [-1, 101, 50.1, "foo"]:
with pytest.raises(errors.SchemaError):
int_schema.validate(pd.Series([data]))
for data in [-1, {"a": 1}, -1.0]:
with pytest.raises(TypeError):
int_schema.validate(TypeError)
non_duplicate_schema = SeriesSchema(Int, allow_duplicates=False)
with pytest.raises(errors.SchemaError):
non_duplicate_schema.validate(pd.Series([0, 1, 2, 3, 4, 1]))
# when series name doesn't match schema
named_schema = SeriesSchema(Int, name="my_series")
with pytest.raises(errors.SchemaError, match=r"^Expected .+ to have name"):
named_schema.validate(pd.Series(range(5), name="your_series"))
# when series floats are declared to be integer
with pytest.raises(
errors.SchemaError,
match=r"^after dropping null values, expected values in series",
):
SeriesSchema(Int, nullable=True).validate(
pd.Series([1.1, 2.3, 5.5, np.nan])
)
# when series contains null values when schema is not nullable
with pytest.raises(
errors.SchemaError,
match=r"^non-nullable series .+ contains null values",
):
SeriesSchema(Float, nullable=False).validate(
pd.Series([1.1, 2.3, 5.5, np.nan])
)
# when series can't be coerced
with pytest.raises(
errors.SchemaError,
match="Error while coercing",
):
SeriesSchema(Float, coerce=True).validate(pd.Series(list("abcdefg")))
def test_series_schema_checks() -> None:
"""Test SeriesSchema check property."""
series_schema_no_checks = SeriesSchema()
series_schema_one_check = SeriesSchema(checks=Check.eq(0))
series_schema_multiple_checks = SeriesSchema(
checks=[Check.gt(0), Check.lt(100)]
)
for schema in [
series_schema_no_checks,
series_schema_one_check,
series_schema_multiple_checks,
]:
assert isinstance(schema.checks, list)
assert len(series_schema_no_checks.checks) == 0
assert len(series_schema_one_check.checks) == 1
assert len(series_schema_multiple_checks.checks) == 2
def test_series_schema_multiple_validators() -> None:
"""Tests how multiple Checks on a Series Schema are handled both
successfully and when errors are expected."""
schema = SeriesSchema(
Int,
[
Check(lambda x: 0 <= x <= 50, element_wise=True),
Check(lambda s: (s == 21).any()),
],
)
validated_series = schema.validate(pd.Series([1, 5, 21, 50]))
assert isinstance(validated_series, pd.Series)
# raise error if any of the validators fails
with pytest.raises(errors.SchemaError):
schema.validate(pd.Series([1, 5, 20, 50]))
@pytest.mark.parametrize("coerce", [True, False])
def test_series_schema_with_index(coerce: bool) -> None:
"""Test SeriesSchema with Index and MultiIndex components."""
schema_with_index = SeriesSchema(
pandas_dtype=Int,
index=Index(Int, coerce=coerce),
)
validated_series = schema_with_index(pd.Series([1, 2, 3], index=[1, 2, 3]))
assert isinstance(validated_series, pd.Series)
schema_with_multiindex = SeriesSchema(
pandas_dtype=Int,
index=MultiIndex(
[
Index(Int, coerce=coerce),
Index(String, coerce=coerce),
]
),
)
multi_index = pd.MultiIndex.from_arrays(
[[0, 1, 2], ["foo", "bar", "foo"]],
)
validated_series_multiindex = schema_with_multiindex(
pd.Series([1, 2, 3], index=multi_index)
)
assert isinstance(validated_series_multiindex, pd.Series)
assert (validated_series_multiindex.index == multi_index).all()
class SeriesGreaterCheck:
# pylint: disable=too-few-public-methods
"""Class creating callable objects to check if series elements exceed a
lower bound.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
def __call__(self, series: pd.Series) -> pd.Series:
"""Check if the elements of s are > lower_bound.
:returns Series with bool elements
"""
return series > self.lower_bound
def series_greater_than_zero(series: pd.Series) -> pd.Series:
"""Return a bool series indicating whether the elements | |
whole number of samples so we have to interpolate.
xp = sample_interval * np.arange(count)
rsf = int(count * resample_factor[sample_interval])
yp = resample_interval * np.arange(rsf)
interp_f = interp1d(xp, this_data[:,0:count], kind='previous', axis=1,
bounds_error=False, fill_value=np.nan, assume_sorted=True)
this_data = interp_f(yp)
else:
# Reduce the number of samples by taking the mean.
n_mean = int(resample_factor[sample_interval])
this_data = np.mean(this_data.reshape(-1, n_mean), axis=1)
if is_power:
# Convert power back to log units.
this_data = 10.0 * np.log10(this_data)
elif resample_interval < sample_interval:
# We're increasing the number of samples.
if use_interp:
# If we're resampling power, convert power to linear units.
if is_power:
this_data = 10.0 ** (data[pings_this_interval_count] / 10.0)
else:
this_data = data[pings_this_interval_count]
# We can't expand by replicating a whole number of samples so we have to interpolate.
xp = sample_interval * np.arange(count)
rsf = int(count * resample_factor[sample_interval])
yp = resample_interval * np.arange(rsf)
interp_f = interp1d(xp, this_data[:,0:count], kind='previous', axis=1,
bounds_error=False, fill_value=np.nan, assume_sorted=True)
this_data = interp_f(yp)
if is_power:
# Convert power back to log units.
this_data = 10.0 * np.log10(this_data)
else:
# Replicate the values to fill out the higher resolution array.
n_repeat = int(resample_factor[sample_interval])
this_data = data[pings_this_interval_count]
if data.ndim == 3:
this_data = np.repeat(this_data[:, 0:count,:], n_repeat, axis=1)
else:
this_data = np.repeat(this_data[:, 0:count], n_repeat, axis=1)
else:
# The data exists on the resample_interval grid - no change
this_data = data[pings_this_interval_count, 0:count]
# Assign new values to output array. At the same time, we will shift the data by sample offset.
unique_sample_offsets = np.unique(sample_offsets_this_interval[sample_interval]).astype('int')
for offset in unique_sample_offsets:
if this_data.ndim == 3:
resampled_data[pings_this_interval_count, offset:offset + this_data.shape[1],:] = this_data
else:
resampled_data[pings_this_interval_count, offset:offset + this_data.shape[1]] = this_data
# Return the resampled data and the sampling interval used.
return resampled_data, resample_interval
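    # Illustrative note (not part of echolab): when expanding, interp1d with
    # kind='previous' behaves like repeating each sample, except that query
    # points past the last original sample are filled with NaN. For a single
    # ping with sample_interval=2 resampled to resample_interval=1:
    #
    #     xp = 2 * np.arange(3)                       # [0, 2, 4]
    #     yp = 1 * np.arange(6)                       # [0, 1, 2, 3, 4, 5]
    #     f = interp1d(xp, np.array([[10., 20., 30.]]), kind='previous', axis=1,
    #                  bounds_error=False, fill_value=np.nan)
    #     f(yp)                                       # [[10., 10., 20., 20., 30., nan]]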
def _vertical_shift(self, data, sample_offsets, unique_sample_offsets,
min_sample_offset):
"""Adjusts the output array size and pads the top of the samples
array to vertically shift the positions of the sample data in
the output array.
Pings with offsets greater than the minimum will be padded on the
top, shifting them into their correct location relative to the other
pings. The result is an output array with samples that are properly
aligned vertically relative to each other with a sample offset that is
constant and equal to the minimum of the original sample offsets.
This method is only called if our data has a constant sample interval,
but varying sample offsets. If the data has multiple sample intervals
the offset adjustment is done in vertical_resample.
Args:
data (array): A numpy array of data to be shifted.
sample_offsets (array): A numpy array with the sample offset for
each ping.
            unique_sample_offsets (list): The list of unique sample offset
values.
            min_sample_offset (int): The minimum of the original sample offsets.
Returns:
The shifted data array.
"""
# Determine the new array size.
new_sample_dims = (data.shape[1] + max(sample_offsets) -
min_sample_offset)
# Create the new array.
shifted_data = np.empty((data.shape[0], new_sample_dims),
dtype=self.sample_dtype, order='C')
shifted_data.fill(np.nan)
# Fill the array, looping over the different sample offsets.
for offset in unique_sample_offsets:
rows_this_offset = np.where(sample_offsets == offset)[0]
start_index = offset - min_sample_offset
end_index = start_index + data.shape[1]
shifted_data[rows_this_offset, start_index:end_index] = \
data[rows_this_offset, 0:data.shape[1]]
return shifted_data
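    # Illustrative note (not part of echolab): with sample_offsets = [1, 3],
    # min_sample_offset = 1 and 4 samples per ping, new_sample_dims is
    # 4 + 3 - 1 = 6; the offset-1 ping is written at columns 0-3 and the
    # offset-3 ping at columns 2-5, so both share a common offset of 1:
    #
    #     data    = [[1, 2, 3, 4],        shifted = [[1,   2,   3,   4,   nan, nan],
    #                [5, 6, 7, 8]]                   [nan, nan, 5,   6,   7,   8  ]]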
def _copy(self, obj):
"""Copies attributes.
This is an internal helper method that is called by child "copy"
methods to copy the data and object attributes.
Args:
obj (ping_data): The object to copy attributes to.
Returns:
The copy of the object.
"""
# Copy the common attributes.
obj.sample_dtype = self.sample_dtype
obj.n_samples = self.n_samples
obj.n_pings = self.n_pings
obj.shape = self.shape
obj._data_attributes = list(self._data_attributes)
obj._object_attributes = list(self._object_attributes)
# Copy object attributes
for attr_name in self._object_attributes:
attr = getattr(self, attr_name)
# check if this attribute is a numpy array
if isinstance(attr, np.ndarray):
# it is - use ndarray's copy method
setattr(obj, attr_name, attr.copy())
else:
# it's not - use the copy module
setattr(obj, attr_name, copy.deepcopy(attr))
# Copy the data attributes
for attr_name in obj._data_attributes:
attr = getattr(self, attr_name)
# data attributes are always numpy arrays so use ndarray's copy method
setattr(obj, attr_name, attr.copy())
# Return the copy.
return obj
def _shape(self):
'''Internal method used to update the shape attribute
'''
shape = None
if hasattr(self, 'power'):
shape = self.power.shape
elif hasattr(self, 'angles_alongship_e'):
shape = self.angles_alongship_e.shape
elif hasattr(self, 'complex'):
shape = self.complex.shape
elif hasattr(self, 'data'):
shape = self.data.shape
return shape
def _like(self, obj, n_pings, value, empty_times=False, no_data=False):
"""Copies ping_data attributes and creates data arrays filled with the
specified value.
This is an internal helper method that is called by "empty_like" and
"zeros_like" methods of child classes which copy the ping_data
attributes into the provided ping_data based object as well as
create "data" arrays that are filled with the specified value. All
vertical axes will be copied without modification.
If empty_times is False, the ping_time vector of this instance is copied
to the new object. If it is True, the new ping_time vector is filled
with NaT (not a time) values. If n_pings != self.n_pings THIS
ARGUMENT IS IGNORED AND THE NEW PING VECTOR IS FILLED WITH NaT.
The result should be a new object where horizontal axes (excepting
ping_time) and sample data arrays are empty (NaN or NaT). The
contents of the ping_time vector will depend on the state of the
empty_times keyword. The new object's shape will be (n_pings,
self.n_samples).
Args:
obj (ping_data): An empty object to copy attributes to.
n_pings (int): Number of pings (horizontal axis)
value (int,float): A scalar value to fill the array with.
empty_times (bool): Controls whether ping_time data is copied
over to the new object (TRUE) or if it will be filled with NaT
values (FALSE).
            no_data (bool): Set to True to set 2d and 3d data attributes
to None, rather than creating numpy arrays. When False,
numpy arrays are created. This allows you to avoid allocating
the data arrays if you are planning on replacing them.
This is primarily used internally. Default: False
Returns:
The object copy, obj.
"""
# If n_pings is None, we create an empty array with the same number
# of pings.
if n_pings is None:
n_pings = self.n_pings
# Copy the common attributes.
obj.sample_dtype = self.sample_dtype
obj.n_samples = self.n_samples
obj.n_pings = n_pings
obj._data_attributes = list(self._data_attributes)
obj._object_attributes = list(self._object_attributes)
# Copy object attributes - this is simple as there are no
# size or type checks.
for attr_name in self._object_attributes:
attr = getattr(self, attr_name)
# check if attribute is a numpy array
if isinstance(attr, np.ndarray):
# it is - use ndarray's copy method
setattr(obj, attr_name, attr.copy())
else:
# it's not - use the copy module
setattr(obj, attr_name, copy.deepcopy(attr))
# Check if n_pings != self.n_pings. If the new object's horizontal
# axis is a different shape than this object's we can't copy
# ping_time data since there isn't a direct mapping and we don't know
# what the user wants here. This can/should be handled in the child
# method if needed.
if n_pings != self.n_pings:
# We have to force an empty ping_time vector since the axes differ.
empty_times = True
# Create the dynamic attributes.
for attr_name in self._data_attributes:
# Get the attribute.
attr = getattr(self, attr_name)
if attr.shape[0] == self.n_samples:
# Copy all vertical axes w/o changing them.
data = attr.copy()
else:
# Create an array with the appropriate shape filled with the
# specified value.
if attr.ndim == 1:
# Create an array with the same shape filled with the
# specified value.
data = np.empty(n_pings, dtype=attr.dtype)
# Check if this is the ping_time attribute and if we
# should copy this instance's ping_time data or create an
# empty ping_time vector
if attr_name == 'ping_time':
if empty_times:
data[:] = np.datetime64('NaT')
else:
data[:] = attr.copy()
elif data.dtype == 'datetime64[ms]':
data[:] = np.datetime64('NaT')
elif np.issubdtype(data.dtype, np.integer):
data[:] = 0
else:
data[:] = value
else:
# Check if we're supposed to create the sample data arrays
if no_data:
# No - we'll set them to None assuming the user will set them
data = None
else:
# Yes, create the | |
+ str(self.constant_repulsion) + '\n')
log.write(' -- VELOCITY WEIGHT COEFFICIENT: ' + str(self.VELOCITY_WEIGHT) + '\n')
log.write(' -- BOUNDARY REPULSION: ' + str(self.BOUNDARY_REPULSION) + '\n')
log.write(' -- AGING COEFFICIENT: ' + str(self.LOSING_CONFIDENCE_RATE) + '\n')
log.write(' -- INPUT TIME: ' + str(self.INPUT_TIME) + '\n')
log.write(' -- GAUSSIAN PROCESSES: ' + str(self.GP) + ' at each ' + str(self.gp_step) + ' steps ' + '\n')
log.write('\n')
log.write('OPERATORS INFO:' + '\n')
        log.write(' -- OPERATORS COMMUNICATION RANGE: ' + str(self.operator_vision_radius) + '\n')
log.write(' -- OPERATORS SIZE: ' + str(self.operators_size) + '\n')
for i in range(len(self.operator_list)):
log.write(' -- operator ' + str(i) + ": x position: " + str(self.operator_list[i].center_x) + " & y position: " + str(self.operator_list[i].center_y) + '\n')
log.write('\n')
log.write('DISASTERS INFO:' + '\n')
log.write(' -- DISASTERS SIZE: ' + str(self.disaster_size) + '\n')
for i in range(len(self.disaster_list)):
log.write(' -- disaster ' + str(i) + ": x position: " + str(self.disaster_list[i].center_x) + " & y position: " + str(self.disaster_list[i].center_y) + " & is moving: " + str(self.disaster_list[i].moving) + '\n')
log.write('\n')
log.write('DRONE INFO:' + '\n')
log.write(' -- SWARM SIZE: ' + str(self.SWARM_SIZE) + '\n')
log.write(' -- DRONE RELIABILITY RANGE: (' + str(self.min_reliability) + ', ' + str(self.max_reliability - 1) + ')' + '\n')
log.write(' -- DRONE UNRELIABILITY PERCENTAGE: ' + str(self.unreliability_percentage) + '\n')
log.write(' -- DRONE COMMUNICATION NOISE RANGE: (0.0, ' + str(self.communication_noise/10000.0) + ')' + '\n')
log.write(' -- DRONE COMMUNICATION RANGE: ' + str(self.BOUDARY_DIAMETER) + '\n')
log.write(' -- DRONE BELIEF VISION RANGE: ' + str(int(self.BOUDARY_DIAMETER/2)) + '\n')
log.write(' -- DRONE CONFIDENCE VISION RANGE: ' + str(self.VISION_BOUNDARY) + '\n')
log.write('\n')
for info in self.drone_info:
log.write(' -- ' + info + '\n')
if self.maze != None:
log.write('\n')
log.write('MAZE: ' + self.maze + ' \n')
log.write('OBSTACLE INFO:' + '\n')
for obstacle in self.obstacle_list:
log.write(' -- ' + 'TYPE: ' + obstacle.type + ', POSITION: (' + str(obstacle.center_x) + ', ' + str(obstacle.center_y) + '), ' +
'VELOCITY: (' + str(obstacle.change_x) + ', ' + str(obstacle.change_y) + ')'+ '\n')
log.close()
# Open file to save timings
self.results_file = open(self.directory + '/performance_test/' + 'stress_test_results.csv', "w")
def get_current_drones_positions(self):
positions = np.array([[0.0 for i in range(self.GRID_X)] for j in range(self.GRID_Y)])
for drone in self.drone_list:
positions[drone.grid_pos_y, drone.grid_pos_x] += 1
return positions
def get_swarm_confidence(self):
confidence = []
for drone in self.drone_list:
confidence.append(drone.confidence_map.sum())
return confidence
def get_operator_confidence(self):
confidence = []
for operator in self.operator_list:
confidence.append(operator.confidence_map.sum())
return confidence
def get_median_confidence(self):
return np.median(self.get_swarm_confidence())
def get_swarm_internal_error(self, belief_map = 'belief_map'):
errors = []
for drone in self.drone_list:
if belief_map == 'belief_map':
errors.append(np.sum(np.abs(self.global_map - drone.internal_map)))
elif belief_map == 'gp_predict':
errors.append(np.sum(np.abs(self.global_map - drone.internal_map1)))
return errors
def get_operator_internal_error(self):
errors = []
for operator in self.operator_list:
errors.append(np.sum(np.abs(self.global_map - operator.internal_map)))
return errors
def get_median_belief_error(self, belief_map = 'belief_map'):
return np.median(self.get_swarm_internal_error(belief_map))
def update_map(self):
self.global_map=[[0 for i in range(self.GRID_X)] for j in range(self.GRID_Y)]
for j in range(self.GRID_X):
for i in range(self.GRID_Y):
sum_value = 0
for adisaster in self.disaster_list:
disaster_grid_center_pos_x = math.trunc((adisaster.center_x * (self.GRID_X -1)/self.ARENA_WIDTH) )
disaster_grid_center_pos_y = math.trunc((adisaster.center_y * (self.GRID_Y -1)/self.ARENA_HEIGHT) )
dist_x = j -disaster_grid_center_pos_x
dist_y = i-disaster_grid_center_pos_y
disaster_witdh = adisaster.width * (self.GRID_X -1)/self.ARENA_WIDTH
if ((dist_x*dist_x) + (dist_y*dist_y) < disaster_witdh/2 * disaster_witdh/2):
sum_value=1
for aoperator in self.operator_list:
operator_grid_center_pos_x = math.trunc((aoperator.center_x * (self.GRID_X -1)/self.ARENA_WIDTH) )
operator_grid_center_pos_y = math.trunc((aoperator.center_y * (self.GRID_Y -1)/self.ARENA_HEIGHT) )
dist_x = j - operator_grid_center_pos_x
dist_y = i - operator_grid_center_pos_y
operator_width = aoperator.width * (self.GRID_X -1)/self.ARENA_WIDTH + 1
operator_height = aoperator.height * (self.GRID_Y -1)/self.ARENA_HEIGHT + 1
# As players are not in circular shape, this needs to be changed later
if ((dist_x*dist_x) + (dist_y*dist_y) < operator_width/2 * operator_height/2):
sum_value=1
'''
for obstacle in self.obstacle_list:
obstacle_grid_center_pos_x = math.trunc((obstacle.center_x * (self.GRID_X -1)/self.ARENA_WIDTH) )
obstacle_grid_center_pos_y = math.trunc((obstacle.center_y * (self.GRID_Y -1)/self.ARENA_HEIGHT) )
dist_x = j - obstacle_grid_center_pos_x
dist_y = i - obstacle_grid_center_pos_y
obstacle_witdh = obstacle.width * (self.GRID_X -1)/self.ARENA_WIDTH
if ((dist_x*dist_x) + (dist_y*dist_y) < obstacle_witdh/2 * obstacle_witdh/2):
sum_value=0
'''
if (sum_value==1):
self.global_map[self.GRID_Y - i - 1][j] = 1
else:
self.global_map[self.GRID_Y - i - 1][j] = 0
def get_drone_distances(self):
distances = []
for i in range(len(self.drone_list) - 1):
drone_1 = self.drone_list[i]
for j in range(i + 1, len(self.drone_list)):
drone_2 = self.drone_list[j]
dx = (drone_1.center_x - drone_2.center_x)
dy = (drone_1.center_y - drone_2.center_y)
distances.append(int(math.sqrt(dx*dx + dy*dy)))
return collections.Counter(distances)
'''
def get_precision(self):
#old way: np.sum(np.abs(self.global_map - self.operator_list[0].internal_map))
size = self.ARENA_WIDTH, self.ARENA_HEIGHT
#belief_map_high_res = im.fromarray(self.operator_list[0].internal_map, 'RGB')
array = 255 - self.operator_list[0].internal_map*255
array = array.astype('uint8')
image = im.fromarray(array)
image_high_res = image.resize(size, im.ANTIALIAS)
image_high_res.save("my_image_resized.png", "PNG")
screen_original = arcade.draw_commands.get_image(x=0, y=0, width=None, height=None)
screen_gray = screen_original.convert('LA')
threshold = 191
screen_threshold = screen_gray.point(lambda p: p > threshold and 255)
#ret,thresh1 = cv2.threshold(gen,0.95,1,cv2.THRESH_BINARY)
#image = get_image()
screen_threshold.save('screenshot.png', 'PNG')
return 0
#array = np.reshape(self.operator_list[0].internal_map, (self.ARENA_WIDTH, self.ARENA_HEIGHT))
'''
def on_update(self, delta_time):
if self.timer == 2:
self.w_time = delta_time
self.u_timer += delta_time
def update(self, interval):
if self.timer == 1:
if self.exp_type == "normal_network":
Thread(target=listener, args=[self]).start()
'''
elif self.exp_type == "user_study":
self.u_name = input("Please enter your name: ")
elif self.exp_type == "user_study_2":
self.u_name = input("Please enter your name: ")
'''
if self.timer >= self.run_time:
if self.exp_type == "user_study" or "user_study_2":
directory = self.directory
if directory == None:
log = open("log_setup.txt", "a")
else:
log = open(directory + "/log_setup.txt", "a")
log.write('\nUser study info:' + '\n')
if self.exp_type == "user_study":
log.write(' -- Experiment: ' + str("User study 1") + '\n')
if self.exp_type == "user_study_2":
log.write(' -- Experiment: ' + str("User study 2") + '\n')
log.write(' -- Player: ' + str(self.u_name) + '\n')
                log.write(' -- Date: ' + str(datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")) + '\n')
log.write(' -- Click count: ' + str(self.c_count) + '\n')
log.close()
self.save_click_map()
arcade.close_window()
local_f = []
global_f = []
for drone in self.drone_list:
local_f += drone.local_forces
global_f += drone.global_forces
# print('LOCAL: ', np.mean(local_f))
# print('GLOBAL: ', np.mean(global_f))
import pandas as pd
distances = pd.DataFrame([(v) for k, v in self.drone_distances.items()])
distances.to_csv(self.directory + '/distances.csv', sep=',')
'''
import csv
with open(self.directory + '\\distances.csv','w') as f:
w = csv.writer(f)
w.writerow(self.drone_distances.keys())
w.writerow(self.drone_distances.values())
'''
if self.exp_type == "normal_network":
# Sending maps to web-api for game interface
self.send_data(self.operator_list[0])
# Start update timer
start_time = timeit.default_timer()
self.timer += 1
if self.exp_type == "user_study":
self.im.set_array(self.operator_list[0].internal_map)
self.u_fig.canvas.draw()
if self.exp_type == "user_study_2":
if self.timer > 1:
t_now_s = int(self.u_timer) % 60
t_now_m = int(self.u_timer) // 60
self.u_fig.suptitle("{}m:{}s elapsed\n\n".format(t_now_m, t_now_s), fontsize=16)
# Main window rectangle creation
for rect in self.s_areas:
if rect[4] == "appended":
rectangle = Rectangle((rect[0], rect[1]), rect[2] - rect[0], rect[3] - rect[1], color='orange', alpha=0.5)
self.axes[0,1].add_patch(rectangle)
rx, ry = rectangle.get_xy()
cx = rx + rectangle.get_width()/2.0
cy = ry + rectangle.get_height()/2.0
i = self.s_areas.index(rect)
if rect[5] == "a":
ann = self.axes[0,1].annotate(f"Area {i} (A)", (cx, cy), color='w', weight='bold',
fontsize=6, ha='center', va='center', alpha=0.5)
self.s_rects.append([rectangle, "appended", ann, "a", "pause"])
elif rect[5] == "d":
ann = self.axes[0,1].annotate(f"Area {i} (D)", (cx, cy), color='w', weight='bold',
fontsize=6, ha='center', va='center', alpha=0.5)
self.s_rects.append([rectangle, "appended", ann, "d", "pause"])
rect[4] = "plotted"
for rec in self.s_rects:
if rec[1] == "appended":
if rec[3] == "a":
# list
if len(self.s_rects) == 1:
l_pos = self.s_list_pos
else:
itm = None
for s in reversed(self.s_list):
if self.s_rects[s[4]][1] == "plotted":
itm = s
break
if itm == None:
l_pos = self.s_list_pos
else:
l_pos = list(itm[2].get_position().bounds)
l_pos[1] -= 0.05
ax = self.s_fig.add_axes(l_pos)
entry = Button(ax, f"Area {i} (Attract) - Status: Initialized", color="orange")
entry.on_clicked(self.u2_btn)
l_pos_2 = l_pos.copy()
l_pos_2[0] = 0.09
l_pos_2[2] = 0.1
ax2 = self.s_fig.add_axes(l_pos_2)
entry2 = Button(ax2, "Remove")
entry2.on_clicked(self.u2_btn_rm)
ax.set_visible(True)
ax2.set_visible(True)
rect_index = self.s_rects.index(rec)
self.s_list.append([entry, entry2, ax, ax2, rect_index])
if rec[3] == "d":
# list
if len(self.s_rects) == 1:
l_pos = self.s_list_pos
else:
itm = None
for s in reversed(self.s_list):
if self.s_rects[s[4]][1] == "plotted":
itm = s
break
if itm is None:
l_pos = self.s_list_pos
# File: core/modules/nginx.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019, doudoudzj
# Copyright (c) 2012 - 2016, VPSMate development team
# All rights reserved.
#
# InPanel is distributed under the terms of the (new) BSD License.
# The full license can be found in 'LICENSE'.
'''Module for Nginx Management'''
import os
import re
from glob import glob
from core.utils import is_valid_ipv4, is_valid_ipv6, version_get
DEBUG = False
# Each key is a directive (d) and each value lists the contexts (c) in which it
# may appear; '_' denotes the main (top-level) context.
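# A hypothetical helper (not part of this module) could consult this table to
# check where a directive is allowed, e.g.:
#
#   def directive_allowed(directive, context='_'):
#       # True if `directive` may appear in `context` ('_' = main context).
#       return context in DIRECTIVES.get(directive, ())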
DIRECTIVES = {
# REF: http://wiki.nginx.org/CoreModule
'daemon': ['_'],
'env': ['_'],
'debug_points': ['_'],
'error_log': ['_', 'http', 'server', 'location'],
'include': ['_', 'http', 'server', 'location'],
'lock_file': ['_'],
'master_process': ['_'],
'pcre_jit': ['_'],
'pid': ['_'],
# 'ssl_engine': ['_'],
'timer_resolution': ['_'],
'user': ['_'],
'worker_cpu_affinity': ['_'],
'worker_priority': ['_'],
'worker_processes': ['_'],
'worker_rlimit_core': ['_'],
'worker_rlimit_nofile': ['_'],
'worker_rlimit_sigpending': ['_'],
'working_directory': ['_'],
# REF: http://wiki.nginx.org/EventsModule
'events': ['_'],
'accept_mutex': ['events'],
'accept_mutex_delay': ['events'],
'debug_connection': ['events'],
'devpoll_changes': ['events'],
'devpoll_events': ['events'],
'kqueue_changes': ['events'],
'kqueue_events': ['events'],
'epoll_events': ['events'],
'multi_accept': ['events'],
'rtsig_signo': ['events'],
'rtsig_overflow_events': ['events'],
'rtsig_overflow_test': ['events'],
'rtsig_overflow_threshold': ['events'],
'use': ['events'],
'worker_connections': ['events'],
# REF: http://wiki.nginx.org/HttpCoreModule
'http': ['_'],
'aio': ['http', 'server', 'location'],
'alias': ['location'],
'chunked_transfer_encoding': ['http', 'server', 'location'],
'client_body_in_file_only': ['http', 'server', 'location'],
'client_body_in_single_buffer': ['http', 'server', 'location'],
'client_body_buffer_size': ['http', 'server', 'location'],
'client_body_temp_path': ['http', 'server', 'location'],
'client_body_timeout': ['http', 'server', 'location'],
'client_header_buffer_size': ['http', 'server'],
'client_header_timeout': ['http', 'server'],
'client_max_body_size': ['http', 'server', 'location'],
'connection_pool_size': ['http', 'server'],
'default_type': ['http', 'server', 'location'],
'directio': ['http', 'server', 'location'],
'directio_alignment': ['http', 'server', 'location'],
'disable_symlinks': ['http', 'server', 'location'],
'error_page': ['http', 'server', 'location', 'if in location', 'if'], #+if
'if_modified_since': ['http', 'server', 'location'],
'ignore_invalid_headers': ['http', 'server'],
'internal': ['location'],
'keepalive_disable': ['http', 'server', 'location'],
'keepalive_timeout': ['http', 'server', 'location'],
'keepalive_requests': ['http', 'server', 'location'],
'large_client_header_buffers': ['http', 'server'],
'limit_except': ['location'],
'limit_rate': ['http', 'server', 'location', 'if in location', 'if'], #+if
'limit_rate_after': ['http', 'server', 'location', 'if in location', 'if'], #+if
'lingering_close': ['http', 'server', 'location'],
'lingering_time': ['http', 'server', 'location'],
'lingering_timeout': ['http', 'server', 'location'],
'listen': ['server'],
'location': ['server', 'location'],
'log_not_found': ['http', 'server', 'location'],
'log_subrequest': ['http', 'server', 'location'],
'max_ranges': ['http', 'server', 'location'],
'merge_slashes': ['http', 'server'],
'msie_padding': ['http', 'server', 'location'],
'msie_refresh': ['http', 'server', 'location'],
'open_file_cache': ['http', 'server', 'location'],
'open_file_cache_errors': ['http', 'server', 'location'],
'open_file_cache_min_uses': ['http', 'server', 'location'],
'open_file_cache_valid': ['http', 'server', 'location'],
'optimize_server_names': ['http', 'server'],
'port_in_redirect': ['http', 'server', 'location'],
'post_action': ['http', 'server', 'location'],
'postpone_output': ['http', 'server', 'location'],
'read_ahead': ['http', 'server', 'location'],
'recursive_error_pages': ['http', 'server', 'location'],
'request_pool_size': ['http', 'server'],
'reset_timedout_connection': ['http', 'server', 'location'],
'resolver': ['http', 'server', 'location'],
'resolver_timeout': ['http', 'server', 'location'],
'root': ['http', 'server', 'location', 'if in location', 'if'], #+if
'satisfy': ['http', 'server', 'location'],
'satisfy_any': ['http', 'server', 'location'],
'send_lowat': ['http', 'server', 'location'],
'send_timeout': ['http', 'server', 'location'],
'sendfile': ['http', 'server', 'location', 'if in location', 'if'], #+if
'sendfile_max_chunk': ['http', 'server', 'location'],
'server': ['http', 'upstream'],
'server_name': ['server'],
'server_name_in_redirect': ['http', 'server', 'location'],
'server_names_hash_max_size': ['http'],
'server_names_hash_bucket_size':['http'],
'server_tokens': ['http', 'server', 'location'],
'tcp_nodelay': ['http', 'server', 'location'],
'tcp_nopush': ['http', 'server', 'location'],
'try_files': ['server', 'location'],
'types': ['http', 'server', 'location'],
'types_hash_bucket_size': ['http', 'server', 'location'],
'types_hash_max_size': ['http', 'server', 'location'],
'underscores_in_headers': ['http', 'server'],
'variables_hash_bucket_size': ['http'],
'variables_hash_max_size': ['http'],
# REF: http://wiki.nginx.org/HttpAccessModule
'allow': ['http', 'server', 'location', 'limit_except'],
'deny': ['http', 'server', 'location', 'limit_except'],
# REF: http://wiki.nginx.org/HttpAuthBasicModule
'auth_basic': ['http', 'server', 'location', 'limit_except'],
'auth_basic_user_file': ['http', 'server', 'location', 'limit_except'],
# REF: http://wiki.nginx.org/HttpAutoindexModule
'autoindex': ['http', 'server', 'location'],
'autoindex_exact_size': ['http', 'server', 'location'],
'autoindex_localtime': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpBrowserModule
'ancient_browser': ['http', 'server', 'location'],
'ancient_browser_value': ['http', 'server', 'location'],
'modern_browser': ['http', 'server', 'location'],
'modern_browser_value': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpCharsetModule
'charset': ['http', 'server', 'location', 'if in location', 'if'], #+if
'charset_map': ['http'],
'charset_types': ['http', 'server', 'location'],
'override_charset': ['http', 'server', 'location', 'if in location', 'if'], #+if
'source_charset': ['http', 'server', 'location', 'if in location', 'if'], #+if
# REF: http://wiki.nginx.org/HttpEmptyGifModule
'empty_gif': ['location'],
# REF: http://wiki.nginx.org/HttpFastcgiModule
'fastcgi_bind': ['http', 'server', 'location'],
'fastcgi_buffer_size': ['http', 'server', 'location'],
'fastcgi_buffers': ['http', 'server', 'location'],
'fastcgi_busy_buffers_size': ['http', 'server', 'location'],
'fastcgi_cache': ['http', 'server', 'location'],
'fastcgi_cache_bypass': ['http', 'server', 'location'],
'fastcgi_cache_key': ['http', 'server', 'location'],
'fastcgi_cache_lock': ['http', 'server', 'location'],
'fastcgi_cache_lock_timeout': ['http', 'server', 'location'],
'fastcgi_cache_methods': ['http', 'server', 'location'],
'fastcgi_cache_min_uses': ['http', 'server', 'location'],
'fastcgi_cache_path': ['http'],
'fastcgi_cache_use_stale': ['http', 'server', 'location'],
'fastcgi_cache_valid': ['http', 'server', 'location'],
'fastcgi_connect_timeout': ['http', 'server', 'location'],
'fastcgi_hide_header': ['http', 'server', 'location'],
'fastcgi_ignore_client_abort': ['http', 'server', 'location'],
'fastcgi_ignore_headers': ['http', 'server', 'location'],
'fastcgi_index': ['http', 'server', 'location'],
'fastcgi_intercept_errors': ['http', 'server', 'location'],
'fastcgi_keep_conn': ['http', 'server', 'location'],
'fastcgi_max_temp_file_size': ['http', 'server', 'location'],
'fastcgi_next_upstream': ['http', 'server', 'location'],
'fastcgi_no_cache': ['http', 'server', 'location'],
'fastcgi_param': ['http', 'server', 'location'],
'fastcgi_pass': ['location', 'if in location', 'if'], #+if
'fastcgi_pass_header': ['http', 'server', 'location'],
'fastcgi_pass_request_body': ['http', 'server', 'location'],
'fastcgi_pass_request_headers': ['http', 'server', 'location'],
'fastcgi_read_timeout': ['http', 'server', 'location'],
'fastcgi_redirect_errors': ['http', 'server', 'location'],
'fastcgi_send_timeout': ['http', 'server', 'location'],
'fastcgi_split_path_info': ['location'],
'fastcgi_store': ['http', 'server', 'location'],
'fastcgi_store_access': ['http', 'server', 'location'],
'fastcgi_temp_file_write_size': ['http', 'server', 'location'],
'fastcgi_temp_path': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpGeoModule
'geo': ['http'],
# REF: http://wiki.nginx.org/HttpGzipModule
'gzip': ['http', 'server', 'location', 'if in location', 'if'], #+if
'gzip_buffers': ['http', 'server', 'location'],
'gzip_comp_level': ['http', 'server', 'location'],
'gzip_disable': ['http', 'server', 'location'],
'gzip_http_version': ['http', 'server', 'location'],
'gzip_min_length': ['http', 'server', 'location'],
'gzip_proxied': ['http', 'server', 'location'],
'gzip_types': ['http', 'server', 'location'],
'gzip_vary': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpHeadersModule
'add_header': ['http', 'server', 'location'],
'expires': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpIndexModule
'index': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpLimitReqModule
'limit_req': ['http', 'server', 'location'],
'limit_req_log_level': ['http', 'server', 'location'],
'limit_req_zone': ['http'],
# Deprecated in 1.1.8
# REF: http://wiki.nginx.org/HttpLimitZoneModule
'limit_zone': ['http'],
#'limit_conn': ['http', 'server', 'location'],
#'limit_conn_log_level': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpLimitConnModule
'limit_conn': ['http', 'server', 'location'],
'limit_conn_zone': ['http'],
'limit_conn_log_level': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpLogModule
'access_log': ['http', 'server', 'location', 'if in location', 'limit_except', 'if'], #+if
'log_format': ['http'],
'open_log_file_cache': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpMapModule
'map': ['http'],
'map_hash_max_size': ['http'],
'map_hash_bucket_size': ['http'],
# REF: http://wiki.nginx.org/HttpMemcachedModule
'memcached_pass': ['location', 'if in location', 'if'], #+if
'memcached_connect_timeout': ['http', 'server', 'location'],
'memcached_read_timeout': ['http', 'server', 'location'],
'memcached_send_timeout': ['http', 'server', 'location'],
'memcached_buffer_size': ['http', 'server', 'location'],
'memcached_next_upstream': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpProxyModule
'proxy_bind': ['http', 'server', 'location'],
'proxy_buffer_size': ['http', 'server', 'location'],
'proxy_buffering': ['http', 'server', 'location'],
'proxy_buffers': ['http', 'server', 'location'],
'proxy_busy_buffers_size': ['http', 'server', 'location'],
'proxy_cache': ['http', 'server', 'location'],
'proxy_cache_bypass': ['http', 'server', 'location'],
'proxy_cache_key': ['http', 'server', 'location'],
'proxy_cache_lock': ['http', 'server', 'location'],
'proxy_cache_lock_timeout': ['http', 'server', 'location'],
'proxy_cache_methods': ['http', 'server', 'location'],
'proxy_cache_min_uses': ['http', 'server', 'location'],
'proxy_cache_path': ['http'],
'proxy_cache_use_stale': ['http', 'server', 'location'],
'proxy_cache_valid': ['http', 'server', 'location'],
'proxy_connect_timeout': ['http', 'server', 'location'],
'proxy_cookie_domain': ['http', 'server', 'location'],
'proxy_cookie_path': ['http', 'server', 'location'],
'proxy_headers_hash_bucket_size':['http', 'server', 'location'],
'proxy_headers_hash_max_size': ['http', 'server', 'location'],
'proxy_hide_header': ['http', 'server', 'location'],
'proxy_http_version': ['http', 'server', 'location'],
'proxy_ignore_client_abort': ['http', 'server', 'location'],
'proxy_ignore_headers': ['http', 'server', 'location'],
'proxy_intercept_errors': ['http', 'server', 'location'],
'proxy_max_temp_file_size': ['http', 'server', 'location'],
'proxy_method': ['http', 'server', 'location'],
'proxy_next_upstream': ['http', 'server', 'location'],
'proxy_no_cache': ['http', 'server', 'location'],
'proxy_pass': ['location', 'if in location', 'limit_except', 'if'], #+if
'proxy_pass_header': ['http', 'server', 'location'],
'proxy_pass_request_body': ['http', 'server', 'location'],
'proxy_pass_request_headers': ['http', 'server', 'location'],
'proxy_redirect': ['http', 'server', 'location'],
'proxy_read_timeout': ['http', 'server', 'location'],
'proxy_redirect_errors': ['http', 'server', 'location'],
'proxy_send_lowat': ['http', 'server', 'location'],
'proxy_send_timeout': ['http', 'server', 'location'],
'proxy_set_body': ['http', 'server', 'location'],
'proxy_set_header': ['http', 'server', 'location'],
'proxy_ssl_session_reuse': ['http', 'server', 'location'],
'proxy_store': ['http', 'server', 'location'],
'proxy_store_access': ['http', 'server', 'location'],
'proxy_temp_file_write_size': ['http', 'server', 'location'],
'proxy_temp_path': ['http', 'server', 'location'],
'proxy_upstream_fail_timeout': ['http', 'server', 'location'],
'proxy_upstream_max_fails': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpRefererModule
'valid_referers': ['server', 'location'],
# REF: http://wiki.nginx.org/HttpRewriteModule
'break': ['server', 'location', 'if'],
'if': ['server', 'location'],
'return': ['server', 'location', 'if'],
'rewrite': ['server', 'location', 'if'],
'rewrite_log': ['server', 'location', 'if'],
'set': ['server', 'location', 'if'],
'uninitialized_variable_warn': ['server', 'location', 'if'],
# REF: http://wiki.nginx.org/HttpScgiModule
'scgi_bind': ['http', 'server', 'location'],
'scgi_buffer_size': ['http', 'server', 'location'],
'scgi_buffering': ['http', 'server', 'location'],
'scgi_buffers': ['http', 'server', 'location'],
'scgi_busy_buffers_size': ['http', 'server', 'location', 'if'],
'scgi_cache': ['http', 'server', 'location'],
'scgi_cache_bypass': ['http', 'server', 'location'],
'scgi_cache_key': ['http', 'server', 'location'],
'scgi_cache_methods': ['http', 'server', 'location'],
'scgi_cache_min_uses': ['http', 'server', 'location'],
'scgi_cache_path': ['http'],
'scgi_cache_use_stale': ['http', 'server', 'location'],
'scgi_cache_valid': ['http', 'server', 'location'],
'scgi_connect_timeout': ['http', 'server', 'location'],
'scgi_hide_header': ['http', 'server', 'location'],
'scgi_ignore_client_abort': ['http', 'server', 'location'],
'scgi_ignore_headers': ['http', 'server', 'location'],
'scgi_intercept_errors': ['http', 'server', 'location'],
'scgi_max_temp_file_size': ['http', 'server', 'location'], #?
'scgi_next_upstream': ['http', 'server', 'location'],
'scgi_no_cache': ['http', 'server', 'location'],
'scgi_param': ['http', 'server', 'location'],
'scgi_pass': ['location', 'if in location', 'if'], #+if
'scgi_pass_header': ['http', 'server', 'location'],
'scgi_pass_request_body': ['http', 'server', 'location'],
'scgi_pass_request_headers': ['http', 'server', 'location'],
'scgi_read_timeout': ['http', 'server', 'location'],
'scgi_send_timeout': ['http', 'server', 'location'],
'scgi_store': ['http', 'server', 'location'],
'scgi_store_access': ['http', 'server', 'location'],
'scgi_temp_file_write_size': ['http', 'server', 'location'],
'scgi_temp_path': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpSplitClientsModule
'split_clients': ['http'],
# REF: http://wiki.nginx.org/HttpSsiModule
'ssi': ['http', 'server', 'location', 'if in location', 'if'], #+if
'ssi_silent_errors': ['http', 'server', 'location'],
'ssi_types': ['http', 'server', 'location'],
'ssi_value_length': ['http', 'server', 'location'],
# REF: http://wiki.nginx.org/HttpUpstreamModule
'ip_hash': ['upstream'],
'keepalive': ['upstream'],
'least_conn': ['upstream'],
"pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cls.cam1 = camera_params_setup(1, res, proj_model, dist_model, params)
cls.cam1.fix = True
# Setup camera extrinsics
# -- cam0
T_BC0 = cls.dataset.cam0_data.config.T_BS
cls.cam0_exts = extrinsics_setup(T_BC0)
cls.cam0_exts.fix = True
# -- cam1
T_BC1 = cls.dataset.cam1_data.config.T_BS
cls.cam1_exts = extrinsics_setup(T_BC1)
cls.cam1_exts.fix = True
def setUp(self):
# Setup test dataset
self.dataset = TestTracker.dataset
self.imu_params = TestTracker.imu_params
self.cam0 = TestTracker.cam0
self.cam1 = TestTracker.cam1
self.cam0_exts = TestTracker.cam0_exts
self.cam1_exts = TestTracker.cam1_exts
# Setup tracker
ts0 = self.dataset.ground_truth.timestamps[0]
T_WB = self.dataset.ground_truth.T_WB[ts0]
feature_tracker = FeatureTracker()
self.tracker = Tracker(feature_tracker)
self.tracker.add_imu(self.imu_params)
self.tracker.add_camera(0, self.cam0, self.cam0_exts)
self.tracker.add_camera(1, self.cam1, self.cam1_exts)
self.tracker.add_overlap(0, 1)
self.tracker.set_initial_pose(T_WB)
def test_tracker_add_camera(self):
""" Test Tracker.add_camera() """
self.assertEqual(len(self.tracker.cam_params), 2)
self.assertEqual(len(self.tracker.cam_geoms), 2)
self.assertEqual(len(self.tracker.cam_exts), 2)
def test_tracker_set_initial_pose(self):
""" Test Tracker.set_initial_pose() """
self.assertTrue(self.tracker.pose_init is not None)
def test_tracker_inertial_callback(self):
""" Test Tracker.inertial_callback() """
ts = 0
acc = np.array([0.0, 0.0, 10.0])
gyr = np.array([0.0, 0.0, 0.0])
self.tracker.inertial_callback(ts, acc, gyr)
self.assertEqual(self.tracker.imu_buf.length(), 1)
self.assertTrue(self.tracker.imu_started)
def test_tracker_triangulate(self):
""" Test Tracker._triangulate() """
# Feature in world frame
p_W = np.array([1.0, 0.01, 0.02])
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Camera parameters and geometry
cam_i = 0
cam_j = 1
cam_params_i = self.tracker.cam_params[cam_i]
cam_params_j = self.tracker.cam_params[cam_j]
cam_geom_i = self.tracker.cam_geoms[cam_i]
cam_geom_j = self.tracker.cam_geoms[cam_j]
# Camera extrinsics
T_BCi = pose2tf(self.tracker.cam_exts[cam_i].param)
T_BCj = pose2tf(self.tracker.cam_exts[cam_j].param)
# Point relative to cam_i and cam_j
p_Ci = tf_point(inv(T_WB @ T_BCi), p_W)
p_Cj = tf_point(inv(T_WB @ T_BCj), p_W)
# Image point z_i and z_j
z_i = cam_geom_i.project(cam_params_i.param, p_Ci)
z_j = cam_geom_j.project(cam_params_j.param, p_Cj)
# Triangulate
p_W_est = self.tracker._triangulate(cam_i, cam_j, z_i, z_j, T_WB)
# Assert
self.assertTrue(np.allclose(p_W_est, p_W))
def test_tracker_add_pose(self):
""" Test Tracker._add_pose() """
# Timestamp
ts = 0
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Add pose
pose = self.tracker._add_pose(ts, T_WB)
self.assertTrue(pose is not None)
def test_tracker_add_feature(self):
""" Test Tracker._add_feature() """
# Feature in world frame
p_W = np.array([1.0, 0.01, 0.02])
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Project world point to image plane
cam_idx = 0
cam_params = self.tracker.cam_params[cam_idx]
cam_geom = self.tracker.cam_geoms[cam_idx]
T_BC = pose2tf(self.tracker.cam_exts[cam_idx].param)
p_C = tf_point(inv(T_WB @ T_BC), p_W)
z = cam_geom.project(cam_params.param, p_C)
# Add feature
fid = 0
ts = 0
kp = cv2.KeyPoint(z[0], z[1], 0)
self.tracker._add_feature(fid, ts, cam_idx, kp)
# Assert
self.assertTrue(fid in self.tracker.features)
self.assertEqual(len(self.tracker.features), 1)
def test_tracker_update_feature(self):
""" Test Tracker._update_feature() """
# Feature in world frame
p_W = np.array([1.0, 0.01, 0.02])
# Body pose in world frame
C_WB = euler321(*deg2rad([-90.0, 0.0, -90.0]))
r_WB = np.array([0.0, 0.0, 0.0])
T_WB = tf(C_WB, r_WB)
# Camera parameters and geometry
cam_i = 0
cam_j = 1
cam_params_i = self.tracker.cam_params[cam_i]
cam_params_j = self.tracker.cam_params[cam_j]
cam_geom_i = self.tracker.cam_geoms[cam_i]
cam_geom_j = self.tracker.cam_geoms[cam_j]
# Project p_W to image point z_i and z_j
T_BCi = pose2tf(self.tracker.cam_exts[cam_i].param)
T_BCj = pose2tf(self.tracker.cam_exts[cam_j].param)
p_Ci = tf_point(inv(T_WB @ T_BCi), p_W)
p_Cj = tf_point(inv(T_WB @ T_BCj), p_W)
z_i = cam_geom_i.project(cam_params_i.param, p_Ci)
z_j = cam_geom_j.project(cam_params_j.param, p_Cj)
# Add feature
fid = 0
ts = 0
kp_i = cv2.KeyPoint(z_i[0], z_i[1], 0)
kp_j = cv2.KeyPoint(z_j[0], z_j[1], 0)
self.tracker._add_feature(fid, ts, cam_i, kp_i)
self.tracker._update_feature(fid, ts, cam_j, kp_j, T_WB)
# Assert
feature = self.tracker.features[fid]
p_W_est = feature.param
self.assertTrue(fid in self.tracker.features)
self.assertEqual(len(self.tracker.features), 1)
self.assertTrue(feature.data.initialized())
self.assertTrue(np.allclose(p_W_est, p_W))
def test_tracker_process_features(self):
""" Test Tracker._process_features() """
for ts in self.dataset.cam0_data.timestamps:
# Get ground truth pose
T_WB = self.dataset.get_ground_truth_pose(ts)
if T_WB is None:
continue
# Feed camera images to feature tracker
img0 = self.dataset.get_camera_image(0, ts)
img1 = self.dataset.get_camera_image(1, ts)
ft_data = self.tracker.feature_tracker.update(ts, {0: img0, 1: img1})
# Process features
pose = self.tracker._add_pose(ts, T_WB)
self.tracker._process_features(ts, ft_data, pose)
self.assertTrue(self.tracker.nb_features() > 0)
break
def test_tracker_add_keyframe(self):
""" Test Tracker._add_keyframe() """
for ts in self.dataset.cam0_data.timestamps:
# Get ground truth pose
T_WB = self.dataset.get_ground_truth_pose(ts)
if T_WB is None:
continue
# Feed camera images to feature tracker
img0 = self.dataset.get_camera_image(0, ts)
img1 = self.dataset.get_camera_image(1, ts)
mcam_imgs = {0: img0, 1: img1}
ft_data = self.tracker.feature_tracker.update(ts, mcam_imgs)
# Process features
pose = self.tracker._add_pose(ts, T_WB)
self.tracker._process_features(ts, ft_data, pose)
self.tracker._add_keyframe(ts, mcam_imgs, ft_data, pose)
self.assertTrue(self.tracker.nb_features() > 0)
self.assertEqual(self.tracker.nb_keyframes(), 1)
break
@unittest.skip("")
def test_tracker_vision_callback(self):
""" Test Tracker.vision_callback() """
# Disable imu in Tracker
self.tracker.imu_params = None
# Create csv files
pose_est_csv = open("/tmp/poses_est.csv", "w")
pose_gnd_csv = open("/tmp/poses_gnd.csv", "w")
pose_est_csv.write("ts,rx,ry,rz,qw,qx,qy,qz\n")
pose_gnd_csv.write("ts,rx,ry,rz,qw,qx,qy,qz\n")
poses_est = []
poses_gnd = []
# Loop through timestamps
for k, ts in enumerate(self.dataset.cam0_data.timestamps[0:300]):
# Get ground truth pose
T_WB = self.dataset.get_ground_truth_pose(ts)
if T_WB is None:
continue
# Vision callback
img0 = self.dataset.get_camera_image(0, ts)
img1 = self.dataset.get_camera_image(1, ts)
self.tracker.vision_callback(ts, {0: img0, 1: img1})
# print(f"{ts}, {self.tracker.nb_features()}")
# self.assertTrue(self.tracker.nb_features() > 0)
# self.assertEqual(self.tracker.nb_keyframes(), 1)
last_kf = self.tracker.keyframes[-1]
poses_est.append(tf2pose(pose2tf(last_kf.pose.param)))
poses_gnd.append(tf2pose(T_WB))
print(f"frame_idx: {k}")
pose_est_csv.write("%ld,%f,%f,%f,%f,%f,%f,%f\n" % (ts, *poses_est[-1]))
pose_gnd_csv.write("%ld,%f,%f,%f,%f,%f,%f,%f\n" % (ts, *poses_gnd[-1]))
# Close csv files
pose_est_csv.close()
pose_gnd_csv.close()
# Plot
poses_gnd = pandas.read_csv("/tmp/poses_gnd.csv")
poses_est = pandas.read_csv("/tmp/poses_est.csv")
title = "Displacement"
data = {"Ground Truth": poses_gnd, "Estimate": poses_est}
plot_xyz(title, data, 'ts', 'rx', 'ry', 'rz', 'Displacement [m]')
plt.show()
# CALIBRATION #################################################################
class TestCalibration(unittest.TestCase):
""" Test calibration functions """
def test_aprilgrid(self):
""" Test aprilgrid """
# grid = AprilGrid()
# self.assertTrue(grid is not None)
grid = AprilGrid.load(
"/tmp/aprilgrid_test/mono/cam0/1403709383937837056.csv")
self.assertTrue(grid is not None)
dataset = EurocDataset(euroc_data_path)
res = dataset.cam0_data.config.resolution
proj_params = dataset.cam0_data.config.intrinsics
dist_params = dataset.cam0_data.config.distortion_coefficients
proj_model = "pinhole"
dist_model = "radtan4"
params = np.block([*proj_params, *dist_params])
cam0 = camera_params_setup(0, res, proj_model, dist_model, params)
grid.solvepnp(cam0)
# debug = True
debug = False
if debug:
_, ax = plt.subplots()
for _, _, kp, _ in grid.get_measurements():
ax.plot(kp[0], kp[1], 'r.')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlim([0, 752])
ax.set_ylim([0, 480])
ax.set_ylim(ax.get_ylim()[::-1])
plt.show()
def test_calib_generate_poses(self):
""" Test calib_generate_poses() """
# Calibration target
calib_target = AprilGrid()
poses = calib_generate_poses(calib_target)
self.assertTrue(len(poses) > 0)
# Calibration target pose in world frame
C_WF = euler321(-pi / 2.0, 0.0, deg2rad(80.0))
r_WF = np.array([0.0, 0.0, 0.0])
T_WF = tf(C_WF, r_WF)
# debug = True
debug = False
if debug:
plt.figure()
ax = plt.axes(projection='3d')
calib_target.plot(ax, T_WF)
for T_FC in poses:
plot_tf(ax, T_WF @ T_FC, size=0.05)
plot_set_axes_equal(ax)
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")
plt.show()
def test_calib_generate_random_poses(self):
""" Test calib_generate_random_poses() """
# Calibration target
calib_target = AprilGrid()
poses = calib_generate_random_poses(calib_target)
self.assertTrue(len(poses) > 0)
# Calibration target pose in world frame
C_WF = euler321(-pi / 2.0, 0.0, deg2rad(80.0))
r_WF = np.array([0.0, 0.0, 0.0])
T_WF = tf(C_WF, r_WF)
# debug = True
debug = False
if debug:
plt.figure()
ax = plt.axes(projection='3d')
calib_target.plot(ax, T_WF)
for T_FC in poses:
plot_tf(ax, T_WF @ T_FC, size=0.05)
plot_set_axes_equal(ax)
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")
plt.show()
def test_calibrator(self):
""" Test Calibrator """
# Setup
grid_csvs = glob.glob("/tmp/aprilgrid_test/mono/cam0/*.csv")
grids = [AprilGrid.load(csv_path) for csv_path in grid_csvs]
self.assertTrue(len(grid_csvs) > 0)
self.assertTrue(len(grids) > 0)
# Calibrator
calib = Calibrator()
# -- Add cam0
cam_idx = 0
cam_res = [752, 480]
proj_model = "pinhole"
dist_model = "radtan4"
calib.add_camera(cam_idx, cam_res, proj_model, dist_model)
# -- Add camera views
for grid in grids:
if grid is not None:
calib.add_camera_view(grid.ts, cam_idx, grid)
if calib.get_num_views() == 10:
break
# -- Solve
calib.solve()
# SIMULATION #################################################################
class TestSimulation(unittest.TestCase):
""" Test simulation functions """
def test_create_3d_features(self):
""" Test create 3D features """
debug = False
x_bounds = np.array([-10.0, 10.0])
y_bounds = np.array([-10.0, 10.0])
z_bounds = np.array([-10.0, 10.0])
nb_features = 1000
features = create_3d_features(x_bounds, y_bounds, z_bounds, nb_features)
self.assertTrue(features.shape == (nb_features, 3))
if debug:
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(features[:, 0], features[:, 1], features[:, 2])
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")
plt.show()
def test_create_3d_features_perimeter(self):
""" Test create_3d_features_perimeter() """
debug = False
origin = np.array([0.0, 0.0, 0.0])
dim = np.array([10.0, 10.0, 5.0])
nb_features = 1000
features = create_3d_features_perimeter(origin, dim, nb_features)
self.assertTrue(features.shape == (nb_features, 3))
if debug:
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(features[:, 0], features[:, 1], features[:, 2])
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")
plt.show()
def test_sim_camera_frame(self):
""" Test SimCameraFrame() """
# Camera properties
cam_idx = 0
img_w = 640
img_h = 480
res = [img_w, img_h]
fov = 120.0
fx = focal_length(img_w, fov)
fy = focal_length(img_w, fov)
cx = img_w / 2.0
cy = img_h / 2.0
# Camera parameters
proj_model = "pinhole"
dist_model = "radtan4"
proj_params = [fx, fy, cx, cy]
dist_params = [0.0, 0.0, 0.0, 0.0]
params | |
#!/usr/bin/env python
"""
<Program Name>
test_util.py
<Author>
<NAME>.
<Started>
February 1, 2013.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'util.py'
"""
# Help with Python 3 compatibility, where the print statement has been replaced
# by a function, an implicit relative import is invalid, and the '/' operator
# performs true division. Example: print 'hello world' raises a 'SyntaxError'
# exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import gzip
import shutil
import logging
import tempfile
import unittest
import tuf
import tuf.log
import tuf.hash
import tuf.util
import tuf.unittest_toolbox as unittest_toolbox
import tuf._vendor.six as six
logger = logging.getLogger('tuf.test_util')
class TestUtil(unittest_toolbox.Modified_TestCase):
def setUp(self):
unittest_toolbox.Modified_TestCase.setUp(self)
self.temp_fileobj = tuf.util.TempFile()
def tearDown(self):
unittest_toolbox.Modified_TestCase.tearDown(self)
self.temp_fileobj.close_temp_file()
def test_A1_tempfile_close_temp_file(self):
# Was the temporary file closed?
self.temp_fileobj.close_temp_file()
self.assertTrue(self.temp_fileobj.temporary_file.closed)
def _extract_tempfile_directory(self, config_temp_dir=None):
"""
Takes a directory (essentially specified in the conf.py as
'temporary_directory') and substitutes tempfile.TemporaryFile() with
tempfile.mkstemp() in order to extract the actual directory of the stored
tempfile. Returns the config's temporary directory (or default temp
directory) and actual directory.
"""
# Patching 'tuf.conf.temporary_directory'.
tuf.conf.temporary_directory = config_temp_dir
if config_temp_dir is None:
# 'config_temp_dir' needs to be set to default.
config_temp_dir = tempfile.gettempdir()
# Patching 'tempfile.TemporaryFile()' (by substituting
# tempfile.TemporaryFile() with tempfile.mkstemp()) in order to get the
# directory of the stored tempfile object.
saved_tempfile_TemporaryFile = tuf.util.tempfile.NamedTemporaryFile
tuf.util.tempfile.NamedTemporaryFile = tempfile.mkstemp
_temp_fileobj = tuf.util.TempFile()
tuf.util.tempfile.NamedTemporaryFile = saved_tempfile_TemporaryFile
junk, _tempfilepath = _temp_fileobj.temporary_file
_tempfile_dir = os.path.dirname(_tempfilepath)
# In the case when 'config_temp_dir' is None or some other discrepancy,
# '_temp_fileobj' needs to be closed manually since tempfile.mkstemp()
# was used.
if os.path.exists(_tempfilepath):
os.remove(_tempfilepath)
return config_temp_dir, _tempfile_dir
def test_A2_tempfile_init(self):
# Goal: Verify that temporary files are stored in the appropriate temp
# directory. The location of the temporary files is set in 'tuf.conf.py'.
# Test: Expected input verification.
# Assumed 'tuf.conf.temporary_directory' is 'None' initially.
temp_file = tuf.util.TempFile()
temp_file_directory = os.path.dirname(temp_file.temporary_file.name)
self.assertEqual(tempfile.gettempdir(), temp_file_directory)
saved_temporary_directory = tuf.conf.temporary_directory
temp_directory = self.make_temp_directory()
tuf.conf.temporary_directory = temp_directory
temp_file = tuf.util.TempFile()
temp_file_directory = os.path.dirname(temp_file.temporary_file.name)
self.assertEqual(temp_directory, temp_file_directory)
tuf.conf.temporary_directory = saved_temporary_directory
# Test: Unexpected input handling.
config_temp_dirs = [self.random_string(), 123, ['a'], {'a':1}]
for config_temp_dir in config_temp_dirs:
config_temp_dir, actual_dir = \
self._extract_tempfile_directory(config_temp_dir)
self.assertEqual(tempfile.gettempdir(), actual_dir)
def test_A3_tempfile_read(self):
filepath = self.make_temp_data_file(data = '1234567890')
fileobj = open(filepath, 'rb')
# Patching 'temp_fileobj.temporary_file'.
self.temp_fileobj.temporary_file = fileobj
# Test: Expected input.
self.assertEqual(self.temp_fileobj.read().decode('utf-8'), '1234567890')
self.assertEqual(self.temp_fileobj.read(4).decode('utf-8'), '1234')
# Test: Unexpected input.
for bogus_arg in ['abcd', ['abcd'], {'a':'a'}, -100]:
self.assertRaises(tuf.FormatError, self.temp_fileobj.read, bogus_arg)
def test_A4_tempfile_write(self):
data = self.random_string()
self.temp_fileobj.write(data.encode('utf-8'))
self.assertEqual(data, self.temp_fileobj.read().decode('utf-8'))
self.temp_fileobj.write(data.encode('utf-8'), auto_flush=False)
self.assertEqual(data, self.temp_fileobj.read().decode('utf-8'))
def test_A5_tempfile_move(self):
# Destination directory to save the temporary file in.
dest_temp_dir = self.make_temp_directory()
dest_path = os.path.join(dest_temp_dir, self.random_string())
self.temp_fileobj.write(self.random_string().encode('utf-8'))
self.temp_fileobj.move(dest_path)
self.assertTrue(dest_path)
def _compress_existing_file(self, filepath):
"""
[Helper] Compresses file 'filepath' and returns the file path of
the compressed file.
"""
# NOTE: DO NOT forget to remove the newly created compressed file!
if os.path.exists(filepath):
compressed_filepath = filepath+'.gz'
f_in = open(filepath, 'rb')
f_out = gzip.open(compressed_filepath, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
return compressed_filepath
else:
logger.error('Compression of '+repr(filepath)+' failed. Path does not exist.')
sys.exit(1)
def _decompress_file(self, compressed_filepath):
"""[Helper]"""
if os.path.exists(compressed_filepath):
f = gzip.open(compressed_filepath, 'rb')
file_content = f.read()
f.close()
return file_content
else:
logger.error('Decompression of '+repr(compressed_filepath)+' failed. '+\
'Path does not exist.')
sys.exit(1)
def test_A6_tempfile_decompress_temp_file_object(self):
# Setup: generate a temp file (self.make_temp_data_file()),
# compress it. Write it to self.temp_fileobj().
filepath = self.make_temp_data_file()
fileobj = open(filepath, 'rb')
compressed_filepath = self._compress_existing_file(filepath)
compressed_fileobj = open(compressed_filepath, 'rb')
self.temp_fileobj.write(compressed_fileobj.read())
os.remove(compressed_filepath)
# Try decompression using an incorrect compression type, i.e. compressions
# other than 'gzip'. In short, feed it invalid input.
bogus_args = ['zip', 1234, self.random_string()]
for arg in bogus_args:
self.assertRaises(tuf.Error,
self.temp_fileobj.decompress_temp_file_object, arg)
self.temp_fileobj.decompress_temp_file_object('gzip')
self.assertEqual(self.temp_fileobj.read(), fileobj.read())
# Checking the content of the TempFile's '_orig_file' instance.
check_compressed_original = self.make_temp_file()
with open(check_compressed_original, 'wb') as file_object:
file_object.write(self.temp_fileobj._orig_file.read())
data_in_orig_file = self._decompress_file(check_compressed_original)
fileobj.seek(0)
self.assertEqual(data_in_orig_file, fileobj.read())
# Try decompressing once more.
self.assertRaises(tuf.Error,
self.temp_fileobj.decompress_temp_file_object, 'gzip')
# Test decompression of invalid gzip file.
temp_file = tuf.util.TempFile()
fileobj.seek(0)
temp_file.write(fileobj.read())
temp_file.decompress_temp_file_object('gzip')
def test_B1_get_file_details(self):
# Goal: Verify proper output given certain expected/unexpected input.
# Making a temporary file.
filepath = self.make_temp_data_file()
# Computing the hash and length of the tempfile.
digest_object = tuf.hash.digest_filename(filepath, algorithm='sha256')
file_hash = {'sha256' : digest_object.hexdigest()}
file_length = os.path.getsize(filepath)
# Test: Expected input.
self.assertEqual(tuf.util.get_file_details(filepath), (file_length, file_hash))
# Test: Incorrect input.
bogus_inputs = [self.random_string(), 1234, [self.random_string()],
{'a': 'b'}, None]
for bogus_input in bogus_inputs:
if isinstance(bogus_input, six.string_types):
self.assertRaises(tuf.Error, tuf.util.get_file_details, bogus_input)
else:
self.assertRaises(tuf.FormatError, tuf.util.get_file_details, bogus_input)
def test_B2_ensure_parent_dir(self):
existing_parent_dir = self.make_temp_directory()
non_existing_parent_dir = os.path.join(existing_parent_dir, 'a', 'b')
for parent_dir in [existing_parent_dir, non_existing_parent_dir, 12, [3]]:
if isinstance(parent_dir, six.string_types):
tuf.util.ensure_parent_dir(os.path.join(parent_dir, 'a.txt'))
self.assertTrue(os.path.isdir(parent_dir))
else:
self.assertRaises(tuf.FormatError, tuf.util.ensure_parent_dir, parent_dir)
def test_B3_file_in_confined_directories(self):
# Goal: Provide invalid input for 'filepath' and 'confined_directories'.
# Include inputs like: '[1, 2, "a"]' and such...
# Reference to 'file_in_confined_directories()' to improve readability.
in_confined_directory = tuf.util.file_in_confined_directories
list_of_confined_directories = ['a', 12, {'a':'a'}, [1]]
list_of_filepaths = [12, ['a'], {'a':'a'}, 'a']
for bogus_confined_directory in list_of_confined_directories:
for filepath in list_of_filepaths:
self.assertRaises(tuf.FormatError, in_confined_directory,
filepath, bogus_confined_directory)
# Test: Inputs that evaluate to False.
confined_directories = ['a/b/', 'a/b/c/d/']
self.assertFalse(in_confined_directory('a/b/c/1.txt', confined_directories))
confined_directories = ['a/b/c/d/e/']
self.assertFalse(in_confined_directory('a', confined_directories))
self.assertFalse(in_confined_directory('a/b', confined_directories))
self.assertFalse(in_confined_directory('a/b/c', confined_directories))
self.assertFalse(in_confined_directory('a/b/c/d', confined_directories))
# Below, 'e' is a file in the 'a/b/c/d/' directory.
self.assertFalse(in_confined_directory('a/b/c/d/e', confined_directories))
# Test: Inputs that evaluate to True.
self.assertTrue(in_confined_directory('a/b/c.txt', ['']))
self.assertTrue(in_confined_directory('a/b/c.txt', ['a/b/']))
self.assertTrue(in_confined_directory('a/b/c.txt', ['x', '']))
self.assertTrue(in_confined_directory('a/b/c/..', ['a/']))
def test_B4_import_json(self):
self.assertTrue('json' in sys.modules)
def test_B5_load_json_string(self):
# Test normal case.
data = ['a', {'b': ['c', None, 30.3, 29]}]
json_string = tuf.util.json.dumps(data)
self.assertEqual(data, tuf.util.load_json_string(json_string))
# Test invalid arguments.
self.assertRaises(tuf.Error, tuf.util.load_json_string, 8)
invalid_json_string = {'a': tuf.FormatError}
self.assertRaises(tuf.Error, tuf.util.load_json_string, invalid_json_string)
def test_B6_load_json_file(self):
data = ['a', {'b': ['c', None, 30.3, 29]}]
filepath = self.make_temp_file()
fileobj = open(filepath, 'wt')
tuf.util.json.dump(data, fileobj)
fileobj.close()
self.assertEqual(data, tuf.util.load_json_file(filepath))
# Test a gzipped file.
compressed_filepath = self._compress_existing_file(filepath)
self.assertEqual(data, tuf.util.load_json_file(compressed_filepath))
Errors = (tuf.FormatError, IOError)
for bogus_arg in [b'a', 1, [b'a'], {'a':b'b'}]:
self.assertRaises(Errors, tuf.util.load_json_file, bogus_arg)
def test_C1_get_target_hash(self):
# Test normal case.
expected_target_hashes = {
'/file1.txt': 'e3a3d89eb3b70ce3fbce6017d7b8c12d4abd5635427a0e8a238f53157df85b3d',
'/README.txt': '8faee106f1bb69f34aaf1df1e3c2e87d763c4d878cb96b91db13495e32ceb0b0',
'/warehouse/file2.txt': 'd543a573a2cec67026eff06e75702303559e64e705eba06f65799baaf0424417'
}
for filepath, target_hash in six.iteritems(expected_target_hashes):
self.assertTrue(tuf.formats.RELPATH_SCHEMA.matches(filepath))
self.assertTrue(tuf.formats.HASH_SCHEMA.matches(target_hash))
self.assertEqual(tuf.util.get_target_hash(filepath), target_hash)
# Test for improperly formatted argument.
self.assertRaises(tuf.FormatError, tuf.util.get_target_hash, 8)
def test_C2_find_delegated_role(self):
# Test normal case. Create an expected role list, which is one of the
# required arguments to 'find_delegated_role()'.
role_list = [
{
"keyids": [
"<KEY>"
],
"name": "targets/warehouse",
"paths": [
"/file1.txt", "/README.txt", '/warehouse/'
],
"threshold": 3
},
{
"keyids": [
"<KEY>"
],
"name": "targets/tuf",
"paths": [
"/updater.py", "formats.py", '/tuf/'
],
"threshold": 4
}
]
self.assertTrue(tuf.formats.ROLELIST_SCHEMA.matches(role_list))
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/tuf'), 1)
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/warehouse'), 0)
# Test for non-existent role. 'find_delegated_role()' returns 'None'
# if the role is not found.
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/non-existent'),
None)
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError, tuf.util.find_delegated_role, 8, role_list)
self.assertRaises(tuf.FormatError, tuf.util.find_delegated_role, 8, 'targets/tuf')
# Test duplicate roles.
role_list.append(role_list[1])
self.assertRaises(tuf.RepositoryError, tuf.util.find_delegated_role, role_list,
'targets/tuf')
# Test missing 'name' attribute (optional, but required by
# 'find_delegated_role()').
# Delete the duplicate role, and the remaining role's 'name' attribute.
del role_list[2]
del role_list[0]['name']
self.assertRaises(tuf.RepositoryError, tuf.util.find_delegated_role, role_list,
'targets/warehouse')
def test_C3_paths_are_consistent_with_hash_prefixes(self):
# Test normal case.
path_hash_prefixes = ['e3a3', '8fae', 'd543']
list_of_targets = ['/file1.txt', '/README.txt', '/warehouse/file2.txt']
# Ensure the paths of 'list_of_targets' each have the expected path hash
# prefix listed in 'path_hash_prefixes'.
for filepath in list_of_targets:
self.assertTrue(tuf.util.get_target_hash(filepath)[0:4] in path_hash_prefixes)
self.assertTrue(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
path_hash_prefixes))
extra_invalid_prefix = ['e3a3', '8fae', 'd543', '0000']
self.assertTrue(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
extra_invalid_prefix))
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes, 8,
path_hash_prefixes)
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes,
list_of_targets, 8)
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes,
list_of_targets, ['zza1'])
# Test invalid list of targets.
bad_target_path = '/file5.txt'
self.assertTrue(tuf.util.get_target_hash(bad_target_path)[0:4] not in
path_hash_prefixes)
self.assertFalse(tuf.util.paths_are_consistent_with_hash_prefixes([bad_target_path],
path_hash_prefixes))
# Add invalid target path to 'list_of_targets'.
list_of_targets.append(bad_target_path)
self.assertFalse(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
path_hash_prefixes))
def test_C4_ensure_all_targets_allowed(self):
# Test normal case.
rolename = 'targets/warehouse'
self.assertTrue(tuf.formats.ROLENAME_SCHEMA.matches(rolename))
list_of_targets = ['/file1.txt', '/README.txt', '/warehouse/file2.txt']
self.assertTrue(tuf.formats.RELPATHS_SCHEMA.matches(list_of_targets))
parent_delegations = {"keys": {
"<KEY>": {
"keytype": "ed25519",
"keyval": {
"public": "<KEY>"
}
}
},
"roles": [
{
"keyids": [
"<KEY>"
],
"name": "targets/warehouse",
"paths": [
"/file1.txt", "/README.txt", '/warehouse/'
],
"threshold": 1
}
]
}
self.assertTrue(tuf.formats.DELEGATIONS_SCHEMA.matches(parent_delegations))
tuf.util.ensure_all_targets_allowed(rolename, list_of_targets,
parent_delegations)
# The target files of 'targets' are always allowed. 'list_of_targets' and
# 'parent_delegations' are not checked in this case.
tuf.util.ensure_all_targets_allowed('targets', list_of_targets,
parent_delegations)
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
8, | |
self.redis.zadd('foo', one=0)
self.redis.zadd('foo', two=0)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='MAX')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2), (b'three', 3)])
def test_zunionstore_min(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=0)
self.redis.zadd('bar', two=0)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='MIN')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 0), (b'two', 0), (b'three', 3)])
def test_zunionstore_weights(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', four=4)
self.redis.zunionstore('baz', {'foo': 1, 'bar': 2}, aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 3), (b'two', 6), (b'four', 8)])
def test_zunionstore_mixed_set_types(self):
# No score, redis will use 1.0.
self.redis.sadd('foo', 'one')
self.redis.sadd('foo', 'two')
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'three', 3), (b'two', 3)])
def test_zunionstore_badkey(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zunionstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2)])
self.redis.zunionstore('baz', {'foo': 1, 'bar': 2}, aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2)])
def test_zinterstore(self):
self.redis.zadd('foo', one=1)
self.redis.zadd('foo', two=2)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zinterstore('baz', ['foo', 'bar'])
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'two', 4)])
def test_zinterstore_mixed_set_types(self):
self.redis.sadd('foo', 'one')
self.redis.sadd('foo', 'two')
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zinterstore('baz', ['foo', 'bar'], aggregate='SUM')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 2), (b'two', 3)])
def test_zinterstore_max(self):
self.redis.zadd('foo', one=0)
self.redis.zadd('foo', two=0)
self.redis.zadd('bar', one=1)
self.redis.zadd('bar', two=2)
self.redis.zadd('bar', three=3)
self.redis.zinterstore('baz', ['foo', 'bar'], aggregate='MAX')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1), (b'two', 2)])
def test_zinterstore_onekey(self):
self.redis.zadd('foo', one=1)
self.redis.zinterstore('baz', ['foo'], aggregate='MAX')
self.assertEqual(self.redis.zrange('baz', 0, -1, withscores=True),
[(b'one', 1)])
def test_zinterstore_nokey(self):
with self.assertRaises(redis.ResponseError):
self.redis.zinterstore('baz', [], aggregate='MAX')
def test_zunionstore_nokey(self):
with self.assertRaises(redis.ResponseError):
self.redis.zunionstore('baz', [], aggregate='MAX')
def test_multidb(self):
r1 = self.create_redis(db=0)
r2 = self.create_redis(db=1)
r1['r1'] = 'r1'
r2['r2'] = 'r2'
self.assertTrue('r2' not in r1)
self.assertTrue('r1' not in r2)
self.assertEqual(r1['r1'], b'r1')
self.assertEqual(r2['r2'], b'r2')
r1.flushall()
self.assertTrue('r1' not in r1)
self.assertTrue('r2' not in r2)
def test_basic_sort(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo'), [b'1', b'2', b'3'])
def test_empty_sort(self):
self.assertEqual(self.redis.sort('foo'), [])
def test_sort_range_offset_range(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo', start=0, num=2), [b'1', b'2'])
def test_sort_range_offset_range_and_desc(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort("foo", start=0, num=1, desc=True),
[b"4"])
def test_sort_range_offset_norange(self):
with self.assertRaises(redis.RedisError):
self.redis.sort('foo', start=1)
def test_sort_range_with_large_range(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
# num=20 even though len(foo) is 4.
self.assertEqual(self.redis.sort('foo', start=1, num=20),
[b'2', b'3', b'4'])
def test_sort_descending(self):
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo', desc=True), [b'3', b'2', b'1'])
def test_sort_alpha(self):
self.redis.rpush('foo', '2a')
self.redis.rpush('foo', '1b')
self.redis.rpush('foo', '2b')
self.redis.rpush('foo', '1a')
self.assertEqual(self.redis.sort('foo', alpha=True),
[b'1a', b'1b', b'2a', b'2b'])
def test_sort_non_numeric_without_alpha_raises(self):
self.redis.rpush('foo', '2a')
self.redis.rpush('foo', '1b')
self.redis.rpush('foo', '2b')
self.redis.rpush('foo', '1a')
with self.assertRaises(redis.ResponseError):
self.redis.sort('foo', alpha=False)
def test_sort_with_store_option(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.assertEqual(self.redis.sort('foo', store='bar'), 4)
self.assertEqual(self.redis.lrange('bar', 0, -1),
[b'1', b'2', b'3', b'4'])
def test_sort_with_by_and_get_option(self):
self.redis.rpush('foo', '2')
self.redis.rpush('foo', '1')
self.redis.rpush('foo', '4')
self.redis.rpush('foo', '3')
self.redis['weight_1'] = '4'
self.redis['weight_2'] = '3'
self.redis['weight_3'] = '2'
self.redis['weight_4'] = '1'
self.redis['data_1'] = 'one'
self.redis['data_2'] = 'two'
self.redis['data_3'] = 'three'
self.redis['data_4'] = 'four'
self.assertEqual(self.redis.sort('foo', by='weight_*', get='data_*'),
[b'four', b'three', b'two', b'one'])
self.assertEqual(self.redis.sort('foo', by='weight_*', get='#'),
[b'4', b'3', b'2', b'1'])
self.assertEqual(
self.redis.sort('foo', by='weight_*', get=('data_*', '#')),
[b'four', b'4', b'three', b'3', b'two', b'2', b'one', b'1'])
self.assertEqual(self.redis.sort('foo', by='weight_*', get='data_1'),
[None, None, None, None])
def test_sort_with_hash(self):
self.redis.rpush('foo', 'middle')
self.redis.rpush('foo', 'eldest')
self.redis.rpush('foo', 'youngest')
self.redis.hset('record_youngest', 'age', 1)
self.redis.hset('record_youngest', 'name', 'baby')
self.redis.hset('record_middle', 'age', 10)
self.redis.hset('record_middle', 'name', 'teen')
self.redis.hset('record_eldest', 'age', 20)
self.redis.hset('record_eldest', 'name', 'adult')
self.assertEqual(self.redis.sort('foo', by='record_*->age'),
[b'youngest', b'middle', b'eldest'])
self.assertEqual(
self.redis.sort('foo', by='record_*->age', get='record_*->name'),
[b'baby', b'teen', b'adult'])
def test_sort_with_set(self):
self.redis.sadd('foo', '3')
self.redis.sadd('foo', '1')
self.redis.sadd('foo', '2')
self.assertEqual(self.redis.sort('foo'), [b'1', b'2', b'3'])
def test_pipeline(self):
# The pipeline method returns an object for
# issuing multiple commands in a batch.
p = self.redis.pipeline()
p.watch('bam')
p.multi()
p.set('foo', 'bar').get('foo')
p.lpush('baz', 'quux')
p.lpush('baz', 'quux2').lrange('baz', 0, -1)
res = p.execute()
# Check return values returned as list.
self.assertEqual(res, [True, b'bar', 1, 2, [b'quux2', b'quux']])
# Check side effects happened as expected.
self.assertEqual(self.redis.lrange('baz', 0, -1), [b'quux2', b'quux'])
# Check that the command buffer has been emptied.
self.assertEqual(p.execute(), [])
def test_pipeline_ignore_errors(self):
"""Test the pipeline ignoring errors when asked."""
with self.redis.pipeline() as p:
p.set('foo', 'bar')
p.rename('baz', 'bats')
with self.assertRaises(redis.exceptions.ResponseError):
p.execute()
self.assertEqual([], p.execute())
with self.redis.pipeline() as p:
p.set('foo', 'bar')
p.rename('baz', 'bats')
res = p.execute(raise_on_error=False)
self.assertEqual([], p.execute())
self.assertEqual(len(res), 2)
self.assertIsInstance(res[1], redis.exceptions.ResponseError)
def test_multiple_successful_watch_calls(self):
p = self.redis.pipeline()
p.watch('bam')
p.multi()
p.set('foo', 'bar')
# Check that the watched keys buffer has been emptied.
p.execute()
# bam is no longer being watched, so it's ok to modify
# it now.
p.watch('foo')
self.redis.set('bam', 'boo')
p.multi()
p.set('foo', 'bats')
self.assertEqual(p.execute(), [True])
def test_pipeline_non_transactional(self):
# For our simple-minded model I don't think
# there is any observable difference.
p = self.redis.pipeline(transaction=False)
res = p.set('baz', 'quux').get('baz').execute()
self.assertEqual(res, [True, b'quux'])
def test_pipeline_raises_when_watched_key_changed(self):
self.redis.set('foo', 'bar')
self.redis.rpush('greet', 'hello')
p = self.redis.pipeline()
self.addCleanup(p.reset)
p.watch('greet', 'foo')
nextf = fakeredis.to_bytes(p.get('foo')) + b'baz'
# Simulate change happening on another thread.
self.redis.rpush('greet', 'world')
# Begin pipelining.
p.multi()
p.set('foo', nextf)
with self.assertRaises(redis.WatchError):
p.execute()
def test_pipeline_succeeds_despite_unwatched_key_changed(self):
# Same setup as before except for the params to the WATCH command.
self.redis.set('foo', 'bar')
self.redis.rpush('greet', 'hello')
p = self.redis.pipeline()
try:
# Only watch one of the 2 keys.
p.watch('foo')
nextf = fakeredis.to_bytes(p.get('foo')) + b'baz'
# Simulate change happening on another thread.
self.redis.rpush('greet', 'world')
p.multi()
p.set('foo', nextf)
p.execute()
# Check the commands were executed.
self.assertEqual(self.redis.get('foo'), b'barbaz')
finally:
p.reset()
def test_pipeline_succeeds_when_watching_nonexistent_key(self):
self.redis.set('foo', 'bar')
self.redis.rpush('greet', 'hello')
p = self.redis.pipeline()
try:
# Also watch a nonexistent key.
p.watch('foo', 'bam')
nextf = fakeredis.to_bytes(p.get('foo')) + b'baz'
# Simulate change happening on another thread.
self.redis.rpush('greet', 'world')
p.multi()
p.set('foo', nextf)
p.execute()
# Check the commands were executed.
self.assertEqual(self.redis.get('foo'), b'barbaz')
finally:
p.reset()
def test_watch_state_is_cleared_across_multiple_watches(self):
self.redis.set('foo', 'one')
self.redis.set('bar', 'baz')
p = self.redis.pipeline()
self.addCleanup(p.reset)
p.watch('foo')
# Simulate change happening on another thread.
self.redis.set('foo', 'three')
p.multi()
p.set('foo', 'three')
with self.assertRaises(redis.WatchError):
p.execute()
# Now watch another key. It should be ok to change
# foo as we're no longer watching it.
p.watch('bar')
self.redis.set('foo', 'four')
p.multi()
p.set('bar', 'five')
self.assertEqual(p.execute(), [True])
def test_pipeline_proxies_to_redis_object(self):
p = self.redis.pipeline()
self.assertTrue(hasattr(p, 'zadd'))
with self.assertRaises(AttributeError):
p.non_existent_attribute
def test_pipeline_as_context_manager(self):
self.redis.set('foo', 'bar')
with self.redis.pipeline() as p:
p.watch('foo')
self.assertTrue(isinstance(p, redis.client.BasePipeline)
or p.need_reset)
p.multi()
p.set('foo', 'baz')
p.execute()
# Usually you would consider the pipeline to
# have been destroyed
# after the with statement, but we need to check
# it was reset properly:
self.assertTrue(isinstance(p, redis.client.BasePipeline)
or not p.need_reset)
def test_pipeline_transaction_shortcut(self):
# This example is taken pretty much from the redis-py documentation.
self.redis.set('OUR-SEQUENCE-KEY', 13)
calls = []
def client_side_incr(pipe):
calls.append((pipe,))
current_value = pipe.get('OUR-SEQUENCE-KEY')
next_value = int(current_value) + 1
if len(calls) < 3:
# Simulate a change from another thread.
self.redis.set('OUR-SEQUENCE-KEY', next_value)
pipe.multi()
pipe.set('OUR-SEQUENCE-KEY', next_value)
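# Note: redis-py's transaction() helper runs client_side_incr() under WATCH on
# 'OUR-SEQUENCE-KEY' and retries it whenever the watched key changes
# (WatchError), which is why three invocations are asserted below.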
res = self.redis.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
self.assertEqual([True], res)
self.assertEqual(16, int(self.redis.get('OUR-SEQUENCE-KEY')))
self.assertEqual(3, len(calls))
def test_key_patterns(self):
self.redis.mset({'one': 1, 'two': 2, 'three': 3, 'four': 4})
self.assertItemsEqual(self.redis.keys('*o*'),
[b'four', b'one', b'two'])
self.assertItemsEqual(self.redis.keys('t??'), [b'two'])
self.assertItemsEqual(self.redis.keys('*'),
[b'four', b'one', b'two', b'three'])
self.assertItemsEqual(self.redis.keys(),
[b'four', b'one', b'two', b'three'])
def test_ping(self):
self.assertTrue(self.redis.ping())
def test_type(self):
self.redis.set('string_key', "value")
self.redis.lpush("list_key", "value")
self.redis.sadd("set_key", "value")
self.redis.zadd("zset_key", 1, "value")
self.redis.hset('hset_key', 'key', 'value')
self.assertEqual(self.redis.type('string_key'), b'string')
self.assertEqual(self.redis.type('list_key'), b'list')
self.assertEqual(self.redis.type('set_key'), b'set')
self.assertEqual(self.redis.type('zset_key'), b'zset')
self.assertEqual(self.redis.type('hset_key'), b'hash')
@attr('slow')
def test_pubsub_subscribe(self):
pubsub = self.redis.pubsub()
pubsub.subscribe("channel")
sleep(1)
expected_message = {'type': 'subscribe', 'pattern': None,
'channel': b'channel', 'data': 1}
message = pubsub.get_message()
keys = list(pubsub.channels.keys())
key = keys[0]
if not self.decode_responses:
key = (key if type(key) == bytes
else bytes(key, encoding='utf-8'))
self.assertEqual(len(keys), 1)
self.assertEqual(key, b'channel')
self.assertEqual(message, expected_message)
@attr('slow')
def test_pubsub_psubscribe(self):
pubsub = self.redis.pubsub()
pubsub.psubscribe("channel.*")
sleep(1)
expected_message = {'type': 'psubscribe', 'pattern': None,
'channel': b'channel.*', 'data': 1}
message = pubsub.get_message()
keys = list(pubsub.patterns.keys())
self.assertEqual(len(keys), 1)
self.assertEqual(message, expected_message)
@attr('slow')
def test_pubsub_unsubscribe(self):
pubsub = self.redis.pubsub()
pubsub.subscribe('channel-1', 'channel-2', 'channel-3')
sleep(1)
expected_message = {'type': 'unsubscribe', 'pattern': None,
'channel': b'channel-1', 'data': 2}
pubsub.get_message()
pubsub.get_message()
pubsub.get_message()
# unsubscribe from one
pubsub.unsubscribe('channel-1')
sleep(1)
message = pubsub.get_message()
keys = list(pubsub.channels.keys())
self.assertEqual(message, expected_message)
self.assertEqual(len(keys), 2)
# unsubscribe from multiple
pubsub.unsubscribe()
sleep(1)
pubsub.get_message()
pubsub.get_message()
keys = list(pubsub.channels.keys())
self.assertEqual(message, expected_message)
self.assertEqual(len(keys), 0)
@attr('slow')
def test_pubsub_punsubscribe(self):
pubsub = self.redis.pubsub()
pubsub.psubscribe('channel-1.*', 'channel-2.*', 'channel-3.*')
sleep(1)
expected_message = {'type': 'punsubscribe', 'pattern': None,
'channel': b'channel-1.*', 'data': 2}
pubsub.get_message()
pubsub.get_message()
pubsub.get_message()
# unsubscribe from one
pubsub.punsubscribe('channel-1.*')
sleep(1)
message = pubsub.get_message()
keys = list(pubsub.patterns.keys())
self.assertEqual(message, expected_message)
self.assertEqual(len(keys), 2)
# unsubscribe from multiple
pubsub.punsubscribe()
sleep(1)
pubsub.get_message()
pubsub.get_message()
keys = list(pubsub.patterns.keys())
self.assertEqual(len(keys), 0)
@attr('slow')
def test_pubsub_listen(self):
def _listen(pubsub, q):
count = 0
for message in pubsub.listen():
q.put(message)
count += 1
if count == 4:
pubsub.close()
channel = 'ch1'
patterns = ['ch1*', 'ch[1]', 'ch?']
pubsub | |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`484`- and :pep:`585`-compliant **dual type hint utilities**
(i.e., callables generically applicable to both :pep:`484`- and
:pep:`585`-compliant type hints).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeDecorHintPep484585Exception
from beartype._data.hint.pep.sign.datapepsigns import (
HintSignForwardRef,
HintSignType,
HintSignUnion,
)
from beartype._util.cls.pep.utilpep3119 import (
die_unless_type_issubclassable,
die_unless_type_or_types_issubclassable,
)
from beartype._util.hint.pep.proposal.pep484585.utilpeparg import (
get_hint_pep484585_args_1)
from beartype._util.hint.pep.proposal.pep484585.utilpepforwardref import (
HINT_PEP484585_FORWARDREF_UNION)
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_7
from typing import Any, Tuple, Type, TypeVar, Union
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ HINTS ~ private }....................
_HINT_PEP484585_SUBCLASS_ARGS_1_UNION: Any = (
# If the active Python interpreter targets Python >= 3.7, include the sane
# "typing.TypeVar" type in this union;
Union[type, Tuple[type], TypeVar, HINT_PEP484585_FORWARDREF_UNION,]
if IS_PYTHON_AT_LEAST_3_7 else
# Else, the active Python interpreter targets Python 3.6. In this case,
# exclude the insane "typing.TypeVar" type from this union. Naively
# including that type here induces fatal runtime exceptions resembling:
# AttributeError: type object 'TypeVar' has no attribute '_gorg'
Union[type, Tuple[type], HINT_PEP484585_FORWARDREF_UNION,]
)
'''
Union of the types of all permissible :pep:`484`- or :pep:`585`-compliant
**subclass type hint arguments** (i.e., PEP-compliant child type hints
subscripting (indexing) a subclass type hint).
'''
# ....................{ GETTERS }....................
def get_hint_pep484585_subclass_superclass(
# Mandatory parameters.
hint: object,
# Optional parameters.
exception_prefix: str = '',
) -> _HINT_PEP484585_SUBCLASS_ARGS_1_UNION:
'''
**Issubclassable superclass(es)** (i.e., class whose metaclass does *not*
define a ``__subclasscheck__()`` dunder method that raises an exception,
tuple of such classes, or forward reference to such a class) subscripting
the passed :pep:`484`- or :pep:`585`-compliant **subclass type hint**
(i.e., hint constraining objects to subclass that superclass).
This getter is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Object to be inspected.
exception_prefix : Optional[str]
Human-readable label prefixing the representation of this object in the
exception message. Defaults to the empty string.
Returns
----------
_HINT_PEP484585_SUBCLASS_ARGS_1_UNION
Argument subscripting this subclass type hint, guaranteed to be either:
* An issubclassable class.
* A tuple of issubclassable classes.
* A :pep:`484`-compliant forward reference to an issubclassable class
that typically has yet to be declared (i.e.,
:class:`typing.ForwardRef` instance).
* A :pep:`484`-compliant type variable constrained to classes (i.e.,
:class:`typing.TypeVar` instance).
* A :pep:`585`-compliant union of two or more issubclassable classes.
Raises
----------
:exc:`BeartypeDecorHintPep3119Exception`
If the superclass subscripting this type hint is *not*
**issubclassable** (i.e., class whose metaclass defines a
``__subclasscheck__()`` dunder method raising an exception).
:exc:`BeartypeDecorHintPep484585Exception`
If this hint is either:
* Neither a :pep:`484`- nor :pep:`585`-compliant subclass type hint.
* A :pep:`484`- or :pep:`585`-compliant subclass type hint subscripted
by one argument that is neither a class, union of classes, nor
forward reference to a class.
:exc:`BeartypeDecorHintPep585Exception`
If this hint is either:
* A :pep:`585`-compliant subclass type hint subscripted by either:
* *No* arguments.
* Two or more arguments.
'''
# Avoid circular import dependencies.
from beartype._util.hint.pep.utilpepget import (
get_hint_pep_args,
get_hint_pep_sign_or_none,
)
# If this is *NOT* a subclass type hint, raise an exception.
_die_unless_hint_pep484585_subclass(
hint=hint, exception_prefix=exception_prefix)
# Else, this is a subclass type hint.
# Superclass subscripting this hint.
hint_superclass = get_hint_pep484585_args_1(
hint=hint, exception_prefix=exception_prefix)
# Sign identifying this superclass.
hint_superclass_sign = get_hint_pep_sign_or_none(hint_superclass)
# If this superclass is actually a union of superclasses...
if hint_superclass_sign is HintSignUnion:
# Efficiently reduce this superclass to the tuple of superclasses
# subscripting and thus underlying this union.
hint_superclass = get_hint_pep_args(hint_superclass)
# If any item of this tuple is *NOT* an issubclassable class, raise an
# exception.
# print(f'hint_superclass union arg: {hint_superclass}')
die_unless_type_or_types_issubclassable(
type_or_types=hint_superclass, exception_prefix=exception_prefix) # type: ignore[arg-type]
# If this superclass is actually a forward reference to a superclass,
# silently accept this reference as is. This conditional exists only to
# avoid raising a subsequent exception.
elif hint_superclass_sign is HintSignForwardRef:
pass
# Else, this superclass is *NOT* a union of superclasses...
#
# If this superclass is a class...
elif isinstance(hint_superclass, type):
# If this superclass is *NOT* issubclassable, raise an exception.
die_unless_type_issubclassable(
cls=hint_superclass, exception_prefix=exception_prefix)
# Else, this superclass is issubclassable.
# Else, this superclass is of an unexpected type. In this case, raise an
# exception.
#
# Note that PEP 585-compliant subclass type hints infrequently trigger this
# edge case. Although the "typing" module explicitly validates the
# arguments subscripting PEP 484-compliant type hints, the CPython
# interpreter applies *NO* such validation to PEP 585-compliant subclass
# type hints. For example, PEP 585-compliant subclass type hints are
# subscriptable by the empty tuple, which is technically an argument:
# >>> type[()].__args__
# () # <---- thanks fer nuthin
else:
raise BeartypeDecorHintPep484585Exception(
f'{exception_prefix}subclass type hint {repr(hint)} '
f'child type hint {repr(hint_superclass)} neither '
f'class, union of classes, nor forward reference to class.'
)
# Return this superclass.
return hint_superclass
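# Illustrative sketch of this getter's behaviour (doctest-style, inferred
# from the logic above rather than from an authoritative reference):
#     >>> get_hint_pep484585_subclass_superclass(Type[str])
#     <class 'str'>
#     >>> get_hint_pep484585_subclass_superclass(Type[Union[int, str]])
#     (<class 'int'>, <class 'str'>)
# A bare class argument is returned as is, while a union argument is
# reduced to the tuple of classes subscripting that union.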
# ....................{ REDUCERS }....................
#FIXME: Unit test us up.
def reduce_hint_pep484585_subclass_superclass_if_ignorable(
# Mandatory parameters.
hint: Any,
# Optional parameters.
exception_prefix: str = '',
) -> Any:
'''
Reduce the passed :pep:`484`- or :pep:`585`-compliant **subclass type
hint** (i.e., hint constraining objects to subclass that superclass) to the
:class:`type` superclass if that hint is subscripted by an ignorable child
type hint (e.g., :attr:`typing.Any`, :class:`type`) *or* preserve this hint
as is otherwise (i.e., if that hint is *not* subscripted by an ignorable
child type hint).
This reducer is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Subclass type hint to be reduced.
exception_prefix : Optional[str]
Human-readable label prefixing the representation of this object in the
exception message. Defaults to the empty string.
Raises
----------
:exc:`BeartypeDecorHintPep484585Exception`
If this hint is neither a :pep:`484`- nor :pep:`585`-compliant subclass
type hint.
'''
# Avoid circular import dependencies.
from beartype._util.hint.utilhinttest import is_hint_ignorable
# If this is *NOT* a subclass type hint, raise an exception.
_die_unless_hint_pep484585_subclass(
hint=hint, exception_prefix=exception_prefix)
# Else, this is a subclass type hint.
# If this hint is the unsubscripted PEP 484-compliant subclass type hint,
# immediately reduce this hint to the "type" superclass.
#
# Note that this is *NOT* merely a nonsensical optimization. The
# implementation of the unsubscripted PEP 484-compliant subclass type hint
# significantly differs across Python versions. Under some but *NOT* all
# supported Python versions (notably, Python 3.7 and 3.8), the "typing"
# module subversively subscripts this hint by a type variable; under all
# others, this hint remains unsubscripted. In the latter case, passing this
# hint to the subsequent get_hint_pep484585_args_1() would erroneously
# raise an exception.
if hint == Type:
return type
# Else, this hint is *NOT* the unsubscripted PEP 484-compliant subclass
# type hint.
# Superclass subscripting this hint.
#
# Note that we intentionally do *NOT* call the high-level
# get_hint_pep484585_subclass_superclass() getter here, as the
# validation performed by that function would raise exceptions for
# various child type hints that are otherwise permissible (e.g.,
# "typing.Any").
hint_superclass = get_hint_pep484585_args_1(
hint=hint, exception_prefix=exception_prefix)
# If this argument is either...
if (
# An ignorable type hint (e.g., "typing.Any") *OR*...
is_hint_ignorable(hint_superclass) or
# The "type" superclass, which is effectively ignorable in this
# context of subclasses, as *ALL* classes necessarily subclass
# that superclass.
hint_superclass is type
):
# Reduce this subclass type hint to the "type" superclass.
hint = type
# Else, this argument is unignorable and thus irreducible.
# Return this possibly reduced type hint.
return hint
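# Illustrative sketch of this reducer's behaviour (based on the logic above,
# and assuming is_hint_ignorable() treats "typing.Any" as ignorable):
# both "Type" and "Type[Any]" reduce to the builtin "type", whereas
# "Type[str]" is preserved as is because "str" is unignorable.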
# ....................{ PRIVATE ~ validators }....................
def _die_unless_hint_pep484585_subclass(
# Mandatory parameters.
hint: object,
# Optional parameters.
exception_prefix: str = '',
) -> None:
'''
Raise an exception unless the passed object is a :pep:`484`- or
:pep:`585`-compliant **subclass type hint** (i.e., hint constraining
objects to subclass that superclass).
Parameters
----------
hint : object
Object to be validated.
exception_prefix : Optional[str]
Human-readable label prefixing the representation of this object in the
exception message. Defaults to the empty string.
Raises
----------
:exc:`BeartypeDecorHintPep484585Exception`
If this hint is neither a :pep:`484`- nor | |
at which `value` should be inserted into
`array`.
Example:
>>> sorted_index([1, 2, 2, 3, 4], 2)
1
.. versionadded:: 1.0.0
.. versionchanged:: 4.0.0
Move iteratee support to :func:`sorted_index_by`.
"""
return sorted_index_by(array, value)
def sorted_index_by(array, value, iteratee=None):
"""This method is like :func:`sorted_index` except that it accepts
iteratee which is invoked for `value` and each element of `array` to
compute their sort ranking. The iteratee is invoked with one argument:
``(value)``.
Args:
array (list): List to inspect.
value (mixed): Value to evaluate.
iteratee (mixed, optional): The iteratee invoked per element. Defaults
to :func:`.identity`.
Returns:
int: Returns the index at which `value` should be inserted into
`array`.
Example:
>>> array = [{'x': 4}, {'x': 5}]
>>> sorted_index_by(array, {'x': 4}, lambda o: o['x'])
0
>>> sorted_index_by(array, {'x': 4}, 'x')
0
.. versionadded:: 4.0.0
"""
if iteratee:
# Generate array of sorted keys computed using iteratee.
iteratee = pyd.iteratee(iteratee)
array = sorted(iteratee(item) for item in array)
value = iteratee(value)
return bisect_left(array, value)
def sorted_index_of(array, value):
"""Returns the index of the matched `value` from the sorted `array`, else
``-1``.
Args:
array (list): Array to inspect.
value (mixed): Value to search for.
Returns:
int: Returns the index of the first matched value, else ``-1``.
Example:
>>> sorted_index_of([3, 5, 7, 10], 3)
0
>>> sorted_index_of([10, 10, 5, 7, 3], 10)
-1
.. versionadded:: 4.0.0
"""
index = sorted_index(array, value)
if index < len(array) and array[index] == value:
return index
else:
return -1
def sorted_last_index(array, value):
"""This method is like :func:`sorted_index` except that it returns the
highest index at which `value` should be inserted into `array` in order to
maintain its sort order.
Args:
array (list): List to inspect.
value (mixed): Value to evaluate.
Returns:
int: Returns the index at which `value` should be inserted into
`array`.
Example:
>>> sorted_last_index([1, 2, 2, 3, 4], 2)
3
.. versionadded:: 1.1.0
.. versionchanged:: 4.0.0
Move iteratee support to :func:`sorted_last_index_by`.
"""
return sorted_last_index_by(array, value)
def sorted_last_index_by(array, value, iteratee=None):
"""This method is like :func:`sorted_last_index` except that it accepts
iteratee which is invoked for `value` and each element of `array` to
compute their sort ranking. The iteratee is invoked with one argument:
``(value)``.
Args:
array (list): List to inspect.
value (mixed): Value to evaluate.
iteratee (mixed, optional): The iteratee invoked per element. Defaults
to :func:`.identity`.
Returns:
int: Returns the index at which `value` should be inserted into
`array`.
Example:
>>> array = [{'x': 4}, {'x': 5}]
>>> sorted_last_index_by(array, {'x': 4}, lambda o: o['x'])
1
>>> sorted_last_index_by(array, {'x': 4}, 'x')
1
"""
if iteratee:
# Generate array of sorted keys computed using iteratee.
iteratee = pyd.iteratee(iteratee)
array = sorted(iteratee(item) for item in array)
value = iteratee(value)
return bisect_right(array, value)
def sorted_last_index_of(array, value):
"""This method is like :func:`last_index_of` except that it performs a
binary search on a sorted `array`.
Args:
array (list): Array to inspect.
value (mixed): Value to search for.
Returns:
int: Returns the index of the matched value, else ``-1``.
Example:
>>> sorted_last_index_of([4, 5, 5, 5, 6], 5)
3
>>> sorted_last_index_of([6, 5, 5, 5, 4], 6)
-1
.. versionadded:: 4.0.0
"""
index = sorted_last_index(array, value) - 1
if index < len(array) and array[index] == value:
return index
else:
return -1
def sorted_uniq(array):
"""Return sorted array with unique elements.
Args:
array (list): List of values to be sorted.
Returns:
list: List of unique elements in a sorted fashion.
Example:
>>> sorted_uniq([4, 2, 2, 5])
[2, 4, 5]
>>> sorted_uniq([-2, -2, 4, 1])
[-2, 1, 4]
.. versionadded:: 4.0.0
"""
return sorted(uniq(array))
def sorted_uniq_by(array, iteratee=None):
"""This method is like :func:`sorted_uniq` except that it accepts iteratee
which is invoked for each element in array to generate the criterion by
which uniqueness is computed. The order of result values is determined by
the order they occur in the array. The iteratee is invoked with one
argument: ``(value)``.
Args:
array (list): List of values to be sorted.
iteratee (mixed, optional): Function to transform the elements of the
arrays. Defaults to :func:`.identity`.
Returns:
list: Unique list.
Example:
>>> sorted_uniq_by([3, 2, 1, 3, 2, 1], lambda val: val % 2)
[2, 3]
.. versionadded:: 4.0.0
"""
return sorted(uniq_by(array, iteratee=iteratee))
def splice(array, start, count=None, *items):
"""Modify the contents of `array` by inserting elements starting at index
`start` and removing `count` number of elements after.
Args:
array (list|str): List to splice.
start (int): Start to splice at.
count (int, optional): Number of items to remove starting at
`start`. If ``None`` then all items after `start` are removed.
Defaults to ``None``.
items (mixed): Elements to insert starting at `start`. Each item is
inserted in the order given.
Returns:
list|str: The removed elements of `array` or the spliced string.
Warning:
`array` is modified in place if ``list``.
Example:
>>> array = [1, 2, 3, 4]
>>> splice(array, 1)
[2, 3, 4]
>>> array
[1]
>>> array = [1, 2, 3, 4]
>>> splice(array, 1, 2)
[2, 3]
>>> array
[1, 4]
>>> array = [1, 2, 3, 4]
>>> splice(array, 1, 2, 0, 0)
[2, 3]
>>> array
[1, 0, 0, 4]
.. versionadded:: 2.2.0
.. versionchanged:: 3.0.0
Support string splicing.
"""
if count is None:
count = len(array) - start
is_string = pyd.is_string(array)
if is_string:
array = list(array)
removed = array[start:start + count]
del array[start:start + count]
for item in reverse(items):
array.insert(start, item)
if is_string:
return ''.join(array)
else:
return removed
def split_at(array, index):
"""Returns a list of two lists composed of the split of `array` at `index`.
Args:
array (list): List to split.
index (int): Index to split at.
Returns:
list: Split list.
Example:
>>> split_at([1, 2, 3, 4], 2)
[[1, 2], [3, 4]]
.. versionadded:: 2.0.0
"""
return [take(array, index), drop(array, index)]
def tail(array):
"""Return all but the first element of `array`.
Args:
array (list): List to process.
Returns:
list: Rest of the list.
Example:
>>> tail([1, 2, 3, 4])
[2, 3, 4]
.. versionadded:: 1.0.0
.. versionchanged:: 4.0.0
Renamed from ``rest`` to ``tail``.
"""
return array[1:]
def take(array, n=1):
"""Creates a slice of `array` with `n` elements taken from the beginning.
Args:
array (list): List to process.
n (int, optional): Number of elements to take. Defaults to ``1``.
Returns:
list: Taken list.
Example:
>>> take([1, 2, 3, 4], 2)
[1, 2]
.. versionadded:: 1.0.0
.. versionchanged:: 1.1.0
Added ``n`` argument and removed as alias of :func:`first`.
.. versionchanged:: 3.0.0
Made ``n`` default to ``1``.
"""
return take_while(array, lambda _, index: index < n)
def take_right(array, n=1):
"""Creates a slice of `array` with `n` elements taken from the end.
Args:
array (list): List to process.
n (int, optional): Number of elements to take. Defaults to ``1``.
Returns:
list: Taken list.
Example:
>>> take_right([1, 2, 3, 4], 2)
[3, 4]
.. versionadded:: 1.1.0
.. versionchanged:: 3.0.0
Made ``n`` default to ``1``.
"""
length = len(array)
return take_right_while(array, lambda _, index: (length - index) <= n)
def take_right_while(array, predicate=None):
"""Creates a slice of `array` with elements taken from the end. Elements
are taken until the `predicate` returns falsey. The `predicate` is
invoked with three arguments: ``(value, index, array)``.
Args:
array (list): List to process.
predicate (mixed): Predicate called per iteration
Returns:
list: Taken list.
Example:
>>> take_right_while([1, 2, 3, 4], lambda x: x >= 3)
[3, 4]
.. versionadded:: 1.1.0
"""
n = len(array)
for is_true, _, _, _ in iteriteratee(array, predicate, reverse=True):
if is_true:
n -= 1
else:
break
return array[n:]
def take_while(array, predicate=None):
"""Creates a slice of `array` with elements taken from the beginning.
Elements are taken until the `predicate` returns falsey. The
`predicate` is invoked with three arguments: ``(value, index, array)``.
Args:
array (list): List to process.
predicate (mixed): Predicate called per iteration
Returns:
list: Taken list.
Example:
>>> take_while([1, 2, 3, 4], lambda x: x < 3)
[1, 2]
.. versionadded:: 1.1.0
"""
n = 0
for is_true, _, _, _ in iteriteratee(array, predicate):
if is_true:
n += 1
else:
break
return array[:n]
def union(array, *others):
"""Computes the union of the passed-in arrays.
Args:
array (list): | |
from __future__ import unicode_literals
from future.builtins import int, str
from json import dumps
from django.contrib.auth.decorators import login_required
from django.contrib.messages import info
from django.urls import reverse
from django.db.models import Sum
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.defaultfilters import slugify
from django.template.loader import get_template
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.views import set_cookie, paginate
from mezzanine.utils.urls import next_url
from cartridge.shop import checkout
from cartridge.shop.forms import (AddProductForm, CartItemFormSet,
DiscountForm, OrderForm)
from cartridge.shop.models import Product, ProductVariation, Order
from cartridge.shop.models import DiscountCode
from cartridge.shop.utils import recalculate_cart, sign
try:
from xhtml2pdf import pisa
except (ImportError, SyntaxError):
pisa = None
HAS_PDF = pisa is not None
# Set up checkout handlers.
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
tax_handler = handler(settings.SHOP_HANDLER_TAX)
payment_handler = handler(settings.SHOP_HANDLER_PAYMENT)
order_handler = handler(settings.SHOP_HANDLER_ORDER)
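# Illustrative sketch (the dotted path below is hypothetical): with
# settings.SHOP_HANDLER_PAYMENT = "myproject.payment.process", the
# payment_handler above becomes that imported callable and is invoked as
# payment_handler(request, form, order) during checkout; an empty setting
# yields a no-op lambda instead.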
def product(request, slug, template="shop/product.html",
form_class=AddProductForm, extra_context=None):
"""
Display a product - convert the product variations to JSON as well as
handling adding the product to either the cart or the wishlist.
"""
published_products = Product.objects.published(for_user=request.user)
product = get_object_or_404(published_products, slug=slug)
fields = [f.name for f in ProductVariation.option_fields()]
variations = product.variations.all()
variations_json = dumps([dict([(f, getattr(v, f))
for f in fields + ["sku", "image_id"]]) for v in variations])
to_cart = (request.method == "POST" and
request.POST.get("add_wishlist") is None)
initial_data = {}
if variations:
initial_data = dict([(f, getattr(variations[0], f)) for f in fields])
initial_data["quantity"] = 1
add_product_form = form_class(request.POST or None, product=product,
initial=initial_data, to_cart=to_cart)
if request.method == "POST":
if add_product_form.is_valid():
if to_cart:
quantity = add_product_form.cleaned_data["quantity"]
request.cart.add_item(add_product_form.variation, quantity)
recalculate_cart(request)
info(request, _("Item added to cart"))
return redirect("shop_cart")
else:
skus = request.wishlist
sku = add_product_form.variation.sku
if sku not in skus:
skus.append(sku)
info(request, _("Item added to wishlist"))
response = redirect("shop_wishlist")
set_cookie(response, "wishlist", ",".join(skus))
return response
related = []
if settings.SHOP_USE_RELATED_PRODUCTS:
related = product.related_products.published(for_user=request.user)
context = {
"product": product,
"editable_obj": product,
"images": product.images.all(),
"variations": variations,
"variations_json": variations_json,
"has_available_variations": any([v.has_price() for v in variations]),
"related_products": related,
"add_product_form": add_product_form
}
context.update(extra_context or {})
templates = [u"shop/%s.html" % str(product.slug), template]
# Check for a template matching the page's content model.
if getattr(product, 'content_model', None) is not None:
templates.insert(0, u"shop/products/%s.html" % product.content_model)
return TemplateResponse(request, templates, context)
@never_cache
def wishlist(request, template="shop/wishlist.html",
form_class=AddProductForm, extra_context=None):
"""
Display the wishlist and handle removing items from the wishlist and
adding them to the cart.
"""
if not settings.SHOP_USE_WISHLIST:
raise Http404
skus = request.wishlist
error = None
if request.method == "POST":
to_cart = request.POST.get("add_cart")
add_product_form = form_class(request.POST or None,
to_cart=to_cart)
if to_cart:
if add_product_form.is_valid():
request.cart.add_item(add_product_form.variation, 1)
recalculate_cart(request)
message = _("Item added to cart")
url = "shop_cart"
else:
error = list(add_product_form.errors.values())[0]
else:
message = _("Item removed from wishlist")
url = "shop_wishlist"
sku = request.POST.get("sku")
if sku in skus:
skus.remove(sku)
if not error:
info(request, message)
response = redirect(url)
set_cookie(response, "wishlist", ",".join(skus))
return response
# Remove skus from the cookie that no longer exist.
published_products = Product.objects.published(for_user=request.user)
f = {"product__in": published_products, "sku__in": skus}
wishlist = ProductVariation.objects.filter(**f).select_related("product")
wishlist = sorted(wishlist, key=lambda v: skus.index(v.sku))
context = {"wishlist_items": wishlist, "error": error}
context.update(extra_context or {})
response = TemplateResponse(request, template, context)
if len(wishlist) < len(skus):
skus = [variation.sku for variation in wishlist]
set_cookie(response, "wishlist", ",".join(skus))
return response
@never_cache
def cart(request, template="shop/cart.html",
cart_formset_class=CartItemFormSet,
discount_form_class=DiscountForm,
extra_context=None):
"""
Display cart and handle removing items from the cart.
"""
cart_formset = cart_formset_class(instance=request.cart)
discount_form = discount_form_class(request, request.POST or None)
if request.method == "POST":
valid = True
if request.POST.get("update_cart"):
valid = request.cart.has_items()
if not valid:
# Session timed out.
info(request, _("Your cart has expired"))
else:
cart_formset = cart_formset_class(request.POST,
instance=request.cart)
valid = cart_formset.is_valid()
if valid:
cart_formset.save()
recalculate_cart(request)
info(request, _("Cart updated"))
else:
# Reset the cart formset so that the cart
# always indicates the correct quantities.
# The user is shown their invalid quantity
# via the error message, which we need to
# copy over to the new formset here.
errors = cart_formset._errors
cart_formset = cart_formset_class(instance=request.cart)
cart_formset._errors = errors
else:
valid = discount_form.is_valid()
if valid:
discount_form.set_discount()
# Potentially need to set shipping if a discount code
# was previously entered with free shipping, and then
# another was entered (replacing the old) without
# free shipping, *and* the user has already progressed
# to the final checkout step, which they'd go straight
# to when returning to checkout, bypassing billing and
# shipping details step where shipping is normally set.
recalculate_cart(request)
if valid:
return redirect("shop_cart")
context = {"cart_formset": cart_formset}
context.update(extra_context or {})
settings.clear_cache()
if (settings.SHOP_DISCOUNT_FIELD_IN_CART and
DiscountCode.objects.active().exists()):
context["discount_form"] = discount_form
return TemplateResponse(request, template, context)
@never_cache
def checkout_steps(request, form_class=OrderForm, extra_context=None):
"""
Display the order form and handle processing of each step.
"""
# Do the authentication check here rather than using standard
# login_required decorator. This means we can check for a custom
# LOGIN_URL and fall back to our own login view.
authenticated = request.user.is_authenticated()
if settings.SHOP_CHECKOUT_ACCOUNT_REQUIRED and not authenticated:
url = "%s?next=%s" % (settings.LOGIN_URL, reverse("shop_checkout"))
return redirect(url)
try:
settings.SHOP_CHECKOUT_FORM_CLASS
except AttributeError:
pass
else:
from warnings import warn
warn("The SHOP_CHECKOUT_FORM_CLASS setting is deprecated - please "
"define your own urlpattern for the checkout_steps view, "
"passing in your own form_class argument.")
form_class = import_dotted_path(settings.SHOP_CHECKOUT_FORM_CLASS)
initial = checkout.initial_order_data(request, form_class)
step = int(request.POST.get("step", None) or
initial.get("step", None) or
checkout.CHECKOUT_STEP_FIRST)
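# The current step is resolved with this precedence: posted form data,
# then the previously stored session/initial data, then the first
# checkout step.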
form = form_class(request, step, initial=initial)
data = request.POST
checkout_errors = []
if request.POST.get("back") is not None:
# Back button in the form was pressed - load the order form
# for the previous step and maintain the field values entered.
step -= 1
form = form_class(request, step, initial=initial)
elif request.method == "POST" and request.cart.has_items():
form = form_class(request, step, initial=initial, data=data)
if form.is_valid():
# Copy the current form fields to the session so that
# they're maintained if the customer leaves the checkout
# process, but remove sensitive fields from the session
# such as the credit card fields so that they're never
# stored anywhere.
request.session["order"] = dict(form.cleaned_data)
sensitive_card_fields = ("card_number", "card_expiry_month",
"card_expiry_year", "card_ccv")
for field in sensitive_card_fields:
if field in request.session["order"]:
del request.session["order"][field]
# FIRST CHECKOUT STEP - handle discount code. This needs to
# be set before shipping, to allow for free shipping to be
# first set by a discount code.
if step == checkout.CHECKOUT_STEP_FIRST:
form.set_discount()
# ALL STEPS - run billing/tax handlers. These are run on
# all steps, since all fields (such as address fields) are
# posted on each step, even as hidden inputs when not
# visible in the current step.
try:
billship_handler(request, form)
tax_handler(request, form)
except checkout.CheckoutError as e:
checkout_errors.append(e)
# FINAL CHECKOUT STEP - run payment handler and process order.
if step == checkout.CHECKOUT_STEP_LAST and not checkout_errors:
# Create and save the initial order object so that
# the payment handler has access to all of the order
# fields. If there is a payment error then delete the
# order, otherwise remove the cart items from stock
# and send the order receipt email.
order = form.save(commit=False)
order.setup(request)
# Try payment.
try:
transaction_id = payment_handler(request, form, order)
except checkout.CheckoutError as e:
# Error in payment handler.
order.delete()
checkout_errors.append(e)
if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION:
step -= 1
else:
# Finalize order - ``order.complete()`` performs
# final cleanup of session and cart.
# ``order_handler()`` can be defined by the
# developer to implement custom order processing.
# Then send the order email to the customer.
order.transaction_id = transaction_id
order.complete(request)
order_handler(request, form, order)
checkout.send_order_email(request, order)
# Set the cookie for remembering address details
# if the "remember" checkbox was checked.
response = redirect("shop_complete")
if form.cleaned_data.get("remember"):
remembered = "%s:%s" % (sign(order.key), order.key)
set_cookie(response, "remember", remembered,
secure=request.is_secure())
else:
response.delete_cookie("remember")
return response
# If any checkout errors, assign them to a new form and
# re-run is_valid. If valid, then set form to the next step.
form = form_class(request, step, initial=initial, data=data,
errors=checkout_errors)
if form.is_valid():
step += 1
form = form_class(request, step, initial=initial)
# Update the step so that we don't rely on POST data to take us back to
# the same point in the checkout process.
try:
request.session["order"]["step"] = step
request.session.modified = True
except KeyError:
pass
step_vars = checkout.CHECKOUT_STEPS[step - 1]
template = "shop/%s.html" % step_vars["template"]
context = {"CHECKOUT_STEP_FIRST": step == checkout.CHECKOUT_STEP_FIRST,
"CHECKOUT_STEP_LAST": | |
__swig_destroy__ = _beamformer.delete_DOAEstimatorSRPDSBLAPtr
__del__ = lambda self: None
def next(self, frame_no=-5):
return _beamformer.DOAEstimatorSRPDSBLAPtr_next(self, frame_no)
def reset(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_reset(self)
def set_array_geometry(self, positions):
return _beamformer.DOAEstimatorSRPDSBLAPtr_set_array_geometry(self, positions)
def clear_channel(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_clear_channel(self)
def get_weights(self, fbinX):
return _beamformer.DOAEstimatorSRPDSBLAPtr_get_weights(self, fbinX)
def calc_array_manifold_vectors(self, samplerate, delays):
return _beamformer.DOAEstimatorSRPDSBLAPtr_calc_array_manifold_vectors(self, samplerate, delays)
def calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j):
return _beamformer.DOAEstimatorSRPDSBLAPtr_calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j)
def calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC=2):
return _beamformer.DOAEstimatorSRPDSBLAPtr_calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC)
def snapshot_array_f(self, fbinX):
return _beamformer.DOAEstimatorSRPDSBLAPtr_snapshot_array_f(self, fbinX)
def snapshot_array(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_snapshot_array(self)
def set_channel(self, chan):
return _beamformer.DOAEstimatorSRPDSBLAPtr_set_channel(self, chan)
def is_end(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_is_end(self)
def dim(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_dim(self)
def fftLen(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_fftLen(self)
def chanN(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_chanN(self)
def name(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_name(self)
def size(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_size(self)
def current(self):
return _beamformer.DOAEstimatorSRPDSBLAPtr_current(self)
DOAEstimatorSRPDSBLAPtr_swigregister = _beamformer.DOAEstimatorSRPDSBLAPtr_swigregister
DOAEstimatorSRPDSBLAPtr_swigregister(DOAEstimatorSRPDSBLAPtr)
class DOAEstimatorSRPEBPtr(EigenBeamformerPtr):
__swig_setmethods__ = {}
for _s in [EigenBeamformerPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DOAEstimatorSRPEBPtr, name, value)
__swig_getmethods__ = {}
for _s in [EigenBeamformerPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DOAEstimatorSRPEBPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _beamformer.new_DOAEstimatorSRPEBPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _beamformer.DOAEstimatorSRPEBPtr___iter__(self)
def __deref__(self):
return _beamformer.DOAEstimatorSRPEBPtr___deref__(self)
__swig_destroy__ = _beamformer.delete_DOAEstimatorSRPEBPtr
__del__ = lambda self: None
def next(self, frame_no=-5):
return _beamformer.DOAEstimatorSRPEBPtr_next(self, frame_no)
def reset(self):
return _beamformer.DOAEstimatorSRPEBPtr_reset(self)
def dim(self):
return _beamformer.DOAEstimatorSRPEBPtr_dim(self)
def set_sigma2(self, sigma2):
return _beamformer.DOAEstimatorSRPEBPtr_set_sigma2(self, sigma2)
def set_weight_gain(self, wgain):
return _beamformer.DOAEstimatorSRPEBPtr_set_weight_gain(self, wgain)
def set_eigenmike_geometry(self):
return _beamformer.DOAEstimatorSRPEBPtr_set_eigenmike_geometry(self)
def set_array_geometry(self, a, theta_s, phi_s):
return _beamformer.DOAEstimatorSRPEBPtr_set_array_geometry(self, a, theta_s, phi_s)
def set_look_direction(self, theta, phi):
return _beamformer.DOAEstimatorSRPEBPtr_set_look_direction(self, theta, phi)
def mode_amplitudes(self):
return _beamformer.DOAEstimatorSRPEBPtr_mode_amplitudes(self)
def array_geometry(self, type):
return _beamformer.DOAEstimatorSRPEBPtr_array_geometry(self, type)
def beampattern(self, *args, **kwargs):
return _beamformer.DOAEstimatorSRPEBPtr_beampattern(self, *args, **kwargs)
def snapshot_array(self):
return _beamformer.DOAEstimatorSRPEBPtr_snapshot_array(self)
def snapshot_array2(self):
return _beamformer.DOAEstimatorSRPEBPtr_snapshot_array2(self)
def blocking_matrix(self, fbinX, unitX=0):
return _beamformer.DOAEstimatorSRPEBPtr_blocking_matrix(self, fbinX, unitX)
def clear_channel(self):
return _beamformer.DOAEstimatorSRPEBPtr_clear_channel(self)
def get_weights(self, fbinX):
return _beamformer.DOAEstimatorSRPEBPtr_get_weights(self, fbinX)
def calc_array_manifold_vectors(self, samplerate, delays):
return _beamformer.DOAEstimatorSRPEBPtr_calc_array_manifold_vectors(self, samplerate, delays)
def calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j):
return _beamformer.DOAEstimatorSRPEBPtr_calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j)
def calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC=2):
return _beamformer.DOAEstimatorSRPEBPtr_calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC)
def snapshot_array_f(self, fbinX):
return _beamformer.DOAEstimatorSRPEBPtr_snapshot_array_f(self, fbinX)
def set_channel(self, chan):
return _beamformer.DOAEstimatorSRPEBPtr_set_channel(self, chan)
def is_end(self):
return _beamformer.DOAEstimatorSRPEBPtr_is_end(self)
def fftLen(self):
return _beamformer.DOAEstimatorSRPEBPtr_fftLen(self)
def chanN(self):
return _beamformer.DOAEstimatorSRPEBPtr_chanN(self)
def name(self):
return _beamformer.DOAEstimatorSRPEBPtr_name(self)
def size(self):
return _beamformer.DOAEstimatorSRPEBPtr_size(self)
def current(self):
return _beamformer.DOAEstimatorSRPEBPtr_current(self)
DOAEstimatorSRPEBPtr_swigregister = _beamformer.DOAEstimatorSRPEBPtr_swigregister
DOAEstimatorSRPEBPtr_swigregister(DOAEstimatorSRPEBPtr)
class SphericalDSBeamformerPtr(EigenBeamformerPtr):
__swig_setmethods__ = {}
for _s in [EigenBeamformerPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SphericalDSBeamformerPtr, name, value)
__swig_getmethods__ = {}
for _s in [EigenBeamformerPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SphericalDSBeamformerPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _beamformer.new_SphericalDSBeamformerPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _beamformer.SphericalDSBeamformerPtr___iter__(self)
def __deref__(self):
return _beamformer.SphericalDSBeamformerPtr___deref__(self)
__swig_destroy__ = _beamformer.delete_SphericalDSBeamformerPtr
__del__ = lambda self: None
def next(self, frame_no=-5):
return _beamformer.SphericalDSBeamformerPtr_next(self, frame_no)
def reset(self):
return _beamformer.SphericalDSBeamformerPtr_reset(self)
def calc_wng(self):
return _beamformer.SphericalDSBeamformerPtr_calc_wng(self)
def dim(self):
return _beamformer.SphericalDSBeamformerPtr_dim(self)
def set_sigma2(self, sigma2):
return _beamformer.SphericalDSBeamformerPtr_set_sigma2(self, sigma2)
def set_weight_gain(self, wgain):
return _beamformer.SphericalDSBeamformerPtr_set_weight_gain(self, wgain)
def set_eigenmike_geometry(self):
return _beamformer.SphericalDSBeamformerPtr_set_eigenmike_geometry(self)
def set_array_geometry(self, a, theta_s, phi_s):
return _beamformer.SphericalDSBeamformerPtr_set_array_geometry(self, a, theta_s, phi_s)
def set_look_direction(self, theta, phi):
return _beamformer.SphericalDSBeamformerPtr_set_look_direction(self, theta, phi)
def mode_amplitudes(self):
return _beamformer.SphericalDSBeamformerPtr_mode_amplitudes(self)
def array_geometry(self, type):
return _beamformer.SphericalDSBeamformerPtr_array_geometry(self, type)
def beampattern(self, *args, **kwargs):
return _beamformer.SphericalDSBeamformerPtr_beampattern(self, *args, **kwargs)
def snapshot_array(self):
return _beamformer.SphericalDSBeamformerPtr_snapshot_array(self)
def snapshot_array2(self):
return _beamformer.SphericalDSBeamformerPtr_snapshot_array2(self)
def blocking_matrix(self, fbinX, unitX=0):
return _beamformer.SphericalDSBeamformerPtr_blocking_matrix(self, fbinX, unitX)
def clear_channel(self):
return _beamformer.SphericalDSBeamformerPtr_clear_channel(self)
def get_weights(self, fbinX):
return _beamformer.SphericalDSBeamformerPtr_get_weights(self, fbinX)
def calc_array_manifold_vectors(self, samplerate, delays):
return _beamformer.SphericalDSBeamformerPtr_calc_array_manifold_vectors(self, samplerate, delays)
def calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j):
return _beamformer.SphericalDSBeamformerPtr_calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j)
def calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC=2):
return _beamformer.SphericalDSBeamformerPtr_calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC)
def snapshot_array_f(self, fbinX):
return _beamformer.SphericalDSBeamformerPtr_snapshot_array_f(self, fbinX)
def set_channel(self, chan):
return _beamformer.SphericalDSBeamformerPtr_set_channel(self, chan)
def is_end(self):
return _beamformer.SphericalDSBeamformerPtr_is_end(self)
def fftLen(self):
return _beamformer.SphericalDSBeamformerPtr_fftLen(self)
def chanN(self):
return _beamformer.SphericalDSBeamformerPtr_chanN(self)
def name(self):
return _beamformer.SphericalDSBeamformerPtr_name(self)
def size(self):
return _beamformer.SphericalDSBeamformerPtr_size(self)
def current(self):
return _beamformer.SphericalDSBeamformerPtr_current(self)
SphericalDSBeamformerPtr_swigregister = _beamformer.SphericalDSBeamformerPtr_swigregister
SphericalDSBeamformerPtr_swigregister(SphericalDSBeamformerPtr)
class DualSphericalDSBeamformerPtr(SphericalDSBeamformerPtr):
__swig_setmethods__ = {}
for _s in [SphericalDSBeamformerPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DualSphericalDSBeamformerPtr, name, value)
__swig_getmethods__ = {}
for _s in [SphericalDSBeamformerPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DualSphericalDSBeamformerPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _beamformer.new_DualSphericalDSBeamformerPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _beamformer.DualSphericalDSBeamformerPtr___iter__(self)
def __deref__(self):
return _beamformer.DualSphericalDSBeamformerPtr___deref__(self)
__swig_destroy__ = _beamformer.delete_DualSphericalDSBeamformerPtr
__del__ = lambda self: None
def snapshot_array(self):
return _beamformer.DualSphericalDSBeamformerPtr_snapshot_array(self)
def next(self, frame_no=-5):
return _beamformer.DualSphericalDSBeamformerPtr_next(self, frame_no)
def reset(self):
return _beamformer.DualSphericalDSBeamformerPtr_reset(self)
def calc_wng(self):
return _beamformer.DualSphericalDSBeamformerPtr_calc_wng(self)
def dim(self):
return _beamformer.DualSphericalDSBeamformerPtr_dim(self)
def set_sigma2(self, sigma2):
return _beamformer.DualSphericalDSBeamformerPtr_set_sigma2(self, sigma2)
def set_weight_gain(self, wgain):
return _beamformer.DualSphericalDSBeamformerPtr_set_weight_gain(self, wgain)
def set_eigenmike_geometry(self):
return _beamformer.DualSphericalDSBeamformerPtr_set_eigenmike_geometry(self)
def set_array_geometry(self, a, theta_s, phi_s):
return _beamformer.DualSphericalDSBeamformerPtr_set_array_geometry(self, a, theta_s, phi_s)
def set_look_direction(self, theta, phi):
return _beamformer.DualSphericalDSBeamformerPtr_set_look_direction(self, theta, phi)
def mode_amplitudes(self):
return _beamformer.DualSphericalDSBeamformerPtr_mode_amplitudes(self)
def array_geometry(self, type):
return _beamformer.DualSphericalDSBeamformerPtr_array_geometry(self, type)
def beampattern(self, *args, **kwargs):
return _beamformer.DualSphericalDSBeamformerPtr_beampattern(self, *args, **kwargs)
def snapshot_array2(self):
return _beamformer.DualSphericalDSBeamformerPtr_snapshot_array2(self)
def blocking_matrix(self, fbinX, unitX=0):
return _beamformer.DualSphericalDSBeamformerPtr_blocking_matrix(self, fbinX, unitX)
def clear_channel(self):
return _beamformer.DualSphericalDSBeamformerPtr_clear_channel(self)
def get_weights(self, fbinX):
return _beamformer.DualSphericalDSBeamformerPtr_get_weights(self, fbinX)
def calc_array_manifold_vectors(self, samplerate, delays):
return _beamformer.DualSphericalDSBeamformerPtr_calc_array_manifold_vectors(self, samplerate, delays)
def calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j):
return _beamformer.DualSphericalDSBeamformerPtr_calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j)
def calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC=2):
return _beamformer.DualSphericalDSBeamformerPtr_calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC)
def snapshot_array_f(self, fbinX):
return _beamformer.DualSphericalDSBeamformerPtr_snapshot_array_f(self, fbinX)
def set_channel(self, chan):
return _beamformer.DualSphericalDSBeamformerPtr_set_channel(self, chan)
def is_end(self):
return _beamformer.DualSphericalDSBeamformerPtr_is_end(self)
def fftLen(self):
return _beamformer.DualSphericalDSBeamformerPtr_fftLen(self)
def chanN(self):
return _beamformer.DualSphericalDSBeamformerPtr_chanN(self)
def name(self):
return _beamformer.DualSphericalDSBeamformerPtr_name(self)
def size(self):
return _beamformer.DualSphericalDSBeamformerPtr_size(self)
def current(self):
return _beamformer.DualSphericalDSBeamformerPtr_current(self)
DualSphericalDSBeamformerPtr_swigregister = _beamformer.DualSphericalDSBeamformerPtr_swigregister
DualSphericalDSBeamformerPtr_swigregister(DualSphericalDSBeamformerPtr)
class DOAEstimatorSRPSphDSBPtr(SphericalDSBeamformerPtr):
__swig_setmethods__ = {}
for _s in [SphericalDSBeamformerPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DOAEstimatorSRPSphDSBPtr, name, value)
__swig_getmethods__ = {}
for _s in [SphericalDSBeamformerPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DOAEstimatorSRPSphDSBPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _beamformer.new_DOAEstimatorSRPSphDSBPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr___iter__(self)
def __deref__(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr___deref__(self)
__swig_destroy__ = _beamformer.delete_DOAEstimatorSRPSphDSBPtr
__del__ = lambda self: None
def next(self, frame_no=-5):
return _beamformer.DOAEstimatorSRPSphDSBPtr_next(self, frame_no)
def reset(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_reset(self)
def calc_wng(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_calc_wng(self)
def dim(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_dim(self)
def set_sigma2(self, sigma2):
return _beamformer.DOAEstimatorSRPSphDSBPtr_set_sigma2(self, sigma2)
def set_weight_gain(self, wgain):
return _beamformer.DOAEstimatorSRPSphDSBPtr_set_weight_gain(self, wgain)
def set_eigenmike_geometry(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_set_eigenmike_geometry(self)
def set_array_geometry(self, a, theta_s, phi_s):
return _beamformer.DOAEstimatorSRPSphDSBPtr_set_array_geometry(self, a, theta_s, phi_s)
def set_look_direction(self, theta, phi):
return _beamformer.DOAEstimatorSRPSphDSBPtr_set_look_direction(self, theta, phi)
def mode_amplitudes(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_mode_amplitudes(self)
def array_geometry(self, type):
return _beamformer.DOAEstimatorSRPSphDSBPtr_array_geometry(self, type)
def beampattern(self, *args, **kwargs):
return _beamformer.DOAEstimatorSRPSphDSBPtr_beampattern(self, *args, **kwargs)
def snapshot_array(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_snapshot_array(self)
def snapshot_array2(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_snapshot_array2(self)
def blocking_matrix(self, fbinX, unitX=0):
return _beamformer.DOAEstimatorSRPSphDSBPtr_blocking_matrix(self, fbinX, unitX)
def clear_channel(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_clear_channel(self)
def get_weights(self, fbinX):
return _beamformer.DOAEstimatorSRPSphDSBPtr_get_weights(self, fbinX)
def calc_array_manifold_vectors(self, samplerate, delays):
return _beamformer.DOAEstimatorSRPSphDSBPtr_calc_array_manifold_vectors(self, samplerate, delays)
def calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j):
return _beamformer.DOAEstimatorSRPSphDSBPtr_calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j)
def calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC=2):
return _beamformer.DOAEstimatorSRPSphDSBPtr_calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC)
def snapshot_array_f(self, fbinX):
return _beamformer.DOAEstimatorSRPSphDSBPtr_snapshot_array_f(self, fbinX)
def set_channel(self, chan):
return _beamformer.DOAEstimatorSRPSphDSBPtr_set_channel(self, chan)
def is_end(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_is_end(self)
def fftLen(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_fftLen(self)
def chanN(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_chanN(self)
def name(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_name(self)
def size(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_size(self)
def current(self):
return _beamformer.DOAEstimatorSRPSphDSBPtr_current(self)
DOAEstimatorSRPSphDSBPtr_swigregister = _beamformer.DOAEstimatorSRPSphDSBPtr_swigregister
DOAEstimatorSRPSphDSBPtr_swigregister(DOAEstimatorSRPSphDSBPtr)
class SphericalHWNCBeamformerPtr(EigenBeamformerPtr):
__swig_setmethods__ = {}
for _s in [EigenBeamformerPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SphericalHWNCBeamformerPtr, name, value)
__swig_getmethods__ = {}
for _s in [EigenBeamformerPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SphericalHWNCBeamformerPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _beamformer.new_SphericalHWNCBeamformerPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _beamformer.SphericalHWNCBeamformerPtr___iter__(self)
def __deref__(self):
return _beamformer.SphericalHWNCBeamformerPtr___deref__(self)
__swig_destroy__ = _beamformer.delete_SphericalHWNCBeamformerPtr
__del__ = lambda self: None
def next(self, frame_no=-5):
return _beamformer.SphericalHWNCBeamformerPtr_next(self, frame_no)
def reset(self):
return _beamformer.SphericalHWNCBeamformerPtr_reset(self)
def calc_wng(self):
return _beamformer.SphericalHWNCBeamformerPtr_calc_wng(self)
def set_wng(self, ratio):
return _beamformer.SphericalHWNCBeamformerPtr_set_wng(self, ratio)
def dim(self):
return _beamformer.SphericalHWNCBeamformerPtr_dim(self)
def set_sigma2(self, sigma2):
return _beamformer.SphericalHWNCBeamformerPtr_set_sigma2(self, sigma2)
def set_weight_gain(self, wgain):
return _beamformer.SphericalHWNCBeamformerPtr_set_weight_gain(self, wgain)
def set_eigenmike_geometry(self):
return _beamformer.SphericalHWNCBeamformerPtr_set_eigenmike_geometry(self)
def set_array_geometry(self, a, theta_s, phi_s):
return _beamformer.SphericalHWNCBeamformerPtr_set_array_geometry(self, a, theta_s, phi_s)
def set_look_direction(self, theta, phi):
return _beamformer.SphericalHWNCBeamformerPtr_set_look_direction(self, theta, phi)
def mode_amplitudes(self):
return _beamformer.SphericalHWNCBeamformerPtr_mode_amplitudes(self)
def array_geometry(self, type):
return _beamformer.SphericalHWNCBeamformerPtr_array_geometry(self, type)
def beampattern(self, *args, **kwargs):
return _beamformer.SphericalHWNCBeamformerPtr_beampattern(self, *args, **kwargs)
def snapshot_array(self):
return _beamformer.SphericalHWNCBeamformerPtr_snapshot_array(self)
def snapshot_array2(self):
return _beamformer.SphericalHWNCBeamformerPtr_snapshot_array2(self)
def blocking_matrix(self, fbinX, unitX=0):
return _beamformer.SphericalHWNCBeamformerPtr_blocking_matrix(self, fbinX, unitX)
def clear_channel(self):
return _beamformer.SphericalHWNCBeamformerPtr_clear_channel(self)
def get_weights(self, fbinX):
return _beamformer.SphericalHWNCBeamformerPtr_get_weights(self, fbinX)
def calc_array_manifold_vectors(self, samplerate, delays):
return _beamformer.SphericalHWNCBeamformerPtr_calc_array_manifold_vectors(self, samplerate, delays)
def calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j):
return _beamformer.SphericalHWNCBeamformerPtr_calc_array_manifold_vectors_2(self, samplerate, delays_t, delays_j)
def calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC=2):
return _beamformer.SphericalHWNCBeamformerPtr_calc_array_manifold_vectors_n(self, samplerate, delays_t, delays_j, NC)
def snapshot_array_f(self, fbinX):
return _beamformer.SphericalHWNCBeamformerPtr_snapshot_array_f(self, fbinX)
def set_channel(self, chan):
return _beamformer.SphericalHWNCBeamformerPtr_set_channel(self, chan)
def is_end(self):
return _beamformer.SphericalHWNCBeamformerPtr_is_end(self)
def fftLen(self):
return _beamformer.SphericalHWNCBeamformerPtr_fftLen(self)
def chanN(self):
return | |
"""Roles utils"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from itertools import chain
from sqlalchemy import func
from tornado.log import app_log
from . import orm
from . import scopes
def get_default_roles():
"""Returns:
default roles (list): default role definitions as dictionaries:
{
'name': role name,
'description': role description,
'scopes': list of scopes,
}
"""
default_roles = [
{
'name': 'user',
'description': 'Standard user privileges',
'scopes': [
'self',
],
},
{
'name': 'admin',
'description': 'Elevated privileges (can do anything)',
'scopes': [
'admin:users',
'admin:servers',
'tokens',
'admin:groups',
'list:services',
'read:services',
'read:hub',
'proxy',
'shutdown',
'access:services',
'access:servers',
'read:roles',
],
},
{
'name': 'server',
'description': 'Post activity only',
'scopes': [
'users:activity!user',
'access:servers!user',
],
},
{
'name': 'token',
'description': 'Token with same permissions as its owner',
'scopes': ['all'],
},
]
return default_roles
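# Quick doctest-style illustration, based on the list defined above:
#     >>> [role['name'] for role in get_default_roles()]
#     ['user', 'admin', 'server', 'token']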
def expand_self_scope(name):
"""
Users have a metascope 'self' that should be expanded to standard user privileges.
At the moment that is a user-filtered version (optional read) access to
users
users:name
users:groups
users:activity
tokens
servers
access:servers
Arguments:
name (str): user name
Returns:
expanded scopes (set): set of expanded scopes covering standard user privileges
"""
scope_list = [
'read:users',
'read:users:name',
'read:users:groups',
'users:activity',
'read:users:activity',
'servers',
'read:servers',
'tokens',
'read:tokens',
'access:servers',
]
return {f"{scope}!user={name}" for scope in scope_list}
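# --- Illustrative sketch (not part of the original module) ---
# expand_self_scope() is pure string formatting, so its behaviour can be
# shown without a database: every standard user scope comes back filtered
# to the given username.
def _example_expand_self_scope():
    expanded = expand_self_scope('rollo')
    assert 'read:users!user=rollo' in expanded
    assert 'access:servers!user=rollo' in expanded
    # every entry carries the user filter
    assert all(scope.endswith('!user=rollo') for scope in expanded)
    return expanded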
def horizontal_filter(func):
"""Decorator to account for horizontal filtering in scope syntax"""
def expand_server_filter(hor_filter):
resource, mark, value = hor_filter.partition('=')
if resource == 'server':
user, mark, server = value.partition('/')
return f'read:users:name!user={user}'
def ignore(scopename):
# temporarily remove horizontal filtering if present
scopename, mark, hor_filter = scopename.partition('!')
expanded_scope = func(scopename)
# add the filter back
full_expanded_scope = {scope + mark + hor_filter for scope in expanded_scope}
server_filter = expand_server_filter(hor_filter)
if server_filter:
full_expanded_scope.add(server_filter)
return full_expanded_scope
return ignore
@horizontal_filter
def _expand_scope(scopename):
"""Returns a set of all subscopes
Arguments:
scopename (str): name of the scope to expand
Returns:
expanded scope (set): set of all scope's subscopes including the scope itself
"""
expanded_scope = []
def _add_subscopes(scopename):
expanded_scope.append(scopename)
if scopes.scope_definitions[scopename].get('subscopes'):
for subscope in scopes.scope_definitions[scopename].get('subscopes'):
_add_subscopes(subscope)
_add_subscopes(scopename)
return set(expanded_scope)
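# --- Illustrative sketch (not part of the original module) ---
# _expand_scope() always includes the scope itself in its expansion, and the
# horizontal_filter decorator re-attaches any '!...' filter to each subscope.
# This assumes 'read:users' is defined in scopes.scope_definitions.
def _example_expand_scope_with_filter():
    expanded = _expand_scope('read:users!user=rollo')
    assert 'read:users!user=rollo' in expanded
    assert all(scope.endswith('!user=rollo') for scope in expanded)
    return expanded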
def expand_roles_to_scopes(orm_object):
"""Get the scopes listed in the roles of the User/Service/Group/Token
If User, take into account the user's groups roles as well
Arguments:
orm_object: orm.User, orm.Service, orm.Group or orm.APIToken
Returns:
expanded scopes (set): set of all expanded scopes for the orm object
"""
if not isinstance(orm_object, orm.Base):
raise TypeError(f"Only orm objects allowed, got {orm_object}")
pass_roles = []
pass_roles.extend(orm_object.roles)
if isinstance(orm_object, orm.User):
for group in orm_object.groups:
pass_roles.extend(group.roles)
expanded_scopes = _get_subscopes(*pass_roles, owner=orm_object)
return expanded_scopes
def _get_subscopes(*roles, owner=None):
"""Returns a set of all available subscopes for a specified role or list of roles
Arguments:
roles (obj): orm.Roles
owner (obj, optional): orm.User or orm.Service as owner of orm.APIToken
Returns:
expanded scopes (set): set of all expanded scopes for the role(s)
"""
scopes = set()
for role in roles:
scopes.update(role.scopes)
expanded_scopes = set(chain.from_iterable(list(map(_expand_scope, scopes))))
# transform !user filter to !user=ownername
for scope in expanded_scopes.copy():
base_scope, _, filter = scope.partition('!')
if filter == 'user':
expanded_scopes.remove(scope)
if isinstance(owner, orm.APIToken):
token_owner = owner.user
if token_owner is None:
token_owner = owner.service
name = token_owner.name
else:
name = owner.name
trans_scope = f'{base_scope}!user={name}'
expanded_scopes.add(trans_scope)
if 'self' in expanded_scopes:
expanded_scopes.remove('self')
if owner and isinstance(owner, orm.User):
expanded_scopes |= expand_self_scope(owner.name)
return expanded_scopes
def _check_scopes(*args, rolename=None):
"""Check if provided scopes exist
Arguments:
scope (str): name of the scope to check
or
scopes (list): list of scopes to check
Raises NameError if scope does not exist
"""
allowed_scopes = set(scopes.scope_definitions.keys())
allowed_filters = ['!user=', '!service=', '!group=', '!server=', '!user']
if rolename:
log_role = f"for role {rolename}"
else:
log_role = ""
for scope in args:
scopename, _, filter_ = scope.partition('!')
if scopename not in allowed_scopes:
raise NameError(f"Scope '{scope}' {log_role} does not exist")
if filter_:
full_filter = f"!{filter_}"
if not any(f in scope for f in allowed_filters):
raise NameError(
f"Scope filter '{full_filter}' in scope '{scope}' {log_role} does not exist"
)
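# --- Illustrative sketch (not part of the original module) ---
# _check_scopes() raises NameError for scope names missing from
# scopes.scope_definitions; defined scopes, optionally with a '!user=...'
# filter, pass silently. 'read:users' is assumed to be a defined scope.
def _example_check_scopes():
    _check_scopes('read:users', 'read:users!user=rollo')  # no error expected
    try:
        _check_scopes('no:such:scope', rolename='custom')
    except NameError:
        pass  # expected: undefined scope name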
def _overwrite_role(role, role_dict):
"""Overwrites role's description and/or scopes with role_dict if role not 'admin'"""
for attr in role_dict.keys():
if attr == 'description' or attr == 'scopes':
if role.name == 'admin':
admin_role_spec = [
r for r in get_default_roles() if r['name'] == 'admin'
][0]
if role_dict[attr] != admin_role_spec[attr]:
raise ValueError(
'admin role description or scopes cannot be overwritten'
)
else:
if role_dict[attr] != getattr(role, attr):
setattr(role, attr, role_dict[attr])
app_log.info(
'Role %r %r attribute has been changed', role.name, attr
)
_role_name_pattern = re.compile(r'^[a-z][a-z0-9\-_~\.]{1,253}[a-z0-9]$')
def _validate_role_name(name):
"""Ensure a role has a valid name
Raises ValueError if role name is invalid
"""
if not _role_name_pattern.match(name):
raise ValueError(
f"Invalid role name: {name!r}."
" Role names must:\n"
" - be 3-255 characters\n"
" - contain only lowercase ascii letters, numbers, and URL unreserved special characters '-.~_'\n"
" - start with a letter\n"
" - end with letter or number\n"
)
return True
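# --- Illustrative sketch (not part of the original module) ---
# _validate_role_name() is a pure regex check, so it can be exercised
# directly: lowercase names pass, while uppercase characters (or a bad
# first/last character) raise ValueError.
def _example_validate_role_name():
    assert _validate_role_name('custom-reader')
    try:
        _validate_role_name('Custom-Reader')  # uppercase is rejected
    except ValueError:
        pass  # expected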
def create_role(db, role_dict):
"""Adds a new role to database or modifies an existing one"""
default_roles = get_default_roles()
if 'name' not in role_dict.keys():
raise KeyError('Role definition must have a name')
else:
name = role_dict['name']
_validate_role_name(name)
role = orm.Role.find(db, name)
description = role_dict.get('description')
scopes = role_dict.get('scopes')
# check if the provided scopes exist
if scopes:
_check_scopes(*scopes, rolename=role_dict['name'])
if role is None:
if not scopes:
app_log.warning('Warning: New defined role %s has no scopes', name)
role = orm.Role(name=name, description=description, scopes=scopes)
db.add(role)
if role_dict not in default_roles:
app_log.info('Role %s added to database', name)
else:
_overwrite_role(role, role_dict)
db.commit()
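# --- Illustrative usage sketch (not part of the original module) ---
# A custom role is just a dict with a name and, usually, a description and a
# list of scopes. The db argument is the same SQLAlchemy session used
# elsewhere in this module; 'read:users' is assumed to be a defined scope.
def _example_create_read_only_role(db):
    create_role(
        db,
        {
            'name': 'read-only',
            'description': 'Can read user models only',
            'scopes': ['read:users'],
        },
    )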
def delete_role(db, rolename):
"""Removes a role from database"""
# default roles are not removable
default_roles = get_default_roles()
if any(role['name'] == rolename for role in default_roles):
raise ValueError('Default role %r cannot be removed' % rolename)
role = orm.Role.find(db, rolename)
if role:
db.delete(role)
db.commit()
app_log.info('Role %s has been deleted', rolename)
else:
raise NameError('Cannot remove role %r that does not exist' % rolename)
def existing_only(func):
"""Decorator for checking if objects and roles exist"""
def _check_existence(db, entity, rolename):
role = orm.Role.find(db, rolename)
if entity is None:
raise ValueError(
f"{entity!r} of kind {type(entity).__name__!r} does not exist"
)
elif role is None:
raise ValueError("Role %r does not exist" % rolename)
else:
func(db, entity, role)
return _check_existence
@existing_only
def grant_role(db, entity, rolename):
"""Adds a role for users, services, groups or tokens"""
if isinstance(entity, orm.APIToken):
entity_repr = entity
else:
entity_repr = entity.name
if rolename not in entity.roles:
entity.roles.append(rolename)
db.commit()
app_log.info(
'Adding role %s for %s: %s',
rolename.name,
type(entity).__name__,
entity_repr,
)
@existing_only
def strip_role(db, entity, rolename):
"""Removes a role for users, services, groups or tokens"""
if isinstance(entity, orm.APIToken):
entity_repr = entity
else:
entity_repr = entity.name
if rolename in entity.roles:
entity.roles.remove(rolename)
db.commit()
app_log.info(
'Removing role %s for %s: %s',
rolename.name,
type(entity).__name__,
entity_repr,
)
def _switch_default_role(db, obj, admin):
"""Switch between default user/service and admin roles for users/services"""
user_role = orm.Role.find(db, 'user')
admin_role = orm.Role.find(db, 'admin')
def add_and_remove(db, obj, current_role, new_role):
if current_role in obj.roles:
strip_role(db, entity=obj, rolename=current_role.name)
# only add new default role if the user has no other roles
if len(obj.roles) < 1:
grant_role(db, entity=obj, rolename=new_role.name)
if admin:
add_and_remove(db, obj, user_role, admin_role)
else:
add_and_remove(db, obj, admin_role, user_role)
def _token_allowed_role(db, token, role):
"""Checks if requested role for token does not grant the token
higher permissions than the token's owner has
Returns:
True if requested permissions are within the owner's permissions, False otherwise
"""
owner = token.user
if owner is None:
owner = token.service
if owner is None:
raise ValueError(f"Owner not found for {token}")
expanded_scopes = _get_subscopes(role, owner=owner)
implicit_permissions = {'all', 'read:all'}
explicit_scopes = expanded_scopes - implicit_permissions
# ignore horizontal filters
no_filter_scopes = {
scope.split('!', 1)[0] if '!' in scope else scope for scope in explicit_scopes
}
# find the owner's scopes
expanded_owner_scopes = expand_roles_to_scopes(owner)
# ignore horizontal filters
no_filter_owner_scopes = {
scope.split('!', 1)[0] if '!' in scope else scope
for scope in expanded_owner_scopes
}
disallowed_scopes = no_filter_scopes.difference(no_filter_owner_scopes)
if not disallowed_scopes:
# no scopes requested outside owner's own scopes
return True
else:
app_log.warning(
f"Token requesting scopes exceeding owner {owner.name}: {disallowed_scopes}"
)
return False
def assign_default_roles(db, entity):
"""Assigns default role to an entity:
users and services get 'user' role, or admin role if they have admin flag
tokens get 'token' role
"""
if isinstance(entity, orm.Group):
pass
elif isinstance(entity, orm.APIToken):
app_log.debug('Assigning default roles to tokens')
default_token_role = orm.Role.find(db, 'token')
if not entity.roles and (entity.user or entity.service) is not None:
relative to the root of the source directory."""
return self.__name
def dot(self, marks):
"""Print a dot representation of this node build graph."""
if self in marks:
return True
marks[self] = None
print(' node_%s [label="%s"]' % (self.uid, self.__name))
if self.builder is not None:
if self.builder.dot(marks):
print(' builder_%s -> node_%s' % (self.builder.uid,
self.uid))
return True
def compilation_database(self):
"""Print a representation of this node compilation."""
if self.builder is None:
return
if not re.match('.*\\.o$', self.makefile_name()):
return
def to_string(s):
from pipes import quote
if isinstance(s, list):
s = ' '.join(s)
if not isinstance(s, str):
s = str(s)
return '"{}"'.format(re.sub(r'([\\"])', r'\\\1', s))
print('''\
{{
"file": {file},
"output": {output},
"directory": {directory},
"command": {command},
}},\
'''.format(command=to_string(self.builder.command),
directory=to_string(_OS.getcwd()),
file=to_string("../../" + str(self.builder.source)),
output=to_string(self.makefile_name())))
@classmethod
def drake_type(self):
"""The qualified name of this type."""
return '%s.%s' % (self.__module__, self.__name__)
def __str__(self):
"""String representation."""
return str(self.__name)
def __repr__(self):
"""Python representation."""
return '%s(%s)' % (self.__class__.drake_type(), self.name())
def build(self):
"""Build this node.
Take necessary action to ensure this node is up to date. That
is, roughly, run this node's builder.
"""
import sys
if not _scheduled():
Coroutine(self.build, str(self), Drake.current.scheduler)
Drake.current.scheduler.run()
else:
with sched.logger.log(
'drake.Builder',
drake.log.LogLevel.trace,
'%s: build', self):
self._build()
with sched.Scope() as scope:
for dep in self.dependencies:
if not dep.skippable():
scope.run(dep.build, str(dep))
self.polish()
def _build(self):
if self.builder is not None:
self.builder.run()
@property
def build_status(self):
return self.builder.build_status
def polish(self):
"""A hook called when a node has been built.
Called when a node has been built, that is, when all its
dependencies have been built and the builder run. Default
implementation does nothing.
>>> class MyNode (Node):
... def polish(self):
... print('Polishing.')
>>> n = MyNode('/tmp/.drake.polish')
>>> n.path().remove()
>>> b = TouchBuilder(n)
>>> n.build()
Touch /tmp/.drake.polish
Polishing.
"""
pass
def clean(self):
"""Clean recursively for this node sources."""
if self.builder is not None:
self.builder.clean()
def missing(self):
"""Whether this node is missing and must be built.
Always False, so unless redefined, BaseNodes are built only if
a dependency changed.
"""
return False
def makefile_name(self):
path = self.path() if isinstance(self, Node) \
else Path(self.name(), virtual=False)
return str(path)
def makefile(self, marks=None):
"""Print a Makefile for this node."""
from pipes import quote
if self.builder is None:
return
if marks is None:
marks = set()
if str(self.name()) in marks:
return
else:
marks.add(str(self.name()))
print('%s: %s' % (self.makefile_name(),
' '.join(map(lambda n: n.makefile_name(),
self.dependencies))))
cmd = self.builder.command
if cmd is not None:
if isinstance(self, Node):
print('\t@mkdir -p %s' % self.path().dirname())
if not isinstance(cmd, tuple):
cmd = (cmd,)
for c in cmd:
print('\t%s' % ' '.join(
map(lambda a: quote(str(a)).replace('$', '$$'), c)))
print('')
for dependency in self.dependencies:
dependency.makefile(marks)
def report_dependencies(self, dependencies):
"""Called when dependencies have been built.
This hook is always called no matter whether the nodes
were successfully built or not.
"""
pass
@property
def builder(self):
return self._builder
@builder.setter
def builder(self, builder):
del Drake.current.nodes[self._BaseNode__name]
self._builder = builder
Drake.current.nodes[self._BaseNode__name] = self
def dependency_add(self, dep):
assert dep is not None
self.__dependencies.add(dep)
def dependencies_add(self, deps):
for dep in deps:
self.dependency_add(dep)
@property
def dependencies(self):
return self.__dependencies
@property
def dependencies_recursive(self):
for dep in self.__dependencies:
yield dep
for sub in dep.dependencies_recursive:
yield sub
def __lt__(self, rhs):
"""Arbitrary global order on nodes, to enable
sorting/indexing."""
return self.name_absolute() < rhs.name_absolute()
def hash(self):
"""Digest of the file as a string."""
def _hash_file(hasher, path):
if _OS.path.isdir(str(path)):
for sub_path in _OS.listdir(str(path)):
_hash_file(hasher, _OS.path.join(str(path), str(sub_path)))
elif _OS.path.islink(str(path)):
hasher.update(_OS.readlink(str(path)).encode('utf-8'))
else:
with open(str(path), 'rb') as f:
while True:
chunk = f.read(8192)
if not chunk:
break
hasher.update(chunk)
if self.__hash is None:
with profile_hashing():
hasher = hashlib.sha1()
hashable = (
dep for dep in chain((self,), self.dependencies)
if isinstance(dep, Node))
for node in sorted(hashable):
_hash_file(hasher, node.path())
self.__hash = hasher.digest()
return self.__hash
def skippable(self):
if self.__skippable:
return True
self.__skippable = self._skippable()
return self.__skippable
def _skippable(self):
if self.builder is None:
if self.missing():
return False
else:
if not self.builder._Builder__executed:
return False
if self.builder._Builder__executed_exception is not None:
raise self.builder._Builder__executed_exception
return all(dep.skippable() for dep in self.dependencies)
@property
def mtime_local(self):
raise NotImplementedError()
class VirtualNode(BaseNode):
"""BaseNode that does not represent a file.
These may be configuration or meta information such as the version
system revision, used by other nodes as sources. They are also
used to implement Rule, which is a node that recursively builds
other nodes, but does not directly produce a file.
"""
def __init__(self, name):
"""Create a virtual node with the given name."""
path = drake.Drake.current.prefix / name
path = drake.Path(path._Path__path, False, True)
BaseNode.__init__(self, path)
class Node(BaseNode):
"""BaseNode representing a file."""
def __init__(self, path):
"""Construct a Node with the given path."""
path = drake.Drake.current.prefix / path
BaseNode.__init__(self, path)
self.__exists = False
self.__mtime = None
self.__path = None
self.__path_absolute = None
def clone(self, path):
"""Clone of this node, with an other path."""
return Node(path)
def clean(self):
"""Clean this node's file if it is generated, and recursively
its sources recursively."""
BaseNode.clean(self)
if self.builder is not None and self.path().exists():
print('Deleting %s' % self)
_OS.remove(str(self.path()))
def path(self, absolute=False):
"""Filesystem path to node file, relative to the root of the
build directory.
>>> with Drake('source/tree') as drake:
... n = node('file1')
... print(n.path())
... n = node('file2')
... builder = TouchBuilder([n])
... print(n.path())
source/tree/file1
file2
"""
if self.__path is None:
name = self._BaseNode__name
if name.absolute() or name.virtual:
self.__path = name
self.__path_absolute = name
elif self.builder is None:
self.__path = drake.path_source() / name
else:
self.__path = name
if absolute:
if self.__path_absolute is None:
self.__path_absolute = drake.path_root() / self.__path
return self.__path_absolute
else:
return self.__path
def missing(self):
"""Whether the associated file doesn't exist.
Nodes are built if their file does not exist.
"""
if not self.__exists:
self.__exists = self.path().exists()
return not self.__exists
def build(self):
"""Builds this node.
Building a Node raises an error if the associated file does
not exist and it has no builder.
>>> n = node('/tmp/.drake.node')
>>> n.path().remove()
>>> n.build()
Traceback (most recent call last):
...
drake.NoBuilder: no builder to make /tmp/.drake.node
If the file exists, drake considers it a provided input and
building it does nothing.
>>> n.path().touch()
>>> n.build()
If a Node needs to be built and its builder is executed, it
must create the Node's associated file.
>>> n = node('/tmp/.drake.othernode')
>>> n.path().remove()
>>> class EmptyBuilder(Builder):
... def execute(self):
... return True
>>> builder = EmptyBuilder([], [n])
>>> n.build()
Traceback (most recent call last):
...
Exception: /tmp/.drake.othernode was not created by EmptyBuilder
"""
super().build()
def _build(self):
if self.builder is None:
if self.missing():
raise NoBuilder(self)
else:
self.builder.run()
def __repr__(self):
"""Filesystem path to the node file, as a string."""
return str(self.path())
@property
def install_command(self):
return None
@property
def mtime(self):
return max(chain(
(self.mtime_local,),
(d.mtime for d in self.dependencies if isinstance(d, Node))))
@property
def mtime_local(self):
if self.__mtime is None:
self.__mtime = _OS.lstat(str(self.path())).st_mtime
return self.__mtime
def touch(self, t):
now = time.time()
def set(t):
if t > now:
if Drake.current.adjust_mtime_future:
time.sleep(t - now)
else:
raise NotImplementedError()
_OS.utime(str(self.path()), (t, t), follow_symlinks=False)
self.__mtime = None
return self.mtime_local >= t
try:
if not Drake.current.adjust_mtime_second and set(t + 0.001):
return True
if set(t + 1):
return True
print('Failed to adjust mtime of %s' % self)
return False
except NotImplementedError:
print('Refusing to adjust mtime of %s in the future' % self)
return False
@BaseNode.builder.setter
def builder(self, builder):
# Reset the path cache, as we might have believed that this node
# was in the source tree, while it's actually now discovered to
# belong to the build tree.
self.__path = None
# There is no cute syntax to call a super setter. See
# http://bugs.python.org/issue14965 for instance.
BaseNode.builder.__set__(self, builder)
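# --- Illustrative usage sketch (not part of the original module) ---
# Mirroring the doctests above: inside a Drake context, a Node attached to a
# builder lives in the build tree and is (re)generated on demand, e.g.:
#
#   with Drake('source/tree'):
#       out = node('generated.txt')
#       TouchBuilder([out])
#       out.build()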
def node(path, type=None):
"""Create or get a BaseNode.
path -- path to the node file.
type -- optional type of the node.
The returned node is determined as follows:
* If a node exists for the given path, it is returned.
* If the type is given, a node of that type is constructed with
the path as argument.
* If the path has a known extension, a node of the associated type
is constructed with the path as argument.
* If the path is virtual (e.g., //foo on Unix) a simple VirtualNode
with that name is constructed.
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for monorail.feature.alert2issue."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import email
import unittest
from mock import patch
import mox
from parameterized import parameterized
from features import alert2issue
from framework import authdata
from framework import emailfmt
from proto import tracker_pb2
from services import service_manager
from testing import fake
from testing import testing_helpers
from tracker import tracker_helpers
AlertEmailHeader = emailfmt.AlertEmailHeader
class TestData(object):
"""Contains constants or such objects that are intended to be read-only."""
cnxn = 'fake cnxn'
test_issue_local_id = 100
component_id = 123
trooper_queue = 'my-trooper-bug-queue'
project_name = 'proj'
project_addr = '%s+<EMAIL>+%s<EMAIL>' % (
project_name, trooper_queue)
project_id = 987
from_addr = '<EMAIL>'
user_id = 111
msg_body = 'this is the body'
msg_subject = 'this is the subject'
msg = testing_helpers.MakeMessage(
testing_helpers.ALERT_EMAIL_HEADER_LINES, msg_body)
incident_id = msg.get(AlertEmailHeader.INCIDENT_ID)
incident_label = alert2issue._GetIncidentLabel(incident_id)
# All the tests in this class use the following alert properties, and
# the generator functions/logic should be tested in a separate class.
alert_props = {
'owner_id': 0,
'cc_ids': [],
'status': 'Available',
'incident_label': incident_label,
'priority': 'Pri-0',
'trooper_queue': trooper_queue,
'field_values': [],
'labels': [
'Restrict-View-Google', 'Pri-0', incident_label, trooper_queue
],
'component_ids': [component_id],
}
class ProcessEmailNotificationTests(unittest.TestCase, TestData):
"""Implements unit tests for alert2issue.ProcessEmailNotification."""
def setUp(self):
# services
self.services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
user=fake.UserService(),
usergroup=fake.UserGroupService(),
project=fake.ProjectService(),
features=fake.FeaturesService())
# project
self.project = self.services.project.TestAddProject(
self.project_name, project_id=self.project_id,
process_inbound_email=True, contrib_ids=[self.user_id])
# config
proj_config = fake.MakeTestConfig(self.project_id, [], ['Available'])
comp_def_1 = tracker_pb2.ComponentDef(
component_id=123, project_id=987, path='FOO', docstring='foo docstring')
proj_config.component_defs = [comp_def_1]
self.services.config.StoreConfig(self.cnxn, proj_config)
# sender
self.auth = authdata.AuthData(user_id=self.user_id, email=self.from_addr)
# issue
self.issue = tracker_pb2.Issue(
project_id=self.project_id,
local_id=self.test_issue_local_id,
summary=self.msg_subject,
reporter_id=self.user_id,
component_ids=[self.component_id],
status=self.alert_props['status'],
labels=self.alert_props['labels'],
)
self.services.issue.TestAddIssue(self.issue)
# Patch send_notifications functions.
self.notification_patchers = [
patch('features.send_notifications.%s' % func, spec=True)
for func in [
'PrepareAndSendIssueBlockingNotification',
'PrepareAndSendIssueChangeNotification',
]
]
self.blocking_notification = self.notification_patchers[0].start()
self.blocking_notification = self.notification_patchers[1].start()
self.mox = mox.Mox()
def tearDown(self):
self.notification_patchers[0].stop()
self.notification_patchers[1].stop()
self.mox.UnsetStubs()
self.mox.ResetAll()
def testGoogleAddrsAreAllowlistedSender(self):
self.assertTrue(alert2issue.IsAllowlisted('<EMAIL>'))
self.assertFalse(alert2issue.IsAllowlisted('<EMAIL>'))
def testSkipNotification_IfFromNonAllowlistedSender(self):
self.mox.StubOutWithMock(alert2issue, 'IsAllowlisted')
alert2issue.IsAllowlisted(self.from_addr).AndReturn(False)
# None of them should be called, if the sender has not been allowlisted.
self.mox.StubOutWithMock(self.services.issue, 'CreateIssueComment')
self.mox.StubOutWithMock(self.services.issue, 'CreateIssue')
self.mox.ReplayAll()
alert2issue.ProcessEmailNotification(
self.services, self.cnxn, self.project, self.project_addr,
self.from_addr, self.auth, self.msg_subject, self.msg_body,
self.incident_label, self.msg, self.trooper_queue)
self.mox.VerifyAll()
def testSkipNotification_TooLongComment(self):
self.mox.StubOutWithMock(alert2issue, 'IsAllowlisted')
alert2issue.IsAllowlisted(self.from_addr).AndReturn(True)
self.mox.StubOutWithMock(alert2issue, 'IsCommentSizeReasonable')
alert2issue.IsCommentSizeReasonable(
'Filed by %s on behalf of %s\n\n%s' %
(self.auth.email, self.from_addr, self.msg_body)).AndReturn(False)
# None of them should be called, if the comment is too long.
self.mox.StubOutWithMock(self.services.issue, 'CreateIssueComment')
self.mox.StubOutWithMock(self.services.issue, 'CreateIssue')
self.mox.ReplayAll()
alert2issue.ProcessEmailNotification(
self.services, self.cnxn, self.project, self.project_addr,
self.from_addr, self.auth, self.msg_subject, self.msg_body,
self.incident_label, self.msg, self.trooper_queue)
self.mox.VerifyAll()
def testProcessNotification_IfFromAllowlistedSender(self):
self.mox.StubOutWithMock(alert2issue, 'IsAllowlisted')
alert2issue.IsAllowlisted(self.from_addr).AndReturn(True)
self.mox.StubOutWithMock(tracker_helpers, 'LookupComponentIDs')
tracker_helpers.LookupComponentIDs(
['Infra'],
mox.IgnoreArg()).AndReturn([1])
self.mox.StubOutWithMock(self.services.issue, 'CreateIssueComment')
self.mox.StubOutWithMock(self.services.issue, 'CreateIssue')
self.mox.ReplayAll()
# Either of the methods should be called, if the sender is allowlisted.
with self.assertRaises(mox.UnexpectedMethodCallError):
alert2issue.ProcessEmailNotification(
self.services, self.cnxn, self.project, self.project_addr,
self.from_addr, self.auth, self.msg_subject, self.msg_body,
self.incident_label, self.msg, self.trooper_queue)
self.mox.VerifyAll()
def testIssueCreated_ForNewIncident(self):
"""Tests if a new issue is created for a new incident."""
self.mox.StubOutWithMock(alert2issue, 'IsAllowlisted')
alert2issue.IsAllowlisted(self.from_addr).AndReturn(True)
# FindAlertIssue() returns None for a new incident.
self.mox.StubOutWithMock(alert2issue, 'FindAlertIssue')
alert2issue.FindAlertIssue(
self.services, self.cnxn, self.project.project_id,
self.incident_label).AndReturn(None)
# Mock GetAlertProperties() to create the issue with the expected
# properties.
self.mox.StubOutWithMock(alert2issue, 'GetAlertProperties')
alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.msg).AndReturn(self.alert_props)
self.mox.ReplayAll()
alert2issue.ProcessEmailNotification(
self.services, self.cnxn, self.project, self.project_addr,
self.from_addr, self.auth, self.msg_subject, self.msg_body,
self.incident_id, self.msg, self.trooper_queue)
# the local ID of the newly created issue should be +1 from the highest ID
# in the existing issues.
comments = self._verifyIssue(self.test_issue_local_id + 1, self.alert_props)
self.assertEqual(comments[0].content,
'Filed by %s on behalf of %s\n\n%s' % (
self.from_addr, self.from_addr, self.msg_body))
self.mox.VerifyAll()
def testProcessEmailNotification_ExistingIssue(self):
"""When an alert for an ongoing incident comes in, add a comment."""
self.mox.StubOutWithMock(alert2issue, 'IsAllowlisted')
alert2issue.IsAllowlisted(self.from_addr).AndReturn(True)
# FindAlertIssue() returns None for a new incident.
self.mox.StubOutWithMock(alert2issue, 'FindAlertIssue')
alert2issue.FindAlertIssue(
self.services, self.cnxn, self.project.project_id,
self.incident_label).AndReturn(self.issue)
# Mock GetAlertProperties() to create the issue with the expected
# properties.
self.mox.StubOutWithMock(alert2issue, 'GetAlertProperties')
alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.msg).AndReturn(self.alert_props)
self.mox.ReplayAll()
# Before processing the notification, ensures that there is only 1 comment
# in the test issue.
comments = self._verifyIssue(self.test_issue_local_id, self.alert_props)
self.assertEqual(len(comments), 1)
# Process
alert2issue.ProcessEmailNotification(
self.services, self.cnxn, self.project, self.project_addr,
self.from_addr, self.auth, self.msg_subject, self.msg_body,
self.incident_id, self.msg, self.trooper_queue)
# Now, it should have a new comment added.
comments = self._verifyIssue(self.test_issue_local_id, self.alert_props)
self.assertEqual(len(comments), 2)
self.assertEqual(comments[1].content,
'Filed by %s on behalf of %s\n\n%s' % (
self.from_addr, self.from_addr, self.msg_body))
self.mox.VerifyAll()
def _verifyIssue(self, local_issue_id, alert_props):
actual_issue = self.services.issue.GetIssueByLocalID(
self.cnxn, self.project.project_id, local_issue_id)
actual_comments = self.services.issue.GetCommentsForIssue(
self.cnxn, actual_issue.issue_id)
self.assertEqual(actual_issue.summary, self.msg_subject)
self.assertEqual(actual_issue.status, alert_props['status'])
self.assertEqual(actual_issue.reporter_id, self.user_id)
self.assertEqual(actual_issue.component_ids, [self.component_id])
if alert_props['owner_id']:
self.assertEqual(actual_issue.owner_id, alert_props['owner_id'])
self.assertEqual(sorted(actual_issue.labels), sorted(alert_props['labels']))
return actual_comments
class GetAlertPropertiesTests(unittest.TestCase, TestData):
"""Implements unit tests for alert2issue.GetAlertProperties."""
def assertSubset(self, lhs, rhs):
if not (lhs <= rhs):
raise AssertionError('%s not a subset of %s' % (lhs, rhs))
def assertCaseInsensitiveEqual(self, lhs, rhs):
self.assertEqual(lhs if lhs is None else lhs.lower(),
rhs if rhs is None else rhs.lower())
def setUp(self):
# services
self.services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
user=fake.UserService(),
usergroup=fake.UserGroupService(),
project=fake.ProjectService())
# project
self.project = self.services.project.TestAddProject(
self.project_name, project_id=self.project_id,
process_inbound_email=True, contrib_ids=[self.user_id])
proj_config = fake.MakeTestConfig(
self.project_id,
[
# test labels for Pri field
'Pri-0', 'Pri-1', 'Pri-2', 'Pri-3',
# test labels for OS field
'OS-Android', 'OS-Windows',
# test labels for Type field
'Type-Bug', 'Type-Bug-Regression', 'Type-Bug-Security', 'Type-Task',
],
['Assigned', 'Available', 'Unconfirmed']
)
self.services.config.StoreConfig(self.cnxn, proj_config)
# create a test email message, which tests can alternate the header values
# to verify the behaviour of a given parser function.
self.test_msg = email.Message.Message()
for key, value in self.msg.items():
self.test_msg[key] = value
self.mox = mox.Mox()
@parameterized.expand([
(None,),
('',),
(' ',),
])
def testDefaultComponent(self, header_value):
"""Checks if the default component is Infra."""
self.test_msg.replace_header(AlertEmailHeader.COMPONENT, header_value)
self.mox.StubOutWithMock(tracker_helpers, 'LookupComponentIDs')
tracker_helpers.LookupComponentIDs(
['Infra'],
mox.IgnoreArg()).AndReturn([self.component_id])
self.mox.ReplayAll()
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
self.assertEqual(props['component_ids'], [self.component_id])
self.mox.VerifyAll()
@parameterized.expand([
# an existing single component with componentID 1
({'Infra': 1}, [1]),
# 3 of existing components
({'Infra': 1, 'Foo': 2, 'Bar': 3}, [1, 2, 3]),
# a non-existing component
({'Infra': None}, []),
# 3 of non-existing components
({'Infra': None, 'Foo': None, 'Bar': None}, []),
# a mix of existing and non-existing components
({'Infra': 1, 'Foo': None, 'Bar': 2}, [1, 2]),
])
def testGetComponentIDs(self, components, expected_component_ids):
"""Tests _GetComponentIDs."""
self.test_msg.replace_header(
AlertEmailHeader.COMPONENT, ','.join(sorted(components.keys())))
self.mox.StubOutWithMock(tracker_helpers, 'LookupComponentIDs')
tracker_helpers.LookupComponentIDs(
sorted(components.keys()),
mox.IgnoreArg()).AndReturn(
[components[key] for key in sorted(components.keys())
if components[key]]
)
self.mox.ReplayAll()
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
self.assertEqual(sorted(props['component_ids']),
sorted(expected_component_ids))
self.mox.VerifyAll()
def testLabelsWithNecessaryValues(self):
"""Checks if the labels contain all the necessary values."""
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
# This test assumes that the test message contains non-empty values for
# all the headers.
self.assertTrue(props['incident_label'])
self.assertTrue(props['priority'])
self.assertTrue(props['issue_type'])
self.assertTrue(props['oses'])
# Here are a list of the labels that props['labels'] should contain
self.assertIn('Restrict-View-Google'.lower(), props['labels'])
self.assertIn(self.trooper_queue, props['labels'])
self.assertIn(props['incident_label'], props['labels'])
self.assertIn(props['priority'], props['labels'])
self.assertIn(props['issue_type'], props['labels'])
for os in props['oses']:
self.assertIn(os, props['labels'])
@parameterized.expand([
(None, 0),
('', 0),
(' ', 0),
])
def testDefaultOwnerID(self, header_value, expected_owner_id):
"""Checks if _GetOwnerID returns None in default."""
self.test_msg.replace_header(AlertEmailHeader.OWNER, header_value)
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
self.assertEqual(props['owner_id'], expected_owner_id)
@parameterized.expand(
[
# an existing user with userID 1.
('<EMAIL>', 1),
# a non-existing user.
('<EMAIL>', 0),
])
def testGetOwnerID(self, owner, expected_owner_id):
"""Tests _GetOwnerID returns the ID of the owner."""
self.test_msg.replace_header(AlertEmailHeader.CC, '')
self.test_msg.replace_header(AlertEmailHeader.OWNER, owner)
self.mox.StubOutWithMock(self.services.user, 'LookupExistingUserIDs')
self.services.user.LookupExistingUserIDs(self.cnxn, [owner]).AndReturn(
{owner: expected_owner_id})
self.mox.ReplayAll()
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
self.mox.VerifyAll()
self.assertEqual(props['owner_id'], expected_owner_id)
@parameterized.expand([
(None, []),
('', []),
(' ', []),
])
def testDefaultCCIDs(self, header_value, expected_cc_ids):
"""Checks if _GetCCIDs returns an empty list in default."""
self.test_msg.replace_header(AlertEmailHeader.CC, header_value)
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
self.assertEqual(props['cc_ids'], expected_cc_ids)
@parameterized.expand([
# with one existing user cc-ed.
({'<EMAIL>': 1}, [1]),
# with two of existing users.
({'<EMAIL>': 1, '<EMAIL>': 2}, [1, 2]),
# with one non-existing user.
({'<EMAIL>': None}, []),
# with two of non-existing users.
({'<EMAIL>': None, '<EMAIL>': None}, []),
# with a mix of existing and non-existing users.
({'<EMAIL>': 1, '<EMAIL>': None}, [1]),
])
def testGetCCIDs(self, ccers, expected_cc_ids):
"""Tests _GetCCIDs returns the IDs of the email addresses to be cc-ed."""
self.test_msg.replace_header(
AlertEmailHeader.CC, ','.join(sorted(ccers.keys())))
self.test_msg.replace_header(AlertEmailHeader.OWNER, '')
self.mox.StubOutWithMock(self.services.user, 'LookupExistingUserIDs')
self.services.user.LookupExistingUserIDs(
self.cnxn, sorted(ccers.keys())).AndReturn(ccers)
self.mox.ReplayAll()
props = alert2issue.GetAlertProperties(
self.services, self.cnxn, self.project_id, self.incident_id,
self.trooper_queue, self.test_msg)
self.mox.VerifyAll()
self.assertEqual(sorted(props['cc_ids']), sorted(expected_cc_ids))
@parameterized.expand([
# None and '' should result in the default priority returned.
(None, 'Pri-2'),
('', 'Pri-2'),
(' ', 'Pri-2'),
# Tests for valid priority values
('0', 'Pri-0'),
('1', 'Pri-1'),
('2', 'Pri-2'),
('3', 'Pri-3'),
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Helpers for indexed updates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp
from ..abstract_arrays import ShapedArray, ConcreteArray
from .. import core
from .. import lax
from ..numpy import lax_numpy as np
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
_int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer)
try:
abstract_idx = core.get_aval(idx)
except TypeError:
abstract_idx = None
out = not (isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx) or
isinstance(abstract_idx, ShapedArray) and _int(abstract_idx) or
isinstance(idx, slice) or
isinstance(idx, tuple) and all(onp.ndim(elt) == 0 for elt in idx))
return out and np._is_advanced_int_indexer(idx)
def _scatter_update(x, idx, y, scatter_op):
"""Helper for indexed updates.
Computes the value of x that would result from computing::
x[idx] op= y
except in a pure functional way, with no in-place updating.
Args:
x: ndarray to be updated.
idx: None, an integer, a slice, an ellipsis, an ndarray with integer dtype,
or a tuple of those indicating the locations of `x` into which to scatter-
update the values in `y`.
y: values to be scattered.
scatter_op: callable, either lax.scatter or lax.scatter_add.
Returns:
An ndarray representing an updated `x` after performing the scatter-update.
"""
# For more clues on the logic of this implementation, see the code for
# jax.numpy._rewriting_take (which has links to NumPy docs).
x = np.asarray(x)
y = np.asarray(y)
x_shape = np.shape(x)
y_shape = np.shape(y)
y = lax.convert_element_type(y, lax.dtype(x))
# Check if there's advanced indexing going on, and handle differently based on
# whether it is or isn't mixed with basic indexing.
if _is_advanced_int_indexer(idx):
if np._is_advanced_int_indexer_without_slices(idx):
if isinstance(idx, (tuple, list)):
if any(onp.shape(e) for e in idx):
# At least one sequence element in the index list means broadcasting.
idx = np.broadcast_arrays(*idx)
else:
# The index list is a flat list of integers.
idx = [lax.concatenate([lax.reshape(e, (1,)) for e in idx], 0)]
else:
# The indexer is just a single integer array.
idx = [idx]
stacked_idx = np.concatenate(
[np.mod(np.reshape(a, (-1, 1)), np._constant_like(a, x.shape[i]))
for i, a in enumerate(idx)], axis=1)
y = np.broadcast_to(y, idx[0].shape + onp.shape(x)[len(idx):])
y = lax.reshape(y, (stacked_idx.shape[0],) + onp.shape(x)[len(idx):])
dnums = lax.ScatterDimensionNumbers(
update_window_dims=tuple(range(1, y.ndim)),
inserted_window_dims=tuple(range(len(idx))),
scatter_dims_to_operand_dims=tuple(range(len(idx))))
return scatter_op(x, stacked_idx, y, dnums)
elif np._is_advanced_int_indexer(idx):
# TODO(mattjj, phawkins): one of us is going to implement this case someday
msg = "Unimplemented case for indexed update. Open a feature request!"
raise NotImplementedError(msg)
else:
assert False # unreachable
# At this point there's no advanced indexing going on, so we process each
# element of the index one at a time to build up a scatter.
if not isinstance(idx, tuple):
idx = (idx,)
# Remove ellipses and add trailing slice(None)s.
idx = np._canonicalize_tuple_index(x, idx)
_int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer)
x_axis = 0
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
update_window_dims = []
inserted_window_dims = []
scatter_dims_to_operand_dims = []
scatter_indices = np.zeros((0,), dtype=np.int32)
# We perform three transformations to y before the scatter op, in order:
# First, y is broadcast to slice_shape. In general `y` only needs to be broadcast to
# the right shape.
slice_shape = []
# Next, y is reshaped to collapsed_slice_shape. This is to handle `None`
# indices, which the scatter cannot remove itself.
collapsed_slice_shape = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
for i in idx:
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if (isinstance(abstract_i, ConcreteArray) or
isinstance(abstract_i, ShapedArray)) and _int(abstract_i):
i = np.mod(i, np._constant_like(i, x.shape[x_axis]))
i = lax.convert_element_type(i, np.int32)
i = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,))
scatter_indices = np.concatenate((scatter_indices, i), -1)
inserted_window_dims.append(x_axis)
scatter_dims_to_operand_dims.append(x_axis)
x_axis += 1
elif i is None:
slice_shape.append(1)
y_axis += 1
elif np._is_slice_none(i):
slice_shape.append(x_shape[x_axis])
collapsed_slice_shape.append(x_shape[x_axis])
update_window_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
elif isinstance(i, slice):
start, limit, stride, needs_rev = np._static_idx(i, x.shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, np.int32)
i = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,))
scatter_indices = np.concatenate((scatter_indices, i), -1)
slice_shape.append(limit - start)
collapsed_slice_shape.append(limit - start)
update_window_dims.append(collapsed_y_axis)
scatter_dims_to_operand_dims.append(x_axis)
else:
i = np.arange(start, limit, stride, dtype=np.int32)
size = i.shape[0]
slice_shape.append(size)
collapsed_slice_shape.append(size)
scatter_indices_shape = tuple(scatter_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i, shape=scatter_indices_shape + (1,),
broadcast_dimensions=(len(scatter_indices_shape) - 1,))
scatter_indices = lax.broadcast_in_dim(
scatter_indices,
shape=scatter_indices_shape + (len(scatter_dims_to_operand_dims),),
broadcast_dimensions=(
tuple(range(len(scatter_indices_shape) - 1)) +
(len(scatter_indices_shape),)))
scatter_indices = np.concatenate(
(scatter_indices, i), len(scatter_indices_shape))
scatter_dims_to_operand_dims.append(x_axis)
inserted_window_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
raise IndexError("Unknown index type ", i)
y = np.broadcast_to(y, tuple(slice_shape))
y = lax.reshape(y, collapsed_slice_shape)
if reversed_y_dims:
y = lax.rev(y, reversed_y_dims)
dnums = lax.ScatterDimensionNumbers(
update_window_dims = tuple(update_window_dims),
inserted_window_dims = tuple(inserted_window_dims),
scatter_dims_to_operand_dims = tuple(scatter_dims_to_operand_dims)
)
return scatter_op(x, scatter_indices, y, dnums)
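# Illustrative note (not part of the original module): the public wrappers
# defined below are thin shims over this helper, per the scatter_op contract
# documented above:
#   index_add(x, idx, y)    -> _scatter_update(x, idx, y, lax.scatter_add)
#   index_update(x, idx, y) -> _scatter_update(x, idx, y, lax.scatter)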
class _Indexable(object):
"""Helper object for building indexes for indexed update functions.
This is a singleton object that overrides the :code:`__getitem__` method
to return the index it is passed.
>>> jax.ops.index[1:2, 3, None, ..., ::2]
(slice(1, 2, None), 3, None, Ellipsis, slice(None, None, 2))
"""
__slots__ = ()
def __getitem__(self, index):
return index
#: Index object singleton
index = _Indexable()
def index_add(x, idx, y):
"""Pure equivalent of :code:`x[idx] += y`.
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
x[idx] += y
Note the `index_add` operator is pure; `x` itself is
not modified, instead the new value that `x` would have taken is returned.
Unlike the NumPy code :code:`x[idx] += y`, if multiple indices refer to the
same location the updates will be summed. (NumPy would only apply the last
update, rather than summing the updates.) The order in which conflicting
updates are applied is implementation-defined and may be nondeterministic
(e.g., due to concurrency on some hardware platforms).
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, `slice` objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above. A
convenient syntactic sugar for forming indices is via the
:data:`jax.ops.index` object.
y: the array of updates. `y` must be broadcastable to the shape of the
array that would be returned by `x[idx]`.
Returns:
An array.
>>> x = jax.numpy.ones((5, 6))
>>> jax.ops.index_add(x, jax.ops.index[2:4, 3:], 6.)
array([[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 7., 7., 7.],
[1., 1., 1., 7., 7., 7.],
[1., 1., 1., 1., 1., 1.]], dtype=float32)
"""
return _scatter_update(x, idx, y, lax.scatter_add)
def index_update(x, idx, y):
"""Pure equivalent of :code:`x[idx] = y`.
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
x[idx] = y
Note the `index_update` operator is pure; `x` itself is
not modified, instead the new value that `x` would have taken is returned.
Unlike NumPy's :code:`x[idx] = y`, if multiple indices refer to the same
location it is undefined which update is chosen; JAX may choose the order of
updates arbitrarily and nondeterministically (e.g., due to concurrent
updates on some hardware platforms).
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, `slice` objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above. A
convenient syntactic sugar for forming indices is via the
:data:`jax.ops.index` object.
y: the array of updates. `y` must be broadcastable to the shape of the
array that would be returned by `x[idx]`.
Returns:
An array.
>>> x = jax.numpy.ones((5, 6))
>>> jax.ops.index_update(x, jax.ops.index[::2, 3:], 6.)
array([[1., 1., 1., 6., 6., 6.],
[1., 1., 1., 1., 1., 1.],
[1., 1., 1., 6., 6., 6.],
""" This module implements the definition of the different configuration.
NOTE: Make sure to run the function 'save_common_default_template'
to save the default config after altering CompleteConfiguration.
Write the default raw configuration template
>>> import muteria.configmanager.configurations as mcc
>>> mcc.save_common_default_template()
We have a class defined for each configuration type. namely:
- `ExecutionConfig`: Execution Configuration.
- `ReportingConfig`: Reporting Configuration.
- `ProjectConfig`: Project Configuration.
- `TestcaseToolsConfig`: Testcase Tools Configuration.
- `CodecoverageToolsConfig`: Codecoverage Tools Configuration.
- `MutationToolsConfig`: Mutation Tools Configuration.
TODO: Implement the restriction in adding extra element after creation
TODO: Implement the verification of the parameters of each config
TODO: Implement loading the values of the parameters from files... and
Checking
"""
#TODO: Implement test tool ordering (who use whose tests)
from __future__ import print_function
import os
import logging
import muteria.common.mix as common_mix
from muteria.drivers.testgeneration import TestToolType
ERROR_HANDLER = common_mix.ErrorHandler
# TODO Find way to make the classes so that new elements cannot be added on the fly
class SessionMode(common_mix.EnumAutoName):
EXECUTE_MODE = 0
VIEW_MODE = 1
INTERNAL_MODE = 2
RESTORE_REPOS_MODE = 3
CUSTOM_EXECUTION_MODE = 4
#~ class SessionMode
class ConfigClasses(common_mix.EnumAutoName):
CONTROLLER_CONF = "CONTROLLER_CONF"
PROJECT_CONF = "PROJECT_CONF"
TESTCASES_CONF = "TESTCASES_CONF"
CRITERIA_CONF = "CRITERIA_CONF"
OTHER_CONF = "OTHER_CONF"
#~ class ConfigClasses
class ConfigElement(object):
def __init__(self, val=None, desc=None, val_range=None, conf_class=None):
self.val = val
self.desc = desc
self.val_range = val_range
self.conf_class = conf_class
#~ def __self__()
def set_val(self, new_val):
self.val = new_val
def get_val(self):
return self.val
def set_desc(self, new_desc):
self.desc = new_desc
def get_desc(self):
return self.desc
def set_val_range(self, new_val_range):
self.val_range = new_val_range
def get_val_range(self):
return self.val_range
def set_conf_class(self, new_conf_class):
self.conf_class = new_conf_class
def get_conf_class(self):
return self.conf_class
#~ class ConfigElement()
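# Illustrative sketch (not part of the original module): a configuration
# entry can be wrapped in a ConfigElement to carry its documentation and
# configuration class along with its value, e.g.:
#
#   run_mode_elem = ConfigElement(
#       val=SessionMode.EXECUTE_MODE,
#       desc="Mode in which the session is run",
#       conf_class=ConfigClasses.CONTROLLER_CONF,
#   )
#   run_mode_elem.get_val()  # -> SessionMode.EXECUTE_MODE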
class CompleteConfiguration(object):
#######################################################
####### Execution Parameters ######
#######################################################
# Criteria to Include in the analysis
# The corresponding configs of codecoverage (`CodecoverageToolsConfig`)
# and mutation (`MutationToolsConfig`) are considered if and only if
# the corresponding include is enabled
# Decide whether to start over, deleting previous, for execution mode
EXECUTION_CLEANSTART=False
# Value of type SessionMode (Mandatory)
RUN_MODE = None
# List of enabled criteria
# (each enabled criterion must have a tool specified)
# None means all criteria that have a tool specified
ENABLED_CRITERIA = []
# Enable keeping output summary for program passfail execution
GET_PASSFAIL_OUTPUT_SUMMARY = True
# keep output summary for the following criteria (may run slower)
CRITERIA_WITH_OUTPUT_SUMMARY = []
# Decides whether to hash the output log
HASH_OUTLOG = True
# PARALLELISM
SINGLE_REPO_PARALLELISM = 1 # Max number of parallel exec in a repo dir
# MICRO CONTROLS
EXECUTE_ONLY_CURENT_CHECKPOINT_META_TASK = False # for Debugging
RESTART_CURRENT_EXECUTING_META_TASKS = False
# Specify a Step to go back to
RE_EXECUTE_FROM_CHECKPOINT_META_TASKS = [] # Make interaction easy
# Output dir pathname (Mandatory)
OUTPUT_ROOT_DIR = None
# Enable logging debug data
LOG_DEBUG = False
#######################################################
####### Reporting Parameters ######
#######################################################
GENERATE_HTML_REPORT = True
OUTPUT_CRITERIA_SCORES = True
CRITERIA_SCORE_BY_TOOL = True
OUTPUT_CRITERIA_SUBSUMPTION_SCORE = True
OUTPUT_CRITERIA_COVERED_ELEMENTS = False
OUTPUT_CRITERIA_UNCOVERED_ELEMENTS = True
DETAILED_ELEMENTS_OUTPUT = False
OUTPUT_CRITERIA_SUBSUMING_ELEM_NUM = True
# When iteratively try to cover some element, show
OUTPUT_STATS_HISTORY = True
#######################################################
####### Project Config Parameters ######
#######################################################
# Project programming language (Mandatory)
PROGRAMMING_LANGUAGE = None
# Repository dir pathname (Mandatory)
REPOSITORY_ROOT_DIR = None
# string representing the relative path to the executable
# (or entry point file) in the repository
REPO_EXECUTABLE_RELATIVE_PATHS = None
# optional map between each source and the corresponding intermediate
# file (such as an object or assembly file for C, or a .class file for Java)
TARGET_SOURCE_INTERMEDIATE_CODE_MAP = None
# Specify the optional general scope of the evaluation,
# Specific scope can be specified per tool
TARGET_CLASSES_NAMES = None
# None value mean all functions
TARGET_METHODS_BY_TARGET_CLASSES = None
# each source file element is the relative path from repos rootdir.
# None value means all source files
##TARGET_SOURCE_FILES_LIST = None
# each test file element is the relative path from repos rootdir.
# None value means all dev tests
DEVELOPER_TESTS_LIST = None
# Function that takes 3 arguments:
# <test_name: str>
# <repos directory rootdir: str>
# <Executable relative path map: dict>
# <env_vars: map>
# <timeout: int>
# and run with the executable as in repository
# The function returns:
# 0 on passing test
# 1 on failing test
# -1 on error
CUSTOM_DEV_TEST_RUNNER_FUNCTION = None
CUSTOM_DEV_TEST_PROGRAM_WRAPPER_CLASS = None
# Take the outlog as a string and return the cleaned version
CUSTOM_TEST_EXECUTION_OUTPUT_CLEANER_FUNCTION = None
# Optional. When not None, the CUSTOM_DEV_TEST_RUNNER is the name of
# the function in this file to use
CUSTOM_DEV_TEST_RUNNER_MODULE = None
# Function that build the code to execute
# (for compiled languages such as C)
# The function has 5 parameters:
# <repos directory rootdir: str>
# <Executable relative path: str>
# <Optional compiler to use(compiler name): str>
# <optional flags to pass to compiler: list of flags>
# <clean temporary before build: bool>
# <reconfigure before build: bool>
# And returns:
# True on success
# False on failure
# A code builder class is defined and makes use of this function.
# It ensures one call to this function at a time and no call while any
# processing is going on in the repo dir.
CODE_BUILDER_FUNCTION = None
# Optional. When not None, CODE_BUILDER_FUNCTION is the name of
# the function in that module to use
CODE_BUILDER_MODULE = None
#######################################################
####### Tools parameters ######
#######################################################
# ===================== TESTCASES =====================#
# Tests already existing before execution starts
DEVELOPER_TESTS_ENABLED = True
# Test created during execution
GENERATED_TESTS_ENABLED = True
STOP_TESTS_EXECUTION_ON_FAILURE = False # Will not get full matrix
DISCARD_FLAKY_TESTS = True
# Test tool types in order of execution (Test generation).
# Elements in the same tuple run in parallel. Elements in tuple i
# generate tests after those in tuple i-1 and may use the tests produced at i-1.
TEST_TOOL_TYPES_SCHEDULING = [
(TestToolType.USE_ONLY_CODE,),
(TestToolType.USE_CODE_AND_TESTS,),
]
# Test generation tool. Example:
# >>> TestcaseToolsConfig(tooltype=TestToolType.USE_ONLY_CODE,
# toolname='klee', config_id=0)
TESTCASE_TOOLS_CONFIGS = [
]
TESTCASES_SELECTION = None
# Reporting
REPORT_NUMBER_OF_TESTS_GENERATED = True
REPORT_NUMBER_OF_DUPLICATED_TESTS = True
## --- Modifiable (Testcase) ---##
# use test case oracle as oracle
#TESTS_ORACLE_TESTS = True
# Use output of the specified version as oracle,
# Pass filepath to repo patch
#TESTS_ORACLE_OTHER_VERSION = None
# file path to an executable to use as oracle
#TESTS_ORACLE_OTHER_EXECUTABLE = None
#TEST_GENERATION_TIMEOUT = 7200.0 # in seconds
#ONE_TEST_EXECUTION_TIMEOUT = 900.0 # in seconds (Handle infinite loops)
# ========================================================#
# ===================== CRITERIA COVERAGE =====================#
# Map with key criteria and values the list of tools
# criteria tool. Example:
# >>> CriteriaToolConfig(tooltype=CriteriaToolType.USE_ONLY_CODE,
# toolname='gcov', config_id=0)
CRITERIA_TOOLS_CONFIGS_BY_CRITERIA = {
}
# List of sets of test criteria stating the order in which criteria
# should be executed. Example: strong mutation after weak mutation.
# When None, Default to the order in:
# >>> muteria.drivers.criteria.CRITERIA_SEQUENCE
CRITERIA_SEQUENCE = None
# List of criteria that have test objectives covered when test execution
# differs from the original
# When None, Default to the order in:
# >>> muteria.drivers.criteria.CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM
CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM = None
# List of criteria to run with failing tests
RUN_FAILING_TESTS_WITH_CRITERIA = [
]
# List of criteria to run with passing tests
RUN_PASSING_TESTS_WITH_CRITERIA = [
]
# list of test tool aliases for which we should not measure coverages
TESTCASE_TOOLALIASES_TO_SKIP_CRITERIA_COVERAGE = []
CRITERIA_RESTRICTION_ENABLED = True # Enable restricting mutation(scope)
# criterion: selection tools. Example: SM and TCE or E-SELECTIVE
CRITERIA_ELEM_SELECTIONS = {
}
ONLY_EXECUTE_SELECTED_CRITERIA_ELEM = True
MAX_CRITERIA_ELEM_SELECTION_NUM_PERCENT = '100%'
# Criterion: guider dict. ex: {STRONG_MUTATION: Surviving}
CRITERIA_TESTGEN_GUIDANCE = {
}
# Criterion: optimizers dict. ex: {STRONG_MUTATION: SM_OPTIMIZED_BY_WM}
CRITERIA_EXECUTION_OPTIMIZERS = {
}
# Will not get a full matrix; the non-executed elements will be uncertain
COVER_CRITERIA_ELEMENTS_ONCE = False
## --- Modifiable (Code) ---##
#SEPARATED_TEST_EXECUTION_EXTRA_TIMEOUT_TIMES = 60.0 # in seconds
#META_TEST_EXECUTION_EXTRA_TIMEOUT_TIMES = 600.0 # in seconds
# ========================================================#
#######################################################
####### Extra parameters ######
#######################################################
#LLVM_TO_NATIVE_LINKING_FLAGS = None
#~ class CompleteConfiguration
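# Illustrative sketch (not part of the original module): a user configuration
# typically only needs to set the mandatory fields listed above (how the file
# is loaded is handled elsewhere by muteria). For example, following the
# per-field comments in CompleteConfiguration:
#
#   RUN_MODE = SessionMode.EXECUTE_MODE
#   OUTPUT_ROOT_DIR = "/path/to/output"
#   PROGRAMMING_LANGUAGE = "C"
#   REPOSITORY_ROOT_DIR = "/path/to/repo"
#   REPO_EXECUTABLE_RELATIVE_PATHS = "src/program"   # per the comment above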
class BaseToolConfig(dict):
"""
:param criteria_on: (list) alternate way to represent criteria of a tool (TODO)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: zabbix_mediatype
short_description: Create/Update/Delete Zabbix media types
description:
- This module allows you to create, modify and delete Zabbix media types.
author:
- <NAME> (@rubentsirunyan)
requirements:
- "zabbix-api >= 0.5.4"
options:
name:
type: 'str'
description:
- Name of the media type.
required: true
state:
type: 'str'
description:
- Desired state of the mediatype.
- On C(present), it will create a mediatype if it does not exist or update the mediatype if the associated data is different.
- On C(absent), it will remove the mediatype if it exists.
choices:
- present
- absent
default: 'present'
type:
type: 'str'
description:
- Type of the media type.
- Media types I(jabber) and I(ez_texting) are only usable with Zabbix 4.2 or earlier.
choices:
- email
- script
- sms
- jabber
- ez_texting
required: true
status:
type: 'str'
description:
- Whether the media type is enabled or not.
choices:
- enabled
- disabled
default: 'enabled'
max_sessions:
type: 'int'
description:
- The maximum number of alerts that can be processed in parallel.
- Possible value is 1 when I(type=sms) and 0-100 otherwise.
default: 1
max_attempts:
type: 'int'
description:
- The maximum number of attempts to send an alert.
- Possible range is 0-10
default: 3
attempt_interval:
type: 'int'
description:
- The interval between retry attempts.
- Possible range is 0-60
default: 10
script_name:
type: 'str'
description:
- The name of the executed script.
- Required when I(type=script).
script_params:
type: 'list'
elements: str
description:
- List of script parameters.
- Required when I(type=script).
gsm_modem:
type: 'str'
description:
- Serial device name of the gsm modem.
- Required when I(type=sms).
username:
type: 'str'
description:
- Username or Jabber identifier.
- Required when I(type=jabber) or I(type=ez_texting).
- Required when I(type=email) and I(smtp_authentication=true).
password:
type: 'str'
description:
- Authentication password.
- Required when I(type=jabber) or I(type=ez_texting).
- Required when I(type=email) and I(smtp_authentication=true).
smtp_server:
type: 'str'
description:
- SMTP server host.
- Required when I(type=email).
default: 'localhost'
smtp_server_port:
type: 'int'
description:
- SMTP server port.
- Required when I(type=email).
default: 25
smtp_helo:
type: 'str'
description:
- SMTP HELO.
- Required when I(type=email).
default: 'localhost'
smtp_email:
type: 'str'
description:
- Email address from which notifications will be sent.
- Required when I(type=email).
smtp_authentication:
type: 'bool'
description:
- Whether SMTP authentication with username and password should be enabled or not.
- If set to C(true), C(username) and C(password) should be specified.
default: false
smtp_security:
type: 'str'
description:
- SMTP connection security level to use.
choices:
- None
- STARTTLS
- SSL/TLS
smtp_verify_host:
type: 'bool'
description:
- SSL verify host for SMTP.
- Can be specified when I(smtp_security=STARTTLS) or I(smtp_security=SSL/TLS)
default: false
smtp_verify_peer:
type: 'bool'
description:
- SSL verify peer for SMTP.
- Can be specified when I(smtp_security=STARTTLS) or I(smtp_security=SSL/TLS)
default: false
message_text_limit:
type: 'str'
description:
- The message text limit.
- Required when I(type=ez_texting).
- 160 characters for USA and 136 characters for Canada.
choices:
- USA
- Canada
extends_documentation_fragment:
- community.general.zabbix
'''
RETURN = r''' # '''
EXAMPLES = r'''
- name: 'Create an email mediatype with SMTP authentication'
zabbix_mediatype:
name: "Ops email"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "<PASSWORD>"
type: 'email'
smtp_server: 'example.com'
smtp_server_port: 2000
smtp_email: '<EMAIL>'
smtp_authentication: true
username: 'smtp_user'
password: '<PASSWORD>'
- name: 'Create a script mediatype'
zabbix_mediatype:
name: "my script"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "<PASSWORD>"
type: 'script'
script_name: 'my_script.py'
script_params:
- 'arg1'
- 'arg2'
- name: 'Create a jabber mediatype'
zabbix_mediatype:
name: "My jabber"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "<PASSWORD>"
type: 'jabber'
username: 'jabber_id'
password: '<PASSWORD>'
- name: 'Create an SMS mediatype'
zabbix_mediatype:
name: "My SMS Mediatype"
server_url: "http://example.com/zabbix/"
login_user: Admin
login_password: "<PASSWORD>"
type: 'sms'
gsm_modem: '/dev/ttyS0'
'''
import atexit
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from distutils.version import LooseVersion
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
ZBX_IMP_ERR = traceback.format_exc()
HAS_ZABBIX_API = False
def to_numeric_value(value, strs):
return strs.get(value)
def validate_params(module, params):
"""Validates arguments that are required together.
Fails the module with a message listing the missing
requirements, if there are any.
Args:
module: AnsibleModule object.
params (list): Each element of this list
is a list like
['argument_key', 'argument_value', ['required_arg_1',
'required_arg_2']].
Format is the same as `required_if` parameter of AnsibleModule.
"""
for param in params:
if module.params[param[0]] == param[1]:
if None in [module.params[i] for i in param[2]]:
module.fail_json(
msg="Following arguments are required when {key} is {value}: {arguments}".format(
key=param[0],
value=param[1],
arguments=', '.join(param[2])
)
)
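# Illustrative sketch of a validate_params() call (not part of the original
# module); the option names below come from the DOCUMENTATION block above and
# the list follows the required_if-style format described in the docstring:
#
#     validate_params(module, [
#         ['type', 'email', ['smtp_server', 'smtp_email']],
#         ['type', 'script', ['script_name']],
#         ['type', 'sms', ['gsm_modem']],
#     ])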
def construct_parameters(**kwargs):
"""Translates data to a format suitable for Zabbix API and filters
the ones that are related to the specified mediatype type.
Args:
**kwargs: Arguments passed to the module.
Returns:
A dictionary of arguments that are related to kwargs['transport_type'],
and are in a format that is understandable by Zabbix API.
"""
if kwargs['transport_type'] == 'email':
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
smtp_server=kwargs['smtp_server'],
smtp_port=str(kwargs['smtp_server_port']),
smtp_helo=kwargs['smtp_helo'],
smtp_email=kwargs['smtp_email'],
smtp_security=to_numeric_value(str(kwargs['smtp_security']),
{'None': '0',
'STARTTLS': '1',
'SSL/TLS': '2'}),
smtp_authentication=to_numeric_value(str(kwargs['smtp_authentication']),
{'False': '0',
'True': '1'}),
smtp_verify_host=to_numeric_value(str(kwargs['smtp_verify_host']),
{'False': '0',
'True': '1'}),
smtp_verify_peer=to_numeric_value(str(kwargs['smtp_verify_peer']),
{'False': '0',
'True': '1'}),
username=kwargs['username'],
passwd=kwargs['password']
)
elif kwargs['transport_type'] == 'script':
if kwargs['script_params'] is None:
_script_params = '' # ZBX-15706
else:
_script_params = '\n'.join(str(i) for i in kwargs['script_params']) + '\n'
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
exec_path=kwargs['script_name'],
exec_params=_script_params
)
elif kwargs['transport_type'] == 'sms':
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
gsm_modem=kwargs['gsm_modem']
)
elif kwargs['transport_type'] == 'jabber' and LooseVersion(kwargs['zbx_api_version']) <= LooseVersion('4.2'):
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
username=kwargs['username'],
passwd=kwargs['password']
)
elif kwargs['transport_type'] == 'ez_texting' and LooseVersion(kwargs['zbx_api_version']) <= LooseVersion('4.2'):
return dict(
description=kwargs['name'],
status=to_numeric_value(kwargs['status'],
{'enabled': '0',
'disabled': '1'}),
type=to_numeric_value(kwargs['transport_type'],
{'email': '0',
'script': '1',
'sms': '2',
'jabber': '3',
'ez_texting': '100'}),
maxsessions=str(kwargs['max_sessions']),
maxattempts=str(kwargs['max_attempts']),
attempt_interval=str(kwargs['attempt_interval']),
username=kwargs['username'],
passwd=kwargs['password'],
exec_path=to_numeric_value(kwargs['message_text_limit'],
{'USA': '0',
'Canada': '1'}),
)
return {'unsupported_parameter': kwargs['transport_type'], 'zbx_api_version': kwargs['zbx_api_version']}
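# Illustrative sketch of a construct_parameters() call for an email mediatype
# (values are placeholders; the real call must pass every keyword argument the
# chosen transport type reads, as the module's main() presumably does):
#
#     parameters = construct_parameters(
#         name='Ops email', transport_type='email', status='enabled',
#         max_sessions=1, max_attempts=3, attempt_interval=10,
#         smtp_server='example.com', smtp_server_port=25,
#         smtp_helo='localhost', smtp_email='zabbix@example.com',
#         smtp_security=None, smtp_authentication=False,
#         smtp_verify_host=False, smtp_verify_peer=False,
#         username=None, password=None, zbx_api_version='4.0')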
def check_if_mediatype_exists(module, zbx, name, zbx_api_version):
"""Checks if mediatype exists.
Args:
module: AnsibleModule object
zbx: ZabbixAPI object
name: Zabbix mediatype name
zbx_api_version: Zabbix API version string
Returns:
Tuple of (True, `id of the mediatype`) if mediatype exists, (False, None) otherwise
"""
filter_key_name = 'description'
if LooseVersion(zbx_api_version) >= LooseVersion('4.4'):
# the 'description' key was replaced by 'name' starting with Zabbix 4.4
filter_key_name = 'name'
try:
mediatype_list = zbx.mediatype.get({
'output': 'extend',
'filter': {filter_key_name: [name]}
})
if len(mediatype_list) < 1:
return False, None
else:
return True, mediatype_list[0]['mediatypeid']
except Exception as e:
module.fail_json(msg="Failed to get ID of the mediatype '{name}': {e}".format(name=name, e=e))
def diff(existing, new):
"""Constructs the diff for Ansible's --diff option.
Args:
existing (dict): Existing mediatype data.
new (dict): New mediatype data.
Returns:
A dictionary like {'before': existing, 'after': new}
with filtered empty values.
"""
before = {}
after = {}
for key in new:
before[key] = existing[key]
if new[key] is None:
after[key] = ''
else:
after[key] = new[key]
return {'before': before, 'after': after}
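# Illustrative example of the structure diff() produces (values made up):
#
#     diff({'description': 'Ops email', 'smtp_port': '25'},
#          {'description': 'Ops email', 'smtp_port': '2525'})
#     # -> {'before': {'description': 'Ops email', 'smtp_port': '25'},
#     #     'after':  {'description': 'Ops email', 'smtp_port': '2525'}}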
def get_update_params(module, zbx, mediatype_id, **kwargs):
"""Filters only the parameters that are different and need to be updated.
Args:
module: AnsibleModule object.
zbx: ZabbixAPI object.
mediatype_id (int): ID of the mediatype to be updated.
**kwargs: Parameters for the new mediatype.
Returns:
A tuple where the first element is a dictionary of parameters
that need to be updated and the second one is a dictionary
returned by diff() function with
existing mediatype data and new params passed to it.
"""
existing_mediatype = zbx.mediatype.get({
'output': 'extend',
'mediatypeids': [mediatype_id]
})[0]
if existing_mediatype['type'] != kwargs['type']:
return kwargs, diff(existing_mediatype, kwargs)
else:
params_to_update = {}
for key in kwargs:
if (not (kwargs[key] is None and existing_mediatype[key] == '')) and kwargs[key] != existing_mediatype[key]:
params_to_update[key] = kwargs[key]
return params_to_update, diff(existing_mediatype, kwargs)
def delete_mediatype(module, zbx, mediatype_id):
try:
return zbx.mediatype.delete([mediatype_id])
except Exception as e:
module.fail_json(msg="Failed to delete mediatype '{_id}': {e}".format(_id=mediatype_id, e=e))
def update_mediatype(module, zbx, **kwargs):
try:
mediatype_id = zbx.mediatype.update(kwargs)
except Exception as e:
module.fail_json(msg="Failed to update mediatype '{_id}': {e}".format(_id=kwargs['mediatypeid'], e=e))
def | |
state with a Boolean statement later on.'''
last_epoch = self.data[self.data.State == 1].Epoch.max()
if time_dependent_covariates == False:
'''This option will produce data appropriate for constructing
Nelson-Aalen cumulative incidence functions and for producing the state
tables.
This option is not appropriate if we wish to perform Cox Proportional
Hazards Regression modeling as we will not be able to join to time-
dependent covariates in R.
'''
columns = ['FreqCode','state','presence','Epoch','timeDelta','time0','firstObs'] # create columns
self.master_stateTable = pd.DataFrame()
for i in self.fish:
fishDat = self.data[self.data.FreqCode == i] # get data for this fish
# get first recapture in state 1
fishDat.sort_values(by = 'Epoch', ascending = True, inplace = True) # sort by exposure time
fishDat['prevState'] = fishDat['State'].shift(1) # get previous state
fishDat['prevState'].fillna(fishDat.State.values[0], inplace = True) # fill NaN states with current state - for first record in data frame
presence = 1
firstObs = 1
stateTable = pd.DataFrame(columns = columns) # create empty data frame
time0 = self.startTimes[self.startTimes.FreqCode == i].FirstRecapture.iloc[0] # get initial time - need to calculate seconds between current record and time of release
fishDat = fishDat[fishDat.Epoch >= time0]
time1 = fishDat.Epoch.iloc[0]
timeDelta = time1 - time0 # seconds between observation and release
rowArr = [i,fishDat.State.values[0],presence,time1,timeDelta,time0,firstObs] # create initial row for state table
row = pd.DataFrame(np.array([rowArr]),columns = columns) # now it's officially a pandas row
stateTable = stateTable.append(row) # now append that row
firstObs = 0
fishDat['idx'] = np.arange(0,len(fishDat),1) # gives row an arbitrary index
maxIdx = fishDat.idx.iloc[-1] # get that maximum index number
for j in fishDat.iterrows(): # for every row in fish data
rowIdx = j[1]['idx'] # what's the row number?
state = j[1]['State'] # what's the state
prevState = j[1]['prevState'] # what was the previous state
if state != prevState or rowIdx == maxIdx: # if the present state does not equal the previous state or if we reach the end of the dataframe...
time1 = j[1]['Epoch'] # what time is it?
if unknown_state != None and rowIdx == maxIdx and state == 1 and time1 < last_epoch:
state = unknown_state
timeDelta = time1 - time0 # seconds between the current observation and time0 (start of this state)
presence = presence + 1 # oh snap new observation for new state
rowArr = [i,state,presence,time1,timeDelta,time0,firstObs] # start a new row
row = pd.DataFrame(np.array([rowArr]),columns = columns)
stateTable = stateTable.append(row) # add the row to the state table data frame
time0 = j[1]['Epoch']
print ("State Table Completed for Fish %s"%(i))
stateTable['state'] = stateTable['state'].astype(np.int32)
from_rec = stateTable['state'].shift(1).fillna(stateTable.iloc[0]['state']).astype(np.int32)
to_rec = stateTable['state'].astype(np.int32)
trans = tuple(zip(from_rec,to_rec))
stateTable['transition'] = trans
stateTable['startState'] = from_rec
stateTable['endState'] = to_rec
stateTable['t0'] = stateTable['time0']
stateTable['time0'] = pd.to_numeric(stateTable['time0'], errors='coerce')
#stateTable['time1'] = pd.to_numeric(stateTable['time1'], errors='coerce')
stateTable['Epoch'] = pd.to_numeric(stateTable['Epoch'], errors = 'coerce')
stateTable.time0 = stateTable.time0.astype(np.int32)
stateTable.Epoch = stateTable.Epoch.astype(np.int32)
stateTable['t1'] = stateTable['Epoch'] - stateTable['time0']
self.master_stateTable = self.master_stateTable.append(stateTable)
del i,j
else:
columns = ['FreqCode','startState','endState','presence','timeStamp','firstObs','t0','t1'] # create columns
self.master_stateTable = pd.DataFrame()
self.bucket_length = bucket_length_min
for i in self.fish:
fishDat = self.data[self.data.FreqCode == i] # get data for this fish
fishDat.sort_values(by = 'Epoch', ascending = True, inplace = True) # sort by exposure time
fishDat['prevState'] = fishDat['State'].shift(1) # get previous state
fishDat['prevState'].fillna(fishDat.State.values[0], inplace = True) # fill NaN states with current state - for first record in data frame
presence = 0
firstObs = 1
stateTable = pd.DataFrame(columns = columns) # create empty data frame
time0 = self.startTimes[self.startTimes.FreqCode == i].FirstRecapture.iloc[0] # get initial time - need to calculate seconds between current record and time of release
fishDat = fishDat[fishDat.Epoch >= time0]
initialTime = pd.to_datetime(time0, unit = 's')
time1 = fishDat.Epoch.iloc[0]
timeDelta = time1 - time0 # seconds between observation and release
firstObs = 0
fishDat['idx'] = np.arange(0,len(fishDat),1) # gives row an arbitrary index
maxIdx = fishDat.idx.iloc[-1] # get that maximum index number
for j in fishDat.iterrows(): # for every row in fish data
rowIdx = j[1]['idx'] # what's the row number?
state1 = int(j[1]['prevState']) # what's the state
state2 = int(j[1]['State']) # what was the previous state
ts = j[1]['timeStamp']
if state1 != state2 or rowIdx == maxIdx: # if the present state does not equal the previous state or if we reach the end of the dataframe...
time1 = j[1]['Epoch'] # what time is it?
timeDelta = time1 - time0 # seconds between the current observation and time0 (start of this state)
#if state1 != state2 or rowIdx == maxIdx:
presence = presence + 1 # oh snap new observation for new state
rowArr = [i,state1,state2,presence,ts,firstObs,time0,time1] # start a new row
row = pd.DataFrame(np.array([rowArr]),columns = columns)
stateTable = stateTable.append(row) # add the row to the state table data frame
time0 = j[1]['Epoch']
print ("State Table Completed for Fish %s"%(i))
stateTable['t0'] = pd.to_datetime(stateTable.t0.values, unit = 's') # get timestamp values
stateTable['t1'] = pd.to_datetime(stateTable.t1.values, unit = 's') # get timestamp values
stateTable.sort_values(by = 't0', ascending = True, inplace = True) # sort by exposure time
timeBucket = self.bucket_length*60*1000000000 # time bucket in nanoseconds
stateTable['flowPeriod'] = (stateTable['t0'].astype(np.int64)//timeBucket+1) * timeBucket # round up to the next flow-period boundary (bucket_length minutes)
stateTable['flowPeriod'] = pd.to_datetime(stateTable['flowPeriod']) # turn it into a datetime object so we can use pandas to expand and fill
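# Worked example of the bucketing arithmetic above (illustrative, assuming
# bucket_length_min = 15): timeBucket = 15*60*1e9 ns, so a t0 of 10:07:00
# floor-divides to the 10:00 bucket and the +1 pushes flowPeriod to 10:15:00;
# a t0 that falls exactly on a boundary (e.g. 10:15:00) is pushed to 10:30:00.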
rowNum = np.arange(0,len(stateTable),1)
stateTable['rowNum'] = rowNum
exp_stateTable = pd.DataFrame()
for row in stateTable.iterrows():
rowIdx = row[1]['rowNum'] # get row index number
t0 = row[1]['flowPeriod'] # identify current rows flow period
t1 = row[1]['t1'] # identify next row's flow period
try:
expand = pd.date_range(t0,t1,freq = '%smin'%(self.bucket_length)) # expand into bucket_length-minute intervals
except ValueError:
expand = []
except AttributeError:
expand = []
if len(expand) > 0:
#expand = expand[1:]
series = pd.Series(expand, index = expand, name = 'flowPeriod') # create series using expanded time intervals
intervals = series.to_frame() # convert series to dataframe
intervals.reset_index(inplace = True, drop = True) # reset index
intervals['t0'] = row[1]['t0'] # fill data for variables that don't change
intervals['t1'] = row[1]['t1']
intervals['startState'] = row[1]['startState']
intervals['endState'] = row[1]['endState']
intervals['timeStamp'] = row[1]['timeStamp']
intervals['FreqCode'] = row[1]['FreqCode']
intervals['presence'] = row[1]['presence']
newRowArr = np.array([row[1]['FreqCode'],row[1]['startState'],row[1]['endState'],row[1]['timeStamp'],row[1]['flowPeriod'],row[1]['t0'],row[1]['t1'],row[1]['presence']])
newRow = pd.DataFrame(np.array([newRowArr]),columns = ['FreqCode','startState','endState','timeStamp','flowPeriod','t0','t1','presence']) # add first, non expanded row to new state table
newRow = newRow.append(intervals) # add filled and expanded data
newRow['nextFlowPeriod'] = newRow['flowPeriod'].shift(-1) # identify the next flow period
newRow['idx'] = np.arange(0,len(newRow),1) # add a count index field, but don't index it yet
newRow.reset_index(inplace = True, drop = True) # remove the index
idxL = newRow.idx.values # generate list of indexes
newRow.loc[idxL[1]:,'t0'] = newRow.loc[idxL[1]:,'flowPeriod'].astype(str) # after the initial t0, re-write the current t0 as the current row's flow period
newRow.loc[:idxL[-2],'t1'] = newRow.loc[:idxL[-2],'nextFlowPeriod'].astype(str) # other than the last t1, re-write the current t1 as the current row's next flow period - see what we did there?
newRow.loc[:idxL[-2],'endState'] = row[1]['startState'] # other than the last row in the series, re-write the end state as the start state - there will be a lot of to-from same site here. it's ok, these are censored observations.
newRow['t0'] = pd.to_datetime(newRow['t0']) # convert time text to datetime - so we can do stuff with it
newRow['t1'] = pd.to_datetime(newRow['t1'])
exp_stateTable = exp_stateTable.append(newRow) # now add all that stuff to the state table dataframe
del newRow, intervals, newRowArr, expand
else:
newRowArr = np.array([row[1]['FreqCode'],row[1]['startState'],row[1]['endState'],row[1]['timeStamp'],row[1]['flowPeriod'],row[1]['t0'],row[1]['t1'],row[1]['presence']])
newRow = pd.DataFrame(np.array([newRowArr]),columns = ['FreqCode','startState','endState','timeStamp','flowPeriod','t0','t1','presence']) # add first, non expanded row to new state table
exp_stateTable = exp_stateTable.append(newRow)
del newRow, newRowArr
exp_stateTable.sort_values(by = 't0', ascending = True, inplace = True) # sort by exposure time
exp_stateTable['time0'] = pd.to_datetime(exp_stateTable['t0']) # create new time columns
exp_stateTable['time1'] = pd.to_datetime(exp_stateTable['t1'])
exp_stateTable['t0'] = (pd.to_datetime(exp_stateTable['t0']) - initialTime)/np.timedelta64(1,'s')
exp_stateTable['t1'] = (pd.to_datetime(exp_stateTable['t1']) - initialTime)/np.timedelta64(1,'s')
# calculate minimum t0 by presence
min_t0 = exp_stateTable.groupby(['presence'])['t0'].min()#.to_frame().rename({'t0':'min_t0'},inplace = True)
min_t0 = pd.Series(min_t0, name = 'min_t0')
min_t0 = pd.DataFrame(min_t0).reset_index()
# join to exp_stateTable as presence_time_0
exp_stateTable = pd.merge(left = exp_stateTable, right = min_t0, how = u'left',left_on = 'presence', right_on = 'presence')
# subtract presence_time_0 from t0 and t1
exp_stateTable['t0'] = exp_stateTable['t0'] - exp_stateTable['min_t0']
exp_stateTable['t1'] = exp_stateTable['t1'] - exp_stateTable['min_t0']
# drop presence_time_0 from exp_stateTable
exp_stateTable['hour'] = pd.DatetimeIndex(exp_stateTable['time0']).hour # get the hour of the day
None and tail > 5000:
table = html.Div([
html.H5(children='Please enter a number less than 5000 into tail',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
elif str(type(head)) == "<class 'float'>":
table = html.Div([
html.H5(children='Please enter a positive integer into head',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
elif str(type(tail)) == "<class 'float'>":
table = html.Div([
html.H5(children='Please enter a positive integer into tail',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
else:
table = showing_msg_data(head, tail)
return table
except Exception as e:
print(e)
###### reset n_clicks of the view_message button
@app.callback(Output('view_message', 'n_clicks'),
[Input('msg_head', 'value'), Input('msg_tail', 'value')])
def view_message_button(head, tail):
if len(msg_headList) >= 1 and msg_headList[-1] != head:
return None
elif len(msg_tailList) >= 1 and msg_tailList[-1] != tail:
return None
########## show all message users
@app.callback(Output('show_all_message_users', 'children'),
[Input('get_message_users', 'n_clicks')
])
def show_all_message_users(n_clicks):
table = html.Div()
if n_clicks is not None:
try:
all_users = update_message_data[-1][2]
tab = []
column = []
column.append(
html.Th('Users',
style={'border': '1px solid black', 'background-color': 'lightslategray', 'color': 'white',
'text-align': 'center'}))
tab.append(html.Tr(children=column))
for user in all_users:
row_content = []
row_content.append(html.Td(user, style={'border': '1px solid black', 'text-align': 'center'}))
tab.append(html.Tr(children=row_content, style={'height': '5px'}))
table = html.Div([
html.Table(children=tab,
style={'border-collapse': 'collapse',
'border': '1px solid black',
'width': '200px'
})
])
return table
except Exception as e:
print(e)
connected_messageList = []
######## show connected users of specific user in message dataset
@app.callback(Output('show_connected_message_users', 'children'),
[Input('connected_message_users', 'n_clicks'),
Input('user_message', 'value')
])
def show_connected_message_users(n_clicks, searchUser):
table = html.Div()
if n_clicks is not None:
connected_messageList.append(searchUser)
try:
message_data = update_message_data[-1][-1]
all_users = update_message_data[-1][2]
if searchUser is None or len(searchUser) == 0:
table = html.Div([
html.H5(children='Please select user',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
elif searchUser not in all_users:
table = html.Div([
html.H5(children='User does not exist',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
else:
connected_users = message_data.get_connected_users(searchUser)
if len(connected_users) == 0:
table = html.Div([
html.H5(children='No connected users',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
else:
tab = []
column = []
column.append(html.Th('Connected Users',
style={'border': '1px solid black', 'background-color': 'lightslategray',
'color': 'white', 'text-align': 'center'}))
tab.append(html.Tr(children=column))
for user in connected_users:
row_content = []
row_content.append(html.Td(user, style={'border': '1px solid black', 'text-align': 'center'}))
tab.append(html.Tr(children=row_content, style={'height': '5px'}))
table = html.Div([
html.Table(children=tab,
style={'border-collapse': 'collapse',
'border': '1px solid black',
'width': '200px'
})
])
return table
except Exception as e:
print(e)
###### reset n_clicks of the connected_message_users button
@app.callback(Output('connected_message_users', 'n_clicks'),
[Input('user_message', 'value')])
def connected_message_users_button(user):
if len(connected_messageList) >= 1 and connected_messageList[-1] != user:
return None
message_record1 = []
message_record2 = []
####### show message records between 2 input users
@app.callback(Output('show_records_message_users', 'children'),
[Input('message_user3', 'value'),
Input('message_user2', 'value'),
Input('record_message_users', 'n_clicks')
])
def between_message_users_records(user_1, user_2, click):
table = html.Div()
if click is not None:
message_record1.append(user_1)
message_record2.append(user_2)
try:
message_data = update_message_data[-1][-1]
all_users = update_message_data[-1][2]
if user_1 is None or user_2 is None or len(user_1) == 0 or len(user_2) == 0:
table = html.Div([
html.H5(children='Please select users',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
elif user_2 not in all_users:
table = html.Div([
html.H5(children='User 2 does not exist',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
elif user_1 not in all_users:
table = html.Div([
html.H5(children='User 1 does not exist',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
else:
dict_list = []
for record in message_data.get_records(user_1, user_2):
dict_list.append(vars(record))
if len(dict_list) == 0:
table = html.Div([
html.H5(children='No records between two users',
style={'color': 'red', 'font-size': '20px', 'padding-left': '20px'})])
else:
header = list(dict_list[0].keys())
tab = []
column = []
for i in header:
column.append(
html.Th(i, style={'border': '1px solid black', 'background-color': 'lightslategray',
'color': 'white', 'text-align': 'center'}))
tab.append(html.Tr(children=column))
for j in dict_list:
value = list(j.values())
row_content = []
for x in value:
row_content.append(html.Td(x, style={'border': '1px solid black', 'text-align': 'center'}))
tab.append(html.Tr(children=row_content, style={'height': '5px'}))
table = html.Div([
html.Table(children=tab,
style={'border-collapse': 'collapse',
'border': '1px solid black',
'width': '100%'
})
])
return table
except Exception as e:
print(e)
###### reset n_clicks of the record_message_users button
@app.callback(Output('record_message_users', 'n_clicks'),
[Input('message_user3', 'value'), Input('message_user2', 'value')])
def record_message_users_button(user1, user2):
if len(message_record1) >= 1 and message_record1[-1] != user1:
return None
elif len(message_record2) >= 1 and message_record2[-1] != user2:
return None
msguser_visuconn = []
###### Visualize connection between users in message dataset
@app.callback(Output('show_visualize_message_connection', 'children'),
[Input('visualize_message_connection', 'n_clicks'), Input('msg_user_visu_conn', 'value')
])
def show_visualize_message_connection(n_clicks, user):
try:
table = html.Div()
if n_clicks is not None:
msguser_visuconn.append(user)
message_data = update_message_data[-1][-1]
filename = update_message_data[-1][0]
name_id = ''
if user is None or len(user) == 0:
name_id = filename
visu_conn = message_data.visualize_connection_network(gui=True, fig_id=name_id)
else:
for x in user:
name_id = name_id + str(x)
visu_conn = message_data.visualize_connection_network(users=user, gui=True, fig_id=name_id)
return table
except Exception as e:
print(e)
###### reset n_clicks of the visualize_message_connection button
@app.callback(Output('visualize_message_connection', 'n_clicks'),
[Input('msg_user_visu_conn', 'value')])
def visualize_message_connection_button(user):
if len(msguser_visuconn) >= 1 and msguser_visuconn[-1] != user:
return None
######## return message file name to the next page
@app.callback(dash.dependencies.Output('file_name_message', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def file_name_message_nextpage(pathname):
file_message = pathname.split('/')
for a in message_option:
if len(file_message) > 2 and file_message[2] == a[0]:
for data in message_data_list:
dataNew = data[0].split('.')
if file_message[2] == dataNew[0]:
update_message_data.append(data)
name = "Message Dataset :<%s>" % file_message[2]
return SimpleTitleBar(name)
######## get message option
@app.callback(dash.dependencies.Output('message_option', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def message_option_visu(pathname):
file_message = pathname.split('/')
for a in message_option:
if len(file_message) > 2 and file_message[2] == a[0]:
return a[1]
######## return message records card to the home page
@app.callback(dash.dependencies.Output('message_record_home', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def message_record_card_home(pathname):
if pathname == '/Dataset':
if len(message_data_list) > 0:
message_data_name = []
for x in message_data_list:
a = x[0]
record = len(x[3])
link = '/Message_Dataset/' + str(a)
message_data_name.append(DataSetCard(a, record, link))
message_data_name.append(AddDataSetCard('message'))
return message_data_name
else:
return AddDataSetCard('message')
########### get message visualize option for navigation bar
@app.callback(dash.dependencies.Output('navbar_message_visu', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def get_navbar_visu_message(pathname):
path_set = pathname.split('/')
if len(path_set) == 4 and path_set[-3] == 'Message_Dataset':
if path_set[-1] == 'view_data':
return SimpleTitleBar("Message Dataset :<%s> All Data" % path_set[-2])
elif path_set[-1] == 'all_users':
return SimpleTitleBar("Message Dataset :<%s> All Users" % path_set[-2])
elif path_set[-1] == 'connected_users':
return SimpleTitleBar("Message Dataset :<%s> Connected Users" % path_set[-2])
elif path_set[-1] == 'records_between_users':
return SimpleTitleBar("Message Dataset :<%s> Records Between Users" % path_set[-2])
elif path_set[-1] == 'visualize_connection':
return SimpleTitleBar("Message Dataset :<%s> Visualize Connection" % path_set[-2])
def get_msg_call_users():
msg_users = update_message_data[-1][2]
add_msg = []
for user in msg_users:
add_msg.append({'label': user, 'value': user})
return add_msg
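# Illustrative shape of the list returned by get_msg_call_users(), which is the
# 'options' format dcc.Dropdown expects (user ids below are made up):
#     [{'label': 'user_001', 'value': 'user_001'},
#      {'label': 'user_002', 'value': 'user_002'}]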
######## select message user for get connected users using dropdown
@app.callback(Output('user_message', 'options'),
[Input('select_msg_user_connected', 'n_clicks')
])
def select_msg_users_connected(n_clicks):
if n_clicks is not None:
return get_msg_call_users()
######## select message user 1 for get msg records between 2 users using dropdown
@app.callback(Output('message_user3', 'options'),
[Input('select_msg_user_2_records', 'n_clicks')
])
def select_1_msg_user_2_record(n_clicks):
if n_clicks is not None:
return get_msg_call_users()
######## select message user 2 for get msg records between 2 users using dropdown
@app.callback(Output('message_user2', 'options'),
[Input('select_msg_user_2_records', 'n_clicks')
])
def select_2_msg_user_2_record(n_clicks):
if n_clicks is not None:
return get_msg_call_users()
######## select message user for get visualize connection using dropdown
@app.callback(Output('msg_user_visu_conn', 'options'),
[Input('select_msg_user_visu_conn', 'n_clicks')
])
def select_msg_users_visu_conn(n_clicks):
if n_clicks is not None:
return get_msg_call_users()
#### end of message dataset callbacks
####### show all available datasets in the dashboard of call, message, cell dataset page
@app.callback(dash.dependencies.Output('all_dataset', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def show_all_dataset(pathname):
output = []
if pathname == '/Call_Dataset' or pathname == '/Cell_Dataset' or pathname == '/Message_Dataset':
if len(call_data_list) > 0:
tempory.clear()
output.append(html.P('Call Datasets', style={'font-size': 17, 'color': 'white', 'margin-bottom': 0}))
for x in call_data_list:
a = x[0]
output.append(dcc.Link(a, href='/Call_Dataset/' + str(a), style={'color': 'lightgreen'}))
output.append(html.Br())
if len(cell_data_list) > 0:
tempory_cell.clear()
output.append(html.Br())
output.append(html.P('Cell Datasets', style={'font-size': 17, 'color': 'white', 'margin-bottom': 0}))
for x in cell_data_list:
a = x[0]
output.append(dcc.Link(a, href='/Cell_Dataset/' + str(a), style={'color': 'lightgreen'}))
output.append(html.Br())
if len(message_data_list) > 0:
tempory_message.clear()
output.append(html.Br())
output.append(html.P('Message Datasets', style={'font-size': 17, 'color': 'white', 'margin-bottom': 0}))
for x in message_data_list:
a = x[0]
output.append(dcc.Link(a, href='/Message_Dataset/' + str(a), style={'color': 'lightgreen'}))
output.append(html.Br())
return html.Div(children=output)
####### show all available datasets in the dashboard of all visualization page
@app.callback(dash.dependencies.Output('all_dataset_visu', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def show_all_dataset_visu(pathname):
path_set = pathname.split('/')
output = []
if len(path_set) >= 3:
if len(call_data_list) > 0:
output.append(html.P('Call Datasets', style={'font-size': 17, 'color': 'white', 'margin-bottom': 0}))
for x in call_data_list:
a = x[0]
output.append(dcc.Link(a, href='/Call_Dataset/' + str(a), style={'color': 'lightgreen'}))
output.append(html.Br())
if len(cell_data_list) > 0:
output.append(html.Br())
output.append(html.P('Cell Datasets', style={'font-size': 17, 'color': 'white', 'margin-bottom': 0}))
for x in cell_data_list:
a = x[0]
output.append(dcc.Link(a, href='/Cell_Dataset/' + str(a), style={'color': 'lightgreen'}))
output.append(html.Br())
if len(message_data_list) > 0:
output.append(html.Br())
output.append(html.P('Message Datasets', style={'font-size': 17, 'color': 'white', 'margin-bottom': 0}))
for x in message_data_list:
a = x[0]
output.append(dcc.Link(a, href='/Message_Dataset/' + str(a), style={'color': 'lightgreen'}))
output.append(html.Br())
return html.Div(children=output)
##################################################################################
##################################################################################
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')
])
def display_page(pathname):
try:
path_set = pathname.split('/')
if pathname == '/Call_Dataset':
return call_dataset
elif pathname == '/Cell_Dataset':
return cell_dataset
elif pathname == '/Message_Dataset':
return message_dataset
elif len(path_set) == 3 and path_set[-2] == 'Call_Dataset':
return call_dataset_file
elif len(path_set) == 4 and path_set[-3] == 'Call_Dataset' and path_set[-1] == 'view_data':
return view_all_call_data
elif len(path_set) | |
that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
INPUT_FILTER =
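# (Illustrative example only, not part of the stock template: a filter that
#  strips a vendor prefix from every input file before parsing could be set as
#  INPUT_FILTER = "sed -e 's/DEPRECATED_//g'" )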
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER ) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered | |
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'FlowLog'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_applicationloadbalancers_flowlogs_get(self, datacenter_id, application_load_balancer_id, **kwargs): # noqa: E501
"""List ALB Flow Logs # noqa: E501
List the Flow Logs for the specified Application Load Balancer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_applicationloadbalancers_flowlogs_get(datacenter_id, application_load_balancer_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param application_load_balancer_id: The unique ID of the Application Load Balancer. (required)
:type application_load_balancer_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: FlowLogs
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_applicationloadbalancers_flowlogs_get_with_http_info(datacenter_id, application_load_balancer_id, **kwargs) # noqa: E501
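# Sketch of a call to the convenience wrapper above (illustrative; the UUIDs
# and depth are placeholders, and it assumes the generated FlowLogs model
# exposes an 'items' list):
#
#     flow_logs = alb_api.datacenters_applicationloadbalancers_flowlogs_get(
#         datacenter_id='dc-uuid',
#         application_load_balancer_id='alb-uuid',
#         depth=1)
#     for flow_log in flow_logs.items:
#         print(flow_log.id)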
def datacenters_applicationloadbalancers_flowlogs_get_with_http_info(self, datacenter_id, application_load_balancer_id, **kwargs): # noqa: E501
"""List ALB Flow Logs # noqa: E501
List the Flow Logs for the specified Application Load Balancer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_applicationloadbalancers_flowlogs_get_with_http_info(datacenter_id, application_load_balancer_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param application_load_balancer_id: The unique ID of the Application Load Balancer. (required)
:type application_load_balancer_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(FlowLogs, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'application_load_balancer_id',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_applicationloadbalancers_flowlogs_get" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_applicationloadbalancers_flowlogs_get`") # noqa: E501
# verify the required parameter 'application_load_balancer_id' is set
if self.api_client.client_side_validation and ('application_load_balancer_id' not in local_var_params or # noqa: E501
local_var_params['application_load_balancer_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `application_load_balancer_id` when calling `datacenters_applicationloadbalancers_flowlogs_get`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_applicationloadbalancers_flowlogs_get`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_applicationloadbalancers_flowlogs_get`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'application_load_balancer_id' in local_var_params:
path_params['applicationLoadBalancerId'] = local_var_params['application_load_balancer_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'FlowLogs'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_applicationloadbalancers_flowlogs_patch(self, datacenter_id, application_load_balancer_id, flow_log_id, application_load_balancer_flow_log_properties, **kwargs): # noqa: E501
"""Partially modify ALB Flow Logs # noqa: E501
Update the properties of the specified Application Load Balancer Flow Log. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_applicationloadbalancers_flowlogs_patch(datacenter_id, application_load_balancer_id, flow_log_id, application_load_balancer_flow_log_properties, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param application_load_balancer_id: The unique ID of the Application Load Balancer. (required)
:type application_load_balancer_id: str
:param flow_log_id: The unique ID of the Flow Log. (required)
:type flow_log_id: str
:param application_load_balancer_flow_log_properties: The properties of the ALB Flow Log to be updated. (required)
:type application_load_balancer_flow_log_properties: FlowLogProperties
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: | |
## hexcontrol is a class whose objects have methods for working with a completed hex tree
from __future__ import print_function
import world
import fileinput
import hexagon
import math
import pylab
import os
import random
import numpy
class hexcontrol:
def __init__(self,hexagon,my_world):
self.source_hex = hexagon.get_root()
self.roots = []
self.my_world = my_world
self.lowest_center_hex = self.source_hex
while self.lowest_center_hex.children != []:
self.lowest_center_hex = self.lowest_center_hex.children[0]
if self.lowest_center_hex != self.source_hex: # we have a non-empty tree
self.sort_adjacent_hexs()
# calculate the convex hull of lowest hexagon field
self.convex_hull = []
for i in range(0,6): # go in each of the six directions from the center lowest hex
temp_hex = self.lowest_center_hex
self.convex_hull_count = 0
while len(temp_hex.adjacent) == 6: # we have not reached the edge of the hex field
self.convex_hull_count += 1
temp_hex = temp_hex.adjacent[i]
self.convex_hull.append(temp_hex.location)
self.convex_hull_outside_diameter = my_world.distance_between(self.convex_hull[0],self.convex_hull[3])
#update the field of view of my_world to hold hex field
my_world.x1_lower_limit = self.source_hex.location.x1 - (640.0/480.0)*1.1*self.convex_hull_outside_diameter/2
my_world.x1_upper_limit = self.source_hex.location.x1 + (640.0/480.0)*1.1*self.convex_hull_outside_diameter/2
my_world.x2_lower_limit = self.source_hex.location.x2 - 1.1*self.convex_hull_outside_diameter/2
my_world.x2_upper_limit = self.source_hex.location.x2 + 1.1*self.convex_hull_outside_diameter/2
def change_scale(self,new_outside_diameter):
###change scale of entire hex field so outside diameter of source hex matches new_outside_diameter ###
if not(new_outside_diameter > 0):
raise badOutsideDiameter
if self.roots == []:
self.find_roots()
scale_factor = new_outside_diameter/self.source_hex.outside_diameter
# go through all the hexagons at each level of the tree and update their locations.
for temp_root_hex in self.roots:
for temp_key,temp1_hex in temp_root_hex.hexagon_dictionary.items():
#make a line from the center of the hex field to our current hexagon
temp_line = world.line(self.source_hex.location,temp1_hex.location)
# calculate new point along ray properly scaled
temp1_hex.location = temp_line.along_line(temp_line,scale_factor)
# change the outside diameter for our current hexagon
temp1_hex.outside_diameter = temp1_hex.outside_diameter * scale_factor
# update the locations for the convex hull
for i in range(0,len(self.convex_hull)):
#make a line from the center of the hex field to our current convex hull
temp_line = world.line(self.source_hex.location,self.convex_hull[i])
# make end of the line the new location for our current hexagon
self.convex_hull[i] = temp_line.along_line(temp_line,scale_factor)
return True
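# Worked sketch of the scaling step above (hedged: world.line(p1, p2).along_line(line, f)
# is assumed to return the point p1 + f*(p2 - p1), which is how change_scale uses it):
#
#   center = (0.0, 0.0); hex_at = (4.0, 2.0); scale_factor = 1.5
#   scaled = (center[0] + scale_factor*(hex_at[0] - center[0]),
#             center[1] + scale_factor*(hex_at[1] - center[1]))   # -> (6.0, 3.0)
#
# Every hexagon slides along the ray from the field center by the same factor and its
# outside_diameter is multiplied by that factor, so the whole field is rescaled uniformly.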
def change_orientation(self,new_orientation):
###rotate entire hex field so that orientation of source hex points in direction of new_orientation ###
if not((new_orientation >= 0) and (new_orientation <= 360)):
raise badOrientation
if self.roots == []:
self.find_roots()
delta_orientation = (new_orientation - self.source_hex.orientation)%360
# go through all the hexagons at each level of the tree and update their locations.
for temp_root_hex in self.roots:
for temp_key,temp1_hex in temp_root_hex.hexagon_dictionary.items():
#make a line from the center of the hex field to our current hexagon
temp_line = world.line(self.source_hex.location,temp1_hex.location)
# rotate said line about the center of the hex field
temp_line = temp_line.rotate_line(temp_line,delta_orientation)
# make end of the line the new location for our current hexagon
temp1_hex.location = temp_line.p2
# change the orientation for our current hexagon
temp1_hex.orientation = (temp1_hex.orientation + delta_orientation) % 360
# update the locations for the convex hull
for i in range(0,len(self.convex_hull)):
#make a line from the center of the hex field to our current convex hull
temp_line = world.line(self.source_hex.location,self.convex_hull[i])
# rotate said line about the center of the hex field
temp_line = temp_line.rotate_line(temp_line,delta_orientation)
# make end of the line the new location for our current hexagon
self.convex_hull[i] = temp_line.p2
return True
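# Worked sketch of the rotation step above (hedged: world.line.rotate_line(line, angle) is
# assumed to rotate the line's endpoint p2 about its start point p1 by `angle` degrees):
#
#   import math
#   def rotate_about(center, p, degrees):
#       a = math.radians(degrees)
#       dx, dy = p[0] - center[0], p[1] - center[1]
#       return (center[0] + dx*math.cos(a) - dy*math.sin(a),
#               center[1] + dx*math.sin(a) + dy*math.cos(a))
#
#   rotate_about((0.0, 0.0), (1.0, 0.0), 90)   # -> (0.0, 1.0) up to rounding
#
# change_orientation applies the same delta rotation to every hexagon location and every
# convex-hull point, then adds the delta to each hexagon's orientation modulo 360.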
def translate_hex_field(self,new_location):
### translates entire hex field to be centered at new_location ###
# assumes self is a properly initialized hex controller
if not isinstance(new_location, world.location):
raise badLocation
if self.roots == []:
self.find_roots()
## print('IN translate_hex_field')
## print('number of roots is'+str(len(self.roots)))
delta_location = new_location.subtract(self.source_hex.location)
## print('new_location is'+str(new_location)+' and delta_location is'+str(delta_location))
# go through all the hexagons at each level of the tree and update their locations.
for temp_root_hex in self.roots:
for temp_key,temp1_hex in temp_root_hex.hexagon_dictionary.items():
temp1_hex.location = temp1_hex.location.add(delta_location)
# update the locations for the convex hull
for i in range(0,len(self.convex_hull)):
self.convex_hull[i] = self.convex_hull[i].add(delta_location)
#update the field of view of my_world to hold hex field
self.my_world.x1_lower_limit = self.source_hex.location.x1 - (640.0/480.0)*1.1*self.convex_hull_outside_diameter/2
self.my_world.x1_upper_limit = self.source_hex.location.x1 + (640.0/480.0)*1.1*self.convex_hull_outside_diameter/2
self.my_world.x2_lower_limit = self.source_hex.location.x2 - 1.1*self.convex_hull_outside_diameter/2
self.my_world.x2_upper_limit = self.source_hex.location.x2 + 1.1*self.convex_hull_outside_diameter/2
return True
def line_of_hexagons(self,p_start,p_end,use_convex_hull = True):
### generator which yields the hexagons forming a line from p_start to p_end ###
# Note: p_start must be within the hexagon field
# guarantees that first hex contains p_start and last hex contains p_end or else p_end is off the field
## print('line of hexagons called with use_convex_hull = '+str(use_convex_hull))
current_hex = self.lowest_hex_containing_point(p_start,use_convex_hull)
## print('starting hex location is '+str(current_hex.location))
reference_line = world.line(current_hex.location,p_end)
## print('reference line is '+str(reference_line))
current_distance = current_hex.distance_between(current_hex.location,p_end)
yield current_hex
for temp_hex in current_hex.adjacent:
if temp_hex.line_intersects(reference_line):
current_hex = temp_hex
current_distance = current_hex.distance_between(current_hex.location,p_end)
break
else: # none of the adjacent hexes have the reference line crossing them; we are done
return
yield current_hex
while current_hex.is_interior(p_end) != True and len(current_hex.adjacent) == 6:
# zigzag across reference line until we get there, stopping when we get to p_end or reach the edge
candidate_hexs = []
for temp_hex in current_hex.adjacent: # see which adjacent hexes are on the other side of the reference line
if temp_hex.segments_intersect(p_start,p_end,current_hex.location,temp_hex.location):
# we found an adjacent hex on the other side of the reference line
candidate_hexs.append(temp_hex)
if candidate_hexs == []: # we did not find any hexes on the other side of the reference line
return
else:
old_current_hex = current_hex
for temp_hex in candidate_hexs:
if temp_hex.distance_between(temp_hex.location,p_end) < current_distance:
current_hex = temp_hex
current_distance = temp_hex.distance_between(temp_hex.location,p_end)
if old_current_hex == current_hex: # none of the candidate hexes are better than before
return
yield current_hex
# when we get this far, current_hex contains p_end
return
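# Hedged usage sketch for the generator above (`controller` is an initialized hexcontrol;
# world.location(x1, x2) is assumed from its use elsewhere in this module):
#
#   p_start = controller.lowest_center_hex.location
#   p_end = world.location(p_start.x1 + 10.0, p_start.x2)
#   for hex_on_line in controller.line_of_hexagons(p_start, p_end):
#       print(hex_on_line.location)
#
# The first hexagon yielded contains p_start; iteration stops once a hexagon contains
# p_end or the zigzag walk reaches the edge of the field.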
def lowest_hex_containing_point(self,p,use_convex_hull=True):
### returns leaf hexagon containing point p, p must be within the original root hexagon ###
## print(' ')
## print('in method lowest_hex_containing_point')
## print('point is '+str(p))
## print('use_convex_hull is '+str(use_convex_hull))
## print('within_convex_hull is '+str(self.my_world.within_convex_hull(p,self.convex_hull)))
if use_convex_hull and self.my_world.within_convex_hull(p,self.convex_hull) != True:
raise locationNotInWorld
else:
current_hex = self.source_hex #points to the hex whose children are examined for containing p
for i in range(0,self.source_hex.depth()): # go through levels of hexagon tree, stopping one above bottom
## print('depth in tree is '+str(i))
temp_counter = -1
for temp_hex in current_hex.children:
temp_counter += 1
## print('looking at child number '+str(temp_counter))
if temp_hex.is_interior(p) == True: # we found the child hex containing the point
## print('p is interior to hexagon at '+str(temp_hex.location))
## print('hexagon diameter is '+str(temp_hex.outside_diameter))
current_hex = temp_hex
break
else:
# We did not find a child hex containing the point
# Either there was a rounding error or we are within the convex hull of the lowest level
# but not in a hex at this level. Find the closest hex from among the children
temp_hex = current_hex.children[0]
temp_distance = temp_hex.distance_between(temp_hex.location,p)
for other_hex in current_hex.children[1:]:
other_distance = other_hex.distance_between(p,other_hex.location)
if other_distance < temp_distance:
temp_hex = other_hex
temp_distance = other_distance
## print('temp_hex location is '+str(temp_hex.location))
current_hex = temp_hex
return current_hex
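# Hedged usage sketch for the lookup above (`controller` is an initialized hexcontrol):
#
#   p = controller.lowest_center_hex.location
#   leaf = controller.lowest_hex_containing_point(p)
#   print(leaf.location, leaf.outside_diameter)
#
# With use_convex_hull=True (the default) a point outside the hull raises
# locationNotInWorld; passing use_convex_hull=False skips that test and returns the
# nearest leaf instead, which is useful near the ragged edge of the field.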
def generate_random_costs(self):
### populate costs and benefits at every level of the tree: 3 random costs per adjacent hex and 2 benefits per hexagon ###
temp_hex = self.source_hex
## print('depth of tree is '+str(self.source_hex.depth()))
for i in range(0,self.source_hex.depth()+1): # go through levels of hexagon tree
print('generating random costs and benefits for level '+str(i))
for temp_key,temp1_hex in temp_hex.get_center().hexagon_dictionary.items():
temp1_hex.costs = [[random.random(),random.random(),random.random()] for j in temp1_hex.adjacent]
# temp1_hex.benefit = [random.random(),random.random()]
temp1_hex.benefit = [0.0,0.0]
if i < self.source_hex.depth():
## print('depth of tree is '+str(self.source_hex.depth()))
temp_hex = temp_hex.children[0] # make it point to the center hex of the next level
def sort_adjacent_hexs(self):
### goes through the completed tree doing a clockwise sort of each hexagon's adjacent list ###
temp_hex = self.source_hex
for i in range(0,self.source_hex.depth()+1): # go through levels of hexagon tree
for temp_key,temp1_hex in temp_hex.get_center().hexagon_dictionary.items():
temp1_hex.sort_adjacent()
if i != self.source_hex.depth():
temp_hex = temp_hex.children[0] # make it point to the center hex of the next level
def check_children(self):
temp_hex = self.source_hex
for i in range(0,self.source_hex.depth()+1): # go through levels of hexagon tree
for temp_key in temp_hex.get_center().hexagon_dictionary: #iterate through the dictionary for that level
temp1_hex = temp_hex.get_center().hexagon_dictionary[temp_key]
if len(temp1_hex.children) != 0 and len(temp1_hex.children) != 7: #Houston, we have a problem
print('temp_hex number of children is'+str(len(temp1_hex.children)))
print('depth of hex is'+str(temp1_hex.height()))
print('key is '+str(temp1_hex.key))
print(str(temp1_hex.children))
if i != self.source_hex.depth():
temp_hex = temp_hex.children[0] # make it point to the center hex of the next level
def compare_centers(self):
# makes a list distances between locations for every pair of hexs in the dictionary at current and | |
(`pulumi.Input[str]`) - (string)
* `domainId` (`pulumi.Input[str]`) - Required if `domain_name` not provided. (string)
* `domainName` (`pulumi.Input[str]`) - Required if `domain_id` not provided. (string)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `tenant_id` (`pulumi.Input[str]`) - Azure tenant ID to use (string)
* `tenantName` (`pulumi.Input[str]`) - Required if `tenant_id` not provided. (string)
* `trustId` (`pulumi.Input[str]`) - (string)
* `username` (`pulumi.Input[str]`) - (string)
* `loadBalancer` (`pulumi.Input[dict]`) - (list maxitems:1)
* `createMonitor` (`pulumi.Input[bool]`) - (bool)
* `floatingNetworkId` (`pulumi.Input[str]`) - (string)
* `lbMethod` (`pulumi.Input[str]`) - (string)
* `lbProvider` (`pulumi.Input[str]`) - (string)
* `lbVersion` (`pulumi.Input[str]`) - (string)
* `manageSecurityGroups` (`pulumi.Input[bool]`) - (bool)
* `monitorDelay` (`pulumi.Input[str]`) - Default `60s` (string)
* `monitorMaxRetries` (`pulumi.Input[float]`) - Default 5 (int)
* `monitorTimeout` (`pulumi.Input[str]`) - Default `30s` (string)
* `subnetId` (`pulumi.Input[str]`) - (string)
* `useOctavia` (`pulumi.Input[bool]`) - (bool)
* `metadata` (`pulumi.Input[dict]`) - (list maxitems:1)
* `requestTimeout` (`pulumi.Input[float]`) - (int)
* `searchOrder` (`pulumi.Input[str]`) - (string)
* `route` (`pulumi.Input[dict]`) - (list maxitems:1)
* `routerId` (`pulumi.Input[str]`) - (string)
* `vsphereCloudProvider` (`pulumi.Input[dict]`) - RKE Vsphere Cloud Provider config for Cloud Provider [rke-vsphere-cloud-provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/) Extra argument `name` is required on `virtual_center` configuration. (list maxitems:1)
* `disk` (`pulumi.Input[dict]`) - (list maxitems:1)
* `scsiControllerType` (`pulumi.Input[str]`) - (string)
* `global` (`pulumi.Input[dict]`) - (list maxitems:1)
* `datacenters` (`pulumi.Input[str]`) - (string)
* `insecureFlag` (`pulumi.Input[bool]`) - (bool)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`pulumi.Input[float]`) - (int)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `network` (`pulumi.Input[dict]`) - Network for GKE cluster (string)
* `publicNetwork` (`pulumi.Input[str]`) - (string)
* `virtualCenters` (`pulumi.Input[list]`) - (List)
* `datacenters` (`pulumi.Input[str]`) - (string)
* `name` (`pulumi.Input[str]`) - Name of cluster registration token (string)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `soapRoundtripCount` (`pulumi.Input[float]`) - (int)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `workspace` (`pulumi.Input[dict]`) - (list maxitems:1)
* `datacenter` (`pulumi.Input[str]`) - (string)
* `defaultDatastore` (`pulumi.Input[str]`) - (string)
* `folder` (`pulumi.Input[str]`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `resourcepoolPath` (`pulumi.Input[str]`) - (string)
* `server` (`pulumi.Input[str]`) - (string)
* `dns` (`pulumi.Input[dict]`) - RKE dns add-on. Just for Rancher v2.2.x (list maxitems:1)
* `nodeSelector` (`pulumi.Input[dict]`) - Node selector for RKE Ingress (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `reverseCidrs` (`pulumi.Input[list]`) - DNS add-on reverse cidr (list)
* `upstreamNameservers` (`pulumi.Input[list]`) - DNS add-on upstream nameservers (list)
* `ignoreDockerVersion` (`pulumi.Input[bool]`) - Ignore docker version. Default `true` (bool)
* `ingress` (`pulumi.Input[dict]`) - Kubernetes ingress configuration (list maxitems:1)
* `dnsPolicy` (`pulumi.Input[str]`) - Ingress controller DNS policy. `ClusterFirstWithHostNet`, `ClusterFirst`, `Default`, and `None` are supported. [K8S dns Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) (string)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `nodeSelector` (`pulumi.Input[dict]`) - Node selector for RKE Ingress (map)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `kubernetesVersion` (`pulumi.Input[str]`) - The Kubernetes master version (string)
* `monitoring` (`pulumi.Input[dict]`) - Kubernetes cluster monitoring (list maxitems:1)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `provider` (`pulumi.Input[str]`) - Provider for RKE monitoring (string)
* `network` (`pulumi.Input[dict]`) - Network for GKE cluster (string)
* `calicoNetworkProvider` (`pulumi.Input[dict]`) - Calico provider config for RKE network (list maxitems:1)
* `cloudProvider` (`pulumi.Input[str]`) - RKE options for Calico network provider (string)
* `canalNetworkProvider` (`pulumi.Input[dict]`) - Canal provider config for RKE network (list maxitems:1)
* `iface` (`pulumi.Input[str]`) - Iface config Flannel network provider (string)
* `flannelNetworkProvider` (`pulumi.Input[dict]`) - Flannel provider config for RKE network (list maxitems:1)
* `iface` (`pulumi.Input[str]`) - Iface config Flannel network provider (string)
* `options` (`pulumi.Input[dict]`) - RKE options for network (map)
* `plugin` (`pulumi.Input[str]`) - Plugin for RKE network. `canal` (default), `flannel`, `calico` and `weave` are supported. (string)
* `weaveNetworkProvider` (`pulumi.Input[dict]`) - Weave provider config for RKE network (list maxitems:1)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `nodes` (`pulumi.Input[list]`) - RKE cluster nodes (list)
* `address` (`pulumi.Input[str]`) - Address ip for node (string)
* `dockerSocket` (`pulumi.Input[str]`) - Docker socket for node (string)
* `hostnameOverride` (`pulumi.Input[str]`) - Hostname override for node (string)
* `internalAddress` (`pulumi.Input[str]`) - Internal ip for node (string)
* `labels` (`pulumi.Input[dict]`) - Labels for cluster registration token object (map)
* `nodeId` (`pulumi.Input[str]`) - Id for the node (string)
* `port` (`pulumi.Input[str]`) - Port for node. Default `22` (string)
* `roles` (`pulumi.Input[list]`) - Roles for the node. `controlplane`, `etcd` and `worker` are supported. (list)
* `sshAgentAuth` (`pulumi.Input[bool]`) - Use ssh agent auth. Default `false` (bool)
* `sshKey` (`pulumi.Input[str]`) - Node SSH private key (string)
* `sshKeyPath` (`pulumi.Input[str]`) - Node SSH private key path (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `prefixPath` (`pulumi.Input[str]`) - Prefix to customize Kubernetes path (string)
* `privateRegistries` (`pulumi.Input[list]`) - private registries for docker images (list)
* `isDefault` (`pulumi.Input[bool]`) - Set as default registry. Default `false` (bool)
* `password` (`pulumi.Input[str]`) - Registry password (string)
* `url` (`pulumi.Input[str]`) - Registry URL (string)
* `user` (`pulumi.Input[str]`) - Registry user (string)
* `services` (`pulumi.Input[dict]`) - Kubernetes cluster services (list maxitems:1)
* `etcd` (`pulumi.Input[dict]`) - Etcd options for RKE services (list maxitems:1)
* `backup_config` (`pulumi.Input[dict]`) - Backup options for etcd service. Just for Rancher v2.2.x (list maxitems:1)
* `enabled` (`pulumi.Input[bool]`) - Enable the authorized cluster endpoint. Default `true` (bool)
* `intervalHours` (`pulumi.Input[float]`) - Interval hours for etcd backup. Default `12` (int)
* `retention` (`pulumi.Input[float]`) - Retention for etcd backup. Default `6` (int)
* `s3BackupConfig` (`pulumi.Input[dict]`) - S3 config options for etcd backup (list maxitems:1)
* `access_key` (`pulumi.Input[str]`) - The AWS Client ID to use (string)
* `bucketName` (`pulumi.Input[str]`) - Bucket name for S3 service (string)
* `customCa` (`pulumi.Input[str]`) - Base64 encoded custom CA for S3 service. Use filebase64(<FILE>) for encoding file. Available from Rancher v2.2.5 (string)
* `endpoint` (`pulumi.Input[str]`) - Endpoint for S3 service (string)
* `folder` (`pulumi.Input[str]`) - Folder for S3 service. Available from Rancher v2.2.7 (string)
* `region` (`pulumi.Input[str]`) - The AWS Region to create the EKS cluster in. Default `us-west-2` (string)
* `secret_key` (`pulumi.Input[str]`) - The AWS Client Secret associated with the Client ID (string)
* `safeTimestamp` (`pulumi.Input[bool]`) - Safe timestamp for etcd backup. Default: `false` (bool)
* `caCert` (`pulumi.Input[str]`) - TLS CA certificate for etcd service (string)
* `cert` (`pulumi.Input[str]`) - TLS certificate for etcd service (string)
* `creation` (`pulumi.Input[str]`) - Creation option for etcd service (string)
* `externalUrls` (`pulumi.Input[list]`) - External urls for etcd service (list)
* `extraArgs` (`pulumi.Input[dict]`) - Extra arguments for scheduler service (map)
* `extraBinds` (`pulumi.Input[list]`) - Extra binds for scheduler service (list)
* `extraEnvs` (`pulumi.Input[list]`) - Extra environment for scheduler service (list)
* `gid` (`pulumi.Input[float]`) - Etcd service GID. Default: `0`. For Rancher v2.3.x or above (int)
* `image` (`pulumi.Input[str]`) - Docker image for scheduler service (string)
* `key` (`pulumi.Input[str]`) - TLS key for etcd service (string)
* `path` (`pulumi.Input[str]`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `retention` (`pulumi.Input[str]`) - Retention for etcd backup. Default `6` (int)
* `snapshot` (`pulumi.Input[bool]`) - Snapshot option for etcd service (bool)
* `uid` (`pulumi.Input[float]`) - Etcd service UID. Default: `0`. For Rancher v2.3.x or above (int)
* `kubeApi` (`pulumi.Input[dict]`) - Kube API options for RKE services (list maxitems:1)
* `admissionConfiguration` (`pulumi.Input[dict]`) - Admission configuration (map)
* `alwaysPullImages` (`pulumi.Input[bool]`) - Enable [AlwaysPullImages](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) Admission controller plugin. [Rancher docs](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options) Default: `false` (bool)
* `auditLog` (`pulumi.Input[dict]`) - K8s audit log configuration. (list maxitem: 1)
* `configuration` (`pulumi.Input[dict]`) - Event rate limit configuration. (map)
* `format` (`pulumi.Input[str]`) - Audit log format. Default: 'json' (string)
* `maxAge` (`pulumi.Input[float]`) - Audit log max age. Default: `30` (int)
* `maxBackup` (`pulumi.Input[float]`) - Audit log max backup. Default: `10` (int)
* `maxSize` (`pulumi.Input[float]`) - Audit log max size. Default: `100` (int)
* `path` (`pulumi.Input[str]`) - (Optional) Audit log path. Default: `/var/log/kube-audit/audit-log.json` (string)
* `policy` (`pulumi.Input[str]`) - Audit log policy json formatted string. `omitStages` and `rules` json fields are supported. Example: `policy = |
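# A minimal sketch of how the nested argument lists above map onto a resource call.
# Hedged: the dict key names are taken from the argument list documented above (the
# older dict-style pulumi_rancher2 inputs); every value below is a placeholder.
import pulumi_rancher2 as rancher2

cluster = rancher2.Cluster("example-cluster",
    description="RKE cluster with etcd backups to S3",
    rke_config={
        "network": {"plugin": "canal"},
        "nodes": [{
            "address": "10.0.0.10",
            "user": "rancher",
            "roles": ["controlplane", "etcd", "worker"],
            "sshKeyPath": "~/.ssh/id_rsa",
        }],
        "services": {
            "etcd": {
                "backup_config": {
                    "enabled": True,
                    "intervalHours": 12,
                    "retention": 6,
                    "s3BackupConfig": {
                        "access_key": "<aws-access-key>",
                        "secret_key": "<aws-secret-key>",
                        "bucketName": "etcd-backups",
                        "endpoint": "s3.amazonaws.com",
                        "region": "us-west-2",
                    },
                },
            },
        },
    })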
self.height + self.SPACER)
qp = QtGui.QPainter()
qp.begin(self.newPix)
qp.setWindow(-50, 0, self.COLUMNS * self.fontWidth, self.ROWS * self.fontHeight)
qp.drawPixmap(0, 0, self.qpix)
# self.transformationEngine.decorateText()
# highlight selected text
self.selector.highlightText()
# draw other selections
self.selector.drawSelections(qp)
# draw our cursor
self.drawCursor(qp)
self.drawBranch(qp)
self.drawSelected(qp)
qp.end()
def _getNewPixmap(self, width, height):
return QtGui.QPixmap(width, height)
def getColumnsbyRow(self, row):
if row < len(self.OPCODES):
obj = self.OPCODES[row]
return obj.get_length()
else:
return 0
def _getVA(self, offset):
if self.plugin:
return self.plugin.hintDisasmVA(offset)
return 0
def _drawRow(self, qp, cemu, row, asm, offset=-1):
log.debug('DRAW AN INSTRUCTION %s %s %s %s %s', asm, row, asm.get_name(), len(asm.get_operands(offset)), hex(self.getPageOffset()))
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
hex_data = asm.get_hex()
# write hexdump
cemu.writeAt(0, row, hex_data)
# fill with spaces
cemu.write((MNEMONIC_COLUMN - len(hex_data)) * ' ')
# let's color some branch instr
# if asm.isBranch():
# qp.setPen(QtGui.QPen(QtGui.QColor(255, 80, 0)))
# else:
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
mnemonic = asm.get_name()
cemu.write(mnemonic)
# leave some spaces
cemu.write((MNEMONIC_WIDTH - len(mnemonic)) * ' ')
if asm.get_symbol():
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
cemu.write_c('[')
qp.setPen(QtGui.QPen(QtGui.QColor('yellow'), 1, QtCore.Qt.SolidLine))
cemu.write(asm.get_symbol())
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
cemu.write_c(']')
self._write_operands(asm, qp, cemu, offset)
self._write_comments(asm, qp, cemu, offset)
def _write_comments(self, asm, qp, cemu, offset):
comments = asm.get_comments()
if comments:
cemu.write(30 * ' ')
qp.setPen(QtGui.QPen(QtGui.QColor(82, 192, 192), 1, QtCore.Qt.SolidLine))
cemu.write('; "{}"'.format(' '.join(comments)))
def _write_operands(self, asm, qp, cemu, offset):
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
operands = asm.get_operands(offset)
for operand in operands:
qp.save()
if operand[0] == dvm_types.OPERAND_REGISTER:
qp.setPen(QtGui.QPen(QtGui.QColor('white')))
cemu.write("%s" % operand[1])
elif operand[0] == dvm_types.OPERAND_LITERAL:
qp.setPen(QtGui.QPen(QtGui.QColor('yellow')))
cemu.write("%s" % operand[1])
elif operand[0] == dvm_types.OPERAND_RAW:
qp.setPen(QtGui.QPen(QtGui.QColor('red')))
cemu.write("%s" % operand[1])
elif operand[0] == dvm_types.OPERAND_OFFSET:
qp.setPen(QtGui.QPen(QtGui.QColor('purple')))
cemu.write("%s" % operand[1])
elif operand[0] & dvm_types.OPERAND_KIND:
if operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_STRING):
qp.setPen(QtGui.QPen(QtGui.QColor('red')))
cemu.write("%s" % operand[1])
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_METH):
qp.setPen(QtGui.QPen(QtGui.QColor('cyan')))
cemu.write("%s" % operand[1])
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_FIELD):
qp.setPen(QtGui.QPen(QtGui.QColor('green')))
cemu.write("%s" % operand[1])
elif operand[0] == (dvm_types.OPERAND_KIND + dvm_types.KIND_TYPE):
qp.setPen(QtGui.QPen(QtGui.QColor('blue')))
cemu.write("%s" % operand[1])
cemu.write(" ")
qp.restore()
def _write_instruction2(self, asm, qp, cemu):
s = asm.operands
idx = 0
qp.setPen(QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine))
for tok in asm.lexer:
if tok.lexpos > idx:
cemu.write(s[idx:tok.lexpos])
idx = tok.lexpos
qp.save()
if tok.type == 'REGISTER':
qp.setPen(QtGui.QPen(QtGui.QColor('white')))
if tok.type == 'NUMBER':
qp.setPen(QtGui.QPen(QtGui.QColor('green')))
cemu.write(tok.value)
qp.restore()
idx = tok.lexpos + len(tok.value)
if idx < len(s):
cemu.write(s[idx:])
def drawTextMode(self, qp):
log.debug('OFFSET %s', self.dataModel.getOffset())
# draw background
qp.fillRect(0, 0, self.COLUMNS * self.fontWidth, self.ROWS * self.fontHeight, self.backgroundBrush)
# set text pen&font
qp.setFont(self.font)
qp.setPen(self.textPen)
cemu = ConsoleEmulator(qp, self.ROWS, self.COLUMNS)
offset = 0
for i in range(self.ROWS):
if i < len(self.OPCODES):
asm = self.OPCODES[i]
self._drawRow(qp, cemu, i, asm, offset)
offset += asm.get_length()
def _getRowInPage(self, offset):
offset -= self.dataModel.getOffset()
size = 0
for i, asm in enumerate(self.OPCODES):
if size + asm.get_length() > offset:
return i
size += asm.get_length()
return None
def _getOffsetOfRow(self, row):
# of course, it could be done nicely, not like this
size = 0
for i, asm in enumerate(self.OPCODES):
if i == row:
return size
size += asm.get_length()
return None
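# Relationship between the two helpers above (hedged sketch): for offsets that land on an
# instruction boundary within the current page, _getOffsetOfRow inverts _getRowInPage.
#
#   row = self._getRowInPage(offset)        # absolute offset in, page-relative row out (or None)
#   if row is not None:
#       page_relative = self._getOffsetOfRow(row)
#       assert page_relative <= offset - self.dataModel.getOffset()
#
# Note the asymmetry: _getRowInPage subtracts the page offset itself, while
# _getOffsetOfRow returns an offset relative to the start of the page.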
def goTo(self, offset):
log.debug("GOTO %s", offset)
tsize = sum([opcode.get_length() for opcode in self.OPCODES])
if self.dataModel.getOffset() + tsize > offset > self.dataModel.getOffset():
# if in current page, move cursor
row = self._getRowInPage(offset)
if row is not None:
off_row = self._getOffsetOfRow(row)
diff = offset - self.dataModel.getOffset() - off_row # self.OPCODES[row].size
self.cursor.moveAbsolute(diff * 3, row)
self.draw(refresh=False)
else:
# else, move page
self.dataModel.goTo(offset)
self.FeedOpcodes(self.ROWS)
self.cursor.moveAbsolute(0, 0)
self.draw(refresh=True)
# TODO: getDisplayablePage() won't contain what we want to disasm. we will use dataModel
# in this view, getDisplayablePage will contain disasm text, because that is what is displayed
if self.widget:
self.widget.update()
def scrollPages(self, number, cachePix=None, pageOffset=None):
self.scroll(0, -number * self.ROWS, cachePix=cachePix, pageOffset=pageOffset)
def scroll_v(self, dy, cachePix=None, pageOffset=None):
log.debug('scroll_v %s %s %s %s', dy, cachePix, pageOffset, hex(self.getCursorAbsolutePosition()))
RowsToDraw = []
factor = abs(dy)
# repeat as many rows we have scrolled
for row in range(factor):
current_idx = None
if dy < 0:
tsize = sum([asm.get_length() for asm in self.OPCODES])
current_offset = self.dataModel.getOffset() + tsize
if current_offset not in self.CACHE_IDX_OPCODES_OFF:
log.debug('INVALID OFFSET %s', hex(current_offset))
return
current_idx = self.CACHE_IDX_OPCODES_OFF[current_offset] - 1
log.debug("IDX %s %s", current_idx, hex(current_offset))
if current_idx + 1 >= len(self.CACHE_OPCODES):
log.debug('END OF DATA')
return
current_idx += 1
if dy >= 0:
current_offset = self.dataModel.getOffset()
current_idx = self.CACHE_IDX_OPCODES_OFF[current_offset]
log.debug("IDX %s %s", current_idx, hex(current_offset))
# start = self.CACHE_OPCODES[self.CACHE_IDX_OPCODES_OFF[self.getCursorAbsolutePosition()]-1]
current_idx -= 1
newins = self.CACHE_OPCODES[current_idx]
if dy < 0:
self.dataModel.slide(self.OPCODES[0].get_length())
del self.OPCODES[0]
if dy >= 0:
self.dataModel.slide(-newins.get_length())
del self.OPCODES[len(self.OPCODES) - 1]
if dy < 0:
self.OPCODES.append(newins)
if dy > 0:
self.OPCODES.insert(0, newins)
if dy < 0:
RowsToDraw.append((self.ROWS + row, newins))
if dy > 0:
RowsToDraw.append((-row - 1, newins))
log.debug('ROW TO DRAW %s', RowsToDraw)
if len(RowsToDraw) < abs(dy):
# maybe we couldn't draw dy rows (possibly we reached the beginning of the data too early), recalculate dy
dy = len(RowsToDraw) * dy // abs(dy)
factor = abs(dy)
if not cachePix:
self.qpix.scroll(0, dy * self.fontHeight, self.qpix.rect())
qp = QtGui.QPainter()
if cachePix:
qp.begin(cachePix)
else:
qp.begin(self.qpix)
qp.setFont(self.font)
qp.setPen(self.textPen)
# erase rows that will disappear
if dy < 0:
qp.fillRect(0, (self.ROWS - factor) * self.fontHeight, self.fontWidth * self.COLUMNS,
factor * self.fontHeight, self.backgroundBrush)
if dy > 0:
qp.fillRect(0, 0, self.fontWidth * self.COLUMNS, factor * self.fontHeight, self.backgroundBrush)
cemu = ConsoleEmulator(qp, self.ROWS, self.COLUMNS)
for row, asm in RowsToDraw:
asm.Load()
self._drawRow(qp, cemu, dy + row, asm)
qp.end()
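# Hedged sketch of the bookkeeping scroll_v performs for a one-row scroll down (dy = -1),
# with illustrative numbers; instruction lengths come from the OPCODES cache:
#
#   page offset = 0x100, OPCODES = [ins@0x100 (2 bytes), ins@0x102 (4 bytes), ...]
#   tsize = sum of the visible instruction lengths           # page ends at 0x100 + tsize
#   newins = CACHE_OPCODES[CACHE_IDX_OPCODES_OFF[0x100 + tsize]]
#   dataModel.slide(+2)       # drop the 2-byte instruction at the top of the page
#   OPCODES.append(newins)    # the new instruction becomes the bottom row to redraw
#
# Scrolling up (dy > 0) is symmetric: the instruction just before the page offset is taken
# from the cache, the model slides back by its length, and it is inserted as row 0.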
def scroll(self, dx, dy, cachePix=None, pageOffset=None):
log.debug('scroll %s %s %s %s %s', dx, dy, self.dataModel.inLimits((self.dataModel.getOffset() - dx)), 'offset',
self.dataModel.getOffset())
if dx != 0:
if self.dataModel.inLimits((self.dataModel.getOffset() - dx)):
self.dataModel.slide(dx)
self.draw(refresh=True)
# self.scroll_h(dx)
if dy != 0:
if dy > 0:
if self.dataModel.getOffset() == 0:
log.debug('OFFSET == 0')
return
if dy < 0:
tsize = sum([asm.get_length() for asm in self.OPCODES])
if self.dataModel.getOffset() + tsize == self.dataModel.getDataSize():
log.debug('END')
return
self.scroll_v(dy, cachePix, pageOffset)
def moveCursor(self, direction):
cursorX, cursorY = self.cursor.getPosition()
if direction == Directions.Left:
asm = self.OPCODES[cursorY]
if cursorX == 0:
if cursorY == 0:
# if first line, scroll
self.scroll(0, 1)
self.cursor.moveAbsolute(0, 0)
else:
# move to last token from previous line
asm_prev = self.OPCODES[cursorY - 1]
idx = asm_prev.getEndCursor()
self.cursor.moveAbsolute(idx, cursorY - 1)
else:
x, dy = asm.getNextCursor(cursorX, direction=Directions.Left)
self.cursor.move(-(cursorX - x), dy)
if direction == Directions.Right:
asm = self.OPCODES[cursorY]
x, dy = asm.getNextCursor(cursorX, direction=Directions.Right)
if cursorY == self.ROWS - 1 and dy > 0:
self.scroll(0, -1)
self.cursor.moveAbsolute(0, cursorY)
else:
if cursorY + dy >= len(self.OPCODES):
dy = 0
self.cursor.move(x - cursorX, dy)
if direction == Directions.Down:
if cursorY == self.ROWS - 1:
# move cursor to first token
self.scroll(0, -1)
self.cursor.moveAbsolute(0, cursorY)
else:
# move next line, to nearest token on columns
if cursorY + 1 < len(self.OPCODES):
asm = self.OPCODES[cursorY + 1]
x = asm.getNearestCursor(cursorX)
self.cursor.moveAbsolute(x, cursorY + 1)
if direction == Directions.Up:
if cursorY == 0:
# move cursor to first token
self.scroll(0, 1)
self.cursor.moveAbsolute(0, cursorY)
else:
# move next line, to nearest token on columns
asm = self.OPCODES[cursorY - 1]
x = asm.getNearestCursor(cursorX)
self.cursor.moveAbsolute(x, cursorY - 1)
if direction == Directions.End:
pass
if direction == Directions.Home:
self.cursor.moveAbsolute(0, 0)
if direction == Directions.CtrlHome:
self.goTo(0)
if direction == Directions.CtrlEnd:
self.dataModel.slideToLastPage()
self.draw(refresh=True)
self.cursor.moveAbsolute(self.COLUMNS - 1, self.ROWS - 1)
def _followBranch(self):
cursorX, cursorY = self.cursor.getPosition()
asm = self.OPCODES[cursorY]
if asm.isBranch():
value = asm.branchAddress()
if value:
fofs = self.plugin.disasmVAtoFA(value)
if fofs is not None:
rowOfs = self._getOffsetOfRow(cursorY)
if rowOfs is not None:
self.FlowHistory.append(rowOfs + self.dataModel.getOffset())
self.goTo(fofs)
def _followBranchHistory(self):
if len(self.FlowHistory) > 0:
offset = self.FlowHistory[-1]
del self.FlowHistory[-1]
self.goTo(offset)
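# Hedged usage sketch for the branch-navigation pair above:
#
#   view._followBranch()           # on a branch row: push the current file offset onto
#                                  # FlowHistory, then goTo() the branch target's offset
#   view._followBranchHistory()    # pop the most recent offset and goTo() back to it
#
# Together they act as a simple back-stack for walking jump/call targets in the listing.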
def handleKeyEvent(self, modifiers, key, event=None):
if event.type() == QtCore.QEvent.KeyRelease:
if key == QtCore.Qt.Key_Shift:
self.stopSelection()
return True
if event.type() == QtCore.QEvent.KeyPress:
if modifiers == QtCore.Qt.ShiftModifier:
keys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Down, QtCore.Qt.Key_Up,
QtCore.Qt.Key_End, QtCore.Qt.Key_Home]
if key in keys:
self.startSelection()
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Right:
self.dataModel.slide(1)
self.addop((self.scroll, -1, 0))
if key == QtCore.Qt.Key_Left:
self.dataModel.slide(-1)
self.addop((self.scroll, 1, 0))
if key == QtCore.Qt.Key_Down:
self.addop((self.scroll, 0, -1))
self.addop((self.draw,))
if key == QtCore.Qt.Key_Up:
self.addop((self.scroll, 0, 1))
self.addop((self.draw,))
if key == QtCore.Qt.Key_End:
# not supported
pass
if key == QtCore.Qt.Key_Home:
self.moveCursor(Directions.CtrlHome)
self.addop((self.draw,))
# self.draw()
return True
else: # elif modifiers == QtCore.Qt.NoModifier:
if key == QtCore.Qt.Key_Escape:
self.selector.resetSelections()
self.addop((self.draw,))
if key == QtCore.Qt.Key_Left:
self.moveCursor(Directions.Left)
self.addop((self.draw,))
# self.draw()
if key == QtCore.Qt.Key_Right:
self.moveCursor(Directions.Right)
self.addop((self.draw,))
# self.draw()
if key |