# Method must be called with the following kwargs.
# name: The name of the LLDP policy
# state: enabled | disabled
# Note: The configured state is deployed to both Tx and Rx
# status: created | created,modified | deleted
def lldp(self, **kwargs):
required_args = {'name': '',
'state': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "lldp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/lldpIfP-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
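# Example (hypothetical usage; a sketch assuming this class has been instantiated
# with a valid APIC address and login cookies, e.g. as 'fab_pol'):
#   fab_pol.lldp(name='LLDP-ENABLED', state='enabled', status='created,modified')
# This renders lldp.json with the given values and POSTs the payload to
# mo/uni/infra/lldpIfP-LLDP-ENABLED.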
# Method must be called with the following kwargs.
# name: The name of the Link policy
# auto_neg: on | off
# speed: 100M | 1G | 10G | 40G | auto
# Note: 100G should be available soon if not already in some versions
# status: created | created,modified | deleted
def link(self, **kwargs):
required_args = {'name': '',
'auto_neg': '',
'speed': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "link.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/hintfpol-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: The name of the Port-Channel policy
# mode: off | mac-pin | active
# Note: 'off' = static mode-on
# status: created | created,modified | deleted
def pc(self, **kwargs):
required_args = {'name': '',
'mode': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "pc.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/lacplagp-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: The name of the Per Port VLAN policy
# state: enabled | disabled
# status: created | created,modified | deleted
def ppv(self, **kwargs):
required_args = {'name': '',
'state': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "ppv.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/l2IfP-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: The name of the MCP Interface policy
# state: enabled | disabled
# status: created | created,modified | deleted
def mcp_intf(self, **kwargs):
required_args = {'name': '',
'state': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "mcp_intf.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/mcpIfP-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# password: string for global MCP password
# state: enabled | disabled
def mcp_global(self, **kwargs):
required_args = {'password': '',
'state': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
template_file = "mcp_global.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/mcpInstP-default'
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# event: mcp-loop | ep-move | bpduguard
# state: true | false
def err_disable(self, **kwargs):
required_args = {'event': '',
'state': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
template_file = "err_disable.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/infra/edrErrDisRecoverPol-default/edrEventP-event-{}'
.format(templateVars['event']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: The name of the parent VLAN Pool
# mode: static | dynamic
# range_mode: static | dynamic
# start: Starting VLAN - as an integer
# end: Ending VLAN - as an integer
# status: created | created,modified | deleted
def vl_pool(self, **kwargs):
required_args = {'name': '',
'mode': '',
'range_mode': '',
'start': '',
'end': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
try:
templateVars['start'] = int(templateVars['start'])
templateVars['end'] = int(templateVars['end'])
except ValueError:
raise InvalidArg('VLAN IDs must be an integer')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vl_pool.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/infra/vlanns-[{}]-{}'
.format(templateVars['name'], templateVars['mode']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
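# Example (hypothetical usage; same assumed 'fab_pol' instance as above). The start
# and end VLANs may be passed as strings or integers; they are validated and cast
# to int before vl_pool.json is rendered:
#   fab_pol.vl_pool(name='PROD-POOL', mode='static', range_mode='static',
#                   start='10', end='20', status='created,modified')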
# Method must be called with the following kwargs.
# name: The name of the AEP
# status: created | created,modified | deleted
# infra: created | created,modified | deleted
# Note: This should be 'deleted' if no infra VLAN is needed
# or it should be 'created,modified' if there is a infra VLAN
# infra_vlan: (optional) infastructure vlan as an integer
# override: (optional) created | created,modified | deleted
# Note: This should be 'deleted' if no infra override is needed
# or it should be 'created,modified' if there is an override policy
# override_pc: (optional) Name of the port-channel policy
# override_cdp: (optional) Name of the cdp policy
# override_lldp: (optional) Name of the lldp policy
def aep(self, **kwargs):
required_args = {'name': '',
'status': '',
'infra': 'deleted'}
optional_args = {'infra_vlan': '0',
'override': 'deleted',
'override_pc': '',
'override_cdp': '',
'override_lldp': ''}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['infra'] == 'created,modified':
if not int(templateVars['infra_vlan']):
raise InvalidArg('Infra VLAN ID must be an integer')
else:
templateVars['infra_vlan'] = int(templateVars['infra_vlan'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
if templateVars['infra'] not in valid_status:
raise InvalidArg('Status invalid')
if templateVars['override'] not in valid_status:
raise InvalidArg('Status invalid')
if templateVars['override'] == 'created,modified':
template_file = "aep_override.json"
else:
template_file = "aep_no_override.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/attentp-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
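# Example (hypothetical usage; same assumed 'fab_pol' instance as above). The
# optional kwargs only take effect when 'infra' or 'override' is 'created,modified';
# otherwise the defaults apply and aep_no_override.json is rendered:
#   fab_pol.aep(name='AEP-PROD', status='created,modified',
#               infra='created,modified', infra_vlan=3967)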
# Method must be called with the following kwargs.
# name: Name of the L3-Out Domain
# status: created | created,modified | deleted
# vlan_pool: Name of the VLAN pool to associate to the L3 Out
def l3_dom(self, **kwargs):
required_args = {'name': '',
'status': '',
'vlan_pool': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "l3_dom.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/l3dom-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the Physical Domain
# status: created | created,modified | deleted
# vlan_pool: Name of the VLAN pool to associate to the Physical Domain
def phys_dom(self, **kwargs):
required_args = {'name': '',
'status': '',
'vlan_pool': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "phys_dom.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/phys-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the AEP
# status: created | created,modified | deleted
# l3_dom: Name of the L3 Domain to be hooked to the AEP
def l3_aep(self, **kwargs):
required_args = {'name': '',
'status': '',
'l3_dom': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "l3_aep.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/attentp-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the AEP
# status: created | created,modified | deleted
# dom_name: Name of the Physical Domain to be hooked to the AEP
def phys_aep(self, **kwargs):
required_args = {'name': '',
'status': '',
'dom_name': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "phys_aep.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/attentp-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the vPC
# id: vPC ID as an integer
# status: created | created,modified | deleted
from
https://github.com/nrontsis/PILCO/blob/6a962c8e4172f9e7f29ed6e373c4be2dd4b69cb7/pilco/models/mgpr.py#L81,
reinterpreted from tensorflow to pytorch
Args:
state_mu (torch.Tensor): mean value of the input distribution. Dim=(Ns + Na,)
state_var (torch.Tensor): covariance matrix of the input distribution. Dim=(Ns + Na, Ns + Na)
Returns:
M.t() (torch.Tensor): mean value of the predicted change distribution. Dim=(Ns,)
S (torch.Tensor): covariance matrix of the predicted change distribution. Dim=(Ns, Ns)
V.t() (torch.Tensor): Dim=(Ns, Ns + Na)
where Ns: dimension of state, Na: dimension of action
"""
state_var = state_var[None, None, :, :].repeat([self.num_states, self.num_states, 1, 1])
inp = (self.x[self.idxs_mem_gp[:beta.shape[1]]] - state_mu)[None, :, :].repeat([self.num_states, 1, 1])
lengthscales = torch.stack([model.covar_module.base_kernel.lengthscale[0] for model in self.models])
variances = torch.stack([model.covar_module.outputscale for model in self.models])
# Calculate M and V: mean and inv(s) times input-output covariance
iL = torch.diag_embed(1 / lengthscales)
iN = inp @ iL
B = iL @ state_var[0, ...] @ iL + torch.eye(self.num_inputs)
# Redefine iN as in^T and t --> t^T
# B is symmetric, so it is equivalent
t = torch.transpose(torch.solve(torch.transpose(iN, -1, -2), B).solution, -1, -2)
lb = torch.exp(-torch.sum(iN * t, -1) / 2) * beta
tiL = t @ iL
c = variances / torch.sqrt(torch.det(B))
M = (torch.sum(lb, -1) * c)[:, None]
V = torch.matmul(torch.transpose(tiL.conj(), -1, -2), lb[:, :, None])[..., 0] * c[:, None]
# Calculate S: Predictive Covariance
R = torch.matmul(state_var, torch.diag_embed(
1 / torch.square(lengthscales[None, :, :]) +
1 / torch.square(lengthscales[:, None, :])
)) + torch.eye(self.num_inputs)
X = inp[None, :, :, :] / torch.square(lengthscales[:, None, None, :])
X2 = -inp[:, None, :, :] / torch.square(lengthscales[None, :, None, :])
Q = torch.solve(state_var, R).solution / 2
Xs = torch.sum(X @ Q * X, -1)
X2s = torch.sum(X2 @ Q * X2, -1)
maha = -2 * torch.matmul(torch.matmul(X, Q), torch.transpose(X2.conj(), -1, -2)) + Xs[:, :, :, None] + X2s[:, :,
None, :]
k = torch.log(variances)[:, None] - torch.sum(torch.square(iN), -1) / 2
L = torch.exp(k[:, None, :, None] + k[None, :, None, :] + maha)
temp = beta[:, None, None, :].repeat([1, self.num_states, 1, 1]) @ L
S = (temp @ beta[None, :, :, None].repeat([self.num_states, 1, 1, 1]))[:, :, 0, 0]
diagL = torch.Tensor.permute(torch.diagonal(torch.Tensor.permute(L, dims=(3, 2, 1, 0)), dim1=-2, dim2=-1),
dims=(2, 1, 0))
S = S - torch.diag_embed(torch.sum(torch.mul(iK, diagL), [1, 2]))
S = S / torch.sqrt(torch.det(R))
S = S + torch.diag_embed(variances)
S = S - M @ torch.transpose(M, -1, -2)
return M.t(), S, V.t()
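# Note on the iK and beta arguments used above (a sketch based on standard GP
# regression as used in PILCO; the actual values are produced in
# self.calculate_factorizations, as described in the docstrings below): for each
# output dimension, with kernel matrix K over the points in memory, noise variance
# sigma_n^2 and training targets y,
#   iK   ~ (K + sigma_n^2 * I)^-1        Dim=(Ns, Np, Np)
#   beta ~ (K + sigma_n^2 * I)^-1 @ y    Dim=(Ns, Np)
# so that the GP posterior mean at a deterministic input x* is k(x*, X) @ beta.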
def predict_trajectory(self, actions, obs_mu, obs_var, iK, beta):
"""
Compute the future predicted states distribution for the simulated trajectory given the
current initial state (or observation) distribution (obs_mu and obs_var) and planned actions
It also returns the costs, the variance of the costs, and the lower confidence bound of the cost
along the trajectory
Args:
actions (torch.Tensor): actions to apply for the simulated trajectory. dim=(Nh, Na)
where Nh is the len of the horizon and Na the dimension of actions
obs_mu (torch.Tensor): mean value of the initial state distribution.
dim=(Ns,) where Ns is the dimension of state
obs_var (torch.Tensor): covariance matrix of the initial state distribution.
dim=(Ns, Ns) where Ns is the dimension of state
iK (torch.Tensor): intermediary result for the gp predictions that only depends on the points in memory
and not on the points to predict.
It is computed outside the optimization function in self.calculate_factorizations
for more efficient predictions. Dim=(Ns, Np, Np)
where Ns is the dimension of state and Np the number of points in gp memory
beta (torch.Tensor): intermediary result for the gp predictions that only depends on the points in memory
and not on the points to predict.
It is computed outside the optimization function in self.calculate_factorizations
for more efficient predictions. Dim=(Ns, Np)
Returns:
states_mu_pred (torch.Tensor): predicted states of the trajectory.
The first element contains the initial state.
Dim=(Nh + 1, Ns)
states_var_pred (torch.Tensor): covariance matrix of the predicted states of the trajectory.
The first element contains the initial state.
Dim=(Nh + 1, Ns, Ns)
costs_traj (torch.Tensor): costs of the predicted trajectory. Dim=(Nh,)
costs_traj_var (torch.Tensor): variance of the costs of the predicted trajectory. Dim=(Nh,)
costs_traj_lcb (torch.Tensor): lower confidence bound of the costs of the predicted trajectory.
Dim=(Nh,)
where Nh: horizon length, Ns: dimension of states, Na: dimension of actions, Np:number of points in gp memory
"""
states_mu_pred = torch.empty((self.len_horizon + 1, len(obs_mu)))
states_var_pred = torch.empty((self.len_horizon + 1, self.num_states, self.num_states))
states_mu_pred[0] = obs_mu
states_var_pred[0] = obs_var
state_dim = obs_mu.shape[0]
# Input of predict_next_state_change is not a state, but the concatenation of state and action
for idx_time in range(1, self.len_horizon + 1):
input_var = torch.zeros((self.num_inputs, self.num_inputs))
input_var[:state_dim, :state_dim] = states_var_pred[idx_time - 1]
input_mean = torch.empty((self.num_inputs,))
input_mean[:self.num_states] = states_mu_pred[idx_time - 1]
input_mean[self.num_states:(self.num_states + self.num_actions)] = actions[idx_time - 1]
if self.include_time_gp:
input_mean[-1] = self.n_iter_obs + idx_time - 1
state_change, state_change_var, v = self.predict_next_state_change(
input_mean, input_var, iK, beta)
# use torch.clamp(states_mu_pred[idx_time], 0, 1) ?
states_mu_pred[idx_time] = states_mu_pred[idx_time - 1] + state_change
states_var_pred[idx_time] = state_change_var + states_var_pred[idx_time - 1] + \
input_var[:states_var_pred.shape[1]] @ v + \
v.t() @ input_var[:states_var_pred.shape[1]].t()
costs_traj, costs_traj_var = self.compute_cost(states_mu_pred[:-1],
states_var_pred[:-1], actions)
cost_traj_final, costs_traj_var_final = self.compute_cost_terminal(states_mu_pred[-1],
states_var_pred[-1])
costs_traj = torch.cat((costs_traj, cost_traj_final[None]), 0)
costs_traj_var = torch.cat((costs_traj_var, costs_traj_var_final[None]), 0)
costs_traj_lcb = costs_traj - self.exploration_factor * torch.sqrt(costs_traj_var)
return states_mu_pred, states_var_pred, costs_traj, costs_traj_var, costs_traj_lcb
def compute_mean_lcb_trajectory(self, actions, obs_mu, obs_var, iK, beta):
"""
Compute the mean lower bound cost of a trajectory given the actions of the trajectory
and initial state distribution. The gaussian process models are used to predict the evolution of
states (mean and variance). Then the cost is computed for each predicted state and the mean is returned.
The partial derivatives of the mean lower bound cost with respect to the actions are also returned.
They are computed automatically with autograd from pytorch.
This function is called multiple times by an optimizer to find the optimal actions.
Args:
actions (numpy.array): actions to apply for the simulated trajectory.
It is a flat 1d array, regardless of the dimension of the actions,
so that this function can be used by the minimize function of the scipy library.
It is reshaped and transformed into a tensor inside.
If self.limit_action_change is true, each element of the array contains the relative
change with respect to the previous iteration, so that the change can be bounded by
the optimizer. dim=(Nh x Na,)
where Nh is the len of the horizon and Na the dimension of actions
obs_mu (torch.Tensor): mean value of the initial state distribution.
dim=(Ns,) where Ns is the dimension of state
obs_var (torch.Tensor): covariance matrix of the initial state distribution.
dim=(Ns, Ns) where Ns is the dimension of state
iK (torch.Tensor): intermediary result for the gp predictions that only depends on the points in memory
and not on the points to predict.
It is computed outside the optimization function in self.calculate_factorizations
for more efficient predictions. Dim=(Ns, Np, Np)
where Ns is the dimension of state and Np the number of points in gp memory
beta (torch.Tensor): intermediary result for the gp predictions that only depends on the points in memory
and not on the points to predict.
It is computed outside the optimization function in self.calculate_factorizations
for more efficient predictions. Dim=(Ns, Np)
Returns:
mean_cost_traj_lcb.item() (float): lower bound of the mean cost distribution
of the predicted trajectory.
gradients_dcost_dactions.flatten().detach().numpy() (numpy.array):
Derivative of the lower bound of the mean cost
distribution with respect to each of the actions in the
prediction horizon. Dim=(Nh x Na,)
where Nh is the len of the horizon and Na the dimension of actions
"""
# reshape actions from flat 1d numpy array into 2d tensor
actions = np.atleast_2d(actions.reshape(self.len_horizon, -1))
actions = torch.Tensor(actions)
actions.requires_grad = True
# If limit_action_change is true, actions are transformed back into absolute values from relative change
if self.limit_action_change:
actions_input = actions.clone()
actions_input[0] = self.action_previous_iter + actions_input[0]
actions_input = torch.clamp(torch.cumsum(actions_input, dim=0), 0, 1)
else:
actions_input = actions
mu_states_pred, s_states_pred, costs_traj, costs_traj_var, costs_traj_lcb = \
self.predict_trajectory(actions_input, obs_mu, obs_var, iK, beta)
if self.clip_lower_bound_cost_to_0:
costs_traj_lcb = torch.clamp(costs_traj_lcb, 0, np.inf)
mean_cost_traj_lcb = costs_traj_lcb.mean()
gradients_dcost_dactions = torch.autograd.grad(mean_cost_traj_lcb, actions, retain_graph=False)[0]
self.cost_traj_mean_lcb = mean_cost_traj_lcb.detach()
self.mu_states_pred = mu_states_pred.detach()
self.costs_trajectory = costs_traj.detach()
self.states_var_pred = s_states_pred.detach()
self.costs_traj_var = costs_traj_var.detach()
return mean_cost_traj_lcb.item(), gradients_dcost_dactions.flatten().detach().numpy()
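# Sketch of how an optimizer might consume this function (illustrative only; the
# actual optimization loop lives elsewhere in this class, and x0 / bounds below are
# placeholders):
#   from scipy.optimize import minimize
#   res = minimize(self.compute_mean_lcb_trajectory, x0,
#                  args=(obs_mu, obs_var, iK, beta),
#                  jac=True, method='L-BFGS-B', bounds=bounds)
# jac=True tells scipy that the callable returns a (cost, gradient) tuple, which
# matches the return signature above.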
def compute_action(self, obs_mu, obs_var=None):
"""
Get the optimal action given the observation by optimizing
the actions of the simulated trajectory with the gaussian process models such that the lower confidence bound of
the mean cost of the trajectory is minimized.
Only the first action of the prediction window is returned.
Args:
obs_mu (numpy.array): unnormalized observation from the gym environment. dim=(Ns)
obs_var (numpy.array): unnormalized variance of the observation from the gym environment. dim=(Ns, Ns).
default=None. If it is set to None,
the observation noise from the json parameters will be used for every iteration.
Ns is the dimension of states in the gym environment.
Returns:
action_denorm (numpy.array): action to use in the gym environment.
It is denormalized, so it can be used directly.
dim=(Na), where Na is the dimension of the action_space
info_dict (dict): contains all additional information about the iteration.
Keys:
- iteration (int): index number of the iteration
- state (torch.Tensor): current normed state (before applying the action)
- predicted states (torch.Tensor): mean value of the predicted distribution of the
normed states in the mpc
- predicted states std (torch.Tensor): predicted normed standard deviation of the
distribution of the states in the mpc
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This capacity type describes transmission lines that can be built by the
optimization at a cost. These investment decisions are linearized, i.e.
the decision is not whether to build a specific transmission line, but how
much capacity to build at a particular transmission corridor. Once built, the
capacity remains available for the duration of the line's pre-specified
lifetime. The line flow limits are assumed to be the same in each direction,
e.g. a 500 MW line from Zone 1 to Zone 2 will allow flows of 500 MW from
Zone 1 to Zone 2 and vice versa.
The cost input to the model is an annualized cost per unit capacity.
If the optimization makes the decision to build new capacity, the total
annualized cost is incurred in each period of the study (and multiplied by
the number of years the period represents) for the duration of the
transmission line's lifetime.
"""
import csv
import os.path
from pyomo.environ import Set, Param, Var, Expression, NonNegativeReals, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.auxiliary import cursor_to_df
from gridpath.auxiliary.db_interface import setup_results_import
from gridpath.auxiliary.dynamic_components import \
tx_capacity_type_operational_period_sets
from gridpath.auxiliary.validations import write_validation_to_database, \
get_expected_dtypes, get_tx_lines, validate_dtypes, validate_values, \
validate_idxs
# TODO: can we have different capacities depending on the direction
# TODO: add fixed O&M costs similar to gen_new_lin
def add_model_components(
m, d, scenario_directory, subproblem, stage
):
"""
The following Pyomo model components are defined in this module:
+-------------------------------------------------------------------------+
| Sets |
+=========================================================================+
| | :code:`TX_NEW_LIN_VNTS` |
| |
| A two-dimensional set of line-vintage combinations to help describe |
| the periods in time when transmission line capacity can be built in the |
| optimization. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Required Input Params |
+=========================================================================+
| | :code:`tx_new_lin_lifetime_yrs` |
| | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| The transmission line's lifetime, i.e. how long line capacity of a |
| particular vintage remains operational. |
+-------------------------------------------------------------------------+
| | :code:`tx_new_lin_annualized_real_cost_per_mw_yr` |
| | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| The transmission line's cost to build new capacity in annualized |
| real dollars per MW. |
+-------------------------------------------------------------------------+
.. note:: The cost input to the model is a levelized cost per unit
capacity. This annualized cost is incurred in each period of the study
(and multiplied by the number of years the period represents) for
the duration of the project's lifetime. It is up to the user to
ensure that the :code:`tx_new_lin_lifetime_yrs` and
:code:`tx_new_lin_annualized_real_cost_per_mw_yr` parameters are
consistent.
|
+-------------------------------------------------------------------------+
| Derived Sets |
+=========================================================================+
| | :code:`OPR_PRDS_BY_TX_NEW_LIN_VINTAGE` |
| | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
| |
| Indexed set that describes the operational periods for each possible |
| transmission line-vintage combination, based on the |
| :code:`tx_new_lin_lifetime_yrs`. For instance, transmission capacity |
| of the 2020 vintage with lifetime of 30 years will be assumed |
| operational starting Jan 1, 2020 and through Dec 31, 2049, but will |
| *not* be operational in 2050. |
+-------------------------------------------------------------------------+
| | :code:`TX_NEW_LIN_OPR_PRDS` |
| |
| Two-dimensional set that includes the periods when transmission |
| capacity of any vintage *could* be operational if built. This set is |
| added to the list of sets to join to get the final |
| :code:`TRANSMISSION_OPERATIONAL_PERIODS` set defined in |
| **gridpath.transmission.capacity.capacity**. |
+-------------------------------------------------------------------------+
| | :code:`TX_NEW_LIN_VNTS_OPR_IN_PRD` |
| | *Defined over*: :code:`PERIODS` |
| |
| Indexed set that describes the transmission line-vintages that could |
| be operational in each period based on the |
| :code:`tx_new_lin_lifetime_yrs`. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Variables |
+=========================================================================+
| | :code:`TxNewLin_Build_MW` |
| | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| Determines how much transmission capacity of each possible vintage is |
| built at each :code:`tx_new_lin` transmission line. |
+-------------------------------------------------------------------------+
|
+-------------------------------------------------------------------------+
| Expressions |
+=========================================================================+
| | :code:`TxNewLin_Capacity_MW` |
| | *Defined over*: :code:`TX_NEW_LIN_OPR_PRDS` |
| | *Within*: :code:`NonNegativeReals` |
| |
| The transmission capacity of a line in a given operational period is |
| equal to the sum of all capacity-build of vintages operational in that |
| period. |
+-------------------------------------------------------------------------+
"""
# Sets
###########################################################################
m.TX_NEW_LIN_VNTS = Set(dimen=2)
# Required Params
###########################################################################
m.tx_new_lin_lifetime_yrs = Param(
m.TX_NEW_LIN_VNTS,
within=NonNegativeReals
)
m.tx_new_lin_annualized_real_cost_per_mw_yr = Param(
m.TX_NEW_LIN_VNTS,
within=NonNegativeReals
)
# Derived Sets
###########################################################################
m.OPR_PRDS_BY_TX_NEW_LIN_VINTAGE = Set(
m.TX_NEW_LIN_VNTS,
initialize=operational_periods_by_new_build_transmission_vintage
)
m.TX_NEW_LIN_OPR_PRDS = Set(
dimen=2,
initialize=new_build_transmission_operational_periods
)
m.TX_NEW_LIN_VNTS_OPR_IN_PRD = Set(
m.PERIODS, dimen=2,
initialize=new_build_transmission_vintages_operational_in_period
)
# Variables
###########################################################################
m.TxNewLin_Build_MW = Var(
m.TX_NEW_LIN_VNTS,
within=NonNegativeReals
)
# Expressions
###########################################################################
m.TxNewLin_Capacity_MW = Expression(
m.TX_NEW_LIN_OPR_PRDS,
rule=tx_new_lin_capacity_rule
)
# Dynamic Components
###########################################################################
getattr(d, tx_capacity_type_operational_period_sets).append(
"TX_NEW_LIN_OPR_PRDS",
)
# Set Rules
###############################################################################
def operational_periods_by_new_build_transmission_vintage(mod, g, v):
operational_periods = list()
for p in mod.PERIODS:
if v <= p < v + mod.tx_new_lin_lifetime_yrs[g, v]:
operational_periods.append(p)
else:
pass
return operational_periods
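# Illustrative example (hypothetical data): with PERIODS = [2020, 2030, 2040, 2050]
# and tx_new_lin_lifetime_yrs[g, 2020] = 30, the rule above returns [2020, 2030, 2040]
# for the 2020 vintage, since 2050 is not strictly less than 2020 + 30.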
def new_build_transmission_operational_periods(mod):
return list(
set((g, p) for (g, v) in mod.TX_NEW_LIN_VNTS
for p in mod.OPR_PRDS_BY_TX_NEW_LIN_VINTAGE[g, v])
)
def new_build_transmission_vintages_operational_in_period(mod, p):
build_vintages_by_period = list()
for (g, v) in mod.TX_NEW_LIN_VNTS:
if p in mod.\
OPR_PRDS_BY_TX_NEW_LIN_VINTAGE[g, v]:
build_vintages_by_period.append((g, v))
else:
pass
return build_vintages_by_period
# Expression Rules
###############################################################################
def tx_new_lin_capacity_rule(mod, g, p):
"""
**Expression Name**: TxNewLin_Capacity_MW
**Defined Over**: TX_NEW_LIN_OPR_PRDS
The transmission capacity of a new line in a given operational period is
equal to the sum of all capacity-build of vintages operational in that
period.
This expression is not defined for a new transmission line's non-
operational periods (i.e. it's 0). E.g. if we were allowed to build
capacity in 2020 and 2030, and the line had a 15 year lifetime,
in 2020 we'd take 2020 capacity-build only, in 2030, we'd take the sum
of 2020 capacity-build and 2030 capacity-build, in 2040, we'd take 2030
capacity-build only, and in 2050, the capacity would be undefined (i.e.
0 for the purposes of the objective function).
"""
return sum(
mod.TxNewLin_Build_MW[g, v] for (gen, v)
in mod.TX_NEW_LIN_VNTS_OPR_IN_PRD[p]
if gen == g
)
# Tx Capacity Type Methods
###############################################################################
def min_transmission_capacity_rule(mod, g, p):
"""
"""
return -mod.TxNewLin_Capacity_MW[g, p]
def max_transmission_capacity_rule(mod, g, p):
"""
"""
return mod.TxNewLin_Capacity_MW[g, p]
def tx_capacity_cost_rule(mod, g, p):
"""
Capacity cost for new builds in each period (sum over all vintages
operational in current period).
"""
return sum(mod.TxNewLin_Build_MW[g, v]
* mod.tx_new_lin_annualized_real_cost_per_mw_yr[g, v]
for (gen, v) in mod.TX_NEW_LIN_VNTS_OPR_IN_PRD[p]
if gen == g)
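# Illustrative example (hypothetical data): if 100 MW of the 2020 vintage and 50 MW of
# the 2030 vintage are both operational in 2030, with annualized costs of 20,000 and
# 18,000 $/MW-yr respectively, the 2030 capacity cost for line g is
# 100 * 20,000 + 50 * 18,000 = 2,900,000 $/yr.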
# Input-Output
###############################################################################
def load_module_specific_data(
m, data_portal, scenario_directory, subproblem, stage
):
# TODO: throw an error when a line of the 'tx_new_lin' capacity
# type is not found in new_build_transmission_vintage_costs.tab
data_portal.load(
filename=os.path.join(scenario_directory, str(subproblem), str(stage), "inputs",
"new_build_transmission_vintage_costs.tab"),
index=m.TX_NEW_LIN_VNTS,
select=("transmission_line", "vintage",
"tx_lifetime_yrs",
"tx_annualized_real_cost_per_mw_yr"),
param=(m.tx_new_lin_lifetime_yrs,
m.tx_new_lin_annualized_real_cost_per_mw_yr)
)
# TODO: untested
def export_module_specific_results(
m, d, scenario_directory, subproblem, stage
):
"""
:param m:
:param d:
:param scenario_directory:
:param subproblem:
:param stage:
:return:
"""
# Export transmission capacity
with open(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
"transmission_new_capacity.csv"),
"w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["transmission_line", "period",
"load_zone_from", "load_zone_to",
"new_build_transmission_capacity_mw"])
for (transmission_line, p) in m.TX_NEW_LIN_VNTS:
writer.writerow([
transmission_line,
p,
m.load_zone_from[transmission_line],
m.load_zone_to[transmission_line],
value(m.TxNewLin_Build_MW[transmission_line, p])
])
# Database
###############################################################################
def get_module_specific_inputs_from_database(
scenario_id, subscenarios, subproblem, stage, conn
):
"""
:param subscenarios: SubScenarios object with all subscenario info
:param subproblem:
:param stage:
:param conn: database connection
:return:
"""
c = conn.cursor()
tx_cost = c.execute(
"""SELECT transmission_line, vintage, tx_lifetime_yrs,
tx_annualized_real_cost_per_mw_yr
FROM inputs_transmission_portfolios
CROSS JOIN
(SELECT period as vintage
FROM inputs_temporal_periods
WHERE temporal_scenario_id = {}) as relevant_periods
INNER JOIN
(SELECT transmission_line, vintage, tx_lifetime_yrs,
tx_annualized_real_cost_per_mw_yr
FROM inputs_transmission_new_cost
WHERE transmission_new_cost_scenario_id = {} ) as cost
USING (transmission_line, vintage )
WHERE transmission_portfolio_scenario_id = {};""".format(
subscenarios.TEMPORAL_SCENARIO_ID,
subscenarios.TRANSMISSION_NEW_COST_SCENARIO_ID,
subscenarios.TRANSMISSION_PORTFOLIO_SCENARIO_ID
)
)
return tx_cost
def write_module_specific_model_inputs(
scenario_directory, scenario_id, subscenarios, subproblem, stage, conn):
"""
Get inputs from database and write out the model input .tab file.
:param scenario_directory: string, the scenario directory
:param subscenarios: SubScenarios object with all subscenario info
x in util.elements_in_index_level(df,'dispatch_feeder') if x in self.dispatch_feeders], self.dispatch_feeders)
distribution_df = self.outputs.clean_df(distribution_df)
distribution_df.columns = [cfg.calculation_energy_unit.upper()]
distribution_df = DfOper.mult([distribution_df, self.distribution_losses, self.transmission_losses])
util.replace_index_name(distribution_df, 'DISPATCH_OUTPUT', 'SUPPLY_NODE')
distribution_df = util.remove_df_levels(distribution_df, 'DISPATCH_FEEDER')
self.bulk_dispatch = pd.concat([self.bulk_dispatch, distribution_df.reorder_levels(self.bulk_dispatch.index.names)])
def set_long_duration_opt(self, year):
# MOVE
"""sets input parameters for dispatched nodes (ex. conventional hydro)"""
def split_and_apply(array, dispatch_periods, fun):
energy_by_block = np.array_split(array, np.where(np.diff(dispatch_periods)!=0)[0]+1)
return [fun(block) for block in energy_by_block]
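# Illustrative example (hypothetical data): with array = [1, 2, 3, 4, 5, 6],
# dispatch_periods = [0, 0, 0, 1, 1, 2] and fun = np.mean, split_and_apply splits
# the array wherever the period label changes ([1, 2, 3], [4, 5], [6]) and
# returns [2.0, 4.5, 6.0].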
self.dispatch.ld_technologies = []
for node_name in [x for x in self.dispatch.long_duration_dispatch_order if x in self.nodes.keys()]:
node = self.nodes[node_name]
full_energy_shape, p_min_shape, p_max_shape = node.aggregate_flexible_electricity_shapes(year, util.remove_df_levels(util.df_slice(self.dispatch_feeder_allocation,year,'year'),year))
if node_name in self.flexible_gen.keys():
lookup = self.flexible_gen
load_or_gen = 'gen'
elif node_name in self.flexible_load.keys():
lookup = self.flexible_load
load_or_gen = 'load'
else:
continue
for geography in lookup[node_name].keys():
for zone in lookup[node_name][geography].keys():
for feeder in lookup[node_name][geography][zone].keys():
capacity = util.remove_df_levels(lookup[node_name][geography][zone][feeder]['capacity'], 'resource_bin')
if capacity.sum().sum() == 0:
continue
annual_energy = lookup[node_name][geography][zone][feeder]['energy'].values.sum()
opt_periods = self.dispatch.period_repeated
dispatch_window = self.dispatch.node_config_dict[node_name].dispatch_window
dispatch_periods = getattr(Shapes.get_active_dates_index(), dispatch_window)
if load_or_gen=='load':
annual_energy = copy.deepcopy(annual_energy) *-1
if p_min_shape is None:
p_min = np.repeat(0.0,len(dispatch_periods))
p_max = np.repeat(capacity.sum().values[0],len(dispatch_periods))
hourly_p_min = np.repeat(0.0,len(self.dispatch.hours))
opt_p_min = np.repeat(0.0,len(opt_periods))
opt_p_max = np.repeat(capacity.sum().values[0],len(opt_periods))
hourly_p_max = np.repeat(capacity.sum().values[0],len(self.dispatch.hours))
else:
hourly_p_min = util.remove_df_levels(util.DfOper.mult([capacity, p_min_shape]), GeoMapper.supply_primary_geography).values
p_min = np.array(split_and_apply(hourly_p_min, dispatch_periods, np.mean))
opt_p_min = np.array(split_and_apply(hourly_p_min, opt_periods, np.mean))
hourly_p_max = util.remove_df_levels(util.DfOper.mult([capacity, p_max_shape]),GeoMapper.supply_primary_geography).values
p_max = np.array(split_and_apply(hourly_p_max, dispatch_periods, np.mean))
opt_p_max = np.array(split_and_apply(hourly_p_max, opt_periods, np.mean))
tech_name = str(tuple([geography,node_name, feeder]))
self.dispatch.ld_technologies.append(tech_name)
#reversed sign for load so that pmin always represents greatest load or smallest generation
if zone == self.transmission_node_name:
if load_or_gen=='load':
p_min *= self.transmission_losses.loc[geography,:].values[0]
p_max *= self.transmission_losses.loc[geography,:].values[0]
opt_p_min *= self.transmission_losses.loc[geography,:].values[0]
opt_p_max *= self.transmission_losses.loc[geography,:].values[0]
hourly_p_min *=self.transmission_losses.loc[geography,:].values[0]
hourly_p_max *= self.transmission_losses.loc[geography,:].values[0]
annual_energy*=self.transmission_losses.loc[geography,:].values[0]
else:
p_min *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
p_max *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
opt_p_min *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
opt_p_max *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
hourly_p_min *=self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
hourly_p_max *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
annual_energy *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[GeoMapper.dispatch_geography, 'dispatch_feeder']).values[0][0]
if load_or_gen == 'gen':
max_capacity = opt_p_max
min_capacity = opt_p_min
max_hourly_capacity = hourly_p_max
min_hourly_capacity = hourly_p_min
else:
max_capacity = -opt_p_min
min_capacity = -opt_p_max
max_hourly_capacity = -hourly_p_min
min_hourly_capacity = -hourly_p_max
self.dispatch.annual_ld_energy[tech_name] = annual_energy
self.dispatch.ld_geography[tech_name] = geography
self.dispatch.ld_capacity.update(dict([((tech_name, h), value) for h, value in enumerate(max_hourly_capacity)]))
self.dispatch.ld_min_capacity.update(dict([((tech_name, h), value) for h, value in enumerate(min_hourly_capacity)]))
for period in self.dispatch.periods:
self.dispatch.capacity[period][tech_name] = max_capacity[period]
self.dispatch.min_capacity[period][tech_name] = min_capacity[period]
self.dispatch.geography[period][tech_name] = geography
self.dispatch.feeder[period][tech_name] = feeder
def solve_heuristic_load_and_gen(self, year):
# MOVE
"""solves dispatch shapes for heuristically dispatched nodes (ex. conventional hydro)"""
def split_and_apply(array, dispatch_periods, fun):
energy_by_block = np.array_split(array, np.where(np.diff(dispatch_periods)!=0)[0]+1)
return [fun(block) for block in energy_by_block]
self.dispatched_bulk_load = copy.deepcopy(self.bulk_gen)*0
self.dispatched_bulk_gen = copy.deepcopy(self.bulk_gen)*0
self.dispatched_dist_load = copy.deepcopy(self.bulk_gen)*0
self.dispatched_dist_gen = copy.deepcopy(self.bulk_gen)*0
for node_name in [x for x in self.dispatch.heuristic_dispatch_order if x in self.nodes.keys()]:
node = self.nodes[node_name]
full_energy_shape, p_min_shape, p_max_shape = node.aggregate_flexible_electricity_shapes(year, util.remove_df_levels(util.df_slice(self.dispatch_feeder_allocation,year,'year'),year))
if node_name in self.flexible_gen.keys():
lookup = self.flexible_gen
load_or_gen = 'gen'
elif node_name in self.flexible_load.keys():
lookup = self.flexible_load
load_or_gen = 'load'
else:
continue
logging.info(" solving dispatch for %s" %node.name)
geography_list = []
for geography in lookup[node_name].keys():
for zone in lookup[node_name][geography].keys():
feeder_list = []
for feeder in lookup[node_name][geography][zone].keys():
capacity = lookup[node_name][geography][zone][feeder]['capacity']
energy = lookup[node_name][geography][zone][feeder]['energy']
dispatch_window = self.dispatch.node_config_dict[node_name].dispatch_window
dispatch_periods = getattr(Shapes.get_active_dates_index(), dispatch_window)
num_years = len(dispatch_periods)/8766.
if load_or_gen=='load':
energy = copy.deepcopy(energy) *-1
if full_energy_shape is not None and 'dispatch_feeder' in full_energy_shape.index.names:
energy_shape = util.df_slice(full_energy_shape, feeder, 'dispatch_feeder')
else:
energy_shape = full_energy_shape
if energy_shape is None:
energy_budgets = util.remove_df_levels(energy,[GeoMapper.supply_primary_geography,'resource_bin']).values * np.diff([0]+list(np.where(np.diff(dispatch_periods)!=0)[0]+1)+[len(dispatch_periods)-1])/8766.*num_years
energy_budgets = energy_budgets[0]
else:
hourly_energy = util.remove_df_levels(util.DfOper.mult([energy,energy_shape]), GeoMapper.supply_primary_geography).values
energy_budgets = split_and_apply(hourly_energy, dispatch_periods, sum)
if p_min_shape is None:
p_min = 0.0
p_max = capacity.sum().values[0]
else:
hourly_p_min = util.remove_df_levels(util.DfOper.mult([capacity,p_min_shape]),GeoMapper.supply_primary_geography).values
p_min = split_and_apply(hourly_p_min, dispatch_periods, np.mean)
hourly_p_max = util.remove_df_levels(util.DfOper.mult([capacity,p_max_shape]),GeoMapper.supply_primary_geography).values
p_max = split_and_apply(hourly_p_max, dispatch_periods, np.mean)
if zone == self.transmission_node_name:
net_indexer = util.level_specific_indexer(self.bulk_net_load,[GeoMapper.dispatch_geography], [geography])
if load_or_gen=='load':
self.energy_budgets = energy_budgets
self.p_min = p_min
self.p_max = p_max
dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.bulk_net_load.loc[net_indexer,:].values.flatten(),energy_budgets, dispatch_periods, p_min, p_max)])
self.dispatch_result = dispatch
indexer = util.level_specific_indexer(self.bulk_load,[GeoMapper.dispatch_geography], [geography])
self.bulk_load.loc[indexer,:] += dispatch
indexer = util.level_specific_indexer(self.bulk_load,GeoMapper.dispatch_geography, geography)
self.dispatched_bulk_load.loc[indexer,:] += dispatch
else:
indexer = util.level_specific_indexer(self.bulk_gen,GeoMapper.dispatch_geography, geography)
dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.bulk_net_load.loc[net_indexer,:].values.flatten(),np.array(energy_budgets).flatten(), dispatch_periods, p_min, p_max)])
self.bulk_gen.loc[indexer,:] += dispatch
self.dispatched_bulk_gen.loc[indexer,:] += dispatch
else:
if load_or_gen=='load':
indexer = util.level_specific_indexer(self.dist_load,[GeoMapper.dispatch_geography,'dispatch_feeder'], [geography,feeder])
dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.dist_net_load_no_feeders.loc[net_indexer,:].values.flatten(),energy_budgets, dispatch_periods, p_min, p_max)])
for timeshift_type in list(set(self.distribution_load.index.get_level_values('timeshift_type'))):
indexer = util.level_specific_indexer(self.distribution_load,[GeoMapper.dispatch_geography,'timeshift_type'], [geography,timeshift_type])
self.distribution_load.loc[indexer,:] += dispatch
indexer = util.level_specific_indexer(self.distribution_load,GeoMapper.dispatch_geography, geography)
self.dispatched_dist_load.loc[indexer,:] += dispatch
else:
indexer = util.level_specific_indexer(self.dist_gen,[GeoMapper.dispatch_geography,'dispatch_feeder'], [geography,feeder])
dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.dist_net_load_no_feeders.loc[net_indexer,:].values.flatten(),energy_budgets, dispatch_periods, p_min, p_max)])
self.distribution_gen.loc[indexer,:] += dispatch
self.dispatched_dist_gen.loc[indexer,:] += dispatch
index = pd.MultiIndex.from_product([Shapes.get_active_dates_index(),[feeder]],names=['weather_datetime','dispatch_feeder'])
dispatch=pd.DataFrame(dispatch,index=index,columns=['value'])
if load_or_gen=='gen':
dispatch *=-1
feeder_list.append(dispatch)
geography_list.append(pd.concat(feeder_list))
self.update_net_load_signal()
df = pd.concat(geography_list, keys=lookup[node_name].keys(), names=[GeoMapper.dispatch_geography])
df = pd.concat([df], keys=[node_name], names=['supply_node'])
df = pd.concat([df], keys=[year], names=['year'])
if year in self.dispatch_write_years:
self.append_heuristic_load_and_gen_to_dispatch_outputs(df, load_or_gen)
def prepare_optimization_inputs(self,year):
# MOVE
logging.info(" preparing optimization inputs")
self.dispatch.set_timeperiods()
self.dispatch.set_losses(self.transmission_losses,self.distribution_losses)
self.set_net_load_thresholds(year)
#freeze the bulk net load as opt bulk net load just in case we want to rerun a year. If we don't do this, bulk_net_load would be updated with optimization results
self.dispatch.set_opt_loads(self.distribution_load,self.distribution_flex_load,self.distribution_gen,self.bulk_load,self.bulk_gen,self.dispatched_bulk_load, self.bulk_net_load, self.active_thermal_dispatch_df)
flex_pmin, flex_pmax = self.demand_object.aggregate_flexible_load_pmin_pmax(year)
self.dispatch.set_max_min_flex_loads(flex_pmin, flex_pmax)
self.dispatch.set_technologies(self.storage_capacity_dict, self.storage_efficiency_dict, self.active_thermal_dispatch_df)
self.set_long_duration_opt(year)
def set_grid_capacity_factors(self, year):
max_year = max(self.years)
distribution_grid_node = self.nodes[self.distribution_grid_node_name]
dist_cap_factor = util.DfOper.divi([self.dist_only_net_load.groupby(level=[GeoMapper.dispatch_geography,'dispatch_feeder']).mean(),self.dist_only_net_load.groupby(level=[GeoMapper.dispatch_geography,'dispatch_feeder']).max()])
geography_map_key = distribution_grid_node.geography_map_key if hasattr(distribution_grid_node, 'geography_map_key') and distribution_grid_node.geography_map_key is not None else GeoMapper.default_geography_map_key
if GeoMapper.dispatch_geography != GeoMapper.supply_primary_geography:
map_df = GeoMapper.get_instance().map_df(GeoMapper.dispatch_geography,GeoMapper.supply_primary_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
dist_cap_factor = util.remove_df_levels(util.DfOper.mult([dist_cap_factor,map_df]),GeoMapper.dispatch_geography)
dist_cap_factor = util.remove_df_levels(util.DfOper.mult([dist_cap_factor, util.df_slice(self.dispatch_feeder_allocation, year, 'year')]),'dispatch_feeder')
dist_cap_factor = dist_cap_factor.reorder_levels([GeoMapper.supply_primary_geography,'demand_sector']).sort_index()
distribution_grid_node.capacity_factor.values.loc[:,year] = dist_cap_factor.values
for i in range(0,cfg.getParamAsInt('dispatch_step')+1):
distribution_grid_node.capacity_factor.values.loc[:,min(year+i,max_year)] = dist_cap_factor.values
if hasattr(distribution_grid_node, 'stock'):
distribution_grid_node.update_stock(year,3)
#hardcoded 50% assumption of colocated energy for dispatched flexible gen. I.e. wind and solar. Means that transmission capacity isn't needed to support energy demands.
#TODO change to config parameter
if hasattr(self, 'dispatched_bulk_load'):
bulk_flow = util.DfOper.subt([util.DfOper.add([self.bulk_load,util.remove_df_levels(self.dist_only_net_load,'dispatch_feeder')]),self.dispatched_bulk_load * .5])
else:
bulk_flow = util.DfOper.add([self.bulk_load, util.remove_df_levels(self.dist_only_net_load, 'dispatch_feeder')])
all_bulk_flow = util.DfOper.add([self.bulk_load, util.remove_df_levels(self.dist_only_net_load, 'dispatch_feeder')])
bulk_cap_factor = util.DfOper.divi([all_bulk_flow.groupby(level=GeoMapper.dispatch_geography).mean(),bulk_flow.groupby(level=GeoMapper.dispatch_geography).max()])
transmission_grid_node = self.nodes[self.transmission_node_name]
geography_map_key = transmission_grid_node.geography_map_key if hasattr(transmission_grid_node, 'geography_map_key') and transmission_grid_node.geography_map_key is not None else GeoMapper.default_geography_map_key
if GeoMapper.dispatch_geography != GeoMapper.supply_primary_geography:
map_df = GeoMapper.get_instance().map_df(GeoMapper.dispatch_geography,GeoMapper.supply_primary_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
bulk_cap_factor = util.remove_df_levels(util.DfOper.mult([bulk_cap_factor,map_df]),GeoMapper.dispatch_geography)
transmission_grid_node.capacity_factor.values.loc[:,year] = bulk_cap_factor.values
for i in range(0,cfg.getParamAsInt('dispatch_step')+1):
transmission_grid_node.capacity_factor.values.loc[:,min(year+i,max_year)] = bulk_cap_factor.values
if hasattr(transmission_grid_node, 'stock'):
transmission_grid_node.update_stock(year,3)
def _get_ld_results_from_dispatch(self):
if not len(self.dispatch.ld_technologies):
return None, None, None
#load and gen are the same in the ld_df, just with different signs. We want to separate and use absolute values (i.e. *- when it is load)
ld_load = util.remove_df_levels(-self.dispatch.ld_df[self.dispatch.ld_df.values<0],'supply_node')
ld_gen = util.remove_df_levels(self.dispatch.ld_df[self.dispatch.ld_df.values>0], 'supply_node')
dist_ld_load = util.df_slice(ld_load, self.dispatch_feeders, 'dispatch_feeder')
if not len(dist_ld_load):
dist_ld_load = None
if not len(ld_load):
ld_load = None
if not len(ld_gen):
ld_gen = None
return ld_load, ld_gen, dist_ld_load
def solve_storage_and_flex_load_optimization(self,year):
# MOVE
"""prepares, solves, and updates the net load with results from the storage and flexible load optimization"""
self.dispatch.set_year(year)
self.prepare_optimization_inputs(year)
logging.info(" solving dispatch for storage and dispatchable load")
self.dispatch.solve_optimization()
ld_load, ld_gen, dist_ld_load = self._get_ld_results_from_dispatch()
dist_storage_charge, dist_storage_discharge = None, None
storage_charge = self.dispatch.storage_df.xs('charge', level='charge_discharge')
storage_discharge = self.dispatch.storage_df.xs('discharge', level='charge_discharge')
if len(set(storage_charge.index.get_level_values('dispatch_feeder')))>1:
dist_storage_charge = util.df_slice(storage_charge, self.dispatch_feeders, 'dispatch_feeder')
dist_storage_discharge = util.df_slice(storage_discharge, self.dispatch_feeders, 'dispatch_feeder')
dist_flex_load = util.df_slice(self.dispatch.flex_load_df, self.dispatch_feeders, 'dispatch_feeder')
self.distribution_load = util.DfOper.add((self.distribution_load, dist_storage_charge, dist_flex_load, dist_ld_load))
self.distribution_gen = util.DfOper.add((self.distribution_gen, dist_storage_discharge,util.df_slice(ld_gen, self.dispatch_feeders, 'dispatch_feeder',return_none=True) ))
imports = None
exports = None
if self.dispatch.transmission_flow_df is not None:
try:
flow_with_losses = util.DfOper.divi((self.dispatch.transmission_flow_df, 1 - self.dispatch.transmission.losses.get_values(year)))
except:
pdb.set_trace()
imports = self.dispatch.transmission_flow_df.groupby(level=['gau_to', 'weather_datetime']).sum()
exports = flow_with_losses.groupby(level=['gau_from', 'weather_datetime']).sum()
imports.index.names = [GeoMapper.dispatch_geography, 'weather_datetime']
exports.index.names = [GeoMapper.dispatch_geography, 'weather_datetime']
try:
if ld_load is not None:
new_ld_load = util.DfOper.divi([util.df_slice(ld_load, 'bulk', 'dispatch_feeder',return_none=True),self.transmission_losses])
else:
new_ld_load = None
self.bulk_load = util.DfOper.add((self.bulk_load, storage_charge.xs('bulk', level='dispatch_feeder'), new_ld_load,util.DfOper.divi([exports,self.transmission_losses])))
except:
pdb.set_trace()
self.bulk_gen = util.DfOper.add((self.bulk_gen, storage_discharge.xs('bulk', level='dispatch_feeder'), util.df_slice(ld_gen, 'bulk', 'dispatch_feeder',return_none=True),imports))
self.opt_bulk_net_load = copy.deepcopy(self.bulk_net_load)
self.update_net_load_signal()
self.produce_distributed_storage_outputs(year)
self.produce_bulk_storage_outputs(year)
self.produce_flex_load_outputs(year)
self.produce_ld_outputs(year)
self.produce_transmission_outputs(year)
def produce_transmission_outputs(self, year):
# MOVE
if year in self.dispatch_write_years and self.dispatch.transmission_flow_df is not None:
df_index_reset = self.dispatch.transmission_flow_df.reset_index()
# df_index_reset['gau_from'] = map(cfg.outputs_id_map[GeoMapper.dispatch_geography].get, df_index_reset['gau_from'].values)
# df_index_reset['gau_to'] = map(cfg.outputs_id_map[GeoMapper.dispatch_geography].get, df_index_reset['gau_to'].values)
df_index_reset_with_losses = DfOper.divi((self.dispatch.transmission_flow_df, 1 - self.dispatch.transmission.losses.get_values(year))).reset_index()
# df_index_reset_with_losses['gau_from'] = map(cfg.outputs_id_map[GeoMapper.dispatch_geography].get, df_index_reset_with_losses['gau_from'].values)
# df_index_reset_with_losses['gau_to'] = map(cfg.outputs_id_map[GeoMapper.dispatch_geography].get, df_index_reset_with_losses['gau_to'].values)
imports = df_index_reset.rename(columns={'gau_to':GeoMapper.dispatch_geography})
exports = df_index_reset_with_losses.rename(columns={'gau_from':GeoMapper.dispatch_geography})
exports['gau_to'] = 'TRANSMISSION EXPORT TO ' + exports['gau_to']
imports['gau_from'] = 'TRANSMISSION IMPORT FROM ' + imports['gau_from']
imports = imports.rename(columns={'gau_from':'DISPATCH_OUTPUT'})
exports = exports.rename(columns={'gau_to':'DISPATCH_OUTPUT'})
imports = imports.set_index([GeoMapper.dispatch_geography, 'DISPATCH_OUTPUT', 'weather_datetime'])
exports = exports.set_index([GeoMapper.dispatch_geography, 'DISPATCH_OUTPUT', 'weather_datetime'])
# drop any lines that don't have flows this is done to reduce the size of outputs
imports = imports.groupby(level=[GeoMapper.dispatch_geography, 'DISPATCH_OUTPUT']).filter(lambda x: x.sum() > 0)
exports = exports.groupby(level=[GeoMapper.dispatch_geography, 'DISPATCH_OUTPUT']).filter(lambda x: x.sum() > 0)
transmission_output = pd.concat((-imports, exports))
transmission_output = util.add_and_set_index(transmission_output, 'year', year)
transmission_output.columns = [cfg.calculation_energy_unit.upper()]
transmission_output = self.outputs.clean_df(transmission_output)
self.bulk_dispatch = pd.concat([self.bulk_dispatch, transmission_output.reorder_levels(self.bulk_dispatch.index.names)])
def produce_distributed_storage_outputs(self, year):
# MOVE
if year in self.dispatch_write_years and len(set(self.dispatch.storage_df.index.get_level_values('dispatch_feeder')))>1 :
dist_storage_df = util.df_slice(self.dispatch.storage_df, self.dispatch_feeders, 'dispatch_feeder')
            distribution_df
# File: cross_loss_influence/helpers/influence_function.py
# Created by <NAME>
# Extensions to https://github.com/nimarb/pytorch_influence_functions
import torch
import time
import datetime
import numpy as np
import copy
import logging
from torch.autograd import grad
import random
from cross_loss_influence.helpers.bolukbasi_prior_work.prior_pca_debiasing import extract_txt_embeddings
from torch.utils.data.dataloader import DataLoader
DEVICE = 'cuda'
def calc_influence_single(model, train_dataset, z_test, t_test, recursion_depth, r, test_indices, scifi=True):
"""Calculates the influences of all training data points on a single
test dataset image.
Arugments:
model: pytorch model
train_loader: DataLoader, loads the training dataset
embedding_pair: pair of embeddings we want to diff
recursion_depth: int, number of recursions to perform during s_test
calculation, increases accuracy. r*recursion_depth should equal the
training dataset size.
r: int, number of iterations of which to take the avg.
of the h_estimate calculation; r*recursion_depth should equal the
training dataset size.
Returns:
influence: list of float, influences of all training data samples
for one test sample
harmful: list of float, influences sorted by harmfulness
helpful: list of float, influences sorted by helpfulness
"""
train_loader = DataLoader(train_dataset, shuffle=True)
s_test_vec = calc_s_test_single(model,
z_test,
t_test,
train_loader,
recursion_depth=recursion_depth,
r=r,
test_indices=test_indices,
scifi=scifi)
# Calculate the influence function
train_dataset_size = len(train_dataset)
influences = []
train_loader = DataLoader(train_dataset, shuffle=False)
for index, batch_data in enumerate(train_loader):
good_enough = False # Is a word of interest in this sample?
words, contexts = vectorized_influence_data_to_tensors(batch_data)
for v_index in test_indices:
if v_index in words.cpu():
good_enough = True
if not good_enough:
continue
words = torch.autograd.Variable(words).to(device=DEVICE)
contexts = torch.autograd.Variable(contexts).to(device=DEVICE)
loss_val = model.forward_no_negatives(words, contexts)
grad_z_vec = list(grad(loss_val, list(model.parameters()), create_graph=True))
# For sparse:
if recursion_depth <= 1:
tmp_influence = 0
for k, j in zip(grad_z_vec, s_test_vec):
if (k * j).indices().size(1) > 0:
tmp_influence -= (k * j).values().sum()/train_dataset_size
# For dense
else:
tmp_influence = -sum(
[
####################
# TODO: potential bottle neck, takes 17% execution time
# torch.sum(k * j).data.cpu().numpy()
####################
torch.sum(k * j).data
for k, j in zip(grad_z_vec, s_test_vec)
]) / train_dataset_size
influences.append([index, tmp_influence.cpu()])
influences = np.array(influences)
harmful = influences[influences[:, 1].argsort()]
helpful = harmful[::-1]
influences = influences[:, 1]
return influences, harmful.tolist(), helpful.tolist()
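# Hypothetical usage sketch (the model/dataset construction is project-specific
# and the names below are made up): rank every qualifying training sample by its
# estimated influence on the chosen test quantity.
#   influences, harmful, helpful = calc_influence_single(
#       model, train_dataset, z_test, t_test,
#       recursion_depth=1, r=1, test_indices=word_ids, scifi=False)
#   most_helpful_sample = int(helpful[0][0])   # dataset index of the top sample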
def calc_s_test_single(model, z_test, t_test, train_loader,
damp=0.01, scale=25, recursion_depth=5000, r=1, test_indices=[], scifi=True):
"""Calculates s_test for a single test image taking into account the whole
training dataset. s_test = invHessian * nabla(Loss(test_img, model params))
Arguments:
model: pytorch model, for which s_test should be calculated
        z_test: test input
        t_test: test target
train_loader: pytorch dataloader, which can load the train data
damp: float, influence function damping factor
scale: float, influence calculation scaling factor
recursion_depth: int, number of recursions to perform during s_test
calculation, increases accuracy. r*recursion_depth should equal the
training dataset size.
r: int, number of iterations of which to take the avg.
of the h_estimate calculation; r*recursion_depth should equal the
training dataset size.
Returns:
        s_test_vec: torch tensor, contains s_test for a single test point"""
s_test_vec_list = []
    # For the sparse approach, one pass is all we need; we go through the entire dataset to gather the samples of interest.
for i in range(r):
print("Beginning another round of estimation")
s_test_vec_list.append(s_test(z_test, t_test, model, train_loader, damp=damp, scale=scale,
recursion_depth=recursion_depth, test_indices=test_indices, scifi=scifi))
s_test_vec = s_test_vec_list[0]
for i in range(1, r):
s_test_vec += s_test_vec_list[i]
s_test_vec = [i / r for i in s_test_vec if i is not None]
return s_test_vec
def s_test(z_test, t_test, model, train_loader, damp=0.01, scale=25.0,
recursion_depth=5000, test_indices=[], scifi=True):
"""s_test can be precomputed for each test point of interest, and then
multiplied with grad_z to get the desired value for each training point.
    Here, stochastic estimation is used to calculate s_test. s_test is the
Inverse Hessian Vector Product.
Arguments:
z_test: torch tensor, test data points, such as test images
t_test: torch tensor, contains all test data labels
model: torch NN, model used to evaluate the dataset
train_loader: torch dataloader, can load the training dataset
damp: float, dampening factor
scale: float, scaling factor
recursion_depth: int, number of iterations aka recursion depth
should be enough so that the value stabilises.
Returns:
h_estimate: list of torch tensors, s_test"""
# v = grad_z(z_test, t_test, model)
if scifi:
v = calc_loss(z_test, t_test) # Change this to bias estimation
else:
v = calc_bias(z_test, t_test, model)
v = list(grad(v, list(model.parameters()), create_graph=True, allow_unused=True)) # A bit sketched by this
# v[1] = v[0]
h_estimates = v.copy()
if recursion_depth <= 1: # If we're sparse
success_limit = 5000
else:
success_limit = recursion_depth
################################
# TODO: Dynamically set the recursion depth so that iterations stops
# once h_estimate stabilises
################################
successes = 0
for i, batch_data in enumerate(train_loader): # instead of random, get all samples of relevance in the dataset.
good_enough = False # Is a word of interest in this sample?
words, contexts = vectorized_influence_data_to_tensors(batch_data)
for v_index in test_indices:
if v_index in words.cpu():
good_enough = True
if not good_enough:
continue
words = torch.autograd.Variable(words).to(device=DEVICE)
contexts = torch.autograd.Variable(contexts).to(device=DEVICE)
loss_val = model.forward_no_negatives(words, contexts)
hv = hvp(loss_val, list(model.parameters()), h_estimates, sparse=recursion_depth == 1)
        # Recursively calculate h_estimate
if not hv:
continue
successes += 1
# h_estimates = [
# _v + (1 - damp) * h_estimate - _hv / scale
# for _v, h_estimate, _hv in zip(v, h_estimates, hv)]
for h_index, bucket in enumerate(zip(v, h_estimates, hv)):
temp_v, h_est, temp_hv = bucket
if h_est is not None:
temp_h_est = temp_v + (1 - damp) * h_est - temp_hv / scale
# print((h_estimates[h_index] - temp_h_est).abs().sum())
h_estimates[h_index] = temp_h_est
# h_estimates[h_index] = temp_v + (1 - damp) * h_est - temp_hv / scale
if successes >= success_limit:
break
return h_estimates
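# The accepted batches above implement a stochastic (LiSSA-style) inverse-HVP
# estimate with the damped recurrence
#   h_{t+1} = v + (1 - damp) * h_t - (H_t h_t) / scale
# whose fixed point satisfies (damp * I + H / scale) h = v, i.e. h approximates
# a scaled, damped version of H^{-1} v.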
# def grad_z(z, t, model):
# """Calculates the gradient z. One grad_z should be computed for each
# training sample.
# Arguments:
# z: torch tensor, training data points
# e.g. an image sample (batch_size, 3, 256, 256)
# t: torch tensor, training data labels
# model: torch NN, model used to evaluate the dataset
# Returns:
# grad_z: list of torch tensor, containing the gradients
# from model parameters to loss"""
# model.eval()
# # initialize
# z = z.to(device=DEVICE)
# t = t.to(device=DEVICE)
# y = model(z)
# loss = calc_loss(y, t)
# # Compute sum of gradients from model parameters to loss
# return list(grad(loss, list(model.parameters()), create_graph=True))
def calc_bias(target_set, attribute_set, model):
targets_one = target_set[0]
targets_two = target_set[1]
attribute_one = attribute_set[0]
attribute_two = attribute_set[1]
mean_one = torch.zeros(len(targets_one))
mean_two = torch.zeros(len(targets_two))
std_all = torch.zeros(len(targets_one)+len(targets_two))
ind=0
for x, y in zip(targets_one, targets_two):
m1 = similarity_diff(x, attribute_one, attribute_two, model)
m2 = similarity_diff(y, attribute_one, attribute_two, model)
mean_one[ind] = m1
mean_two[ind] = m2
std_all[ind*2] = m1
std_all[ind*2 + 1] = m2
ind += 1
return (mean_one.mean() - mean_two.mean()) / std_all.std()
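# calc_bias computes a WEAT-style effect size over target sets X, Y and
# attribute sets A, B:
#   ( mean_{x in X} s(x, A, B) - mean_{y in Y} s(y, A, B) ) / std_{w in X u Y} s(w, A, B)
# where s(w, A, B) = mean_{a in A} cos(w, a) - mean_{b in B} cos(w, b) is
# implemented by similarity_diff below.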
def similarity_diff(word, attrs_A, attrs_B, model):
cos_attr_one = torch.zeros(len(attrs_A), requires_grad=True)
cos_attr_two = torch.zeros(len(attrs_B), requires_grad=True)
ind = 0
for a_A, a_B in zip(attrs_A, attrs_B):
cos_attr_one[ind] = cos_diff(word, a_A, model)
cos_attr_two[ind] = cos_diff(word, a_B, model)
ind += 1
return cos_attr_one.mean() - cos_attr_two.mean()
def cos_diff(x, y, model):
return torch.nn.functional.cosine_similarity(model.predict(x), model.predict(y))
def calc_loss(y, t):
"""Calculates the loss
Arguments:
y: torch tensor, input with size (minibatch, nr_of_classes)
t: torch tensor, target expected by loss of size (0 to nr_of_classes-1)
Returns:
loss: scalar, the loss"""
loss = torch.nn.functional.mse_loss(y, t, reduction='mean') # TODO: Test cosine loss... but clustering doesn't use that
return loss
def hvp(ys, xs, v, sparse=False):
"""Multiply the Hessians of y and w by v.
Uses a backprop-like approach to compute the product between the Hessian
and another vector efficiently, which even works for large Hessians.
    Example: if y = 0.5 * w^T A w then hvp(y, [w], [v]) returns an expression
    which evaluates to the same values as 0.5 * (A + A^T) v.
    Arguments:
        ys: scalar/tensor, for example the output of the loss function
        xs: list of torch tensors, tensors over which the Hessian
            should be constructed
        v: list of torch tensors, same shape as xs,
            will be multiplied with the Hessian
    Returns:
        return_grads: list of torch tensors, contains product of Hessian and v.
    Raises:
        ValueError: `xs` and `v` have a different length."""
    if len(xs) != len(v):
        raise ValueError("xs and v must have the same length.")
# First backprop
first_grads = grad(ys, xs, create_graph=True) # , retain_graph=True, create_graph=True)
# Elementwise products
elemwise_products = 0
for grad_elem, v_elem in zip(first_grads, v):
if not sparse:
if grad_elem is not None and v_elem is not None:
elemwise_products += torch.sum(grad_elem * v_elem.detach())
else:
if (grad_elem*v_elem).indices().size(1) > 0:
                elemwise_products += (grad_elem * v_elem).values().sum()
    # Second backprop: the gradient of the elementwise products w.r.t. xs gives
    # the Hessian-vector product
    return_grads = grad(elemwise_products, xs, create_graph=True)
    return return_grads
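# Minimal sanity-check sketch for the dense path (hypothetical tensors):
#   w = torch.randn(3, requires_grad=True)
#   A = torch.randn(3, 3)
#   y = 0.5 * w @ A @ w
#   v = torch.randn(3)
#   hv = hvp(y, [w], [v])   # should match 0.5 * (A + A.T) @ v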
# File: escape/escape/infr/topology.py
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper module for handling emulated test topology based on Mininet.
"""
from escape.infr import log, LAYER_NAME
from escape.nffg_lib.nffg import NFFG
from escape.nffg_lib.nffg_elements import NodeInfra
from escape.util.config import CONFIG
from escape.util.misc import quit_with_error, get_ifaces, remove_junks_at_boot
from mininet.link import TCLink, Intf
from mininet.net import VERSION as MNVERSION, Mininet, MininetWithControlNet
from mininet.node import RemoteController, RemoteSwitch
from mininet.term import makeTerms
from mininet.topo import Topo
class AbstractTopology(Topo):
"""
Abstract class for representing emulated topology.
  Has the functions to build an ESCAPE-specific topology.
  Can be used to define reusable topologies similar to Mininet's high-level API.
  A reusable, convenient and pre-defined way to define a topology, but less
  flexible and powerful.
"""
# Default host options
default_host_opts = None
"""Default host options for Mininet"""
# Default switch options
default_switch_opts = None
"""Default switch options for Mininet"""
# Default link options
default_link_opts = None
"""Default link options for Mininet"""
# Default EE options
default_EE_opts = None
"""Default EE options for Mininet"""
# Type of the Topology class - NEED to be set
# The construction and build of the network is different for the STATIC and
# DYNAMIC way
TYPE = None
"""Type of the Topology class - NEED to be set"""
def __init__ (self, hopts=None, sopts=None, lopts=None, eopts=None):
"""
Init.
:param hopts: host options (optional)
:param sopts: switch options (optional)
:param lopts: link options (optional)
:param eopts: EE options (optional)
:return: None
"""
# Topo is Old-style class
Topo.__init__(self, hopts, sopts, lopts, eopts)
def construct (self, builder=None):
"""
    Base method to construct the topology.
:param builder: optional builder object
"""
raise NotImplementedError
@staticmethod
def get_topo_desc ():
"""
    Return the NFFG object that represents the specific, constructed topology.
:return: topology description
:rtype: :any`NFFG`
"""
raise NotImplementedError
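# A minimal sketch of how a custom topology could subclass AbstractTopology
# (hypothetical class and node names; it follows the pattern of the fallback
# classes below):
#   class SingleSwitchTopo(AbstractTopology):
#     TYPE = "STATIC"
#     def construct(self, builder=None):
#       sw = self.addSwitch('SW1')
#       self.addLink(self.addHost('SAP1'), sw)
#       return self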
class FallbackStaticTopology(AbstractTopology):
"""
Topology class for testing purposes and serve as a fallback topology.
Use the static way for topology compilation.
.. raw:: ascii
+----------+ +----------+
| | | |
| SW1 | | SW2 |
| | | |
+----------+ +----------+
|1 |1
1| 1|
+----------+ +----------+
| |2 2| |
| SW3 +-----------+ SW4 |
| | | |
+----------+ +----------+
|3 |3
1| 1|
+----+ +----+
|SAP1| |SAP2|
+----+ +----+
"""
TYPE = "STATIC"
def construct (self, builder=None):
"""
Assemble the topology description statically.
:param builder: optional builder object
:return: self
:rtype: :any:`FallbackStaticTopology`
"""
# nc1 = self.addEE(name='NC1', {})
# nc2 = self.addEE(name='NC2', {})
log.info("Start static topology creation...")
log.debug("Create Switch with name: SW1")
sw1 = self.addSwitch('SW1')
log.debug("Create Switch with name: SW2")
sw2 = self.addSwitch('SW2')
log.debug("Create Switch with name: SW3")
sw3 = self.addSwitch('SW3')
log.debug("Create Switch with name: SW4")
sw4 = self.addSwitch('SW4')
log.debug("Create SAP with name: SAP1")
sap1 = self.addHost('SAP1')
log.debug("Create SAP with name: SAP2")
sap2 = self.addHost('SAP2')
log.debug("Create Link SW3 <--> SW1")
self.addLink(sw3, sw1)
log.debug("Create Link SW4 <--> SW2")
self.addLink(sw4, sw2)
log.debug("Create Link SW3 <--> SW4")
self.addLink(sw3, sw4)
log.debug("Create Link SAP1 <--> SW3")
self.addLink(sap1, sw3)
log.debug("Create Link SAP2 <--> SW4")
self.addLink(sap2, sw4)
log.info("Static topology creation has been finished!")
return self
@staticmethod
def get_topo_desc ():
"""
Return the topology description.
:return: topo description
:rtype: :class:`NFFG`
"""
# Create NFFG
nffg = NFFG(id="STATIC-FALLBACK-TOPO", name="fallback-static")
# Add switches
sw1 = nffg.add_infra(id="sw1", name="SW1", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw2 = nffg.add_infra(id="sw2", name="SW2", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw3 = nffg.add_infra(id="sw3", name="SW3", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
sw4 = nffg.add_infra(id="sw4", name="SW4", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW)
# Add SAPs
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
# Add links
nffg.add_link(sw1.add_port(1), sw3.add_port(1), id="l1")
nffg.add_link(sw2.add_port(1), sw4.add_port(1), id="l2")
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3")
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4")
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5")
# Duplicate one-way static links to become undirected in order to fit to
# the orchestration algorithm
# nffg.duplicate_static_links()
return nffg
class FallbackDynamicTopology(AbstractTopology):
"""
Topology class for testing purposes and serve as a fallback topology.
Use the dynamic way for topology compilation.
.. raw:: ascii
+----------+ +----------+
| | | |
| EE1 | | EE2 |
| | | |
+----------+ +----------+
|1 |1
1| 1|
+----------+ +----------+
| |2 2| |
| S3 +-----------+ S4 |
| | | |
+----------+ +----------+
|3 |3
1| 1|
+----+ +----+
|SAP1| |SAP2|
+----+ +----+
"""
TYPE = "DYNAMIC"
def construct (self, builder=None):
"""
Set a topology with NETCONF capability for mostly testing.
:param builder: builder object
:return: None
"""
log.info("Start dynamic topology creation...")
builder.create_Controller("ESCAPE")
agt1, nc_sw1 = builder.create_NETCONF_EE(name='NC1')
agt2, nc_sw2 = builder.create_NETCONF_EE(name='NC2')
sw3 = builder.create_Switch(name='SW3')
sw4 = builder.create_Switch(name='SW4')
sap1 = builder.create_SAP(name='SAP1')
sap2 = builder.create_SAP(name='SAP2')
builder.create_Link(sw3, nc_sw1)
builder.create_Link(sw4, nc_sw2)
builder.create_Link(sw3, sw4)
builder.create_Link(sap1, sw3)
builder.create_Link(sap2, sw4)
log.info("Dynamic topology creation has been finished!")
@staticmethod
def get_topo_desc ():
"""
Return the topology description.
:return: topo description
:rtype: :class:`NFFG`
"""
# Create NFFG
nffg = NFFG(id="DYNAMIC-FALLBACK-TOPO", name="fallback-dynamic")
# Add NETCONF capable containers a.k.a. Execution Environments
nc1 = nffg.add_infra(id="nc1", name="NC1", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc2 = nffg.add_infra(id="nc2", name="NC2", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_EE, cpu=5, mem=5, storage=5,
delay=0.9, bandwidth=5000)
nc1.add_supported_type(['A', 'B'])
nc2.add_supported_type(['A', 'C'])
# Add inter-EE switches
sw3 = nffg.add_infra(id="sw3", name="SW3", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
sw4 = nffg.add_infra(id="sw4", name="SW4", domain="INTERNAL",
infra_type=NFFG.TYPE_INFRA_SDN_SW, delay=0.2,
bandwidth=10000)
# Add SAPs
sap1 = nffg.add_sap(id="sap1", name="SAP1")
sap2 = nffg.add_sap(id="sap2", name="SAP2")
# Add links
linkres = {'delay': 1.5, 'bandwidth': 2000}
nffg.add_link(nc1.add_port(1), sw3.add_port(1), id="l1", **linkres)
nffg.add_link(nc2.add_port(1), sw4.add_port(1), id="l2", **linkres)
nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3", **linkres)
nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4", **linkres)
nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5", **linkres)
# Duplicate one-way static links to become undirected in order to fit to
# the orchestration algorithm
# No need for that, ESCAPENetworkBridge do this later
# nffg.duplicate_static_links()
return nffg
class InternalControllerProxy(RemoteController):
"""
  Controller class for the emulated Mininet network. Makes a connection with the
  internal controller initiated by InternalPOXAdapter.
"""
def __init__ (self, name="InternalPOXController", ip='127.0.0.1', port=6653,
**kwargs):
"""
Init.
:param name: name of the controller (default: InternalPOXController)
:type name: str
:param ip: IP address (default: 127.0.0.1)
:type ip: str
    :param port: port number (default: 6653)
:type port: int
:return: None
"""
# Using old-style class because of MN's RemoteController class
RemoteController.__init__(self, name, ip, port, **kwargs)
def checkListening (self):
"""
Check the controller port is open.
"""
listening = self.cmd("echo A | telnet -e A %s %d" % (self.ip, self.port))
if 'Connected' not in listening:
log.debug(
"Unable to contact with internal controller at %s:%d. Waiting..." % (
self.ip, self.port))
class ESCAPENetworkBridge(object):
"""
Internal class for representing the emulated topology.
Represents a container class for network elements such as switches, nodes,
execution environments, links etc. Contains network management functions
  similar to Mininet's mid-level API, extended with ESCAPEv2 related capabilities.
  Separates the internally used interface from the original Mininet object to
  implement loose coupling and avoid changes caused by Mininet API changes,
  e.g. 2.1.0 -> 2.2.0.
Follows Bridge design pattern.
"""
def __init__ (self, network=None, topo_desc=None):
"""
Initialize Mininet implementation with proper attributes.
    Use network as the hidden Mininet topology if it is given.
:param topo_desc: static topology description e.g. the related NFFG
:type topo_desc: :class:`NFFG`
:param network: use this specific Mininet object for init (default: None)
:type network: :class:`mininet.net.MininetWithControlNet`
:return: None
"""
log.debug("Init ESCAPENetworkBridge with topo description: %s" % topo_desc)
if network is not None:
self.__mininet = network
else:
log.warning(
"Network implementation object is missing! Use Builder class instead "
"of direct initialization. Creating bare Mininet object anyway...")
self.__mininet = MininetWithControlNet()
# Topology description which is emulated by the Mininet
self.topo_desc = topo_desc
    # Duplicate static links to ensure undirected neighbour relationships
if self.topo_desc is not None:
back_links = [l.id for u, v, l in
self.topo_desc.network.edges_iter(data=True) if
l.backward is True]
if len(back_links) == 0:
log.debug("No backward link has been detected! Duplicate STATIC links "
"to ensure undirected relationship for mapping...")
self.topo_desc.duplicate_static_links()
# Need to clean after shutdown
self._need_clean = None
# There is no such flag in the Mininet | |
#{n_detectors_found}: Detector: {detector_name}.")
fpath_out = save_data_to_hdf5(
fpath_out,
new_data,
metadata=mdata,
fname_add_version=fname_add_version,
file_overwrite_existing=file_overwrite_existing,
create_each_det=create_each_det,
)
d_dict = {"dataset": new_data, "file_name": fpath_out, "detector_name": detector_name, "metadata": mdata}
data_output.append(d_dict)
return data_output
def map_data2D_xfm(
run_id_uid,
fpath,
create_each_det=False,
fname_add_version=False,
completed_scans_only=False,
file_overwrite_existing=False,
output_to_file=True,
):
"""
Transfer the data from databroker into a correct format following the
shape of 2D scan.
This function is used at XFM beamline for step scan.
Save the new data dictionary to hdf file if needed.
.. note:: It is recommended to read data from databroker into memory
directly, instead of saving to files. This is ongoing work.
Parameters
----------
run_id_uid : int
ID or UID of a run
fpath: str
path to save hdf file
create_each_det: bool, optional
        If set to False, data is not created for each individual detector,
        which is useful when the data size is too large. Creating per-detector
        datasets slows down the creation of the hdf file for large data sizes.
fname_add_version : bool
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
        False: the conversion fails if the file already exists.
completed_scans_only : bool
True: process only completed scans (for which ``stop`` document exists in
the database). Failed scan for which ``stop`` document exists are considered
completed even if not the whole image was scanned. If incomplete scan is
encountered: an exception is thrown.
False: the feature is disabled, incomplete scan will be processed.
file_overwrite_existing : bool, keyword parameter
This option should be used if the existing file should be deleted and replaced
with the new file with the same name. This option should be used with caution,
since the existing file may contain processed data, which will be permanently deleted.
True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,
then new versions of the existing file will always be created.
False: do not overwrite existing files. If the file already exists, then the exception
is raised.
output_to_file : bool, optional
save data to hdf5 file if True
Returns
-------
dict of data in 2D format matching x,y scanning positions
"""
hdr = db[run_id_uid]
runid = hdr.start["scan_id"] # Replace with the true value (runid may be relative, such as -2)
if completed_scans_only and not _is_scan_complete(hdr):
raise Exception("Scan is incomplete. Only completed scans are currently processed.")
# Generate the default file name for the scan
if fpath is None:
fpath = f"scan2D_{runid}.h5"
# Output data is the list of data structures for all available detectors
data_output = []
# spectrum_len = 4096
start_doc = hdr["start"]
# The dictionary holding scan metadata
mdata = _extract_metadata_from_header(hdr)
plan_n = start_doc.get("plan_name")
if "fly" not in plan_n: # not fly scan
datashape = start_doc["shape"] # vertical first then horizontal
fly_type = None
snake_scan = start_doc.get("snaking")
if snake_scan[1] is True:
fly_type = "pyramid"
current_dir = os.path.dirname(os.path.realpath(__file__))
config_file = "xfm_pv_config.json"
config_path = sep_v.join(current_dir.split(sep_v)[:-2] + ["configs", config_file])
with open(config_path, "r") as json_data:
config_data = json.load(json_data)
# try except can be added later if scan is not completed.
data = db.get_table(hdr, fill=True, convert_times=False)
xrf_detector_names = config_data["xrf_detector"]
data_out = map_data2D(
data,
datashape,
det_list=xrf_detector_names,
pos_list=hdr.start.motors,
create_each_det=create_each_det,
scaler_list=config_data["scaler_list"],
fly_type=fly_type,
)
fpath_out = fpath
if output_to_file:
print("Saving data to hdf file.")
fpath_out = save_data_to_hdf5(
fpath_out,
data_out,
metadata=mdata,
fname_add_version=fname_add_version,
file_overwrite_existing=file_overwrite_existing,
create_each_det=create_each_det,
)
detector_name = "xs"
d_dict = {"dataset": data_out, "file_name": fpath_out, "detector_name": detector_name, "metadata": mdata}
data_output.append(d_dict)
return data_output
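# Hypothetical usage sketch (the run ID and file name are made up):
#   results = map_data2D_xfm(12345, "scan2D_12345.h5", create_each_det=True)
#   data_out = results[0]["dataset"]     # per-detector and summed arrays
#   saved_to = results[0]["file_name"]   # actual path, may carry a version suffix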
def write_db_to_hdf(
fpath,
data,
datashape,
det_list=("xspress3_ch1", "xspress3_ch2", "xspress3_ch3"),
pos_list=("zpssx[um]", "zpssy[um]"),
scaler_list=("sclr1_ch3", "sclr1_ch4"),
fname_add_version=False,
fly_type=None,
subscan_dims=None,
base_val=None,
):
"""
    Assume data is obtained from databroker, and save the data to hdf file.
This function can handle stopped/aborted scans.
.. note:: This function should become part of suitcase
Parameters
----------
fpath: str
path to save hdf file
data : pandas.core.frame.DataFrame
data from data broker
datashape : tuple or list
shape of two D image
det_list : list, tuple, optional
list of detector channels
pos_list : list, tuple, optional
list of pos pv
scaler_list : list, tuple, optional
list of scaler pv
fname_add_version : bool
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
False: the exception is thrown if the file exists.
"""
interpath = "xrfmap"
if os.path.exists(fpath):
if fname_add_version:
fpath = _get_fpath_not_existing(fpath)
else:
raise IOError(f"'write_db_to_hdf': File '{fpath}' already exists.")
with h5py.File(fpath, "a") as f:
sum_data = None
new_v_shape = datashape[0] # to be updated if scan is not completed
spectrum_len = 4096 # standard
for n, c_name in enumerate(det_list):
if c_name in data:
detname = "det" + str(n + 1)
dataGrp = f.create_group(interpath + "/" + detname)
logger.info("read data from %s" % c_name)
channel_data = data[c_name]
                # new vertical shape is defined to ignore zero points caused by stopped/aborted scans
new_v_shape = len(channel_data) // datashape[1]
new_data = np.vstack(channel_data)
new_data = new_data[: new_v_shape * datashape[1], :]
new_data = new_data.reshape([new_v_shape, datashape[1], len(channel_data[1])])
if new_data.shape[2] != spectrum_len:
# merlin detector has spectrum len 2048
# make all the spectrum len to 4096, to avoid unpredicted error in fitting part
new_tmp = np.zeros([new_data.shape[0], new_data.shape[1], spectrum_len])
new_tmp[:, :, : new_data.shape[2]] = new_data
new_data = new_tmp
if fly_type in ("pyramid",):
new_data = flip_data(new_data, subscan_dims=subscan_dims)
if sum_data is None:
sum_data = np.copy(new_data)
else:
sum_data += new_data
ds_data = dataGrp.create_dataset("counts", data=new_data, compression="gzip")
ds_data.attrs["comments"] = "Experimental data from channel " + str(n)
# summed data
dataGrp = f.create_group(interpath + "/detsum")
if sum_data is not None:
sum_data = sum_data.reshape([new_v_shape, datashape[1], spectrum_len])
ds_data = dataGrp.create_dataset("counts", data=sum_data, compression="gzip")
ds_data.attrs["comments"] = "Experimental data from channel sum"
# position data
dataGrp = f.create_group(interpath + "/positions")
pos_names, pos_data = get_name_value_from_db(pos_list, data, datashape)
for i in range(len(pos_names)):
if "x" in pos_names[i]:
pos_names[i] = "x_pos"
elif "y" in pos_names[i]:
pos_names[i] = "y_pos"
if "x_pos" not in pos_names or "y_pos" not in pos_names:
pos_names = ["x_pos", "y_pos"]
# need to change shape to sth like [2, 100, 100]
data_temp = np.zeros([pos_data.shape[2], pos_data.shape[0], pos_data.shape[1]])
for i in range(pos_data.shape[2]):
data_temp[i, :, :] = pos_data[:, :, i]
if fly_type in ("pyramid",):
for i in range(data_temp.shape[0]):
# flip position the same as data flip on det counts
data_temp[i, :, :] = flip_data(data_temp[i, :, :], subscan_dims=subscan_dims)
dataGrp.create_dataset("name", data=helper_encode_list(pos_names))
dataGrp.create_dataset("pos", data=data_temp[:, :new_v_shape, :])
# scaler data
dataGrp = f.create_group(interpath + "/scalers")
scaler_names, scaler_data = get_name_value_from_db(scaler_list, data, datashape)
if fly_type in ("pyramid",):
scaler_data = flip_data(scaler_data, subscan_dims=subscan_dims)
dataGrp.create_dataset("name", data=helper_encode_list(scaler_names))
if base_val is not None: # base line shift for detector, for SRX
base_val = np.array([base_val])
if len(base_val) == 1:
scaler_data = np.abs(scaler_data - base_val)
else:
                for i in range(scaler_data.shape[2]):
scaler_data[:, :, i] = np.abs(scaler_data[:, :, i] - base_val[i])
dataGrp.create_dataset("val", data=scaler_data[:new_v_shape, :])
return fpath
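# Layout of the file written above (all groups live under the "xrfmap" root):
#   xrfmap/det<N>/counts          per-channel spectra, shape (rows, cols, 4096)
#   xrfmap/detsum/counts          spectra summed over all detector channels
#   xrfmap/positions/{name,pos}   motor names and x/y position maps
#   xrfmap/scalers/{name,val}     scaler names and values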
def assemble_data_SRX_stepscan(
data,
datashape,
det_list=("xspress3_ch1", "xspress3_ch2", "xspress3_ch3"),
pos_list=("zpssx[um]", "zpssy[um]"),
scaler_list=("sclr1_ch3", "sclr1_ch4"),
fname_add_version=False,
create_each_det=True,
fly_type=None,
subscan_dims=None,
base_val=None,
):
"""
    Convert stepscan data from the SRX beamline obtained from databroker into the form accepted
    by the ``write_db_to_hdf_base`` function.
This function can handle stopped/aborted scans.
Parameters
----------
data : pandas.core.frame.DataFrame
data from data broker
datashape : tuple or list
shape of two D image
det_list : list, tuple, optional
list of detector channels
pos_list : list, tuple, optional
list of pos pv
scaler_list : list, tuple, optional
list of scaler pv
fname_add_version : bool
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
False: the exception is thrown if the file exists.
create_each_det: bool
True: output dataset contains data for individual detectors, False: output
dataset contains only sum of all detectors.
"""
data_assembled = {}
sum_data = None
new_v_shape = datashape[0] # to be updated if scan is not completed
spectrum_len = 4096 # standard
for n, c_name in enumerate(det_list):
if c_name in data:
detname = "det" + str(n + 1)
channel_data = data[c_name]
            # new vertical shape is defined to ignore zero points caused by stopped/aborted scans
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import chr
from builtins import range
from builtins import object
from future.utils import python_2_unicode_compatible
import hashlib
import sys
import re
import os
import codecs
import ecdsa
import ctypes
import binascii
import bisect
import hmac
import itertools
from binascii import hexlify, unhexlify
import unicodedata
from .base58 import ripemd160, Base58, doublesha256
from .bip32 import BIP32Key, parse_path
from .dictionary import words as BrainKeyDictionary
from .dictionary import words_bip39 as MnemonicDictionary
from .py23 import py23_bytes, PY2
from .prefix import Prefix
PBKDF2_ROUNDS = 2048
# From <https://stackoverflow.com/questions/212358/binary-search-bisection-in-python/2233940#2233940>
def binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi
hi = hi if hi is not None else len(a) # hi defaults to len(a)
pos = bisect.bisect_left(a, x, lo, hi) # find insertion position
return pos if pos != hi and a[pos] == x else -1 # don't walk off the end
class PasswordKey(Prefix):
""" This class derives a private key given the account name, the
role and a password. It leverages the technology of Brainkeys
and allows people to have a secure private key by providing a
passphrase only.
"""
def __init__(self, account, password, role="active", prefix=None):
self.set_prefix(prefix)
self.account = account
self.role = role
self.password = password
def normalize(self, seed):
""" Correct formating with single whitespace syntax and no trailing space """
return " ".join(re.compile("[\t\n\v\f\r ]+").split(seed))
def get_private(self):
""" Derive private key from the account, the role and the password
"""
if self.account is None and self.role is None:
seed = self.password
elif self.account == "" and self.role == "":
seed = self.password
else:
seed = self.account + self.role + self.password
seed = self.normalize(seed)
a = py23_bytes(seed, 'utf8')
s = hashlib.sha256(a).digest()
return PrivateKey(hexlify(s).decode('ascii'), prefix=self.prefix)
def get_public(self):
return self.get_private().pubkey
def get_private_key(self):
return self.get_private()
def get_public_key(self):
return self.get_public()
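# Hypothetical usage sketch (account name and passphrase are made up):
#   pk = PasswordKey("alice", "some-long-passphrase", role="active")
#   priv = pk.get_private_key()   # deterministic PrivateKey for alice/active
#   pub = pk.get_public_key()     # matching public key, honours the chain prefix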
@python_2_unicode_compatible
class BrainKey(Prefix):
"""Brainkey implementation similar to the graphene-ui web-wallet.
:param str brainkey: Brain Key
:param int sequence: Sequence number for consecutive keys
Keys in Graphene are derived from a seed brain key which is a string of
16 words out of a predefined dictionary with 49744 words. It is a
simple single-chain key derivation scheme that is not compatible with
BIP44 but easy to use.
Given the brain key, a private key is derived as::
privkey = SHA256(SHA512(brainkey + " " + sequence))
Incrementing the sequence number yields a new key that can be
regenerated given the brain key.
"""
def __init__(self, brainkey=None, sequence=0, prefix=None):
self.set_prefix(prefix)
if not brainkey:
self.brainkey = self.suggest()
else:
self.brainkey = self.normalize(brainkey).strip()
self.sequence = sequence
def __next__(self):
""" Get the next private key (sequence number increment) for
iterators
"""
return self.next_sequence()
def next_sequence(self):
""" Increment the sequence number by 1 """
self.sequence += 1
return self
def normalize(self, brainkey):
""" Correct formating with single whitespace syntax and no trailing space """
return " ".join(re.compile("[\t\n\v\f\r ]+").split(brainkey))
def get_brainkey(self):
""" Return brain key of this instance """
return self.normalize(self.brainkey)
def get_private(self):
""" Derive private key from the brain key and the current sequence
number
"""
encoded = "%s %d" % (self.brainkey, self.sequence)
a = py23_bytes(encoded, 'ascii')
s = hashlib.sha256(hashlib.sha512(a).digest()).digest()
return PrivateKey(hexlify(s).decode('ascii'), prefix=self.prefix)
def get_blind_private(self):
""" Derive private key from the brain key (and no sequence number)
"""
a = py23_bytes(self.brainkey, 'ascii')
return PrivateKey(hashlib.sha256(a).hexdigest(), prefix=self.prefix)
def get_public(self):
return self.get_private().pubkey
def get_private_key(self):
return self.get_private()
def get_public_key(self):
return self.get_public()
def suggest(self, word_count=16):
""" Suggest a new random brain key. Randomness is provided by the
operating system using ``os.urandom()``.
"""
brainkey = [None] * word_count
dict_lines = BrainKeyDictionary.split(',')
if not len(dict_lines) == 49744:
raise AssertionError()
for j in range(0, word_count):
urand = os.urandom(2)
if isinstance(urand, str):
urand = py23_bytes(urand, 'ascii')
if PY2:
num = int(codecs.encode(urand[::-1], 'hex'), 16)
else:
num = int.from_bytes(urand, byteorder="little")
rndMult = num / 2 ** 16 # returns float between 0..1 (inclusive)
wIdx = int(round(len(dict_lines) * rndMult))
brainkey[j] = dict_lines[wIdx]
return " ".join(brainkey).upper()
# From https://github.com/trezor/python-mnemonic/blob/master/mnemonic/mnemonic.py
#
# Copyright (c) 2013 <NAME>
# Copyright (c) 2017 mruddy
@python_2_unicode_compatible
class Mnemonic(object):
"""BIP39 mnemoric implementation"""
def __init__(self):
self.wordlist = MnemonicDictionary.split(',')
self.radix = 2048
def generate(self, strength=128):
""" Generates a word list based on the given strength
:param int strength: initial entropy strength, must be one of [128, 160, 192, 224, 256]
"""
if strength not in [128, 160, 192, 224, 256]:
raise ValueError(
"Strength should be one of the following [128, 160, 192, 224, 256], but it is not (%d)."
% strength
)
return self.to_mnemonic(os.urandom(strength // 8))
# Adapted from <http://tinyurl.com/oxmn476>
def to_entropy(self, words):
if not isinstance(words, list):
words = words.split(" ")
if len(words) not in [12, 15, 18, 21, 24]:
raise ValueError(
"Number of words must be one of the following: [12, 15, 18, 21, 24], but it is not (%d)."
% len(words)
)
# Look up all the words in the list and construct the
# concatenation of the original entropy and the checksum.
concatLenBits = len(words) * 11
concatBits = [False] * concatLenBits
wordindex = 0
use_binary_search = True
for word in words:
# Find the words index in the wordlist
ndx = (
binary_search(self.wordlist, word)
if use_binary_search
else self.wordlist.index(word)
)
if ndx < 0:
raise LookupError('Unable to find "%s" in word list.' % word)
# Set the next 11 bits to the value of the index.
for ii in range(11):
concatBits[(wordindex * 11) + ii] = (ndx & (1 << (10 - ii))) != 0
wordindex += 1
checksumLengthBits = concatLenBits // 33
entropyLengthBits = concatLenBits - checksumLengthBits
# Extract original entropy as bytes.
entropy = bytearray(entropyLengthBits // 8)
for ii in range(len(entropy)):
for jj in range(8):
if concatBits[(ii * 8) + jj]:
entropy[ii] |= 1 << (7 - jj)
# Take the digest of the entropy.
hashBytes = hashlib.sha256(entropy).digest()
if sys.version < "3":
hashBits = list(
itertools.chain.from_iterable(
(
[ord(c) & (1 << (7 - i)) != 0 for i in range(8)]
for c in hashBytes
)
)
)
else:
hashBits = list(
itertools.chain.from_iterable(
([c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)
)
)
# Check all the checksum bits.
for i in range(checksumLengthBits):
if concatBits[entropyLengthBits + i] != hashBits[i]:
raise ValueError("Failed checksum.")
return entropy
def to_mnemonic(self, data):
if len(data) not in [16, 20, 24, 28, 32]:
raise ValueError(
"Data length should be one of the following: [16, 20, 24, 28, 32], but it is not (%d)."
% len(data)
)
h = hashlib.sha256(data).hexdigest()
b = (
bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8)
+ bin(int(h, 16))[2:].zfill(256)[: len(data) * 8 // 32]
)
result = []
for i in range(len(b) // 11):
idx = int(b[i * 11 : (i + 1) * 11], 2)
result.append(self.wordlist[idx])
result_phrase = " ".join(result)
return result_phrase
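    # Worked example of the bit accounting above: 16 bytes of entropy give
    # 128 bits, SHA256 contributes 128/32 = 4 checksum bits, and the resulting
    # 132 bits split into 132/11 = 12 words; 32 bytes give 256 + 8 = 264 bits,
    # i.e. 24 words.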
def check(self, mnemonic):
""" Checks the mnemonic word list is valid
        :param list mnemonic: mnemonic word list with length of 12, 15, 18, 21, 24
:returns: True, when valid
"""
mnemonic = self.normalize_string(mnemonic).split(" ")
# list of valid mnemonic lengths
if len(mnemonic) not in [12, 15, 18, 21, 24]:
return False
try:
idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
b = "".join(idx)
except ValueError:
return False
l = len(b) # noqa: E741
d = b[: l // 33 * 32]
h = b[-l // 33 :]
nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip("L").zfill(l // 33 * 8))
nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[: l // 33]
return h == nh
def check_word(self, word):
return word in self.wordlist
def expand_word(self, prefix):
"""Expands a word when sufficient chars are given
:param str prefix: first chars of a valid dict word
"""
if prefix in self.wordlist:
return prefix
else:
matches = [word for word in self.wordlist if word.startswith(prefix)]
if len(matches) == 1: # matched exactly one word in the wordlist
return matches[0]
else:
# exact match not found.
# this is not a validation routine, just return the input
return prefix
def expand(self, mnemonic):
"""Expands all words given in a list"""
return " ".join(map(self.expand_word, mnemonic.split(" ")))
@classmethod
def normalize_string(cls, txt):
"""Normalizes strings"""
if isinstance(txt, str if sys.version < "3" else bytes):
utxt = txt.decode("utf8")
        elif isinstance(txt, unicode if sys.version < "3" else str):
            utxt = txt
        else:
            raise TypeError("String value expected")
        return unicodedata.normalize("NFKD", utxt)
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_thrusterDynamicEffector')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_thrusterDynamicEffector')
_thrusterDynamicEffector = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_thrusterDynamicEffector', [dirname(__file__)])
except ImportError:
import _thrusterDynamicEffector
return _thrusterDynamicEffector
try:
_mod = imp.load_module('_thrusterDynamicEffector', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_thrusterDynamicEffector = swig_import_helper()
del swig_import_helper
else:
import _thrusterDynamicEffector
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _thrusterDynamicEffector.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _thrusterDynamicEffector.SwigPyIterator_value(self)
def incr(self, n=1):
return _thrusterDynamicEffector.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _thrusterDynamicEffector.SwigPyIterator_decr(self, n)
def distance(self, x):
return _thrusterDynamicEffector.SwigPyIterator_distance(self, x)
def equal(self, x):
return _thrusterDynamicEffector.SwigPyIterator_equal(self, x)
def copy(self):
return _thrusterDynamicEffector.SwigPyIterator_copy(self)
def next(self):
return _thrusterDynamicEffector.SwigPyIterator_next(self)
def __next__(self):
return _thrusterDynamicEffector.SwigPyIterator___next__(self)
def previous(self):
return _thrusterDynamicEffector.SwigPyIterator_previous(self)
def advance(self, n):
return _thrusterDynamicEffector.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _thrusterDynamicEffector.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _thrusterDynamicEffector.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _thrusterDynamicEffector.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _thrusterDynamicEffector.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _thrusterDynamicEffector.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _thrusterDynamicEffector.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _thrusterDynamicEffector.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
def new_doubleArray(nelements):
return _thrusterDynamicEffector.new_doubleArray(nelements)
new_doubleArray = _thrusterDynamicEffector.new_doubleArray
def delete_doubleArray(ary):
return _thrusterDynamicEffector.delete_doubleArray(ary)
delete_doubleArray = _thrusterDynamicEffector.delete_doubleArray
def doubleArray_getitem(ary, index):
return _thrusterDynamicEffector.doubleArray_getitem(ary, index)
doubleArray_getitem = _thrusterDynamicEffector.doubleArray_getitem
def doubleArray_setitem(ary, index, value):
return _thrusterDynamicEffector.doubleArray_setitem(ary, index, value)
doubleArray_setitem = _thrusterDynamicEffector.doubleArray_setitem
def new_longArray(nelements):
return _thrusterDynamicEffector.new_longArray(nelements)
new_longArray = _thrusterDynamicEffector.new_longArray
def delete_longArray(ary):
return _thrusterDynamicEffector.delete_longArray(ary)
delete_longArray = _thrusterDynamicEffector.delete_longArray
def longArray_getitem(ary, index):
return _thrusterDynamicEffector.longArray_getitem(ary, index)
longArray_getitem = _thrusterDynamicEffector.longArray_getitem
def longArray_setitem(ary, index, value):
return _thrusterDynamicEffector.longArray_setitem(ary, index, value)
longArray_setitem = _thrusterDynamicEffector.longArray_setitem
def new_intArray(nelements):
return _thrusterDynamicEffector.new_intArray(nelements)
new_intArray = _thrusterDynamicEffector.new_intArray
def delete_intArray(ary):
return _thrusterDynamicEffector.delete_intArray(ary)
delete_intArray = _thrusterDynamicEffector.delete_intArray
def intArray_getitem(ary, index):
return _thrusterDynamicEffector.intArray_getitem(ary, index)
intArray_getitem = _thrusterDynamicEffector.intArray_getitem
def intArray_setitem(ary, index, value):
return _thrusterDynamicEffector.intArray_setitem(ary, index, value)
intArray_setitem = _thrusterDynamicEffector.intArray_setitem
def new_shortArray(nelements):
return _thrusterDynamicEffector.new_shortArray(nelements)
new_shortArray = _thrusterDynamicEffector.new_shortArray
def delete_shortArray(ary):
return _thrusterDynamicEffector.delete_shortArray(ary)
delete_shortArray = _thrusterDynamicEffector.delete_shortArray
def shortArray_getitem(ary, index):
return _thrusterDynamicEffector.shortArray_getitem(ary, index)
shortArray_getitem = _thrusterDynamicEffector.shortArray_getitem
def shortArray_setitem(ary, index, value):
return _thrusterDynamicEffector.shortArray_setitem(ary, index, value)
shortArray_setitem = _thrusterDynamicEffector.shortArray_setitem
def getStructSize(self):
try:
return eval('sizeof_' + repr(self).split(';')[0].split('.')[-1])
except (NameError) as e:
typeString = 'sizeof_' + repr(self).split(';')[0].split('.')[-1]
raise NameError(e.message + '\nYou tried to get this size macro: ' + typeString +
'\n It appears to be undefined. \nYou need to run the SWIG GEN_SIZEOF' +
' SWIG macro against the class/struct in your SWIG file if you want to ' +
' make this call.\n')
def protectSetAttr(self, name, value):
if(hasattr(self, name) or name == 'this'):
object.__setattr__(self, name, value)
else:
raise ValueError('You tried to add this variable: ' + name + '\n' +
'To this class: ' + str(self))
def protectAllClasses(moduleType):
import inspect
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
for member in clsmembers:
try:
exec(str(member[0]) + '.__setattr__ = protectSetAttr')
exec(str(member[0]) + '.getStructSize = getStructSize')
except (AttributeError, TypeError) as e:
pass
def new_boolArray(nelements):
return _thrusterDynamicEffector.new_boolArray(nelements)
new_boolArray = _thrusterDynamicEffector.new_boolArray
def delete_boolArray(ary):
return _thrusterDynamicEffector.delete_boolArray(ary)
delete_boolArray = _thrusterDynamicEffector.delete_boolArray
def boolArray_getitem(ary, index):
return _thrusterDynamicEffector.boolArray_getitem(ary, index)
boolArray_getitem = _thrusterDynamicEffector.boolArray_getitem
def boolArray_setitem(ary, index, value):
return _thrusterDynamicEffector.boolArray_setitem(ary, index, value)
boolArray_setitem = _thrusterDynamicEffector.boolArray_setitem
class IntVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
__repr__ = _swig_repr
def iterator(self):
return _thrusterDynamicEffector.IntVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _thrusterDynamicEffector.IntVector___nonzero__(self)
def __bool__(self):
return _thrusterDynamicEffector.IntVector___bool__(self)
def __len__(self):
return _thrusterDynamicEffector.IntVector___len__(self)
def __getslice__(self, i, j):
return _thrusterDynamicEffector.IntVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _thrusterDynamicEffector.IntVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _thrusterDynamicEffector.IntVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _thrusterDynamicEffector.IntVector___delitem__(self, *args)
def __getitem__(self, *args):
return _thrusterDynamicEffector.IntVector___getitem__(self, *args)
def __setitem__(self, *args):
return _thrusterDynamicEffector.IntVector___setitem__(self, *args)
def pop(self):
return _thrusterDynamicEffector.IntVector_pop(self)
def append(self, x):
return _thrusterDynamicEffector.IntVector_append(self, x)
def empty(self):
return _thrusterDynamicEffector.IntVector_empty(self)
def size(self):
return _thrusterDynamicEffector.IntVector_size(self)
def swap(self, v):
return _thrusterDynamicEffector.IntVector_swap(self, v)
def begin(self):
return _thrusterDynamicEffector.IntVector_begin(self)
def end(self):
return _thrusterDynamicEffector.IntVector_end(self)
def rbegin(self):
return _thrusterDynamicEffector.IntVector_rbegin(self)
def rend(self):
return _thrusterDynamicEffector.IntVector_rend(self)
def clear(self):
return _thrusterDynamicEffector.IntVector_clear(self)
def get_allocator(self):
return _thrusterDynamicEffector.IntVector_get_allocator(self)
def pop_back(self):
return _thrusterDynamicEffector.IntVector_pop_back(self)
def erase(self, *args):
return _thrusterDynamicEffector.IntVector_erase(self, *args)
def __init__(self, *args):
this = _thrusterDynamicEffector.new_IntVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
return _thrusterDynamicEffector.IntVector_push_back(self, x)
def front(self):
return _thrusterDynamicEffector.IntVector_front(self)
def back(self):
return _thrusterDynamicEffector.IntVector_back(self)
def assign(self, n, x):
return _thrusterDynamicEffector.IntVector_assign(self, n, x)
def resize(self, *args):
return _thrusterDynamicEffector.IntVector_resize(self, *args)
def insert(self, *args):
return _thrusterDynamicEffector.IntVector_insert(self, *args)
def reserve(self, n):
return _thrusterDynamicEffector.IntVector_reserve(self, n)
def capacity(self):
return _thrusterDynamicEffector.IntVector_capacity(self)
__swig_destroy__ = _thrusterDynamicEffector.delete_IntVector
__del__ = lambda self: None
IntVector_swigregister = _thrusterDynamicEffector.IntVector_swigregister
IntVector_swigregister(IntVector)
class DoubleVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
__repr__ = _swig_repr
def iterator(self):
return _thrusterDynamicEffector.DoubleVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _thrusterDynamicEffector.DoubleVector___nonzero__(self)
def __bool__(self):
return _thrusterDynamicEffector.DoubleVector___bool__(self)
def __len__(self):
return _thrusterDynamicEffector.DoubleVector___len__(self)
def __getslice__(self, i, j):
return _thrusterDynamicEffector.DoubleVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _thrusterDynamicEffector.DoubleVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _thrusterDynamicEffector.DoubleVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _thrusterDynamicEffector.DoubleVector___delitem__(self, *args)
def __getitem__(self, *args):
return _thrusterDynamicEffector.DoubleVector___getitem__(self, *args)
def __setitem__(self, *args):
return _thrusterDynamicEffector.DoubleVector___setitem__(self, *args)
def pop(self):
return _thrusterDynamicEffector.DoubleVector_pop(self)
def append(self, x):
return _thrusterDynamicEffector.DoubleVector_append(self, x)
def empty(self):
return _thrusterDynamicEffector.DoubleVector_empty(self)
def size(self):
return _thrusterDynamicEffector.DoubleVector_size(self)
def swap(self, v):
return _thrusterDynamicEffector.DoubleVector_swap(self, v)
def begin(self):
return _thrusterDynamicEffector.DoubleVector_begin(self)
def end(self):
return _thrusterDynamicEffector.DoubleVector_end(self)
def rbegin(self):
return _thrusterDynamicEffector.DoubleVector_rbegin(self)
def rend(self):
return _thrusterDynamicEffector.DoubleVector_rend(self)
def clear(self):
return _thrusterDynamicEffector.DoubleVector_clear(self)
def get_allocator(self):
return _thrusterDynamicEffector.DoubleVector_get_allocator(self)
def pop_back(self):
return _thrusterDynamicEffector.DoubleVector_pop_back(self)
def erase(self, *args):
return _thrusterDynamicEffector.DoubleVector_erase(self, *args)
def __init__(self, *args):
this = _thrusterDynamicEffector.new_DoubleVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
return _thrusterDynamicEffector.DoubleVector_push_back(self, x)
def front(self):
return _thrusterDynamicEffector.DoubleVector_front(self)
def back(self):
return _thrusterDynamicEffector.DoubleVector_back(self)
def assign(self, n, x):
return _thrusterDynamicEffector.DoubleVector_assign(self, n, x)
def resize(self, *args):
return _thrusterDynamicEffector.DoubleVector_resize(self, *args)
def insert(self, *args):
return _thrusterDynamicEffector.DoubleVector_insert(self, *args)
def reserve(self, n):
return _thrusterDynamicEffector.DoubleVector_reserve(self, n)
def capacity(self):
return _thrusterDynamicEffector.DoubleVector_capacity(self)
__swig_destroy__ = _thrusterDynamicEffector.delete_DoubleVector
__del__ = lambda self: None
DoubleVector_swigregister = _thrusterDynamicEffector.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)
class StringVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StringVector, name)
__repr__ = _swig_repr
def iterator(self):
return _thrusterDynamicEffector.StringVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _thrusterDynamicEffector.StringVector___nonzero__(self)
def __bool__(self):
return _thrusterDynamicEffector.StringVector___bool__(self)
def __len__(self):
return _thrusterDynamicEffector.StringVector___len__(self)
def __getslice__(self, i, j):
return _thrusterDynamicEffector.StringVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _thrusterDynamicEffector.StringVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _thrusterDynamicEffector.StringVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _thrusterDynamicEffector.StringVector___delitem__(self, *args)
def __getitem__(self, *args):
return _thrusterDynamicEffector.StringVector___getitem__(self, *args)
def __setitem__(self, *args):
return _thrusterDynamicEffector.StringVector___setitem__(self, *args)
def pop(self):
return _thrusterDynamicEffector.StringVector_pop(self)
def append(self, x):
return _thrusterDynamicEffector.StringVector_append(self, x)
def empty(self):
return _thrusterDynamicEffector.StringVector_empty(self)
def size(self):
return _thrusterDynamicEffector.StringVector_size(self)
def swap(self, v):
return _thrusterDynamicEffector.StringVector_swap(self, v)
def begin(self):
return _thrusterDynamicEffector.StringVector_begin(self)
def end(self):
return _thrusterDynamicEffector.StringVector_end(self)
def rbegin(self):
return _thrusterDynamicEffector.StringVector_rbegin(self)
def rend(self):
return _thrusterDynamicEffector.StringVector_rend(self)
def clear(self):
return _thrusterDynamicEffector.StringVector_clear(self)
def get_allocator(self):
return _thrusterDynamicEffector.StringVector_get_allocator(self)
def pop_back(self):
return _thrusterDynamicEffector.StringVector_pop_back(self)
def erase(self, *args):
return _thrusterDynamicEffector.StringVector_erase(self, *args)
def __init__(self, *args):
this = _thrusterDynamicEffector.new_StringVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def push_back(self, x):
return _thrusterDynamicEffector.StringVector_push_back(self, x)
def front(self):
return _thrusterDynamicEffector.StringVector_front(self)
def back(self):
return _thrusterDynamicEffector.StringVector_back(self)
def assign(self, n, x):
return _thrusterDynamicEffector.StringVector_assign(self, n, x)
def resize(self, *args):
return _thrusterDynamicEffector.StringVector_resize(self, *args)
def insert(self, *args):
return _thrusterDynamicEffector.StringVector_insert(self, *args)
    def reserve(self, n):
        return _thrusterDynamicEffector.StringVector_reserve(self, n)
from userbot.events import javes05
from userbot import bot, BOTLOG_CHATID
import asyncio
from telethon import events
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import (PeerChat, PeerChannel,ChannelParticipantsAdmins, ChatAdminRights,ChatBannedRights, MessageEntityMentionName,MessageMediaPhoto, ChannelParticipantsBots)
from telethon.tl.types import Channel
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from userbot.events import rekcah05
client = javes = bot
from telethon.tl.functions.messages import GetCommonChatsRequest
from userbot import JAVES_NAME, JAVES_MSG
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
from datetime import timedelta
import re, datetime
from telethon.tl import types
from typing import Dict, List, Tuple, Union
from telethon.tl.functions.channels import (EditAdminRequest,EditBannedRequest,EditPhotoRequest)
from telethon.tl.types import UserStatusEmpty, UserStatusLastMonth, UserStatusLastWeek, UserStatusOffline, UserStatusOnline, UserStatusRecently, ChannelParticipantsKicked, ChatBannedRights
from telethon.errors import FloodWaitError
from telethon.tl import functions, types
from telethon.tl.functions.messages import EditChatDefaultBannedRightsRequest
from telethon.errors import (BadRequestError, ChatAdminRequiredError,ImageProcessFailedError, PhotoCropSizeSmallError,UserAdminInvalidError)
from telethon.tl.functions.messages import UpdatePinnedMessageRequest
from userbot import CMD_HELP
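# Helper: resolve the target user either from a replied-to message or from the
# command argument (username, numeric id or mention), optionally returning any
# extra text supplied after a ':' separator (used below e.g. as a promote rank).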
async def get_user_from_event(event):
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit(f"`{JAVES_NNAME}`: ** Pass the user's username, id or reply!**")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
                return user_obj, extra
try:
user_obj = await event.client.get_entity(user)
except Exception as err:
return await event.edit("Failed \n **Error**\n", str(err))
return user_obj, extra
async def ban_user(chat_id, i, rights):
try:
await javes(functions.channels.EditBannedRequest(chat_id, i, rights))
return True, None
except Exception as exc:
return False, str(exc)
async def get_user_from_id(user, event):
if isinstance(user, str):
user = int(user)
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj
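# Duration helpers: amount_to_secs() converts a single (number, unit) pair into
# seconds, and string_to_secs() sums every pair found in a string, so that an
# input such as "1h30m" (illustrative) resolves to 5400 seconds.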
async def amount_to_secs(amount: tuple) -> int:
num, unit = amount
num = int(num)
if not unit:
unit = 's'
if unit == 's':
        return num
elif unit == 'm':
return num * 60
elif unit == 'h':
return num * 60 * 60
elif unit == 'd':
return num * 60 * 60 * 24
elif unit == 'w':
return num * 60 * 60 * 24 * 7
elif unit == 'y':
return num * 60 * 60 * 24 * 7 * 52
else:
return 60
async def string_to_secs(string: str) -> int:
values = regexp.findall(string)
totalValues = len(values)
if totalValues == 1:
return await amount_to_secs(values[0])
else:
total = 0
for amount in values:
total += await amount_to_secs(amount)
return total
regexp = re.compile(r"(\d+)(w|d|h|m|s)?")
adminregexp = re.compile(r"\d+(?:w|d|h|m|s)?")
KWARGS = re.compile(
r'(?<!\S)'
r'(?:(?P<q>\'|\")?)(?P<key>(?(q).+?|(?!\d)\w+?))(?(q)(?P=q))'
r'(?::(?!//)|=)\s?'
r'(?P<val>\[.+?\]|(?P<q1>\'|\").+?(?P=q1)|\S+)')
ARGS = re.compile(r'(?:(?P<q>\'|\"))(.+?)(?:(?P=q))')
BOOL_MAP = {
'false': False,
'true': True,
}
Value = Union[int, str, float, list]
KeywordArgument = Union[Value, range, List[Value]]
async def _parse_arg(val: str) -> Union[int, str, float]:
val = val.strip()
if re.match(r'^-?\d+$', val):
return int(val)
try:
return float(val)
except ValueError:
pass
if isinstance(val, str):
if re.search(r'^\[.*\]$', val):
val = re.sub(r'[\[\]]', '', val).split(',')
val = [await _parse_arg(v.strip()) for v in val]
else:
val = BOOL_MAP.get(val.lower(), val)
if isinstance(val, str):
val = re.sub(r'(?<!\\), ?$', '', val)
return val
async def parse_arguments(
arguments: str) -> Tuple[List[Value], Dict[str, KeywordArgument]]:
keyword_args = {}
args = []
for match in KWARGS.finditer(arguments):
key = match.group('key')
val = await _parse_arg(re.sub(r'[\'\"]', '', match.group('val')))
keyword_args.update({key: val})
arguments = KWARGS.sub('', arguments)
for val in ARGS.finditer(arguments):
args.append(await _parse_arg(val.group(2)))
arguments = ARGS.sub('', arguments)
for val in re.findall(r'([^\r\n\t\f\v ,]+|\[.*\])', arguments):
parsed = await _parse_arg(val)
if parsed:
args.append(parsed)
return args, keyword_args
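# Illustrative call of parse_arguments() (the argument values are hypothetical):
#   args, kwargs = await parse_arguments('@someuser r:"spamming links"')
#   # -> args == ['@someuser'], kwargs == {'r': 'spamming links'}
# The ban/unban handlers below read the 'r' keyword as the ban reason.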
@javes05(outgoing=True, pattern="^\!promote(?: |$)(.*)", groups_only=True)
async def promote(event):
chat = await event.get_chat()
if event.is_private:
await event.reply("`You can't promote users in private chats.`")
return
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await event.edit(f"`{JAVES_NNAME}:` **I haven't got the admin rights to do this.**")
return
new_rights = ChatAdminRights(add_admins=False,
invite_users=True,
change_info=False,
ban_users=True,
delete_messages=True,
pin_messages=True)
await event.edit(f"`{JAVES_NNAME}:` **Promoting User**")
user, rank = await get_user_from_event(event)
if not rank:
rank = "admin"
if user:
pass
else:
return
try:
await event.client(
EditAdminRequest(event.chat_id, user.id, new_rights, rank))
        await event.edit(f"`{JAVES_NNAME}:` **Promoted user [{user.first_name}](tg://user?id={user.id}) to admin successfully in {event.chat.title}**")
except BadRequestError:
return await event.edit(f"`{JAVES_NNAME}:`**I don't have sufficient permissions!**")
@javes.on(rekcah05(pattern=f"promote(?: |$)(.*)", allow_sudo=True))
async def promote(event):
chat = await event.get_chat()
if event.is_private:
await event.reply("`You can't promote users in private chats.`")
return
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await event.reply(f"`{JAVES_NNAME}:` **I haven't got the admin rights to do this.**")
return
new_rights = ChatAdminRights(add_admins=False,
invite_users=True,
change_info=False,
ban_users=True,
delete_messages=True,
pin_messages=True)
rkp = await event.reply(f"`{JAVES_NNAME}:` **Promoting User**")
user, rank = await get_user_from_event(event)
if not rank:
rank = "admin"
if user:
pass
else:
return
try:
await event.client(
EditAdminRequest(event.chat_id, user.id, new_rights, rank))
        await rkp.edit(f"`{JAVES_NNAME}:` **Promoted user [{user.first_name}](tg://user?id={user.id}) to admin successfully in {event.chat.title}**")
except BadRequestError:
return await rkp.edit(f"`{JAVES_NNAME}:`**I don't have sufficient permissions!**")
@javes05(outgoing=True, pattern="^\!demote(?: |$)(.*)", groups_only=True)
async def demote(event):
chat = await event.get_chat()
if event.is_private:
        await event.reply("`You can't demote users in private chats.`")
return
admin = chat.admin_rights
creator = chat.creator
await event.edit(f"`{JAVES_NNAME}:`** Demoting user......**")
rank = "admin"
user = await get_user_from_event(event)
user = user[0]
if user:
pass
else:
return
newrights = ChatAdminRights(add_admins=None,
invite_users=None,
change_info=None,
ban_users=None,
delete_messages=None,
pin_messages=None)
try:
await event.client(
EditAdminRequest(event.chat_id, user.id, newrights, rank))
except BadRequestError:
        return await event.edit(f"`{JAVES_NNAME}:`**I don't have sufficient permissions!**")
return
    await event.edit(f"`{JAVES_NNAME}:` **Demoted user [{user.first_name}](tg://user?id={user.id}) successfully in {event.chat.title}**")
@javes.on(rekcah05(pattern=f"demote(?: |$)(.*)", allow_sudo=True))
async def demote(event):
chat = await event.get_chat()
if event.is_private:
        await event.reply("`You can't demote users in private chats.`")
return
admin = chat.admin_rights
creator = chat.creator
rkp = await event.reply(f"`{JAVES_NNAME}:`** Demoting user......**")
rank = "admin"
user = await get_user_from_event(event)
user = user[0]
if user:
pass
else:
return
newrights = ChatAdminRights(add_admins=None,
invite_users=None,
change_info=None,
ban_users=None,
delete_messages=None,
pin_messages=None)
try:
await event.client(
EditAdminRequest(event.chat_id, user.id, newrights, rank))
except BadRequestError:
return await rkp.edit(f"`{JAVES_NNAME}:`**I don't have sufficient permissions!**")
return
    await rkp.edit(f"`{JAVES_NNAME}:` **Demoted user [{user.first_name}](tg://user?id={user.id}) successfully in {event.chat.title}**")
@javes05(outgoing=True, pattern="^!ban(?: |$|\n)([\s\S]*)")
async def ban(event):
if event.is_private:
await event.reply("`You can't ban users in private chats.`")
return
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await event.edit(f"`{JAVES_NNAME}:` **I haven't got the admin rights to do this.**")
return
match = event.pattern_match.group(1)
args, kwargs = await parse_arguments(match)
reason = kwargs.get('r', None)
skipped = []
banned = []
error = []
if not args and event.reply_to_msg_id:
reply = await event.get_reply_message()
args.append(reply.sender_id)
if not args:
await event.edit(f"`{JAVES_NNAME}:` **I don't know who you're talking about, you're going to need to specify a user...!**")
return
entity = await event.get_chat()
for user in args:
if isinstance(user, list):
continue
try:
await client.edit_permissions(entity=entity,
user=user,
view_messages=False)
banned.append(user)
except Exception as e:
skipped.append(user)
error.append(str(e))
if banned:
text = f"`{JAVES_NNAME}: `**Successfully banned**\n"
text += ', '.join((f'`{x}`' for x in banned))
if reason:
text += f"\n\n**Reason:** `{reason}`"
await event.edit(text)
if skipped:
text2 = f"`{JAVES_NNAME}: `**Failed to ban **"
text2 += ', '.join((f'{x}' for x in skipped))
text = "\n **Error(s)**\n•"
text += '•'.join((f'{x}\n' for x in error))
await event.reply(text2)
await event.reply(text)
@javes.on(rekcah05(pattern=f"ban(?: |$|\n)([\s\S]*)", allow_sudo=True))
async def ban(event):
if event.is_private:
await event.reply("`You can't ban users in private chats.`")
return
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await event.reply(f"`{JAVES_NNAME}:` **I haven't got the admin rights to do this.**")
return
match = event.pattern_match.group(1)
args, kwargs = await parse_arguments(match)
reason = kwargs.get('r', None)
skipped = []
banned = []
error = []
if not args and event.reply_to_msg_id:
reply = await event.get_reply_message()
args.append(reply.sender_id)
if not args:
await event.reply(f"`{JAVES_NNAME}:` **I don't know who you're talking about, you're going to need to specify a user...!**")
return
entity = await event.get_chat()
for user in args:
if isinstance(user, list):
continue
try:
await client.edit_permissions(entity=entity,
user=user,
view_messages=False)
banned.append(user)
except Exception as e:
skipped.append(user)
error.append(str(e))
if banned:
text = f"`{JAVES_NNAME}: `**Successfully banned**\n"
text += ', '.join((f'`{x}`' for x in banned))
if reason:
text += f"\n\n**Reason:** `{reason}`"
await event.reply(text)
if skipped:
text2 = f"`{JAVES_NNAME}: `**Failed to ban **"
text2 += ', '.join((f'{x}' for x in skipped))
text = "\n **Error(s)**\n•"
text += '•'.join((f'{x}\n' for x in error))
await event.reply(text2)
await event.reply(text)
@javes05(outgoing=True, pattern="^!unban(?: |$|\n)([\s\S]*)")
async def unban(event):
if event.is_private:
await event.reply("`You can't unban users in private chats.`")
return
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await event.edit(f"`{JAVES_NNAME}:` **I haven't got the admin rights to | |
<reponame>pshchelo/kopf
"""
Kubernetes watching/streaming and the per-object queueing system.
The framework can handle multiple resources at once.
Every custom resource type is "watched" (as in ``kubectl get --watch``)
in a separate asyncio task in the never-ending loop.
The events for this resource type (of all its objects) are then pushed
to the per-object queues, which are created and destroyed dynamically.
The per-object queues are created on demand.
Every object is identified by its uid, and is handled sequentially:
i.e. the low-level events are processed in the order of their arrival.
Other objects are handled in parallel in their own sequential tasks.
To prevent memory leaks over the long run, the queues and the workers
of each object are destroyed if no new events arrive for some time.
The destruction delay (usually a few seconds, maybe minutes) is needed
to prevent frequent queue/worker destruction and re-creation
in case the events are delayed by Kubernetes for any reason.
The conversion of the low-level watch-events to the high-level causes
is done in the `kopf._core.reactor.processing` routines.
"""
import asyncio
import contextlib
import enum
import logging
from typing import TYPE_CHECKING, MutableMapping, NamedTuple, NewType, Optional, Tuple, Union
import aiojobs
from typing_extensions import Protocol, TypedDict
from kopf._cogs.aiokits import aiotoggles
from kopf._cogs.clients import watching
from kopf._cogs.configs import configuration
from kopf._cogs.structs import bodies, references
logger = logging.getLogger(__name__)
# This should be aiojobs' type, but they do not provide it. So, we simulate it.
class _aiojobs_Context(TypedDict, total=False):
exception: BaseException
# message: str
# job: aiojobs._job.Job
class WatchStreamProcessor(Protocol):
async def __call__(
self,
*,
raw_event: bodies.RawEvent,
stream_pressure: Optional[asyncio.Event] = None, # None for tests
resource_indexed: Optional[aiotoggles.Toggle] = None, # None for tests & observation
operator_indexed: Optional[aiotoggles.ToggleSet] = None, # None for tests & observation
) -> None: ...
# An end-of-stream marker sent from the watcher to the workers.
# See: https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
class EOS(enum.Enum):
token = enum.auto()
if TYPE_CHECKING:
WatchEventQueue = asyncio.Queue[Union[bodies.RawEvent, EOS]]
else:
WatchEventQueue = asyncio.Queue
class Stream(NamedTuple):
""" A single object's stream of watch-events, with some extra helpers. """
backlog: WatchEventQueue
pressure: asyncio.Event # means: "hurry up, there are new events queued again"
ObjectUid = NewType('ObjectUid', str)
ObjectRef = Tuple[references.Resource, ObjectUid]
Streams = MutableMapping[ObjectRef, Stream]
def get_uid(raw_event: bodies.RawEvent) -> ObjectUid:
"""
Retrieve or simulate an identifier of an object unique both in time & space.
It is used as a key in mappings of framework-internal system resources,
such as tasks and queues. It is never exposed to the users, even in logs.
The keys are only persistent during a lifetime of a single process.
They can be safely changed across different versions.
In most cases, UIDs are sufficient -- as populated by K8s itself.
However, some resources have no UIDs: e.g. ``v1/ComponentStatus``:
.. code-block:: yaml
apiVersion: v1
kind: ComponentStatus
metadata:
creationTimestamp: null
name: controller-manager
selfLink: /api/v1/componentstatuses/controller-manager
conditions:
- message: ok
status: "True"
type: Healthy
Note that ``selfLink`` is deprecated and will stop being populated
since K8s 1.20. Other fields are not always sufficient to ensure uniqueness
both in space and time: in the example above, the creation time is absent.
In this function, we do our best to provide a fallback scenario in case
UIDs are absent. All in all, having slightly less unique identifiers
is better than failing the whole resource handling completely.
"""
if 'uid' in raw_event['object']['metadata']:
uid = raw_event['object']['metadata']['uid']
else:
ids = [
raw_event['object'].get('kind'),
raw_event['object'].get('apiVersion'),
raw_event['object']['metadata'].get('name'),
raw_event['object']['metadata'].get('namespace'),
raw_event['object']['metadata'].get('creationTimestamp'),
]
uid = '//'.join([s or '-' for s in ids])
return ObjectUid(uid)
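# Illustrative fallback (taken from the docstring example above): an event for the
# uid-less ComponentStatus object would get the synthetic key
#   'ComponentStatus//v1//controller-manager//-//-'
# since its namespace and creationTimestamp are absent.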
async def watcher(
*,
namespace: references.Namespace,
settings: configuration.OperatorSettings,
resource: references.Resource,
processor: WatchStreamProcessor,
operator_paused: Optional[aiotoggles.ToggleSet] = None, # None for tests & observation
operator_indexed: Optional[aiotoggles.ToggleSet] = None, # None for tests & observation
resource_indexed: Optional[aiotoggles.Toggle] = None, # None for tests & non-indexable
) -> None:
"""
    The watcher watches for the resource events via the API, and spawns the workers for every object.
All resources and objects are done in parallel, but one single object is handled sequentially
(otherwise, concurrent handling of multiple events of the same object could cause data damage).
    The watcher is as non-blocking and async as possible. It neither calls any external routines,
    nor makes the API calls via the sync libraries.
The watcher is generally a never-ending task (unless an error happens or it is cancelled).
The workers, on the other hand, are limited approximately to the life-time of an object's event.
Watchers spend their time in the infinite watch stream, not in task waiting.
The only valid way for a worker to wake up the watcher is to cancel it:
this will terminate any i/o operation with `asyncio.CancelledError`, where
we can make a decision on whether it was a real cancellation, or our own.
"""
# In case of a failed worker, stop the watcher, and escalate to the operator to stop it.
watcher_task = asyncio.current_task()
worker_error: Optional[BaseException] = None
def exception_handler(scheduler: aiojobs.Scheduler, context: _aiojobs_Context) -> None:
nonlocal worker_error
if worker_error is None:
worker_error = context['exception']
if watcher_task is not None: # never happens, but is needed for type-checking.
watcher_task.cancel()
# All per-object workers are handled as fire-and-forget jobs via the scheduler,
# and communicated via the per-object event queues.
scheduler: aiojobs.Scheduler
signaller = asyncio.Condition()
scheduler = await aiojobs.create_scheduler(limit=settings.batching.worker_limit,
exception_handler=exception_handler)
streams: Streams = {}
try:
# Either use the existing object's queue, or create a new one together with the per-object job.
# "Fire-and-forget": we do not wait for the result; the job destroys itself when it is fully done.
stream = watching.infinite_watch(
settings=settings,
resource=resource, namespace=namespace,
operator_paused=operator_paused,
)
async for raw_event in stream:
# If the listing is over (even if it was empty), the resource kind is pre-indexed.
# At this moment, only the individual workers/processors can block the global readiness.
if raw_event is watching.Bookmark.LISTED:
if operator_indexed is not None and resource_indexed is not None:
await operator_indexed.drop_toggle(resource_indexed)
# Whatever is bookmarked there, don't let it go to the multiplexer. Handle it above.
if isinstance(raw_event, watching.Bookmark):
continue
# Multiplex the raw events to per-resource workers/queues. Start the new ones if needed.
key: ObjectRef = (resource, get_uid(raw_event))
try:
# Feed the worker, as fast as possible, no extra activities.
streams[key].pressure.set() # interrupt current sleeps, if any.
await streams[key].backlog.put(raw_event)
except KeyError:
# Block the operator's readiness for individual resource's index handlers.
# But NOT when the readiness is already achieved once! After that, ignore it.
# NB: Strictly before the worker starts -- the processor can be too slow, too late.
resource_object_indexed: Optional[aiotoggles.Toggle] = None
if operator_indexed is not None and operator_indexed.is_on():
operator_indexed = None
if operator_indexed is not None and resource_indexed is not None:
resource_object_indexed = await operator_indexed.make_toggle(name=f"{key!r}")
# Start the worker, and feed it initially. Starting can be moderately slow.
streams[key] = Stream(backlog=asyncio.Queue(), pressure=asyncio.Event())
streams[key].pressure.set() # interrupt current sleeps, if any.
await streams[key].backlog.put(raw_event)
await scheduler.spawn(worker(
signaller=signaller,
resource_indexed=resource_object_indexed,
operator_indexed=operator_indexed,
processor=processor,
settings=settings,
streams=streams,
key=key,
))
except asyncio.CancelledError:
if worker_error is None:
raise
else:
raise RuntimeError("Event processing has failed with an unrecoverable error. "
"This seems to be a framework bug. "
"The operator will stop to prevent damage.") from worker_error
finally:
# Allow the existing workers to finish gracefully before killing them.
# Ensure the depletion is done even if the watcher is double-cancelled (e.g. in tests).
depletion_task = asyncio.create_task(_wait_for_depletion(
signaller=signaller,
scheduler=scheduler,
streams=streams,
settings=settings,
))
while not depletion_task.done():
with contextlib.suppress(asyncio.CancelledError):
await asyncio.shield(depletion_task)
# Terminate all the fire-and-forget per-object jobs if they are still running.
# Ensure the scheduler is closed even if the watcher is double-cancelled (e.g. in tests).
closing_task = asyncio.create_task(scheduler.close())
while not closing_task.done():
with contextlib.suppress(asyncio.CancelledError):
await asyncio.shield(closing_task)
async def worker(
*,
signaller: asyncio.Condition,
processor: WatchStreamProcessor,
settings: configuration.OperatorSettings,
resource_indexed: Optional[aiotoggles.Toggle], # None for tests & observation
operator_indexed: Optional[aiotoggles.ToggleSet], # None for tests & observation
streams: Streams,
key: ObjectRef,
) -> None:
"""
The per-object workers consume the object's events and invoke the processors/handlers.
The processor is expected to be an async coroutine, always the one from the framework.
In fact, it is either a peering processor, which monitors the peer operators,
or a generic resource processor, which internally calls the registered synchronous processors.
The per-object worker is a time-limited task, which ends as soon as all the | |
                # returned JSON
#convert to JSON
finishedJSONquery = json.dumps(finishedJSONquery)
print >>sys.stderr, "Timer End"
return HttpResponse(finishedJSONquery, content_type="application/json")
ERROR_MESSAGE += "Error: Trying to access missing or forbidden data"
ERROR_MESSAGE += "Error: You have not submitted through POST"
else: ERROR_MESSAGE += "Error: You do not have permission to access modifying user information"
#If anything goes wrong in the process, return an error in the json HTTP Response
SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META)
return HttpResponse('{"ERROR":"'+ ERROR_MESSAGE +'"}',content_type="application/json")
#=======================================================#
# ACCESS LEVEL : 1 NAVIGATE_QUERY_PAGINATION() *RECYCLING
#=======================================================#
def navigate_query_pagination(self, request):
#***************#
ACCESS_LEVEL = 1
#***************#
#------------------------------------------------------------------------------------------------------------------------------------
        # This is the core query engine for non-geospatial data: it takes a complicated JSON query from POST data and translates it into Django ORM filters.
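        # Illustrative shape of the expected 'currentQueryJSON' payload; the field
        # names come from the parsing below, the values themselves are hypothetical:
        #   {"query_list": {
        #       "0": {"RTYPE": "FRAT-12", "RTYPE-DEEP": "FORMID-0", "LABEL": "Object ID",
        #             "TERMS": [{"T-ANDOR": "and", "QCODE": "1", "TVAL": "bowl"}]}}}
        # ('RTYPE-DEEP' is only consulted for FRRT reference queries.)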
ERROR_MESSAGE = ""
#Check our user's session and access level
if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
if request.method == 'POST':
#We need to make sure we have permission to deal with the formtype--e.g. it's part of the user's current project
formtype = FormType.objects.get(pk=request.POST['formtype_id'])
#If the project IDs match, then we're good to go! Also if it's not the project, but the formtype is set to PUBLIC then we are also good to go
if formtype.project.pk == request.user.permissions.project.pk or (formtype.project.pk != request.user.permissions.project.pk and formtype.is_public == True):
#Make the AJAX Request Data Model for subsequent AJAX calls
progressData = AJAXRequestData(uuid=request.POST.get('uuid'), jsonString='{"message":"Loading Json","current_query":"","current_term":"","percent_done":"0","is_complete":"False"}')
progressData.save()
#First let's setup our header field of ordered labels
print >>sys.stderr, "Timer Start"
form_att_type_list = []
#***RECYCLING BIN*** Make sure our RTYPES are filtered by their deletion flags
for attType in formtype.formrecordattributetype_set.all().filter(flagged_for_deletion=False).order_by('order_number')[:5]:
form_att_type_list.append((attType.order_number,'frat',attType.pk,attType.record_type))
#***RECYCLING BIN*** Make sure our RTYPES are filtered by their deletion flags
for refType in formtype.ref_to_parent_formtype.all().filter(flagged_for_deletion=False).order_by('order_number')[:5]:
form_att_type_list.append((refType.order_number,'frrt',refType.pk,refType.record_type))
#sort the new combined reference ad attribute type list combined
form_att_type_list = sorted(form_att_type_list, key=lambda att: att[0])
#we only want the first 5 types
form_att_type_list = form_att_type_list[0:5]
#Finally let's organize all of our reference and attribute values to match their provided order number
formList = []
#Setup our inital queryset that includes all forms
#***RECYCLING BIN*** Make sure our Forms are filtered by their deletion flags
masterQuery = formtype.form_set.all().filter(flagged_for_deletion=False)
#Setup a list to hold the attribute types from the query. We want to show the record types that are part of the search terms,
# --rather than the default types that are in order. If there are less than 5 query record types, use the ordered record type list
# --until 5 are met.
queryRTYPElist = []
uniqueRTYPES = []
rtypeCounter = 1
#Load the JSON query from POST
masterQueryJSON = json.loads(request.POST['currentQueryJSON'])
#Update our progressbar to show we're at 10%
progressData.jsonString = '{"message":"Performing Query","current_query":"","current_term":"","percent_done":"5","is_complete":"False"}'
progressData.save()
#Loop through each separate query
for query in sorted(masterQueryJSON['query_list']):
print >>sys.stderr, query
#setup a dictionary of key values of the query stats to add to the main querystas dictionary later
singleQueryStats = {}
#***RECYCLING BIN*** Make sure our Forms are filtered by their deletion flags
queriedForms = formtype.form_set.all().filter(flagged_for_deletion=False)
currentJSONQuery = masterQueryJSON['query_list'][query]
uniqueQuery = False
#Let's not allow any duplicate rtypes in the query rtype list header e.g. we don't want "Object ID" to show up 4 times
#--if the user makes a query that compares it 4 times in 4 separate queries
if currentJSONQuery['RTYPE'] not in uniqueRTYPES:
uniqueRTYPES.append(currentJSONQuery['RTYPE'])
uniqueQuery = True
#We need to check whether or not this query is an AND/OR or a null,e.g. the first one(so there is no and/or)
rtype, rtypePK = currentJSONQuery['RTYPE'].split("-")
#store our percentDone variable to update the ajax progress message object
percentDone = 0
                        #=======================================================#
                        # (FRAT) FormRecordAttributeType Lookups
                        #=======================================================#
if rtype == 'FRAT':
#thisRTYPE = FormRecordAttributeType.objects.get(pk=rtypePK)
#store the record type in a new rtype list if unique
if uniqueQuery: queryRTYPElist.append((rtypeCounter,'frat',rtypePK,currentJSONQuery['LABEL']))
rtypeCounter += 1
tCounter = 0;
logging.info("TimerA"+ " : " + str(time.clock()))
for term in currentJSONQuery['TERMS']:
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
if term['T-ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = queriedForms.filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#CONTAINS
elif term['QCODE'] == '1': newQuery = queriedForms.filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#ICONTAINS
elif term['QCODE'] == '2': newQuery = queriedForms.filter(formrecordattributevalue__record_value__exact=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = queriedForms.exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#EXCLUDES
elif term['QCODE'] == '4': newQuery = queriedForms.filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=rtypePK)#IS_NULL
#save stats and query
queriedForms = newQuery
else:#Otherwise it's an OR statement
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = (formtype.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#CONTAINS
elif term['QCODE'] == '1': newQuery = (formtype.form_set.all().filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#ICONTAINS
elif term['QCODE'] == '2': newQuery = (formtype.form_set.all().filter(formrecordattributevalue__record_value__exact=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = (formtype.form_set.all().exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#EXCLUDES
elif term['QCODE'] == '4': newQuery = (formtype.form_set.all().filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=rtypePK))#IS_NULL
#save stats and query
queriedForms = (newQuery | queriedForms)
logging.info("TimerB"+ " : " + str(time.clock()))
                            # We'll calculate percent done, treating this query stage as running from 5% at the start of the section to roughly 55% once all queries complete.
logging.info(rtypeCounter)
logging.info(len(masterQueryJSON['query_list']))
Qpercent = ((rtypeCounter-2) * (50.0/len(masterQueryJSON['query_list'])))
percentDone = 5 + Qpercent + (tCounter * (Qpercent / len(currentJSONQuery['TERMS'])) )
progressData.jsonString = '{"message":"Performing Query # '+ str(rtypeCounter-1) + ' on term: '+term['TVAL']+'","current_query":"'+ currentJSONQuery['RTYPE'] + '","current_term":"'+term['TVAL']+'","percent_done":"'+ str(int(percentDone)) +'","is_complete":"False"}'
progressData.save()
tCounter += 1
logging.info("TimerC"+ " : " + str(time.clock()))
                        #=======================================================#
# (FRRT) FormRecordReferenceType Lookups
# This is where things can get complicated. I've added a 'deep' search -- or the ability to search fields from a related model
# --Right now, this just looks at the form IDs of the related field and looks for matches--it will still need to do that, but
# --it also needs to be able to look up FRAT or FRRTs in the same field--that will essentially double the code for this blocks
# --to do all of this, and will also cause the time of the query to significantly increase because we are doing another JOIN in the
# --SQL lookup to span this relationship. This won't affect the list of queried forms directly--they will be limited by what the
# --query finds obviously--but the user will only see the column for the related FRRT that had a match--not specifically the field that matched
# ----It WILL affect the counts for the graphs etc.
                        #=======================================================#
elif rtype == 'FRRT':
#thisRTYPE = FormRecordReferenceType.objects.get(pk=rtypePK)
#store the record type in a new rtype list if unique
if uniqueQuery: queryRTYPElist.append((rtypeCounter,'frrt',rtypePK,currentJSONQuery['LABEL']))
rtypeCounter += 1
tCounter = 0;
logging.info("TimerD"+ " : " + str(time.clock()))
#get the deep values
deepRTYPE, deepPK = currentJSONQuery['RTYPE-DEEP'].split('-')
for term in currentJSONQuery['TERMS']:
#==========================================================================================================================================================================================
# IF WE ARE JUST LOOKING UP THE RTYPE FORM ID
#==========================================================================================================================================================================================
#TODO: This also needs to check external reference values if no match is found
if deepRTYPE == 'FORMID':
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
if term['T-ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK) #CONTAINS
elif term['QCODE'] == '1': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__form_name__icontains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK) #ICONTAINS
elif term['QCODE'] == '2': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__form_name__exact=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK)#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = queriedForms.exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK)#EXCLUDES
elif term['QCODE'] == '4': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__isnull=True, ref_to_parent_form__record_reference_type__pk=rtypePK) #IS_NULL
queriedForms = newQuery
else:#Otherwise it's an OR statement
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = (formtype.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#CONTAINS | |
###############################################################################
#
# Package: RoadNarrows Robotics Laelaps Robotic Mobile Platform ROS Package
#
# Link: https://github.com/roadnarrows-robotics/laelaps
#
# ROS Node: laelaps_panel, laelaps_range
#
# File: RangeSensorWin.py
#
## \file
##
## \brief Laelaps range sensor group panel.
##
## \author <NAME> (<EMAIL>)
##
## \par Copyright:
## (C) 2017 RoadNarrows LLC.\n
## (http://www.roadnarrows.com)\n
## All Rights Reserved
##
# @EulaBegin@
# @EulaEnd@
#
###############################################################################
import sys
import os
import time
import math
import getopt
from Tkinter import *
from Tkconstants import *
from tkFileDialog import *
import tkFont
import random
import roslib; roslib.load_manifest('laelaps_control')
import rospy
from industrial_msgs.msg import TriState
from laelaps_control.msg import RangeState # message
from laelaps_control.Utils import *
# ------------------------------------------------------------------------------
# Globals
# ------------------------------------------------------------------------------
## \brief Additional image search paths.
imagePath = [
"/prj/share/appkit/images",
"/usr/local/share/appkit/images"
]
## \brief Common foreground colors.
fgColors = {
'normal': 'black',
'ok': '#008800',
'focus': '#0000aa',
'warning': '#aa6600',
'error': '#cc0000'
}
## \brief Laelaps relevant sensor info.
#
# Tuple:
# name Name and key of Laelaps range sensor.
# beamDir Direction of beam, with 0 degrees being the top and front of
# Laelaps.
# boff Pixel offset from a conceptualized Laelaps centered at 0,0 and
# with pixel dimensions of 300x300. Scale and translate as needed.
# toff Text position offset from calculated beam pixel origin.
#
SensorInfo = [
('front', degToRad(0.0), ( 0, -120), ( 0, -10)),
('left_front', degToRad(10.0), ( -50, -120), ( 0, -10)),
('left', degToRad(90.0), ( -90, 0), (-20, 0)),
('left_rear', degToRad(170.0), ( -50, 130), ( 0, 10)),
('rear', degToRad(180.0), ( 0, 130), ( 0, 10)),
('right_rear', degToRad(190.0), ( 50, 130), ( 0, 10)),
('right', degToRad(270.0), ( 90, 0), ( 20, 0)),
('right_front', degToRad(350.0), ( 50, -120), ( 0, -10))
]
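## \brief Illustrative helper (hypothetical, not used by the classes below):
## maps a SensorInfo 'boff' pixel offset, which is defined against a 300x300
## Laelaps image centered at 0,0, onto a canvas drawn at another origin/size.
def _exampleBeamOrigin(origin, size, boff):
  scale = size[0] / 300.0
  return (origin[0] + boff[0] * scale, origin[1] + boff[1] * scale)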
## \brief Laelaps sensor visualization 'structure'.
#
class SensorViz:
BeamMinDist = 0.002 # trusted beam minimum distance (meters)
BeamMaxDist = 0.200 # beam maximum distance (meters)
BeamNoObj = -1.0 # no object detected
SlidingWinMaxSize = 5 # sliding window maximum size
CanvasBg = '#333333' # canvas background color
BeamColor = [ # sensor beam color
'#403333', # 0%
'#473333', # 10%
'#5b3333', # 20%
'#703333', # 30%
'#843333', # 40%
'#993333', # 50%
'#ad3333', # 60%
'#c13333', # 70%
'#d63333', # 80%
'#ea3333', # 90%
'#ff3333' # 100%
]
BrightMax = 10 # maximum beam brightness color index
BrightMin = 0 # minimum beam brightness color index
BrightIncStepSize = 1 # increment beam brightness step size
BrightDecStepSize = 1 # decrement beam brightness step size
TextBgColor = '#333333' # text background color
TextFgColor = '#ffffff' # text foreground color
RobotBodyDim = (0.350, 0.250) # Laelaps body length x width dimensions(meters)
# ------------------------------------------------------------------------------
# Class RangeSensorWin
# ------------------------------------------------------------------------------
## \brief Range sensor group window class.
#
class RangeSensorWin(Toplevel):
#
## \brief Constructor.
##
## \param master Window parent master widget.
## \param cnf Configuration dictionary.
## \param kw Keyword options.
#
def __init__(self, master=None, cnf={}, **kw):
self.m_isCreated = True
Toplevel.__init__(self, master=master, cnf=cnf, **kw)
self.title("laelaps_ranges")
self.wm_protocol("WM_DELETE_WINDOW", lambda: self.onDeleteChild(self))
self.m_frame = RangeSensorFrame(master=self, cnf=cnf, **kw)
self.m_frame.grid(row=0, column=0, padx=5, pady=5)
# load close icon
self.m_iconClose = self.m_frame.loadImage('icons/icon_close_32.png')
# close button
k, w = createCompoundButton(self, text='Close', image=self.m_iconClose,
command=lambda: self.onDeleteChild(self), width=80)
w.grid(row=1, column=0, sticky=N, pady=5)
self.m_bttnClose = w
self.lift()
#
## \brief On delete callback.
##
## \param w Widget (not used).
#
def onDeleteChild(self, w):
if self.m_isCreated:
self.m_isCreated = False
self.destroy()
# ------------------------------------------------------------------------------
# Class RangeSensorFrame
# ------------------------------------------------------------------------------
## \brief Range sensor group frame class.
#
class RangeSensorFrame(Frame):
#
## \brief Constructor.
##
## \param master Window parent master widget.
## \param cnf Configuration dictionary.
## \param kw Keyword options.
#
def __init__(self, master=None, cnf={}, **kw):
self.m_isCreated = True
self.m_parent = master
# intialize window data
kw = self.initData(kw)
self.m_imageLoader = ImageLoader(py_pkg='laelaps_control.images',
image_paths=imagePath)
Frame.__init__(self, master=master, cnf=cnf, **kw)
self.m_icons['app_icon'] = self.loadImage("icons/LaelapsRangeIcon.png")
if self.m_icons['app_icon'] is not None:
self.master.tk.call('wm', 'iconphoto',
self.master._w, self.m_icons['app_icon'])
    # create and show widgets
self.createWidgets()
self.grid(row=0, column=0, padx=5, pady=5)
# subscribe to extended robot status data
self.m_sub = rospy.Subscriber("laelaps_control/range_state",
RangeState,
callback=self.updateSensorData)
#
## \brief Initialize class state data.
##
## Any keywords for this application specific window that are not supported
## by the Frame Tkinter class must be removed.
##
## \param kw Keyword options.
##
## \return Modified keywords sans this specific class.
#
def initData(self, kw):
self.m_debug = False # default debug level
self.m_icons = {} # must keep loaded icons referenced
self.m_range = {} # range sensor state
if kw.has_key('debug'):
self.m_debug = kw['debug']
del kw['debug']
# variables only used for debugging
if self.m_debug:
pass
for sensor in SensorInfo:
self.m_range[sensor[0]] = {
'raw_value': SensorViz.BeamNoObj,
'filtered_value': SensorViz.BeamNoObj,
'sliding_win': [],
'sum_total': 0,
'brightness': SensorViz.BrightMin,
'beam_dir': sensor[1],
'boff': sensor[2],
'toff': sensor[3]}
return kw
#
## \brief Open image from file and convert to PhotoImage.
##
## \param filename Image file name.
##
## \return Returns image widget on success, None on failure.
#
def loadImage(self, filename):
return self.m_imageLoader.loadImage(filename)
#
## \brief Create gui widgets with supporting data and show.
#
def createWidgets(self):
self.createHeading(self, 0, 0)
self.createSensorPanel(self, 1, 0)
#
## \brief Create top gui heading.
##
## \param parent Parent container widget.
## \param row Row in parent widget.
## \param col Column in parent widget.
#
def createHeading(self, parent, row, col):
# top heading
w = Label(parent)
w['font'] = ('Helvetica', 16)
w['text'] = 'Laelaps Range Sensor Group'
w['anchor'] = CENTER
w.grid(row=row, column=col, sticky=E+W)
#
## \brief Create subsystem state lower center panel headers.
##
## \param parent Parent widget
## \param row Row in parent widget.
## \param col Column in parent widget.
#
def createSensorPanel(self, parent, row, col):
wframe = Frame(parent)
wframe['borderwidth'] = 2
wframe['relief'] = 'ridge'
wframe.grid(row=row, column=col, padx=1, pady=3, sticky=N+W+E)
helv = tkFont.Font(family="Helvetica",size=10,weight="bold")
padx = 10
pady = 3
row = 0
col = 0
# Center top-down view
self.m_icons['laelaps_top_down'] = self.loadImage("LaelapsTopDown300.png")
self.m_canvas = Canvas(wframe, width=600, height=700)
self.m_canvas['bg'] = SensorViz.CanvasBg
self.m_canvas.grid(row=0, column=1, rowspan=2, padx=0, pady=0, sticky=E+W)
origin = (300, 350)
size = (300, 300)
if self.m_icons['laelaps_top_down'] is not None:
self.m_canvas.create_image(origin, image=self.m_icons['laelaps_top_down'])
for key in self.m_range:
sensor = self.m_range[key]
# create beam placeholder
sensor['origin'] = (origin[0]+sensor['boff'][0],
origin[1]+sensor['boff'][1])
sensor['idBgBeam'] = None
sensor['idBeam'] = None
# create sensor value text
sensor['idText'] = self.m_canvas.create_text(
(sensor['origin'][0]+sensor['toff'][0],
sensor['origin'][1]+sensor['toff'][1]),
fill = SensorViz.TextFgColor)
#
# Determine the parameterized lines coefficients for the beam rays.
#
# x = x_0 + a * t, y = y_0 + b * t where t = dist_meas / max
#
# Note: Robot is oriented up, so need to rotate by 90 degrees.
#
radius = size[0] * SensorViz.BeamMaxDist / SensorViz.RobotBodyDim[0]
rot90 = degToRad(90.0)
sensor['coef'] = [(0.0, 0.0)] # origin a,b coefficients
for ray in [-20.0, -10.0, 0.0, 10.0, 20.0]:
angle = sensor['beam_dir'] + rot90 + degToRad(ray)
sensor['coef'].append((radius*math.cos(angle), radius*math.sin(angle)))
#
## \brief Update all sensor data from received message.
##
  ## \param sensedData   RangeState message.
#
def updateSensorData(self, sensedData):
i = 0
for name in sensedData.name:
rawVal = sensedData.range[i]
i += 1
if self.m_range.has_key(name):
if rawVal >= SensorViz.BeamMinDist and rawVal <= SensorViz.BeamMaxDist:
self.detectedObj(self.m_range[name], rawVal)
else:
self.detectedNoObj(self.m_range[name])
def detectedObj(self, sensor, rawVal):
sensor['raw_value'] = rawVal
filteredVal = self.filter(sensor)
brightness = sensor['brightness']
if brightness < SensorViz.BrightMax:
brightness += SensorViz.BrightIncStepSize
self.showBeam(sensor, filteredVal, brightness)
sensor['filtered_value'] = filteredVal
sensor['brightness'] = brightness
def detectedNoObj(self, sensor):
sensor['raw_value'] = SensorViz.BeamNoObj
filteredVal = sensor['filtered_value']
brightness = sensor['brightness']
if brightness > SensorViz.BrightMin:
brightness -= SensorViz.BrightDecStepSize
if brightness <= SensorViz.BrightMin:
filteredVal = SensorViz.BeamNoObj
sensor['sliding_win'] = []
sensor['sum_total'] = 0
self.showBeam(sensor, filteredVal, brightness)
sensor['filtered_value'] = filteredVal
sensor['brightness'] = brightness
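  #
  ## \brief Filter the raw range value with a sliding-window mean over the last
  ## SlidingWinMaxSize readings, smoothing out single-sample noise.
  #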
def filter(self, sensor):
sensor['sum_total'] += sensor['raw_value']
sensor['sliding_win'].append(sensor['raw_value'])
if len(sensor['sliding_win']) > SensorViz.SlidingWinMaxSize:
v = sensor['sliding_win'].pop(0)
sensor['sum_total'] -= v
return sensor['sum_total'] / len(sensor['sliding_win'])
def showBeam(self, sensor, newFilteredVal, newBrightness):
curFilteredVal = sensor['filtered_value']
curBrightness = sensor['brightness']
# first time for sensor - force noobject beam
if sensor['idBgBeam'] is None:
self.bgShine(sensor)
#if sensor['idBeam'] is None:
# self.fgShine(sensor, 1.0, SensorViz.BeamColor[SensorViz.BrightMin])
# same beam and value
if newFilteredVal == curFilteredVal and newBrightness == curBrightness:
return
#
# Create a new beam when new object is detected or its distance has changed.
#
if newFilteredVal != SensorViz.BeamNoObj and \
(newFilteredVal < curFilteredVal - 0.002 or \
newFilteredVal > curFilteredVal + 0.002):
t = newFilteredVal / SensorViz.BeamMaxDist
self.fgShine(sensor, t, SensorViz.BeamColor[newBrightness])
self.m_canvas.tag_raise(sensor['idBeam'])
#
# Set a new brightness for the current beam.
#
elif newBrightness != curBrightness:
self.m_canvas.itemconfig(sensor['idBeam'],
fill=SensorViz.BeamColor[newBrightness])
# special case
if newBrightness == 0:
self.fgShine(sensor, | |
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='depotType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='depotType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='depotType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='depotType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.depotCode is not None:
namespaceprefix_ = self.depotCode_nsprefix_ + ':' if (UseCapturedNS_ and self.depotCode_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sdepotCode>%s</%sdepotCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.depotCode), input_name='depotCode')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'depotCode':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'depotCode')
value_ = self.gds_validate_string(value_, node, 'depotCode')
self.depotCode = value_
self.depotCode_nsprefix_ = child_.prefix
# validate type stringMinLength3MaxLength3
self.validate_stringMinLength3MaxLength3(self.depotCode)
# end class depotType
class marketType(GeneratedsSuper):
"""This identifies the market type for the consignment comprising the
origin
country and whether the consignment is being shipped domestically or
internationally and within which international trading block, e.g. 'EU'."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, originCountryCode=None, marketSpecification=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.originCountryCode = originCountryCode
self.validate_stringMinLength2MaxLength2(self.originCountryCode)
self.originCountryCode_nsprefix_ = None
self.marketSpecification = marketSpecification
self.marketSpecification_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, marketType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if marketType.subclass:
return marketType.subclass(*args_, **kwargs_)
else:
return marketType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_originCountryCode(self):
return self.originCountryCode
def set_originCountryCode(self, originCountryCode):
self.originCountryCode = originCountryCode
def get_marketSpecification(self):
return self.marketSpecification
def set_marketSpecification(self, marketSpecification):
self.marketSpecification = marketSpecification
def validate_stringMinLength2MaxLength2(self, value):
result = True
# Validate type stringMinLength2MaxLength2, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
if len(value) < 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.originCountryCode is not None or
self.marketSpecification is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='marketType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('marketType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'marketType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='marketType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='marketType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='marketType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='marketType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.originCountryCode is not None:
namespaceprefix_ = self.originCountryCode_nsprefix_ + ':' if (UseCapturedNS_ and self.originCountryCode_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%soriginCountryCode>%s</%soriginCountryCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.originCountryCode), input_name='originCountryCode')), namespaceprefix_ , eol_))
if self.marketSpecification is not None:
namespaceprefix_ = self.marketSpecification_nsprefix_ + ':' if (UseCapturedNS_ and self.marketSpecification_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%smarketSpecification>%s</%smarketSpecification>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.marketSpecification), input_name='marketSpecification')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'originCountryCode':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'originCountryCode')
value_ = self.gds_validate_string(value_, node, 'originCountryCode')
self.originCountryCode = value_
self.originCountryCode_nsprefix_ = child_.prefix
# validate type stringMinLength2MaxLength2
self.validate_stringMinLength2MaxLength2(self.originCountryCode)
elif nodeName_ == 'marketSpecification':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'marketSpecification')
value_ = self.gds_validate_string(value_, node, 'marketSpecification')
self.marketSpecification = value_
self.marketSpecification_nsprefix_ = child_.prefix
# end class marketType
class brokenRules(GeneratedsSuper):
"""List of business rules that have been breached by the input and that
will
require the user to correct in order to print labels on resubmission of
XML input file.RequestId number to which the error relates."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, key=None, errorCode=None, errorDescription=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.key = _cast(None, key)
self.key_nsprefix_ = None
self.errorCode = errorCode
self.errorCode_nsprefix_ = None
self.errorDescription = errorDescription
self.errorDescription_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, brokenRules)
if subclass is not None:
return subclass(*args_, **kwargs_)
if brokenRules.subclass:
return brokenRules.subclass(*args_, **kwargs_)
else:
return brokenRules(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_errorCode(self):
return self.errorCode
def set_errorCode(self, errorCode):
self.errorCode = errorCode
def get_errorDescription(self):
return self.errorDescription
def set_errorDescription(self, errorDescription):
self.errorDescription = errorDescription
def get_key(self):
return self.key
def set_key(self, key):
self.key = key
def hasContent_(self):
if (
self.errorCode is not None or
self.errorDescription is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='brokenRules', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('brokenRules')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'brokenRules':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='brokenRules')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='brokenRules', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='brokenRules'):
if self.key is not None and 'key' not in already_processed:
already_processed.add('key')
outfile.write(' key=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.key), input_name='key')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='brokenRules', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.errorCode is not None:
namespaceprefix_ = self.errorCode_nsprefix_ + ':' if (UseCapturedNS_ and self.errorCode_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%serrorCode>%s</%serrorCode>%s' % (namespaceprefix_ , self.gds_format_integer(self.errorCode, input_name='errorCode'), namespaceprefix_ , eol_))
if self.errorDescription is not None:
namespaceprefix_ = self.errorDescription_nsprefix_ + ':' if (UseCapturedNS_ and self.errorDescription_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%serrorDescription>%s</%serrorDescription>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.errorDescription), input_name='errorDescription')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('key', node)
if value is not None and 'key' not in already_processed:
already_processed.add('key')
self.key = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'errorCode' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'errorCode')
ival_ = self.gds_validate_integer(ival_, node, 'errorCode')
self.errorCode = ival_
self.errorCode_nsprefix_ = child_.prefix
elif nodeName_ == 'errorDescription':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'errorDescription')
value_ = self.gds_validate_string(value_, node, 'errorDescription')
self.errorDescription = value_
self.errorDescription_nsprefix_ = child_.prefix
# end class brokenRules
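# A minimal usage sketch for the generated binding classes above (illustrative only:
# the helper name is made up, the element values are examples, and it relies on the
# generateDS runtime helpers -- GeneratedsSuper, showIndent, quote_xml, quote_attrib --
# defined earlier in this module).
def _example_export_brokenRules():
    import io
    rule = brokenRules(key='1', errorCode=1234, errorDescription='Postcode is missing')
    buf = io.StringIO()
    rule.export(buf, level=0, name_='brokenRules')
    return buf.getvalue()  # e.g. '<brokenRules key="1">...</brokenRules>\n'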
class fault(GeneratedsSuper):
"""List of faults that have occured during teh processign of multiple
requestsRequestId number to which the fault relates."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, key=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
        self.key = _cast(None, key)
#!/usr/bin/env python
__author__ = ('<NAME> (<EMAIL>), '
'<NAME> (<EMAIL>), '
'<NAME> (<EMAIL>), '
'<NAME> (<EMAIL>), '
'<NAME> (<EMAIL>), '
'<NAME> (<EMAIL>)')
__version__ = '3.0.32'
__date__ = '19 May 2020'
import sys
import glob
import os
import argparse as ap
from urllib.request import urlretrieve
import time
import subprocess as sb
import multiprocessing as mp
import bz2
import hashlib
import numpy as np
import tarfile
import datetime
import itertools
import copy
import pandas as pd
if sys.version_info[0] < 3:
raise Exception("PhyloPhlAn {} requires Python 3, your current Python version is {}.{}.{}"
.format(__version__, sys.version_info[0], sys.version_info[1], sys.version_info[2]))
HOW_MANY = "10"
DOWNLOAD_URL = "https://www.dropbox.com/s/xdqm836d2w22npb/phylophlan_metagenomic.txt?dl=1"
DATABASE_FOLDER = 'phylophlan_databases/'
def info(s, init_new_line=False, exit=False, exit_value=0):
if init_new_line:
sys.stdout.write('\n')
sys.stdout.write('{}'.format(s))
sys.stdout.flush()
if exit:
sys.exit(exit_value)
def error(s, init_new_line=False, exit=False, exit_value=1):
if init_new_line:
sys.stderr.write('\n')
sys.stderr.write('[e] {}\n'.format(s))
sys.stderr.flush()
if exit:
sys.exit(exit_value)
def read_params():
    p = ap.ArgumentParser(description=("The phylophlan_metagenomic.py script assigns SGBs and taxonomy to a given set of input genomes. "
                                       "Outputs can be of three types: (1) for each input genome, the list of the closest "
                                       "-n/--how_many SGBs sorted by average Mash distance; (2) for each input genome, the "
                                       "closest SGB, GGB, FGB, and reference genome; (3) an all-vs-all matrix with all the "
                                       "pairwise Mash distances"),
formatter_class=ap.ArgumentDefaultsHelpFormatter)
p.add_argument('-i', '--input', type=str,
help="Input folder containing the metagenomic bins to be indexed")
p.add_argument('-o', '--output_prefix', type=str, default=None,
help=("Prefix used for the output folders: indexed bins, distance estimations. If not specified, "
"the input folder will be used"))
p.add_argument('-d', '--database', type=str, default=None,
help="Database name, available options can be listed using the --database_list parameter")
p.add_argument('--database_list', action='store_true', default=False,
help="List of all the available databases that can be specified with the -d/--database option")
p.add_argument('--database_update', action='store_true', default=False, help="Update the databases file")
p.add_argument('-e', '--input_extension', type=str, default=None,
help=("Specify the extension of the input file(s) specified via -i/--input. If not specified will "
"try to infer it from the input files"))
p.add_argument('-n', '--how_many', type=str, default=HOW_MANY,
help=('Specify the number of SGBs to report in the output; "all" is a special value to report all the SGBs;'
' this param is not used when "--only_input" is specified'))
p.add_argument('--nproc', type=int, default=1, help="The number of CPUs to use")
p.add_argument('--database_folder', type=str, default=DATABASE_FOLDER,
help="Path to the folder that contains the database file")
p.add_argument('--only_input', action='store_true', default=False,
help="If specified provides a distance matrix between only the input genomes provided")
p.add_argument('--add_ggb', action='store_true', default=False,
help=("If specified adds GGB assignments. If specified with --add_fgb, then -n/--how_many will be set to 1"
" and will be adding a column that reports the closest reference genome"))
p.add_argument('--add_fgb', action='store_true', default=False,
help=("If specified adds FGB assignments. If specified with --add_ggb, then -n/--how_many will be set to 1"
" and will be adding a column that reports the closest reference genome"))
p.add_argument('--overwrite', action='store_true', default=False, help="If specified overwrite the output file if exists")
p.add_argument('--citation', action='version',
version=('<NAME>., <NAME>., <NAME>. et al. '
'Precise phylogenetic analysis of microbial isolates and genomes from metagenomes using PhyloPhlAn 3.0. '
'Nat Commun 11, 2500 (2020). '
'https://doi.org/10.1038/s41467-020-16366-7'),
help="Show citation")
p.add_argument('--verbose', action='store_true', default=False, help="Prints more stuff")
p.add_argument('-v', '--version', action='version',
version='phylophlan_metagenomic.py version {} ({})'.format(__version__, __date__),
help="Prints the current phylophlan_metagenomic.py version and exit")
return p.parse_args()
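# Example invocation (an illustrative sketch; the database name is a placeholder --
# use --database_list to see the databases actually available):
#
#     phylophlan_metagenomic.py -i input_bins/ -e fna -d <database_name> -n 10 --nproc 4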
def database_list(databases_folder, update=False, exit=False, exit_value=0):
sgbs_url = os.path.basename(DOWNLOAD_URL).replace('?dl=1', '')
download(DOWNLOAD_URL, sgbs_url, overwrite=update, verbose=False)
urls = set([r.strip().split('\t')[-1].replace('.md5', '').replace('.tar', '').replace('.txt.bz2', '')
for r in open(sgbs_url)
if not r.startswith('#') and (len(r.split('\t')) == 2) and ('tutorial' not in r)])
if not update:
info('\nAvailable databases that can be specified with -d/--database:\n ')
info('\n '.join(urls) + '\n', exit=exit, exit_value=exit_value)
def check_params(args, verbose=False):
# database folder
if os.path.isdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), args.database_folder)):
args.database_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.database_folder)
if verbose:
info('Setting --database_folder to "{}"\n'.format(args.database_folder))
if args.database_update:
database_list(args.database_folder, update=args.database_update)
args.database_update = False
if args.database_list:
database_list(args.database_folder, update=args.database_update, exit=True)
if not os.path.isdir(args.database_folder):
create_folder(args.database_folder, verbose=verbose)
if not args.only_input:
if (not args.input) or (not args.database):
error('both -i/--input and -d/--database must be specified', init_new_line=True)
database_list(args.database_folder, update=args.database_update, exit=True)
args.mapping = args.database
if not args.mapping.endswith('.txt.bz2'):
args.mapping += '.txt.bz2'
if not os.path.isdir(args.input):
error('"{}" folder not found, -i/--input must be a folder'.format(args.input), exit=True)
if not args.input_extension:
exts = set([os.path.splitext(i)[1] for i in glob.iglob(args.input + '/*') if os.path.splitext(i)[1]])
if len(exts) > 1:
error('Could not automatically infer the input extension (extensions detected: "{}"), please specify '
'using the -e/--input_extension param'.format('", "'.join(exts)), exit=True)
args.input_extension = list(exts)[0]
if verbose:
info('Setting input extension to "{}"\n'.format(args.input_extension))
if not args.input_extension.startswith('.'):
args.input_extension = '.' + args.input_extension
if not args.output_prefix:
args.output_prefix = os.path.abspath(args.input)
if verbose:
info('Setting output prefix to "{}"\n'.format(args.output_prefix))
if os.path.isdir(args.output_prefix):
if args.output_prefix.endswith('/'):
args.output_prefix = args.output_prefix[:-1]
args.output_prefix = os.path.join(args.output_prefix, os.path.basename(args.output_prefix))
if verbose:
info('Output prefix is a folder, setting it to "{}"\n'.format(args.output_prefix))
if args.how_many != 'all':
try:
how_many = int(args.how_many)
except Exception as _:
if verbose:
info('Unrecognized value "{}", setting -n/--how_many to default value "{}"'.format(args.how_many, HOW_MANY))
            how_many = int(HOW_MANY)  # fall back to the default so how_many is always defined
        args.how_many = how_many
create_folder(args.output_prefix + '_sketches', verbose=args.verbose)
create_folder(args.output_prefix + '_sketches/inputs', verbose=args.verbose)
create_folder(args.output_prefix + '_dists', verbose=args.verbose)
if args.add_ggb and args.add_fgb:
args.how_many = 1
if verbose:
info('Both --add_ggb and --add_fgb specified, setting -n/--how_many to "{}"'.format(args.how_many))
if verbose:
info('Arguments: {}\n'.format(vars(args)), init_new_line=True)
def check_dependencies(verbose=False):
if verbose:
info('Checking "mash"\n', init_new_line=True)
try:
sb.check_call(['mash'], stdout=sb.DEVNULL, stderr=sb.DEVNULL)
except Exception as e:
error(str(e), init_new_line=True)
error('mash is not installed or not present in the system path\n', init_new_line=True, exit=True)
def byte_to_megabyte(byte):
"""
Convert byte value to megabyte
"""
return (byte / 1048576)
class ReportHook():
def __init__(self):
self.start_time = time.time()
def report(self, blocknum, block_size, total_size):
"""
Print download progress message
"""
if blocknum == 0:
self.start_time = time.time()
if total_size > 0:
info("Downloading file of size: {:.2f} MB\n".format(byte_to_megabyte(total_size)))
else:
total_downloaded = blocknum * block_size
status = "{:3.2f} MB ".format(byte_to_megabyte(total_downloaded))
if total_size > 0:
percent_downloaded = total_downloaded * 100.0 / total_size
# use carriage return plus sys.stderr to overwrite stderr
download_rate = total_downloaded / (time.time() - self.start_time)
estimated_time = (total_size - total_downloaded) / download_rate
estimated_minutes = int(estimated_time / 60.0)
estimated_seconds = estimated_time - estimated_minutes * 60.0
status += ("{:3.2f} % {:5.2f} MB/sec {:2.0f} min {:2.0f} sec "
.format(percent_downloaded, byte_to_megabyte(download_rate),
estimated_minutes, estimated_seconds))
status += " \r"
info(status)
def download(url, download_file, overwrite=False, verbose=False):
"""
Download a file from a url
"""
if (not os.path.isfile(download_file)) or overwrite:
try:
if verbose:
info('Downloading "{}" to "{}"\n'.format(url, download_file))
urlretrieve(url, download_file, reporthook=ReportHook().report)
info('\n')
except EnvironmentError:
error('unable to download "{}"'.format(url), exit=True)
elif verbose:
info('File "{}" present\n'.format(download_file))
def create_folder(output, verbose=False):
if not os.path.exists(output):
if verbose:
info('Creating folder "{}"\n'.format(output))
os.mkdir(output, mode=0o775)
elif verbose:
info('Folder "{}" already present\n'.format(output))
def remove_file(filename, path=None, verbose=False):
to_remove = ''
if os.path.isfile(filename):
to_remove = filename
elif os.path.isfile(os.path.join(path, filename)):
to_remove = os.path.join(path, filename)
if os.path.isfile(to_remove):
if verbose:
info('Removing "{}"\n'.format(to_remove))
os.remove(to_remove)
elif verbose:
error('cannot remove "{}", file not found'.format(to_remove))
def initt(terminating_):
# This places terminating in the global namespace of the worker subprocesses.
# This allows the worker function to access `terminating` even though it is
# not passed as an argument to the function.
global terminating
terminating = terminating_
def sketching_inputs_for_input_input_dist(input_folder, input_extension, output_prefix, nproc=1, verbose=False):
commands = []
for i in glob.iglob(os.path.join(input_folder, '*' + input_extension)):
out = os.path.splitext(os.path.basename(i))[0]
out_sketch = os.path.join(output_prefix + "_sketches/inputs", out)
commands.append((i, out_sketch, verbose))
if commands:
terminating = mp.Event()
with mp.Pool(initializer=initt, initargs=(terminating,), processes=nproc) as pool:
try:
[_ for _ in pool.imap_unordered(sketching_inputs_for_input_input_dist_rec, commands, chunksize=1)]
except Exception as e:
error(str(e), init_new_line=True)
error('sketching crashed', init_new_line=True, exit=True)
else:
info('No inputs found!\n')
def sketching_inputs_for_input_input_dist_rec(x):
if not terminating.is_set():
try:
inp_bin, out_sketch, verbose = x
if verbose:
t0 = time.time()
info('Sketching "{}"\n'.format(inp_bin))
# sketch
if not os.path.isfile(out_sketch + ".msh"):
cmd = ['mash', 'sketch', '-k', '21', '-s', '10000', '-o', out_sketch, inp_bin]
try:
sb.check_call(cmd, stdout=sb.DEVNULL, stderr=sb.DEVNULL)
except Exception as e:
terminating.set()
remove_file(out_sketch + ".msh", verbose=verbose)
error(str(e), init_new_line=True)
error('cannot execute command\n {}'.format(' '.join(cmd)), init_new_line=True)
raise
if verbose:
t1 = time.time()
info('Sketch for "{}" computed in {}s\n'.format(inp_bin, int(t1 - t0)))
except Exception as e:
terminating.set()
error(str(e), init_new_line=True)
error('error while sketching_inputs_for_input_input_dist_rec\n {}'
.format('\n '.join([str(a) for a in x])), init_new_line=True)
raise
else:
terminating.set()
def sketching(input_folder, input_extension, output_prefix, nproc=1, verbose=False):
commands = []
for i in glob.iglob(os.path.join(input_folder, '*' + input_extension)):
out = os.path.splitext(os.path.basename(i))[0]
out_sketch = os.path.join(output_prefix + "_sketches/inputs", out)
commands.append((i, out_sketch, verbose))
if commands:
terminating = mp.Event()
with mp.Pool(initializer=initt, initargs=(terminating,), processes=nproc) as pool:
try:
[_ for _ in pool.imap_unordered(sketching_rec, commands, chunksize=1)]
except Exception as e:
error(str(e), init_new_line=True)
error('sketching crashed', init_new_line=True, exit=True)
else:
info('No inputs found!\n')
def sketching_rec(x):
if not terminating.is_set():
try:
inp_bin, out_sketch, verbose = x
if verbose:
t0 = time.time()
info('Sketching "{}"\n'.format(inp_bin))
if not os.path.isfile(out_sketch + ".msh"):
| |
# -*- coding: utf-8 -*-
# _mapCtoD.py
# Module providing the mapCtoD function
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the mapCtoD() function
"""
from __future__ import division, print_function
import collections
from warnings import warn
import numpy as np
from numpy.linalg import cond
from numpy.random import rand
from scipy.linalg import expm, inv, norm
from scipy.signal import cont2discrete, lti, ss2zpk
from ._constants import eps
from ._evalMixedTF import evalMixedTF
from ._padb import padb
from ._padr import padr
from ._utils import _getABCD
def mapCtoD(sys_c, t=(0, 1), f0=0.):
"""Map a MIMO continuous-time to an equiv. SIMO discrete-time system.
The criterion for equivalence is that the sampled pulse response
of the CT system must be identical to the impulse response of the DT system.
i.e. If ``yc`` is the output of the CT system with an input ``vc`` taken
from a set of DACs fed with a single DT input ``v``, then ``y``, the output
of the equivalent DT system with input ``v`` satisfies:
``y(n) = yc(n-)`` for integer ``n``. The DACs are characterized by
rectangular impulse responses with edge times specified in the t list.
**Input:**
sys_c : object
the LTI description of the CT system, which can be:
* the ABCD matrix,
* a list-like containing the A, B, C, D matrices,
* a list of zpk tuples (internally converted to SS representation).
* a list of LTI objects
t : array_like
The edge times of the DAC pulse used to make CT waveforms
from DT inputs. Each row corresponds to one of the system
inputs; [-1 -1] denotes a CT input. The default is [0 1],
for all inputs except the first.
f0 : float
The (normalized) frequency at which the Gp filters' gains are
to be set to unity. Default 0 (DC).
**Output:**
sys : tuple
the LTI description for the DT equivalent, in A, B, C, D
representation.
Gp : list of lists
the mixed CT/DT prefilters which form the samples
fed to each state for the CT inputs.
**Example:**
Map the standard second order CT modulator shown below to its CT
equivalent and verify that its NTF is :math:`(1-z^{-1})^2`.
.. image:: ../doc/_static/mapCtoD.png
:align: center
:alt: mapCtoD block diagram
It can be done as follows::
from __future__ import print_function
import numpy as np
from scipy.signal import lti
from deltasigma import *
LFc = lti([[0, 0], [1, 0]], [[1, -1], [0, -1.5]], [[0, 1]], [[0, 0]])
tdac = [0, 1]
LF, Gp = mapCtoD(LFc, tdac)
LF = lti(*LF)
ABCD = np.vstack((
np.hstack((LF.A, LF.B)),
np.hstack((LF.C, LF.D))
))
NTF, STF = calculateTF(ABCD)
print("NTF:") # after rounding to a 1e-6 resolution
print("Zeros:", np.real_if_close(np.round(NTF.zeros, 6)))
print("Poles:", np.real_if_close(np.round(NTF.poles, 6)))
Prints::
Zeros: [ 1. 1.]
Poles: [ 0. 0.]
Equivalent to::
(z -1)^2
NTF = ----------
z^2
.. seealso:: <NAME> and <NAME>, "Delta-sigma modulators employing \
continuous-time circuitry," IEEE Transactions on Circuits and Systems I, \
vol. 43, no. 4, pp. 324-332, April 1996.
"""
# You need to have A, B, C, D specification of the system
Ac, Bc, Cc, Dc = _getABCD(sys_c)
ni = Bc.shape[1]
# Sanitize t
if hasattr(t, 'tolist'):
t = t.tolist()
if (type(t) == tuple or type(t) == list) and np.isscalar(t[0]):
t = [t] # we got a simple list, like the default value
if not (type(t) == tuple or type(t) == list) and \
not (type(t[0]) == tuple or type(t[0]) == list):
raise ValueError("The t argument has an unrecognized shape")
# back to business
t = np.array(t)
if t.shape == (1, 2) and ni > 1:
t = np.vstack((np.array([[-1, -1]]), np.dot(np.ones((ni - 1, 1)), t)))
if t.shape != (ni, 2):
raise ValueError('The t argument has the wrong dimensions.')
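    # Worked example (illustrative): with ni = 3 and the default t = (0, 1), the
    # vstack above turns t into [[-1, -1], [0, 1], [0, 1]], i.e. the first input is
    # treated as CT, matching the docstring note "for all inputs except the first".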
di = np.ones(ni).astype(bool)
for i in range(ni):
if t[i, 0] == -1 and t[i, 1] == -1:
di[i] = False
# c2d assumes t1=0, t2=1.
# Also c2d often complains about poor scaling and can even produce
# incorrect results.
A, B, C, D, _ = cont2discrete((Ac, Bc, Cc, Dc), 1, method='zoh')
Bc1 = Bc[:, ~di]
# Examine the discrete-time inputs to see how big the
# augmented matrices need to be.
B1 = B[:, ~di]
D1 = D[:, ~di]
n = A.shape[0]
t2 = np.ceil(t[di, 1]).astype(np.int_)
    esn = (t2 == t[di, 1]) & (D[0, di] != 0).T  # extra states needed?
npp = n + np.max(t2 - 1 + 1*esn)
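    # Worked example (illustrative): for the default NRZ DAC t = [0, 1] with
    # D[0, i] = 0, t2 = ceil(1) = 1 and esn is False, so npp = n + max(1 - 1 + 0) = n
    # and no extra states are appended.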
    # Augment A to npp x npp, B to npp x 1, C to 1 x npp.
Ap = padb(padr(A, npp), npp)
for i in range(n + 1, npp):
Ap[i, i - 1] = 1
Bp = np.zeros((npp, 1))
if npp > n:
Bp[n, 0] = 1
Cp = padr(C, npp)
Dp = np.zeros((1, 1))
# Add in the contributions from each DAC
for i in np.flatnonzero(di):
t1 = t[i, 0]
t2 = t[i, 1]
B2 = B[:, i]
D2 = D[:, i]
if t1 == 0 and t2 == 1 and D2 == 0: # No fancy stuff necessary
Bp = Bp + padb(B2, npp)
else:
n1 = np.floor(t1)
n2 = np.ceil(t2) - n1 - 1
t1 = t1 - n1
t2 = t2 - n2 - n1
if t2 == 1 and D2 != 0:
n2 = n2 + 1
extraStateNeeded = 1
else:
extraStateNeeded = 0
nt = n + n1 + n2
if n2 > 0:
if t2 == 1:
Ap[:n, nt - n2:nt] = Ap[:n, nt - n2:nt] + np.tile(B2, (1, n2))
else:
Ap[:n, nt - n2:nt - 1] = Ap[:n, nt - n2:nt - 1] + np.tile(B2, (1, n2 - 1))
Ap[:n, (nt-1)] = Ap[:n, (nt-1)] + _B2formula(Ac, 0, t2, B2)
if n2 > 0: # pulse extends to the next period
Btmp = _B2formula(Ac, t1, 1, B2)
else: # pulse ends in this period
Btmp = _B2formula(Ac, t1, t2, B2)
if n1 > 0:
Ap[:n, n + n1 - 1] = Ap[:n, n + n1 - 1] + Btmp
else:
Bp = Bp + padb(Btmp, npp)
if n2 > 0:
Cp = Cp + padr(np.hstack((np.zeros((D2.shape[0], n + n1)), D2*np.ones((1, n2)))), npp)
sys = (Ap, Bp, Cp, Dp)
if np.any(~di):
# Compute the prefilters and add in the CT feed-ins.
# Gp = inv(sI - Ac)*(zI - A)/z*Bc1
n, m = Bc1.shape
Gp = np.empty_like(np.zeros((n, m)), dtype=object)
# !!Make this like stf: an array of zpk objects
ztf = np.empty_like(Bc1, dtype=object)
# Compute the z-domain portions of the filters
ABc1 = np.dot(A, Bc1)
for h in range(m):
for i in range(n):
if Bc1[i, h] == 0:
ztf[i, h] = (np.array([]),
np.array([0.]),
-ABc1[i, h]) # dt=1
else:
ztf[i, h] = (np.atleast_1d(ABc1[i, h]/Bc1[i, h]),
np.array([0.]),
Bc1[i, h]) # dt = 1
# Compute the s-domain portions of each of the filters
stf = np.empty_like(np.zeros((n, n)), dtype=object) # stf[out, in] = zpk
for oi in range(n):
for ii in range(n):
# Doesn't do pole-zero cancellation
stf[oi, ii] = ss2zpk(Ac, np.eye(n), np.eye(n)[oi, :],
np.zeros((1, n)), input=ii)
# scipy as of v 0.13 has no support for LTI MIMO systems
# only 'MISO', therefore you can't write:
# stf = ss2zpk(Ac, eye(n), eye(n), np.zeros(n, n)))
for h in range(m):
for i in range(n):
                # k = 1 unneeded, see below
for j in range(n):
# check the k values for a non-zero term
if stf[i, j][2] != 0 and ztf[j, h][2] != 0:
if Gp[i, h] is None:
Gp[i, h] = {}
Gp[i, h].update({'Hs':[list(stf[i, j])]})
Gp[i, h].update({'Hz':[list(ztf[j, h])]})
else:
Gp[i, h].update({'Hs':Gp[i, h]['Hs'] + [list(stf[i, j])]})
Gp[i, h].update({'Hz':Gp[i, h]['Hz'] + [list(ztf[j, h])]})
# the MATLAB-like cell code for the above statements | |
<filename>quant_weights.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Modifications Copyright 2018 <NAME>
# Adapted from freeze.py, to create a checkpoint file with quantized weights
#
r"""
Loads a checkpoint file and quantizes its weights based on the bit-widths command-line argument.
The quantized weights are then saved to a separate checkpoint file which can then be converted to a GraphDef file using
freeze.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import math
import tensorflow as tf
import numpy as np
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
import input_data
import models
from tensorflow.python.framework import graph_util
FLAGS = None
def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
clip_stride_ms, window_size_ms, window_stride_ms,
dct_coefficient_count, model_architecture, input_type,
model_size_info):
"""Creates an audio model with the nodes needed for inference.
Uses the supplied arguments to create a model, and inserts the input and
output nodes that are needed to use the graph for inference.
Args:
wanted_words: Comma-separated list of the words we're trying to recognize.
sample_rate: How many samples per second are in the input audio files.
clip_duration_ms: How many samples to analyze for the audio pattern.
clip_stride_ms: How often to run recognition. Useful for models with cache.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
dct_coefficient_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
"""
words_list = input_data.prepare_words_list(wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, dct_coefficient_count,100)
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.placeholder(tf.string, [], name='wav_data')
decoded_sample_data = contrib_audio.decode_wav(
wav_data_placeholder,
desired_channels=1,
desired_samples=model_settings['desired_samples'],
name='decoded_sample_data')
#input_spectrogram = tf.placeholder(tf.float32, shape=[49,513], name='speech_signal')
spectrogram = contrib_audio.audio_spectrogram(
decoded_sample_data.audio,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
#spectrogram = input_spectrogram
if (input_type == 'log-mel'):
print("log-mel energies")
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = spectrogram.shape[-1].value # magnitude_spectrograms.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 20.0, 4000.0, model_settings['dct_coefficient_count']
linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, model_settings['sample_rate'], lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrogram, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for `tf.tensordot` does not currently handle this case.
mel_spectrograms.set_shape(spectrogram.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_offset = 1e-6
log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)
fingerprint_input = log_mel_spectrograms
elif (input_type == 'MFCC'):
print('MFCC-features')
fingerprint_input = contrib_audio.mfcc(
spectrogram,
decoded_sample_data.sample_rate,
dct_coefficient_count=model_settings['dct_coefficient_count'])
#fingerprint_input = tf.placeholder(tf.float32,shape=[49,20],name='fingerprint')
fingerprint_frequency_size = model_settings['dct_coefficient_count']
fingerprint_time_size = model_settings['spectrogram_length']
reshaped_input = tf.reshape(fingerprint_input, [
-1, fingerprint_time_size * fingerprint_frequency_size
])
logits,dropout_prob = models.create_model(
reshaped_input, model_settings, model_architecture, model_size_info,
is_training=True, runtime_settings=runtime_settings)
# Create an output to use for inference.
tf.nn.softmax(logits, name='labels_softmax')
def main(_):
print(FLAGS.model_size_info)
reg_conv_bits = FLAGS.bit_widths[0]
dw_conv_bits = FLAGS.bit_widths[1]
pw_conv_bits = FLAGS.bit_widths[2]
fc_bits = FLAGS.bit_widths[3]
activations_bits = FLAGS.bit_widths[4]
print("Regular Conv-weights bit width: " +str(reg_conv_bits))
print("Depthwise Conv-weights bit width: " + str(dw_conv_bits))
print("Pointwise Conv-weights bit width: " + str(pw_conv_bits))
print("FC-weights bit width: " + str(fc_bits))
print("Activations bit width: " + str(activations_bits))
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count, 100)
clip_stride_ms = 260
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.placeholder(tf.string, [], name='wav_data')
decoded_sample_data = contrib_audio.decode_wav(
wav_data_placeholder,
desired_channels=1,
desired_samples=model_settings['desired_samples'],
name='decoded_sample_data')
# input_spectrogram = tf.placeholder(tf.float32, shape=[49,513], name='speech_signal')
spectrogram = contrib_audio.audio_spectrogram(
decoded_sample_data.audio,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
# spectrogram = input_spectrogram
if (FLAGS.input_type == 'log-mel'):
print("log-mel energies")
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = spectrogram.shape[-1].value # magnitude_spectrograms.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 20.0, 4000.0, model_settings['dct_coefficient_count']
linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, model_settings['sample_rate'], lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrogram, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for `tf.tensordot` does not currently handle this case.
mel_spectrograms.set_shape(spectrogram.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_offset = 1e-6
log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)
fingerprint_input = log_mel_spectrograms
elif (FLAGS.input_type == 'MFCC'):
print('MFCC-features')
fingerprint_input = contrib_audio.mfcc(
spectrogram,
decoded_sample_data.sample_rate,
dct_coefficient_count=model_settings['dct_coefficient_count'])
# fingerprint_input = tf.placeholder(tf.float32,shape=[49,20],name='fingerprint')
fingerprint_frequency_size = model_settings['dct_coefficient_count']
fingerprint_time_size = model_settings['spectrogram_length']
reshaped_input = tf.reshape(fingerprint_input, [
-1, fingerprint_time_size * fingerprint_frequency_size
])
training = tf.placeholder(tf.bool, name='training')
logits, net_c1 = models.create_model(
reshaped_input, model_settings, FLAGS.model_architecture, FLAGS.model_size_info,
is_training=True, runtime_settings=runtime_settings)
# Create an output to use for inference.
tf.nn.softmax(logits, name='labels_softmax')
saver = tf.train.Saver(tf.global_variables())
tf.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
for v in tf.trainable_variables():
print(v.name)
v_backup = tf.trainable_variables()
eps = 0.001
  # Layer information [weights, biases, channel means, channel variances, input fractional bits, output fractional bits, name for .h file, batch-norm beta]
conv_1 = ['DS-CNN/conv_1/weights', 'DS-CNN/conv_1/biases', 'DS-CNN/conv_1/batch_norm/moving_mean',
'DS-CNN/conv_1/batch_norm/moving_variance', 2, 5, 'CONV1', 'DS-CNN/conv_1/batch_norm/beta']
dw_conv_1 = ['DS-CNN/conv_ds_1/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_1/depthwise_conv/biases',
'DS-CNN/conv_ds_1/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_1/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV1',
'DS-CNN/conv_ds_1/dw_batch_norm/beta']
pw_conv_1 = ['DS-CNN/conv_ds_1/pointwise_conv/weights', 'DS-CNN/conv_ds_1/pointwise_conv/biases',
'DS-CNN/conv_ds_1/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_1/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV1', 'DS-CNN/conv_ds_1/pw_batch_norm/beta']
dw_conv_2 = ['DS-CNN/conv_ds_2/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_2/depthwise_conv/biases',
'DS-CNN/conv_ds_2/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_2/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV2',
'DS-CNN/conv_ds_2/dw_batch_norm/beta']
pw_conv_2 = ['DS-CNN/conv_ds_2/pointwise_conv/weights', 'DS-CNN/conv_ds_2/pointwise_conv/biases',
'DS-CNN/conv_ds_2/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_2/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV2', 'DS-CNN/conv_ds_2/pw_batch_norm/beta']
dw_conv_3 = ['DS-CNN/conv_ds_3/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_3/depthwise_conv/biases',
'DS-CNN/conv_ds_3/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_3/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV3',
'DS-CNN/conv_ds_3/dw_batch_norm/beta']
pw_conv_3 = ['DS-CNN/conv_ds_3/pointwise_conv/weights', 'DS-CNN/conv_ds_3/pointwise_conv/biases',
'DS-CNN/conv_ds_3/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_3/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV3', 'DS-CNN/conv_ds_3/pw_batch_norm/beta']
dw_conv_4 = ['DS-CNN/conv_ds_4/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_4/depthwise_conv/biases',
'DS-CNN/conv_ds_4/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_4/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV4',
'DS-CNN/conv_ds_4/dw_batch_norm/beta']
pw_conv_4 = ['DS-CNN/conv_ds_4/pointwise_conv/weights', 'DS-CNN/conv_ds_4/pointwise_conv/biases',
'DS-CNN/conv_ds_4/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_4/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV4', 'DS-CNN/conv_ds_4/pw_batch_norm/beta']
dw_conv_5 = ['DS-CNN/conv_ds_5/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_5/depthwise_conv/biases',
'DS-CNN/conv_ds_5/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_5/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV5',
'DS-CNN/conv_ds_5/dw_batch_norm/beta']
pw_conv_5 = ['DS-CNN/conv_ds_5/pointwise_conv/weights', 'DS-CNN/conv_ds_5/pointwise_conv/biases',
'DS-CNN/conv_ds_5/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_5/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV5', 'DS-CNN/conv_ds_5/pw_batch_norm/beta']
dw_conv_6 = ['DS-CNN/conv_ds_6/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_6/depthwise_conv/biases',
'DS-CNN/conv_ds_6/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_6/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV6',
'DS-CNN/conv_ds_6/dw_batch_norm/beta']
pw_conv_6 = ['DS-CNN/conv_ds_6/pointwise_conv/weights', 'DS-CNN/conv_ds_6/pointwise_conv/biases',
'DS-CNN/conv_ds_6/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_6/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV6', 'DS-CNN/conv_ds_6/pw_batch_norm/beta']
layer_list = [conv_1, dw_conv_1, pw_conv_1, dw_conv_2, pw_conv_2, dw_conv_3, pw_conv_3, dw_conv_4, pw_conv_4,
dw_conv_5, pw_conv_5, dw_conv_6, pw_conv_6]
n_filters = 76
for layer in layer_list:
bit_width = reg_conv_bits
layer_name = layer[6]
PW = False
if (layer_name[0:2] == 'PW'):
PW = True
bit_width = pw_conv_bits
DW = False
if (layer_name[0:2] == 'DW'):
DW = True
bit_width = dw_conv_bits
print("Name of node - " + layer[6])
for v in tf.trainable_variables():
if v.name == layer[0]+':0':
v_weights = v
if v.name == layer[1]+':0':
v_bias = v
if v.name == layer[7]+':0':
v_beta = v
for v in tf.global_variables():
if v.name == layer[2]+':0':
v_mean = v
if v.name == layer[3]+':0':
v_var = v
weights = sess.run(v_weights)
bias = sess.run(v_bias)
beta = sess.run(v_beta)
mean = sess.run(v_mean)
var = sess.run(v_var)
#print("Weights shape: " + str(weights.shape))
#print("Bias shape: " + str(bias.shape))
#print("Var shape: " + str(var.shape))
#print("Mean shape: " + str(mean.shape))
#print("Beta shape: " + str(beta.shape))
w_shape = weights.shape
b_shape = bias.shape
weights = weights.squeeze()
weights_t1 = np.zeros(weights.shape)
bias_t1 = np.zeros((1, n_filters))
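    # Batch-norm folding (illustrative derivation of the loop below): with no
    # scale/gamma term, BN computes y = (W*x + b - mean) / sqrt(var + eps) + beta,
    # which is equivalent to a plain convolution with
    #     W' = W / sqrt(var + eps)
    #     b' = beta + (b - mean) / sqrt(var + eps)
    # so the quantized checkpoint needs no separate BN parameters.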
for i in range(0, len(bias)):
if (PW):
filter = weights[:, i]
else:
filter = weights[:, :, i]
bias_temp = bias[i]
mean_temp = mean[i]
var_temp = var[i]
beta_temp = beta[i]
new_filter = filter / math.sqrt(var_temp + eps)
new_bias = beta_temp + (bias_temp - mean_temp) / (math.sqrt(var_temp + eps))
if (PW):
weights_t1[:, i] = new_filter
else:
weights_t1[:, :, i] = new_filter
bias_t1[0, i] = new_bias
#if (i == 0):
#print('filters : ' + str(filter))
#print('Bias : ' + str(bias_temp))
#print('Mean : ' + str(mean_temp))
#print('Variance : ' + str(var_temp))
#print("New filter : " + str(new_filter))
#print("New Bias : " + str(new_bias))
min_value = weights_t1.min()
max_value = weights_t1.max()
int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
dec_bits_weight = min((bit_width-1) - int_bits, 111)
weights_quant = np.round(weights_t1 * 2 ** dec_bits_weight)
weights_quant = weights_quant/(2**dec_bits_weight)
weights_quant = weights_quant.reshape(w_shape)
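    # Worked example (illustrative): with bit_width = 8 and max |w'| = 0.75,
    # int_bits = ceil(log2(0.75)) = 0 and dec_bits_weight = min(7 - 0, 111) = 7, so
    # each folded weight is rounded to the nearest multiple of 2**-7 = 0.0078125 and
    # stored back as a float.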
#print("input fractional bits: " + str(layer[4]))
#print("Weights min value: " + str(min_value))
#print("Weights max value: " + str(max_value))
#print("Weights fractional bits: " + str(dec_bits_weight))
min_value = bias_t1.min()
max_value = bias_t1.max()
int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
dec_bits_bias = min((bit_width-1) - int_bits, 10000)
bias_quant = np.round(bias_t1 * 2 ** dec_bits_bias)
bias_quant = bias_quant/(2**dec_bits_bias)
bias_quant = bias_quant.reshape(b_shape)
bias_left_shift = layer[4] + dec_bits_weight - dec_bits_bias
#print("Bias min value: " + str(min_value))
#print("Bias max value: " + str(max_value))
#print("Bias fractional bits: " + str(dec_bits_bias))
# update the weights in tensorflow graph for quantizing the activations
updated_weights = sess.run(tf.assign(v_weights, weights_quant))
updated_bias = sess.run(tf.assign(v_bias, bias_quant))
fc_layer = ['DS-CNN/fc1/weights', 'DS-CNN/fc1/biases', 5, 3, 'FC']
for v in tf.trainable_variables():
if v.name == fc_layer[0]+':0':
v_fc_weights = v
if v.name == fc_layer[1]+':0':
v_fc_bias = v
weights = sess.run(v_fc_weights)
bias = sess.run(v_fc_bias)
w_shape = weights.shape
b_shape = bias.shape
#print("FC weights : | |
import sys
import numpy as np
import matplotlib.pyplot as plt
from mlportopt.util.helperfuncs import gen_real_data, train_test, merge_clusters, get_full_weights
from mlportopt.preprocessing.preprocessing import preprocess
from mlportopt.flatcluster.flatcluster import DPGMM, TFSOM, GPcluster
from mlportopt.hiercluster.hiercluster import scipy_linkage, BHC
from mlportopt.portfolio.portfolio import Allocate, Evaluation, quasidiagonalise
from mlportopt.dependence.dependence import Dependence
from mlportopt.riskmetrics.riskmetrics import RiskMetrics
class Optimise:
def __init__(self, data,
train_test_v = 0.5,
frequency = 'W',
residuals = None,
whiten = True,
reduce_dims = 2,
dep_measure = 'MI',
dist_measure = None,
dep_copula = 'deheuvels',
dep_denoise = None,
flat_cluster = {'DPGMM':{'clusters':5,
'iters': 500,
'step': 1,
'alpha': 1,
'ftol': 1e-6,
'gtol': 1e-6,
'kap': 1e-6,
'var_p': 1e-3,
                                             'trunc': 10}},
hier_cluster = {'Bayesian':{'alpha': 0.001,
'beta': 0.001,
'gamma': 1,
'rmt_denoise': None}},
intra_method = 'var',
inter_method = 'var',
plots = True):
'''
Parameters
----------
train_test_v: float
Ratio of split between training and test data (Default is 0.5)
frequency: str
Frequency of data [Options: 'D', 'W', 'M'] (Default is 'W')
residuals: ndarray
Beta-adjusted returns (Regressed on the market). If not None, clustering is performed on these residuals. (Default is None)
whiten: Bool
Boolean indicator for demeaning and standardising (whitening) (Default is True)
reduce_dims: int or None
If not None, target data will be reduced via PCA to a lower dimension of size reduce_dims (Default is 2)
dep_measure: str
Chosen dependence measure [Options: 'MI', 'VI','CE','CD','corr','Waserstein'] (Default is 'MI')
dist_measure: str or None
If not None, the method for transforming a similarity matrix into a distance matrix [Options: 'angular', 'corr_dist', 'acute_angular', 'abs_corr', 'other'] (Default is None)
dep_copula: str
Chosen dependence copula [Options: 'deheuvels', 'gaussian','student','clayton','gumbel'] (Default is None)
dep_denoise: str or None
If not None, the Random Matrix Theoretical approach to denoising Hermitian matrices [Options 'fixed','shrinkage','targeted_shrinkage'] (Default is None)
flat_cluster: None or Nested Dictionary (see below for parameter descriptions)
Parameter Dictionary for flat clustering of form {'Method':{Parameters}}
            [Options: {'DPGMM': {clusters, iters, step, alpha, ftol, gtol, kap, var_p, trunc, verb}}
{'GP' : {iters, step, s2, l, alpha, gamma, cov_lim, p_Num, latent, verbose}}]
hier_cluster: Nested Dictionary
Parameter Dictionary for hierarchical clustering of form {'Method':{Parameters}}
[Options: {'Bayesian': {alpha, beta, gamma, rmt_denoise}}
{'single': {dep_measure, hier_cluster, dist_measure, dep_copula, dep_denoise}}
{'average': {Same as single}},
{'complete': {Same as single}},
{'ward': {Same as single}}]
intra_method: str
Method for (inversely) weighting at the cluster level - see Risk Options below (Default is 'var')
inter_method: str
Method for (inversely) weighting at the tree level - see Risk Options below (Default is 'var')
Risk Options
-------
- uniform
- prob_sharpe
- ann_sharpe
- sharpe
- var
- vol (std)
- ann_vol
- VaR - normal (VaR under normality assumption)
- VaR - student (VaR under student t assumption)
- VaR - gmm (VaR from fitted GMM samples)
- CVaR - normal (CVaR under normality assumption)
- CVaR - student (CVaR under student t assumption)
- CVaR - gmm (CVaR from fitted GMM samples)
flatcluster - GP
----------
iters: int
Number of iterations in the graident descent method (Default is 10)
step: float
Step size in gradient descent method (Default is 0.1)
s2: float
Initial value for the variance (Default is 1)
l: float
Initial value for the lengthscale (Default is 1)
alpha: float
Initial value for alpha (Default is 1)
gamma: float [0,1]
            Controls the proportion of the maximum variance used to determine cluster boundaries. (Default is 1)
cov_lim: float
Limits the maximum covariance (Default is 0.99)
p_Num:
Determines the number of samples to be generated when fitting (Default is 20)
latent: int
Determines the size of the latent space if dimensionality reduction is required (Default is 3)
verbose: Bool
Boolean indicator for descriptive printing (Default is False)
flatcluster - DPGMM
----------
X: ndarray
clusters: int
Initial number of clusters - updated by the DP prior
iters: int
Number of iterations in gradient descent (Default is 500)
step: float
Step size (Default is 1)
alpha: float
Initial value for the Dirichlet hyper-parameter (Default is 1)
ftol: float
Tolerance for function value convergence (Default is 1e-6)
gtol: float
Tolerance for gradient value convergence (Default is 1e-6)
kap: float
Hyperparameter for prior mean (Default is 1e-6)
var_p: float
Prior value for the variance (Default is 1e-3)
trunc: float
Intial value for the truncation parameter (Default is 10)
verb: Bool
Boolean indicator for explanatory prints (Default is False)
'''
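        # Example usage (an illustrative sketch, not a prescribed workflow; the
        # return matrix shape and all parameter values below are placeholders):
        #
        #     returns = np.random.randn(50, 260)              # (n_assets, n_obs)
        #     opt = Optimise(returns, train_test_v=0.5, frequency='W',
        #                    reduce_dims=2, dep_measure='MI',
        #                    flat_cluster={'DPGMM': {'clusters': 5, 'iters': 500, 'step': 1,
        #                                            'alpha': 1, 'ftol': 1e-6, 'gtol': 1e-6,
        #                                            'kap': 1e-6, 'var_p': 1e-3, 'trunc': 10}},
        #                    hier_cluster={'Bayesian': {'alpha': 0.001, 'beta': 0.001,
        #                                               'gamma': 1, 'rmt_denoise': None}})
        #     opt.f_cluster(); opt.h_cluster(); opt.allocate()
        #     weights = opt.weights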
self.data = data
self.train, self.test = train_test(data, train_test_v)
self.frequency = frequency
self.whiten = whiten
######## Miscellaneous ###########
self.residuals = False
self.merge_weights = None
self.plots = plots
self.reduce_dims = reduce_dims
self.linkage = None
self.hier_cluster_dict = hier_cluster
######## Preprocessing ###########
if residuals is not None:
self.residuals = True # Set the target data to the residuals if beta-adjustment is desired
self.X, _ = train_test(residuals, train_test_v)
else: self.X = self.train.copy()
# Whiten and reduce
tform = self.X.copy() + 1e-7
self.X = preprocess(tform, axis = 1, white = self.whiten , reduce = False, n = 0)
self.reduced = preprocess(tform, axis = 1, white = self.whiten , reduce = (reduce_dims > 0), n = reduce_dims)
######## Dependence ############
self.dep_measure = dep_measure
self.dist_measure = dist_measure
self.dep_copula = dep_copula
self.dep_denoise = dep_denoise
###### Cluster Risk Metrics ######
self.intra_method = intra_method
self.inter_method = inter_method
######## Flat Cluster ############
if flat_cluster is not None:
self.flat_cluster = list(flat_cluster.keys())[0]
else:
self.flat_cluster = flat_cluster
if self.flat_cluster == 'DPGMM':
param_dict = flat_cluster['DPGMM']
clusters = param_dict['clusters']
iters = param_dict['iters']
step = param_dict['step']
alpha = param_dict['alpha']
ftol = param_dict['ftol']
gtol = param_dict['gtol']
kap = param_dict['kap']
var_p = param_dict['var_p']
trunc = param_dict['trunc']
verb = False
self.fclust = DPGMM(self.reduced, clusters, iters, step, alpha, ftol, gtol, kap, var_p, trunc, verb)
elif self.flat_cluster == 'GP':
param_dict = flat_cluster['GP']
iters = param_dict['iters']
step = param_dict['step']
s2 = param_dict['s2']
l = param_dict['l']
alpha = param_dict['alpha']
gamma = param_dict['gamma']
cov_lim = param_dict['cov_lim']
p_Num = param_dict['p_Num']
latent = param_dict['latent']
verbose = param_dict['verbose']
self.fclust = GPcluster(self.reduced,iters, step, s2, l, alpha, gamma, cov_lim, p_Num, latent, verbose)
if hier_cluster is not None:
self.hier_cluster = list(hier_cluster.keys())[0]
else:
self.hier_cluster = hier_cluster
return
def param_hclust(self):
######### Hier Cluster ##########
self.hier_cluster = list(self.hier_cluster_dict.keys())[0]
param_dict = self.hier_cluster_dict[self.hier_cluster]
if self.hier_cluster == 'Bayesian':
if self.reduce_dims < 2:
print('Please reduce the dimensionality before attempting Bayesian Hierarchical Clustering')
alpha = param_dict['alpha']
beta = param_dict['beta']
gamma = param_dict['gamma']
rmt_denoise = param_dict['rmt_denoise']
self.hclust = BHC(self.reduced, alpha, beta, gamma, rmt_denoise)
else:
self.hclust = scipy_linkage(self.X, self.dep_measure, self.hier_cluster, self.dist_measure, self.dep_copula, self.dep_denoise)
return
def f_cluster(self):
### Model Settings ###
if self.flat_cluster == 'DPGMM':
self.fclust.fit()
# if self.plots: self.fclust.plot()
# self.fclust.split_all()
# if self.plots: self.fclust.plot()
elif self.flat_cluster == 'GP':
self.fclust.fit()
### Assign Clusters ###
self.assigned_clusters = self.fclust.assigned_clusters
self.unique_flat_clusts = np.unique(self.assigned_clusters).shape[0]
### Merge the clusters weighted by chosen metric to create new data for hierarchical clustering ###
if self.residuals:
_, self.X, self.merge_weights = merge_clusters(data = self.train,
clusters = self.assigned_clusters,
resids = self.X,
freq = self.frequency,
method = self.intra_method)
self.X = preprocess(self.X, axis = 1, white = self.whiten , reduce = False, n = 0)
else:
self.X, _, self.merge_weights = merge_clusters(data = self.train,
clusters = self.assigned_clusters,
resids = None,
freq = self.frequency,
method = self.intra_method)
self.X = preprocess(self.X, axis = 1, white = self.whiten , reduce = False, n = 0)
if self.merge_weights is None:
self.merge_weights = {i:[1] for i in range(self.train.shape[0])}
return
def h_cluster(self):
self.param_hclust()
if self.hier_cluster == 'Bayesian': self.hclust.fit()
if self.plots: self.hclust.plot_dendrogram()
self.linkage = np.array(self.hclust.linkage)
return
def allocate(self):
self.allocation = Allocate(self.train, self.linkage, self.frequency)
if self.hier_cluster is not None:
self.weights = self.allocation.recursively_partition(inter_cluster_metric = self.inter_method,
intra_cluster_metric = self.intra_method)
if self.merge_weights is not None:
self.weights = get_full_weights(self.weights, self.merge_weights)
else:
inter = np.empty(self.train.shape[0])
for i in range(inter.shape[0]):
| |
import time
import numpy as np
import vtk
from vtk.util import numpy_support
from svtk.lib.toolbox.integer import minmax
from svtk.lib.toolbox.idarray import IdArray
from svtk.lib.toolbox.numpy_helpers import normalize
import math as m
class VTKAnimationTimerCallback(object):
"""This class is called every few milliseconds by VTK based on the set frame rate. This allows for animation.
I've added several modification functions, such as adding and deleting lines/points, changing colors, etc."""
__slots__ = ["points", "point_colors", "timer_count", "points_poly",
"lines", "lines_poly", "line_colors", "line_id_array"
"last_velocity_update", "unused_locations",
"last_color_velocity_update", "renderer", "last_bg_color_velocity_update",
"last_velocity_update", "_loop_time", "remaining_lerp_fade_time", "lerp_multiplier",
"line_id_array", "point_id_array", "point_vertices", "interactor_style", "renderer",
"interactive_renderer", "_started"
]
def __init__(self):
self.timer_count = 0
        self.last_velocity_update = time.perf_counter()  # time.clock() was removed in Python 3.8
        self.last_color_velocity_update = time.perf_counter()
        self.last_bg_color_velocity_update = time.perf_counter()
        self._loop_time = time.perf_counter()
self.unused_locations = []
self.remaining_lerp_fade_time = 0
self.lerp_multiplier = 1
self.line_id_array = IdArray()
self.point_id_array = IdArray()
self._started=False
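    # Example usage (an illustrative sketch; it assumes the points/lines polydata
    # attributes referenced in the methods below have been created and attached to a
    # VTK renderer elsewhere, which is outside the excerpt shown here):
    #
    #     cb = VTKAnimationTimerCallback()
    #     ...  # create and assign cb.points, cb.point_colors, cb.point_vertices,
    #     ...  # cb.lines, cb.line_colors, cb.points_poly, cb.lines_poly
    #     cb.add_points([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], [[255, 0, 0]])
    #     line_ids = cb.add_lines([2, 0, 1], [[0, 255, 0]])
    #     cb.del_lines(line_ids)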
def add_lines(self, lines, line_colors):
"""
Adds multiple lines between any sets of points.
Args:
lines (list, tuple, np.ndarray, np.generic):
An array in the format of [2, point_a, point_b, 2, point_c, point_d, ...]. The two is needed for VTK's
lines.
line_colors (list, tuple, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
lines.
Returns:
list: An array containing the memory locations of each of the newly inserted lines.
"""
assert (isinstance(lines, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(line_colors, (list, tuple, np.ndarray, np.generic)))
np_line_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_line_color_data = numpy_support.vtk_to_numpy(self.line_colors)
#todo: add lines in unused locations if possible
mem_locations = range(int(len(np_line_data) / 3), int((len(np_line_data) + len(lines)) / 3))
np_line_data = np.append(np_line_data, lines)
if len(np_line_color_data) > 0:
np_line_color_data = np.append(np_line_color_data, line_colors, axis=0)
else:
np_line_color_data = line_colors
vtk_line_data = numpy_support.numpy_to_vtkIdTypeArray(np_line_data, deep=True)
self.lines.SetCells(int(len(np_line_data) / 3), vtk_line_data)
vtk_line_color_data = numpy_support.numpy_to_vtk(num_array=np_line_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_line_color_data)
self.lines_poly.Modified()
self.line_id_array.add_ids(mem_locations)
return mem_locations
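    # Worked example of the flat cell-array format expected by add_lines: to draw two
    # segments, point 0 -> 1 and point 1 -> 2, pass
    #     lines       = [2, 0, 1, 2, 1, 2]          # each cell is (count, id_a, id_b)
    #     line_colors = [[255, 0, 0], [0, 255, 0]]  # one RGB triple per line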
def del_all_lines(self):
"""
Deletes all lines.
"""
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np.array([], dtype=np.int64), deep=True)
self.lines.SetCells(0, vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np.array([]), deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_lines(self, line_indices):
#todo: change idarray to use tuples of (start,end) locations and set this to delete those partitions
"""
Delete specific lines.
Args:
line_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing line memory locations(s) to delete.
"""
np_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_color_data = numpy_support.vtk_to_numpy(self.line_colors)
if isinstance(line_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
np_new_data = []
np_new_color_data = []
for i in range(len(line_indices)):
loc = self.line_id_array.pop_id(line_indices[i])
                if loc is None:
#todo: put warning here
continue
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
else:
np_new_data = np_data[(last_loc + 1) * 3:loc * 3]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
np_new_color_data = np_color_data[(last_loc + 1):loc]
last_loc = loc
last_loc = loc
loc = len(np_data) / 3
np_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
np_data = np_data.astype(np.int64)
np_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_data, deep=True)
self.lines.SetCells(int(len(np_data) / 3), vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_points(self, point_indices):
"""
Delete specific points.
Args:
point_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing point memory locations(s) to delete.
"""
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())#1,1,1,2,1,3,1,4,1,5,1,6...
print(len(np_vert_data), len(np_point_data), len(np_point_color_data))
if isinstance(point_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
subtractor = 0
np_new_data = []
np_new_color_data = []
np_new_verts = []
            for i in range(len(point_indices)):
                loc = self.point_id_array.pop_id(point_indices[i])
                if loc is None:
                    # todo: put warning here
                    continue
                subtractor += 1
                # I could just remove the end of the array, but this keeps the lines attached to the same points
                if len(np_new_verts) > 0:
                    np_new_verts = np.append(np_new_verts, np_vert_data[(last_loc + 1) * 2:loc * 2], axis=0)
                else:
                    np_new_verts = np_vert_data[(last_loc + 1) * 2:loc * 2]
                if len(np_new_data) > 0:
                    np_new_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
                else:
                    np_new_data = np_point_data[(last_loc + 1):loc]
                # Point colors are stored as one RGB row per point, so slice by point index.
                if len(np_new_color_data) > 0:
                    np_new_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1):loc], axis=0)
                else:
                    np_new_color_data = np_point_color_data[(last_loc + 1):loc]
                last_loc = loc
            if loc is None:
                return
            last_loc = loc
            loc = len(np_point_data)
            np_point_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
            np_point_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1):loc], axis=0)
            np_vert_data = np.append(np_new_verts, np_vert_data[(last_loc + 1) * 2:loc * 2], axis=0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtk(np_point_data, deep=True)
self.points.SetData(vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data, deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
self.lines_poly.Modified()
def add_points(self, points, point_colors):
"""
Adds points in 3d space.
Args:
points (tuple, list, np.ndarray, np.generic):
An array in the format of [[x1,y1,z1], [x2,y2,x2], ..., [xn,yn,zn]]
point_colors (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added.
Returns:
"""
assert (isinstance(points, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(point_colors, (list, tuple, np.ndarray, np.generic)))
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())
print(np_vert_data)
for i in range(len(points)):
#todo: modify pointer_id_array to set free pointers to deleted data, not deleted data locations
            if len(self.point_id_array.free_pointers) > 0:
                np_vert_data = np.append(np_vert_data, [1, self.point_id_array.free_pointers.pop()])
            else:
                np_vert_data = np.append(np_vert_data, [1, len(np_vert_data) // 2])
mem_locations = range(int(len(np_point_data)), int((len(np_point_data) + len(points))))
if len(np_point_data) > 0:
np_point_data = np.append(np_point_data, points, axis=0)
else:
np_point_data = points
        if len(point_colors) == 1:
points = np.array(points)
point_colors = np.tile(point_colors, (points.shape[0], 1))
if len(np_point_color_data) > 0:
np_point_color_data = np.append(np_point_color_data, point_colors, axis=0)
else:
np_point_color_data = point_colors
vtk_point_data = numpy_support.numpy_to_vtk(num_array=np_point_data, deep=True, array_type=vtk.VTK_FLOAT)
self.points.SetData(vtk_point_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data.astype(np.int64), deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
vtk_point_color_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_point_color_data)
self.points_poly.Modified()
self.point_id_array.add_ids(mem_locations)
#print(self.point_id_array)
return mem_locations
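    # A minimal usage sketch (assuming a constructed visualizer instance named `viz`):
    #
    #   locs = viz.add_points([[0, 0, 0], [1, 1, 1], [2, 0, 1]], [[255, 255, 255]])
    #
    # A single color is tiled across all new points (see the len(point_colors) == 1 branch),
    # and `locs` holds the memory locations later passed to del_points or set_point_colors.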
def add_point_field(self, widths, normal, center, color):
"""
Adds a rectangular field of points.
Args:
widths (tuple, list, np.ndarray, np.generic): an array defining the widths of each dimension of the field.
normal (tuple, list, np.ndarray, np.generic): an array defining the normal to the field. Specifies angle.
center (tuple, list, np.ndarray, np.generic): an array defining the central position of the field.
color (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added, or a single color in the form of [[r1, g1, b1]].
Returns:
A list of integers representing the memory locations where the points were added.
"""
true_normal = normalize(normal)
if not np.allclose(true_normal, [1, 0, 0]):
zn = np.cross(true_normal, [1, 0, 0])
xn = np.cross(true_normal, zn)
else:
xn = [1, 0, 0]
zn = [0, 0, 1]
point_field = np.array([])
#todo: replace for loops with numpy or gpu ops
for z in range(-int(m.floor(widths[2] / 2.0)), int(m.ceil(widths[2] / 2.0))):
for y in range(-int(m.floor(widths[1] / 2.0)), int(m.ceil(widths[1] / 2.0))):
for x in range(-int(m.floor(widths[0] / 2.0)), int(m.ceil(widths[0] / 2.0))):
vector_space_matrix = np.column_stack(
(np.transpose(xn), np.transpose(true_normal), np.transpose(zn)))
translation = np.matmul([x, y, z], vector_space_matrix)
point_location = [center[0], center[1], center[2]] + translation
point_location = [point_location]
if len(point_field)>0:
point_field = np.append(point_field, point_location, axis = 0)
else:
point_field = point_location
return self.add_points(point_field, color) #returns ids
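    # Worked example of the basis construction above: for normal = [0, 0, 1],
    # zn = cross([0, 0, 1], [1, 0, 0]) = [0, 1, 0] and xn = cross([0, 0, 1], [0, 1, 0]) = [-1, 0, 0],
    # so the integer [x, y, z] grid offsets are mapped through column_stack((xn, normal, zn))
    # into the plane orthogonal to `normal` and then shifted by `center`.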
def set_bg_color(self, color):
"""
Sets the background color of the viewport.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
        r, g, b = color[0]
        r, g, b = (r / 255., g / 255., b / 255.)
self.renderer.SetBackground((minmax(r, 0, 1), minmax(g, 0, 1), minmax(b, 0, 1)))
self.renderer.Modified()
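    # Usage sketch (hypothetical instance `viz`): viz.set_bg_color([[25, 25, 25]]) maps the
    # 0-255 RGB triple into the 0.0-1.0 range VTK expects and clamps each channel via minmax.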
def set_all_point_colors(self, color):
"""
Sets the color of every point.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data = np.tile(color, (np_color_data.shape[0], 1))
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
    def set_point_colors(self, colors, point_indices=None):
        """
        Sets the colors of all points, or of the points at the given memory locations.
        Args:
            colors (tuple, list, np.ndarray, np.generic):
                An array in the format of [[r1, g1, b1], [r2, g2, b2], ...].
            point_indices (tuple, list, np.ndarray, np.generic, optional):
                Point memory locations to recolor; when None, `colors` replaces all point colors.
        """
if point_indices is None:
if isinstance(colors, (list, tuple, np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=colors, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data[point_indices] = colors
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
def setup_lerp_all_point_colors(self, color, fade_time):
"""
Sets all points to the same color, but uses lerping to slowly change the colors.
Args:
            color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
            fade_time (float): duration over which the colors are interpolated towards the new color
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
self.next_colors = np.tile(color, (np_color_data.shape[0], 1))
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def lerp_point_colors(self, colors, fade_time, point_indices=None):
"""
        Sets colors for specific points, but uses lerping to slowly change the colors.
The ARM ID of the subnet where the control
plane load balancer is deployed. When unspecified, it defaults to
AzureControlPlane.subnet_id. Example: "/subscriptions/d00494d6-6f3c-4280
-bbb2-899e163d1d30/resourceGroups/anthos_cluster_gkeust4/providers/Micro
soft.Network/virtualNetworks/gke-vnet-gkeust4/subnets/subnetid123"
mainVolume: Optional. Configuration related to the main volume provisioned
for each control plane replica. The main volume is in charge of storing
      all of the cluster's etcd state. When unspecified, it defaults to an
      8-GiB Azure Disk.
proxyConfig: Optional. Proxy configuration for outbound HTTP(S) traffic.
replicaPlacements: Optional. Configuration for where to place the control
plane replicas. Up to three replica placement instances can be
specified. If replica_placements is set, the replica placement instances
will be applied to the three control plane replicas as evenly as
possible.
rootVolume: Optional. Configuration related to the root volume provisioned
      for each control plane replica. When unspecified, it defaults to a
      32-GiB Azure Disk.
sshConfig: Required. SSH configuration for how to access the underlying
control plane machines.
subnetId: Optional. The ARM ID of the default subnet for the control
plane. The control plane VMs are deployed in this subnet, unless
`AzureControlPlane.replica_placements` is specified. This subnet will
also be used as default for `AzureControlPlane.endpoint_subnet_id` if
`AzureControlPlane.endpoint_subnet_id` is not specified. Similarly it
will be used as default for
`AzureClusterNetworking.service_load_balancer_subnet_id`. Example: `/sub
scriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/
/subnets/default`.
tags: Optional. A set of tags to apply to all underlying control plane
Azure resources.
version: Required. The Kubernetes version to run on control plane replicas
(e.g. `1.19.10-gke.1000`). You can list all supported versions on a
given Google Cloud region by calling GetAzureServerConfig.
vmSize: Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For
available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-
machines/vm-naming-conventions. When unspecified, it defaults to
`Standard_DS2_v2`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class TagsValue(_messages.Message):
r"""Optional. A set of tags to apply to all underlying control plane Azure
resources.
Messages:
AdditionalProperty: An additional property for a TagsValue object.
Fields:
additionalProperties: Additional properties of type TagsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a TagsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
configEncryption = _messages.MessageField('GoogleCloudGkemulticloudV1AzureConfigEncryption', 1)
databaseEncryption = _messages.MessageField('GoogleCloudGkemulticloudV1AzureDatabaseEncryption', 2)
endpointSubnetId = _messages.StringField(3)
mainVolume = _messages.MessageField('GoogleCloudGkemulticloudV1AzureDiskTemplate', 4)
proxyConfig = _messages.MessageField('GoogleCloudGkemulticloudV1AzureProxyConfig', 5)
replicaPlacements = _messages.MessageField('GoogleCloudGkemulticloudV1ReplicaPlacement', 6, repeated=True)
rootVolume = _messages.MessageField('GoogleCloudGkemulticloudV1AzureDiskTemplate', 7)
sshConfig = _messages.MessageField('GoogleCloudGkemulticloudV1AzureSshConfig', 8)
subnetId = _messages.StringField(9)
tags = _messages.MessageField('TagsValue', 10)
version = _messages.StringField(11)
vmSize = _messages.StringField(12)
class GoogleCloudGkemulticloudV1AzureDatabaseEncryption(_messages.Message):
r"""Configuration related to application-layer secrets encryption. Anthos
clusters on Azure encrypts your Kubernetes data at rest in etcd using Azure
Key Vault.
Fields:
keyId: Required. The ARM ID of the Azure Key Vault key to encrypt /
decrypt data. For example: `/subscriptions//resourceGroups//providers/Mi
crosoft.KeyVault/vaults//keys/` Encryption will always take the latest
version of the key and hence specific version is not supported.
"""
keyId = _messages.StringField(1)
class GoogleCloudGkemulticloudV1AzureDiskTemplate(_messages.Message):
r"""Configuration for Azure Disks.
Fields:
sizeGib: Optional. The size of the disk, in GiBs. When unspecified, a
default value is provided. See the specific reference in the parent
resource.
"""
sizeGib = _messages.IntegerField(1, variant=_messages.Variant.INT32)
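# Illustrative only (not part of the generated API surface): apitools messages such as the
# disk template above are constructed with keyword arguments, e.g.
#   root_volume = GoogleCloudGkemulticloudV1AzureDiskTemplate(sizeGib=64)
# and then attached to a control plane or node pool configuration message.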
class GoogleCloudGkemulticloudV1AzureJsonWebKeys(_messages.Message):
r"""AzureJsonWebKeys is a valid JSON Web Key Set as specififed in RFC 7517.
Fields:
keys: The public component of the keys used by the cluster to sign token
requests.
"""
keys = _messages.MessageField('GoogleCloudGkemulticloudV1Jwk', 1, repeated=True)
class GoogleCloudGkemulticloudV1AzureK8sVersionInfo(_messages.Message):
r"""Information about a supported Kubernetes version.
Fields:
version: A supported Kubernetes version (for example, `1.19.10-gke.1000`)
"""
version = _messages.StringField(1)
class GoogleCloudGkemulticloudV1AzureNodeConfig(_messages.Message):
r"""Parameters that describe the configuration of all node machines on a
given node pool.
Messages:
LabelsValue: Optional. The initial labels assigned to nodes of this node
pool. An object containing a list of "key": value pairs. Example: {
"name": "wrench", "mass": "1.3kg", "count": "3" }.
TagsValue: Optional. A set of tags to apply to all underlying Azure
resources for this node pool. This currently only includes Virtual
Machine Scale Sets. Specify at most 50 pairs containing alphanumerics,
spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode
characters. Values can be up to 255 Unicode characters.
Fields:
configEncryption: Optional. Configuration related to vm config encryption.
labels: Optional. The initial labels assigned to nodes of this node pool.
An object containing a list of "key": value pairs. Example: { "name":
"wrench", "mass": "1.3kg", "count": "3" }.
proxyConfig: Optional. Proxy configuration for outbound HTTP(S) traffic.
rootVolume: Optional. Configuration related to the root volume provisioned
for each node pool machine. When unspecified, it defaults to a 32-GiB
Azure Disk.
sshConfig: Required. SSH configuration for how to access the node pool
machines.
tags: Optional. A set of tags to apply to all underlying Azure resources
for this node pool. This currently only includes Virtual Machine Scale
Sets. Specify at most 50 pairs containing alphanumerics, spaces, and
symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can
be up to 255 Unicode characters.
taints: Optional. The initial taints assigned to nodes of this node pool.
vmSize: Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See
[Supported VM sizes](/anthos/clusters/docs/azure/reference/supported-
vms) for options. When unspecified, it defaults to `Standard_DS2_v2`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The initial labels assigned to nodes of this node pool. An
object containing a list of "key": value pairs. Example: { "name":
"wrench", "mass": "1.3kg", "count": "3" }.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class TagsValue(_messages.Message):
r"""Optional. A set of tags to apply to all underlying Azure resources for
this node pool. This currently only includes Virtual Machine Scale Sets.
Specify at most 50 pairs containing alphanumerics, spaces, and symbols
(.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to
255 Unicode characters.
Messages:
AdditionalProperty: An additional property for a TagsValue object.
Fields:
additionalProperties: Additional properties of type TagsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a TagsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
configEncryption = _messages.MessageField('GoogleCloudGkemulticloudV1AzureConfigEncryption', 1)
labels = _messages.MessageField('LabelsValue', 2)
proxyConfig = _messages.MessageField('GoogleCloudGkemulticloudV1AzureProxyConfig', 3)
rootVolume = _messages.MessageField('GoogleCloudGkemulticloudV1AzureDiskTemplate', 4)
sshConfig = _messages.MessageField('GoogleCloudGkemulticloudV1AzureSshConfig', 5)
tags = _messages.MessageField('TagsValue', 6)
taints = _messages.MessageField('GoogleCloudGkemulticloudV1NodeTaint', 7, repeated=True)
vmSize = _messages.StringField(8)
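# Illustrative only (hypothetical values): map-valued fields such as LabelsValue and TagsValue
# are populated through their AdditionalProperty wrappers, e.g.
#   labels = GoogleCloudGkemulticloudV1AzureNodeConfig.LabelsValue(
#       additionalProperties=[
#           GoogleCloudGkemulticloudV1AzureNodeConfig.LabelsValue.AdditionalProperty(
#               key='env', value='dev')])
#   node_config = GoogleCloudGkemulticloudV1AzureNodeConfig(vmSize='Standard_DS2_v2', labels=labels)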
class GoogleCloudGkemulticloudV1AzureNodePool(_messages.Message):
r"""An Anthos node pool running on Azure.
Enums:
StateValueValuesEnum: Output only. The current state of the node pool.
Messages:
AnnotationsValue: Optional. Annotations on the node pool. This field has
the same restrictions as Kubernetes annotations. The total size of all
keys and values combined is limited to 256k. Keys can have 2 segments:
prefix (optional) and name (required), separated by a slash (/). Prefix
must be a DNS subdomain. Name must be 63 characters or less, begin and
end with alphanumerics, with dashes (-), underscores (_), dots (.), and
alphanumerics between.
Fields:
annotations: Optional. Annotations on the node pool. This field has the
same restrictions as Kubernetes annotations. The total size of all keys
and values combined is limited to 256k. Keys can have 2 segments: prefix
(optional) and name (required), separated by a slash (/). Prefix must be
a DNS subdomain. Name must be 63 characters or less, begin and end with
alphanumerics, with dashes (-), underscores (_), dots (.), and
alphanumerics between.
autoscaling: Required. Autoscaler configuration for this node pool.
azureAvailabilityZone: Optional. The Azure availability zone of the nodes
in this nodepool. When unspecified, it defaults to `1`.
config: Required. The node configuration of the node pool.
createTime: Output only. The time at which this node pool was created.
etag: Allows clients to perform consistent read-modify-writes through
optimistic concurrency control. Can be sent on update and delete
requests to ensure the client has an up-to-date value before proceeding.
maxPodsConstraint: Required. The constraint on the maximum number of pods
that can be run simultaneously on a node in the node pool.
name: The | |
# Repository: horizonfleet/horizon
'''
Speed_Layer.py
@author: davidrundel, janders
The speed layer is responsible for (near) real-time data aggregation, enrichment and inference,
as well as ensuring ground-truth data is saved into the batch-database.
'''
import os, json
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, to_json, struct, col, mean as _mean, lit, first, avg, concat, when, rand
from pyspark.sql.types import StructType, StructField, StringType, DoubleType, DateType, ArrayType, \
    TimestampType, IntegerType, LongType, BooleanType
from pyspark.sql import functions as F
from pyspark.sql.functions import expr, udf, from_utc_timestamp
from pyspark.sql import SQLContext
from pyspark.ml import PipelineModel
import pandas as pd
import pymongo
import Azure_Storage as AS
import Weather
os.environ[
'PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.4.5,org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.5 pyspark-shell'
# os.environ['JAVA_HOME'] = "C:\Program Files\Java\jdk-11.0.2"
mongoConnectionString = open("mongoConnectionString", 'r').read().split('\n')[0]
mongodb_name = "horizon"
mongodb_collection = "batch"
enrich_weather_data = True
sc = SparkContext("local[2]", "Sparktest")
spark = SparkSession(sc) \
.builder.appName("Sparktest") \
.master("local[4]") \
.getOrCreate()
# --------------------------------------------------------------------------- #
'''
Input: Initial loading
'''
schema = StructType(
[
StructField('truckId', StringType(), True),
StructField('routeId', StringType(), True),
StructField('tripId', StringType(), True),
StructField('truckLP', StringType(), True),
StructField('consumption', DoubleType(), True),
StructField('mass', DoubleType(), True),
StructField('timeStamp', LongType(), True),
StructField('lat', DoubleType(), True),
StructField('lon', DoubleType(), True),
StructField('acceleration', DoubleType(), True),
StructField('speed', DoubleType(), True),
StructField('secSinceLast', DoubleType(), True),
StructField('avgIntervSpeed', DoubleType(), True),
StructField('speedWarn', IntegerType(), True),
StructField('brakeWarn', IntegerType(), True),
StructField('accWarn', IntegerType(), True),
StructField('engineEff', IntegerType(), True),
StructField('tireEff', IntegerType(), True),
StructField('truckCond', IntegerType(), True),
StructField('incident', BooleanType(), True),
StructField('roadType', IntegerType(), True),
StructField('arrivedStr', StringType(), True),
StructField('arrived', IntegerType(), True),
StructField('truckYear', IntegerType(), True),
StructField('truckType', IntegerType(), True),
]
)
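# A hypothetical telemetry message matching the schema above (all values invented for illustration):
# {"truckId": "t-1", "routeId": "r-7", "tripId": "trip-42", "truckLP": "HD-AB 123",
#  "consumption": 24.5, "mass": 18000.0, "timeStamp": 1601283600000, "lat": 49.0, "lon": 8.4,
#  "acceleration": 0.3, "speed": 21.7, "secSinceLast": 5.0, "avgIntervSpeed": 21.0,
#  "speedWarn": 0, "brakeWarn": 0, "accWarn": 0, "engineEff": 95, "tireEff": 90, "truckCond": 1,
#  "incident": false, "roadType": 2, "arrivedStr": "arr_false", "arrived": 0,
#  "truckYear": 2018, "truckType": 1}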
'''Dev Environment'''
# df= spark \
# .readStream \
# .option("sep", ",") \
# .option("header", "true") \
# .schema(schema) \
# .csv("path_to_a_csv")
'''Prod Environment'''
df = spark.readStream.format("kafka") \
.option("kafka.bootstrap.servers", "kafka:9092") \
.option("subscribe", "simulation") \
.load()
df = df.selectExpr("CAST(value AS STRING)")
df = df.withColumn("value", from_json("value", schema)) \
    .select(col('value.*'))
# --------------------------------------------------------------------------- #
'''
Data Enrichment section
Features
Master data:
sd_ + tripId truckId truckLP routeId
first_timestamp (for route time calculation)
Time series data:
ts_ + timeStamp, lat, lon, speed, acceleration, consumption, tiresWarning, brakeWarning, speedWarning
Aggregations:
acc_time (sum) # acc_time (sum) #label: last_acc_sum_time with last value
acc_meters (sum)
acc_distance_percent !!! (requires data [length of track] from db)
Most recent values for speedlayer
speed, acceleration, consumption, brakeWarn, speedWarn, tiresWarn,
last_timestamp, first_timestamp, last_lat, last_lon
Additional values for frontend:
avg_speed
avg_consumption
latest_PRED_Clustering
latest_PRED_Estimated_Arrival
'''
df_serve = df \
.withColumn("speed", F.round(df["speed"], 2)) \
.withColumn("consumption", F.round(df["consumption"], 2)) \
.withColumn("acceleration", F.round(df["acceleration"], 2)) \
.withColumn("meters_intervall", F.round((df["avgIntervSpeed"]) * df["secSinceLast"], 2)) \
.withColumn("timestamp_s", F.round((df["timeStamp"] / 1000), 0))
# Get the current weather data (is returned as [code, desc, temp]) and split into three columns
@udf("integer")
def get_weather_code_for_location(lat, lon, unix_time):
result = Weather.get_weather_for_location(lat, lon, unix_time)
return result[0]
# Weather enrichment
if (enrich_weather_data):
try:
df_serve = df_serve \
.withColumn("weatherCode", get_weather_code_for_location("lat","lon","timeStamp"))
except:
df_serve = df_serve.withColumn("weatherCode", lit(0))
else:
# Turn off weather data enrichment during development to save on api calls
df_serve = df_serve.withColumn("weatherCode", (F.round(rand()*7, 0)).cast(IntegerType()))
df_serve = df_serve \
.groupBy('tripId') \
.agg(F.first("truckId").alias("sd_truckId"),
F.first("truckLP").alias("sd_truckLP"),
F.first("routeId").alias("sd_routeId"),
F.first("mass").alias("sd_mass"),
F.collect_list('timestamp_s').alias("ts_timeStamp"),
F.collect_list('lat').alias("ts_lat"),
F.collect_list('lon').alias("ts_lon"),
F.collect_list('meters_intervall').alias("ts_meters_intervall"),
F.collect_list('speed').alias("ts_speed"),
F.collect_list('acceleration').alias("ts_acceleration"),
F.collect_list('consumption').alias("ts_consumption"),
F.collect_list('accWarn').alias("ts_accWarn"),
F.collect_list('brakeWarn').alias("ts_brakeWarn"),
F.collect_list('speedWarn').alias("ts_speedWarn"),
F.collect_list('incident').alias("ts_incident"),
F.collect_list('tireEff').alias("ts_tireEff"),
F.collect_list('engineEff').alias("ts_engineEff"),
F.collect_list('arrived').alias("ts_arrived"),
F.first('timestamp_s').alias("agg_first_timeStamp"),
F.last('timestamp_s').alias("agg_last_timeStamp"),
F.last('lat').alias("agg_last_lat"),
F.last('lon').alias("agg_last_lon"),
F.last('consumption').alias("agg_last_consumption"),
F.last('speed').alias("agg_last_speed"),
F.last('accWarn').alias("agg_last_accWarn"),
F.last('brakeWarn').alias("agg_last_brakeWarn"),
F.last('speedWarn').alias("agg_last_speedWarn"),
F.last('incident').alias("agg_last_incident"),
F.last('tireEff').alias("agg_last_tireEff"),
F.last('engineEff').alias("agg_last_engineEff"),
F.last('truckCond').alias("agg_last_truckCond"),
F.last('weatherCode').alias("agg_last_weatherCode"),
F.last('roadType').alias("agg_last_roadType"),
F.round(F.avg('speed'), 2).alias("agg_avg_speed"),
F.last('acceleration').alias("agg_last_acceleration"),
F.round(F.avg('consumption'), 2).alias("agg_avg_consumption"),
F.round(F.avg('acceleration'), 2).alias("agg_avg_acceleration"),
F.last('arrived').alias("last_arrived"),
F.last('arrivedStr').alias("last_arrivedStr"),
F.last('truckYear').alias("sd_truck_year"),
F.last('truckType').alias("sd_truck_type")
)
# TODO: MovingAverage
# df_dashboard= df.withColumn("speed_movAvg", avg("speed") \
# .over(Window.orderBy("timeStamp").partitionBy("tripId").rowsBetween(-3,-1)))
@udf("array<integer>")
def PRODUCE_ts_agg_acc_sec(xs, ys):
if xs and ys:
temp = [int((x - ys)) for x in xs]
return temp
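# Worked example: with ts_timeStamp = [1000, 1060, 1120] and agg_first_timeStamp = 1000,
# PRODUCE_ts_agg_acc_sec yields [0, 60, 120] (seconds since the start of the trip), and
# PRODUCE_LABEL_agg_acc_sec below simply picks the last entry, 120.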
@udf("integer")
def PRODUCE_LABEL_agg_acc_sec(xs):
if xs:
temp = xs[-1]
return temp
@udf("array<integer>")
def PRODUCE_ts_agg_acc_meters(xs):
if xs:
temp = [int(round(sum(xs[0:i]), 0)) for i in (range(1, len(xs) + 1))]
return temp
@udf("integer")
def PRODUCE_agg_acc_meters(xs):
if xs:
temp = xs[-1]
return temp
df_serve = df_serve \
.withColumn("ts_agg_acc_sec", PRODUCE_ts_agg_acc_sec("ts_timeStamp", "agg_first_timeStamp")) \
.withColumn("agg_acc_sec", PRODUCE_LABEL_agg_acc_sec("ts_agg_acc_sec")) \
.withColumn("ts_agg_acc_meters", PRODUCE_ts_agg_acc_meters("ts_meters_intervall")) \
.withColumn("agg_acc_meters", PRODUCE_agg_acc_meters("ts_agg_acc_meters"))
if not os.path.exists("./route_dist.csv"):
save_path = "."
cloud_file_name = "route_dist.csv"
container_name = "horizon"
AS.download_file(save_path, cloud_file_name, container_name)
route_dist_pd = pd.read_csv("./route_dist.csv")
# Join route dists and truckinfo
sqlCtx = SQLContext(sc)
route_dist = sqlCtx.createDataFrame(route_dist_pd)
route_dist = route_dist.select(route_dist['routeId'].alias('sd_routeId'),
F.round(route_dist['dist'], 0).alias("sd_len_route"),
route_dist['start'].alias('sd_start'),
route_dist['dest'].alias('sd_dest'),
)
try:
df_serve = df_serve.join(route_dist, 'sd_routeId', how='left_outer')
# Fill nas in case csv does not provide information for a route id
df_serve = df_serve.fillna({'sd_len_route': 100000.0, 'sd_start': "Start", 'sd_dest': "Destination"})
except Exception as ex:
df_serve = df_serve.join(route_dist, 'sd_routeId', how='left_outer')
df_serve = df_serve.withColumn('sd_len_route', lit(100000.0)) \
.withColumn('sd_start', lit("Start")) \
.withColumn('sd_dest', lit("Dest"))
print(ex)
@udf("array<double>")
def PRODUCE_ts_acc_distance_percent(xs, ys):
if xs and ys:
temp = [float(round((x / ys), 4)) for x in xs]
return temp
@udf("double")
def PRODUCE_agg_acc_distance_percent(xs):
if xs:
temp = xs[-1]
return temp
try:
df_serve = df_serve.withColumn("ts_acc_distance_percent", PRODUCE_ts_acc_distance_percent("ts_agg_acc_meters", "sd_len_route"))
except:
a = [0.0, 0.0]
df_serve = df_serve.withColumn("ts_acc_distance_percent", F.array([F.lit(x) for x in a]))
try:
df_serve = df_serve.withColumn("agg_acc_distance_percent", PRODUCE_agg_acc_distance_percent("ts_acc_distance_percent"))
except:
df_serve = df_serve.withColumn("agg_acc_distance_percent", lit(0.0))
#---------------------------------------------------------------#
'''
Inference section
'''
#Clustering
save_path = "."
cluster_substring = "cluster"
container_name = "horizon"
newest_filename, newest_filename_success = AS.get_newest_file(container_name, cluster_substring)
# Enter valid fallback cluster-file name here:
backup_cluster = ""
if newest_filename_success:
if not os.path.exists("./" + newest_filename):
filepath, cluster_download_success = AS.download_file(save_path, newest_filename, container_name)
if cluster_download_success:
cluster_path = AS.unzip(".", filepath)
cluster_path = os.path.join("./CURRENT_CLUSTER_MODEL", newest_filename)
else:
if not os.path.exists("./" + backup_cluster):
filepath, cluster_download_success = AS.download_file(save_path, backup_cluster, container_name)
if cluster_download_success:
cluster_path = AS.unzip(".", filepath)
cluster_path = os.path.join("./CURRENT_CLUSTER_MODEL", backup_cluster)
df_serve = df_serve.fillna({'agg_avg_speed': 0.0, 'agg_avg_acceleration': 0.0, 'agg_avg_consumption': 0.0})
model_clustering = PipelineModel.read().load(cluster_path)
try:
df_serve = model_clustering.transform(df_serve)
df_serve = df_serve.withColumnRenamed("prediction", "agg_latest_PRED_Clustering")
except Exception as ex:
print(ex)
df_serve = df_serve.withColumn("agg_latest_PRED_Clustering", lit(0).cast(DoubleType()))
# Aggregations on routes
aggregates_file_name = "aggregates"
container_name = "horizon"
aggregates_newest_filename, newest_filename_success = AS.get_newest_file(container_name, aggregates_file_name)
# Enter valid fallback aggregations-file name here:
backup_aggregates = ""
if newest_filename_success:
if not os.path.exists("./" + aggregates_newest_filename):
AS.download_file(save_path, aggregates_newest_filename, container_name)
else:
if not os.path.exists("./" + backup_aggregates):
AS.download_file(save_path, backup_aggregates, container_name)
aggregates_path = os.path.join(".", aggregates_newest_filename)
aggregates = pd.read_csv(aggregates_path, sep = ",")
sqlCtx = SQLContext(sc)
aggregates = sqlCtx.createDataFrame(aggregates)
aggregates = aggregates.select(aggregates['sd_routeId'],
F.round(aggregates['RP_avg_agg_acc_sec'], 2).alias("avg_agg_route_time"),
F.round(aggregates['RP_agg_avg_speed'], 2).alias('avg_agg_truck_speed'),
F.round(aggregates['RP_agg_avg_acceleration'], 2).alias('avg_agg_truck_acceleration'),
F.round(aggregates['RP_agg_avg_consumption'], 2).alias('agg_normal_consumption')
)
try:
df_serve = df_serve.join(aggregates, 'sd_routeId', how='left_outer')
# We cannot leave these fields empty. If the route does not exist in the batch database and therefore the aggregations file,
# it gets filled up with dummy values.
df_serve = df_serve.fillna({'avg_agg_route_time': 15000.0, 'avg_agg_truck_speed': 20.0,
'avg_agg_truck_acceleration': 1.0, 'agg_normal_consumption': 10.0})
except Exception as ex:
df_serve = df_serve.withColumn('avg_agg_route_time', lit(15000.0)) \
.withColumn('avg_agg_truck_speed', lit(20.0)) \
.withColumn('avg_agg_truck_acceleration', lit(3.0)) \
.withColumn('agg_normal_consumption', lit(10.0))
print(ex)
# Inference of estimated arrival delay
save_path = "."
arrival_file_name = "arrival"
container_name = "horizon"
arrival_newest_filename, newest_filename_success = AS.get_newest_file(container_name, arrival_file_name)
# Enter valid fallback arrival-file name here:
backup_arrival_name = ""
if newest_filename_success:
if not os.path.exists("./" + arrival_newest_filename):
filepath, download_success = AS.download_file(save_path, arrival_newest_filename, container_name)
if download_success:
AS.unzip(".", filepath)
arrival_path = os.path.join("./CURRENT_ARRIVAL_MODEL", arrival_newest_filename)
else:
arrival_path = os.path.join("./CURRENT_ARRIVAL_MODEL", arrival_newest_filename)
else:
if not os.path.exists("./" + backup_arrival_name):
filepath, download_success = AS.download_file(save_path, backup_arrival_name, container_name)
if download_success:
AS.unzip(".", filepath)
arrival_path = os.path.join("./CURRENT_ARRIVAL_MODEL", backup_arrival_name)
try:
df_serve = df_serve.drop("features")
except:
pass
try:
df_serve = df_serve.drop("scaledFeatures")
except:
pass
try:
model_arrival = PipelineModel.read().load(arrival_path)
df_serve = model_arrival.transform(df_serve)
df_serve = df_serve.withColumnRenamed("prediction", "agg_latest_PRED_Arrival")
# dummy delay for development
#df_serve = df_serve.withColumn("agg_latest_PRED_Arrival", F.round(rand() * 60, 2).cast(DoubleType()))
except:
df_serve = df_serve.withColumn("agg_latest_PRED_Arrival", lit(0).cast(DoubleType()))
# --------------------------------------------------------------------------- #
'''
Batch-db section:
# Save ground truth of values into the database
# The database is used by the batch layer for training on all tuples for which the LABEL for the entire trip duration
# is available, which is inserted as soon as the truck arrives
Contains Stammdaten
Contains Time Series
Contains Labels for prediction
'''
df_to_batch = df_serve.select(
"tripId",
"sd_truckId",
"sd_truckLP",
"sd_routeId",
"sd_start",
"sd_dest",
"sd_mass",
"ts_timeStamp",
"ts_lat",
"ts_lon",
"ts_accWarn",
"ts_brakeWarn",
"ts_speedWarn",
"ts_incident",
"agg_last_tireEff",
"agg_last_engineEff",
"agg_last_truckCond",
"agg_last_roadType",
"ts_meters_intervall",
"ts_speed",
"ts_acceleration",
"ts_consumption",
"agg_acc_sec",
"ts_agg_acc_sec",
"agg_avg_speed",
"agg_avg_acceleration",
"agg_avg_consumption",
"agg_latest_PRED_Clustering",
"ts_agg_acc_meters",
"ts_acc_distance_percent",
"last_arrived",
"last_arrivedStr",
"ts_arrived"
)
tempdataframe = pd.DataFrame()
def write_to_mongo(df_to_batch, epoch_id):
tempdataframe = df_to_batch.toPandas()
if tempdataframe.empty:
pass
else:
global mongoConnectionString
with pymongo.MongoClient(mongoConnectionString) as client:
mydb = client[mongodb_name]
sparkcollection = mydb[mongodb_collection]
df_json = json.loads(tempdataframe.T.to_json()).values()
try:
sparkcollection.insert(df_json)
except Exception as e:
print(e)
            # Insert the label: once a trip has arrived, back-fill its final trip duration into all of its tuples
filtered_df_to_batch= df_to_batch.filter(df_to_batch.last_arrivedStr == "arr_true") #[1, "true", True])
pd_temp_df_old = filtered_df_to_batch.toPandas()
pd_temp_df_old = pd_temp_df_old[["tripId", "agg_acc_sec"]]
with pymongo.MongoClient(mongoConnectionString) as client:
mydb = client[mongodb_name]
sparkcollection = mydb[mongodb_collection]
for tuple in pd_temp_df_old.iterrows():
temp = tuple[1]
temp_tripId = temp["tripId"]
temp_LABEL = temp["agg_acc_sec"]
sparkcollection.update_many({"tripId": temp_tripId}, \
{"$set": {"LABEL_final_agg_acc_sec": temp_LABEL}})
pass
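# Label back-fill sketch: once a trip reports last_arrivedStr == "arr_true", every document in
# the batch collection that shares its tripId receives LABEL_final_agg_acc_sec (the final trip
# duration in seconds), which is the target the batch layer later trains the arrival model on.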
df_to_batch_stream = df_to_batch.writeStream \
.outputMode("Update") \
.foreachBatch(write_to_mongo) \
.start()
# --------------------------------------------------------------------------- #
'''
Frontend section:
The frontend receives:
- ground truth values from serving dataframe / stream
- aggregates
- all predictions
'''
@udf("integer")
def sum_array(xs):
if xs:
temp = sum(xs)
return temp
df_to_frontend = df_serve \
.withColumn("delay", lit(5)) \
.withColumn('sum_arrived', sum_array("ts_arrived")) \
.withColumn("service_interval", (15000 - (500 * (2020 - col("sd_truck_year")))).cast(IntegerType())) \
.withColumn("next_service", (F.round(rand()*400, 0)).cast(IntegerType()))
df_to_frontend = df_to_frontend \
.withColumn("int_mass", df_to_frontend["sd_mass"].cast(IntegerType())) \
.withColumn("agg_first_timeStamp", df_to_frontend["agg_first_timeStamp"].cast(IntegerType())) \
.withColumn("agg_last_timeStamp", df_to_frontend["agg_last_timeStamp"].cast(IntegerType())) \
.withColumn("agg_latest_PRED_Arrival", when(col("agg_latest_PRED_Arrival") >= 4400.0, 4399)
.otherwise(df_serve["agg_latest_PRED_Arrival"].cast(IntegerType()))) \
.withColumn("avg_route_time", df_to_frontend["avg_agg_route_time"].cast(IntegerType())) \
.withColumn("driver_duration", df_to_frontend["agg_acc_sec"].cast(IntegerType())) \
.withColumn("agg_acc_distance_percent", (col("agg_acc_distance_percent") * 100).cast(IntegerType()))
df_to_frontend = df_to_frontend.withColumn("calculated_arrival_time",
((col("agg_first_timeStamp").cast(IntegerType()))
+ (col("avg_route_time")).cast(IntegerType())))
df_to_frontend = df_to_frontend \
.withColumn("road_type", when(col("agg_last_roadType") == 0, "URBAN")
.when(col("agg_last_roadType") == 1, "INTERURBAN")
.when(col("agg_last_roadType") == 2, "HIGHWAY")
.when(col("agg_last_roadType") == 3, "FREEWAY")
.when(col("agg_last_roadType") == 5, "TRUCKARRIVED")
.otherwise("NO ROAD")) \
.withColumn("truck_type", when(col("sd_truck_type") == 0, "LOCAL")
.when(col("sd_truck_type") == 1, "LONG_DISTANCE")
.when(col("sd_truck_type") == 2, "LONG_DISTANCE_TRAILER")
.otherwise("NO TRUCK"))
#.withColumn("driver_duration", str(df_to_frontend["driver_duration_h"]) + str(":") + str(df_to_frontend["driver_duration_m"]))
df_to_frontend = df_to_frontend.select(
col('tripId').alias("trip_id"),
col("sd_truckId").alias("truck_id"),
col("sd_truckLP").alias("number_plate"),
col("sd_routeId").alias("route_id"),
col("int_mass").alias("truck_mass"),
col("sd_start").alias("departure"),
col("sd_dest").alias("arrival"),
col("agg_first_timeStamp").alias("departure_time"),
col("agg_last_timeStamp").alias("telemetry_timestamp"),
col("calculated_arrival_time").alias("arrival_time"),
col("agg_last_lat").alias("telemetry_lat"),
col("agg_last_lon").alias("telemetry_lon"),
col("agg_last_speed").alias("truck_speed"),
col("agg_last_consumption").alias("truck_consumption"),
col("agg_avg_speed").alias("avg_truck_speed"),
col("agg_last_acceleration").alias("truck_acceleration"),
col("agg_avg_acceleration").alias("avg_truck_acceleration"),
col("agg_avg_consumption").alias("normal_consumption"),
col("agg_acc_distance_percent").alias("route_progress"),
col("agg_latest_PRED_Clustering").alias("driver_class"),
col("agg_latest_PRED_Arrival").alias("delay"),
col("agg_last_accWarn").alias("driver_acceleration"),
col("agg_last_speedWarn").alias("driver_speed"),
col("agg_last_brakeWarn").alias("driver_brake"),
col("agg_last_incident").alias("incident"),
col("agg_last_truckCond").alias("truck_condition"),
col("agg_last_tireEff").alias("tires_efficiency"),
col("agg_last_engineEff").alias("engine_efficiency"),
col("agg_last_weatherCode").alias("weather"),
| |
"""Keeping track of mapping and reduce operations over tasks."""
from copy import deepcopy
from . import helpers_state as hlpst
from .helpers import ensure_list
from .specs import BaseSpec
class State:
"""
A class that specifies a State of all tasks.
* It's only used when a task have a splitter.
* It contains all information about splitter, combiner, final splitter,
and input values for specific task states
(specified by the splitter and the input).
* It also contains information about the final groups and the final splitter
if combiner is available.
Attributes
----------
name : :obj:`str`
name of the state that is the same as a name of the task
splitter : :obj:`str`, :obj:`tuple`, :obj:`list`
can be a str (name of a single input),
tuple for scalar splitter, or list for outer splitter
splitter_rpn_compact : :obj:`list`
splitter in :abbr:`RPN (reverse Polish notation)`, using a compact
notation for splitter from previous states, e.g. _NA
splitter_rpn : :obj:`list`
splitter represented in RPN,
unwrapping splitters from previous states
combiner : :obj:`list`
list of fields that should be combined
(order is not important)
splitter_final :
final splitter that includes the combining process
other_states : :obj:`dict`
used to create connections with previous states::
{
name of a previous state:
(previous state, input from current state needed the connection)
}
inner_inputs : :obj:`dict`
used to create connections with previous states
``{"{self.name}.input name for current inp": previous state}``
states_ind : :obj:`list` of :obj:`dict`
dictionary for every state that contains
indices for all state inputs (i.e. inputs that are part of the splitter)
states_val : :obj:`list` of :obj:`dict`
dictionary for every state that contains
values for all state inputs (i.e. inputs that are part of the splitter)
inputs_ind : :obj:`list` of :obj:`dict`
dictionary for every state that contains
indices for all task inputs (i.e. inputs that are relevant
for current task, can be outputs from previous nodes)
group_for_inputs : :obj:`dict`
specifying groups (axes) for each input field
(depends on the splitter)
group_for_inputs_final : :obj:`dict`
specifying final groups (axes)
for each input field (depends on the splitter and combiner)
groups_stack_final : :obj:`list`
specify stack of groups/axes (used to
determine which field could be combined)
final_combined_ind_mapping : :obj:`dict`
mapping between final indices
after combining and partial indices of the results
"""
def __init__(self, name, splitter=None, combiner=None, other_states=None):
"""
Initialize a state.
Parameters
----------
name : :obj:`str`
name (should be the same as the task's name)
splitter : :obj:`str`, or :obj:`tuple`, or :obj:`list`
splitter of a task
combiner : :obj:`str`, or :obj:`list`)
field/fields used to combine results
other_states :obj:`dict`:
            ``{name of a previous state: (previous state,
input from current state needed the connection)}``
"""
self.name = name
self.other_states = other_states
self.splitter = splitter
# temporary combiner
self.combiner = combiner
# if other_states, the connections have to be updated
if self.other_states:
self.update_connections()
def __str__(self):
"""Generate a string representation of the object."""
return (
f"State for {self.name} with a splitter: {self.splitter} "
f"and combiner: {self.combiner}"
)
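    # A minimal usage sketch (input field names hypothetical): State(name="NA", splitter=["a", "b"])
    # stores the splitter with task-qualified names, i.e. ["NA.a", "NA.b"], and splitter_rpn
    # exposes it in reverse Polish notation as ["NA.a", "NA.b", "*"] (an outer split over a and b).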
@property
def splitter(self):
"""Get the splitter of the state."""
return self._splitter
@splitter.setter
def splitter(self, splitter):
if splitter and not isinstance(splitter, (str, tuple, list)):
raise hlpst.PydraStateError(
"splitter has to be a string, a tuple or a list"
)
if splitter:
self._splitter = hlpst.add_name_splitter(splitter, self.name)
else:
self._splitter = None
@property
def splitter_rpn(self):
"""splitter in :abbr:`RPN (Reverse Polish Notation)`"""
_splitter_rpn = hlpst.splitter2rpn(
self.splitter, other_states=self.other_states
)
return _splitter_rpn
@property
def splitter_rpn_compact(self):
r"""splitter in :abbr:`RPN (Reverse Polish Notation)`
with a compact representation of the prev-state part (i.e. without unwrapping
the part that comes from the previous states), e.g., [_NA, _NB, \*]
"""
if self.other_states:
_splitter_rpn_compact = hlpst.splitter2rpn(
self.splitter, other_states=self.other_states, state_fields=False
)
return _splitter_rpn_compact
else:
return self.splitter_rpn
@property
def splitter_final(self):
"""the final splitter, after removing the combined fields"""
return hlpst.rpn2splitter(self.splitter_rpn_final)
@property
def splitter_rpn_final(self):
if self.combiner:
_splitter_rpn_final = hlpst.remove_inp_from_splitter_rpn(
deepcopy(self.splitter_rpn),
self.current_combiner_all + self.prev_state_combiner_all,
)
return _splitter_rpn_final
else:
return self.splitter_rpn
@property
def current_splitter(self):
"""the current part of the splitter,
i.e. the part that is related to the current task's state only
(doesn't include fields propagated from the previous tasks)
"""
lr_flag = self._prevst_current_check(self.splitter)
if lr_flag == "prev-state":
return None
elif lr_flag == "current":
return self.splitter
elif lr_flag == "[prev-state, current]":
return self.splitter[1]
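    # Example (node names hypothetical): for a task "NB" whose splitter is ["_NA", "NB.b"], the
    # prev-state part is "_NA" (the splitter propagated from the previous node NA) and the
    # current part is "NB.b"; _prevst_current_check classifies such a splitter as
    # "[prev-state, current]".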
@property
def current_splitter_rpn(self):
"""the current part of the splitter using RPN"""
if self.current_splitter:
current_splitter_rpn = hlpst.splitter2rpn(
self.current_splitter, other_states=self.other_states
)
return current_splitter_rpn
else:
return []
@property
def prev_state_splitter(self):
"""the prev-state part of the splitter,
i.e. the part that comes from the previous tasks' states
"""
if hasattr(self, "_prev_state_splitter"):
return self._prev_state_splitter
else:
return None
@property
def prev_state_splitter_rpn(self):
"""the prev-state art of the splitter using RPN"""
if self.prev_state_splitter:
prev_state_splitter_rpn = hlpst.splitter2rpn(
self.prev_state_splitter, other_states=self.other_states
)
return prev_state_splitter_rpn
else:
return []
@property
def prev_state_splitter_rpn_compact(self):
r"""the prev-state part of the splitter using RPN in a compact form,
(without unwrapping the states from previous nodes), e.g. [_NA, _NB, \*]
"""
if self.prev_state_splitter:
prev_state_splitter_rpn_compact = hlpst.splitter2rpn(
self.prev_state_splitter,
other_states=self.other_states,
state_fields=False,
)
return prev_state_splitter_rpn_compact
else:
return []
@property
def combiner(self):
"""the combiner associated to the state."""
return self._combiner
@combiner.setter
def combiner(self, combiner):
if combiner:
if not isinstance(combiner, (str, list)):
raise hlpst.PydraStateError("combiner has to be a string or a list")
self._combiner = hlpst.add_name_combiner(ensure_list(combiner), self.name)
else:
self._combiner = []
@property
def current_combiner(self):
"""the current part of the combiner,
i.e. the part that is related to the current task's state only
(doesn't include fields propagated from the previous tasks)
"""
return [comb for comb in self.combiner if self.name in comb]
@property
def current_combiner_all(self):
"""the current part of the combiner including all the fields
that should be combined (i.e. not only the fields that are explicitly
        set, but also the fields that are in the same group/axis and had to be combined
        together, e.g., if the splitter is (a, b), a and b have to be combined together)
"""
if hasattr(self, "_current_combiner_all"):
return self._current_combiner_all
else:
return self.current_combiner
@property
def prev_state_combiner(self):
"""the prev-state part of the combiner,
i.e. the part that comes from the previous tasks' states
"""
if hasattr(self, "_prev_state_combiner"):
return self._prev_state_combiner
else:
return list(set(self.combiner) - set(self.current_combiner))
@property
def prev_state_combiner_all(self):
"""the prev-state part of the combiner including all the fields
that should be combined (i.e. not only the fields that are explicitly
        set, but also the fields that are in the same group/axis and had to be combined
        together, e.g., if the splitter is (a, b), a and b have to be combined together)
"""
if hasattr(self, "_prev_state_combiner_all"):
return list(set(self._prev_state_combiner_all))
else:
return self.prev_state_combiner
@property
def other_states(self):
"""specifies the connections with previous states, uses dictionary:
{name of a previous state: (previous state, input field from current state)}
"""
return self._other_states
@other_states.setter
def other_states(self, other_states):
if other_states:
if not isinstance(other_states, dict):
raise hlpst.PydraStateError("other states has to be a dictionary")
else:
for key, val in other_states.items():
if not val:
raise hlpst.PydraStateError(
f"connection from node {key} is empty"
)
self._other_states = other_states
else:
self._other_states = {}
@property
def inner_inputs(self):
"""specifies connections between fields from the current state
with the specific state from the previous states, uses dictionary
``{input name for current state: the previous state}``
"""
if self.other_states:
_inner_inputs = {}
for name, (st, inp_l) in self.other_states.items():
if f"_{st.name}" in self.splitter_rpn_compact:
for inp in inp_l:
_inner_inputs[f"{self.name}.{inp}"] = st
return _inner_inputs
else:
return {}
def update_connections(self, new_other_states=None, new_combiner=None):
"""updating connections, can use a new other_states and combiner
Parameters
----------
new_other_states : :obj:`dict`, optional
dictionary with new other_states, will be set before updating connections
new_combiner : :obj:`str`, or :obj:`list`, optional
new combiner
"""
if new_other_states:
self.other_states = new_other_states
# ensuring that the connected fields are set as a list
self.other_states = {
nm: (st, ensure_list(flds)) for nm, (st, flds) in self.other_states.items()
}
self._connect_splitters()
if new_combiner:
self.combiner = new_combiner
def _connect_splitters(self):
"""
Connect splitters from the previous nodes.
Evaluates the prev-state part of the splitter (i.e. the part from the previous states)
        and the current part of the splitter (i.e., the current state).
        If the prev-state splitter is not provided, the splitter has to be completed.
"""
# TODO: should this be in | |
[2.658419759563575, 44.02221909387343, 0.2523467042002105],
[14.51709249202439, 6.983205119480762, 28.393455696095884],
[0.5813684255696072, 19.786932694371394, 15.41750901086601],
[7.25570681241572, 1.0927440074874795, 91.84582406284943],
[0.0009872251043546506, 0.9053174962382692, 133.8246713127458],
[8.119087814578176, 11.534463846273825, 20.646748307888863],
[1.2660556585284062, 32.426344757867966, 3.644132864524858],
[28.953846560192822, 2.1080861661265375, 37.62160206861866],
[45.19704602816857, 8.326032118644386, 9.3126460357371],
[0.3751041491539514, 41.086912440706506, 1.0190502661720282],
[237.83816444392767, 0.4888703582471729, 0.18990092909694792],
[32.87768630373118, 0.5312197537280611, 51.555541534077854],
[4.673343261366936, 0.013149278162734916, 124.55799761321882],
[44.8321847953332, 9.805102035428424, 6.462262624957976],
[46.223778498112445, 7.913853363412893, 9.695973596545315],
[0.14160646568913623, 1.5247433585619465, 128.3999636006685],
[10.222841372156568, 16.41763242392627, 12.772314032530836],
[2.0814914900905435, 23.22577670944277, 10.336308497213848],
[12.335782559414683, 38.085916954368024, 0.2992188070911673],
[0.003653390798950482, 11.401091572960702, 49.18423992801202],
[22.28112539082112, 4.474747451834841, 31.796482081630725],
[0.4566986680066298, 7.638013993540891, 58.074715910215936],
[22.834754562993897, 7.748430604687635, 20.2292918467861],
[0.49702305848118006, 40.96550099402293, 3.0635400814916243],
[3.5710333945842927, 24.24110284275886, 7.239351846405419],
[45.19704602817468, 8.326032118639139, 9.312646035739876],
[1.8413621505780446, 30.567483868545093, 4.600220013723792],
[13.023867757475223, 12.103013652825378, 16.224609284682284],
[20.671796358278446, 3.4112033673749256, 36.87557978841176],
[33.996777913280305, 6.172065112575482, 14.72640080270051],
[0.025005487349713213, 30.051842860070614, 19.657177301280463],
[40.87176720650732, 0.017634838459856948, 59.866266778455056],
[24.742787086399492, 6.959727046187082, 22.667514822070036],
[9.525335310777088, 7.194590484053822, 29.52209860374937],
[330.5279481271995, 0.7127311051294718, 0.007228939382119833],
[20.09672606555698, 3.5055312949107793, 36.81276615776066],
[30.880203310585983, 18.315631179109918, 1.6400283186688043],
[0.13553067043794778, 0.34185931552958, 142.150112539614],
[26.20562764859457, 6.7948304804854445, 20.054555139332127],
[3.512238642109891, 25.231441976293915, 6.40575264540535],
[0.14767998891583067, 14.995851403139168, 38.44202307027863],
[0.006036194733345661, 30.836478323627077, 19.045513656336457],
[19.081473965263168, 3.650424419365363, 34.54959290880227],
[23.300198979840427, 6.684207085136835, 23.09188632214682],
[25.75364969487488, 10.036338242055585, 10.03974656616823],
[5.685888655518544, 14.787874834546074, 17.174074602065392],
[28.45209832490563, 12.383134588612172, 5.726574931820362],
[1.3716580973586394, 60.48945308210783, 7.643134134527229e-06],
[45.79437005015109, 3.370051627818931e-05, 62.74342884819718],
[45.794370050144934, 3.370051627818931e-05, 62.74342884820439],
[0.00010479847342511498, 323.75294183481395, 185.38830897650539],
[2.658419759563575, 44.02221909387343, 0.2523467042002105],
[32.87768630373118, 0.5312197537280611, 51.555541534077854],
[14.51709249202439, 6.983205119480762, 28.393455696095884],
[0.0009872251043546506, 0.9053174962382692, 133.8246713127458],
[7.25570681241572, 1.0927440074874795, 91.84582406284943],
[0.5813684255696072, 19.786932694371394, 15.41750901086601],
[4.673343261366936, 0.013149278162734916, 124.55799761321882],
[45.19704602816857, 8.326032118644386, 9.3126460357371],
[1.2660556585284062, 32.426344757867966, 3.644132864524858],
[44.8321847953332, 9.805102035428424, 6.462262624957976],
[1.8736444433586872, 23.71461044689457, 9.836098886888687],
[20.671796358278446, 3.4112033673749256, 36.87557978841176],
[0.3751041491539514, 41.086912440706506, 1.0190502661720282],
[22.28112539082112, 4.474747451834841, 31.796482081630725],
[22.834754562993897, 7.748430604687635, 20.2292918467861],
[13.023867757475223, 12.103013652825378, 16.224609284682284],
[0.003653390798950482, 11.401091572960702, 49.18423992801202],
[12.335782559414683, 38.085916954368024, 0.2992188070911673],
[5.685888655518544, 14.787874834546074, 17.174074602065392],
[8.119087814578176, 11.534463846273825, 20.646748307888863],
[35.50279068090353, 8.490114044559714, 9.818315035198417],
[3.5710333945842927, 24.24110284275886, 7.239351846405419],
[0.14160646568913623, 1.5247433585619465, 128.3999636006685],
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from src.main.HmcRestClient import *
from src.logical_partition import ListLogicalPartition, \
DeleteLogicalPartition
from src.logical_partition.sriov_logical_port import ListSRIOVLogicalPort, \
CreateSRIOVLogicalPort, \
ClearSRIOVLogicalPortStatistics, \
ModifySRIOVEthernetLogicalPort
from src.logical_partition.virtual_fibrechannel_client_adapter import ListVirtualFibreChannelClientAdapter, \
CreateVirtualFibreChannelClientAdapter
from src.logical_partition.vscsi_client_adapter import ListVirtualSCSIClientAdapter, \
CreateVirtualSCSIClientAdapter
from src.logical_partition.client_network_adapter import CreateClientNetworkAdapter, \
ListClientNetworkAdapter
from src.partition_operation_util import PowerOnPartition,\
PowerOffPartition,\
ModifyPartition, \
CreatePartition
from src.logical_partition_profile import ListLogicalPartitionProfile,\
CreateLogicalPartitionProfile,\
ModifyLogicalPartitionProfile
import sys
import os
####################
# LOGICAL PARTITION
####################
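# Base directory two levels above this script; used below to locate the help
# text files (e.g. <base>/help/LogicalPartition/...).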
directory = os.path.dirname(os.path.dirname(__file__))
def logicalpartition_children(n1, managedsystem_uuid, ip, x_api_session):
"""
This function provides a detailed view of the Logical Partitions
Args:
n1 : variable for client selected choices
managedsystem_uuid : The unique id of the Managed system
ip: ip address of hmc
x_api_session : session to be used
"""
os.system("cls")
n = n1
if n == 1:
#Logical Partition operations
while True:
print ("\n\n","LogicalPartition operations".center(50))
print_list = ['List','Create','Delete','Poweron',
'Poweroff','Modify','Return to LogicalPartition Menu',
'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit']
#select any one Logical partition operation
x = int(print_obj.print_on_screen(print_list))
listlogicalpartition_object = ListLogicalPartition.ListLogicalPartition()
object_list = listlogicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
if x == 1:
# object creation and method call to List Logical Partition
print("\nAvailable LogicalPartitions :")
selected_logicalpartition_object = get_selectedobject(object_list)
if selected_logicalpartition_object != None:
listlogicalpartition_object.print_logicalpartition_attributes(selected_logicalpartition_object)
elif x == 2:
#object creation and method call to create Logicalpartition
try:
print("\nLogical Partition will be created with Following configruations,\n maximum,mimimum and desired memory = 256",
"\nShared processors,Minimum,Desired and maximum processing units = 0.5,\npartition type = AIX/Linux")
logicalpartition_object = CreatePartition.CreatePartition("LogicalPartition")
created_logicalpartition_object = logicalpartition_object.create_Partition(ip,
managedsystem_uuid,
x_api_session)
print("\nPartition %s Created Successfully\n"%(created_logicalpartition_object.PartitionName.value()))
listlogicalpartition_object.print_logicalpartition_attributes(created_logicalpartition_object)
except (TypeError,AttributeError) :
log_object.log_error("Error in lpar creation")
elif x == 3:
#object creation and method call to delete Logical partition
selected_logicalpartition_object = None
print("\nAvailable LogicalPartitions :")
selected_logicalpartition_object = get_selectedobject(object_list)
if selected_logicalpartition_object != None:
logicalpartition_object = DeleteLogicalPartition.DeleteLogicalPartition()
logicalpartition_object.delete_LogicalPartition(ip,
managedsystem_uuid,
selected_logicalpartition_object,
x_api_session)
elif x == 4:
#object creation and method call to Poweron Logical partition
listlogicalpartition_object = ListLogicalPartition.ListLogicalPartition()
object_list = listlogicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
print("\nList of Partitions in inactive state")
k = 0
inactive_object_list = []
for i in range(0,len(object_list)):
if object_list[i].PartitionState.value() == "not activated":
k = k+1
print("%s.%s" % (k,object_list[i].PartitionName.value()))
inactive_object_list.append(object_list[i])
if k>0:
try:
c = int(input("\nSelect any partition index the operation to be performed:"))
if c > 0:
ch = c-1
selected_logicalpartition_object = inactive_object_list[ch]
logicalpartition_object = PowerOnPartition.PowerOnPartition("LogicalPartition")
logicalpartition_object.poweron_Partition(ip,
selected_logicalpartition_object,
x_api_session)
else :
print("\nTry again using valid option")
except IndexError :
print("\nTry again using valid option")
else:
log_object.log_warn("No Partitions are in inactive state")
elif x == 5:
#object creation and method call to Poweroff Logical Partition
listlogicalpartition_object = ListLogicalPartition.ListLogicalPartition()
object_list = listlogicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
print("\nList of Partitions in active state")
k = 0
active_object_list = []
for i in range(0,len(object_list)):
if object_list[i].PartitionState.value() == "open firmware" or object_list[i].PartitionState.value() == "running":
k = k+1
print("%s.%s" % (k,object_list[i].PartitionName.value()))
active_object_list.append(object_list[i])
if k>0 :
try:
c = int(input("\nSelect any partition index the operation to be performed:"))
if c > 0:
ch = c-1
selected_logicalpartition_object = active_object_list[ch]
logicalpartition_object = PowerOffPartition.PowerOffPartition("LogicalPartition")
logicalpartition_object.poweroff_Partition(ip,
selected_logicalpartition_object,
x_api_session)
else:
print("\nTry again using valid option")
except IndexError :
print("\nTry again using valid option")
else:
log_object.log_warn("No Partitions are in active state")
elif x == 6:
#object creation and method call to Modify Logical Partition
print("\nAvailable LogicalPartitions :")
                selected_logicalpartition_object = get_selectedobject(object_list)
                if selected_logicalpartition_object != None:
                    print("\nLogical partition memory attributes are modified as maximum, minimum and desired memory = 512")
                    modify_logicalpartition_object = ModifyPartition.ModifyPartition("LogicalPartition")
                    result = modify_logicalpartition_object.modify_Partition(ip,selected_logicalpartition_object,x_api_session)
if result:
print("\nModifications are updated successfully")
else:
log_object.log_error("Error occured while updating the modifications.Verify \
whether the partitions are in running or not activated state before updating it")
elif x == 7:
os.system("cls")
return 1
elif x == 8:
os.system("cls")
return 2
elif x == 9:
os.system("cls")
return 3
elif x == 10:
print(open(directory+"/help/LogicalPartition/LogicalPartitionOperations.txt").read())
elif x == 11:
sys.exit(1)
else:
print("\nTry again using valid option")
back_to_menu()
elif n == 2:
#LogicalPartition Profile operations
while True:
print ("\n\n","LogicalPartition Profile".center(50))
print_list = ['List','Create',
'Modify','Return to LogicalPartition Menu',
'Return to ManagedSystem Menu','Return to MainMenu',
'Help','Exit']
#select any one LogicalPartitionProfile operation
x1 = int(print_obj.print_on_screen(print_list))
try:
if x1 > 0 and x1 < 4:
print("\nAvailable LogicalPartitions :")
logicalpartition_object = ListLogicalPartition.ListLogicalPartition()
object_list = logicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
list_logicalpartitionprofile_object = ListLogicalPartitionProfile.\
ListLogicalPartitionProfile("LogicalPartition")
selected_logicalpartition_object=get_selectedobject(object_list)
if x1 == 1:
# object creation and method call to list all profiles for selected LPAR
if selected_logicalpartition_object != None:
partition_id =selected_logicalpartition_object.PartitionUUID.value()
profile_object_list = list_logicalpartitionprofile_object.\
list_LogicalPartitionProfile(ip,partition_id,
x_api_session)
for i in range(0,len(profile_object_list)):
list_logicalpartitionprofile_object.\
print_logicalpartitionprofile_attributes(profile_object_list[i])
elif x1 == 2:
# object creation and method call to create LPAR Profile
if selected_logicalpartition_object != None:
print("\nLogical Partition profile will be created with Following configruations,",
"\n maximum,mimimum and desired memory = 256",
"\nprofile type = REG_LPAR_PROFILE_TYPE")
create_logicalpartitionprofile_object = CreateLogicalPartitionProfile.\
CreateLogicalPartitionProfile("LogicalPartition")
created_logicalpartitionprofile_object = create_logicalpartitionprofile_object.\
create_LogicalPartitionProfile(ip,
selected_logicalpartition_object,
x_api_session)
if created_logicalpartitionprofile_object != None :
print("\nProfile %s Created Successfully\n"%(created_logicalpartitionprofile_object.ProfileName.value()))
list_logicalpartitionprofile_object.\
print_logicalpartitionprofile_attributes(created_logicalpartitionprofile_object)
elif x1 == 3:
# object creation and method call to Modify selected Profile
if selected_logicalpartition_object != None:
partition_id =selected_logicalpartition_object.PartitionUUID.value()
profile_object_list = list_logicalpartitionprofile_object.\
list_LogicalPartitionProfile(ip,partition_id,
x_api_session)
print("\nAvailable LogicalPartitionProfile:")
for i in range(0,len(profile_object_list)):
print("%s.%s"%(i+1,profile_object_list[i].ProfileName.value()))
try:
ch=int(input("\nselect any profile index to modify :"))
print("\nLogical partition profile memory attributes are modified as maximum ,minimum ,desired memory = 512")
modify_logicalpartitionprofile_object = ModifyLogicalPartitionProfile.\
ModifyLogicalPartitionProfile("LogicalPartition")
modify_bool = modify_logicalpartitionprofile_object.\
modify_LogicalPartitionProfile(ip,
partition_id,
profile_object_list[ch-1],
x_api_session)
if modify_bool:
print("\nUpdations to the profile are made Successfully")
else:
log_object.log_error("\nError occured while updating")
except IndexError :
print("\nTry again using valid option")
elif x1 == 4:
os.system("cls")
return 1
elif x1 == 5:
os.system("cls")
return 2
elif x1 == 6:
os.system("cls")
return 3
elif x1 == 7:
print(open(directory+"/help/LogicalPartitionProfile.txt").read())
elif x1 == 8:
sys.exit(1)
else:
print("\nTry again using valid option")
back_to_menu()
except IndexError :
log_object.log_warn("No LogicalPartition Available")
back_to_menu()
elif n == 3:
#ClientNetworkAdapter operations
while True:
print ("\n\n","ClientNetworkAdapter".center(50))
print_list = ['List','Create','Return to LogicalPartition Menu',
'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit']
#select any ClientNetworkAdapter operation
x1 = int(print_obj.print_on_screen(print_list))
if x1 > 0 and x1 < 3 :
print("\nAvailable LogicalPartitions :")
logicalpartition_object = ListLogicalPartition.ListLogicalPartition()
object_list = logicalpartition_object.\
list_LogicalPartition(ip, managedsystem_uuid,
x_api_session)
selected_logicalpartition_object=get_selectedobject(object_list)
if x1 == 1:
                #object creation and method call to list all client network adapters available in the selected LPAR
if selected_logicalpartition_object != None:
logicalpartition_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value()
list_clientnetwork_adapter_object = ListClientNetworkAdapter.ListClientNetworkAdapter()
clientnetwork_adapter_list = list_clientnetwork_adapter_object.\
list_clientnetwork_adapter(ip,
logicalpartition_id,
x_api_session)
try:
for clientnetwork_adapter in clientnetwork_adapter_list:
list_clientnetwork_adapter_object.print_clientnetwork_adapter_attributes(clientnetwork_adapter)
except TypeError:
log_object.log_warn("\nNo ClientNetworkAdapters are Available")
elif x1 == 2:
#object creation and method call to create client network adapter
if selected_logicalpartition_object != None:
logicalpartition_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value()
client_networkadapter_object = CreateClientNetworkAdapter.\
CreateClientNetworkAdapter()
client_networkadapter_object.create_clientnetwork_adapter(ip,
logicalpartition_id ,
x_api_session)
elif x1 == 3:
os.system("cls")
return 1
elif x1 == 4:
os.system("cls")
return 2
elif x1 == 5:
os.system("cls")
return 3
elif x1 == 6:
print(open(directory+"/help/LogicalPartition/ClientNetworkAdapter.txt").read())
elif x1 == 7:
sys.exit(1)
else:
print("\nTry again using valid option")
back_to_menu()
elif n == 4:
#virtual scsi adapter operations
while True:
print ("\n\n","VirtualSCSIClientAdapter".center(50))
print_list = ['List','Create','Return to LogicalPartition Menu',
'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit']
#select any VirtualSCSIClientAdapter operation
x1 = int(print_obj.print_on_screen(print_list))
if x1 > 0 and x1 < 3:
print("\nAvailable LogicalPartitions :")
logicalpartition_object = ListLogicalPartition.\
ListLogicalPartition()
object_list = logicalpartition_object.\
list_LogicalPartition(ip,
managedsystem_uuid,
x_api_session)
selected_logicalpartition_object=get_selectedobject(object_list)
if x1 == 1:
#object creation and method call to list all virtual scsi adapters in the selected lpar
if selected_logicalpartition_object != None:
lpar_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value()
vscsi_list_object = ListVirtualSCSIClientAdapter.\
ListVirtualSCSIClientAdapter()
object_list = vscsi_list_object.list_virtualscsi_clientadapter(ip,
lpar_id,
x_api_session)
if object_list != None:
print("\nDetails of Available VirtualSCSIClientAdapters :",
"\n--------------------------------------------------")
for i in range(0,len(object_list)):
vscsi_list_object.print_vscsi_attributes(object_list[i])
else :
log_object.log_warn("There are No VirtualSCSIClientAdapters in the selected LogicalPartition")
elif x1 == 2:
                #object creation and method call to create a virtual SCSI adapter in the selected LPAR
if selected_logicalpartition_object != None:
lpar_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value()
vscsi_create_object = CreateVirtualSCSIClientAdapter.\
| |
<li> Data ONTAP 8.2 or later operating in Cluster-Mode if the
relationship control plane is 'v1'.
</ul>
"""
return self.request( "snapmirror-destroy-async", {
'source_vserver': [ source_vserver, 'source-vserver', [ basestring, 'None' ], False ],
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
'source_cluster': [ source_cluster, 'source-cluster', [ basestring, 'None' ], False ],
'destination_vserver': [ destination_vserver, 'destination-vserver', [ basestring, 'None' ], False ],
'destination_location': [ destination_location, 'destination-location', [ basestring, 'None' ], False ],
'destination_volume': [ destination_volume, 'destination-volume', [ basestring, 'None' ], False ],
'source_location': [ source_location, 'source-location', [ basestring, 'None' ], False ],
'destination_cluster': [ destination_cluster, 'destination-cluster', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
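    # A minimal usage sketch (not part of the original library). The connection
    # object name 'server' is hypothetical; only the call signature above and the
    # documented result fields are assumed.
    #
    #   result = server.snapmirror_destroy_async(
    #       destination_location='dst_vserver:dst_volume')
    #   # On clustered Data ONTAP the 'result-jobid' field identifies the job,
    #   # which can then be tracked with the job APIs.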
def snapmirror_update(self, source_vserver=None, source_volume=None, destination_snapshot=None, transfer_priority=None, source_cluster=None, destination_vserver=None, destination_location=None, destination_volume=None, source_location=None, source_snapshot=None, max_transfer_rate=None, destination_cluster=None):
"""
Updates the destination endpoint of the SnapMirror relationship.
The update is asynchronously handled, and there is no
guarantee that it will succeed.
<p>
On Data ONTAP operating in 7-Mode the snapmirror-get-status API
can be used to check the status of the update. The API must
be issued on the destination storage system.
<p>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data ONTAP
8.2 operating in Cluster-Mode and for relationships using
        a control plane compatible with Data ONTAP 8.1 operating in Cluster-Mode
(relationship-control-plane set 'v1'), a job will be spawned to
operate on the SnapMirror relationship, and the job id will be
returned. The progress of the job can be tracked using the job
APIs.
<p>
On Data ONTAP 8.2 or later operating in Cluster-Mode, you
can track the progress of the operation using the
snapmirror-get API, except for relationships using a control
        plane compatible with Data ONTAP 8.1 operating in Cluster-Mode.
<p>
You must specify the destination endpoint when using
snapmirror-update.
<p>
The API makes the destination volume an up-to-date mirror of the
source volume.
<p>
This API must be used from the destination storage system on
Data ONTAP 7-Mode, or from the destination cluster on Data
ONTAP 8.1 operating in Cluster-Mode, and from the destination
Vserver on Data ONTAP 8.2 or later operating in Cluster-Mode.
<p>
On Data ONTAP operating in 7-Mode, if the destination endpoint
is a volume, the volume must be in the restricted state. If the
destination endpoint is a qtree, the qtree must not already
exist.
<p>
        On Data ONTAP operating in Cluster-Mode, if the destination volume is
        empty, the snapmirror-update API will fail. The snapmirror-initialize
        API must be called to perform the baseline transfer before
        the snapmirror-update can be called.
<p>
For data protection relationships, the snapmirror-update API
makes the destination volume an up-to-date mirror of the source
volume with the following steps:</p>
<ul>
<li>If the source volume is read-write, takes a Snapshot copy on
the source volume to capture the current image of the source volume.
<li>Finds the most recent Snapshot copy on the destination volume
and validates that the corresponding Snapshot copy is on the source.
<li>Incrementally transfers Snapshot copies that are newer than
the corresponding Snapshot copy to the destination volume.
</ul>
<p>
For vault relationships, the snapmirror-update API does not take
a Snapshot copy on the source volume but transfers only selected
Snapshot copies that are newer than the common Snapshot copy to
the destination volume. Snapshot copies are selected by matching
their 'snapmirror-label' with the 'snapmirror-label' of one of
the rules from the corresponding SnapMirror policy associated
to the SnapMirror relationship.
All matching Snapshot copies are incrementally transferred to the
destination volume.
<p>
For vault relationships, the snapmirror-update API also manages
expiration of Snapshot copies on the destination volume. It does
so by deleting Snapshot copies that have exceeded the value of
'keep' for the matching rule from the corresponding SnapMirror
policy associated with the SnapMirror relationship. Snapshot copies
that match the same 'snapmirror-label' will be deleted in
oldest-first order.
<p>
For data protection relationships, the parameter 'source-snapshot'
is optional and allows for the transfer of Snapshot copies newer than
the common Snapshot copy up to the specified 'source-snapshot'.
<p>
For vault relationships, the parameter 'source-snapshot' is optional
and allows transfer of a Snapshot copy that is older than the common
Snapshot copy and/or may not be selected for transfer based on
policy-based selection of a scheduled update transfer.
<p>
After the snapmirror-update API successfully completes, the last
Snapshot copy transferred is made the new exported Snapshot copy
on the destination volume. If an update to a vault relationship
specifies a Snapshot copy using the 'source-snapshot' parameter
that is older than the common snapshot, after the snapmirror-update
API successfully completes, the exported Snapshot copy on the
destination volume will remain unchanged.
<p>
If the snapmirror-update does not finish successfully, due to a
network failure or because a snapmirror-abort API was issued for
example, a restart checkpoint might be recorded on the
destination volume. If a restart checkpoint is recorded, the
next update restarts and continues the transfer from the restart
checkpoint. For vault relationships, the next update will restart
and continue the old transfer regardless of whether it is a
matching Snapshot copy or not.
<p>
On Data ONTAP 8.1 operating in Cluster-Mode, you can
use the snapmirror-update API to update a specific load-sharing
mirror that lags behind up-to-date destination volumes in the
set of load-sharing mirrors. An update to the lagging
load-sharing mirror should bring it up to date with the other
up-to-date destination volumes in the set of load-sharing
mirrors.
Note: You might have to run the snapmirror-update API more than
once if the command does not finish before the next scheduled
update of the set of load-sharing mirrors.
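        <p>
        A minimal, illustrative call (not from the original documentation; the
        connection object name is hypothetical and only documented parameters
        are used):
        <pre>
            result = server.snapmirror_update(
                destination_location='dst_vserver:dst_volume')
        </pre>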
:param source_vserver: Specifies the source Vserver of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the source volume.
<li> The name of the source cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param source_volume: Specifies the source volume of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the source Vserver.
<li> The name of the source cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param destination_snapshot: Creates the specified snapshot (in addition to the regular
SnapMirror snapshot) on the destination after the qtree
SnapMirror transfer is over.
:param transfer_priority: Specifies the priority at which the transfer runs.
Possible values are: "normal", and "low". The default
value is the value specified in the snapmirror policy which is
associated with the relationship.
<p>This parameter only applies on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control plane
is 'v2'.
:param source_cluster: Specifies the source cluster of the SnapMirror relationship. The
source Vserver and source volume must also be specified if using
this parameter.
:param destination_vserver: Specifies the destination Vserver of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the destination volume.
<li> The name of the destination cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param destination_location: Specifies the destination endpoint of the SnapMirror
relationship in the following formats:
<ul>
<li> <system>:/vol/<volume>[/<qtree>]
On Data ONTAP operating in 7-Mode.
<li> [<cluster>:]//<vserver>/<volume>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data
ONTAP 8.2 operating in Cluster-Mode for relationships using
a control plane compatible with Data ONTAP 8.1 operating in
Cluster-Mode.
<li> <[vserver:]volume>
On Data ONTAP 8.2 | |
c.argument('bucket_id', type=str, help='Bucket ID to which the task belongs. The bucket needs to be in the '
'plan that the task is in. It is 28 characters long and case-sensitive. Format validation is done '
'on the service.')
c.argument('checklist_item_count', type=int, help='Number of checklist items that are present on the task.')
c.argument('completed_date_time', help='Read-only. Date and time at which the \'percentComplete\' of the task '
'is set to \'100\'. The Timestamp type represents date and time information using ISO 8601 format '
'and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('conversation_thread_id', type=str, help='Thread ID of the conversation on the task. This is the ID '
'of the conversation thread object created in the group.')
c.argument('created_date_time', help='Read-only. Date and time at which the task is created. The Timestamp '
'type represents date and time information using ISO 8601 format and is always in UTC time. For '
'example, midnight UTC on Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('due_date_time', help='Date and time at which the task is due. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('has_description', arg_type=get_three_state_flag(), help='Read-only. Value is true if the details '
'object of the task has a non-empty description and false otherwise.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('percent_complete', type=int, help='Percentage of task completion. When set to 100, the task is '
'considered completed.')
c.argument('plan_id', type=str, help='Plan ID to which the task belongs.')
c.argument('preview_type', arg_type=get_enum_type(['automatic', 'noPreview', 'checklist', 'description',
'reference']), help='')
c.argument('reference_count', type=int, help='Number of external references that exist on the task.')
c.argument('start_date_time', help='Date and time at which the task starts. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('title', type=str, help='Title of the task.')
c.argument('bucket_task_board_format', action=AddBucketTaskBoardFormat, nargs='+',
help='plannerBucketTaskBoardTaskFormat')
c.argument('progress_task_board_format', action=AddProgressTaskBoardFormat, nargs='+',
help='plannerProgressTaskBoardTaskFormat')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Details')
c.argument('checklist', type=validate_file_or_dict, help='plannerChecklistItems Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('description', type=str, help='Description of the task', arg_group='Details')
c.argument('microsoft_graph_planner_preview_type', arg_type=get_enum_type(['automatic', 'noPreview',
'checklist', 'description',
'reference']), help='',
arg_group='Details')
c.argument('references', type=validate_file_or_dict, help='plannerExternalReferences Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('id1', type=str, help='Read-only.', arg_group='Assigned To Task Board Format')
c.argument('order_hints_by_assignee', type=validate_file_or_dict, help='plannerOrderHintsByAssignee Expected '
'value: json-string/@json-file.', arg_group='Assigned To Task Board Format')
c.argument('unassigned_order_hint', type=str, help='Hint value used to order the task on the AssignedTo view '
'of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee '
'dictionary does not provide an order hint for the user the task is assigned to. The format is '
'defined as outlined here.', arg_group='Assigned To Task Board Format')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
with self.argument_context('planner planner-bucket delete-task') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-bucket list-task') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-bucket show-task') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-bucket update-task') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('active_checklist_item_count', type=int, help='Number of checklist items with value set to false, '
'representing incomplete items.')
c.argument('applied_categories', type=validate_file_or_dict, help='plannerAppliedCategories Expected value: '
'json-string/@json-file.')
c.argument('assignee_priority', type=str, help='Hint used to order items of this type in a list view. The '
'format is defined as outlined here.')
c.argument('assignments', type=validate_file_or_dict, help='plannerAssignments Expected value: '
'json-string/@json-file.')
c.argument('bucket_id', type=str, help='Bucket ID to which the task belongs. The bucket needs to be in the '
'plan that the task is in. It is 28 characters long and case-sensitive. Format validation is done '
'on the service.')
c.argument('checklist_item_count', type=int, help='Number of checklist items that are present on the task.')
c.argument('completed_date_time', help='Read-only. Date and time at which the \'percentComplete\' of the task '
'is set to \'100\'. The Timestamp type represents date and time information using ISO 8601 format '
'and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('conversation_thread_id', type=str, help='Thread ID of the conversation on the task. This is the ID '
'of the conversation thread object created in the group.')
c.argument('created_date_time', help='Read-only. Date and time at which the task is created. The Timestamp '
'type represents date and time information using ISO 8601 format and is always in UTC time. For '
'example, midnight UTC on Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('due_date_time', help='Date and time at which the task is due. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('has_description', arg_type=get_three_state_flag(), help='Read-only. Value is true if the details '
'object of the task has a non-empty description and false otherwise.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('percent_complete', type=int, help='Percentage of task completion. When set to 100, the task is '
'considered completed.')
c.argument('plan_id', type=str, help='Plan ID to which the task belongs.')
c.argument('preview_type', arg_type=get_enum_type(['automatic', 'noPreview', 'checklist', 'description',
'reference']), help='')
c.argument('reference_count', type=int, help='Number of external references that exist on the task.')
c.argument('start_date_time', help='Date and time at which the task starts. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('title', type=str, help='Title of the task.')
c.argument('bucket_task_board_format', action=AddBucketTaskBoardFormat, nargs='+',
help='plannerBucketTaskBoardTaskFormat')
c.argument('progress_task_board_format', action=AddProgressTaskBoardFormat, nargs='+',
help='plannerProgressTaskBoardTaskFormat')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Details')
c.argument('checklist', type=validate_file_or_dict, help='plannerChecklistItems Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('description', type=str, help='Description of the task', arg_group='Details')
c.argument('microsoft_graph_planner_preview_type', arg_type=get_enum_type(['automatic', 'noPreview',
'checklist', 'description',
'reference']), help='',
arg_group='Details')
c.argument('references', type=validate_file_or_dict, help='plannerExternalReferences Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('id1', type=str, help='Read-only.', arg_group='Assigned To Task Board Format')
c.argument('order_hints_by_assignee', type=validate_file_or_dict, help='plannerOrderHintsByAssignee Expected '
'value: json-string/@json-file.', arg_group='Assigned To Task Board Format')
c.argument('unassigned_order_hint', type=str, help='Hint value used to order the task on the AssignedTo view '
'of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee '
'dictionary does not provide an order hint for the user the task is assigned to. The format is '
'defined as outlined here.', arg_group='Assigned To Task Board Format')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
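    # Illustrative only (not part of the generated source): a hypothetical
    # invocation of the 'planner planner-bucket update-task' command registered
    # above, with flag names inferred from the argument names by the usual
    # Azure CLI convention.
    #
    #   az planner planner-bucket update-task --planner-bucket-id <bucket-id> \
    #       --planner-task-id <task-id> --title "Revised title" --percent-complete 50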
with self.argument_context('planner planner-bucket-task delete-assigned-to-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-bucket-task delete-bucket-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-bucket-task delete-detail') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-bucket-task delete-progress-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-bucket-task show-assigned-to-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-bucket-task show-bucket-task-board-format') as | |
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""Energy distribution classes."""
import math
from pqu import PQU as PQUModule
from fudge.core.utilities import brb
import xData.ancestry as ancestryModule
from xData import standards as standardsModule
from xData import base as xDataBaseModule
from xData import axes as axesModule
from xData import XYs as XYsModule
from xData import xs_pdf_cdf as xs_pdf_cdfModule
from xData import multiD_XYs as multiD_XYsModule
from xData import regions as regionsModule
from fudge import physicalQuantity as physicalQuantityModule
from . import base as baseModule
from . import probabilities as probabilitiesModule
from . import miscellaneous as miscellaneousModule
__metaclass__ = type
def defaultAxes( energyUnit ) :
axes = axesModule.axes( rank = 3 )
axes[2] = axesModule.axis( 'energy_in', 2, energyUnit )
axes[1] = axesModule.axis( 'energy_out', 1, energyUnit )
axes[0] = axesModule.axis( 'P(energy_out|energy_in)', 0, '1/' + energyUnit )
return( axes )
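# A small usage sketch (not part of the original module): defaultAxes builds the
# (energy_in, energy_out, probability) axes that the spectrum classes below
# expect, e.g.
#
#   axes = defaultAxes( 'MeV' )
#   spectrum = XYs1d( data = [ [ 0.0, 0.0 ], [ 1.0, 2.0 ], [ 2.0, 0.0 ] ], axes = axes )
#   print( spectrum.averageEnergy( ) )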
class XYs1d( XYsModule.XYs1d ) :
def averageEnergy( self ) :
allowedInterpolations = [ standardsModule.interpolation.linlinToken, standardsModule.interpolation.flatToken ]
xys = self.changeInterpolationIfNeeded( allowedInterpolations, XYsModule.defaultAccuracy )
return( xys.integrateWithWeight_x( ) )
def toLinearXYsClass( self ) :
return( XYs1d )
class regions1d( regionsModule.regions1d ) :
def averageEnergy( self ) :
averageEnergy = 0
for region in self : averageEnergy += region.averageEnergy( )
return( averageEnergy )
def integrateWithWeight_x( self ) :
sum = 0
for region in self : sum += region.integrateWithWeight_x( )
return( sum )
def toLinearXYsClass( self ) :
return( XYs1d )
@staticmethod
def allowedSubElements():
return( XYs1d, )
class xs_pdf_cdf1d( xs_pdf_cdfModule.xs_pdf_cdf1d ) :
def toLinearXYsClass( self ) :
return( XYs1d )
class subform( baseModule.subform ) :
"""Abstract base class for energy forms."""
def to_xs_pdf_cdf1d( self, style, tempInfo, indent ) :
return( None )
class discretePrimaryGamma( subform ) :
dimension = 2
ancestryMembers = ( '', )
def __init__( self, value, domainMin, domainMax, axes = None ) :
subform.__init__( self )
if( isinstance( value, int ) ) : value = float( value )
if( not( isinstance( value, float ) ) ) : raise TypeError( 'value must be a float.' )
self.value = value
if( isinstance( domainMin, int ) ) : domainMin = float( domainMin )
if( not( isinstance( domainMin, float ) ) ) : raise TypeError( 'domainMin must be a float.' )
self.__domainMin = domainMin
if( isinstance( domainMax, int ) ) : domainMax = float( domainMax )
if( not( isinstance( domainMax, float ) ) ) : raise TypeError( 'domainMax must be a float.' )
self.__domainMax = domainMax
if( axes is None ) :
self.__axes = None
else :
if( not( isinstance( axes, axesModule.axes ) ) ) : raise TypeError( 'axes is not an axes instance' )
if( len( axes ) <= self.dimension ) : raise Exception( 'len( axes ) = %d != ( self.dimension + 1 ) = %d' % ( len( axes ), ( self.dimension + 1 ) ) )
self.__axes = axes.copy( )
self.__axes.setAncestor( self )
@property
def axes( self ) :
return( self.__axes )
@property
def domainMin( self ) :
return( self.__domainMin )
@property
def domainMax( self ) :
return( self.__domainMax )
@property
def domainUnit( self ) :
return( self.__axes[-1].unit )
def convertUnits( self, unitMap ) :
"See documentation for reactionSuite.convertUnits."
factors = self.axes.convertUnits( unitMap )
self.value *= factors[1]
self.__domainMin *= factors[2]
self.__domainMax *= factors[2]
def copy( self ):
return self.__class__( self.value, self.__domainMin, self.__domainMax, self.axes )
__copy__ = copy
def energySpectrumAtEnergy( self, energy, discreteGammaResolution = 1e-2 ) :
"""Returns the energy spectrum in the lab frame for the specified incident energy."""
if( ( self.__domainMin > energy ) or ( self.__domainMax < energy ) ) : return( XYs1d( axes = defaultAxes( self.domainUnit ) ) )
photonEnergy = self.energyAtEnergy( energy )
energy1 = photonEnergy * ( 1.0 - discreteGammaResolution )
energy2 = photonEnergy * ( 1.0 + discreteGammaResolution )
height = 2.0 / ( energy2 - energy1 )
return( XYs1d( data = [ [ energy1, 0.0 ], [ photonEnergy, height ], [ energy2, 0.0 ] ], axes = defaultAxes( self.domainUnit ) ) )
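# The returned spectrum is a unit-area triangle centred on the photon energy: base = energy2 - energy1
# and peak height = 2 / ( energy2 - energy1 ), so 0.5 * base * height integrates to exactly 1.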
def toXMLList( self, indent = '', **kwargs ) :
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
XMLStringList = [ '%s<%s value="%s" domainMin="%s" domainMax="%s"' % ( indent, self.moniker, PQUModule.floatToShortestString( self.value, 12 ),
PQUModule.floatToShortestString( self.__domainMin, 12 ), PQUModule.floatToShortestString( self.__domainMax, 12 ) ) ]
if( self.axes is None ) :
XMLStringList[-1] += '/>'
else :
XMLStringList[-1] += '>'
XMLStringList += self.axes.toXMLList( indent = indent2, **kwargs )
XMLStringList[-1] += '</%s>' % self.moniker
return( XMLStringList )
@classmethod
def parseXMLNode( cls, element, xPath, linkData ) :
value = float( element.get( 'value' ) )
domainMin = float( element.get( 'domainMin' ) )
domainMax = float( element.get( 'domainMax' ) )
axes = None
for child in element :
if( child.tag == axesModule.axes.moniker ) :
axes = axesModule.axes.parseXMLNode( child, xPath, linkData )
else :
raise Exception( 'Invalid sub-element with tag = "%s"' % child.tag )
return( cls( value, domainMin, domainMax, axes = axes ) )
def getEnergyArray( self, EMin = None, EMax = None ) :
return( [ EMin, EMax ] )
class discreteGamma( discretePrimaryGamma ) :
moniker = 'discreteGamma'
def check( self, info ) :
from fudge import warning
warnings = []
if( self.value <= 0 ) : warnings.append( warning.negativeDiscreteGammaEnergy() )
return( warnings )
def averageEp( self, E ) :
return( self.value )
def energyAtEnergy( self, energyIn ) :
return( self.value )
def integrate( self, energyIn, energyOut ) :
if( self.domainMin <= energyIn <= self.domainMax ) :
domainMin, domainMax = miscellaneousModule.domainLimits( energyOut, self.value, self.value )
if( domainMin <= self.value <= domainMax ) : return( 1.0 )
return( 0.0 )
class primaryGamma( discretePrimaryGamma ) :
moniker = 'primaryGamma'
def __init__( self, value, domainMin, domainMax, axes = None ) :
discretePrimaryGamma.__init__( self, value, domainMin, domainMax, axes = axes )
self.__massRatio = None # In ENDF lingo this is AWR / ( AWR + 1 ).
@property
def massRatio( self ) :
if( self.__massRatio is None ) :
self.__massRatio = self.findAttributeInAncestry( "getMassRatio" )( )
return self.__massRatio
def check( self, info ) :
from fudge import warning
warnings = []
# BRB6 hardwired
Qvalue = self.findAttributeInAncestry('getQ')('eV')
if isinstance( self.value, PQUModule.PQU ) :
testValue = self.value.getValueAs( 'eV' )
else:
testValue = self.value
if testValue > Qvalue:
warnings.append( warning.primaryGammaEnergyTooLarge( self.value,
100 * testValue / Qvalue ) )
return warnings
def averageEp( self, energyIn ) :
return( self.energyAtEnergy( energyIn ) )
def energyAtEnergy( self, energyIn ) :
return( float( self.value ) + self.massRatio * energyIn )
def integrate( self, energyIn, energyOut ) :
gammaEnergy = float( self.value ) + self.massRatio * energyIn
if( self.domainMin <= energyIn <= self.domainMax ) :
domainMin, domainMax = miscellaneousModule.domainLimits( energyOut, gammaEnergy, gammaEnergy )
if( domainMin <= gammaEnergy <= domainMax ) : return( 1.0 )
return( 0.0 )
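# For reference, energyAtEnergy above implements E_gamma = value + [ AWR / ( AWR + 1 ) ] * E_in;
# e.g. for a hydrogen target ( AWR ~ 1 ) the photon energy sits roughly 0.5 * E_in above 'value'.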
class XYs2d( subform, probabilitiesModule.PofX1GivenX2 ) :
def __init__( self, **kwargs ):
"""
>pointwise = XYs2d( )
followed by:
>pointwise[ 0 ] = XYs_data_1
>pointwise[ 1 ] = XYs_data_2
> ...
>pointwise[ n-1 ] = XYs_data_n
"""
probabilitiesModule.PofX1GivenX2.__init__( self, **kwargs )
subform.__init__( self )
def evaluate( self, domainValue, extrapolation = standardsModule.noExtrapolationToken, epsilon = 0 ) :
return( probabilitiesModule.PofX1GivenX2.evaluate( self, domainValue, extrapolation = extrapolation, epsilon = epsilon,
interpolationQualifier = standardsModule.interpolation.unitBaseToken ) )
def getAtEnergy( self, energy ) :
"""This method is deprecated, use getSpectrumAtEnergy."""
return( self.getSpectrumAtEnergy( energy ) )
def energySpectrumAtEnergy( self, energy ) :
"""Returns the energy spectrum in the lab frame for the specified incident energy."""
spectrum = self.evaluate( energy, extrapolation = standardsModule.flatExtrapolationToken )
if( isinstance( spectrum, regions1d ) ) : spectrum = spectrum.toPointwise_withLinearXYs( lowerEps = 1e-6, upperEps = 1e-6 )
return( spectrum )
def getSpectrumAtEnergy( self, energy ) :
"""Returns the energy spectrum for self at projectile energy."""
return( self.energySpectrumAtEnergy( energy ) )
def getEnergyArray( self, EMin = None, EMax = None ) :
Es = [ data.outerDomainValue for data in self ]
if( EMin is not None ) :
if( EMin < ( 1.0 - 1e-15 ) * Es[0] ) : Es.insert( 0, EMin )
if( EMax is not None ) :
if( EMax > Es[-1] ) : Es.append( EMax )
return( Es )
def averageEp( self, energy ) :
from datetime import date, timedelta
from urllib import urlencode
import pytest
from tornado.httpclient import HTTPError
from fixtures import fe_app as app
from fixtures import standard_graph, users, graph, groups, session, permissions # noqa
from grouper.model_soup import Group, User
from grouper.models.public_key import PublicKey
from grouper.models.public_key_tag import PublicKeyTag
from grouper.models.permission import Permission
from grouper.constants import TAG_EDIT
from grouper.permissions import permission_intersection
from grouper.public_key import get_public_key_permissions, get_public_key_tags, get_public_key_tag_permissions
from grouper.user_permissions import user_permissions
from url_util import url
from util import get_users, get_groups, add_member, grant_permission
from collections import namedtuple
key_1 = ('ssh-rsa A<KEY>2EAAAADAQABAAAB<KEY>pT/etEJR2WUoR+h2sMOQYbJgr0Q'
'<KEY>'
'<KEY>'
'AJh0xLZwhw17/NDM+dAcEdMZ9V89KyjwjraXtOVfFhQF0EDF0ame8d6UkayGrAiXC2He0P2Cja+J'
'<KEY>'
'gh/ some-comment')
key_2 = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDF1DyXlqc40AVUgt/IO0GFcTniaoFt5qCUAeNVlva"
"lMnsrRULIXkb0g1ds9P9/UI2jWr70ZYG7XieQX1F7NpzaDeUyPGCrLV1/ev1ZtUImCrDFfMznEjkcqB"
"33mRe1rCFGKNVOYUviPE1yBdbfZBGUuJBX2GOXQQj9fU4Hiq3rAgOhz89717mt+qZxZllZ4mdyVEaMB"
"WCwqAvl7Z5ecDjB+llFpBORTmsT8OZoGbZnJTIB1d9j0tSbegP17emE+g9fTrk4/ePmSIAKcSV3xj6h"
"98AGesNibyu9eKVrroEptxX4crl0o95Me6B1/DCL632xrTO0a5mSmlF4cxCgjLj9 to/ key2")
@pytest.mark.gen_test
def test_create_tag(users, http_client, base_url, session):
user = session.query(User).filter_by(username="<EMAIL>").scalar()
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
tag = PublicKeyTag.get(session, name="tyler_was_here")
assert tag is not None, "The tag should be created"
assert tag.name == "tyler_was_here", "The tag's name should be tyler_was_here"
@pytest.mark.gen_test
def test_add_tag(users, http_client, base_url, session):
user = session.query(User).filter_by(username="<EMAIL>").scalar()
# add SSH key
fe_url = url(base_url, '/users/{}/public-key/add'.format(user.username))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'public_key': key_1}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key = session.query(PublicKey).filter_by(user_id=user.id).scalar()
# add SSH key
fe_url = url(base_url, '/users/{}/public-key/add'.format(user.username))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'public_key': key_2}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key2 = session.query(PublicKey).filter_by(public_key=key_2).scalar()
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag = PublicKeyTag.get(session, name="tyler_was_here")
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert get_public_key_tags(session, key) == [], "No public keys should have a tag unless it's been added to the key"
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "dont_tag_me_bro", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag = PublicKeyTag.get(session, name="dont_tag_me_bro")
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert get_public_key_tags(session, key) == [], "No public keys should have a tag unless it's been added to the key"
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 1, "The key should have exactly 1 tag"
assert get_public_key_tags(session, key)[0].name == "tyler_was_here"
key2 = session.query(PublicKey).filter_by(public_key=key_2).scalar()
assert len(get_public_key_tags(session, key2)) == 0, "Keys other than the one with the added tag should not gain tags"
# Non-admin and not user adding tag should fail
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key.id))
with pytest.raises(HTTPError):
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here"}),
headers={'X-Grouper-User': "<EMAIL>"})
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 1, "The key should have exactly 1 tag"
assert get_public_key_tags(session, key)[0].name == "tyler_was_here"
# User admins test
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "dont_tag_me_bro"}),
headers={'X-Grouper-User': "<EMAIL>"})
assert resp.code == 200
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 2, "The key should have 2 tags now"
assert set([x.name for x in get_public_key_tags(session, key)]) == set(["tyler_was_here",
"dont_tag_me_bro"])
@pytest.mark.gen_test
def test_remove_tag(users, http_client, base_url, session):
user = session.query(User).filter_by(username="<EMAIL>").scalar()
# add SSH key
fe_url = url(base_url, '/users/{}/public-key/add'.format(user.username))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'public_key': key_1}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key = session.query(PublicKey).filter_by(user_id=user.id).scalar()
# add SSH key
fe_url = url(base_url, '/users/{}/public-key/add'.format(user.username))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'public_key': key_2}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key2 = session.query(PublicKey).filter_by(public_key=key_2).scalar()
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag = PublicKeyTag.get(session, name="tyler_was_here")
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert get_public_key_tags(session, key) == [], "No public keys should have a tag unless it's been added to the key"
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "dont_tag_me_bro", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag2 = PublicKeyTag.get(session, name="dont_tag_me_bro")
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert get_public_key_tags(session, key) == [], "No public keys should have a tag unless it's been added to the key"
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 1, "The key should have exactly 1 tag"
assert get_public_key_tags(session, key)[0].name == "tyler_was_here"
key2 = session.query(PublicKey).filter_by(public_key=key_2).scalar()
assert len(get_public_key_tags(session, key2)) == 0, "Keys other than the one with the added tag should not gain tags"
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key2.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here"}),
headers={'X-Grouper-User': user.username})
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
# Fail Remove tag
tag = PublicKeyTag.get(session, name="dont_tag_me_bro")
fe_url = url(base_url, '/users/{}/public-key/{}/delete_tag/{}'.format(user.username, key.id, tag.id))
with pytest.raises(HTTPError):
resp = yield http_client.fetch(fe_url, method="POST",
body="",
headers={'X-Grouper-User': "<EMAIL>"})
# Remove tag that isn't on key: should fail silently
tag = PublicKeyTag.get(session, name="dont_tag_me_bro")
fe_url = url(base_url, '/users/{}/public-key/{}/delete_tag/{}'.format(user.username, key.id, tag.id))
resp = yield http_client.fetch(fe_url, method="POST",
body="",
headers={'X-Grouper-User': user.username})
assert resp.code == 200
# Remove tag
tag = PublicKeyTag.get(session, name="tyler_was_here")
fe_url = url(base_url, '/users/{}/public-key/{}/delete_tag/{}'.format(user.username, key.id, tag.id))
resp = yield http_client.fetch(fe_url, method="POST",
body="",
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 0, "The key should have exactly 0 tags"
key2 = session.query(PublicKey).filter_by(public_key=key_2).scalar()
assert len(get_public_key_tags(session, key2)) == 1, "Removing a tag from one key should not affect other keys"
# User admin remove tag
# readd tag
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 1, "The key should have exactly 1 tag"
assert get_public_key_tags(session, key)[0].name == "tyler_was_here"
# Nonuser admin fail Remove tag
tag = PublicKeyTag.get(session, name="tyler_was_here")
fe_url = url(base_url, '/users/{}/public-key/{}/delete_tag/{}'.format(user.username, key.id, tag.id))
with pytest.raises(HTTPError):
resp = yield http_client.fetch(fe_url, method="POST",
body="",
headers={'X-Grouper-User': "<EMAIL>"})
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 1, "The key should have exactly 1 tag"
# Remove tag
tag = PublicKeyTag.get(session, name="tyler_was_here")
fe_url = url(base_url, '/users/{}/public-key/{}/delete_tag/{}'.format(user.username, key.id, tag.id))
resp = yield http_client.fetch(fe_url, method="POST",
body="",
headers={'X-Grouper-User': "<EMAIL>"})
assert resp.code == 200
key = session.query(PublicKey).filter_by(public_key=key_1).scalar()
assert len(get_public_key_tags(session, key)) == 0, "The key should have exactly 0 tags"
@pytest.mark.gen_test
def test_grant_permission_to_tag(users, http_client, base_url, session):
user = session.query(User).filter_by(username="<EMAIL>").scalar()
perm = Permission(name=TAG_EDIT, description="Why is this not nullable?")
perm.add(session)
session.commit()
grant_permission(session.query(Group).filter_by(groupname="all-teams").scalar(), session.query(Permission).filter_by(name=TAG_EDIT).scalar(), "*")
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag = PublicKeyTag.get(session, name="tyler_was_here")
user = session.query(User).filter_by(username="<EMAIL>").scalar()
fe_url = url(base_url, '/permissions/grant_tag/{}'.format(tag.name))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'permission': TAG_EDIT, "argument": "*"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
tag = PublicKeyTag.get(session, name="tyler_was_here")
perm = Permission.get(session, TAG_EDIT)
assert len(get_public_key_tag_permissions(session, tag)) == 1, "The tag should have exactly 1 permission"
assert get_public_key_tag_permissions(session, tag)[0].name == perm.name, "The tag's permission should be the one we added"
assert get_public_key_tag_permissions(session, tag)[0].argument == "*", "The tag's permission should be the one we added"
# Make sure trying to add a permission to a tag doesn't fail horribly if it's already there
user = session.query(User).filter_by(username="<EMAIL>").scalar()
fe_url = url(base_url, '/permissions/grant_tag/{}'.format(tag.name))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'permission': TAG_EDIT, "argument": "*"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
@pytest.mark.gen_test
def test_edit_tag(users, http_client, base_url, session):
user = session.query(User).filter_by(username="<EMAIL>").scalar()
perm = Permission(name=TAG_EDIT, description="Why is this not nullable?")
perm.add(session)
session.commit()
grant_permission(session.query(Group).filter_by(groupname="all-teams").scalar(), session.query(Permission).filter_by(name=TAG_EDIT).scalar(), "*")
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag = PublicKeyTag.get(session, name="tyler_was_here")
assert tag.description == "Test Tag Please Ignore", "The description should match what we created it with"
user = session.query(User).filter_by(username="<EMAIL>").scalar()
fe_url = url(base_url, '/tags/{}/edit'.format(tag.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({"description": "Don't tag me bro"}),
headers={'X-Grouper-User': user.username})
assert resp.code == 200
tag = PublicKeyTag.get(session, name="tyler_was_here")
assert tag.description == "Don't tag me bro", "The description should have been updated"
@pytest.mark.gen_test
def test_permissions(users, http_client, base_url, session):
user = session.query(User).filter_by(username="<EMAIL>").scalar()
perm = Permission(name=TAG_EDIT, description="Why is this not nullable?")
perm.add(session)
session.commit()
perm = Permission(name="it.literally.does.not.matter", description="Why is this not nullable?")
perm.add(session)
session.commit()
grant_permission(session.query(Group).filter_by(groupname="all-teams").scalar(), session.query(Permission).filter_by(name=TAG_EDIT).scalar(), "*")
grant_permission(session.query(Group).filter_by(groupname="all-teams").scalar(), session.query(Permission).filter_by(name="it.literally.does.not.matter").scalar(), "*")
fe_url = url(base_url, '/tags')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here", "description": "Test Tag Please Ignore"}),
headers={'X-Grouper-User': user.username})
tag = PublicKeyTag.get(session, name="tyler_was_here")
user = session.query(User).filter_by(username="<EMAIL>").scalar()
fe_url = url(base_url, '/permissions/grant_tag/{}'.format(tag.name))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'permission': TAG_EDIT, "argument": "prod"}),
headers={'X-Grouper-User': user.username})
user = session.query(User).filter_by(username="<EMAIL>").scalar()
# add SSH key
fe_url = url(base_url, '/users/{}/public-key/add'.format(user.username))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'public_key': key_1}),
headers={'X-Grouper-User': user.username})
key = session.query(PublicKey).filter_by(user_id=user.id).scalar()
user = session.query(User).filter_by(username="<EMAIL>").scalar()
fe_url = url(base_url, '/users/{}/public-key/{}/tag'.format(user.username, key.id))
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'tagname': "tyler_was_here"}),
headers={'X-Grouper-User': user.username})
user = session.query(User).filter_by(username="<EMAIL>").scalar()
key = session.query(PublicKey).filter_by(user_id=user.id).scalar()
assert len(get_public_key_permissions(session, key)) == 1, "The SSH Key should have only 1 permission"
assert get_public_key_permissions(session, key)[0].name == TAG_EDIT, "The SSH Key's permission should be TAG_EDIT"
top_ks = [max(int(num_pred * top_k), 1) for num_pred in num_preds]
else:
top_ks = [min(num_pred, 1) for num_pred in num_preds]
pgt_scores_idxs = [
torch.topk(prev_pred_score, top_k, dim=0)
for prev_pred_score, top_k in zip(prev_pred_scores, top_ks)
]
pgt_scores = [item[0] for item in pgt_scores_idxs]
pgt_idxs = [item[1] for item in pgt_scores_idxs]
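# pgt_scores/pgt_idxs: per image, the scores and indices of the top_k highest-scoring proposals
# for each image-level GT class column (shape top_k x num_gt); the gathers below pull the matching
# one-hot labels and boxes for those selected proposals.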
oh_labels = [
torch.gather(
oh_label,
0,
torch.unsqueeze(pgt_idx, 2).expand(top_k, gt_int.numel(), oh_label.size(2)),
)
for oh_label, pgt_idx, top_k, gt_int in zip(
oh_labels, pgt_idxs, top_ks, gt_classes_img_int
)
]
pgt_idxs = [
torch.unsqueeze(pgt_idx, 2).expand(top_k, gt_int.numel(), 4)
for pgt_idx, top_k, gt_int in zip(pgt_idxs, top_ks, gt_classes_img_int)
]
pgt_boxes = [
torch.gather(prev_pred_box, 0, pgt_idx)
for prev_pred_box, pgt_idx in zip(prev_pred_boxes, pgt_idxs)
]
pgt_classes = [
torch.unsqueeze(gt_int, 0).expand(top_k, gt_int.numel())
for gt_int, top_k in zip(gt_classes_img_int, top_ks)
]
if need_weight:
pgt_weights = [
torch.index_select(pred_logits, 1, gt_int).expand(top_k, gt_int.numel())
for pred_logits, gt_int, top_k in zip(
self.pred_class_img_logits.split(1, dim=0), gt_classes_img_int, top_ks
)
]
if thres > 0:
# get large scores
masks = [pgt_score.ge(thres) for pgt_score in pgt_scores]
masks = [
torch.cat([torch.full_like(mask[0:1, :], True), mask[1:, :]], dim=0)
for mask in masks
]
pgt_scores = [
torch.masked_select(pgt_score, mask) for pgt_score, mask in zip(pgt_scores, masks)
]
oh_labels = [
torch.masked_select(
oh_label,
torch.unsqueeze(mask, 2).expand(top_k, gt_int.numel(), oh_label.size(2)),
)
for oh_label, mask, top_k, gt_int in zip(
oh_labels, masks, top_ks, gt_classes_img_int
)
]
pgt_boxes = [
torch.masked_select(
pgt_box, torch.unsqueeze(mask, 2).expand(top_k, gt_int.numel(), 4)
)
for pgt_box, mask, top_k, gt_int in zip(
pgt_boxes, masks, top_ks, gt_classes_img_int
)
]
pgt_classes = [
torch.masked_select(pgt_class, mask) for pgt_class, mask in zip(pgt_classes, masks)
]
if need_weight:
pgt_weights = [
torch.masked_select(pgt_weight, mask)
for pgt_weight, mask in zip(pgt_weights, masks)
]
pgt_scores = [pgt_score.reshape(-1) for pgt_score in pgt_scores]
oh_labels = [oh_label.reshape(-1, oh_label.size(-1)) for oh_label in oh_labels]
pgt_boxes = [pgt_box.reshape(-1, 4) for pgt_box in pgt_boxes]
pgt_classes = [pgt_class.reshape(-1) for pgt_class in pgt_classes]
if need_weight:
pgt_weights = [pgt_weight.reshape(-1) for pgt_weight in pgt_weights]
if not need_instance and need_weight:
return pgt_scores, pgt_boxes, pgt_classes, pgt_weights
elif not need_instance and not need_weight:
return pgt_scores, pgt_boxes, pgt_classes
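# need_instance path: wrap the pseudo ground truth (pgt) into per-image Instances for the ROI heads.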
pgt_boxes = [Boxes(pgt_box) for pgt_box in pgt_boxes]
targets = [
Instances(
proposals[i].image_size,
gt_boxes=pgt_box,
gt_classes=pgt_class,
gt_scores=pgt_score,
gt_weights=pgt_weight,
oh_labels=oh_label,
)
for i, (pgt_box, pgt_class, pgt_score, pgt_weight, oh_label) in enumerate(
zip(pgt_boxes, pgt_classes, pgt_scores, pgt_weights, oh_labels)
)
]
if need_mask:
targets = self.object_evidence(targets)
self._vis_pgt(targets, "pgt_top_k", suffix)
return targets
@torch.no_grad()
def _vis_pgt(self, targets, prefix, suffix):
if self.vis_period <= 0 or self.iter % self.vis_period > 0:
return
storage = get_event_storage()
pgt_boxes = [target.gt_boxes for target in targets]
pgt_classes = [target.gt_classes for target in targets]
pgt_scores = [target.gt_scores for target in targets]
if targets[0].has("pgt_bitmasks"):
pgt_masks = [target.pgt_bitmasks for target in targets]
elif targets[0].has("gt_masks"):
pgt_masks = [target.gt_masks for target in targets]
else:
pgt_masks = [None for target in targets]
output_dir = os.path.join(self.output_dir, prefix)
if self.iter == 0:
Path(output_dir).mkdir(parents=True, exist_ok=True)
for b, (pgt_box, pgt_class, pgt_score, pgt_mask) in enumerate(
zip(pgt_boxes, pgt_classes, pgt_scores, pgt_masks)
):
img = self.images.tensor[b, ...].clone().detach().cpu().numpy()
channel_swap = (1, 2, 0)
img = img.transpose(channel_swap)
pixel_means = [103.939, 116.779, 123.68]
img += pixel_means
img = img.astype(np.uint8)
h, w = img.shape[:2]
img_pgt = img.copy()
device_index = pgt_box.device.index
save_name = (
"i" + str(self.iter) + "_g" + str(device_index) + "_b" + str(b) + suffix + ".png"
)
pgt_box = pgt_box.tensor.clone().detach().cpu().numpy()
pgt_class = pgt_class.clone().detach().cpu().numpy()
pgt_score = pgt_score.clone().detach().cpu().numpy()
if pgt_mask is not None:
pgt_mask = pgt_mask.tensor.clone().detach().cpu().numpy()
for i in range(pgt_box.shape[0]):
c = pgt_class[i]
s = pgt_score[i]
x0, y0, x1, y1 = pgt_box[i, :]
x0 = int(max(x0, 0))
y0 = int(max(y0, 0))
x1 = int(min(x1, w))
y1 = int(min(y1, h))
cv2.rectangle(img_pgt, (x0, y0), (x1, y1), (0, 0, 255), 8)
(tw, th), bl = cv2.getTextSize(str(c), cv2.FONT_HERSHEY_SIMPLEX, 4, 4)
cv2.putText(
img_pgt, str(c), (x0, y0 + th), cv2.FONT_HERSHEY_COMPLEX, 4, (255, 0, 0), 4
)
(_, t_h), bl = cv2.getTextSize(str(s), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
cv2.putText(
img_pgt, str(s), (x0 + tw, y0 + th), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 2
)
if pgt_mask is not None:
m = pgt_mask[i]
img_pgt_m = img.copy()
img_pgt_m = img_pgt_m * m[:, :, np.newaxis]
img_pgt = np.concatenate([img_pgt, img_pgt_m], axis=1)
save_path = os.path.join(output_dir, save_name)
cv2.imwrite(save_path, img_pgt)
img_pgt = img_pgt.transpose(2, 0, 1)
vis_name = prefix + "_g" + str(device_index) + "_b" + str(b) + suffix
storage.put_image(vis_name, img_pgt)
@torch.no_grad()
def _vis_proposal(self, proposals, prefix, suffix):
if self.vis_period <= 0 or self.iter % self.vis_period > 0:
return
prev_pred_boxes = [p.proposal_boxes for p in proposals]
num_preds = [len(prev_pred_box) for prev_pred_box in proposals]
prev_pred_boxes = [
prev_pred_box.tensor.unsqueeze(1).expand(num_pred, self.num_classes, 4)
for num_pred, prev_pred_box in zip(num_preds, prev_pred_boxes)
]
prev_pred_scores = [p.objectness_logits for p in proposals]
prev_pred_scores = [
prev_pred_score.unsqueeze(1).expand(num_pred, self.num_classes + 1)
for num_pred, prev_pred_score in zip(num_preds, prev_pred_scores)
]
self._vis_box(
prev_pred_boxes,
prev_pred_scores,
proposals,
top_k=2048,
thres=-9999,
thickness=1,
prefix=prefix,
suffix=suffix,
)
# self._save_proposal(proposals, prefix, suffix)
@torch.no_grad()
def _save_proposal(self, proposals, prefix, suffix):
if self.vis_period <= 0 or self.iter % self.vis_period > 0:
return
output_dir = os.path.join(self.output_dir, prefix)
for b, p in enumerate(proposals):
box = p.proposal_boxes.tensor.clone().detach().cpu().numpy()
logit = p.objectness_logits.clone().detach().cpu().numpy()
level_ids = p.level_ids.clone().detach().cpu().numpy()
gpu_id = p.objectness_logits.device.index
id_str = "i" + str(self.iter_test) + "_g" + str(gpu_id) + "_b" + str(b)
save_path = os.path.join(output_dir, id_str + "_box" + suffix + ".npy")
np.save(save_path, box)
save_path = os.path.join(output_dir, id_str + "_logit" + suffix + ".npy")
np.save(save_path, logit)
save_path = os.path.join(output_dir, id_str + "_level" + suffix + ".npy")
np.save(save_path, level_ids)
@torch.no_grad()
def _vis_box(
self,
prev_pred_boxes,
prev_pred_scores,
proposals,
top_k=1,
thres=0.01,
thickness=4,
prefix="",
suffix="",
):
if self.vis_period <= 0 or self.iter % self.vis_period > 0:
return
storage = get_event_storage()
pgt_scores, pgt_boxes, pgt_classes = self.get_pgt_top_k(
prev_pred_boxes,
prev_pred_scores,
proposals,
self.num_classes,
self.gt_classes_img_int,
top_k=top_k,
thres=thres,
need_instance=False,
need_weight=False,
suffix="",
)
output_dir = os.path.join(self.output_dir, prefix)
if self.iter == 0:
Path(output_dir).mkdir(parents=True, exist_ok=True)
for b, pgt_box in enumerate(pgt_boxes):
img = self.images[b].clone().detach().cpu().numpy()
channel_swap = (1, 2, 0)
img = img.transpose(channel_swap)
pixel_means = [103.939, 116.779, 123.68]
img += pixel_means
img = img.astype(np.uint8)
h, w = img.shape[:2]
img_pgt = img.copy()
device_index = pgt_box.device.index
save_name = (
"i" + str(self.iter) + "_g" + str(device_index) + "_b" + str(b) + suffix + ".png"
)
pgt_box = pgt_box.cpu().numpy()
for i in range(pgt_box.shape[0]):
x0, y0, x1, y1 = pgt_box[i, :]
x0 = int(max(x0, 0))
y0 = int(max(y0, 0))
x1 = int(min(x1, w))
y1 = int(min(y1, h))
cv2.rectangle(img_pgt, (x0, y0), (x1, y1), (0, 0, 255), thickness)
save_path = os.path.join(output_dir, save_name)
cv2.imwrite(save_path, img_pgt)
img_pgt = img_pgt.transpose(2, 0, 1)
vis_name = prefix + "_g" + str(device_index) + "_b" + str(b) + suffix
storage.put_image(vis_name, img_pgt)
def _sample_proposals_wsl(
self, k, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes,
self.batch_size_per_images[k],
self.positive_sample_fractions[k],
self.num_classes,
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
gt_classes_sp = torch.full_like(gt_classes, -1)
gt_classes_sp[sampled_idxs] = gt_classes[sampled_idxs]
sampled_idxs = torch.arange(gt_classes.shape[0])
return sampled_idxs, gt_classes_sp[sampled_idxs]
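# NOTE: the block below mirrors detectron2's standard proposal sampling but is unreachable,
# since the function returns unconditionally above.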
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
return sampled_idxs, gt_classes[sampled_idxs]
@torch.no_grad()
def label_and_sample_proposals_wsl(
self,
k: int,
proposals: List[Instances],
targets: List[Instances],
cnt_superpixels,
suffix="",
) -> List[Instances]:
"""
Prepare some proposals to be used to train the ROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
boxes, with a fraction of positives that is no larger than
``self.positive_fraction``.
Args:
See :meth:`ROIHeads.forward`
Returns:
list[Instances]:
length `N` list of `Instances`s containing the proposals
sampled for training. Each Instances object also carries the training labels assigned to its proposals.
"""
30,'AN'],
['BC05', 'C', 30,'AN'],
['BC06', 'C', (2,2),'AN'],
['BC07', 'C', 2,'AN'],
],
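# Layout note (an assumption based on the bots X12 grammar convention): each field row reads
# [field id, M(andatory)/C(onditional), max length or (min, max), type], with types such as
# AN = alphanumeric, DT = date, TM = time, R = decimal, N2 = numeric with 2 implied decimals, B = binary.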
'BCA': [
['BOTSID', 'M', 3,'AN'],
['BCA01', 'M', (2,2),'AN'],
['BCA02', 'C', (2,2),'AN'],
['BCA03', 'M', 22,'AN'],
['BCA04', 'C', 30,'AN'],
['BCA05', 'C', 8,'AN'],
['BCA06', 'M', (6,6),'DT'],
['BCA07', 'C', 45,'AN'],
['BCA08', 'C', 30,'AN'],
['BCA09', 'C', 30,'AN'],
['BCA10', 'C', (6,6),'DT'],
['BCA11', 'C', (6,6),'DT'],
['BCA12', 'C', (6,6),'DT'],
['BCA13', 'C', (2,2),'AN'],
],
'BCD': [
['BOTSID', 'M', 3,'AN'],
['BCD01', 'M', (6,6),'DT'],
['BCD02', 'M', 16,'AN'],
['BCD03', 'M', 1,'AN'],
['BCD04', 'M', 15,'N2'],
['BCD05', 'M', 1,'AN'],
['BCD06', 'C', (6,6),'DT'],
['BCD07', 'C', 22,'AN'],
['BCD08', 'C', 22,'AN'],
['BCD09', 'C', (6,6),'DT'],
['BCD10', 'C', 22,'AN'],
['BCD11', 'C', (2,2),'AN'],
['BCD12', 'C', (2,2),'AN'],
['BCD13', 'C', (2,2),'AN'],
['BCD14', 'C', 30,'AN'],
['BCD15', 'C', 2,'AN'],
],
'BCH': [
['BOTSID', 'M', 3,'AN'],
['BCH01', 'M', (2,2),'AN'],
['BCH02', 'M', (2,2),'AN'],
['BCH03', 'M', 22,'AN'],
['BCH04', 'C', 30,'AN'],
['BCH05', 'C', 8,'AN'],
['BCH06', 'M', (6,6),'DT'],
['BCH07', 'C', 45,'AN'],
['BCH08', 'C', 30,'AN'],
['BCH09', 'C', 30,'AN'],
['BCH10', 'C', (6,6),'DT'],
['BCH11', 'C', (6,6),'DT'],
],
'BCI': [
['BOTSID', 'M', 3,'AN'],
['BCI01', 'C', 20,'AN'],
['BCI02', 'C', 3,'AN'],
['BCI03', 'C', 30,'AN'],
['BCI04', 'C', (2,2),'AN'],
['BCI05', 'C', (2,3),'AN'],
['BCI06', 'C', 35,'AN'],
['BCI07', 'C', (2,2),'AN'],
['BCI08', 'C', (3,3),'AN'],
],
'BCM': [
['BOTSID', 'M', 3,'AN'],
['BCM01', 'M', (2,2),'AN'],
['BCM02', 'M', (6,6),'DT'],
['BCM03', 'M', (6,6),'DT'],
['BCM04', 'C', 30,'AN'],
['BCM05', 'C', 45,'AN'],
['BCM06', 'C', (2,2),'AN'],
['BCM07', 'C', (2,2),'AN'],
['BCM08', 'C', 45,'AN'],
['BCM09', 'C', (2,2),'AN'],
['BCM10', 'C', (2,2),'AN'],
['BCM11', 'C', (2,2),'AN'],
['BCM12', 'C', (3,3),'AN'],
],
'BCO': [
['BOTSID', 'M', 3,'AN'],
['BCO01', 'M', (2,2),'AN'],
['BCO02', 'M', 45,'AN'],
['BCO03', 'M', (6,6),'DT'],
['BCO04', 'M', 30,'AN'],
['BCO05', 'C', (2,2),'AN'],
['BCO06', 'C', (6,6),'DT'],
['BCO07', 'C', (6,6),'DT'],
['BCO08', 'C', (2,2),'AN'],
['BCO09', 'C', (2,2),'AN'],
['BCO10', 'C', 30,'AN'],
],
'BCP': [
['BOTSID', 'M', 3,'AN'],
['BCP01', 'M', (2,2),'AN'],
['BCP02', 'M', (2,2),'AN'],
['BCP03', 'M', 30,'AN'],
['BCP04', 'C', (6,6),'DT'],
['BCP05', 'C', (2,2),'AN'],
['BCP06', 'C', (2,2),'AN'],
['BCP07', 'C', (6,6),'DT'],
['BCP08', 'C', (4,8),'TM'],
['BCP09', 'C', 8,'AN'],
['BCP10', 'C', 30,'AN'],
['BCP11', 'C', 30,'AN'],
['BCP12', 'C', 80,'AN'],
['BCP13', 'C', 80,'AN'],
],
'BCQ': [
['BOTSID', 'M', 3,'AN'],
['BCQ01', 'M', (2,2),'AN'],
['BCQ02', 'M', (6,6),'DT'],
['BCQ03', 'M', (4,8),'TM'],
['BCQ04', 'C', (2,2),'AN'],
['BCQ05', 'C', 30,'AN'],
['BCQ06', 'C', (2,4),'AN'],
],
'BCS': [
['BOTSID', 'M', 3,'AN'],
['BCS01', 'M', (2,2),'AN'],
['BCS02', 'M', (6,6),'DT'],
['BCS03', 'M', 30,'AN'],
['BCS04', 'M', (6,6),'DT'],
['BCS05', 'C', (2,2),'AN'],
['BCS06', 'C', 45,'AN'],
['BCS07', 'C', 30,'AN'],
['BCS08', 'C', (2,2),'AN'],
['BCS09', 'C', (2,2),'AN'],
['BCS10', 'C', 10,'R'],
['BCS11', 'C', 10,'R'],
['BCS12', 'C', (2,2),'AN'],
],
'BCT': [
['BOTSID', 'M', 3,'AN'],
['BCT01', 'M', (2,2),'AN'],
['BCT02', 'C', 15,'AN'],
['BCT03', 'C', 15,'AN'],
['BCT04', 'C', 6,'AN'],
['BCT05', 'C', (2,2),'AN'],
['BCT06', 'C', 15,'AN'],
['BCT07', 'C', 15,'AN'],
['BCT08', 'C', 6,'AN'],
['BCT09', 'C', 80,'AN'],
['BCT10', 'C', (2,2),'AN'],
],
'BEG': [
['BOTSID', 'M', 3,'AN'],
['BEG01', 'M', (2,2),'AN'],
['BEG02', 'M', (2,2),'AN'],
['BEG03', 'M', 22,'AN'],
['BEG04', 'C', 30,'AN'],
['BEG05', 'M', (6,6),'DT'],
['BEG06', 'C', 30,'AN'],
['BEG07', 'C', (2,2),'AN'],
['BEG08', 'C', (3,3),'AN'],
],
'BEN': [
['BOTSID', 'M', 3,'AN'],
['BEN01', 'C', 1,'AN'],
['BEN02', 'C', 10,'R'],
['BEN03', 'C', (2,2),'AN'],
['BEN04', 'C', 1,'AN'],
['BEN05', 'C', 1,'AN'],
['BEN06', 'C', (2,2),'AN'],
],
'BFR': [
['BOTSID', 'M', 3,'AN'],
['BFR01', 'M', (2,2),'AN'],
['BFR02', 'C', 30,'AN'],
['BFR03', 'C', 30,'AN'],
['BFR04', 'M', (2,2),'AN'],
['BFR05', 'M', 1,'AN'],
['BFR06', 'M', (6,6),'DT'],
['BFR07', 'C', (6,6),'DT'],
['BFR08', 'M', (6,6),'DT'],
['BFR09', 'C', (6,6),'DT'],
['BFR10', 'C', 30,'AN'],
['BFR11', 'C', 22,'AN'],
['BFR12', 'C', (2,2),'AN'],
['BFR13', 'C', 2,'AN'],
],
'BFS': [
['BOTSID', 'M', 3,'AN'],
['BFS01', 'C', (2,2),'AN'],
['BFS02', 'C', 15,'R'],
['BFS03', 'C', (2,2),'AN'],
['BFS04', 'C', 15,'R'],
['BFS05', 'C', (6,6),'DT'],
['BFS06', 'C', 15,'R'],
['BFS07', 'C', (6,6),'DT'],
['BFS08', 'C', 15,'R'],
['BFS09', 'C', (2,2),'AN'],
['BFS10', 'C', 1,'AN'],
],
'BGF': [
['BOTSID', 'M', 3,'AN'],
['BGF01', 'C', (3,3),'AN'],
['BGF02', 'M', (2,2),'AN'],
['BGF03', 'M', 30,'AN'],
],
'BGN': [
['BOTSID', 'M', 3,'AN'],
['BGN01', 'M', (2,2),'AN'],
['BGN02', 'M', 30,'AN'],
['BGN03', 'M', (6,6),'DT'],
['BGN04', 'C', (4,8),'TM'],
['BGN05', 'C', (2,2),'AN'],
['BGN06', 'C', 30,'AN'],
['BGN07', 'C', (2,2),'AN'],
['BGN08', 'C', 2,'AN'],
],
'BHT': [
['BOTSID', 'M', 3,'AN'],
['BHT01', 'M', (4,4),'AN'],
['BHT02', 'M', (2,2),'AN'],
['BHT03', 'M', 30,'AN'],
['BHT04', 'M', (6,6),'DT'],
['BHT05', 'C', (4,8),'TM'],
],
'BIA': [
['BOTSID', 'M', 3,'AN'],
['BIA01', 'M', (2,2),'AN'],
['BIA02', 'M', (2,2),'AN'],
['BIA03', 'M', 30,'AN'],
['BIA04', 'M', (6,6),'DT'],
['BIA05', 'C', (4,8),'TM'],
['BIA06', 'C', 2,'AN'],
],
'BIG': [
['BOTSID', 'M', 3,'AN'],
['BIG01', 'M', (6,6),'DT'],
['BIG02', 'M', 22,'AN'],
['BIG03', 'C', (6,6),'DT'],
['BIG04', 'C', 22,'AN'],
['BIG05', 'C', 30,'AN'],
['BIG06', 'C', 8,'AN'],
['BIG07', 'C', (2,2),'AN'],
['BIG08', 'C', (2,2),'AN'],
['BIG09', 'C', 2,'AN'],
['BIG10', 'C', 22,'AN'],
],
'BIN': [
['BOTSID', 'M', 3,'AN'],
['BIN01', 'M', 15,'R'],
['BIN02', 'M', 99999,'B'],
],
'BIX': [
['BOTSID', 'M', 3,'AN'],
['BIX01', 'M', (2,2),'AN'],
['BIX02', 'M', (2,4),'AN'],
['BIX03', 'M', (6,6),'DT'],
['BIX04', 'M', 2,'AN'],
['BIX05', 'C', 9,'AN'],
['BIX06', 'C', (2,30),'AN'],
['BIX07', 'C', (2,2),'AN'],
['BIX08', 'C', 6,'AN'],
['BIX09', 'C', 1,'AN'],
['BIX10', 'C', 2,'AN'],
['BIX11', 'C', (2,17),'AN'],
],
'BL': [
['BOTSID', 'M', 3,'AN'],
['BL01', 'M', (2,2),'AN'],
['BL02', 'M', 5,'AN'],
['BL03', 'M', 5,'AN'],
['BL04', 'C', (6,9),'AN'],
['BL05', 'C', (2,30),'AN'],
['BL06', 'C', (2,2),'AN'],
['BL07', 'C', (2,3),'AN'],
['BL08', 'C', (6,9),'AN'],
['BL09', 'C', (2,30),'AN'],
['BL10', 'C', (2,2),'AN'],
['BL11', 'C', (2,3),'AN'],
['BL12', 'C', (2,4),'AN'],
['BL13', 'C', (2,4),'AN'],
['BL14', 'C', (2,4),'AN'],
['BL15', 'C', (2,4),'AN'],
['BL16', 'C', (2,4),'AN'],
['BL17', 'C', (2,4),'AN'],
],
'BLI': [
['BOTSID', 'M', 3,'AN'],
['BLI01', 'M', (2,2),'AN'],
['BLI02', 'M', 30,'AN'],
['BLI03', 'C', 15,'R'],
['BLI04', 'C', (2,2),'AN'],
['BLI05', 'C', (3,3),'AN'],
['BLI06', 'C', 14,'R'],
['BLI07', 'C', (2,2),'AN'],
['BLI08', 'C', (2,2),'AN'],
['BLI09', 'C', 30,'AN'],
['BLI10', 'C', (2,2),'AN'],
['BLI11', 'C', 30,'AN'],
['BLI12', 'C', (2,2),'AN'],
['BLI13', 'C', 30,'AN'],
],
'BLR': [
['BOTSID', 'M', 3,'AN'],
['BLR01', 'M', (2,4),'AN'],
['BLR02', 'M', (6,6),'DT'],
],
'BLS': [
['BOTSID', 'M', 3,'AN'],
['BLS01', 'M', (2,2),'AN'],
['BLS02', 'M', (2,2),'AN'],
['BLS03', 'M', 30,'AN'],
['BLS04', 'M', (6,6),'DT'],
['BLS05', 'C', (4,8),'TM'],
['BLS06', 'C', (2,2),'AN'],
],
'BM': [
['BOTSID', 'M', 3,'AN'],
['BM01', 'M', 22,'AN'],
['BM02', 'M', (6,6),'DT'],
['BM03', 'M', 9,'N2'],
['BM04', 'C', (2,2),'AN'],
['BM05', 'C', (6,6),'DT'],
['BM06', 'M', 1,'AN'],
['BM07', 'C', 6,'R'],
['BM08', 'C', 1,'R'],
],
'BMG': [
['BOTSID', 'M', 3,'AN'],
['BMG01', 'M', (2,2),'AN'],
['BMG02', 'C', 80,'AN'],
['BMG03', 'C', (2,2),'AN'],
],
'BMM': [
['BOTSID', 'M', 3,'AN'],
['BMM01', 'M', (2,4),'AN'],
['BMM02', 'M', (6,9),'AN'],
['BMM03', 'M', 15,'R'],
['BMM04', 'M', 6,'R'],
['BMM05', 'C', (6,9),'AN'],
['BMM06', 'C', 30,'AN'],
['BMM07', 'C', 2,'AN'],
['BMM08', 'C', 35,'AN'],
['BMM09', 'C', 30,'AN'],
['BMM10', 'C', (2,2),'AN'],
],
'BMS': [
['BOTSID', 'M', 3,'AN'],
['BMS01', 'M', (2,2),'AN'],
['BMS02', 'M', (6,6),'DT'],
['BMS03', 'C', (2,3),'AN'],
['BMS04', 'C', 30,'AN'],
['BMS05', 'C', 4,'R'],
['BMS06', 'C', 30,'AN'],
['BMS07', 'C', 4,'R'],
['BMS08', 'C', (2,2),'AN'],
['BMS09', 'C', (2,3),'AN'],
],
'BNR': [
['BOTSID', 'M', 3,'AN'],
['BNR01', 'M', (2,2),'AN'],
['BNR02', 'M', 30,'AN'],
['BNR03', 'M', (6,6),'DT'],
['BNR04', 'C', (4,8),'TM'],
['BNR05', 'C', (2,2),'AN'],
['BNR06', 'C', (2,2),'AN'],
],
'BNX': [
['BOTSID', 'M', 3,'AN'],
['BNX01', 'C', 1,'AN'],
['BNX02', 'C', 13,'AN'],
['BNX03', 'C', 1,'AN'],
['BNX04', 'C', (5,5),'R'],
],
'BOS': [
['BOTSID', 'M', 3,'AN'],
['BOS01', 'M', 16,'AN'],
['BOS02', 'M', (6,6),'DT'],
['BOS03', 'C', (2,2),'AN'],
['BOS04', 'C', (6,6),'AN'],
['BOS05', 'C', (2,2),'AN'],
],
'BOX': [
['BOTSID', 'M', 3,'AN'],
['BOX01', 'M', 1,'AN'],
['BOX02', 'M', (2,2),'AN'],
['BOX03', 'M', (2,2),'AN'],
['BOX04', 'M', 15,'R'],
['BOX05', 'C', (3,3),'AN'],
['BOX06', 'C', 15,'R'],
['BOX07', 'C', 15,'R'],
['BOX08', 'C', 15,'R'],
['BOX09', 'C', 15,'R'],
['BOX10', 'C', 15,'R'],
['BOX11', 'C', 15,'R'],
['BOX12', 'C', 14,'R'],
['BOX13', 'C', 15,'R'],
['BOX14', 'C', 30,'AN'],
['BOX15', 'C', 30,'AN'],
],
'BPA': [
['BOTSID', 'M', 3,'AN'],
['BPA01', 'M', (2,2),'AN'],
['BPA02', 'M', (6,6),'DT'],
['BPA03', 'C', (2,2),'AN'],
['BPA04', 'C', 30,'AN'],
['BPA05', 'C', (4,8),'TM'],
],
'BPP': [
['BOTSID', 'M', 3,'AN'],
['BPP01', 'M', (2,2),'AN'],
['BPP02', 'M', (6,6),'DT'],
['BPP03', 'M', (2,2),'AN'],
['BPP04', 'C', 30,'AN'],
['BPP05', 'C', 45,'AN'],
['BPP06', 'C', 30,'AN'],
['BPP07', 'C', (6,6),'DT'],
['BPP08', 'C', (2,2),'AN'],
['BPP09', 'C', 30,'AN'],
['BPP10', 'C', 45,'AN'],
['BPP11', 'C', (6,6),'DT'],
['BPP12', 'C', 30,'AN'],
['BPP13', 'C', (2,2),'AN'],
],
'BPR': [
['BOTSID', 'M', 3,'AN'],
['BPR01', 'M', 1,'AN'],
['BPR02', 'M', 15,'R'],
['BPR03', 'M', 1,'AN'],
['BPR04', 'M', (3,3),'AN'],
['BPR05', 'C', 10,'AN'],
['BPR06', 'C', (2,2),'AN'],
['BPR07', 'C', (3,12),'AN'],
['BPR08', 'C', (2,2),'AN'],
['BPR09', 'C', 35,'AN'],
['BPR10', 'C', (10,10),'AN'],
['BPR11', 'C', (9,9),'AN'],
['BPR12', 'C', (2,2),'AN'],
['BPR13', 'C', (3,12),'AN'],
['BPR14', 'C', (2,2),'AN'],
['BPR15', 'C', 35,'AN'],
['BPR16', 'C', (6,6),'DT'],
['BPR17', 'C', 3,'AN'],
['BPR18', 'C', (2,2),'AN'],
['BPR19', 'C', (3,12),'AN'],
['BPR20', 'C', (2,2),'AN'],
['BPR21', 'C', 35,'AN'],
],
'BPS': [
['BOTSID', 'M', 3,'AN'],
['BPS01', 'M', (3,3),'AN'],
['BPS02', 'M', 15,'R'],
['BPS03', 'M', 1,'AN'],
['BPS04', 'C', (2,2),'AN'],
['BPS05', 'C', (3,12),'AN'],
['BPS06', 'C', 35,'AN'],
['BPS07', 'C', (10,10),'AN'],
['BPS08', 'C', (9,9),'AN'],
['BPS09', 'C', (2,2),'AN'],
['BPS10', 'C', (3,12),'AN'],
['BPS11', 'C', 35,'AN'],
['BPS12', 'C', (6,6),'DT'],
['BPS13', 'C', (6,6),'DT'],
],
'BPT': [
['BOTSID', 'M', 3,'AN'],
['BPT01', 'M', (2,2),'AN'],
['BPT02', 'C', 30,'AN'],
['BPT03', 'M', (6,6),'DT'],
['BPT04', 'C', (2,2),'AN'],
['BPT05', 'C', (3,3),'AN'],
['BPT06', 'C', 10,'R'],
['BPT07', 'C', 2,'AN'],
],
'BQR': [
['BOTSID', 'M', 3,'AN'],
['BQR01', 'M', (2,2),'AN'],
['BQR02', 'M', 45,'AN'],
['BQR03', 'M', (6,6),'DT'],
['BQR04', 'C', (3,3),'AN'],
['BQR05', 'C', (6,6),'DT'],
['BQR06', 'C', (2,2),'AN'],
],
'BQT': [
['BOTSID', 'M', 3,'AN'],
['BQT01', 'M', (2,2),'AN'],
['BQT02', 'M', 45,'AN'],
['BQT03', 'M', (6,6),'DT'],
['BQT04', 'C', (3,3),'AN'],
['BQT05', 'C', (6,6),'DT'],
['BQT06', 'C', (2,2),'AN'],
['BQT07', 'C', (2,2),'AN'],
],
'BR': [
['BOTSID', 'M', 3,'AN'],
['BR01', 'M', (2,2),'AN'],
['BR02', 'M', (2,2),'AN'],
['BR03', 'M', (6,6),'DT'],
['BR04', 'C', (2,17),'AN'],
['BR05', 'C', 2,'AN'],
['BR06', 'C', 2,'AN'],
['BR07', 'C', | |
# choking_commonality.py
#help("modules") #
import urllib.request
import math
import statistics
import collections
import ast
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
"""
Hypothesis 2: A refutation of the theory that more wins and/or a better percentage is equivalent
to a better side. By natural extension, a refutation that they are directly related (e.g. a side
with 10 more wins is better than one with 2 more).
1st:
This could be proven true by looking at how often the team with more wins beats a team with fewer
wins in finals. You would also expect a side with 5 more relative wins to come out on top more
often than a side with 1 more win. Percentage will be analysed at the same time but reported
separately to see whether, or to what extent, it differs.
I will only analyse finals games, because that is easier. But I do know that teams with lots of wins
lose to others during h&a as well. How often I don't know (in high winning sides the losses are more
memorable, same for wins in losing sides).
This is based on the premise that who wins in a direct match between two sides is the ultimate
proof of which is the better side. This is, of course, wrong, as no premiership side has ever had
an unbeaten season, but they could definitely be described as better sides than those (at least the
majority of those) they lost to that year.
However, everyone is really only interested in premierships as a measure (or the best measure) of
a side's quality, and this is predicated on beating a side in one game in very specific circumstances
as the best way to prove which is a better side. You would expect an upset to happen exactly as often
as they were "upset" in the home and away season.
"""
year_started = 1990 #<- don't change to 2000!!!!!! see far below
script_directory = str(os.path.dirname(os.path.realpath(__file__)))
file_name = "choking_commonality_savefile.txt"
path_to_file = script_directory + '\\' + file_name
"""#Uncomment this section to update info
#constants
universalURL = 'https://afltables.com/afl/seas/{}.html'
this_season = 2020#<-this is a manually used value, see last_season below which is autoupdated
teams_in_comp = []
finals_cutoff = []
teams = {}
colours = {"Gold Coast":"orangered", "Geelong":"royalblue", "Essendon":"red", "Carlton":"navy", "Collingwood":"black", "Melbourne":"lime", "Hawthorn":"brown", "Fitzroy":"grey", "<NAME>":"crimson", "Richmond":"yellow", "North Melbourne":"blue", "Western Bulldogs":"green", "Fremantle":"purple","Greater Western Sydney":"orange", "Brisbane Lions": "orangered", "Port Adelaide":"cyan", "West Coast":"darkgoldenrod", "Sydney":"deeppink", "Adelaide":"royalblue"} #ugh takes so long to write out
running_colours = []
for i in teams:
running_colours.append(colours[i])
def getURL(url):
stream = urllib.request.urlopen(url)
text = stream.read().decode('utf-8')
stream.close()
return text
class Season(object):
def __init__(self, year):
self._year = year
self._games_in_season = 0
self._teams_in_season = 0
self._total_matches = []
self._home_and_away_matches = []
self._finals_matches = []
self.n_home_and_away_wins = 0
self._n_total_wins = 0
self._home_and_away_win_percentage = 0.0
self._percentage = 0.0
self._home_and_away_ladder_position = 0
self._finals_percentage = 0.0
self._final_ladder_position = 0
def get_form(self, round):
'''Returns w/l record of 5 rounds immediately before selected one'''
if round < 5:
return [0, 0, 0, 0, 0]
return []
def do_calcs(self):
return
class Club(object):
def __init__(self, name, home_grounds, interstate):
'''Club(str, str)'''
self._name = name
self._colour = colours[name]
self._home_grounds = home_grounds
self._interstate = interstate
self._seasons = {}
#def get_season(self, year):
# return self._seasons[year - 2000]
victoria = ["M.C.G.", "Princes Park", "Docklands", "Kardinia Park"]
# == multiple home grounds in this stretch actually
Richmond = Club("Richmond", ["M.C.G."], False)
WC = Club("West Coast", ["Subiaco", "W.A.C.A", "Perth Stadium"], True)#
GC = Club("Gold Coast", ["Carrara"], True)
Brisbane = Club("Brisbane Lions", ["Gabba"], True)
STK = Club("St Kilda", ["Docklands"], False)
Fremantle = Club("Fremantle", ["Subiaco", "W.A.C.A", "Perth Stadium"], True)#
Collingwood = Club("Collingwood", ["M.C.G."], False)
Melbourne = Club("Melbourne", ["M.C.G."], False)
Carlton = Club("Carlton", ["Princes Park", "M.C.G."], False)#
Essendon = Club("Essendon", ["M.C.G."], False)#
Hawthorn = Club("Hawthorn", ["M.C.G."], False)
Adelaide = Club("Adelaide", ["Football Park", "Adelaide Oval"], True)#
PA = Club("Port Adelaide", ["Football Park", "Adelaide Oval"], True)#
Sydney = Club("Sydney", ["S.C.G."], True)
GWS = Club("Greater Western Sydney", ["Sydney Showground"], True)
NM = Club("North Melbourne", ["Docklands"], False)
WB = Club("Western Bulldogs", ["Docklands"], False)
Geelong = Club("Geelong", ["Kardinia Park"], False)
Fitzroy = Club("Fitzroy", ["M.C.G."], False)
clubs = {
"Richmond": Richmond,
"West Coast": WC,
"Gold Coast": GC,
"Brisbane Lions": Brisbane,
"St Kilda": STK,
"Fremantle": Fremantle,
"Collingwood": Collingwood,
"Melbourne": Melbourne,
"Carlton": Carlton,
"Essendon": Essendon,
"North Melbourne": NM,
"Kangaroos": NM,
"Footscray": WB,
"Brisbane Bears": Brisbane,
"Fitzroy": Fitzroy,
"Hawthorn": Hawthorn,
"Adelaide": Adelaide,
"Port Adelaide": PA,
"Sydney": Sydney,
"Greater Western Sydney": GWS,
"Western Bulldogs": WB,
"Geelong": Geelong
}
current_clubs = set(clubs.values())
class Match(object):
def __init__(self, home_team, away_team, home_team_score, away_team_score, venue):
self._home_team = home_team
self._away_team = away_team
self._home_team_score = home_team_score
self._away_team_score = away_team_score
self._margin = home_team_score - away_team_score
self._winner = "draw"
self._loser = "draw"
if self._margin > 0:
self._winner = self._home_team
self._loser = self._away_team
elif self._margin < 0:
self._winner = self._away_team
self._loser = self._home_team
self._venue = venue
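# _winner/_loser hold the winning/losing team names as strings ("draw" for a tie);
# _margin is the home score minus the away score.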
finals_home_and_away_differentials = [] # (win differential, percentage differential, h&a ladder position of winner)
for year in range(year_started, this_season + 1):
text = getURL(universalURL.format(year))
soup = BeautifulSoup(text, 'html.parser')
tables = soup.findAll('table')
last_season = this_season# int(tables[0].find('tr').find('a').text) - 1
tables.reverse()
#create seasons for every club
for i in current_clubs:
i._seasons[year] = Season(year)
normal = -20
if year == 2010:
normal -= 2
elif year < 1994:
normal += 4
rows = BeautifulSoup(getURL(universalURL.format(year)), features="lxml").findChildren('table')[normal].findChildren('tr')[2:-1]
for i in rows:
collumns = i.findAll('td')
club = collumns[1].text
if club not in clubs.keys():
break
season = clubs[club]._seasons[year]
#season._games_in_season = int(collumns[2].text) + len(season._finals_matches)
season._teams_in_season = len(rows)
season.n_home_and_away_wins = float(collumns[3].text)
draws = str(collumns[4].text).strip()
if draws != '':
season.n_home_and_away_wins += float(collumns[4].text) / 2
'''
season._n_total_wins = season.n_home_and_away_wins
score_for = 0
score_against = 0
for j in season._finals_matches:
if (j._home_team == club and j._margin > 0) or (j._away_team == club and j._margin < 0):
season._n_total_wins += 1
if j._home_team == club:
score_for += j._home_team_score
score_against += j._away_team_score
else:
score_for += j._away_team_score
score_against += j._home_team_score
if score_against != 0:
season._finals_percentage = score_for / score_against
'''
season._home_and_away_win_percentage = float(collumns[13].text[:3]) / (4 * float(collumns[2].text))#%of games played that were wins. Note! this line will reject a perfect 100% win season as a 10% one
season._percentage = float(collumns[12].text)
season._home_and_away_ladder_position = int(collumns[0].text)
'''#home and away games
for i in tables[x + 5:]:
links = i.findAll('a')
if len(links) != 4 and len(links) != 3:
continue
#print(links)
rows = i.findAll('tr')
#print(rows)
team1 = links[0].text
venue = links[1].text
team2 = links[2].text
if len(rows) > 2:
continue
team1_score = int(rows[0].findAll('td')[2].text)
team2_score = int(rows[1].findAll('td')[2].text)
match = Match(team1, team2, team1_score, team2_score, venue)
if match._inter_match:
inter_standard_games += 1
if team1_score == team2_score:
inter_inter_standard_wins += 0.5
elif clubs[match._winner]._interstate:
inter_inter_standard_wins += 1
if clubs[team1] == WC or clubs[team2] == WC:
wc_standard_games += 1
if team1_score == team2_score:
wc_standard_wins += 0.5
elif clubs[match._winner] == WC:
wc_standard_wins += 1
clubs[team1]._seasons[year]._total_matches.append(match)
clubs[team1]._seasons[year]._home_and_away_matches.append(match)
clubs[team2]._seasons[year]._total_matches.append(match)
clubs[team2]._seasons[year]._home_and_away_matches.append(match)
'''
x = 0
for i in tables[::2]:
links = i.findAll('a')
team1 = links[0].text
venue = links[1].text
team2 = links[2].text
rows = i.findAll('tr')
team1_score = int(rows[0].findAll('td')[2].text)
team2_score = int(rows[1].findAll('td')[2].text)
match = Match(team1, team2, team1_score, team2_score, venue)
clubs[team1]._seasons[year]._total_matches.append(match)
clubs[team1]._seasons[year]._finals_matches.append(match)
clubs[team2]._seasons[year]._total_matches.append(match)
clubs[team2]._seasons[year]._finals_matches.append(match)
if team1_score > team2_score:
clubs[team1]._seasons[year]._final_ladder_position = x + 1
clubs[team2]._seasons[year]._final_ladder_position = x + 2
elif team2_score > team1_score:
clubs[team1]._seasons[year]._final_ladder_position = x + 2
clubs[team2]._seasons[year]._final_ladder_position = x + 1
if match._winner != "draw":
win_differential = clubs[match._winner]._seasons[year].n_home_and_away_wins - clubs[match._loser]._seasons[year].n_home_and_away_wins
percentage_differential = round(clubs[match._winner]._seasons[year]._percentage - clubs[match._loser]._seasons[year]._percentage, 2)
winner_ladder_position = clubs[match._winner]._seasons[year]._home_and_away_ladder_position
loser_ladder_position = clubs[match._loser]._seasons[year]._home_and_away_ladder_position
finals_home_and_away_differentials.append((win_differential, percentage_differential, winner_ladder_position, loser_ladder_position, x == 0))
if winner_ladder_position == 0:
print("wtf?", match._winner, match._margin)
#If this is the gf and its not a draw
if team1_score != team2_score and (clubs[team1]._seasons[year]._final_ladder_position == 1 or clubs[team2]._seasons[year]._final_ladder_position == 1) and (clubs[team1]._interstate or clubs[team2]._interstate):
pass
if tables[x + 2].text == "Finals":
break
x += 2
'''
total_sides = 0
for i in current_clubs:
season = i._seasons[year]
if len(season._total_matches) > 0:
total_sides += 1
if len(season._finals_matches) > 0:
pass
'''
print(year)
stored_info = {"key": finals_home_and_away_differentials} # {year:[tables, bs]}
with open(path_to_file, "w") as f:
f.write(str(stored_info))
#since 2000 there have been 189 finals, so len(finals) - 189 == index of 2000
#"""
#"""
with open(path_to_file, "r") as f:
stored_info = ast.literal_eval(f.read())
#MAIN:
#"""#RETRIEVE DATA
finals_home_and_away_differentials = stored_info["key"][len(stored_info["key"]) - 189:] #<-To start from 2000. Else starts 1990
print(finals_home_and_away_differentials)
print("# of finals since " + str(year_started) + ": ", len(finals_home_and_away_differentials))
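# A quick sketch of the upset rate implied by the list above: the share of finals won by the side
# with fewer home-and-away wins (element [0] of each tuple is winner's h&a wins minus loser's).
upsets = sum(1 for d in finals_home_and_away_differentials if d[0] < 0)
print("share of finals won by the side with fewer h&a wins:", round(upsets / len(finals_home_and_away_differentials), 3))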
'''
unique_wins_frequency = {}
for i in finals_home_and_away_differentials:
    unique_wins_frequency[i[0]] = unique_wins_frequency.get(i[0], 0) + 1
'''
counter = collections.Counter([i[0] for i in finals_home_and_away_differentials])
print("frequency of diff between h&a wins of finalists:", counter)
avg_ladder_diff = 0
for i in counter.most_common():
avg_ladder_diff += i[0] * i[1]
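# Note: this accumulates the frequency-weighted *sum* of win differentials, not an average;
# dividing by len(finals_home_and_away_differentials) would give the mean the variable name suggests.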
counter2 = collections.Counter([i[2] for i in finals_home_and_away_differentials])
print("frequency of | |
'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
expected = {
(0, 0): 4.117647058823529,
(0, 1): 2.8823529411764706,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 4.0,
(0, 1): 3.0,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
# I guess 2.88 => 3.0 is about a 4% increase
self.assertAlmostEqual(rb.get_required_overload(),
0.040816326530612256)
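# rough check of that constant: the under-weighted zone goes from
# 2.8823529411764706 weighted replicas to 3.0 wanted replicas, and
# 3.0 / 2.8823529411764706 - 1 = 0.0408163265306...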
# ... 10% is plenty enough here
rb.set_overload(0.1)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
def test_small_extra_server_in_zone_with_multiple_replicas(self):
rb = ring.RingBuilder(8, 5, 1)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 1000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdb', 'weight': 1000})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdc', 'weight': 1000})
# z1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 1000})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdb', 'weight': 1000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdc', 'weight': 1000})
# z1 - extra small server
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 50})
expected = {
(0, 0): 2.479338842975207,
(0, 1): 2.5206611570247937,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t: r for (t, r) in
weighted_replicas.items()
if len(t) == 2})
# dispersion is fine with this at the zone tier
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected, {t: r for (t, r) in
wanted_replicas.items()
if len(t) == 2})
# ... but not ok with that tiny server
expected = {
'127.0.0.1': 2.479338842975207,
'127.0.0.2': 1.5206611570247937,
'127.0.0.3': 1.0,
}
self.assertEqual(expected, {t[-1]: r for (t, r) in
wanted_replicas.items()
if len(t) == 3})
self.assertAlmostEqual(23.2, rb.get_required_overload())
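# rough arithmetic behind the 23.2: the 50-weight device is entitled to
# 5 * 50 / 6050 ~= 0.0413 replicanths by weight, but dispersion wants it
# to carry a full 1.0 replica, and 1.0 / 0.041322... - 1 ~= 23.2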
def test_multiple_replicas_in_zone_with_single_device(self):
rb = ring.RingBuilder(8, 5, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 100})
# z1
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sdb', 'weight': 100})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sdc', 'weight': 100})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sdd', 'weight': 100})
# first things first, make sure we do this right
rb.rebalance()
# each device gets a single replica of every part
expected = {
0: 256,
1: 256,
2: 256,
3: 256,
4: 256,
}
self.assertEqual(expected, {d['id']: d['parts']
for d in rb._iter_devs()})
# but let's make sure we're thinking about it right too
expected = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
}
# by weight everyone is equal
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
weighted_replicas.items()
if len(t) == 4})
# wanted might have liked to have fewer replicas in z1, but the
# single device in z0 limits us to one replica per device
with rb.debug():
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
wanted_replicas.items()
if len(t) == 4})
# even with some overload - still one replica per device
rb.set_overload(1.0)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
# when overload cannot change the outcome, none is required
self.assertEqual(0.0, rb.get_required_overload())
# even though dispersion is terrible (in z1 particularly)
self.assertEqual(100.0, rb.dispersion)
def test_one_big_guy_does_not_spoil_his_buddy(self):
rb = ring.RingBuilder(8, 3, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 100})
# z1
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 100})
# z2
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
'port': 6200, 'device': 'sda', 'weight': 10000})
# obviously d5 gets one whole replica; the other two replicas
# are split evenly among the five other devices
# (i.e. ~0.4 replicanths for each 100 units of weight)
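# worked out: a single device can hold at most 1 of the 3 replicas of any
# part, so d5 is capped at 1.0; the remaining 2 replicas are spread over
# the 5 * 100 = 500 units of other weight, i.e. 2 * 100 / 500 = 0.4 each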
expected = {
0: 0.39999999999999997,
1: 0.39999999999999997,
2: 0.39999999999999997,
3: 0.39999999999999997,
4: 0.39999999999999997,
5: 1.0,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
weighted_replicas.items()
if len(t) == 4})
# with no overload we get the "balanced" placement
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
# but in reality, these devices having such disparate weights
# leads to a *terrible* balance even w/o overload!
rb.rebalance(seed=9)
self.assertEqual(rb.get_balance(), 1308.2031249999998)
# even though part assignment is pretty reasonable
expected = {
0: 103,
1: 102,
2: 103,
3: 102,
4: 102,
5: 256,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
# so what's happening is the small devices are holding *way* more
# *real* parts than their *relative* portion of the weight would
# like them to!
expected = {
0: 1308.2031249999998,
1: 1294.5312499999998,
2: 1308.2031249999998,
3: 1294.5312499999998,
4: 1294.5312499999998,
5: -65.0,
}
self.assertEqual(expected, rb._build_balance_per_dev())
# increasing overload moves towards one replica in each tier
rb.set_overload(0.20)
expected = {
0: 0.48,
1: 0.48,
2: 0.48,
3: 0.48,
4: 0.30857142857142855,
5: 0.7714285714285714,
}
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
# ... and as always increasing overload makes balance *worse*
rb.rebalance(seed=17)
self.assertEqual(rb.get_balance(), 1581.6406249999998)
# but despite the overall trend toward imbalance, in the tier
# with the huge device, the small device is trying to shed parts
# as effectively as it can (which would be useful if it was the
# only small device isolated in a tier with other huge devices
# trying to gobble up all the replicanths in the tier - see
# `test_one_small_guy_does_not_spoil_his_buddy`!)
expected = {
0: 123,
1: 123,
2: 123,
3: 123,
4: 79,
5: 197,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
# *see*, at least *someone's* balance is getting better!
expected = {
0: 1581.6406249999998,
1: 1581.6406249999998,
2: 1581.6406249999998,
3: 1581.6406249999998,
4: 980.078125,
5: -73.06640625,
}
self.assertEqual(expected, rb._build_balance_per_dev())
def test_one_small_guy_does_not_spoil_his_buddy(self):
rb = ring.RingBuilder(8, 3, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 10000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 10000})
# z1
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 10000})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 10000})
# z2
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
'port': 6200, 'device': 'sda', 'weight': 10000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
'port': 6200, 'device': 'sda', 'weight': 100})
# it's almost like 3.0 / 5 ~= 0.6, but that one little guy gets
# his fair share
expected = {
0: 0.5988023952095808,
1: 0.5988023952095808,
2: 0.5988023952095808,
3: 0.5988023952095808,
4: 0.5988023952095808,
5: 0.005988023952095809,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
weighted_replicas.items()
if len(t) == 4})
# with no overload we get a nice balanced placement
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
rb.rebalance(seed=9)
# part placement looks good
expected = {
0: 154,
1: 153,
2: 153,
from __future__ import division
import warnings
import gc
import numpy as np
import random, string
from subprocess import Popen, PIPE
import ConfigParser
import math
import cPickle as pickle
from scipy.sparse.linalg import *
from scipy.sparse import csr_matrix, coo_matrix
import myenums
configParser = ConfigParser.ConfigParser()
configParser.readfp(open(r'../learndnakinetics/config_file.txt'))
CONFIG_NAME = 'parent'
NUPACK_bin= configParser.get(CONFIG_NAME, 'NUPACK_bin')
AUXILIARY_NUPACK_FILE= 'auxilary'
R = 0.001987 # R is the molar gas constant in kcal/(mol K).
MOLARITY_OF_WATER = 55.14 # mol/L at 37C
NUCLEOTIDES = "ACTG"
TRANSLATION_TABLE = string.maketrans(NUCLEOTIDES, "TGAC")
RETURN_MINUS_INF = None
class MultiStrandState (object):
def __init__(self, uniqueID, uniqueDotParanthesis, energy , sequence):
self.uniqueID = uniqueID
self.uniqueDotParanthesis = uniqueDotParanthesis
self.sequence = sequence
self.energy = energy
self.neighborsList = []
class MyStrand(object):
@classmethod
def new_random(cls, length):
"""Create a random strand of a certain length"""
sequence = ''.join(random.choice("ACTG") for i in range(length))
return cls(sequence)
def __init__(self, sequence, complement=None):
"""Create a strand by providing a sequence from 5' to 3' ends"""
self.sequence = sequence
if complement:
self.complement = complement
else:
seq = ''.join(list(reversed(self.sequence))).translate(
TRANSLATION_TABLE)
self.complement = MyStrand(seq, self)
def __len__(self):
return len(self.sequence)
def __eq__(self, other):
return self.sequence == other.sequence
class ParentComplex(object):
"""Contains function and variables that different type of reaction have in common"""
def __init__(self,myPickles, theta, T, concentration, sodium, magnesium, dataset_name, docID ):
if rate_method == myenums.ModelName.ARRHENIUSMODELNAME.value :
self.kinetic_parameters = { "stack": (theta[0] , theta[1]) ,
"loop": (theta[2] , theta[3]),
"end": (theta[4] , theta[5]),
"stack+loop": (theta[6] , theta[7]),
"stack+end": (theta[8] , theta[9]),
"loop+end": (theta[10] , theta[11]),
"stack+stack": (theta[12] , theta[13]),
"alpha" : (theta[14]) }
elif rate_method == myenums.ModelName.METROPOLISMODELNAME.value :
self.kinetic_parameters ={ "k_uni" : theta[0] , "k_bi" : theta[1] }
else:
raise ValueError('Error: Please specify rate_method to be Arrhenius or Metropolis in the configuration file!')
self.dataset_name = dataset_name
self.docID = docID
self.T = T
self.concentration = concentration
self.sodium = sodium
self.magnesium = magnesium
self.fast_access= dict()
self.statespace =[]
self.multistrand_state_dictionary = dict()
self.energies = dict()
self.PSD = dict()
self.transition_structure= myPickles [myenums.Permanent_Folder.TRANSITION_STRUCTURE.value]
self.PSD= myPickles [myenums.Permanent_Folder.PSD.value]
self.rates ={}
self.local_context_bi = dict()
self.local_context_uni = dict()
for i in self.kinetic_parameters:
for j in self.kinetic_parameters:
self.local_context_bi [i , j] = 0
self.local_context_uni [i , j] = 0
def possible_states(self, state):
return self.multistrand_state_dictionary[state].neighborsList
def local_context(self, state1, state2) :
""" Finds the local context of a base pair forming or breaking in transition from state1 to state2 """
s1 = self.dot_paren_modify(state1)
s2= self.dot_paren_modify(state2)
count = 0
for i in range( len(s1) ):
if s1[i] != s2[i]:
if count == 0 :
found1 = i
else :
found2 = i
count +=1
if count ==2 :
break
right = self.half_context(s1[found1 + 1] , s2[found2 -1 ] , found1 + 1 , found2 - 1, s1 )
left = self.half_context(s1[found1 - 1] , s2[found2 + 1 ] , found1 - 1, found2 + 1 , s1 )
return (left, right)
def half_context( self, c1, c2 , f1, f2, s1) :
""" Finds the half context on one side of a base pair forming or breaking """
if c1 =='*' and c2 =='*' :
return "end"
if c1 == '.' and c2 =='.' :
return "loop"
if c1 == '(' and c2 ==')' :
countStack = 0
pointer = -1
for i in range( f1 , f2 + 1 ) :
if s1[i] == '(' :
countStack = countStack + 1
elif s1[i] == ')' :
countStack = countStack - 1
if countStack == 0 :
pointer = i
break
if pointer == f2 :
return "stack"
else:
return "stack+stack"
if ( c1 == ')' and c2== '(' ) or ( c1 == ')' and c2== ')' ) or ( c1 == '(' and c2== '(' ) :
return "stack+stack"
if ( c1 == '(' and c2 == '.' ) or ( c1 == ')' and c2 == '.' ) or ( c1 == '.' and c2 == ')' ) or ( c1 == '.' and c2 == '(' ) :
return "stack+loop"
if ( c1 == '(' and c2 == '*' ) or ( c1 == ')' and c2 == '*' ) or ( c1 == '*' and c2 == ')' ) or ( c1 == '*' and c2 == '(' ) :
return "stack+end"
if ( c1 == '.' and c2 == '*' ) or ( c1 == '*' and c2 == '.' ) :
return "loop+end"
def initial_final_state(self ):
initialStateConfig, finalStateConfig = self.initial_final_state_config()
initialState = self.statespace.index(initialStateConfig)
finalState = self.statespace.index(finalStateConfig)
return [initialState, finalState]
def initial_final_state_config(self ) :
# overridden by the concrete reaction subclasses, which define the
# initial and final state configurations
raise NotImplementedError("Implement in child class")
def generate_statespace( self ):
#Generates the state space
state = self.initial_final_state_config()[0]
self.statespace = []
self.statespace.append(state)
self.fast_access[state] = len(self.statespace)- 1
color= dict()
color [state] = True
head = 0
tail = 1
while head < tail :
state = self.statespace[head ]
pstates = self.possible_states( state )
for s in pstates :
if s not in color :
color[s] = True
self.statespace.append(s )
self.fast_access[s] = len(self.statespace)- 1
tail +=1
head += 1
return self.statespace
def calculate_energy (self, auxilary_f):
""" Calculate the energy of all states using NUPACK"""
try:
fs =open( self.dataset_name+"/"+myenums.Permanent_Folder.ENERGY.value+ "/" + myenums.Permanent_Folder.ENERGY.value+str(self.docID) , "rb" )
self.energies = pickle.load(fs )
fs.close()
except:
self.energies = dict()
print myenums.Permanent_Folder.ENERGY.value + " creating REUSABLE files in the simplifiedstatespace folder! Please wait until you see a message showing the computed lnprobability and iteration time! These messages will not appear again in the next iteration!"
shell = Popen("bash", stdin=PIPE, stdout=PIPE)
for state in self.statespace:
filename = auxilary_f +self.docID
file_contents = self.sequence(state) + self.dot_paren(state)
energy = self.nupack_energy(shell, filename,file_contents )
complex_count = self.n_complex[state]
"""NUPACK uses mole fraction units when computing free energy. We correct to use molar concentration units: DeltaG_molar = DeltaG - (N-1)RT*log(molarity(H2O)) where N is the number of interacting complexes"""
energy -= ((complex_count - 1) * R * (self.T + 273.15) *
np.log(MOLARITY_OF_WATER))
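# for example, for a two-strand complex (N = 2) at T = 37 C the correction
# amounts to 1 * 0.001987 * 310.15 * ln(55.14) ~= 2.47 kcal/mol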
self.energies[state] = energy
shell.stdout.close()
shell.stdin.close()
pickle.dump( self.energies , open(self.dataset_name+"/"+myenums.Permanent_Folder.ENERGY.value+ "/" + myenums.Permanent_Folder.ENERGY.value+str(self.docID) , "wb") )
def nupack_energy(self, shell, filename, file_contents) :
""" Calls NUPACK to calculate energy """
with open(filename + '.in', 'w') as f:
f.write(file_contents)
command = ("%senergy -material dna -T %f -sodium %f "
"-magnesium %f -multi -dangles some %s\n" %
(NUPACK_bin, self.T,self.sodium, self.magnesium, filename))
shell.stdin.write(command)
line = '%'
while line.startswith('%'):
line = shell.stdout.readline()
energy = float(line)
return energy
def Metropolis_rate(self, state1, state2 ):
""" Uses the Metropolis kinetic model to calculate the transition rate from state1 to state and the transition rate from state2 to state1. Only returns the transition rate from state1 to state2 """
transition1 = (state1,state2 )
transition2 = (state2,state1)
rate1 = self.rates.get(transition1)
if rate1:
return rate1
k_uni = self.kinetic_parameters["k_uni"]
k_bi = self.kinetic_parameters["k_bi"]
DeltaG = (self.energies[state2] - self.energies[state1])
DeltaG2 = -DeltaG
RT = R * (self.T + 273.15)
if ( self.n_complex[state1] - self.n_complex[state2] ) == 1 :
rate1 = k_bi * self.concentration
rate2 = k_bi * np.e ** ( - DeltaG2 / RT)
elif (self.n_complex[state1] - self.n_complex[state2] ) == -1 :
rate1 = k_bi * np.e ** ( - DeltaG / RT)
rate2 = k_bi * self.concentration
elif self.n_complex[state1] == self.n_complex[state2] :
if DeltaG > 0.0:
rate1 = k_uni * np.e **(-DeltaG / RT)
rate2 = k_uni
else:
rate1 = k_uni
rate2 = k_uni * np.e **(-DeltaG2 / RT)
else :
raise ValueError('Exception, fix this in Metropolis_rate function. Check transition rate calculations!')
self.rates[transition1] = rate1
self.rates[transition2] = rate2
return rate1
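# note: in the unimolecular branch the pair of rates above satisfies
# detailed balance, rate1 / rate2 = exp(-DeltaG / RT); e.g. for
# DeltaG = 1 kcal/mol at 25 C, RT ~= 0.592 and the ratio is ~0.185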
def Arrhenius_rate(self, state1, state2 ):
"""Uses the Arrhenius kinetic model to calculate the transition rate from state1 to state and the transition rate from state2 to state1. Only returns the transition rate from state1 to state2 | |
window=window, window_p=window_p
)
self.processed_data[i, 0 : len(fid)] = fid
self.processed_data[i, len(fid) :] = np.zeros(self.ft_points[1] - len(fid))
if self.acq[1] != 5:
self.processed_data[i, :] = np.fft.fftshift(
np.fft.fft(
self.processed_data[i, :] * np.exp(1.0j * (phase / 180) * np.pi)
)
)[::-1]
else:
self.processed_data[i, :] = np.fft.fft(
self.processed_data[i, :] * np.exp(1.0j * (phase / 180) * np.pi)
)[::-1]
def proc(
self,
phases=(0, 0),
t2_ss=None,
fp_corrections=(0.5, 0.5),
windows=("sb", "sb"),
windows_p=(0.5, 0.5),
zero_fill=(1.0, 1.0),
):
t1_ac_mode = int(self.acq[1])
if (
3 <= t1_ac_mode <= 6
): # hypercomplex data. T1 points is really half
points_t2 = int(self.points[1] / 2)
else:
points_t2 = self.points[1]
self.ft_points = (
int(2 ** (next_fourier_number(self.points[0]) + zero_fill[0])),
int(2 ** (next_fourier_number(points_t2) + zero_fill[1])),
)
print(self.ft_points)
self.processed_data = np.zeros(self.ft_points, dtype="complex128")
self.processed_data[
0 : self.points[0], 0 : self.points[1]
] = self.converted_data
self.proc_t2(
t2_ss=t2_ss,
phase=phases[0],
c=fp_corrections[0],
window=windows[0],
window_p=windows_p[0],
)
self.proc_t1(
phase=phases[1],
c=fp_corrections[1],
window=windows[1],
window_p=windows_p[1],
)
# this is a test form of processing - should be deleted eventually
def proc_ii(
self,
t2_ss=None,
phases=(0, 0),
fp_corrections=(0.5, 0.5),
windows=("sb", "sb"),
windows_p=(0.5, 0.5),
zero_fill=(1.0, 1.0),
):
t1_ac_mode = int(self.acq[1])
if (
3 <= t1_ac_mode <= 6
): # hypercomplex data. T1 points is really half
points_t2 = int(self.points[1] / 2)
else:
points_t2 = self.points[1]
self.ft_points = (
int(2 ** (next_fourier_number(self.points[0]) + zero_fill[0])),
int(2 ** (next_fourier_number(points_t2) + zero_fill[1])),
)
print(self.ft_points)
self.processed_data = np.zeros(self.ft_points, dtype="complex128")
self.processed_data[
0 : self.points[0], 0 : self.points[1]
] = self.converted_data
self.proc_t2(
t2_ss=t2_ss,
phase=phases[0],
c=fp_corrections[0],
window=windows[0],
window_p=windows_p[0],
)
self.proc_t1_ii(
phase=phases[1],
c=fp_corrections[1],
window=windows[1],
window_p=windows_p[1],
)
class LINData3D:
def __init__(
self,
data_dir=".",
ser_file="ser",
points=None,
dim_status=None,
decim=None,
dspfvs=None,
grpdly=None,
): # sourcery no-metrics
self.ac1 = os.path.join(data_dir, "acqus")
self.ac2 = os.path.join(data_dir, "acqu2s")
self.ac3 = os.path.join(data_dir, "acqu3s")
self.ser = os.path.join(data_dir, "ser")
self.pp = os.path.join(data_dir, "pulseprogram")
self.ser = os.path.join(data_dir, ser_file)
self.dir = data_dir
self.acq = [0, 0, 0] # acquisition modes start as undefined
# dictionary of acquisition modes for Bruker
self.acqDict = {
0: "undefined",
1: "qf",
2: "qsec",
3: "tppi",
4: "states",
5: "states-tppi",
6: "echo-antiecho",
}
# check if we are a Bruker 2D data set
if (
os.path.isfile(self.ac1)
and os.path.isfile(self.ac2)
and os.path.isfile(self.ac3)
and os.path.isfile(self.ser)
and os.path.isfile(self.pp)
):
self.valid = True
else:
self.valid = False
print("Data Directory does not seem to contain Bruker 3D Data")
p0 = p1 = p2 = 0 # we'll find these in the files
dec = dsp = grp = 0 # we'll find these in the files
with open(self.ac1, "r") as acqusfile:
for line in acqusfile:
if "##$TD=" in line:
(_, value) = line.split()
p0 = int(value)
if "##$DECIM=" in line:
(_, value) = line.split()
dec = int(value)
if "##$DSPFVS=" in line:
(_, value) = line.split()
dsp = int(value)
if "##$GRPDLY=" in line:
(_, value) = line.split()
grp = float(value)
if "##$BYTORDA=" in line:
(_, value) = line.split()
self.byte_order = float(value)
self.acq[0] = 0 # doesn't matter; we assume DQD for the direct dimension anyway
with open(self.ac2, "r") as acqusfile:
for line in acqusfile:
if "##$TD=" in line:
(_, value) = line.split()
p1 = int(value)
if "##$FnMODE=" in line:
(_, value) = line.split()
self.acq[1] = int(value)
with open(self.ac3, "r") as acqusfile:
for line in acqusfile:
if "##$TD=" in line:
(_, value) = line.split()
p2 = int(value)
if "##$FnMODE=" in line:
(_, value) = line.split()
self.acq[2] = int(value)
if p0 and p1 and p2: # we got # points for all three dimensions
points = [p0, p1, p2]
else:
print("problem with detecting number of points in data")
self.valid = False
if dec != 0:
decim = dec
if dsp != 0:
dspfvs = dsp
if grp:
grpdly = grp
elif dec != 0 and dsp != 0:
grpdly = dd2g(dspfvs, decim)
else:
print(
"problem with detecting / determining grpdly - needed for Bruker conversion"
)
self.valid = False
print("Data Points structure is: " + str(points))
print(
"DECIM= "
+ str(decim)
+ " DSPFVS= "
+ str(dspfvs)
+ " GRPDLY= "
+ str(grpdly)
)
self.dim_status = ["t", "t", "t"] if dim_status is None else dim_status
if dim_status:
if len(dim_status) != len(points):
raise ValueError(
"insanity: number of dimensions in 'points' and 'dim_status' don't match"
)
for i in range(len(dim_status)):
if dim_status[i] not in ["t", "f"]:
print(dim_status[i])
raise ValueError(
"dimension domains must be 'f' - frequency or 't' - time"
)
# lets store the points to the class instance
self.points = points
# now lets load in the bruker serial file
with open(self.ser, "rb") as serial_file:
if self.byte_order == 0:
self.raw_data = np.frombuffer(serial_file.read(), dtype="<i4")
elif self.byte_order == 1:
self.raw_data = np.frombuffer(serial_file.read(), dtype=">i4")
# now reshape the data
self.raw_data = np.reshape(self.raw_data, np.asarray(self.points), order="F")
# TODO - set up some sort of sanity test
self.converted_data = np.zeros(
(int(self.points[0] / 2), self.points[1], self.points[2]),
dtype="complex128",
)
# lets convert the data
if decim and dspfvs:
if not grpdly:
grpdly = dd2g(dspfvs, decim)
self.convert_bruker_3d(grpdly)
elif grpdly and not decim and not dspfvs:
self.convert_bruker_3d(grpdly)
else:
print(
"Could not convert from Bruker data, incorrect or not found grpdly, dspfvs and/or decim"
)
print("Converted Data Points structure is:", self.points)
self.phases = (0, 0, 0)
self.fp_corrections = (0.5, 0.5, 0.5)
self.windows = ("sb", "sb", "sb")
self.windows_p = (0.5, 0.5, 0.5)
self.zero_fill = (1.0, 1.0, 1.0)
self.processed_data = [] # this will be filled out in proc method
self.ft_points = []
def convert_bruker_3d(self, grpdly):
# edit the number of points in first dimension after Bruker filter removal
# we now count points in complex numbers as well
self.points[0] = len(
remove_bruker_delay(make_complex(self.raw_data[:, 0, 0]), grpdly)
)
for ii in range(
self.points[2]
): # outer loop for third dimension points from dataFID
for i in range(
self.points[1]
): # inner loop for second dimension points from dataFID
fid = remove_bruker_delay(make_complex(self.raw_data[:, i, ii]), grpdly)
self.converted_data[0 : len(fid), i, ii] = fid
self.converted_data = self.converted_data[
0 : self.points[0],
0 : self.points[1],
0 : self.points[2],
]
self.raw_data = self.converted_data # clean up memory a little
if self.acq[1] == 6: # Rance Kay Processing needed
print("Echo-AntiEcho Detected in T2 - dealing with it...")
for i in range(0, self.points[1], 2):
for ii in range(self.points[2]):
a = self.converted_data[:, i, ii]
b = self.converted_data[:, i + 1, ii]
c = a + b
d = a - b
self.converted_data[:, i, ii] = c * np.exp(
1.0j * (90 / 180) * np.pi
)
self.converted_data[:, i + 1, ii] = d * np.exp(
1.0j * (180 / 180) * np.pi
)
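# the sum/difference recombination above (with the 90 and 180 degree
# phase factors) effectively converts each echo/anti-echo pair into the
# cosine/sine-modulated FIDs expected by a States-style T2 transform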
if self.acq[2] == 6: # Rance Kay Processing needed
print("Echo-AntiEcho Detected in T1 - dealing with it...")
for i in range(0, self.points[2], 2):
for ii in range(self.points[1]):
a = self.converted_data[:, ii, i]
b = self.converted_data[:, ii, i + 1]
c = a + b
d = a - b
self.converted_data[:, ii, i] = c * np.exp(
1.0j * (90 / 180) * np.pi
)
self.converted_data[:, ii, i + 1] = d * np.exp(
1.0j * (180 / 180) * np.pi
)
self.raw_data = self.converted_data # clean up memory a little
def proc_t3(self, phase=0, t3_ss=None, c=1.0, window="sb", window_p=0.5):
self.processed_data[0, :, :] = self.processed_data[0, :, :] * c
window = window_function(
points=self.points[0],
window=window,
window_p=window_p,
)
for i in range(self.ft_points[2]):
for ii in range(self.ft_points[1]):
fid = self.processed_data[0 : self.points[0], ii, i]
if t3_ss == "butter":
fid = butter_highpass_filter(fid, 0.01, 0.1, order=1)
elif t3_ss == "poly":
co_ef = np.polynomial.polynomial.polyfit(
np.arange(len(fid)), fid, 5
)
time_points = np.arange(len(fid))
polyline = sum(
co_ef[iii] * time_points ** iii for iii in range(len(co_ef))
)
fid = fid - polyline
# fid[0:len(window)] = fid[0:len(window)] * window
fid = fid * window
self.processed_data[0 : self.points[0], ii, i] = fid
self.processed_data[:, ii, i] = np.fft.fftshift(
np.fft.fft(
self.processed_data[:, ii, i]
* np.exp(1.0j * (phase / 180) * np.pi)
)
)[::-1]
def proc_t2(self, phase=0, c=1.0, window="sb", window_p=0.5):
self.processed_data[:, 0, :] = self.processed_data[:, 0, :] * c
self.processed_data[:, 1, :] = self.processed_data[:, 1, :] * c
window = window_function(
points=self.points[1] / 2, # hypercomplex so halve this
window=window,
window_p=window_p,
)
for i in range(self.ft_points[2]):
for ii in range(self.ft_points[0]):
fid = np.real(
"""
Calculates port ranks and distributes ports.
The rank of a port is a floating point number that represents its position
inside the containing layer. This depends on the node order of that layer and on the
port constraints of the nodes. Port ranks are used by {@link ICrossingMinimizationHeuristics
for calculating barycenter or median values for nodes. Furthermore, they are used in this
class for distributing the ports of nodes where the order of ports is not fixed,
which has to be done as the last step of each crossing minimization processor.
There are different ways to determine port ranks, therefore that is done in concrete subclasses.
"""
from collections import defaultdict
from math import inf
from typing import List
from layeredGraphLayouter.containers.constants import PortType, PortSide
from layeredGraphLayouter.containers.lNode import LNode
from layeredGraphLayouter.containers.lPort import LPort
def hasNestedGraph(node):
return node.nestedLgraph is not None
def isNotFirstLayer(length: int, currentIndex: int, isForwardSweep: bool):
return currentIndex != 0 if isForwardSweep else currentIndex != length - 1
def portTypeFor(isForwardSweep: bool):
return PortType.OUTPUT if isForwardSweep else PortType.INPUT
class AbstractBarycenterPortDistributor():
"""
Constructs a port distributor for the given array of port ranks.
All ports are required to be assigned ids in the range of the given array.
:ivar portRanks: port ranks dict {port: rank} in which the results of ranks calculation are stored.
"""
def __init__(self, random, graph):
self.random = random
r = self.portRanks = {}
self.minBarycenter = inf
self.maxBarycenter = 0.0
np = self.nodePositions = {}
for i, la in enumerate(graph.layers):
for node in la:
np[node] = i
for p in node.iterPorts():
r[p] = 0
self.portBarycenter = defaultdict(int)
self.inLayerPorts = []
# ######################################/
# Port Rank Assignment
def distributePortsWhileSweeping(self, nodeOrder, currentIndex: int, isForwardSweep: bool):
self.updateNodePositions(nodeOrder, currentIndex)
freeLayer = nodeOrder[currentIndex]
side = PortSide.WEST if isForwardSweep else PortSide.EAST
distributePorts_side = self.distributePorts_side
if isNotFirstLayer(len(nodeOrder), currentIndex, isForwardSweep):
if isForwardSweep:
fixedLayer = nodeOrder[currentIndex - 1]
else:
fixedLayer = nodeOrder[currentIndex + 1]
self.calculatePortRanks_many(
fixedLayer, portTypeFor(isForwardSweep))
for node in freeLayer:
distributePorts_side(node, side)
self.calculatePortRanks_many(
freeLayer, portTypeFor(not isForwardSweep))
for node in fixedLayer:
if not hasNestedGraph(node):
distributePorts_side(node, PortSide.opposite(side))
else:
for node in freeLayer:
distributePorts_side(node, side)
# Barycenter port distributor can not be used with always improving crossing minimization heuristics
# which do not need to count.
return False
def calculatePortRanks_many(self, layer: List[LNode], portType: PortType):
"""
Determine ranks for all ports of specific type in the given layer.
The ranks are written to the portRanks dict.
:param layer: a layer as node array
:param portType: the port type to consider
"""
#assert isinstance(layer, LNodeLayer), (layer, layer.__class__)
calculatePortRanks = self.calculatePortRanks
consumedRank = 0
for node in layer:
consumedRank += calculatePortRanks(node, consumedRank, portType)
def calculatePortRanks(self, node: LNode, rankSum: float, type_: PortType):
"""
Assign port ranks for the input or output ports of the given node. If the node's port
constraints imply a fixed order, the ports are assumed to be pre-ordered in the usual way,
i.e. in clockwise order north - east - south - west.
The ranks are written to the portRanks dict.
:param node: a node
:param rankSum: the sum of ranks of preceding nodes in the same layer
:param type: the port type to consider
:return: the rank consumed by the given node; the following node's ranks start at
rankSum + consumedRank
:see: org.eclipse.alg.layered.intermediate.PortListSorter
"""
raise NotImplementedError("Implement on child class")
# ######################################/
# Port Distribution
def distributePorts_side(self, node: LNode, side: PortSide):
if not node.portConstraints.isOrderFixed():
# distribute ports in sweep direction and on north south side of
# node.
self.distributePorts(node, node.getPortSideView(side))
self.distributePorts(node, node.getPortSideView(PortSide.SOUTH))
self.distributePorts(node, node.getPortSideView(PortSide.NORTH))
# sort the ports by considering the side, type, and barycenter
# values
self.sortPorts(node)
def distributePorts(self, node, ports):
"""
Distribute the ports of the given node by their sides, connected ports, and input or output
type.

:param node: node whose ports shall be sorted
"""
self.inLayerPorts.clear()
if ports:
self.iteratePortsAndCollectInLayerPorts(node, ports)
if self.inLayerPorts:
self.calculateInLayerPortsBarycenterValues(node)
def iteratePortsAndCollectInLayerPorts(self, node, ports):
minBarycenter = 0.0
maxBarycenter = 0.0
# a float value large enough to ensure that barycenters of south ports
# work fine
absurdlyLargeFloat = 2 * len(node.layer) + 1
# calculate barycenter values for the ports of the node
dealWithNorthSouthPorts = self.dealWithNorthSouthPorts
continueOnPortIteration = False
inLayerPorts = self.inLayerPorts
portRanks = self.portRanks
portBarycenter = self.portBarycenter
for port in ports:
northSouthPort = port.side == PortSide.NORTH or port.side == PortSide.SOUTH
sum_ = 0
if northSouthPort:
# Find the dummy node created for the port
portDummy = port.portDummy
if (portDummy is None):
continue
sum_ += dealWithNorthSouthPorts(absurdlyLargeFloat,
port, portDummy)
else:
# add up all ranks of connected ports
for outgoingEdge in port.outgoingEdges:
if outgoingEdge.dstNode.layer is node.layer:
inLayerPorts.append(port)
continueOnPortIteration = True
break
else:
# outgoing edges go to the subsequent layer and are
# seen clockwise
connectedPort = outgoingEdge.dst
sum_ += portRanks[connectedPort]
if continueOnPortIteration:
continueOnPortIteration = False
continue
for incomingEdge in port.incomingEdges:
if incomingEdge.srcNode.layer is node.layer:
inLayerPorts.append(port)
continueOnPortIteration = True
break
else:
# incoming edges go to the preceding layer and are seen
# counter-clockwise
connectedPort = incomingEdge.src
sum_ -= portRanks[connectedPort]
if continueOnPortIteration:
continueOnPortIteration = False
continue
if port.getDegree() > 0:
portBarycenter[port] = sum_ / port.getDegree()
minBarycenter = min(minBarycenter, portBarycenter[port])
maxBarycenter = max(maxBarycenter, portBarycenter[port])
elif northSouthPort:
# For northern and southern ports, the sum directly corresponds to the
# barycenter value to be used.
portBarycenter[port] = sum_
def calculateInLayerPortsBarycenterValues(self, node):
# go through the list of in-layer ports and calculate their barycenter
# values
nodePositions = self.nodePositions
nodeIndexInLayer = nodePositions[node] + 1
layerSize = len(node.layer) + 1
minBarycenter = self.minBarycenter
maxBarycenter = self.maxBarycenter
portBarycenter = self.portBarycenter
for inLayerPort in self.inLayerPorts:
# add the indices of all connected in-layer ports
sum_ = 0
inLayerConnections = 0
for connectedPort in inLayerPort.getConnectedPorts():
if connectedPort.getNode().layer is node.layer:
sum_ += nodePositions[connectedPort.getNode()] + 1
inLayerConnections += 1
# The port's barycenter value is the mean index of connected nodes. If that
# value is lower than the node's index, most in-layer edges point upwards, so we want
# the port to be placed at the top of the side. If the value is higher than the
# nodes's index, most in-layer edges point downwards, so we want the port to be
# placed at the bottom of the side.
barycenter = sum_ / inLayerConnections
portSide = inLayerPort.side
if portSide == PortSide.EAST:
if barycenter < nodeIndexInLayer:
# take a low value in order to have the port above
portBarycenter[inLayerPort] = minBarycenter - barycenter
else:
# take a high value in order to have the port below
portBarycenter[inLayerPort] = maxBarycenter + \
(layerSize - barycenter)
elif portSide == PortSide.WEST:
if barycenter < nodeIndexInLayer:
# take a high value in order to have the port above
portBarycenter[inLayerPort] = maxBarycenter + barycenter
else:
# take a low value in order to have the port below
portBarycenter[inLayerPort] = minBarycenter - \
(layerSize - barycenter)
def dealWithNorthSouthPorts(self, absurdlyLargeFloat: float,
port: LPort, portDummy: LNode):
# Find out if it's an input port, an output port, or both
input_ = False
output = False
for portDummyPort in portDummy.iterPorts():
if portDummyPort.origin == port:
if portDummyPort.outgoingEdges:
output = True
elif portDummyPort.incomingEdges:
input_ = True
sum_ = 0.0
if input_ and input_ ^ output:
# It's an input port the index of its dummy node is its inverted sortkey
# (for southern input ports, the key must be larger than the ones
# assigned to output ports or inputandoutput ports)
if port.side == PortSide.NORTH:
sum_ = -self.nodePositions[portDummy]
else:
sum_ = absurdlyLargeFloat - self.nodePositions[portDummy]
elif output and input_ ^ output:
# It's an output port the index of its dummy node is its sort key
# (for northern output ports, the key must be larger than the ones assigned
# to input ports or inputandoutput ports, which are negative and 0,
# respectively)
sum_ = self.nodePositions[portDummy] + 1.0
elif input_ and output:
# It's both, an input and an output port it must sit between input and
# output ports
# North: input ports < 0.0, output ports > 0.0
# South: input ports > FLOAT_MAX / 2, output ports < FLOAT_MAX / 2
of axes are
combined into a single composite image before
saving a figure as a vector graphics file,
such as a PDF.''').tag(config=True, kind='')
#
# CONTOUR PLOTS
#
contour_negative_linestyle = Enum(['dashed', 'solid'], default_value='dashed', help=r'''dashed | solid''').tag(
config=True, kind='')
contour_corner_mask = Enum([True, False, 'legacy'], default_value=True, help=r'''True | False | legacy''').tag(
config=True, kind='')
#
# ERRORBAR
#
errorbar_capsize = Float(1.0, help=r'''length of end cap on error bars in pixels''').tag(config=True, kind='')
#
# HIST
#
hist_bins = Union((Unicode('auto'), Integer(10)), help=r'''The default number of histogram bins.
If Numpy 1.11 or later is
installed, may also be `auto`''').tag(config=True, kind='', default='auto')
#
# SCATTER
#
scatter_marker = Enum(list(Line2D.markers.keys()), default_value='o',
help=r'''The default marker type for scatter plots.''').tag(config=True, kind='')
#
# SAVING FIGURES
#
savefig_dpi = Unicode('300', help=r'''figure dots per inch or "figure"''').tag(config=True, kind='')
savefig_facecolor = Unicode('white', help=r'''figure facecolor when saving''').tag(config=True, kind='color')
savefig_edgecolor = Unicode('white', help=r'''figure edgecolor when saving''').tag(config=True, kind='color')
savefig_format = Enum(['png', 'ps', 'pdf', 'svg'], default_value='png', help=r'''png, ps, pdf, svg''').tag(
config=True, kind='')
savefig_bbox = Enum(["tight", "standard"], default_value='standard', help=r'''"tight" or "standard". "tight" is
incompatible with pipe-based animation backends but will work with temporary file based ones:
e.g. setting animation.writer to ffmpeg will not work, use ffmpeg_file instead''').tag(config=True, kind='')
savefig_pad_inches = Float(0.1, help=r'''Padding to be used when bbox is set to "tight"''').tag(config=True,
kind='')
savefig_jpeg_quality = Integer(95, help=r'''when a jpeg is saved, the default quality parameter.''').tag(
config=True, kind='')
savefig_directory = Unicode("", help=r'''default directory in savefig dialog box, leave empty to always use current
working directory''').tag(config=True, kind='')
savefig_transparent = Bool(False, help=r'''setting that controls whether figures are saved with a transparent
background by default''').tag(config=True, kind='')
#
# Agg rendering
#
agg_path_chunksize = Integer(20000, help=r'''0 to disable; values in the range 10000 to 100000 can improve speed
slightly and prevent an Agg rendering failure when plotting very large data sets,
especially if they are very gappy. It may cause minor artifacts, though. A value of
20000 is probably a good starting point.''').tag(config=True, kind='')
path_simplify = Bool(True, help=r'''When True, simplify paths by removing "invisible" points to reduce file size
and
increase rendering speed''').tag(config=True, kind='')
path_simplify_threshold = Float(0.111111111111, help=r'''The threshold of similarity below which vertices will
be removed in
the simplification process''').tag(config=True, kind='')
path_snap = Bool(True, help=r'''When True, rectilinear axis-aligned paths will be snapped to the nearest pixel
when certain criteria are met. When False, paths will never be snapped.''').tag(config=True,
kind='')
path_sketch = Unicode('None', help=r'''May be none, or a 3-tuple of the form (scale, length, randomness). *scale*
is the amplitude of the wiggle perpendicular to the line (in pixels). *length* is the length of
the wiggle along the line (in pixels). *randomness* is the factor by which the length is
randomly scaled.''').tag(config=True, kind='')
# ==================================================================================================================
# NON MATPLOTLIB OPTIONS
# ==================================================================================================================
style = Union((Unicode(), List(), Tuple()), help='Basic matplotlib style to use').tag(config=True, default_='scpy')
stylesheets = Unicode(help='Directory where to look for local defined matplotlib styles when they are not in the '
' standard location').tag(config=True, type="folder")
use_plotly = Bool(False, help='Use Plotly instead of MatPlotLib for plotting (mode Matplotlib more suitable for '
'printing publication ready figures)').tag(config=True)
method_1D = Enum(['pen', 'scatter', 'scatter+pen', 'bar'], default_value='pen',
help='Default plot methods for 1D datasets').tag(config=True)
method_2D = Enum(['map', 'image', 'stack', 'surface', '3D'], default_value='stack',
help='Default plot methods for 2D datasets').tag(config=True)
method_3D = Enum(['surface'], default_value='surface', help='Default plot methods for 3D datasets').tag(
config=True)
# - 2d
# ------
colorbar = Bool(False, help='Show color bar for 2D plots').tag(config=True)
show_projections = Bool(False, help='Show all projections').tag(config=True)
show_projection_x = Bool(False, help='Show projection along x').tag(config=True)
show_projection_y = Bool(False, help='Show projection along y').tag(config=True)
colormap = Enum(plt.colormaps(), default_value='viridis',
help=r'''A colormap name, gray etc... (equivalent to image_cmap''').tag(config=True)
max_lines_in_stack = Integer(1000, min=1, help='Maximum number of lines to plot in stack plots').tag(config=True)
simplify = Bool(help='Matplotlib path simplification for improving performance').tag(config=True, group='mpl')
# - 1d
# ------
# antialias = Bool(True, help='Antialiasing')
number_of_x_labels = Integer(5, min=3, help='Number of X labels').tag(config=True)
number_of_y_labels = Integer(5, min=3, help='Number of Y labels').tag(config=True)
number_of_z_labels = Integer(5, min=3, help='Number of Z labels').tag(config=True)
number_of_contours = Integer(50, min=10, help='Number of contours').tag(config=True)
contour_alpha = Float(1.00, min=0., max=1.0, help='Transparency of the contours').tag(config=True)
contour_start = Float(0.05, min=0.001, help='Fraction of the maximum for starting contour levels').tag(config=True)
antialiased = Bool(True, help='antialiased option for surface plot').tag(config=True)
rcount = Integer(50, help='rcount (steps in the row mode) for surface plot').tag(config=True)
ccount = Integer(50, help='ccount (steps in the column mode) for surface plot').tag(config=True)
# ..................................................................................................................
def __init__(self, **kwargs):
super().__init__(jsonfile='PlotPreferences', **kwargs)
for key in plt.rcParams:
lis = key.split('.')
if len(lis) > 1:
self._groups.add(lis.pop(0))
if len(lis) > 1:
self._subgroups.add(lis.pop(0))
if len(lis) > 1:
raise NotImplementedError
self._members.add(lis[0])
@property
def available_styles(self):
return available_styles()
@property
def members(self):
return self._members
@property
def groups(self):
return self._groups
@property
def subgroups(self):
return self._subgroups
def set_latex_font(self, family=None):
def update_rcParams():
mpl.rcParams['text.usetex'] = self.text_usetex
mpl.rcParams['mathtext.fontset'] = self.mathtext_fontset
mpl.rcParams['mathtext.bf'] = self.mathtext_bf
mpl.rcParams['mathtext.cal'] = self.mathtext_cal
mpl.rcParams['mathtext.default'] = self.mathtext_default
mpl.rcParams['mathtext.rm'] = self.mathtext_rm
mpl.rcParams['mathtext.it'] = self.mathtext_it
if family is None:
family = self.font_family # take the current one
if family == 'sans-serif':
self.text_usetex = False
self.mathtext_fontset = 'dejavusans'
self.mathtext_bf = 'dejavusans:bold'
self.mathtext_cal = 'cursive'
self.mathtext_default = 'regular'
self.mathtext_rm = 'dejavusans'
self.mathtext_it = 'dejavusans:italic'
update_rcParams()
elif family == 'serif':
self.text_usetex = False
self.mathtext_fontset = 'dejavuserif'
self.mathtext_bf = 'dejavuserif:bold'
self.mathtext_cal = 'cursive'
self.mathtext_default = 'regular'
self.mathtext_rm = 'dejavuserif'
self.mathtext_it = 'dejavuserif:italic'
update_rcParams()
elif family == 'cursive':
self.text_usetex = False
self.mathtext_fontset = 'custom'
self.mathtext_bf = 'cursive:bold'
self.mathtext_cal = 'cursive'
self.mathtext_default = 'regular'
self.mathtext_rm = 'cursive'
self.mathtext_it = 'cursive:italic'
update_rcParams()
elif family == 'monospace':
self.text_usetex = False
mpl.rcParams['mathtext.fontset'] = 'custom'
mpl.rcParams['mathtext.bf'] = 'monospace:bold'
mpl.rcParams['mathtext.cal'] = 'cursive'
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['mathtext.rm'] = 'monospace'
mpl.rcParams['mathtext.it'] = 'monospace:italic'
elif family == 'fantasy':
self.text_usetex = False
mpl.rcParams['mathtext.fontset'] = 'custom'
mpl.rcParams['mathtext.bf'] = 'Humor Sans:bold'
mpl.rcParams['mathtext.cal'] = 'cursive'
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['mathtext.rm'] = 'Comic Sans MS'
mpl.rcParams['mathtext.it'] = 'Humor Sans:italic'
@observe('simplify')
def _simplify_changed(self, change):
plt.rcParams['path.simplify'] = change.new
plt.rcParams['path.simplify_threshold'] = 1.
@default('stylesheets')
def _get_stylesheets_default(self):
# the spectra path in package data
return get_pkg_path('stylesheets', 'scp_data')
@observe('style')
def _style_changed(self, change):
changes = change.new
if not isinstance(changes, list):
changes = [changes]
for _style in changes:
try:
if isinstance(_style, (list, tuple)):
for s in _style:
self._apply_style(s)
else:
self._apply_style(_style)
except Exception as e:
raise e
# additional setting
self.set_latex_font(self.font_family)
@staticmethod
def _get_fontsize(fontsize):
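# Resolves named sizes ('small', 'x-large', ...) to points by letting a
# throwaway Text artist interpret them; numeric sizes pass through the
# same path essentially unchanged.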
if fontsize == 'None':
return float(mpl.rcParams['font.size'])
plt.ioff()
fig, ax = plt.subplots()
t = ax.text(0.5, 0.5, 'Text')
plt.ion()
try:
t.set_fontsize(fontsize)
fontsize = str(round(t.get_fontsize(), 2))
except Exception:
pass
plt.close(fig)
plt.ion()
return fontsize
@staticmethod
def _get_color(color):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
c = [f'C{i}' for i in range(10)]
if color in c:
return f"{colors[c.index(color)]}"
else:
return f"{color}"
def _apply_style(self, _style):
f = (pathclean(self.stylesheets) / _style).with_suffix('.mplstyle')
if not f.exists():
# we have to look matplotlib predetermined style.
f = (pathclean(mpl.__file__).parent / 'mpl-data' / 'stylelib' / _style).with_suffix('.mplstyle')
if not f.exists():
raise TypeError(f'This style {_style} doesn\'t exists')
txt = f.read_text()
pars = txt.split('\n')
for line in pars:
if line.strip() and not line.strip().startswith('#'):
name, value = line.split(':', maxsplit=1)
name_ = name.strip().replace('.', '_')
value = value.split(' # ')[0].strip()
if "size" in name and "figsize" not in name and 'papersize' not in name:
value = self._get_fontsize(value)
elif name.endswith('color') and 'force_' not in name:
value = self._get_color(value)
# debug_(f'{name_} = {value}')
if value == 'true':
value = 'True'
elif value == 'false':
value = 'False'
try:
setattr(self, name_, value)
except ValueError:
if name.endswith('color') and len(value) == 6:
value = '#' + value.replace('\'', '')
except TraitError:
if hasattr(self.traits()[name_], 'default_args'):
try:
value = type(self.traits()[name_].default_args)(map(float, value.split(',')))
except Exception:
value = type(self.traits()[name_].default_args)(value.split(','))
value = tuple(map(str.strip, value))
else:
value = type(self.traits()[name_].default_value)(eval(value))
except Exception as e:
raise e
try:
setattr(self, name_, value)
except Exception as e:
raise e
if line.strip() and line.strip().startswith('##@'):
# SPECTROCHEMPY Parameters
name, value = line[3:].split(':', maxsplit=1)
name = name.strip()
value = value.strip()
try:
setattr(self, name, value)
except TraitError:
setattr(self, name, eval(value))
def to_rc_key(self, key):
rckey = ""
lis = key.split('_')
if len(lis) > 1 and lis[0] in self.groups:
rckey += lis.pop(0)
rckey += '.'
if len(lis) > 1 and lis[0] in self.subgroups:
rckey += lis.pop(0)
rckey += '.'
rckey += '_'.join(lis)
return rckey
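# e.g., assuming the group/subgroup sets built in __init__ from plt.rcParams:
# to_rc_key('savefig_pad_inches') -> 'savefig.pad_inches'
# to_rc_key('xtick_major_size') -> 'xtick.major.size'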
@observe(All)
def _anytrait_changed(self, change):
# ex: change {
# 'owner': object, # The HasTraits instance
####################################################
####################################################
# functions and classes used in conjunction with
# pipeline_metaomics.py
####################################################
####################################################
# import libraries
import sys
import re
import os
import itertools
import sqlite3
import CGAT.IOTools as IOTools
import CGATPipelines.Pipeline as P
from rpy2.robjects import r as R
import pandas
import numpy as np
####################################################
####################################################
####################################################
# SECTION 1
####################################################
####################################################
####################################################
def buildDiffStats(infile, outfile, db, connection):
'''
build differential abundance statistics
at different p-value and Fold change
thresholds for each comparison
'''
tablename = P.toTable(os.path.basename(infile))
statement = "ATTACH '%(db)s' as diff;" % locals()
connection.execute(statement)
# build table of results at different thresholds
ps = [0.01, 0.05, 0.1]
fcs = [0, 0.5, 1, 1.5, 2]
# build results for each pair
pairs = [("HhaIL10R", "WT"), ("WT", "aIL10R"), ("Hh", "WT")]
outf = open(outfile, "w")
outf.write("group1\tgroup2\tadj_P_Val\tlogFC\tnumber\n")
for pair in pairs:
p1, p2 = pair[0], pair[1]
for p, fc in itertools.product(ps, fcs):
statement = """SELECT COUNT(*)
FROM diff.%(tablename)s
WHERE group1 == "%(p1)s"
AND group2 == "%(p2)s"
AND adj_P_Val < %(p)f
AND abs(logFC) > %(fc)f""" % locals()
for data in connection.execute(statement).fetchall():
outf.write("\t".join([p1, p2, str(p), str(fc), str(data[0])]) + "\n")
outf.close()
####################################################
####################################################
####################################################
# SECTION 2
####################################################
####################################################
####################################################
def buildCommonList(rnadb, dnadb, outfile):
'''
build a list of NOGs/genera that were found in
common after filtering between RNA and
DNA data sets
'''
# select appropriate table depending on
# whether we want genera or NOGs
if "genera" in outfile:
tablename = "genus_diamond_aggregated_counts_diff"
else:
tablename = "gene_counts_diff"
# connect to respective
# databases for RNA and DNA
dbh_rna = sqlite3.connect(rnadb)
cc_rna = dbh_rna.cursor()
dbh_dna = sqlite3.connect(dnadb)
cc_dna = dbh_dna.cursor()
# collect NOGs/genera and write to
# file
outf = open(outfile, "w")
rna = set()
dna = set()
for gene in cc_rna.execute("""
SELECT taxa
FROM %s
WHERE group1 == "HhaIL10R"
AND group2 == "WT"
""" % tablename).fetchall():
rna.add(gene[0])
for gene in cc_dna.execute("""SELECT taxa
FROM %s
WHERE group1 == "HhaIL10R"
AND group2 == "WT"
""" % tablename).fetchall():
dna.add(gene[0])
for gene in rna.intersection(dna):
outf.write(gene + "\n")
outf.close()
####################################################
####################################################
####################################################
def buildDiffList(db,
commonset,
outfile,
fdr=0.05,
l2fold=1,
tablename=None):
'''
build a list of differentially expressed
NOGs between colitis and steady state
'''
# list of common NOGs for sql statement
common = set([x[:-1] for x in open(commonset).readlines()])
common = "(" + ",".join(['"'+x+'"' for x in common]) + ")"
# connect to database
dbh = sqlite3.connect(db)
cc = dbh.cursor()
# remove any genes that are different between Hh and steady state
# or between aIL10R and steady state
hh = set([x[0] for x in cc.execute("""SELECT taxa
FROM %s \
WHERE group1 == "Hh" \
AND group2 == "WT" \
AND adj_P_Val < %f""" % (tablename, fdr)).fetchall()])
# sql list
hh = "(" + ",".join(['"'+x+'"' for x in hh]) + ")"
ail10r = set([x[0] for x in cc.execute("""SELECT taxa
FROM %s
WHERE group1 == "WT"
AND group2 == "aIL10R"
AND adj_P_Val < %f""" % (tablename, fdr)).fetchall()])
# sql list
ail10r = "(" + ",".join(['"'+x+'"' for x in ail10r]) + ")"
outf = open(outfile, "w")
for gene in cc.execute("""SELECT taxa
FROM %s
WHERE group1 == "HhaIL10R"
AND group2 == "WT"
AND adj_P_Val < %f
AND (logFC > %i OR logFC < -%i)
AND taxa IN %s
AND taxa NOT IN %s
AND taxa NOT IN %s
ORDER BY logFC DESC""" % (tablename, fdr, l2fold, l2fold, common, hh, ail10r)).fetchall():
outf.write(gene[0] + "\n")
outf.close()
####################################################
####################################################
####################################################
def heatmapDiffFeatures(diff_list,
matrix,
outfile):
'''
draw heatmap of differentially abundant features
'''
R('''library(gplots)''')
R('''library(gtools)''')
R('''diff <- read.csv("%s", header=F, sep="\t", stringsAsFactors=F)''' % diff_list)
R('''dat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % matrix)
R('''rownames(dat) <- dat$taxa''')
R('''dat <- dat[, 1:ncol(dat)-1]''')
R('''dat <- dat[diff[,1],]''')
R('''dat <- na.omit(dat)''')
R('''dat <- dat[, mixedsort(colnames(dat))]''')
R('''samples <- colnames(dat)''')
R('''dat <- t(apply(dat, 1, scale))''')
R('''colnames(dat) <- samples''')
R('''cols <- colorRampPalette(c("blue", "white", "red"))''')
R('''pdf("%s")''' % outfile)
R('''heatmap.2(as.matrix(dat), col = cols, scale = "row", trace = "none", Rowv = F, Colv = F, margins = c(15,15),
distfun = function(x) dist(x, method = "manhattan"),
hclustfun = function(x) hclust(x, method = "ward.D2"))''')
R["dev.off"]()
####################################################
####################################################
####################################################
def buildDiffGeneOverlap(dnafile, rnafile, outfile):
'''
overlap differentially abundant NOGs between
RNA and DNA data sets
'''
dna = set([x[:-1] for x in open(dnafile).readlines()])
rna = set([x[:-1] for x in open(rnafile).readlines()])
ndna = len(dna)
nrna = len(rna)
overlap = len(dna.intersection(rna))
outf = open(outfile, "w")
outf.write("nDNA\tnRNA\tnoverlap\n%(ndna)i\t%(nrna)i\t%(overlap)i\n" % locals())
outf.close()
####################################################
####################################################
####################################################
def testSignificanceOfOverlap(common, overlap, outfile):
'''
Test significance of overlapping lists
between RNA and DNA using a hypergeometric test
'''
R('''pop <- read.csv("%s", header = F, sep = "\t", stringsAsFactors = F)''' % common)
R('''overlaps <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % overlap)
# total genes in population
R('''npop <- nrow(pop)''')
# x = number of white balls picked = overlap
R('''x <- overlaps$noverlap''')
# m = total number of white balls = total diff in RNA analysis
R('''m <- overlaps$nRNA''')
# n = total number of black balls = total - diff in RNA analysis
R('''n <- npop - m''')
# k = total balls sampled = number of genera different in DNA analysis
R('''k <- overlaps$nDNA''')
# hypergeometric test
R('''p <- 1-phyper(x,m,n,k)''')
# write result
R('''res <- matrix(ncol = 2, nrow = 5)''')
R('''res[1,1] <- "x"''')
R('''res[2,1] <- "m"''')
R('''res[3,1] <- "n"''')
R('''res[4,1] <- "k"''')
R('''res[5,1] <- "p-value"''')
R('''res[1,2] <- x''')
R('''res[2,2] <- m''')
R('''res[3,2] <- n''')
R('''res[4,2] <- k''')
R('''res[5,2] <- p''')
R('''print(res)''')
R('''write.table(as.data.frame(res), file = "%s", quote = F, sep = "\t", row.names = F)''' % outfile)
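# Illustrative cross-check (hypothetical helper, not part of the original
# pipeline): the same tail probability computed in Python with scipy instead
# of R's phyper. The x/m/n/k values below are made up for demonstration.
def _example_hypergeom_sketch(x=50, m=200, n=1800, k=300):
    '''compute P(overlap > x) for the urn described in the comments above'''
    from scipy.stats import hypergeom
    # hypergeom.sf(x, M, n, N) with M = m + n total balls, n = m white balls
    # and N = k draws equals 1 - phyper(x, m, n, k) in R
    return hypergeom.sf(x, m + n, m, k)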
####################################################
####################################################
####################################################
def scatterplotAbundanceEstimates(dnamatrix,
rnamatrix,
outfile):
'''
scatterplot abundance estimates between DNA and RNA
data sets
'''
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnamatrix)
R('''rownames(rna) <- rna$taxa''')
    R('''rna <- rna[,1:(ncol(rna)-1)]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnamatrix)
R('''rownames(dna) <- dna$taxa''')
    R('''dna <- dna[,1:(ncol(dna)-1)]''')
# intersection of taxa/NOGs present
R('''keep <- intersect(rownames(rna), rownames(dna))''')
# get data where there is rna and dna
R('''rna <- rna[keep,]''')
R('''dna <- dna[keep,]''')
# take averages
R('''rna.ave <- data.frame(apply(rna, 1, mean))''')
R('''dna.ave <- data.frame(apply(dna, 1, mean))''')
R('''print(cor(dna.ave,rna.ave)[[1]])''')
R('''png("%s")''' % outfile)
R('''plot(dna.ave[,1],
rna.ave[,1],
pch = 16,
col = "slateGrey",
xlab = "Mean DNA abundance",
ylab = "Mean RNA abundance",
main = paste("N = ", nrow(dna.ave), sep = ""))
    abline(lm(rna.ave[,1]~dna.ave[,1]))''')
R["dev.off"]()
####################################################
####################################################
####################################################
def buildDetectionOverlap(rnacounts, dnacounts, outfile):
'''
build detection overlaps between RNA and DNA
data sets
'''
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnacounts)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnacounts)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)]''')
R('''taxa.rna <- rownames(rna)''')
R('''taxa.dna <- rownames(dna)''')
# union of taxa across samples
R('''nrna = length(taxa.rna)''')
R('''ndna = length(taxa.dna)''')
# get overlapping
R('''noverlap = length(intersect(taxa.rna, taxa.dna))''')
R('''result = data.frame(nrna = nrna, ndna = ndna, noverlap = noverlap)''')
R('''write.table(result, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
####################################################
####################################################
####################################################
def plotAbundanceLevelsOfOverlap(rnacounts,
dnacounts,
outfile,
of=None):
'''
    plot abundance levels of taxa/NOGs that do
and don't overlap between data sets
'''
R('''library(ggplot2)''')
# get rna reads per million
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnacounts)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,2:ncol(rna)]''')
R('''rna <- sweep(rna, 2, colSums(rna)/1000000, "/")''')
# get dna reads per million
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnacounts)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,2:ncol(dna)]''')
R('''dna <- sweep(dna, 2, colSums(dna)/1000000, "/")''')
# common and distinct sets
R('''common <- intersect(rownames(dna), rownames(rna))''')
R('''rna.only <- setdiff(rownames(rna), rownames(dna))''')
R('''dna.only <- setdiff(rownames(dna), rownames(rna))''')
# boxplot the abundance levels
R('''rna.common <- apply(rna[common,], 1, mean)''')
R('''dna.common <- apply(dna[common,], 1, mean)''')
R('''rna.distinct <- apply(rna[rna.only,], 1, mean)''')
R('''dna.distinct <- apply(dna[dna.only,], 1, mean)''')
if of == "genes":
        # placeholder so the code still runs: the gene-level (NOG)
        # analysis has no RNA-only features, so there is no real
        # rna.distinct set to compare against
R('''rna.distinct <- rep(0, 20)''')
else:
R('''rna.distinct <- rna.distinct''')
    # test for significant differences between groups
R('''wtest1 <- wilcox.test(rna.common, rna.distinct)''')
R('''wtest2 <- wilcox.test(dna.common, dna.distinct)''')
R('''wtest3 <- wilcox.test(rna.common, dna.distinct)''')
R('''wtest4 <- wilcox.test(dna.common, rna.distinct)''')
    R('''wtest5
# aydin/it/classic_denoisers/spectral.py
import math
from functools import partial
from typing import Optional, Union, Tuple, Sequence
import numpy
from numba import jit, prange
from numpy.fft import fftshift, ifftshift
from scipy.fft import fftn, ifftn, dctn, idctn, dstn, idstn
from aydin.util.array.outer import outer_sum
from aydin.util.crop.rep_crop import representative_crop
from aydin.util.j_invariance.j_invariant_smart import calibrate_denoiser_smart
from aydin.util.patch_size.patch_size import default_patch_size
from aydin.util.patch_transform.patch_transform import (
extract_patches_nd,
reconstruct_from_nd_patches,
)
def calibrate_denoise_spectral(
image,
axes: Optional[Tuple[int, ...]] = None,
patch_size: Optional[Union[int, Tuple[int], str]] = None,
try_dct: bool = True,
try_fft: bool = True,
try_dst: bool = False,
max_order: float = 6.0,
crop_size_in_voxels: Optional[int] = None,
max_num_evaluations: int = 256,
display_images: bool = False,
**other_fixed_parameters,
):
"""
Calibrates the Spectral denoiser for the given image and returns the optimal
parameters obtained using the N2S loss.
Parameters
----------
image: ArrayLike
Image to calibrate spectral denoiser for.
axes: Optional[Tuple[int,...]]
Axes over which to apply the spectral transform (dct, dst, fft) for denoising each patch.
patch_size: int
Patch size for the 'image-to-patch' transform.
Can be: 'full' for a single patch covering the whole image, 'half', 'quarter',
or an int s that corresponds to isotropic patches of shape: (s,)*image.ndim,
or a tuple of ints. By default (None) the patch size is chosen automatically
to give the best results.
(advanced)
try_dct: bool
Tries DCT transform during optimisation.
try_fft: bool
Tries FFT transform during optimisation.
try_dst: bool
        Tries DST transform during optimisation.
max_order: float
Maximal order for the Butterworth filter.
(advanced)
crop_size_in_voxels: int or None for default
Number of voxels for crop used to calibrate denoiser.
(advanced)
max_num_evaluations: int
Maximum number of evaluations for finding the optimal parameters.
(advanced)
display_images: bool
When True the denoised images encountered during optimisation are shown
other_fixed_parameters: dict
Any other fixed parameters
Returns
-------
Denoising function, dictionary containing optimal parameters,
and free memory needed in bytes for computation.
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
# obtain representative crop, to speed things up...
crop = representative_crop(image, crop_size=crop_size_in_voxels, display_crop=False)
# Normalise patch size:
patch_size = default_patch_size(image, patch_size, odd=True)
# Ranges:
threshold_range = (0.0, 1.0) # np.arange(0, 0.5, 0.02) ** 2
freq_bias_range = (0.0, 2.0) # np.arange(0, 2, 0.2)
freq_cutoff_range = (0.01, 1.0)
order_range = (0.5, max_order)
# prepare modes list
modes = []
if try_dct:
modes.append("dct")
if try_fft:
modes.append("fft")
if try_dst:
modes.append("dst")
# Parameters to test when calibrating the denoising algorithm
parameter_ranges = {
'threshold': threshold_range,
'freq_bias_stength': freq_bias_range,
# 'reconstruction_gamma': [0.0001, 0.1, 1.0],
'freq_cutoff': freq_cutoff_range,
'order': order_range,
'mode': modes,
} # 'fft',, 'fft'
# Combine fixed parameters:
other_fixed_parameters = other_fixed_parameters | {
'patch_size': patch_size,
'axes': axes,
}
# Partial function:
_denoise_spectral = partial(
denoise_spectral, **(other_fixed_parameters | {'multi_core': False})
)
# Calibrate denoiser
best_parameters = (
calibrate_denoiser_smart(
crop,
_denoise_spectral,
denoise_parameters=parameter_ranges,
display_images=display_images,
max_num_evaluations=max_num_evaluations,
)
| other_fixed_parameters
)
# Memory needed:
memory_needed = 2 * image.nbytes + 8 * image.nbytes * math.prod(patch_size)
return denoise_spectral, best_parameters, memory_needed
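# Hypothetical usage sketch (not part of the original module), assuming aydin
# and its dependencies are installed: calibrate on a noisy image, then denoise
# with the returned function and the optimised parameters.
def _example_calibrate_and_denoise():
    noisy = numpy.random.rand(128, 128).astype(numpy.float32)  # stand-in for a real noisy image
    denoise_fn, best_params, _memory = calibrate_denoise_spectral(
        noisy, max_num_evaluations=32
    )
    # best_params already contains 'patch_size' and 'axes' plus the optimised
    # threshold/cutoff/order/mode, so it can be splatted straight back in:
    return denoise_fn(noisy, **best_params)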
def denoise_spectral(
image,
axes: Optional[Tuple[int, ...]] = None,
patch_size: Optional[Union[int, Tuple[int], str]] = None,
mode: str = 'dct',
threshold: float = 0.5,
freq_bias_stength: float = 1,
freq_cutoff: Union[float, Sequence[float]] = 0.5,
order: float = 1,
reconstruction_gamma: float = 0,
multi_core: bool = True,
):
"""Denoises the given image by first applying the patch
transform, and then zeroing Fourier/DCT/DST coefficients
    below a given threshold. In addition, we apply a Butterworth
    filter to suppress frequencies above the band-pass and a
    configurable frequency bias before applying the thresholding
    to favour suppressing high versus low frequencies.
    \n\n
    Note: This seems like a lot of parameters, but thanks to our
    auto-tuning approach these parameters are all automatically
    determined 😊.
Parameters
----------
image: ArrayLike
Image to denoise
axes: Optional[Tuple[int,...]]
        Axes over which to apply the spectral transform (dct, dst, fft) for denoising each patch.
patch_size: int
Patch size for the 'image-to-patch' transform.
Can be: 'full' for a single patch covering the whole image, 'half', 'quarter',
or an int s that corresponds to isotropic patches of shape: (s,)*image.ndim,
or a tuple of ints. By default (None) the patch size is chosen automatically
to give the best results.
mode: str
Possible modes are: 'dct'(works best!), 'dst', and 'fft'.
threshold: float
Threshold between 0 and 1
freq_bias_stength: float
Frequency bias: closer to zero: no bias against high frequencies,
closer to one and above: stronger bias towards high-frequencies.
freq_cutoff: float
        Cutoff frequency, must be within [0, 1].
order: float
Filter order, typically an integer above 1.
reconstruction_gamma: float
Patch reconstruction parameter
multi_core: bool
By default we use as many cores as possible, in some cases, for small
(test) images, it might be faster to run on a single core instead of
starting the whole parallelization machinery.
Returns
-------
Denoised image
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
# 'full' patch size:
if patch_size == 'full':
patch_size = image.shape
elif patch_size == 'half':
patch_size = tuple(max(3, 2 * (s // 4)) for s in image.shape)
elif patch_size == 'quarter':
patch_size = tuple(max(3, 4 * (s // 8)) for s in image.shape)
# Normalise patch size:
patch_size = default_patch_size(image, patch_size, odd=True)
# Default axes:
if axes is None:
axes = tuple(range(image.ndim))
# Selected axes:
selected_axes = tuple((a in axes) for a in range(image.ndim))
workers = -1 if multi_core else 1
axes = tuple(a for a in range(1, image.ndim + 1) if (a - 1) in axes)
if mode == 'fft':
transform = lambda x: fftshift( # noqa: E731
fftn(x, workers=workers, axes=axes), axes=axes
)
i_transform = lambda x: ifftn( # noqa: E731
ifftshift(x, axes=axes), workers=workers, axes=axes
)
elif mode == 'dct':
transform = partial(dctn, workers=workers, axes=axes)
i_transform = partial(idctn, workers=workers, axes=axes)
elif mode == 'dst':
transform = partial(dstn, workers=workers, axes=axes)
i_transform = partial(idstn, workers=workers, axes=axes)
else:
raise ValueError(f"Unsupported mode: {mode}")
# Normalise freq_cutoff argument to tuple:
if type(freq_cutoff) is not tuple:
freq_cutoff = tuple((freq_cutoff,) * image.ndim)
# First we apply the patch transform:
patches = extract_patches_nd(image, patch_size=patch_size)
# ### PART 1: apply Butterworth filter to patches:
# Then we apply the sparsifying transform:
patches = transform(patches)
# Compute adequate squared distance image and chose filter implementation:
if mode == 'fft':
f = _compute_distance_image_for_fft(freq_cutoff, patch_size, selected_axes)
elif mode == 'dct' or mode == 'dst':
f = _compute_distance_image_for_dxt(freq_cutoff, patch_size, selected_axes)
else:
raise ValueError(f"Unsupported mode: {mode}")
# Configure filter function:
filter_wrapped = jit(nopython=True, parallel=multi_core)(_filter)
# Apply filter:
patches = filter_wrapped(patches, f, order)
# ### PART 2: apply thresholding:
# Window for frequency bias:
freq_bias = _freq_bias_window(patch_size, freq_bias_stength)
# We use this value to estimate power per coefficient:
power = numpy.absolute(patches)
power *= freq_bias
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(image, name='image')
# viewer.add_image(patches, name='patches')
# viewer.add_image(f_patches, name='f_patches')
# viewer.add_image(power, name='power')
# viewer.add_image(freq_bias, name='freq_bias')
# What is the max coefficient in the transforms:
max_value = numpy.max(power)
# We derive from that the threshold:
threshold *= max_value
# Here are the entries that are below the threshold:
below = power < threshold
# Thresholding:
patches[below] = 0
# Transform back to real space:
patches = i_transform(patches)
# convert to real:
if numpy.iscomplexobj(patches):
patches = numpy.real(patches)
# Transform back from patches to image:
denoised_image = reconstruct_from_nd_patches(
patches, image.shape, gamma=reconstruction_gamma
)
# Cast back to float32 if needed:
denoised_image = denoised_image.astype(numpy.float32, copy=False)
return denoised_image
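# Minimal sketch of the core idea used above (hypothetical, for illustration
# only): hard-threshold the DCT coefficients of a single full-image 'patch',
# without the Butterworth band-pass, frequency bias or patch reconstruction.
def _example_dct_hard_threshold(image, threshold: float = 0.1):
    coeffs = dctn(image.astype(numpy.float32), workers=1)
    # zero every coefficient whose magnitude is below a fraction of the maximum
    coeffs[numpy.absolute(coeffs) < threshold * numpy.absolute(coeffs).max()] = 0
    return idctn(coeffs, workers=1)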
def _freq_bias_window(shape: Tuple[int], alpha: float = 1):
window_tuple = tuple(numpy.linspace(0, 1, s) ** 2 for s in shape)
window_nd = numpy.sqrt(outer_sum(*window_tuple)) + 1e-6
window_nd = 1.0 / (1.0 + window_nd)
window_nd **= alpha
window_nd /= window_nd.max()
window_nd = window_nd.astype(numpy.float32)
return window_nd
def _compute_distance_image_for_dxt(freq_cutoff, shape, selected_axes):
# Normalise selected axes:
if selected_axes is None:
selected_axes = (a for a in range(len(shape)))
f = numpy.zeros(shape=shape, dtype=numpy.float32)
axis_grid = tuple(
(numpy.linspace(0, 1, s) if sa else numpy.zeros((s,)))
for sa, s in zip(selected_axes, shape)
)
for fc, x in zip(freq_cutoff, numpy.meshgrid(*axis_grid, indexing='ij')):
f += (x / fc) ** 2
return f
def _compute_distance_image_for_fft(freq_cutoff, shape, selected_axes):
f = numpy.zeros(shape=shape, dtype=numpy.float32)
axis_grid = tuple(
(numpy.linspace(-1, 1, s) if sa else numpy.zeros((s,)))
for sa, s in zip(selected_axes, shape)
)
for fc, x in zip(freq_cutoff, numpy.meshgrid(*axis_grid, indexing='ij')):
f += (x / | |
import collections
from collections import defaultdict as ddict
from functools import reduce
import itertools
from .. import graph_util
from ..ssa import objtypes, ssa_jumps
from ..ssa.exceptionset import ExceptionSet
from .setree import SEBlockItem, SEIf, SEScope, SESwitch, SETry, SEWhile
# This module is responsible for transforming an arbitrary control flow graph into a tree
# of nested structures corresponding to Java control flow statements. This occurs in
# several main steps
#
# Preprocessing - create graph view and ensure that there are no self loops and every node
# has only one incoming edge type
# Structure loops - ensure every loop has a single entry point. This may result in
# exponential code duplication in pathological cases
# Structure exceptions - create dummy nodes for every throw exception type for every node
# Structure conditionals - order switch targets consistent with fallthrough and create
# dummy nodes where necessary
# Create constraints - sets up the constraints used to represent nested statements
# Merge exceptions - try to merge as many try constraints as possible. This is done by
# extending one until it covers the cases that another one handles, allowing the second
# to be removed
# Parallelize exceptions - freeze try constraints and turn them into multicatch blocks
# where possible (not implemented yet)
# Complete scopes - expand scopes to try to reduce the number of successors
# Add break scopes - add extra scope statements so extra successors can be represented as
# labeled breaks
#########################################################################################
class DominatorInfo(object):
def __init__(self, root):
self._doms = doms = {root:frozenset([root])}
stack = [root]
while stack:
cur = stack.pop()
assert cur not in stack
for child in cur.successors:
new = doms[cur] | frozenset([child])
old = doms.get(child)
if new != old:
new = new if old is None else (old & new)
assert child in new
if old is not None:
assert new == old or len(new) < len(old)
if new != old:
doms[child] = new
if child not in stack:
stack.append(child)
self.nodeset = set(self._doms)
self.root = root
def dominators(self, node):
return self._doms[node]
def ordered(self, node): # for debugging
return sorted(self._doms[node], key=lambda n:len(self._doms[n]))
def dominator(self, *nodes):
'''Get the common dominator of nodes'''
doms = reduce(frozenset.intersection, map(self._doms.get, nodes))
return max(doms, key=lambda n:len(self._doms[n]))
def set_extend(self, dom, nodes):
nodes = list(nodes) + [dom]
pred_nl_func = lambda x:x.predecessors_nl if x is not dom else []
return frozenset(graph_util.topologicalSort(nodes, pred_nl_func))
def area(self, node): return ClosedSet([k for k,v in self._doms.items() if node in v], node, self)
def extend(self, dom, nodes): return ClosedSet(self.set_extend(dom, nodes), dom, self)
def extend2(self, nodes): return self.extend(self.dominator(*nodes), nodes)
def single(self, head): return ClosedSet([head], head, self)
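# Toy illustration (hypothetical, not used by the decompiler): dominator sets
# on a diamond graph A -> {B, C} -> D computed with DominatorInfo above. Every
# path from A to D merges again at D, so only A and D dominate D.
def _example_dominator_info():
    class _Node(object):
        def __init__(self, name):
            self.name, self.successors = name, []
    a, b, c, d = _Node('A'), _Node('B'), _Node('C'), _Node('D')
    a.successors = [b, c]
    b.successors = [d]
    c.successors = [d]
    info = DominatorInfo(a)
    assert info.dominators(d) == frozenset([a, d])  # neither B nor C dominates D
    assert info.dominator(b, c) is a                # their common dominator is A
    return info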
# Immutable class representing a dominator closed set of nodes
# TODO clean up usage (remove copy() calls, etc.)
class ClosedSet(object):
__slots__ = "nodes", "head", "info"
def __init__(self, nodes, head, info):
self.nodes = frozenset(nodes)
self.head = head
self.info = info
if nodes:
assert head in nodes
# assert info.dominator(*nodes) == head
def touches(self, other): return not self.nodes.isdisjoint(other.nodes)
def isdisjoint(self, other): return self.nodes.isdisjoint(other.nodes)
def issuperset(self, other): return self.nodes.issuperset(other.nodes)
def issubset(self, other): return self.nodes.issubset(other.nodes)
def __or__(self, other):
assert type(self) == type(other)
if not other.nodes or self is other:
return self
elif not self.nodes:
return other
assert self.head is not None and other.head is not None
assert self.info is other.info
if self.head in other.nodes:
self, other = other, self
nodes, head, info = self.nodes, self.head, self.info
nodes |= other.nodes
if other.head not in self.nodes:
head = info.dominator(head, other.head)
nodes = info.set_extend(head, nodes)
return ClosedSet(nodes, head, info)
def __and__(self, other):
assert type(self) == type(other)
nodes = self.nodes & other.nodes
if not nodes:
return ClosedSet.EMPTY
if self.head in other.nodes:
self, other = other, self
if other.head in self.nodes:
head = other.head
else:
head = self.info.dominator(*nodes)
return ClosedSet(nodes, head, self.info)
@staticmethod
def union(*sets):
return reduce(ClosedSet.__or__, sets, ClosedSet.EMPTY)
def __str__(self): # pragma: no cover
return 'set{} ({} nodes)'.format(self.head, len(self.nodes))
__repr__ = __str__
def __lt__(self, other): return self.nodes < other.nodes
def __le__(self, other): return self.nodes <= other.nodes
def __gt__(self, other): return self.nodes > other.nodes
def __ge__(self, other): return self.nodes >= other.nodes
ClosedSet.EMPTY = ClosedSet(frozenset(), None, None)
#########################################################################################
class ScopeConstraint(object):
def __init__(self, lbound, ubound):
self.lbound = lbound
self.ubound = ubound
_count = itertools.count()
_gcon_tags = 'while','try','switch','if','scope'
class CompoundConstraint(object):
def __init__(self, tag, head, scopes):
assert tag in _gcon_tags
self.id = next(_count) # for debugging purposes
self.tag = tag
self.scopes = scopes
self.head = head
# self.heads = frozenset([head]) if head is not None else frozenset()
# only used by try constraints, but we leave dummy sets for the rest
self.forcedup = self.forceddown = frozenset()
self.lbound = ClosedSet.union(*[scope.lbound for scope in self.scopes])
self.ubound = ClosedSet.union(*[scope.ubound for scope in self.scopes])
if head is not None:
assert head in self.lbound.nodes and head in self.ubound.nodes
assert self.ubound >= self.lbound
def __str__(self): # pragma: no cover
return self.tag+str(self.id)
__repr__ = __str__
def WhileCon(dom, head):
ubound = dom.area(head)
lbound = dom.extend(head, [n2 for n2 in head.predecessors if head in dom.dominators(n2)])
return CompoundConstraint('while', None, [ScopeConstraint(lbound, ubound)])
def TryCon(dom, trynode, target, cset, catchvar):
trybound = dom.single(trynode)
tryscope = ScopeConstraint(trybound, trybound)
# Catch scopes are added later, once all the merging is finished
new = CompoundConstraint('try', None, [tryscope])
new.forcedup = set()
new.forceddown = set()
new.target = target
new.cset = cset
new.catchvar = catchvar
assert len(new.target.successors) == 1
new.orig_target = new.target.successors[0]
return new
def FixedScopeCon(lbound):
return CompoundConstraint('scope', None, [ScopeConstraint(lbound, lbound)])
#########################################################################################
def structureLoops(nodes):
todo = nodes
while_heads = []
while todo:
newtodo = []
temp = set(todo)
sccs = graph_util.tarjanSCC(todo, lambda block:[x for x in block.predecessors if x in temp])
for scc in sccs:
if len(scc) <= 1:
continue
scc_set = set(scc)
entries = [n for n in scc if not scc_set.issuperset(n.predecessors)]
assert len(entries) == 1
head = entries[0]
newtodo.extend(scc)
newtodo.remove(head)
while_heads.append(head)
todo = newtodo
return while_heads
def structureExceptions(nodes):
thrownodes = [n for n in nodes if n.block and isinstance(n.block.jump, ssa_jumps.OnException)]
newinfos = []
for n in thrownodes:
manager = n.block.jump.cs
assert len(n.block.jump.params) == 1
thrownvar = n.block.jump.params[0]
mycsets = {}
mytryinfos = []
newinfos.append((n, manager.mask, mycsets, mytryinfos))
for handler, cset in manager.sets.items():
en = n.blockdict[handler.key, True]
mycsets[en] = cset
en.predecessors.remove(n)
n.successors.remove(en)
caughtvars = [v2 for (v1,v2) in zip(n.outvars[en], en.invars) if v1 == thrownvar]
assert len(caughtvars) <= 1
caughtvar = caughtvars.pop() if caughtvars else None
outvars = n.outvars.pop(en)[:]
assert outvars.count(thrownvar) <= 1
if caughtvar is not None:
outvars[outvars.index(thrownvar)] = None
for tt in cset.getTopTTs():
top = ExceptionSet.fromTops(cset.env, objtypes.className(tt))
new = en.indirectEdges([])
new.predecessors.append(n)
n.successors.append(new)
n.eassigns[new] = outvars # should be safe to avoid copy as we'll never modify it
nodes.append(new)
mytryinfos.append((top, new, caughtvar))
return newinfos
def structureConditionals(entryNode, nodes):
dom = DominatorInfo(entryNode)
switchnodes = [n for n in nodes if n.block and isinstance(n.block.jump, ssa_jumps.Switch)]
ifnodes = [n for n in nodes if n.block and isinstance(n.block.jump, ssa_jumps.If)]
# For switch statements, we can't just blithely indirect all targets as that interferes with fallthrough behavior
switchinfos = []
for n in switchnodes:
targets = n.successors
# a proper switch block must be dominated by its entry point
# and all other nonloop predecessors must be dominated by a single other target
# keep track of remaining good targets, bad ones will be found later by elimination
target_set = frozenset(targets)
good = []
parents = {}
for target in targets:
if n not in dom.dominators(target):
continue
preds = [x for x in target.predecessors if x != n and target not in dom.dominators(x)]
for pred in preds:
choices = dom.dominators(pred) & target_set
if len(choices) != 1:
break
choice = min(choices)
if parents.setdefault(target, choice) != choice:
break
else:
# passed all the tests for now, target appears valid
good.append(target)
while 1:
size = len(parents), len(good)
# prune bad parents and children from dict
            for k, v in list(parents.items()):
if k not in good:
del parents[k]
elif v not in good:
del parents[k]
good.remove(k)
# make sure all parents are unique. In case they're not, choose one arbitrarily
chosen = {}
            for target in list(good):
if target in parents and chosen.setdefault(parents[target], target) != target:
del parents[target]
good.remove(target)
if size == (len(parents), len(good)): # nothing changed this iteration
break
# Now we need an ordering of the good blocks consistent with fallthrough
        # regular topoSort can't be used since
"""@file anchor_deepattractornet_softmax_loss.py
contains the AnchorDeepattractornetSoftmaxLoss"""
import loss_computer
from nabu.neuralnetworks.components import ops
import warnings
import tensorflow as tf
class AnchorDeepattractornetSoftmaxLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using anchors
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
warnings.warn('In following versions it will be required to use the AnchorDeepattractornetLoss', Warning)
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
# calculate loss and normalisation factor of mini-batch
loss, norm = ops.anchor_deepattractornet_loss(
spectrogram_targets, mix_to_mask, emb_vec, anchors, seq_length, self.batch_size, activation='softmax')
return loss, norm
class AnchorNormDeepattractornetSoftmaxLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using anchors. Embeddings will be
normalized.
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
warnings.warn('In following versions it will be required to use the AnchorDeepattractornetLoss', Warning)
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
# calculate loss and normalisation factor of mini-batch
loss, norm = ops.anchor_deepattractornet_loss(
spectrogram_targets, mix_to_mask, emb_vec, anchors, seq_length, self.batch_size, activation='softmax',
normalize=True)
return loss, norm
class WeightedAnchorNormDeepattractornetSoftmaxLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using weighted anchors.
Embeddings will be normalized.
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
warnings.warn('In following versions it will be required to use the AnchorDeepattractornetLoss', Warning)
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
spk_weights = logits['spk_weights']
# calculate loss and normalisation factor of mini-batch
loss, norm = ops.weighted_anchor_deepattractornet_loss(
spectrogram_targets, mix_to_mask, emb_vec, anchors, spk_weights, seq_length, self.batch_size, activation='softmax',
normalize=True)
return loss, norm
class TimeAnchorDeepattractornetSoftmaxLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using anchors
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
warnings.warn('In following versions it will be required to use the AnchorDeepattractornetLoss', Warning)
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
# calculate loss and normalisation factor of mini-batch
loss, norm = ops.time_anchor_deepattractornet_loss(
spectrogram_targets, mix_to_mask, emb_vec, anchors, seq_length, self.batch_size, activation='softmax')
return loss, norm
class AnchorDeepattractornetLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network using anchors
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
if 'speaker_logits' in logits:
# Assuming dimensions are B x T x S
speaker_logits = logits['speaker_logits']
av_speaker_logits_time_flag = self.lossconf['av_speaker_logits_time_flag'] == 'True'
else:
speaker_logits = None
if 'anchors_scale' in logits:
# Assuming dimensions are B x T x S
anchors_scale = logits['anchors_scale']
anchors_scale = anchors_scale[0, 0]
else:
anchors_scale = None
time_anchors_flag = self.lossconf['time_anchors_flag'] == 'True'
av_anchors_time_flag = (self.lossconf['av_anchors_time_flag'] == 'True') and time_anchors_flag
activation = self.lossconf['activation']
normalize_embs = self.lossconf['normalize_embs'] == 'True'
normalize_anchors = self.lossconf['normalize_anchors'] == 'True'
if 'do_square' in self.lossconf:
do_square = self.lossconf['do_square'] == 'True'
else:
do_square = True
with tf.name_scope('anchor_deepattractornet_loss'):
feat_dim = spectrogram_targets.get_shape()[2]
emb_dim = anchors.get_shape()[-1]
time_dim = tf.shape(anchors)[1]
nrS = spectrogram_targets.get_shape()[3]
V = tf.reshape(emb_vec, [self.batch_size, -1, feat_dim, emb_dim], name='V') # dim: (B x T x F x D)
if normalize_embs:
V = V / (tf.norm(V, axis=-1, keepdims=True) + 1e-12)
time_dim = tf.shape(V)[1]
if not time_anchors_flag:
anchors = tf.tile(tf.expand_dims(tf.expand_dims(anchors, 0), 0), [self.batch_size, time_dim, 1, 1]) # dim: (B x T x S x D)
if normalize_anchors:
anchors = anchors / (tf.norm(anchors, axis=-1, keepdims=True) + 1e-12)
if speaker_logits is not None:
speaker_logits = tf.expand_dims(speaker_logits, -1)
if av_speaker_logits_time_flag:
speaker_logits = tf.reduce_mean(speaker_logits, 1, keepdims=True)
anchors *= speaker_logits
if anchors_scale is not None:
anchors *= anchors_scale
if av_anchors_time_flag:
anchors = tf.reduce_mean(anchors, axis=1, keepdims=True)
anchors = tf.tile(anchors, [1, time_dim, 1, 1])
prod_1 = tf.matmul(V, anchors, transpose_a=False, transpose_b=True, name='AVT')
if activation == 'softmax':
masks = tf.nn.softmax(prod_1, axis=-1, name='M') # dim: (B x T x F x nrS)
elif activation in ['None', 'none', None]:
masks = prod_1
elif activation == 'sigmoid':
masks = tf.nn.sigmoid(prod_1, name='M')
else:
masks = tf.nn.sigmoid(prod_1, name='M')
X = tf.expand_dims(mix_to_mask, -1, name='X') # dim: (B x T x F x 1)
reconstructions = tf.multiply(masks, X) # dim: (B x T x F x nrS)
reconstructions = tf.transpose(reconstructions, perm=[3, 0, 1, 2]) # dim: (nrS x B x T x F)
S = tf.transpose(spectrogram_targets, [3, 0, 1, 2]) # nrS x B x T x F
if 'vad_targets' in targets:
overlap_weight = float(self.lossconf['overlap_weight'])
vad_sum = tf.reduce_sum(targets['vad_targets'], -1)
bin_weights = tf.where(
vad_sum > 1,
tf.ones([self.batch_size, time_dim]) * overlap_weight,
tf.ones([self.batch_size, time_dim]))
bin_weights = tf.expand_dims(bin_weights, -1) # broadcast the frame weights to all bins
norm = tf.reduce_sum(bin_weights) * tf.to_float(feat_dim)
else:
bin_weights = None
norm = tf.to_float(tf.reduce_sum(seq_length) * feat_dim)
loss = ops.base_pit_loss(reconstructions, S, bin_weights=bin_weights, overspeakererized=False, do_square=do_square)
return loss, norm
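# Minimal NumPy sketch (hypothetical shapes, independent of the TF graph built
# above) of the anchor-based masking idea: per-bin embeddings are compared to
# per-speaker anchors, softmaxed over speakers and applied to the mixture.
def _example_anchor_masking_numpy():
    import numpy as np
    B, T, F, D, S = 2, 100, 129, 20, 2                # batch, frames, bins, emb dim, speakers
    V = np.random.randn(B, T, F, D)                   # bin embeddings
    A = np.random.randn(B, T, S, D)                   # (tiled) anchors
    X = np.abs(np.random.randn(B, T, F))              # mixture magnitude spectrogram
    scores = np.einsum('btfd,btsd->btfs', V, A)       # dim: B x T x F x S
    scores -= scores.max(-1, keepdims=True)           # numerical stability
    masks = np.exp(scores) / np.exp(scores).sum(-1, keepdims=True)  # softmax over speakers
    return masks * X[..., None]                       # per-speaker reconstructions, B x T x F x S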
class TimeAnchorNormDeepattractornetSoftmaxLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using anchors. Embeddings will be
normalized.
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
warnings.warn('In following versions it will be required to use the AnchorDeepattractornetLoss', Warning)
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
# calculate loss and normalisation factor of mini-batch
loss, norm = ops.time_anchor_deepattractornet_loss(
spectrogram_targets, mix_to_mask, emb_vec, anchors, seq_length, self.batch_size, activation='softmax',
normalize=True)
return loss, norm
class TimeAnchorReadHeadsNormDeepattractornetSoftmaxLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using anchors. Embeddings will be
normalized. Use read heads for assignments.
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: a dictionary of [batch_size] vectors containing
the sequence lengths
Returns:
loss: a scalar value containing the loss
norm: a scalar value indicating how to normalize the loss
"""
warnings.warn('In following versions it will be required to use the AnchorDeepattractornetLoss', Warning)
        # Clean spectrograms of sources
        spectrogram_targets = targets['multi_targets']
        # Spectrogram of the original mixture, used to mask for scoring
mix_to_mask = targets['mix_to_mask']
# Length of sequences
seq_length = seq_length['bin_emb']
# Logits (=output network)
emb_vec = logits['bin_emb']
anchors = logits['anchors']
read_heads = logits['read_heads']
# calculate loss and normalisation factor of mini-batch
loss, norm = ops.time_anchor_read_heads_deepattractornet_loss(
spectrogram_targets, mix_to_mask, emb_vec, anchors, read_heads, seq_length, self.batch_size,
activation='softmax', normalize=True)
return loss, norm
class TimeAnchorReadHeadsNormDeepattractornetSoftmaxFramebasedLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
"""
Compute the loss
Creates the operation to compute the deep attractor network with softmax loss, using anchors. Embeddings will be
normalized. Use read heads for assignments.
Args:
targets: a dictionary of [batch_size x time x ...] tensor containing
the targets
logits: a dictionary of [batch_size x time x ...] tensors containing the logits
seq_length: | |
# -*- coding: utf-8 -*-
import copy
from functools import total_ordering
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from olympia.constants.applications import ANDROID, FIREFOX
from olympia.constants.base import (
ADDON_DICT, ADDON_EXTENSION, ADDON_LPAPP, ADDON_SEARCH,
ADDON_SLUGS, ADDON_STATICTHEME, _ADDON_THEME, _ADDON_PERSONA)
@total_ordering
class StaticCategory(object):
"""Helper to populate `CATEGORIES` and provide some helpers.
Note that any instance is immutable to avoid changing values
on the globally unique instances during test runs which can lead
to hard to debug sporadic test-failures.
"""
def __init__(self, name=None, description=None, weight=0):
# Avoid triggering our own __setattr__ implementation
# to keep immutability intact but set initial values.
object.__setattr__(self, 'name', name)
object.__setattr__(self, 'weight', weight)
object.__setattr__(self, 'description', description)
def __str__(self):
return str(self.name)
def __repr__(self):
return '<%s: %s (%s)>' % (
self.__class__.__name__, force_bytes(self), self.application)
def __eq__(self, other):
return (
self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __lt__(self, other):
return (self.weight, self.name) < (other.weight, other.name)
def get_url_path(self):
try:
type = ADDON_SLUGS[self.type]
except KeyError:
type = ADDON_SLUGS[ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
def _immutable(self, *args):
raise TypeError('%r instances are immutable' %
self.__class__.__name__)
__setattr__ = __delattr__ = _immutable
del _immutable
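# Hypothetical sanity check of the immutability contract documented above:
# assigning (or deleting) an attribute on an instance raises TypeError.
def _example_static_category_is_immutable():
    cat = StaticCategory(name='Example', weight=1)
    try:
        cat.weight = 2
    except TypeError:
        return True
    return False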
CATEGORIES_NO_APP = {
ADDON_EXTENSION: {
'alerts-updates': StaticCategory(
name=_(u'Alerts & Updates'),
description=_(
u'Download Firefox extensions that help you stay '
u'up-to-date, track tasks, improve efficiency. Find '
u'extensions that reload tabs, manage productivity, and '
u'more.'
)
),
'appearance': StaticCategory(
name=_(u'Appearance'),
description=_(
u'Download extensions that modify the appearance of '
u'websites and the browser Firefox. This category '
u'includes extensions for dark themes, tab management, '
u'and more.'
)
),
'bookmarks': StaticCategory(
name=_(u'Bookmarks'),
description=_(
u'Download extensions that enhance bookmarks by '
u'password-protecting them, searching for duplicates, '
u'finding broken bookmarks, and more.'
)
),
'download-management': StaticCategory(
name=_(u'Download Management'),
description=_(
u'Download Firefox extensions that can help download web, '
u'music and video content. You can also find extensions '
u'to manage downloads, share files, and more.'
)
),
'feeds-news-blogging': StaticCategory(
name=_(u'Feeds, News & Blogging'),
description=_(
u'Download Firefox extensions that remove clutter so you '
u'can stay up-to-date on social media, catch up on blogs, '
u'RSS feeds, reduce eye strain, and more.'
)
),
'games-entertainment': StaticCategory(
name=_(u'Games & Entertainment'),
description=_(
u'Download Firefox extensions to boost your entertainment '
u'experience. This category includes extensions that can '
u'enhance gaming, control video playback, and more.'
)
),
'language-support': StaticCategory(
name=_(u'Language Support'),
description=_(
u'Download Firefox extensions that offer language support '
u'like grammar check, look-up words, translate text, '
u'provide text-to-speech, and more.'
)
),
'photos-music-videos': StaticCategory(
name=_(u'Photos, Music & Videos'),
description=_(
u'Download Firefox extensions that enhance photo, music '
u'and video experiences. Extensions in this category '
u'modify audio and video, reverse image search, and more.'
)
),
'privacy-security': StaticCategory(
name=_(u'Privacy & Security'),
description=_(
u'Download Firefox extensions to browse privately and '
u'securely. This category includes extensions to block '
u'annoying ads, prevent tracking, manage redirects, and '
u'more.'
)
),
'search-tools': StaticCategory(
name=_(u'Search Tools'),
description=_(
u'Download Firefox extensions for search and look-up. '
u'This category includes extensions that highlight and '
u'search text, lookup IP addresses/domains, and more.'
)
),
'shopping': StaticCategory(
name=_(u'Shopping'),
description=_(
u'Download Firefox extensions that can enhance your '
u'online shopping experience with coupon finders, deal '
u'finders, review analyzers, more.'
)
),
'social-communication': StaticCategory(
name=_(u'Social & Communication'),
description=_(
u'Download Firefox extensions to enhance social media and '
u'instant messaging. This category includes improved tab '
u'notifications, video downloaders, and more.'
)
),
'tabs': StaticCategory(
name=_(u'Tabs'),
description=_(
u'Download Firefox extension to customize tabs and the '
u'new tab page. Discover extensions that can control '
u'tabs, change the way you interact with them, and more.'
)
),
'web-development': StaticCategory(
name=_(u'Web Development'),
description=_(
u'Download Firefox extensions that feature web '
u'development tools. This category includes extensions '
u'for GitHub, user agent switching, cookie management, '
u'and more.'
)
),
'other': StaticCategory(
name=_(u'Other'),
weight=333,
description=_(
u'Download Firefox extensions that can be unpredictable '
u'and creative, yet useful for those odd tasks.'
)
),
# Android only categories:
'device-features-location': StaticCategory(
name=_(u'Device Features & Location'),
description=_(
u'Download extensions to enhance Firefox for Android. '
u'Perform quick searches, free up system resources, take '
u'notes, and more.'
)
),
'experimental': StaticCategory(
name=_(u'Experimental'),
description=_(
u'Download Firefox extensions that are regularly updated '
u'and ready for public testing. Your feedback informs '
u'developers on changes to make in upcoming versions.'
)
),
'performance': StaticCategory(
name=_(u'Performance'),
description=_(
u'Download extensions that give Firefox a performance '
u'boost. Find extensions that help you be more productive '
u'and efficient by blocking annoying ads and more.'
)
),
'photos-media': StaticCategory(
name=_(u'Photos & Media'),
description=_(
u'Download Firefox extensions to enhance photos and '
u'media. This category includes extensions to reverse '
u'search images, capture full page screenshots, and more.'
)
),
'security-privacy': StaticCategory(
name=_(u'Security & Privacy'),
description=_(
u'Download Firefox extensions to surf safely and '
u'privately. Discover extensions that can stop sneaky ad '
u'trackers in their tracks, easily clear browsing '
u'history, and more.'
)
),
'social-networking': StaticCategory(
name=_(u'Social Networking'),
description=_(
u'Download Firefox extensions to enhance your experience '
u'on popular social networking websites such as YouTube, '
u'GitHub, Reddit, and more.'
)
),
'sports-games': StaticCategory(
name=_(u'Sports & Games'),
description=_(
u'Download Firefox extensions to give your entertainment '
u'experience a boost with live stream enhancers, sports '
u'updates, and more.'
)
),
'user-interface': StaticCategory(
name=_(u'User Interface'),
description=_(
u'Download user interface Firefox extensions to alter web '
u'pages for easier reading, searching, browsing, and more.'
)
),
},
_ADDON_THEME: {
'animals': StaticCategory(name=_(u'Animals')),
'compact': StaticCategory(name=_(u'Compact')),
'large': StaticCategory(name=_(u'Large')),
'miscellaneous': StaticCategory(name=_(u'Miscellaneous')),
'modern': StaticCategory(name=_(u'Modern')),
'nature': StaticCategory(name=_(u'Nature')),
'os-integration': StaticCategory(name=_(u'OS Integration')),
'retro': StaticCategory(name=_(u'Retro')),
'sports': StaticCategory(name=_(u'Sports'))
},
ADDON_STATICTHEME: {
'abstract': StaticCategory(
name=_(u'Abstract'),
description=_(
u'Download Firefox artistic and conceptual themes. This '
u'category includes colorful palettes and shapes, fantasy '
u'landscapes, playful cats, psychedelic flowers.'
)
),
'causes': StaticCategory(
name=_(u'Causes'),
description=_(
u'Download Firefox themes for niche interests and topics. '
u'This category includes sports themes, holidays, '
u'philanthropic causes, nationalities, and much more.'
)
),
'fashion': StaticCategory(
name=_(u'Fashion'),
description=_(
u'Download Firefox themes that celebrate style of all '
u'forms—patterns, florals, textures, models, and more.'
)
),
'film-and-tv': StaticCategory(
name=_(u'Film and TV'),
description=_(
u'Download Firefox themes with movies and television. '
u'This category includes anime like Uchiha Madara, movies '
u'like The Matrix, shows (Game of Thrones), and more.'
)
),
'firefox': StaticCategory(
name=_(u'Firefox'),
description=_(
u'Download Firefox themes with the Firefox browser theme. '
u'This category includes colorful, diverse depictions of '
u'the Firefox logo, including more general fox themes.'
)
),
'foxkeh': StaticCategory(
name=_(u'Foxkeh'),
description=_(
u'Download Firefox themes with the Japanese Firefox. This '
u'category includes themes that depict the cute Foxkeh '
u'mascot in various poses on diverse landscapes.'
)
),
'holiday': StaticCategory(
name=_(u'Holiday'),
description=_(
u'Download Firefox themes with holidays. This category '
u'includes Christmas, Halloween, Thanksgiving, St. '
u'Patrick’s Day, Easter, Fourth of July, and more.'
)
),
'music': StaticCategory(
name=_(u'Music'),
description=_(
u'Download Firefox themes for musical interests and '
u'artists. This category includes popular bands like '
u'Nirvana and BTS, instruments, music videos, and much '
u'more.'
)
),
'nature': StaticCategory(
name=_(u'Nature'),
description=_(
u'Download Firefox themes with animals and natural '
u'landscapes. This category includes flowers, sunsets, '
u'foxes, seasons, planets, kittens, birds, and more.'
)
),
'other': StaticCategory(
name=_(u'Other'),
weight=333,
description=_(
u'Download Firefox themes that are interesting, creative, '
u'and unique.'
)
),
'scenery': StaticCategory(
name=_(u'Scenery'),
description=_(
u'Download Firefox themes that feature the environment '
u'and the natural world. This category includes sunsets, '
u'beaches, illustrations, city skylines, and more.'
)
),
'seasonal': StaticCategory(
name=_(u'Seasonal'),
description=_(
u'Download Firefox themes for all four seasons—fall, '
u'winter, spring, and summer. Autumn leaves, snowy '
u'mountain peaks, sunny summer days, and spring flowers.'
)
),
'solid': StaticCategory(
name=_(u'Solid'),
description=_(
u'Download Firefox themes with solid and gradient colors '
u'to personalize your browser. This category includes '
u'bold reds, pastels, soft greys, and much more.'
)
),
'sports': StaticCategory(
name=_(u'Sports'),
description=_(
u'Download Firefox themes that feature a variety of '
| |
import itertools as itl
import NodosAST as ast
import visitor
from cool_utils import *
from cool_errors import *
ERROR = 0
INTEGER = 1
class TypeCollectorVisitor:
def __init__(self,scope):
self.context = scope
@visitor.on('node')
def visit(self, node):
pass
@visitor.when(ast.ProgramNode)
def visit(self, node):
classes_names = set()
for program_class in node.classes:
if program_class.name in classes_names:
throw_exception(TypeError, node.line, node.index, 'Type %s already defined' % program_class.name)
classes_names.add(program_class.name)
self.visit(program_class)
@visitor.when(ast.ClassNode)
def visit(self, node):
        # for now we allow inheriting from IO, but not from the other builtin classes
if node.inherit != 'Object' and node.inherit in builtins_classes_names and node.inherit != 'IO':
throw_exception(TypeError, node.line, node.index, "Builtin type %s can't not be inherited" % node.inherit)
if node.inherit == 'Main':
throw_exception(TypeError, node.line, node.index, "Main class can't be inherited")
self.context.create_type(node.name, node.inherit, node.line, node.index,node)
class TypeBuilderVisitor:
def __init__(self,scope):
self.context = scope
self._current_type = None
@visitor.on('node')
def visit(self, node):
pass
@visitor.when(ast.ProgramNode)
def visit(self, node):
for program_class in node.classes:
if program_class not in builtins_classes:
self.visit(program_class)
@visitor.when(ast.ClassNode)
def visit(self, node):
self._current_type = self.context.get_type(node.name)
if not self._current_type.define_parent(self.context):
throw_exception(TypeError,node.line,node.index,
'Type %s is not defined'% self._current_type._parent_type_name)
methods_names = set()
for methoddef in node.methods:
if methoddef.id in methods_names:
throw_exception(TypeError,node.line, node.index, 'Method %s already defined' % methoddef.id)
methods_names.add(methoddef.id)
self.visit(methoddef)
attr_names = set()
for attrdef in node.attributes:
if attrdef.id in attr_names:
throw_exception(TypeError,node.line, node.index, 'Attr %s already defined' % attrdef.id)
attr_names.add(attrdef.id)
self.visit(attrdef)
@visitor.when(ast.MethodNode)
def visit(self, node):
if node.return_type == 'SELF_TYPE':
return_type = self._current_type
else:
return_type = self.context.get_type(node.return_type)
if return_type is None:
            throw_exception(TypeError, node.line, node.index, 'Return Type %s not defined' % node.return_type)
params = []
param_names = set()
for id, type_name in node.parameters:
if id == 'self':
throw_exception(TypeError, node.line, node.index, 'Params can not be named self')
if type_name == "SELF_TYPE":
type_class = self._current_type
else:
type_class = self.context.get_type(type_name)
if type_class is None:
throw_exception(TypeError, node.line,node.index,'Param Type %s not defined' % type_name)
if id in param_names:
throw_exception(TypeError, node.line, node.index, 'Param Name %s already defined' % id)
param_names.add(id)
params.append((id, type_class))
self._current_type.define_method(node.id,return_type,params)
@visitor.when(ast.AttributeNode)
def visit(self, node: ast.AttributeNode):
if node.id == 'self':
throw_exception(TypeError, node.line, node.index, 'Attributes can not be named self')
if node.type == "SELF_TYPE":
attr_type = self._current_type
else:
attr_type = self.context.get_type(node.type)
if attr_type is None:
            throw_exception(TypeError, node.line, node.index, 'Type %s not defined' % node.type)
self._current_type.define_attr(node.id, attr_type)
class TypeCheckerVisitor:
def __init__(self):
self.current_class_name = ''
def look_for_Main_Class(self, context):
main_type = context.type_is_defined('Main')
if main_type is None:
throw_exception(
NameError, 0, 0, "Can not find Main class")
# if main_type._parent_type_name != 'IO':
# throw_exception(TypeError, 0, 0, "Class Main can't inherits from other class")
main_method = main_type.get_method('main')
if main_method is None:
throw_exception(TypeError, 0, 0, "Class Main does't have a main method")
if len(main_method.arguments) > 0:
throw_exception(TypeError, 0, 0, "Class Main does't receive params")
def check_class_hierarchy(self,context):
classes = context._classes_global_field()
for class_ in classes.values():
if class_._checked_for_cycle:
continue
for current_type in class_.get_hierarchy_iterator():
if current_type._checking_for_cycle:
throw_exception(SemanticError,current_type.line,current_type.index,
'Detected cycle inheritence in %s' % current_type.name)
current_type._checking_for_cycle = True
for current_type in class_.get_hierarchy_iterator():
current_type._checking_for_cycle = False
for current_type in class_.get_hierarchy_iterator():
current_type._checked_for_cycle = True
@visitor.on('node')
def visit(self, node, scope):
pass
def update_classes_attrs(self, scope: Scope):
for cls in scope.scope_classes_dictionary.values():
if cls.updated_attrs_inheritence:
continue
clss = [i for i in cls.get_hierarchy_iterator() if not i.updated_attrs_inheritence]
for i in range(len(clss)-2,-1,-1):
type_d = clss[i]
ast_node = type_d.node_ast_ref
parent_name = type_d._parent_type_name
parent_attrs = type_d.parent_type.node_ast_ref.attributes
for parent_attr in parent_attrs:
if parent_attr in ast_node.attributes:
throw_exception(TypeError, ast_node.line, ast_node.index, f'Attribute {parent_attr.id} from type '
f'{type_d.name} '
f'already defined on parent '
f'{parent_name}')
ast_node.attributes.append(parent_attr)
type_d.updated_attrs_inheritence = True
@visitor.when(ast.ProgramNode)
def visit(self, node: ast.ProgramNode, scope: Scope):
tcv = TypeCollectorVisitor(scope)
tcv.visit(node)
tbv = TypeBuilderVisitor(scope)
tbv.visit(node)
self.check_class_hierarchy(scope)
self.look_for_Main_Class(scope)
self.update_classes_attrs(scope)
for cool_class in node.classes:
child_scope = scope.create_child_scope()
self.visit(cool_class, child_scope)
@visitor.when(ast.ClassNode)
def visit(self, node,scope):
self.current_class_name = node.name
        # Add the self object with the current class type
scope.define_variable('self', self.current_class_name)
for attr in node.attributes:
# child_scope = scope.create_child_scope()
self.visit(attr,scope)
for method in node.methods:
child_scope = scope.create_child_scope()
self.visit(method,child_scope)
@visitor.when(ast.MethodNode)
def visit(self, node, scope):
        # Add the parameters as local variables
for param_name, param_type_name in node.parameters:
vinfo = scope.define_variable(param_name, param_type_name)
if vinfo is None:
throw_exception(NameError, node.line,node.index,"Error in method {}: parameter {}".format(node.id,param_name))
self.visit(node.expression, scope)
method_info = scope.get_method_from_type(self.current_class_name, node.id)
return_type = method_info.return_type
if node.expression.computed_type is None:
throw_exception(TypeError, node.line, node.index,
"Error in method {}: return static type expected {}, founded: Void".
format(node.id, return_type.name))
if not node.expression.computed_type.lower_equals(return_type):
throw_exception(TypeError, node.line, node.index,
"Error in method {}: return static type expected {}, founded:{}".
format(node.id, return_type.name, node.expression.computed_type.name))
@visitor.when(ast.AttributeNode)
def visit(self, node, scope):
node.computed_type = scope.get_type(node.type)
if node.value is not None:
self.visit(node.value, scope)
if not node.value.computed_type.lower_equals(node.computed_type):
throw_exception(TypeError, node.line, node.index, f'Type {node.value.computed_type.name} is not lower or equals to'
f' type {node.computed_type.name}')
vinfo = scope.define_variable(node.id, node.type)
if vinfo is None:
throw_exception(NameError, node.line, node.index, 'Attribute already defined %s' % node.id)
@visitor.when(ast.AssignNode)
def visit(self, node: ast.AssignNode, scope):
        # Check that this symbol exists
vinfo = scope.get_variable_info(node.variable.id)
if vinfo is None:
            throw_exception(NameError, node.line, node.index, 'Error while assigning: ' + node.variable.id + ' is not defined')
if vinfo.name == 'self':
throw_exception(TypeError, node.line, node.index, 'self object can not be assign')
self.visit(node.expression, scope)
node.computed_type = vinfo.vtype
if not node.expression.computed_type.lower_equals(node.computed_type):
throw_exception(TypeError, node.line, node.index, 'Error between lvalue {} and rvalue {}'.format(vinfo.vtype.name,
node.expression.computed_type.name))
@visitor.when(ast.DispatchNode)
def visit(self, node,scope):
        '''
        To check the dispatch e0.f(e1,...,en):
        1- compute the type of each ei
        2- check that all of those types are defined
        3- check that the type of e0 defines a method f with the
           corresponding parameters.
           3.1- each argument type must be lower than (conform to)
                the declared parameter type
        4- return the type of the method's result
        (an illustrative example is sketched in the comments right after this method)
        :param node:
        :param scope:
        :param errors:
        :return:
        '''
if node.left_expression is not None:
            # the method is called as expr.id(params)
self.visit(node.left_expression,scope)
dispatch_type = node.left_expression.computed_type
else:
            # the method is called as id(params)
dispatch_type = scope.get_type(self.current_class_name)
if dispatch_type is None:
throw_exception(TypeError, node.line,node.index,'Error type {} not defined in dispatch'.format(dispatch_type))
        # first check that the method exists by walking up the class hierarchy,
        # then that it receives the same number of arguments, and check their types
method = scope.get_params_from_method(dispatch_type.name,node.func_id)
if method is None:
throw_exception(TypeError,node.line,node.index,"Method %s doesn't exist on type %s" % (node.func_id,
dispatch_type.name))
        if len(method.arguments) != len(node.parameters):
            throw_exception(TypeError, node.line, node.index, "Method %s expected to receive %i params, found: %i" %
                            (node.func_id, len(method.arguments), len(node.parameters)))
for i, param in enumerate(node.parameters):
self.visit(param, scope)
if not param.computed_type.lower_equals(method.arguments[i][1]):
throw_exception(TypeError, node.line, node.index, 'Error in dispatch node, type {} is not lower than {}'
.format(param.computed_type.name,
method.arguments[i][1].name))
node.computed_type = method.return_type
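    # An illustrative sketch (a hypothetical COOL fragment, not taken from the
    # source) of what the checks above accept:
    #
    #   class A { f(x : Int) : Object { x }; };
    #   class B inherits A { };
    #   (new B).f(3)
    #
    # Here dispatch_type is B, the method f is found by walking up the
    # hierarchy to A, the argument type Int conforms to the declared
    # parameter type, and the node's computed_type becomes Object.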
@visitor.when(ast.StaticDispatchNode)
def visit(self, node, scope):
self.visit(node.left_expression, scope)
dispatch_type = node.left_expression.computed_type
# if dispatch_type is None:
# throw_exception(TypeError, node.line,node.index,'Type {} not defined'.format(dispatch_type))
parent_type = scope.get_type(node.parent_type)
if parent_type is None:
throw_exception(TypeError, node.line, node.index, 'Type {} not defined'.format(node.parent_type))
        if not dispatch_type.lower_equals(parent_type):
            throw_exception(TypeError, node.line, node.index, "Type %s is not lower than the static dispatch parent type %s"
                            % (dispatch_type.name, parent_type.name))
method = scope.get_params_from_method(parent_type.name,node.func_id)
for i, param in enumerate(node.parameters):
self.visit(param, scope)
if not param.computed_type.lower_equals(method.arguments[i][1]):
throw_exception(TypeError, node.line,node.index,'Error in dispatch node, type {} is not lower than {}'.format(param.computed_type.name,
method.arguments[i][1].name))
node.computed_type = method.return_type
@visitor.when(ast.ConditionalNode)
def visit(self, node, scope):
        '''
        If e1 is of type Bool, e2 of type T2 and e3 of type T3
        :param node:
        :param errors:
        :return: the LCA of the types of expressions e2 and e3
        '''
then_child_scope = scope.create_child_scope()
else_child_scope = scope.create_child_scope()
self.visit(node.if_expression,scope)
self.visit(node.then_expression,then_child_scope)
self.visit(node.else_expression,else_child_scope)
if node.if_expression.computed_type != scope.get_type('Bool'):
            throw_exception(TypeError, node.line, node.index, 'Error: if condition is not of type Bool')
lca = node.then_expression.computed_type.get_lca(node.else_expression.computed_type)
node.computed_type = lca
@visitor.when(ast.CaseNode)
def visit(self, node, scope):
self.visit(node.case_expression,scope)
id_s_types = set()
branches_types = []
for id_typeName, expr in node.implications:
if id_typeName[0] == 'self':
throw_exception(TypeError, node.line, node.index, 'case local variables can not be named self')
implication_id_type = scope.get_type(id_typeName[1])
if implication_id_type is None:
throw_exception(TypeError, node.line, node.index,
'Type {} not defined'.format(id_typeName[1]))
if id_typeName[1] in id_s_types:
throw_exception(TypeError, node.line, node.index, 'Type {} already defined on Case Node'
.format(id_typeName[1]))
id_s_types.add(id_typeName[1])
child_scope = scope.create_child_scope()
if child_scope.define_variable(*id_typeName) is None:
throw_exception(TypeError, node.line, node.index, 'Variable definition {} not valid'
.format(id_typeName))
self.visit(expr,child_scope)
branches_types.append(expr.computed_type)
        lca_joined = branches_types[0]
        for branch_type in branches_types[1:]:
            # join the branch types through their lowest common ancestor
            lca_joined = lca_joined.get_lca(branch_type)
        node.computed_type = lca_joined
@visitor.when(ast.LoopNode)
def visit(self, node, scope):
self.visit(node.while_expression,scope)
        if node.while_expression.computed_type != scope.get_type('Bool'):
            throw_exception(TypeError, node.line, node.index, 'Error: while condition expression is not of type Bool.')
child_scope = scope.create_child_scope()
self.visit(node.loop_expression, child_scope)
node.computed_type = scope.get_type("Object")
@visitor.when(ast.NewTypeNode)
def visit(self, node,scope):
        '''
        There are two cases for new:
        1- if T is SELF_TYPE
        2- otherwise
        in either case, for now, return the same type T
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mlrun database HTTP server"""
import ast
import mimetypes
import tempfile
import traceback
from argparse import ArgumentParser
from base64 import b64decode
from datetime import date, datetime
from distutils.util import strtobool
from functools import wraps
from http import HTTPStatus
from operator import attrgetter
from os import environ, remove
from pathlib import Path
from flask import Flask, Response, jsonify, request
from flask.json import JSONEncoder
from kfp import Client as kfclient
from mlrun.builder import build_runtime
from mlrun.config import config
from mlrun.datastore import get_object_stat, StoreManager
from mlrun.db import RunDBError, RunDBInterface, periodic
from mlrun.db.filedb import FileRunDB
from mlrun.db.sqldb import SQLDB, to_dict as db2dict, table2cls
from mlrun.k8s_utils import K8sHelper
from mlrun.run import import_function, new_function, list_piplines
from mlrun.runtimes import runtime_resources_map
from mlrun.scheduler import Scheduler
from mlrun.utils import get_in, logger, now_date, parse_function_uri, update_in
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, date):
return obj.isoformat()
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
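# A minimal sketch of what the encoder above produces (values are
# illustrative only): dates become ISO strings and arbitrary iterables
# become JSON lists, e.g.
#
#   json.dumps({'when': date(2020, 1, 2), 'items': {1, 2}}, cls=CustomJSONEncoder)
#   # -> '{"when": "2020-01-02", "items": [1, 2]}'
#
# anything else falls through to the default Flask JSONEncoder behaviour.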
_scheduler: Scheduler = None
_db: RunDBInterface = None
_k8s: K8sHelper = None
_logs_dir = None
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
basic_prefix = 'Basic '
bearer_prefix = 'Bearer '
def json_error(status=HTTPStatus.BAD_REQUEST, **kw):
kw.setdefault('ok', False)
logger.error(str(kw))
reply = jsonify(**kw)
reply.status_code = status
return reply
def parse_basic_auth(header):
"""
>>> parse_basic_auth('Basic YnVnczpidW5ueQ==')
['bugs', 'bunny']
"""
b64value = header[len(basic_prefix):]
value = b64decode(b64value).decode()
return value.split(':', 1)
class AuthError(Exception):
pass
def basic_auth_required(cfg):
return cfg.user or cfg.password
def bearer_auth_required(cfg):
return cfg.token
@app.before_request
def check_auth():
if request.path == '/api/healthz':
return
cfg = config.httpdb
header = request.headers.get('Authorization', '')
try:
if basic_auth_required(cfg):
if not header.startswith(basic_prefix):
raise AuthError('missing basic auth')
user, passwd = parse_basic_auth(header)
if user != cfg.user or passwd != cfg.password:
raise AuthError('bad basic auth')
elif bearer_auth_required(cfg):
if not header.startswith(bearer_prefix):
raise AuthError('missing bearer auth')
token = header[len(bearer_prefix):]
if token != cfg.token:
raise AuthError('bad bearer auth')
except AuthError as err:
resp = jsonify(ok=False, error=str(err))
resp.status_code = HTTPStatus.UNAUTHORIZED
return resp
def catch_err(fn):
@wraps(fn)
def wrapper(*args, **kw):
try:
return fn(*args, **kw)
except RunDBError as err:
return json_error(
HTTPStatus.INTERNAL_SERVER_ERROR, ok=False, reason=str(err))
return wrapper
# curl -d@/path/to/job.json http://localhost:8080/submit
@app.route('/api/submit', methods=['POST'])
@app.route('/api/submit/', methods=['POST'])
@app.route('/api/submit_job', methods=['POST'])
@app.route('/api/submit_job/', methods=['POST'])
@catch_err
def submit_job():
try:
data: dict = request.get_json(force=True)
except ValueError:
return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')
logger.info('submit_job: {}'.format(data))
return _submit(data)
def _submit(data):
task = data.get('task')
function = data.get('function')
url = data.get('functionUrl')
if not url and task:
url = get_in(task, 'spec.function')
if not (function or url) or not task:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='bad JSON, need to include function/url and task objects',
)
    # TODO: block exec for function['kind'] in ['', 'local'] (must be a
    # remote/container runtime)
try:
if function and not url:
fn = new_function(runtime=function)
else:
if '://' in url:
fn = import_function(url=url)
else:
project, name, tag = parse_function_uri(url)
runtime = _db.get_function(name, project, tag)
if not runtime:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: function {} not found'.format(
url),
)
fn = new_function(runtime=runtime)
if function:
fn2 = new_function(runtime=function)
for attr in ['volumes', 'volume_mounts', 'env', 'resources',
'image_pull_policy', 'replicas']:
val = getattr(fn2.spec, attr, None)
if val:
setattr(fn.spec, attr, val)
fn.set_db_connection(_db, True)
logger.info('func:\n{}'.format(fn.to_yaml()))
# fn.spec.rundb = 'http://mlrun-api:8080'
schedule = data.get('schedule')
if schedule:
args = (task, )
job_id = _scheduler.add(schedule, fn, args)
_db.store_schedule(data)
resp = {'schedule': schedule, 'id': job_id}
else:
resp = fn.run(task, watch=False)
logger.info('resp: %s', resp.to_yaml())
except Exception as err:
logger.error(traceback.format_exc())
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: {}'.format(err),
)
if not isinstance(resp, dict):
resp = resp.to_dict()
return jsonify(ok=True, data=resp)
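# A rough sketch of a request body that _submit() accepts (field names follow
# the checks above; all values are illustrative only):
#
#   {
#     "task": {"spec": {"function": "myproj/trainer:latest"}},
#     "function": {"kind": "job", "spec": {"image": "mlrun/mlrun"}},
#     "schedule": "*/10 * * * *"
#   }
#
# With "schedule" present the run is registered with the scheduler; otherwise
# fn.run(task, watch=False) executes it immediately.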
# curl -d@/path/to/pipe.yaml http://localhost:8080/submit_pipeline
@app.route('/api/submit_pipeline', methods=['POST'])
@app.route('/api/submit_pipeline/', methods=['POST'])
@catch_err
def submit_pipeline():
namespace = request.args.get('namespace', config.namespace)
experiment_name = request.args.get('experiment', 'Default')
run_name = request.args.get('run', '')
run_name = run_name or \
experiment_name + ' ' + datetime.now().strftime('%Y-%m-%d %H-%M-%S')
arguments = {}
arguments_data = request.headers.get('pipeline-arguments')
if arguments_data:
arguments = ast.literal_eval(arguments_data)
logger.info('pipeline arguments {}'.format(arguments_data))
ctype = request.content_type
if '/yaml' in ctype:
ctype = '.yaml'
    elif '/zip' in ctype:
ctype = '.zip'
else:
return json_error(HTTPStatus.BAD_REQUEST,
reason='unsupported pipeline type {}'.format(ctype))
logger.info('writing file {}'.format(ctype))
if not request.data:
return json_error(HTTPStatus.BAD_REQUEST, reason='post data is empty')
print(str(request.data))
pipe_tmp = tempfile.mktemp(suffix=ctype)
with open(pipe_tmp, 'wb') as fp:
fp.write(request.data)
try:
client = kfclient(namespace=namespace)
experiment = client.create_experiment(name=experiment_name)
run_info = client.run_pipeline(experiment.id, run_name, pipe_tmp,
params=arguments)
except Exception as e:
remove(pipe_tmp)
return json_error(HTTPStatus.BAD_REQUEST,
reason='kfp err: {}'.format(e))
remove(pipe_tmp)
return jsonify(ok=True, id=run_info.run_id,
name=run_info.run_info.name)
# curl -d@/path/to/job.json http://localhost:8080/build/function
@app.route('/api/build/function', methods=['POST'])
@app.route('/api/build/function/', methods=['POST'])
@catch_err
def build_function():
try:
data = request.get_json(force=True)
except ValueError:
return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')
logger.info('build_function:\n{}'.format(data))
function = data.get('function')
with_mlrun = strtobool(data.get('with_mlrun', 'on'))
try:
fn = new_function(runtime=function)
fn.set_db_connection(_db)
fn.save(versioned=False)
ready = build_runtime(fn, with_mlrun)
fn.save(versioned=False)
logger.info('Fn:\n %s', fn.to_yaml())
except Exception as err:
logger.error(traceback.format_exc())
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: {}'.format(err),
)
return jsonify(ok=True, data=fn.to_dict(), ready=ready)
# curl -d@/path/to/job.json http://localhost:8080/start/function
@app.route('/api/start/function', methods=['POST'])
@app.route('/api/start/function/', methods=['POST'])
@catch_err
def start_function():
try:
data = request.get_json(force=True)
except ValueError:
return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')
logger.info('start_function:\n{}'.format(data))
url = data.get('functionUrl')
if not url:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: functionUrl not specified',
)
project, name, tag = parse_function_uri(url)
runtime = _db.get_function(name, project, tag)
if not runtime:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: function {} not found'.format(url),
)
fn = new_function(runtime=runtime)
resource = runtime_resources_map.get(fn.kind)
if 'start' not in resource:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: "start" not supported by this runtime',
)
try:
fn.set_db_connection(_db)
# resp = resource['start'](fn) # TODO: handle resp?
resource['start'](fn)
fn.save(versioned=False)
logger.info('Fn:\n %s', fn.to_yaml())
except Exception as err:
logger.error(traceback.format_exc())
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: {}'.format(err),
)
return jsonify(ok=True, data=fn.to_dict())
# curl -d@/path/to/job.json http://localhost:8080/status/function
@app.route('/api/status/function', methods=['POST'])
@app.route('/api/status/function/', methods=['POST'])
@catch_err
def function_status():
try:
data = request.get_json(force=True)
except ValueError:
return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')
logger.info('function_status:\n{}'.format(data))
selector = data.get('selector')
kind = data.get('kind')
if not selector or not kind:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: selector or runtime kind not specified',
)
resource = runtime_resources_map.get(kind)
if 'status' not in resource:
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: "status" not supported by this runtime',
)
try:
resp = resource['status'](selector)
logger.info('status: %s', resp)
except Exception as err:
logger.error(traceback.format_exc())
return json_error(
HTTPStatus.BAD_REQUEST,
reason='runtime error: {}'.format(err),
)
return jsonify(ok=True, data=resp)
# curl -d@/path/to/job.json http://localhost:8080/build/status
@app.route('/api/build/status', methods=['GET'])
@app.route('/api/build/status/', methods=['GET'])
@catch_err
def build_status():
name = request.args.get('name', '')
project = request.args.get('project', '')
tag = request.args.get('tag', '')
offset = int(request.args.get('offset', '0'))
logs = strtobool(request.args.get('logs', 'on'))
fn = _db.get_function(name, project, tag)
if not fn:
return json_error(HTTPStatus.NOT_FOUND, name=name,
project=project, tag=tag)
state = get_in(fn, 'status.state', '')
pod = get_in(fn, 'status.build_pod', '')
image = get_in(fn, 'spec.build.image', '')
out = b''
if not pod:
if state == 'ready':
image = image or get_in(fn, 'spec.image')
return Response(out, mimetype='text/plain',
headers={"function_status": state,
"function_image": image,
"builder_pod": pod})
logger.info('get pod {} status'.format(pod))
state = _k8s.get_pod_status(pod)
logger.info('pod state={}'.format(state))
if state == 'succeeded':
logger.info('build completed successfully')
state = 'ready'
if state in ['failed', 'error']:
logger.error('build {}, watch the build pod logs: {}'.format(
state, pod))
if logs and state != 'pending':
resp = _k8s.logs(pod)
if resp:
out = resp.encode()[offset:]
update_in(fn, 'status.state', state)
if state == 'ready':
update_in(fn, 'spec.image', image)
_db.store_function(fn, name, project, tag)
return Response(out, mimetype='text/plain',
headers={"function_status": state,
"function_image": image,
"builder_pod": pod})
def get_obj_path(schema, path, user=''):
if schema:
return schema + '://' + path
elif path.startswith('/User/'):
user = user or environ.get('V3IO_USERNAME', 'admin')
return 'v3io:///users/' + user + path[5:]
elif config.httpdb.data_volume and \
path.startswith(config.httpdb.data_volume):
if config.httpdb.real_path:
path = config.httpdb.real_path + \
path[len(config.httpdb.data_volume)-1:]
return path
return None
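# Illustrative mappings performed by get_obj_path() (paths are made up):
#
#   get_obj_path('s3', 'mybucket/a.txt')         -> 's3://mybucket/a.txt'
#   get_obj_path('', '/User/data/a.csv', 'joe')  -> 'v3io:///users/joe/data/a.csv'
#
# Paths under config.httpdb.data_volume are optionally re-rooted onto
# config.httpdb.real_path; anything else yields None and is rejected.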
# curl http://localhost:8080/api/files?schema=s3&path=mybucket/a.txt
@app.route('/api/files', methods=['GET'])
@catch_err
def get_files():
schema = request.args.get('schema', '')
objpath = request.args.get('path', '')
user = request.args.get('user', '')
size = int(request.args.get('size', '0'))
offset = int(request.args.get('offset', '0'))
    filename = Path(objpath).name
objpath = get_obj_path(schema, objpath, user=user)
if not objpath:
return json_error(HTTPStatus.NOT_FOUND, path=objpath,
err='illegal path prefix or schema')
secrets = get_secrets(request)
try:
stores = StoreManager(secrets)
obj = stores.object(url=objpath)
if objpath.endswith('/'):
listdir = obj.listdir()
return jsonify(ok=True, listdir=listdir)
body = obj.get(size, offset)
except FileNotFoundError as e:
return json_error(HTTPStatus.NOT_FOUND, path=objpath, err=str(e))
if body is None:
return json_error(HTTPStatus.NOT_FOUND, path=objpath)
ctype, _ = mimetypes.guess_type(objpath)
if not ctype:
ctype = 'application/octet-stream'
return Response(
body, mimetype=ctype, headers={"x-suggested-filename": filename})
# curl http://localhost:8080/api/filestat?schema=s3&path=mybucket/a.txt
@app.route('/api/filestat', methods=['GET'])
@catch_err
def get_filestat():
schema = request.args.get('schema', '')
path = request.args.get('path', '')
user = request.args.get('user', '')
    filename = Path(path).name
path = get_obj_path(schema, path, user=user)
if not path:
return json_error(HTTPStatus.NOT_FOUND, path=path,
err='illegal path prefix or schema')
secrets = get_secrets(request)
try:
stat = get_object_stat(path, secrets)
except FileNotFoundError as e:
return json_error(HTTPStatus.NOT_FOUND, path=path, err=str(e))
ctype, _ = mimetypes.guess_type(path)
if not ctype:
ctype = 'application/octet-stream'
return jsonify(ok=True, size=stat.size,
# Ensure output is in float32 for softmax operation
x = x.float()
logits = F.softmax(x, dim=1)
return logits
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
self.log("train_loss", loss, prog_bar=True)
self.log("train_acc", self.train_acc(logits, y), prog_bar=True, sync_dist=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
self.log("val_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
self.log("val_acc", self.valid_acc(logits, y), prog_bar=True, sync_dist=True)
def test_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
self.log("test_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
self.log("test_acc", self.test_acc(logits, y), prog_bar=True, sync_dist=True)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
x, y = batch
logits = self.forward(x)
self.test_acc(logits, y)
return self.test_acc.compute()
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
return [optimizer], [{"scheduler": lr_scheduler, "interval": "step"}]
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
if not hasattr(self, "model"):
self.configure_sharded_model()
class ManualModelParallelClassificationModel(ModelParallelClassificationModel):
@property
def automatic_optimization(self) -> bool:
return False
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
opt = self.optimizers()
self.log("train_loss", loss, prog_bar=True)
self.log("train_acc", self.train_acc(logits, y), prog_bar=True, sync_dist=True)
opt.zero_grad()
self.manual_backward(loss)
opt.step()
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_3(tmpdir, deepspeed_config):
"""Test to ensure ZeRO Stage 3 works with a parallel model."""
model = ModelParallelBoringModel()
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedPlugin(stage=3), gpus=2, fast_dev_run=True, precision=16
)
trainer.fit(model)
trainer.test(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_3_manual_optimization(tmpdir, deepspeed_config):
"""Test to ensure ZeRO Stage 3 works with a parallel model."""
model = ModelParallelBoringModelManualOptim()
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedPlugin(stage=3), gpus=2, fast_dev_run=True, precision=16
)
trainer.fit(model)
trainer.test(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
def run_checkpoint_test(tmpdir: str, automatic_optimization: bool = True, accumulate_grad_batches: int = 2):
seed_everything(1)
if automatic_optimization:
model = ModelParallelClassificationModel()
else:
model = ManualModelParallelClassificationModel()
dm = ClassifDataModule()
ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=10,
strategy=DeepSpeedPlugin(stage=3),
gpus=2,
precision=16,
accumulate_grad_batches=accumulate_grad_batches,
callbacks=[ck],
)
trainer.fit(model, datamodule=dm)
results = trainer.test(datamodule=dm)
assert results[0]["test_acc"] > 0.7
saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm)
assert saved_results[0]["test_acc"] > 0.7
assert saved_results == results
if automatic_optimization:
model = ModelParallelClassificationModel()
else:
model = ManualModelParallelClassificationModel()
trainer = Trainer(default_root_dir=tmpdir, gpus=2, strategy=DeepSpeedPlugin(stage=3), precision=16)
results = trainer.test(model, datamodule=dm, ckpt_path=ck.best_model_path)
assert results[0]["test_acc"] > 0.7
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_3_checkpointing(tmpdir):
"""Test to ensure with Stage 3 and multiple GPUs that we can save/load a model resuming from a checkpoint, and
see convergence."""
run_checkpoint_test(tmpdir)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_3_warns_resume_training(tmpdir):
"""Test to ensure with Stage 3 and multiple GPUs that we can resume from training, throwing a warning that the
optimizer state and scheduler states cannot be restored."""
dm = ClassifDataModule()
model = BoringModel()
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
trainer.save_checkpoint(checkpoint_path)
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
strategy=DeepSpeedPlugin(stage=3, load_full_weights=True),
gpus=1,
precision=16,
)
with pytest.warns(
UserWarning,
match="A single checkpoint file has been given. This means optimizer states and "
"scheduler states can not be restored. If you'd like to restore these states, you must "
"provide a path to the originally saved DeepSpeed checkpoint.",
):
trainer.fit(model, datamodule=dm, ckpt_path=checkpoint_path)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_3_resume_training(tmpdir):
"""Test to ensure with Stage 3 and multiple GPUs that we can resume training."""
initial_model = ModelParallelClassificationModel()
dm = ClassifDataModule()
ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
initial_trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
strategy=DeepSpeedPlugin(stage=3),
gpus=1,
precision=16,
callbacks=[ck],
)
initial_trainer.fit(initial_model, datamodule=dm)
class TestCallback(Callback):
def on_train_batch_start(
self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int
) -> None:
original_deepspeed_plugin = initial_trainer.accelerator.training_type_plugin
current_deepspeed_plugin = trainer.accelerator.training_type_plugin
assert isinstance(original_deepspeed_plugin, DeepSpeedPlugin)
assert isinstance(current_deepspeed_plugin, DeepSpeedPlugin)
            # assert optimizer states are correctly loaded
original_optimizer_dict = original_deepspeed_plugin.deepspeed_engine.optimizer.state_dict()
current_optimizer_dict = current_deepspeed_plugin.deepspeed_engine.optimizer.state_dict()
for orig_tensor, current_tensor in zip(
original_optimizer_dict["fp32_flat_groups"], current_optimizer_dict["fp32_flat_groups"]
):
assert torch.all(orig_tensor.eq(current_tensor))
# assert model state is loaded correctly
for current_param, initial_param in zip(pl_module.parameters(), initial_model.parameters()):
assert torch.equal(current_param.cpu(), initial_param.cpu())
# assert epoch has correctly been restored
assert trainer.current_epoch == 1
model = ModelParallelClassificationModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
strategy=DeepSpeedPlugin(stage=3),
gpus=1,
precision=16,
callbacks=TestCallback(),
)
trainer.fit(model, datamodule=dm, ckpt_path=ck.best_model_path)
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_3_checkpointing_full_weights_manual(tmpdir):
"""Test to ensure with Stage 3 and multiple GPUs that we can save/load a model resuming from a checkpoint,
where we save the full weights to one file."""
run_checkpoint_test(tmpdir, automatic_optimization=False, accumulate_grad_batches=1)
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir):
_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer=False)
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_stage_2_accumulated_grad_batches_offload_optimizer(tmpdir):
_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer=True)
def _deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer):
"""Test to ensure with Stage 2 and multiple GPUs, accumulated grad batches works."""
seed_everything(42)
class VerificationCallback(Callback):
def __init__(self):
self.on_train_batch_start_called = False
def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, batch_idx: int) -> None:
deepspeed_engine = trainer.training_type_plugin.model
assert trainer.global_step == deepspeed_engine.global_steps
self.on_train_batch_start_called = True
model = ModelParallelClassificationModel()
dm = ClassifDataModule()
verification_callback = VerificationCallback()
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
# TODO: this test fails with max_epochs >1 as there are leftover batches per epoch.
# there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it.
# we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch
max_epochs=1,
strategy=DeepSpeedPlugin(stage=2, offload_optimizer=offload_optimizer),
gpus=2,
limit_train_batches=5,
limit_val_batches=2,
precision=16,
accumulate_grad_batches=2,
callbacks=[verification_callback],
)
assert trainer.limit_train_batches % trainer.accumulate_grad_batches != 0, "leftover batches should be tested"
trainer.fit(model, datamodule=dm)
assert verification_callback.on_train_batch_start_called
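# Note on the leftover-batch assertion above (an illustrative calculation,
# not from the source): with limit_train_batches=5 and
# accumulate_grad_batches=2, 5 % 2 == 1, so the final batch of the epoch is
# a leftover that Lightning steps the optimizer on, while DeepSpeed would
# otherwise carry its gradients into the next accumulation window.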
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_test(tmpdir):
"""Test to ensure we can use DeepSpeed with just test using ZeRO Stage 3."""
model = ModelParallelBoringModel()
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedPlugin(stage=3), gpus=2, fast_dev_run=True, precision=16
)
trainer.test(model)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_partial_partition_parameters(tmpdir):
"""Test to ensure that a module that defines a layer inside the ``__init__`` and ``configure_sharded_model``
correctly converts all parameters to float16 when ``precision=16`` and runs successfully."""
class TestModel(ModelParallelBoringModel):
def __init__(self):
super().__init__()
self.layer_2 = torch.nn.Linear(32, 32)
def configure_sharded_model(self) -> None:
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
x = self.layer_2(x)
return self.layer(x)
def on_train_epoch_start(self) -> None:
assert all([x.dtype == torch.float16 for x in self.parameters()])
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedPlugin(stage=3), gpus=1, fast_dev_run=True, precision=16
)
trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_test_rnn(tmpdir):
"""Test to ensure that turning off explicit partitioning of the entire module for ZeRO Stage 3 works when
training with certain layers which will crash with explicit partitioning."""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.rnn = torch.nn.GRU(32, 32)
def on_train_epoch_start(self) -> None:
assert all([x.dtype == torch.float16 for x in self.parameters()])
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedPlugin(stage=3, partition_module=False),
gpus=1,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
@RunIf(deepspeed=True)
@mock.patch("deepspeed.init_distributed", autospec=True)
@pytest.mark.parametrize("platform", ["Linux", "Windows"])
def test_deepspeed_plugin_env_variables(mock_deepspeed_distributed, tmpdir, platform):
"""Test to ensure that we setup distributed communication using correctly.
When using windows, ranks environment variables should not be set, and deepspeed should handle this.
"""
trainer = Trainer(default_root_dir=tmpdir, strategy=DeepSpeedPlugin(stage=3))
plugin = trainer.training_type_plugin
assert isinstance(plugin, DeepSpeedPlugin)
with mock.patch("platform.system", return_value=platform) as mock_platform:
plugin._init_deepspeed_distributed()
mock_deepspeed_distributed.assert_called()
mock_platform.assert_called()
if platform == "Windows":
# assert no env variables have been set within the DeepSpeedPlugin
assert all(k not in os.environ for k in ("MASTER_PORT", "MASTER_ADDR", "RANK", "WORLD_SIZE", "LOCAL_RANK"))
else:
assert os.environ["MASTER_ADDR"] == str(trainer.training_type_plugin.cluster_environment.main_address)
assert os.environ["MASTER_PORT"] == str(trainer.training_type_plugin.cluster_environment.main_port)
assert os.environ["RANK"] == str(trainer.training_type_plugin.global_rank)
assert os.environ["WORLD_SIZE"] == str(trainer.training_type_plugin.world_size)
assert os.environ["LOCAL_RANK"] == str(trainer.training_type_plugin.local_rank)
def _assert_save_model_is_equal(model, tmpdir, trainer):
checkpoint_path = os.path.join(tmpdir, "model.pt")
checkpoint_path = trainer.training_type_plugin.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
trainer.training_type_plugin.barrier()
# carry out the check only on rank 0
if trainer.is_global_zero:
single_ckpt_path = os.path.join(tmpdir, "single_model.pt")
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, single_ckpt_path)
state_dict = torch.load(single_ckpt_path)
model = model.cpu()
# Assert model parameters are identical after loading
for orig_param, saved_model_param in zip(model.parameters(), state_dict.values()):
            if model.dtype == torch.half:
                # the saved weights are fp32; cast them to half to compare with the fp16 model
                saved_model_param = saved_model_param.half()
assert torch.equal(orig_param, saved_model_param)
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_multigpu_no_schedulers(tmpdir):
"""Test to ensure ZeRO Stage 3 works with a parallel model and no schedulers."""
model = ModelParallelBoringModelNoSchedulers()
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedPlugin(stage=3), gpus=2, fast_dev_run=True, precision=16
)
trainer.fit(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_skip_backward_raises(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
return None
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, strategy=DeepSpeedPlugin(), gpus=1, fast_dev_run=True, precision=16)
with pytest.raises(MisconfigurationException, match="returning `None` .* is not supported"):
trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_setup_train_dataloader(tmpdir):
"""Test DeepSpeed works when setup is required to call in the DataModule."""
class TestSetupIsCalledDataModule(LightningDataModule):
def __init__(self):
super().__init__()
self._setup = False
def setup(self, stage: Optional[str] = None) -> None:
self._setup = True
def train_dataloader(self):
assert self._setup
return DataLoader(RandomDataset(32, 64), batch_size=2)
def val_dataloader(self):
assert self._setup
return DataLoader(RandomDataset(32, 64), batch_size=2)
def test_dataloader(self):
assert self._setup
return DataLoader(RandomDataset(32, 64), batch_size=2)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedPlugin(logging_level=logging.INFO),
gpus=1,
fast_dev_run=True,
)
dm = TestSetupIsCalledDataModule()
with mock.patch("deepspeed.utils.logging.logger.warning", autospec=True) as mock_object:
trainer.fit(model, datamodule=dm)
assert any("Tried to infer the batch size" in str(arg) for arg in mock_object.call_args_list)
@mock.patch("torch.optim.lr_scheduler.StepLR.step", autospec=True)
@RunIf(min_gpus=1, deepspeed=True, standalone=True)
def test_deepspeed_scheduler_step_count(mock_step):
"""Test to ensure that the scheduler is called the correct amount of | |
######################################################################
# #
# Copyright 2009-2020 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys
if sys.version_info.major < 3:
from builtins import zip
from builtins import open
from builtins import int
from builtins import round
from builtins import range
from builtins import super
from future import standard_library
standard_library.install_aliases()
# Python 2 doesn't have the pathlib module.
Path = basestring
else:
from pathlib import Path
# Python 3 doesn't have basestring, as unicode is type string
# Python 2 doesn't equate unicode to string, but both are basestring
# Now isinstance(s, basestring) will be True for any python version
basestring = str
import numpy
import datetime
import struct
import itertools
import colorsys
import warnings
import copy as libcopy
from gdspy.polygon import PolygonSet, Polygon
from gdspy.path import FlexPath, RobustPath
from gdspy.label import Label
from gdspy.gdsiiformat import (
_record_reader,
_raw_record_reader,
_eight_byte_real,
_eight_byte_real_to_float,
)
_mpone = numpy.array((-1.0, 1.0))
use_current_library = True
"""
Globally enable or disable adding newly-created cells to the current_library.
"""
class Cell(object):
"""
    Collection of polygons, paths, labels and references to other cells.
.. deprecated:: 1.5
The parameter `exclude_from_current` has been deprecated
alongside the use of a global library. It will be removed in a
future version of Gdspy.
Parameters
----------
name : string
The name of the cell.
Attributes
----------
name : string
The name of this cell.
polygons : list of `PolygonSet`
List of cell polygons.
paths : list of `RobustPath` or `FlexPath`
List of cell paths.
labels : list of `Label`
List of cell labels.
references : list of `CellReference` or `CellArray`
List of cell references.
"""
__slots__ = (
"name",
"polygons",
"paths",
"labels",
"references",
"_bb_valid",
"_bounding_box",
)
def __init__(self, name, exclude_from_current=False):
self.name = name
self.polygons = []
self.paths = []
self.labels = []
self.references = []
self._bb_valid = False
self._bounding_box = None
if use_current_library and not exclude_from_current:
import gdspy
gdspy.current_library.add(self, include_dependencies=False)
def __str__(self):
return 'Cell ("{}", {} polygons, {} paths, {} labels, {} references)'.format(
self.name,
len(self.polygons),
len(self.paths),
len(self.labels),
len(self.references),
)
def __iter__(self):
return itertools.chain(self.polygons, self.paths, self.labels, self.references)
def to_gds(self, outfile, multiplier, timestamp=None):
"""
Convert this cell to a GDSII structure.
Parameters
----------
outfile : open file
Output to write the GDSII.
multiplier : number
A number that multiplies all dimensions written in the GDSII
structure.
timestamp : datetime object
Sets the GDSII timestamp. If None, the current time is
used.
"""
now = datetime.datetime.today() if timestamp is None else timestamp
name = self.name
if len(name) % 2 != 0:
name = name + "\0"
outfile.write(
struct.pack(
">2H12h2H",
28,
0x0502,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
4 + len(name),
0x0606,
)
)
outfile.write(name.encode("ascii"))
for polygon in self.polygons:
polygon.to_gds(outfile, multiplier)
for path in self.paths:
path.to_gds(outfile, multiplier)
for label in self.labels:
label.to_gds(outfile, multiplier)
for reference in self.references:
reference.to_gds(outfile, multiplier)
outfile.write(struct.pack(">2H", 4, 0x0700))
def copy(
self,
name,
deep_copy=False,
translation=None,
rotation=None,
scale=None,
x_reflection=False,
):
"""
Create a copy of this cell.
Parameters
----------
name : string
The name of the cell.
deep_copy : bool
If False, the new cell will contain only references to the
existing elements. If True, copies of all elements are also
created. If any transformation is performed, this argument
is automatically set to True.
translation : Numpy array[2]
Amount to translate the cell contents.
rotation : number
Rotation angle (in *radians*).
scale : number
Scaling factor.
x_reflection : bool
        Reflect the geometry across the x axis.
Returns
-------
out : `Cell`
The new copy of this cell.
"""
new_cell = Cell(name)
transform = False
if (
x_reflection
or scale is not None
or rotation is not None
or translation is not None
):
transform = True
deep_copy = True
if not deep_copy:
new_cell.polygons = list(self.polygons)
new_cell.paths = list(self.paths)
new_cell.labels = list(self.labels)
new_cell.references = list(self.references)
return new_cell
new_cell.polygons = libcopy.deepcopy(self.polygons)
new_cell.paths = libcopy.deepcopy(self.paths)
new_cell.labels = libcopy.deepcopy(self.labels)
new_cell.references = [libcopy.copy(ref) for ref in self.references]
if transform:
r = -1 if x_reflection else 1
s = 1 if scale is None else scale
t = 0 if rotation is None else rotation
dx, dy = (0, 0) if translation is None else translation
ct = numpy.cos(t)
st = numpy.sin(t)
for poly in new_cell.polygons:
if x_reflection:
poly.scale(1, -1)
if scale is not None:
poly.scale(scale)
if rotation is not None:
poly.rotate(rotation)
if translation is not None:
poly.translate(dx, dy)
for path in new_cell.paths:
path.transform(translation, rotation, scale, x_reflection)
for lbl in new_cell.labels:
                r0 = -1 if lbl.x_reflection else 1
s0 = 1 if lbl.magnification is None else lbl.magnification
t0 = 0 if lbl.rotation is None else (lbl.rotation * numpy.pi / 180)
dx0, dy0 = lbl.position
lbl.position = (
dx + s * (dx0 * ct - r * dy0 * st),
dy + s * (dx0 * st + r * dy0 * ct),
)
lbl.rotation = (r * t0 + t) * 180 / numpy.pi
if lbl.rotation == 0:
lbl.rotation = None
lbl.magnification = s * s0
if lbl.magnification == 1:
lbl.magnification = None
lbl.x_reflection = r * r0 < 0
for ref in new_cell.references:
                r0 = -1 if ref.x_reflection else 1
s0 = 1 if ref.magnification is None else ref.magnification
t0 = 0 if ref.rotation is None else (ref.rotation * numpy.pi / 180)
dx0, dy0 = ref.origin
ref.origin = (
dx + s * (dx0 * ct - r * dy0 * st),
dy + s * (dx0 * st + r * dy0 * ct),
)
ref.rotation = (r * t0 + t) * 180 / numpy.pi
if ref.rotation == 0:
ref.rotation = None
ref.magnification = s * s0
if ref.magnification == 1:
ref.magnification = None
ref.x_reflection = r * r0 < 0
return new_cell
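    # A small usage sketch (names are illustrative only): copying a cell with
    # a transformation forces a deep copy of its contents.
    #
    #   import numpy
    #   original = Cell("UNIT")
    #   original.add(Polygon([(0, 0), (1, 0), (0, 1)]))
    #   rotated = original.copy("UNIT_R90", rotation=numpy.pi / 2)
    #
    # `rotated` holds its own rotated polygon copies, while
    # original.copy("UNIT_ALIAS") without transforms only shares references.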
def add(self, element):
"""
Add a new element or list of elements to this cell.
Parameters
----------
element : `PolygonSet`, `CellReference`, `CellArray` or iterable
The element or iterable of elements to be inserted in this
cell.
Returns
-------
out : `Cell`
This cell.
"""
if isinstance(element, PolygonSet):
self.polygons.append(element)
elif isinstance(element, RobustPath) or isinstance(element, FlexPath):
self.paths.append(element)
elif isinstance(element, Label):
self.labels.append(element)
elif isinstance(element, CellReference) or isinstance(element, CellArray):
self.references.append(element)
else:
for e in element:
if isinstance(e, PolygonSet):
self.polygons.append(e)
elif isinstance(e, RobustPath) or isinstance(e, FlexPath):
self.paths.append(e)
elif isinstance(e, Label):
self.labels.append(e)
elif isinstance(e, CellReference) or isinstance(e, CellArray):
self.references.append(e)
else:
raise ValueError(
"[GDSPY] Only instances of `PolygonSet`, `FlexPath`, "
"`RobustPath`, `Label`, `CellReference`, and "
"`CellArray` can be added to `Cell`."
)
self._bb_valid = False
return self
def remove_polygons(self, test):
"""
Remove polygons from this cell.
The function or callable `test` is called for each polygon in
the cell. If its return value evaluates to True, the
corresponding polygon is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a polygon should be removed.
The function is called with arguments:
``(points, layer, datatype)``
Returns
-------
out : `Cell`
This cell.
Examples
--------
Remove polygons in layer 1:
>>> cell.remove_polygons(lambda pts, layer, datatype:
... layer == 1)
Remove polygons with negative x coordinates:
>>> cell.remove_polygons(lambda pts, layer, datatype:
... any(pts[:, 0] < 0))
"""
empty = []
for element in self.polygons:
ii = 0
while ii < len(element.polygons):
if test(
element.polygons[ii], element.layers[ii], element.datatypes[ii]
):
element.polygons.pop(ii)
element.layers.pop(ii)
element.datatypes.pop(ii)
else:
ii += 1
if len(element.polygons) == 0:
empty.append(element)
for element in empty:
self.polygons.remove(element)
return self
def remove_paths(self, test):
"""
Remove paths from this cell.
The function or callable `test` is called for each `FlexPath`
or `RobustPath` in the cell. If its return value evaluates to
        True, the corresponding path is removed from the cell.
Parameters
----------
test : callable
Test function to query whether a path should be removed.
The function is called with the path as the only argument.
Returns
-------
out : `Cell`
This cell.
"""
ii = 0
while ii < len(self.paths):
if test(self.paths[ii]):
self.paths.pop(ii)
else:
ii += | |
# mechanism_dvs.py
#
# Copyright 2014 Cybercom Finland Oy
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Author:
# <NAME> <<EMAIL>>
"""Implentation of VMware dvSwitch ML2 Mechanism driver for Neutron"""
import time
import threading
import random
from oslo.config import cfg
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import driver_api as api
from pyVim.connect import SmartConnect as SmartConnect
from pyVim.connect import Disconnect as Disconnect
from pyVmomi import vim, vmodl
LOG = logging.getLogger(__name__)
MECHANISM_VERSION = 0.42
NET_TYPES_SUPPORTED = ('vlan',)
# Do some test fuzzing in worker thread?
TEST_FUZZING = False
# Compared against random.random() which returns a float between 0..1
# Use greater than 1.0 to disable a particular fuzzing.
TEST_FUZZ_WORKER_DIE = 2.0
TEST_FUZZ_WORKER_BLOCK = 2.0
TEST_FUZZ_DISCONNECT = 0.98
ML2_DVS = [
cfg.StrOpt('vsphere_server', default='127.0.0.1',
help=_('The server hostname or IP address'
' of the vSphere SOAP API')),
cfg.StrOpt('vsphere_user', default='admin',
help=_('The username to use for vSphere API')),
cfg.StrOpt('vsphere_pass', default='password', secret=True,
help=_('The password to use for vSphere API')),
cfg.StrOpt('dvs_name', default='mydvswitch',
help=_('The name of VMware dvSwitch to use')),
cfg.StrOpt('vsphere_proto', default='https',
help=_('The vSphere API protocol: http or https')),
cfg.IntOpt('vsphere_port', default=443,
help=_('The vSphere API port, usually 80 or 443')),
cfg.StrOpt('vsphere_path', default='/sdk',
help=_('The vSphere API path, usually /sdk')),
cfg.IntOpt('dvs_refresh_interval', default=300,
help=_('How often to refresh dvSwitch portgroup information'
' from vSphere')),
cfg.StrOpt('portgroup_prefix', default='',
help=_('The prefix to prepend to port group names in vSphere')),
cfg.IntOpt('todo_loop_interval', default=2,
help=_('How often to poll TODO list for'
' doable or expired work')),
cfg.IntOpt('todo_initial_wait', default=10,
help=_('How long to wait before initial attempt'
' to reconfigure a new VM')),
cfg.IntOpt('todo_polling_interval', default=6,
help=_('How long to wait before another attempt'
' to check a particular VM')),
cfg.IntOpt('todo_expire_time', default=300,
help=_('How long to keep trying for a particular VM')),
cfg.IntOpt('todo_vsphere_keepalive', default=300,
help=_('How often to ask vSphere server for timestamp'
' in order to keep login session alive')),
]
cfg.CONF.register_opts(ML2_DVS, "ml2_dvs")
class DvsConfigError(exceptions.NeutronException):
message = _('%(msg)s')
class DvsRuntimeError(exceptions.NeutronException):
message = _('%(msg)s')
TODO_CLASS_DEFAULT_EXPIRE = 300
class TodoEntry():
def __init__(self, item, starttime=None, expiretime=None):
if not starttime: starttime = time.time()
if not expiretime: expiretime = starttime + TODO_CLASS_DEFAULT_EXPIRE
self.starttime = starttime
self.expiretime = expiretime
self.done = False
self.item = item
class TodoList():
def __init__(self):
self.todo = []
self.lock = threading.Lock()
def _cleanup(self, now=None):
if not now: now = time.time()
with self.lock:
            # iterate over a copy: removing from the list while iterating would skip entries
            for entry in list(self.todo):
                if entry.done:
                    self.todo.remove(entry)
                elif now >= entry.expiretime:
                    LOG.warn(_("Expired todo task: %s" % repr(entry)))
                    self.todo.remove(entry)
return self
def add(self, item, starttime, expiretime):
now = time.time()
LOG.info(_("todo add item=%s now=%d starttime-delta %d expire-delta %d"
% (repr(item), now, starttime-now, expiretime-now)))
entry = TodoEntry(item, starttime=starttime, expiretime=expiretime)
with self.lock:
self.todo.append(entry)
return self
def get_tasks(self):
doable_list = []
now = time.time()
self._cleanup(now)
with self.lock:
for entry in self.todo:
if now >= entry.starttime:
doable_list.append(entry)
return tuple(doable_list)
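# A minimal usage sketch of TodoList as the driver below uses it
# (do_work is a hypothetical stand-in; timings are illustrative only):
#
#   todo = TodoList()
#   now = time.time()
#   todo.add(('vm-name', 'network-id'), starttime=now + 10, expiretime=now + 300)
#   for entry in todo.get_tasks():      # only entries whose starttime has passed
#       if do_work(entry.item):
#           entry.done = True           # _cleanup() drops it on the next pass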
class VmwareDvswitchMechanismDriver(api.MechanismDriver):
"""ML2 Mechanism driver for VMWare dvSwitches"""
def __init__(self):
LOG.info(_("dvs.__init__() called"))
try:
self.vsphere_server = cfg.CONF.ml2_dvs.vsphere_server
self.vsphere_user = cfg.CONF.ml2_dvs.vsphere_user
self.vsphere_pass = cfg.CONF.ml2_dvs.vsphere_pass
self.vsphere_proto = cfg.CONF.ml2_dvs.vsphere_proto
self.vsphere_port = int(cfg.CONF.ml2_dvs.vsphere_port)
self.vsphere_path = cfg.CONF.ml2_dvs.vsphere_path
self.dvs_name = cfg.CONF.ml2_dvs.dvs_name
self.dvs_refresh_interval = int(cfg.CONF.ml2_dvs.dvs_refresh_interval)
self.portgroup_prefix = cfg.CONF.ml2_dvs.portgroup_prefix
self.todo_loop_interval = int(cfg.CONF.ml2_dvs.todo_loop_interval)
self.todo_initial_wait = int(cfg.CONF.ml2_dvs.todo_initial_wait)
self.todo_polling_interval = int(cfg.CONF.ml2_dvs.todo_polling_interval)
self.todo_expire_time = int(cfg.CONF.ml2_dvs.todo_expire_time)
self.todo_vsphere_keepalive = int(cfg.CONF.ml2_dvs.todo_vsphere_keepalive)
self.si_lock = threading.Lock()
self.dvs_lock = threading.Lock()
self.si = None
self.todo = TodoList()
self.worker_local = threading.local()
self.watchdog = threading.Thread(target=self._todo_watchdog,
name="ml2_mech_dvs_watchdog")
self.watchdog.daemon = True
self.watchdog_local = threading.local()
except Exception as error:
msg = (_("Could not Initialize parameters: %(err)s") %
{'err': error})
LOG.exception(msg)
raise DvsConfigError(msg=msg)
# instance init okay
return None
def initialize(self):
LOG.info(_("ML2 vmware dvswitch mech driver initializing"))
now = time.time()
self._init_si()
self._update_dvs()
self.pg_ts = now
self._start_worker(now)
self.watchdog.start()
LOG.info(_("dvs driver initialized: dvs_name=%s dvs_refresh=%d" %
(self.dvs_name, self.dvs_refresh_interval)))
return self
def _start_worker(self, now):
self.todo_watchdog = now
self.worker = threading.Thread(target=self._todo_worker,
name="ml2_mech_dvs_worker")
self.worker.daemon = True
self.worker.start()
return self
def _todo_eligible(self):
# Is this thread eligible for working,
# or possibly forgotten and abandoned by main program?
if self.worker.ident == self.worker_local.thread_id:
return True
else:
# The main thread has started another worker and it is not me
LOG.info(_("abandoned worker thread %d stopping" %
self.worker_local.thread_id))
return False
def _todo_worker(self):
# Record our own thread-id to thread-local storage
self.worker_local.thread_id = self.worker.ident
LOG.info(_("Worker %d started:"
" loop interval: %d initial wait: %d"
" polling interval: %d expire time: %d" %
(self.worker_local.thread_id,
self.todo_loop_interval, self.todo_initial_wait,
self.todo_polling_interval, self.todo_expire_time)))
keepalive_last = 0
while True:
# Do not busyloop
time.sleep(self.todo_loop_interval)
# Test fuzzing, deliberately generate random failures in worker
if TEST_FUZZING:
if random.random() > TEST_FUZZ_WORKER_DIE:
LOG.info(_("accidentally, worker %d dies" %
self.worker_local.thread_id))
raise DvsRuntimeError()
if random.random() > TEST_FUZZ_WORKER_BLOCK:
LOG.info(_("suddenly, worker %d blocks" %
self.worker_local.thread_id))
time.sleep(300)
if random.random() > TEST_FUZZ_DISCONNECT:
LOG.info(_("accidentally, disconnect from vsphere"))
try:
Disconnect(self.si)
except Exception as error:
msg = (_("Disconnect failed: %(err)s") %
{'err': error})
LOG.exception(msg)
# EOF (End Of Fuzzing)
if not self._todo_eligible(): return None
now = time.time()
# Update watchdog timestamp
self.todo_watchdog = now
# Test and keep vsphere session alive
if now > keepalive_last + self.todo_vsphere_keepalive:
self._check_si()
keepalive_last = now
# Update dvswitch portgroup data if stale
self._check_dvs()
# Check my work list
tasks = self.todo.get_tasks()
if tasks: LOG.info(_("Worker %d found %d doable tasks" %
(self.worker_local.thread_id, len(tasks))))
# Do the needful
for entry in tasks:
if not self._todo_eligible(): return None
LOG.info(_("Worker %d trying to connect vm %s to network %s") %
(self.worker_local.thread_id,
entry.item[0], entry.item[1]))
if self._connect_vm(entry.item[0], entry.item[1]):
entry.done = True
else:
entry.starttime = now + self.todo_polling_interval
# Do not spam vsphere
time.sleep(1)
def _check_worker(self):
now = time.time()
if now > self.todo_watchdog + 3 * self.todo_polling_interval:
LOG.info(_("Worker watchdog expired!"))
if self.worker.is_alive():
LOG.info(_("My worker is still alive! Is it hung?"))
else:
LOG.info(_("My worker thread is dead!"))
self._start_worker(now)
return False
return True
def _todo_watchdog(self):
self.watchdog_local.thread_id = self.watchdog.ident
LOG.info(_("watchdog thread %d started" %
self.watchdog_local.thread_id))
while True:
# Do not busyloop
time.sleep(5)
try:
if not self._check_worker():
LOG.info(_("watchdog started a new worker"))
except Exception as error:
msg = (_("Wat? Watchdog check failed, error: %(err)s") %
{'err': error})
LOG.info(msg)
def _check_si(self):
LOG.info(_("worker: vsphere keepalive"))
try:
ret = self.si.CurrentTime()
except Exception as error:
if not self.si == None:
msg = (_("check_si failed, error: %(err)s") %
{'err': error})
LOG.info(msg)
self._init_si()
return self
def _init_si(self):
if not self.si_lock.acquire(blocking=False):
# Another thread must be already doing this. Bailing out.
return self
try:
LOG.info(_("CONNECT - proto %s server %s port %d path %s"
" user %s dvs_name %s") %
(self.vsphere_proto, self.vsphere_server,
self.vsphere_port, self.vsphere_path,
self.vsphere_user, self.dvs_name))
self.si = SmartConnect(protocol=self.vsphere_proto,
host=self.vsphere_server,
port=self.vsphere_port,
path=self.vsphere_path,
user=self.vsphere_user,
pwd=self.vsphere_pass)
self.si_lock.release()
except Exception as error:
self.si = None
self.si_lock.release()
msg = (_("Could not connect to vsphere server: %(err)s") %
{'err': error})
LOG.exception(msg)
raise DvsRuntimeError(msg=msg)
return self
def _check_dvs(self):
"""Periodically update dvs metadata from vsphere"""
# Do we need to refresh dvSwitch information?
if time.time() < self.pg_ts + self.dvs_refresh_interval:
return self
# Possibly stale dvs information, update it and store the timestamp.
# My name is Case, <NAME>.
if not self.dvs_lock.acquire(blocking=False):
# Some other thread is already doing this.
return self
try:
self._update_dvs()
self.pg_ts = time.time()
self.dvs_lock.release()
except Exception as error:
# Will try again after dvs_refresh_interval
self.pg_ts = time.time()
# Retain the old cache
if not self.dvs_uuid:
self.dvs_uuid = None
if not self.pg_key:
self.pg_key = None
if not self.pg_name:
self.pg_name = None
self.dvs_lock.release()
msg = (_("dvs update failed: %(err)s") %
{'err': error})
LOG.exception(msg)
raise DvsRuntimeError(msg=msg)
return self
def _update_dvs(self):
"""Update dvswitch data from vsphere"""
# Should not be called from any other method than
# initialize() or _check_dvs()
LOG.info(_("Updating dvswitch data"))
c = self.si.content
mydvs = None
self.dvs_uuid = None
oview = c.viewManager.CreateContainerView(c.rootFolder, [vim.DistributedVirtualSwitch], True)
for dvs in oview.view:
if not dvs.name == self.dvs_name: continue
mydvs = dvs
self.dvs_uuid = dvs.summary.uuid
break
oview.Destroy()
if not mydvs:
msg = (_("Could not find dvs \"%s\"") % self.dvs_name)
LOG.exception(msg)
raise DvsRuntimeError(msg=msg)
pg_key = {}
pg_name = {}
for pg in mydvs.portgroup:
pg_key[pg.config.name] = pg.key
pg_name[pg.key] = pg.config.name
# Atomic
self.pg_key = pg_key
self.pg_name = pg_name
return self
def _find_vm(self, name):
"""Find VM | |
###########################################
# Project: CMSIS DSP Library
# Title: node.py
# Description: Node class for description of dataflow graph
#
# $Date: 29 July 2021
# $Revision: V1.10.0
#
# Target Processor: Cortex-M and Cortex-A cores
# -------------------------------------------------------------------- */
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################
"""Description of the basic types used to build a dataflow graph"""
from jinja2 import Environment, FileSystemLoader, PackageLoader,select_autoescape
import pathlib
import os.path
class NoFunctionArrayInPython(Exception):
pass
def camelCase(st):
output = ''.join(x for x in st.title() if x.isalnum())
return output[0].lower() + output[1:]
def joinit(iterable, delimiter):
it = iter(iterable)
yield next(it)
for x in it:
yield delimiter
yield x
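# For reference, list(joinit(["a", "b", "c"], ",")) == ["a", ",", "b", ",", "c"];
# an empty iterable is not handled here (the initial next(it) would fail).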
### Definition of the IOs
class IO:
"""Class of input / outputs"""
def __init__(self,owner,name,theType,nbSamples):
self._theType = theType
self._nbSamples = nbSamples
self._owner = owner
self._name = name
self._fifo = None
self.constantNode = None
@property
def fifo(self):
return self._fifo
## The setter method name must match the property name so it can be used to set the attribute value
@fifo.setter
def fifo(self, var):
self._fifo = var
def compatible(self,other):
return(self._theType == other._theType)
@property
def owner(self):
return self._owner
@property
def name(self):
return self._name
@property
def ctype(self):
"""ctype string """
return self._theType.ctype
@property
def nptype(self):
"""ctype string """
return self._theType.nptype
@property
def theType(self):
return self._theType
@property
def dspExtension(self):
return self._theType.dspExtension
@property
def graphViztype(self):
return self._theType.graphViztype
@property
def nbSamples(self):
return self._nbSamples
class Input(IO):
"""Node input"""
pass
class Output(IO):
"""Node output"""
pass
### Definition of the nodes types
class Constant:
""" Represent a constant object.
A constant object is ignored for the scheduling.
But it can be connected to CMSIS-DSP inputs.
It is generated as DEFINE
"""
def __init__(self,name):
self._name = name
@property
def name(self):
return self._name
@property
def isConstantNode(self):
return True
class SchedArg:
"""Class for arguments of the scheduler functions.
They can either be a literal arg like string, boolean
or number or they can be a variable name"""
def __init__(self,name):
self._name=name
class ArgLiteral(SchedArg):
def __init__(self,name):
super().__init__(name)
@property
def arg(self):
if isinstance(self._name,str):
return("\"%s\"" % self._name)
else:
return(str(self._name))
class VarLiteral(SchedArg):
def __init__(self,name):
super().__init__(name)
@property
def arg(self):
return(self._name)
class BaseNode:
"""Root class for all Nodes of a dataflow graph.
To define a new kind of node, inherit from this class"""
def __init__(self,name):
"""Create a new kind of Node.
name :: The name of the node which is used as
a C variable in final code"""
self._nodeName = name
self._nodeID = name
self._inputs={}
self._outputs={}
# For code generations
# The fifo args
self._args=""
# Literal arguments
self.schedArgs=None
def __getattr__(self,name):
"""Present inputs / outputs as attributes"""
if name in self._inputs:
return(self._inputs[name])
if name in self._outputs:
return(self._outputs[name])
raise AttributeError
def __getitem__(self,name):
"""Present inputs / outputs as keys"""
if name in self._inputs:
return(self._inputs[name])
if name in self._outputs:
return(self._outputs[name])
raise IndexError
def addLiteralArg(self,l):
if self.schedArgs:
self.schedArgs.append(ArgLiteral(l))
else:
self.schedArgs=[ArgLiteral(l)]
def addVariableArg(self,l):
if self.schedArgs:
self.schedArgs.append(VarLiteral(l))
else:
self.schedArgs=[VarLiteral(l)]
@property
def isConstantNode(self):
return False
@property
def hasState(self):
"""False if the node is a pure functiom with no state
and no associated C++ object
"""
return(True)
@property
def typeName(self):
return "void"
@property
def nodeID(self):
"""Node ID to uniquely identify a node"""
return self._nodeID
@property
def nodeName(self):
"""Node name displayed in graph
It could be the same for different nodes if the
node is just a function with no state.
"""
return self._nodeName
# For code generation
def allIOs(self):
"""Get list of IO objects for inputs and outputs"""
ins=[]
outs=[]
# Use ordered io names
for io in self.inputNames:
x = self._inputs[io]
ins.append(x)
for io in self.outputNames:
x = self._outputs[io]
outs.append(x)
return(ins,outs)
def ioTemplate(self):
"""Template arguments for C
input type, input size ...
output type, output size ...
Some nodes may customize it
"""
ios=[]
# Use ordered io names
for io in self.inputNames:
x = self._inputs[io]
ios.append("%s,%d" % (x.ctype,x.nbSamples))
for io in self.outputNames:
x = self._outputs[io]
ios.append("%s,%d" % (x.ctype,x.nbSamples))
return("".join(joinit(ios,",")))
def pythonIoTemplate(self):
"""Template arguments for Python
input type, input size ...
output type, output size ...
Some nodes may customize it
"""
ios=[]
# Use ordered io names
for io in self.inputNames:
x = self._inputs[io]
ios.append("%d" % x.nbSamples)
for io in self.outputNames:
x = self._outputs[io]
ios.append("%d" % x.nbSamples)
return("".join(joinit(ios,",")))
def cRun(self,ctemplate=True):
"""Run function
Some nodes may customize it
"""
if ctemplate:
return ("sdfError = %s.run();" % self.nodeName)
else:
return ("sdfError = %s.run()" % self.nodeName)
def cFunc(self,ctemplate=True):
"""Function call for code array scheduler
Some nodes may customize it
"""
if ctemplate:
return ("(runNode)&%s<%s>::run" % (self.typeName,self.ioTemplate()))
else:
raise NoFunctionArrayInPython
@property
def listOfargs(self):
"""List of fifos args for object initialization"""
return self._args
@property
def args(self):
"""String of fifo args for object initialization
with literal argument and variable arguments"""
allArgs=self.listOfargs
# Add specific args after FIFOs
if self.schedArgs:
for lit in self.schedArgs:
allArgs.append(lit.arg)
return "".join(joinit(allArgs,","))
@args.setter
def args(self,fifoIDs):
res=[]
# Template args is used only for code array
# scheduler when we create on the fly a new class
# for a function.
# In this case, the arguments of the template must only be
# fifos and not constant.
templateargs=[]
for x in fifoIDs:
# If args is a FIFO we generate a name using fifo ids
if isinstance(x,int):
res.append("fifo%d" % x)
templateargs.append("fifo%d" % x)
# If args is a constant node, we just use the constant node name
# (Defined in C code)
else:
res.append(x)
self._args=res
self._templateargs=templateargs
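# Illustrative sketch (the FIFO IDs and constant name are hypothetical):
# assigning node.args = [0, 1, "MY_CONSTANT"] sets self._args to
# ["fifo0", "fifo1", "MY_CONSTANT"] while self._templateargs keeps only the
# FIFO names ["fifo0", "fifo1"] for the code-array scheduler templates.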
# For graphviz generation
@property
def graphvizName(self):
"""Name for graph vizualization"""
return ("%s<BR/>(%s)" % (self.nodeName,self.typeName))
@property
def inputNames(self):
return sorted(list(self._inputs.keys()))
@property
def outputNames(self):
return sorted(list(self._outputs.keys()))
@property
def hasManyInputs(self):
return len(self._inputs.keys())>1
@property
def hasManyOutputs(self):
return len(self._outputs.keys())>1
@property
def hasManyIOs(self):
return (self.hasManyInputs or self.hasManyOutputs)
@property
def nbEmptyInputs(self):
return (self.maxNbIOs - len(self._inputs.keys()))
@property
def nbEmptyOutputs(self):
return (self.maxNbIOs - len(self._outputs.keys()))
@property
def maxNbIOs(self):
return max(len(self._inputs.keys()),len(self._outputs.keys()))
class GenericSink(BaseNode):
"""A sink in the dataflow graph"""
def __init__(self,name):
BaseNode.__init__(self,name)
@property
def typeName(self):
return "void"
def addInput(self,name,theType,theLength):
self._inputs[name]=Input(self,name,theType,theLength)
class GenericSource(BaseNode):
"""A source in the dataflow graph"""
def __init__(self,name):
BaseNode.__init__(self,name)
@property
def typeName(self):
return "void"
def addOutput(self,name,theType,theLength):
self._outputs[name]=Output(self,name,theType,theLength)
class GenericNode(BaseNode):
"""A source in the dataflow graph"""
def __init__(self,name):
BaseNode.__init__(self,name)
@property
def typeName(self):
return "void"
def addInput(self,name,theType,theLength):
self._inputs[name]=Input(self,name,theType,theLength)
def addOutput(self,name,theType,theLength):
self._outputs[name]=Output(self,name,theType,theLength)
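# A minimal sketch of a user-defined node built on GenericNode; all names below
# are hypothetical and only illustrate the intended subclassing pattern used by
# SlidingBuffer and OverlapAdd further down:
#
#   class PassThrough(GenericNode):
#       def __init__(self, name, theType, blockSize):
#           GenericNode.__init__(self, name)
#           self.addInput("i", theType, blockSize)
#           self.addOutput("o", theType, blockSize)
#
#       @property
#       def typeName(self):
#           return "PassThrough"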
class SlidingBuffer(GenericNode):
def __init__(self,name,theType,length,overlap):
GenericNode.__init__(self,name)
self._length = length
self._overlap = overlap
self.addInput("i",theType,length-overlap)
self.addOutput("o",theType,length)
def ioTemplate(self):
"""ioTemplate is different for window
"""
theType=self._inputs[self.inputNames[0]].ctype
ios="%s,%d,%d" % (theType,self._length,self._overlap)
return(ios)
def pythonIoTemplate(self):
"""ioTemplate is different for window
"""
theType=self._inputs[self.inputNames[0]].ctype
ios="%d,%d" % (self._length,self._overlap)
return(ios)
@property
def typeName(self):
return "SlidingBuffer"
class OverlapAdd(GenericNode):
def __init__(self,name,theType,length,overlap):
GenericNode.__init__(self,name)
self._length = length
self._overlap = overlap
self.addInput("i",theType,length)
self.addOutput("o",theType,length-overlap)
def ioTemplate(self):
"""ioTemplate is different for window
"""
theType=self._inputs[self.inputNames[0]].ctype
ios="%s,%d,%d" % (theType,self._length,self._overlap)
return(ios)
def pythonIoTemplate(self):
"""ioTemplate is different for window
"""
theType=self._inputs[self.inputNames[0]].ctype
ios="%d,%d" % (self._length,self._overlap)
return(ios)
@property
def typeName(self):
return "OverlapAdd"
# Pure compute functions
# It supports unary functions (src, dst, blocksize)
# and binary functions (srca, srcb, dst, blocksize)
# For cmsis, the prefix arm and the type suffix are not needed
# if class Dsp is used
class GenericFunction(GenericNode):
# Number of function node of each category
# Used to generate unique ID and names when
# unique names are required
# like for creating the graph where each call to
# the same function must be identified as a
# separate node
NODEID={}
PUREID=1
ENV = Environment(
loader=PackageLoader("cmsisdsp.sdf.scheduler"),
autoescape=select_autoescape(),
lstrip_blocks=True,
trim_blocks=True
)
CTEMPLATE = ENV.get_template("cmsis.cpp")
CNODETEMPLATE = ENV.get_template("cmsisNode.cpp")
PYTEMPLATE = ENV.get_template("cmsis.py")
def __init__(self,funcname,theType,length):
if not (funcname in GenericFunction.NODEID):
GenericFunction.NODEID[funcname]=1
self._pureNodeID = GenericFunction.PUREID
GenericFunction.PUREID = GenericFunction.PUREID + 1
GenericNode.__init__(self,"%s%d" % (funcname,GenericFunction.NODEID[funcname]))
# File: libs/logdb.py
"""Logging database
This file implements a logging database optimized to store sequential
data provided for example by sensors. The data is stored in the CSV
format. A definable portion of the data is kept in the RAM to provide
them in a fast way if requested.
Copyright (C) 2020 <NAME>
See the file "LICENSE" for information on usage and redistribution of
this file, and for a DISCLAIMER OF ALL WARRANTIES.
"""
import os
import time
import re
import array
class _LogDbCsv:
"""CSV logging database base class.
If the CSV file does not exist, it is created, an info line is
written to the first line and all labels into the second line. If
the file exists, it reads the info line and the existing labels.
If new labels exists, the existing ones are combined with the new
ones and the combined list written into the header line of the CSV
file. If the info line is not recognized, the current CSV file is
backed up and a new one is created.
The data can be written in 2 modes to the CSV file. In standard
mode (log_delta=False), each data set is written as independent
data set to the file. Each CSV file line is therefore
self-contained. In the second mode only differences from the
previous data set are written to the file. Depending the nature of
the data, the CSV file size can be reduced with this mode. Once the
mode of the CSV file is defined, it will be maintained and a
mismatch with the log_delta parameter of the constructor will be
reported with a warning.
Args:
labels (list of str): Label list
file (str): CSV file
log_delta (bool): Enables delta logging if True.
Default=False.
"""
# Setting DEBUG to True will print detailed debugging information
DEBUG = False
# Python string encoding
ENCODING = "utf-8"
# Initial size of the label line
CSV_HEADER_ROW_LENGTH = 2048
# Initial file chunk load size that is dynamically increased
INIT_RELOAD_SIZE = 1000000
_NAN = float('nan')
_REP = float('inf')
def __init__(self, labels, file, log_delta=False):
if self.DEBUG:
print("_LogDbCsv, Init (file=", file, ", Labels=", labels, ")")
# Open the file if it exists and read the info line (1st line)
# and the existing labels (2nd row)
existing_labels = []
if os.path.exists(file):
f = open(file,"br+")
f.seek(0, 0)
file_info = str(f.readline(), self.ENCODING).strip().split(",")
line = str(f.readline(), self.ENCODING)
header_length = f.tell()
for label in line.split(","):
label = label.strip()
existing_labels.append(label)
if self.DEBUG:
print(" Header_length:", header_length)
print(" File info:", file_info)
# Check the consistency of the file info. If it is wrong,
# rename the file to create a new one
if len(file_info)!=3 or file_info[0]!="logdbcsv" or \
re.fullmatch("delta=[01]", file_info[2])==None:
print("LogDbCsv: Incorrect CSV header line in file '" + file +
"': '" + ",".join(file_info) +
"'. Backup existing file and create new one!")
f.close()
os.rename(file, file + time.strftime(
"%Y%m%d_%H%M%S", time.localtime(time.time())))
else:
# Select the mode from the existing file (delta=0/1)
existing_log_delta = \
{"0": False, "1": True}[file_info[2].split("=")[1]]
if log_delta != existing_log_delta:
print("LogDbCsv: Specified log_delta parameter (" +
str(log_delta) +
") does not match with the existing one of file '" +
file + " Using the existing one!")
log_delta = existing_log_delta
# Create the file if it does not exist
if not os.path.exists(file):
f = open(file,"bw")
if self.DEBUG:
print(" New file!")
header_length = None
# Create a list of all labels, and another one of the new labels
all_labels = existing_labels.copy()
new_labels = []
for label in labels:
if label in existing_labels:
continue
new_labels.append(label)
all_labels.append(label)
if self.DEBUG:
print(" Existing labels:", existing_labels)
print(" New labels:", new_labels)
print(" All labels:", all_labels)
# Write the updated CSV header lines with the additional labels
if len(new_labels) > 0:
f.seek(0, 0)
# File information line
file_info_line = "logdbcsv,version=1.0,delta="+["0", "1"][log_delta]
f.write(file_info_line.encode())
f.write(b"\n")
# Label line
label_line = ",".join(all_labels)
label_line += " "*(self.CSV_HEADER_ROW_LENGTH \
-len(label_line)-len(file_info_line))
f.write(label_line.encode())
f.write(b"\n")
f.flush()
# Move the write pointer to the file end
f.seek(0, 2)
self.labels = all_labels
self.log_delta = log_delta
self.f = f
self.header_length = header_length
self.last_data_set = {}
def get_labels(self):
"""Returns the list of all labels."""
return self.labels
def _str2float(self, string):
"""Translates a CSV file value into a Python value."""
try:
if string == "":
value = self._REP if self.log_delta else self._NAN
elif string == "n":
value = self._NAN
else:
value = float(string)
except:
if string.strip() == "" and self.log_delta:
value = self._REP
else:
value = self._NAN
return value
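# Value translation summary (derived from the branches above):
#   ""    -> _REP (inf) in delta mode (repeat previous value), _NAN otherwise
#   "n"   -> _NAN (no value)
#   "1.5" -> 1.5; anything unparsable also falls back to _REP/_NAN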
def restore_data(self, data=None, number_retention_records=None):
"""Restores the data from the CSV file.
This method restores data from the CSV file either in the data
structure provided by the parameter 'data', or if this one is
None, to a newly created data structure.
Args:
data (list of arrays): Optionally provided data structure
where the data has to be restored. Default: None.
number_retention_records:
Optional maximum number of data records to load to reduce the
memory footprint.
Return:
list of arrays: Data structure
"""
# Create the data structure if not provided. This structure is a
# dictionary of arrays. Use 'double' as array type.
if data is None:
data = {}
for label in self.labels:
data[label] = array.array("d")
# Skip data restore if the CSV file is new
if self.header_length is None:
return data
f = self.f
# Organize the data arrays as list to improve access speed.
# Ignore labels that have no member in the data dictionary.
data_matrix = []
for label in self.labels:
data_matrix.append(data[label] if label in data else None)
# Get the current file size
f.seek(0, 2)
file_size = f.tell()
if self.DEBUG:
print(" Load existing data, file size:", file_size)
# Read the file by chunks, starting at the file end. The chunk
# size is defined by 'reload_size', which is increased dynamically.
# The data arrays are created in the inverse sense; elements 0
# are the last elements stored in the CSV file. After reading
# all data the arrays are reversed.
read_pos = file_size
reload_size = self.INIT_RELOAD_SIZE
all_read = False
nbr_records_to_load = float('inf') if number_retention_records is None \
else number_retention_records
while not all_read:
# Limit the number of records to load if required
if nbr_records_to_load <= 0:
break
# Set the read position to the new location. Ensure that the
# header line is not read
if read_pos-reload_size < self.header_length:
f.seek(self.header_length-2, 0)
all_read = True
else:
f.seek(read_pos-reload_size, 0)
# From the defined read position, go to the next line start,
# and adjust the exact read chunk size and read position.
f.readline()
read_size = read_pos - f.tell()
read_pos = f.tell()
if self.DEBUG:
print(" Load", read_size, "bytes from position", read_pos)
# Read the data chunk
data_csv = f.read(read_size)
if self.DEBUG:
print(" Read", data_csv.count(b"\n"), "lines")
# Handle each line of the CSV file in the inversed sense.
# Add the data in this inversed sense to the data arrays.
for line in reversed(data_csv.split(b"\n")):
# Strip removes eventual \r characters at the line end
data_record = str(line, self.ENCODING).strip().split(",")
# Skip the line if it is empty, and raise an error if
# the number of data words in the line are higher than
# the number of defined labels.
if len(data_record) < 2:
continue
if len(data_record) > len(data_matrix):
raise Exception(("LogDbCVS, restore_data: Line with more " +
"than {} values: {!a}").format(
len(data_record), str(line, self.ENCODING)))
# Add the data of the line to the data arrays
for index, data_item in enumerate(data_record):
if data_matrix[index] is None:
continue
data_matrix[index].append(self._str2float(data_item))
# Fill up eventual missing data values with NANs
for index in range(len(data_record), len(self.labels)):
if data_matrix[index] is None:
continue
data_matrix[index].append(self._NAN)
# Limit the number of records to load if required
nbr_records_to_load -= 1
if nbr_records_to_load <= 0:
break
# Repository: Edgencio/pybel
# -*- coding: utf-8 -*-
"""Constants for PyBEL.
This module maintains the strings used throughout the PyBEL codebase to promote consistency.
"""
from .config import connection
def get_cache_connection() -> str:
"""Get the preferred RFC-1738 database connection string.
1. Check the environment variable ``PYBEL_CONNECTION``
2. Check the ``PYBEL_CONNECTION`` key in the config file ``~/.config/pybel/config.json``. Optionally, this config
file might be in a different place if the environment variable ``PYBEL_CONFIG_DIRECTORY`` has been set.
3. Return a default connection string using a SQLite database in the ``~/.pybel``. Optionally, this directory
might be in a different place if the environment variable ``PYBEL_RESOURCE_DIRECTORY`` has been set.
"""
return connection
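# Illustrative usage; the returned value depends on the environment and config,
# and the SQLite URL below only shows the expected RFC-1738 form:
#
#   connection_string = get_cache_connection()  # e.g. 'sqlite:////home/user/.pybel/pybel_cache.db'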
PYBEL_CONTEXT_TAG = 'pybel_context'
PYBEL_AUTOEVIDENCE = 'Automatically added by PyBEL'
CITATION_TYPE_BOOK = 'Book'
CITATION_TYPE_PUBMED = 'PubMed'
CITATION_TYPE_PMC = 'PubMed Central'
CITATION_TYPE_JOURNAL = 'Journal'
CITATION_TYPE_ONLINE = 'Online Resource'
CITATION_TYPE_URL = 'URL'
CITATION_TYPE_DOI = 'DOI'
CITATION_TYPE_OTHER = 'Other'
#: The valid citation types
#: .. seealso:: https://wiki.openbel.org/display/BELNA/Citation
CITATION_TYPES = {
CITATION_TYPE_BOOK: None,
CITATION_TYPE_PUBMED: 'pmid',
CITATION_TYPE_PMC: 'pmc',
CITATION_TYPE_JOURNAL: None,
CITATION_TYPE_ONLINE: None,
CITATION_TYPE_URL: None,
CITATION_TYPE_DOI: 'doi',
CITATION_TYPE_OTHER: None,
}
NAMESPACE_DOMAIN_BIOPROCESS = 'BiologicalProcess'
NAMESPACE_DOMAIN_CHEMICAL = 'Chemical'
NAMESPACE_DOMAIN_GENE = 'Gene and Gene Products'
NAMESPACE_DOMAIN_OTHER = 'Other'
#: The valid namespace types
#: .. seealso:: https://wiki.openbel.org/display/BELNA/Custom+Namespaces
NAMESPACE_DOMAIN_TYPES = {
NAMESPACE_DOMAIN_BIOPROCESS,
NAMESPACE_DOMAIN_CHEMICAL,
NAMESPACE_DOMAIN_GENE,
NAMESPACE_DOMAIN_OTHER,
}
#: Represents the key for the citation date in a citation dictionary
CITATION_DATE = 'date'
#: Represents the key for the citation authors in a citation dictionary
CITATION_AUTHORS = 'authors'
#: Represents the key for the citation comment in a citation dictionary
CITATION_JOURNAL = 'journal'
#: Represents the key for the optional PyBEL citation volume entry in a citation dictionary
CITATION_VOLUME = 'volume'
#: Represents the key for the optional PyBEL citation issue entry in a citation dictionary
CITATION_ISSUE = 'issue'
#: Represents the key for the optional PyBEL citation pages entry in a citation dictionary
CITATION_PAGES = 'pages'
#: Represents the key for the optional PyBEL citation first author entry in a citation dictionary
CITATION_FIRST_AUTHOR = 'first'
#: Represents the key for the optional PyBEL citation last author entry in a citation dictionary
CITATION_LAST_AUTHOR = 'last'
# Used during BEL parsing
MODIFIER = 'modifier'
EFFECT = 'effect'
FROM_LOC = 'fromLoc'
TO_LOC = 'toLoc'
LOCATION = 'location'
ACTIVITY = 'Activity'
DEGRADATION = 'Degradation'
TRANSLOCATION = 'Translocation'
CELL_SECRETION = 'CellSecretion'
CELL_SURFACE_EXPRESSION = 'CellSurfaceExpression'
INTRACELLULAR = 'intracellular'
EXTRACELLULAR = 'extracellular space'
CELL_SURFACE = 'cell surface'
# Internal node data format keys
#: The node data key specifying the node's function (e.g. :data:`GENE`, :data:`MIRNA`, :data:`BIOPROCESS`, etc.)
FUNCTION = 'function'
#: The key specifying a concept
CONCEPT = 'concept'
#: The key specifying an identifier dictionary's namespace. Used for nodes, activities, and transformations.
NAMESPACE = 'namespace'
#: The key specifying an identifier dictionary's name. Used for nodes, activities, and transformations.
NAME = 'name'
#: The key specifying an identifier dictionary
IDENTIFIER = 'identifier'
#: The key specifying an optional label for the node
LABEL = 'label'
#: The key specifying an optional description for the node
DESCRIPTION = 'description'
#: The key specifying xrefs
XREFS = 'xref'
#: They key representing the nodes that are a member of a composite or complex
MEMBERS = 'members'
#: The key representing the nodes appearing in the reactant side of a biochemical reaction
REACTANTS = 'reactants'
#: The key representing the nodes appearing in the product side of a biochemical reaction
PRODUCTS = 'products'
#: The node data key specifying a fusion dictionary, containing :data:`PARTNER_3P`, :data:`PARTNER_5P`,
# :data:`RANGE_3P`, and :data:`RANGE_5P`
FUSION = 'fusion'
#: The key specifying the identifier dictionary of the fusion's 3-Prime partner
PARTNER_3P = 'partner_3p'
#: The key specifying the identifier dictionary of the fusion's 5-Prime partner
PARTNER_5P = 'partner_5p'
#: The key specifying the range dictionary of the fusion's 3-Prime partner
RANGE_3P = 'range_3p'
#: The key specifying the range dictionary of the fusion's 5-Prime partner
RANGE_5P = 'range_5p'
FUSION_REFERENCE = 'reference'
FUSION_START = 'left'
FUSION_STOP = 'right'
FUSION_MISSING = 'missing'
#: The key specifying the node has a list of associated variants
VARIANTS = 'variants'
#: The key representing what kind of variation is being represented
KIND = 'kind'
#: The value for :data:`KIND` for an HGVS variant
HGVS = 'hgvs'
#: The value for :data:`KIND` for a protein modification
PMOD = 'pmod'
#: The value for :data:`KIND` for a gene modification
GMOD = 'gmod'
#: The value for :data:`KIND` for a fragment
FRAGMENT = 'frag'
#: The allowed values for :data:`KIND`
PYBEL_VARIANT_KINDS = {
HGVS,
PMOD,
GMOD,
FRAGMENT,
}
#: The group of all BEL-provided keys for node data dictionaries, used for hashing.
PYBEL_NODE_DATA_KEYS = {
FUNCTION,
NAMESPACE,
NAME,
IDENTIFIER,
VARIANTS,
FUSION,
MEMBERS,
REACTANTS,
PRODUCTS,
}
#: Used as a namespace when none is given when lenient parsing mode is turned on. Not recommended!
DIRTY = 'dirty'
#: Represents the BEL abundance, abundance()
ABUNDANCE = 'Abundance'
#: Represents the BEL abundance, geneAbundance()
#: .. seealso:: http://openbel.org/language/version_2.0/bel_specification_version_2.0.html#Xabundancea
GENE = 'Gene'
#: Represents the BEL abundance, rnaAbundance()
RNA = 'RNA'
#: Represents the BEL abundance, microRNAAbundance()
MIRNA = 'miRNA'
#: Represents the BEL abundance, proteinAbundance()
PROTEIN = 'Protein'
#: Represents the BEL function, biologicalProcess()
BIOPROCESS = 'BiologicalProcess'
#: Represents the BEL function, pathology()
PATHOLOGY = 'Pathology'
#: Represents the BEL function, populationAbundance()
POPULATION = 'Population'
#: Represents the BEL abundance, compositeAbundance()
COMPOSITE = 'Composite'
#: Represents the BEL abundance, complexAbundance()
COMPLEX = 'Complex'
#: Represents the BEL transformation, reaction()
REACTION = 'Reaction'
#: A set of all of the valid PyBEL node functions
PYBEL_NODE_FUNCTIONS = {
ABUNDANCE,
GENE,
RNA,
MIRNA,
PROTEIN,
BIOPROCESS,
PATHOLOGY,
COMPOSITE,
COMPLEX,
REACTION,
POPULATION,
}
#: The mapping from PyBEL node functions to BEL strings
rev_abundance_labels = {
ABUNDANCE: 'a',
GENE: 'g',
MIRNA: 'm',
PROTEIN: 'p',
RNA: 'r',
BIOPROCESS: 'bp',
PATHOLOGY: 'path',
COMPLEX: 'complex',
COMPOSITE: 'composite',
POPULATION: 'pop',
}
# Internal edge data keys
#: The key for an internal edge data dictionary for the relation string
RELATION = 'relation'
#: The key for an internal edge data dictionary for the citation dictionary
CITATION = 'citation'
#: The key for an internal edge data dictionary for the evidence string
EVIDENCE = 'evidence'
#: The key for an internal edge data dictionary for the annotations dictionary
ANNOTATIONS = 'annotations'
#: The key for free annotations
FREE_ANNOTATIONS = 'free_annotations'
SOURCE = 'source'
TARGET = 'target'
#: The key for an internal edge data dictionary for the source modifier dictionary
SOURCE_MODIFIER = 'source_modifier'
#: The key for an internal edge data dictionary for the target modifier dictionary
TARGET_MODIFIER = 'target_modifier'
#: The key or an internal edge data dictionary for the line number
LINE = 'line'
#: The key representing the hash of the other
HASH = 'hash'
#: The group of all BEL-provided keys for edge data dictionaries, used for hashing.
PYBEL_EDGE_DATA_KEYS = {
RELATION,
CITATION,
EVIDENCE,
ANNOTATIONS,
SOURCE_MODIFIER,
TARGET_MODIFIER,
}
#: The group of all PyBEL-specific keys for edge data dictionaries, not used for hashing.
PYBEL_EDGE_METADATA_KEYS = {
LINE,
HASH,
}
#: The group of all PyBEL annotated keys for edge data dictionaries
PYBEL_EDGE_ALL_KEYS = PYBEL_EDGE_DATA_KEYS | PYBEL_EDGE_METADATA_KEYS
#: A BEL relationship
HAS_REACTANT = 'hasReactant'
#: A BEL relationship
HAS_PRODUCT = 'hasProduct'
#: A BEL relationship
HAS_VARIANT = 'hasVariant'
#: A BEL relationship
#: :data:`GENE` to :data:`RNA` is called transcription
TRANSCRIBED_TO = 'transcribedTo'
#: A BEL relationship
#: :data:`RNA` to :data:`PROTEIN` is called translation
TRANSLATED_TO = 'translatedTo'
#: A BEL relationship
INCREASES = 'increases'
#: A BEL relationship
DIRECTLY_INCREASES = 'directlyIncreases'
#: A BEL relationship
DECREASES = 'decreases'
#: A BEL relationship
DIRECTLY_DECREASES = 'directlyDecreases'
#: A BEL relationship
CAUSES_NO_CHANGE = 'causesNoChange'
#: A BEL relationship
REGULATES = 'regulates'
#: A BEL relationship
BINDS = 'binds'
#: A BEL relationship
CORRELATION = 'correlation'
#: A BEL relationship
NO_CORRELATION = 'noCorrelation'
#: A BEL relationship
NEGATIVE_CORRELATION = 'negativeCorrelation'
#: A BEL relationship
POSITIVE_CORRELATION = 'positiveCorrelation'
#: A BEL relationship
ASSOCIATION = 'association'
#: A BEL relationship
ORTHOLOGOUS = 'orthologous'
#: A BEL relationship
ANALOGOUS_TO = 'analogousTo'
#: A BEL relationship
IS_A = 'isA'
#: A BEL relationship
RATE_LIMITING_STEP_OF = 'rateLimitingStepOf'
#: A BEL relationship
SUBPROCESS_OF = 'subProcessOf'
#: A BEL relationship
BIOMARKER_FOR = 'biomarkerFor'
#: A BEL relationship
PROGONSTIC_BIOMARKER_FOR = 'prognosticBiomarkerFor'
#: A BEL relationship, added by PyBEL
EQUIVALENT_TO = 'equivalentTo'
#: A BEL relationship, added by PyBEL
PART_OF = 'partOf'
#: A set of all causal relationships that have an increasing effect
CAUSAL_INCREASE_RELATIONS = {INCREASES, DIRECTLY_INCREASES}
#: A set of all causal relationships that have a decreasing effect
CAUSAL_DECREASE_RELATIONS = {DECREASES, DIRECTLY_DECREASES}
#: A set of direct causal relations
DIRECT_CAUSAL_RELATIONS = {DIRECTLY_DECREASES, DIRECTLY_INCREASES}
#: A set of indirect causal relations
INDIRECT_CAUSAL_RELATIONS = {DECREASES, INCREASES, REGULATES}
#: A set of causal relationships that are polar
CAUSAL_POLAR_RELATIONS = CAUSAL_INCREASE_RELATIONS | CAUSAL_DECREASE_RELATIONS
#: A set of all causal relationships
CAUSAL_RELATIONS = CAUSAL_INCREASE_RELATIONS | CAUSAL_DECREASE_RELATIONS | {REGULATES}
APOLAR_CORRELATIVE_RELATIONS = {
CORRELATION,
NO_CORRELATION,
}
POLAR_CORRELATIVE_RELATIONS = {
POSITIVE_CORRELATION,
NEGATIVE_CORRELATION,
}
#: A set of all correlative relationships
CORRELATIVE_RELATIONS = APOLAR_CORRELATIVE_RELATIONS | POLAR_CORRELATIVE_RELATIONS
#: A set of polar relations
POLAR_RELATIONS = CAUSAL_POLAR_RELATIONS | POLAR_CORRELATIVE_RELATIONS
#: A set of all relationships that are inherently directionless, and are therefore added to the graph twice
TWO_WAY_RELATIONS = CORRELATIVE_RELATIONS | {
ASSOCIATION,
ORTHOLOGOUS,
ANALOGOUS_TO,
EQUIVALENT_TO,
BINDS,
}
#: A list of relationship types that don't require annotations or evidence
UNQUALIFIED_EDGES = {
HAS_REACTANT,
HAS_PRODUCT,
HAS_VARIANT,
TRANSCRIBED_TO,
TRANSLATED_TO,
IS_A,
EQUIVALENT_TO,
PART_OF,
ORTHOLOGOUS,
}
# BEL Keywords
BEL_KEYWORD_SET = 'SET'
BEL_KEYWORD_DOCUMENT = 'DOCUMENT'
BEL_KEYWORD_DEFINE = 'DEFINE'
BEL_KEYWORD_NAMESPACE = 'NAMESPACE'
BEL_KEYWORD_ANNOTATION = 'ANNOTATION'
BEL_KEYWORD_AS = 'AS'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import gzip
import json
import time
from datetime import datetime
import networkx as nx
import warnings
warnings.filterwarnings("ignore")
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from collections import Counter
import copy
###################################################
# #
# Functions Ex1 #
# #
###################################################
def get_user(user, dict_users):
"""
Returns the class User from ID and update the dictionary
Parameters:
user (int): user ID
dict_users (dict): as a key we must have the user ID and as a value we must have the class User
Returns:
user (User): class User
dict_users (dict) return the dict_users updated if the user not exists
"""
# create user if it wasn't created previously
if user not in dict_users:
user_obj = User(user)
dict_users[user] = user_obj
return dict_users[user], dict_users
def create_graph(df, type_node, weight, graph):
"""
Returns the updated graph, the first call of this function, the graph must be empty
Parameters:
df (dataframe): the dataset
type_node(string): the type of dataset (c2a, c2q, a2q)
weight(int): the weight to assign for each edge
graph(dict): the graph obtained until now, so the first one must be empty
Returns:
graph(dict): return the updated graph
"""
# for each row in the dataset
for index, row in df.iterrows():
# drop the last two characters (the day) to keep year and month, e.g. 20180815 -> 201808 (August 2018)
year_month = int(str(row[2])[:-2])
source_ = int(row[1])
target_ = int(row[0])
source, graph = get_user(source_, graph)
target, graph = get_user(target_, graph)
# create Relation(edge) between source(v) and target(u)
rel = Relation(type_node, year_month, source, target, weight)
# add relation in user
graph[target_].add_in_relation(rel) # add to u the incoming edge from v
graph[source_].add_out_relation(rel) # add to v the outgoing edge to u
return graph
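# Illustrative usage; the column layout is inferred from the indexing above
# (row[0] = target user, row[1] = source user, row[2] = integer date such as
# 20180815) and the dataframe names and weights are hypothetical:
#
#   graph = {}
#   graph = create_graph(df_a2q, "a2q", 1, graph)
#   graph = create_graph(df_c2q, "c2q", 2, graph)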
###################################################
# #
# Functions Ex2 #
# #
###################################################
# Ex 2.1
def get_features(name_graph, graph):
"""
plot all information below
- Whether the graph is directed or not
- Number of users
- Number of answers/comments
- Average number of links per user
- Density degree of the graph
- Whether the graph is sparse or dense
Parameters:
name_graph (string): the name of the graph for which we want to obtain this information
"""
type_graph = "directed"
users = set()
number_rel = 0
for user in graph:
for year in graph[user].get_out_relation:
if name_graph in graph[user].get_out_relation[year]:
for relation in graph[user].get_out_relation[year][name_graph]:
users.add(relation.target)
users.add(relation.source)
number_rel += 1
average_link_user = round(number_rel / len(users), 1)
density_degree = round(number_rel / (len(users) * (len(users) - 1)), 4)
type_graph = "DENSE" if density_degree >= 0.5 else "SPARSE"
#################################################################################
#visualization
row = [["Directed", "True"], [ "Number of users", str(len(users))], ["Number of answers/comments", str(number_rel) ], ["Average number of links per user",str(average_link_user) ],\
["Density degree of the graph", str(density_degree)], ["Type of graph", type_graph]]
rowEvenColor = 'grey'
rowOddColor = 'white'
fig, ax = plt.subplots()
ax.axis('off')
ax.axis('tight')
fig.set_figheight(4)
fig.set_figwidth(10)
fig.patch.set_visible(False)
t = ax.table(cellText=row,
cellLoc = "center",
cellColours=[[rowOddColor,rowOddColor],[rowEvenColor, rowEvenColor]]*3,
loc='center')
t.set_fontsize(14)
t.scale(1.5, 1.5)
fig.tight_layout()
########################################################################################################################################
#Ex 2.2
def dijkstraNumShortesPath(graph, source, start, end):
"""
Implements Dijkstra's single source shortest path algorithm
for a directed graph
Parameters:
graph: the graph we are working on
source (int): the vertex choose as source
start (string): beginning date in format "MM/YYYY"
end (string): ending date in format "MM/YYYY"
Returns:
prev (dict): a dictionary with vertex as key and as value the list of the previous nodes in the path
-1 if it is not reachable
dist (dict): a dictionary with vertex as key and the total distance from the source as value
path (dict): as key the node and as value the number of the shortest paths
"""
start = convertDate(start)
end = convertDate(end)
visited = set() # set of visited node
unvisited = set(graph) # set of univised node
dist = dict() # as key the node and as a value the distance between all node
prev = dict() # as key the node and as a value list of the previous node with min distance
path = dict() # as key the node and as value the number of the shortest paths
#initialization
for u in unvisited:
dist[u] = float('inf')
prev[u] = -1
dist[source] = 0
visited.add(source)
neighbor = dict() # initialized empty so the loop condition is defined on the first iteration
# while there are still nodes to visit and not all neighbors have been visited
while len(unvisited) > 0 or not set(neighbor).issubset(visited):
current_node = getMinUnvisited(unvisited, dist) # get the node with the minimum cost of weight
unvisited.remove(current_node)
visited.add(current_node)
neighbor = getNeighbors(current_node, graph, start, end)
# visited neighbors not visited yet
for u in unvisited.intersection(set(neighbor)):
new_dist = dist[current_node] + neighbor[u]
if new_dist < dist[u]:
dist[u] = new_dist
prev[u] = [current_node]
path[u] = 1
# if find distance that equals of the shortest, so we take into account all the shortest path
elif dist[u] == new_dist and dist[u] != float("inf"):
path[u] = path.get(u) + 1 # number of the shorted path for the node u
prev[u].append(current_node)
return prev, dist, path
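# Illustrative usage (the user ID and date window are hypothetical):
#
#   prev, dist, n_shortest = dijkstraNumShortesPath(graph, 42, "08/2018", "12/2018")
#   # dist[u] is the cost from 42 to u, n_shortest[u] the number of distinct shortest paths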
def freq_(matrix):
"""
Return the frequency count of each element in the matrix
Parameters:
matrix
Returns:
result(dictionary): as the key the element and as value the numbers of frequency for each element
"""
result = dict()
for array in matrix:
for el in array:
result[el] = result.get(el,0) + 1
return result
def allShortPath(prev, current, source, path, all_path):
"""
Return all the shortest paths from the target (current) back to the source
Parameters:
prev (dict): a dictionary with vertex as key and as value the list of the previous nodes in the path
current(int): the starting node (the target of the search)
source(int): the node we want to reach back
path(list): partial path built so far (pass an empty list on the first call)
all_path(list): accumulator for the complete paths (pass an empty list on the first call)
Returns:
all_path (list of lists): all the shortest paths found
"""
for node in prev[current]:
if node == source:
all_path.append(path + [node])
path = path + [node]
return all_path
allShortPath(prev, node, source, path + [node], all_path)
return all_path
def getDegree(relation, start, end, target_bool=True):
"""
Update the residual in the path given in input
Parameters:
relation(dict): as key the year and as a value a dict that have as a value the type of relation
and as a key the list of all relation
start (int): timestamp
end (int): timestamp
target_bool(boolean): if True out_relation otherwise in_relation
Returns:
out(int): the degree of the node taken in input
node(set): the neighbor of the source
"""
out = 0
nodes = set()
for year in relation:
for rel in relation[year]:
for edge in relation[year][rel]:
if(start <= edge.time <= end):
out += edge.weight
if target_bool:
nodes.add(edge.target)
else:
nodes.add(edge.source)
return out, nodes
###########################################################################################################################################
# Ex 2.3
def getMinUnvisited(unvisited, dist):
"""
return the minimum distance vertex from
the set of vertices not yet processed.
Parameters:
unvisited (set): the set containing all the vertex not yet processed
dist (dict): a dictionary with vertex as key and the total distance from the source as value
"""
aux = {key: dist[key] for key in unvisited}
minimum = min(aux.values())
for key in unvisited:
if dist[key] == minimum:
return key
def convertDate(time):
"""
Returns the converted time; accepts only the format MM/YYYY
Parameters:
time (string)
Returns:
time (int): return converted time as this format YYYYMM, so year and month
"""
tmp = time.split("/")
return int(tmp[1] + tmp[0])
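# e.g. convertDate("08/2018") returns 201808 (year concatenated with month).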
def getShortestPath(source, target, prev, dist):
"""
Rebuild the shortest path from source to target as a list and its cost
Parameters:
source (int): the vertex choose as source
target (int): the vertex choose as target
prev (dict): a dictionary with vertex as key and last previous vertex in the path from the source as value
-1 if it is not reachable
dist (dict): a dictionary with vertex as key and the total distance from the source as value
Returns:
path (list): the sequence of nodes covered
cost (int): cost of the path
"""
path = [target]
cost = dist[target]
# go back from target to source using prev dictionary
while target != source:
path.append(prev[target])
target = prev[target]
path.reverse()
return path, cost
def getNeighbors(node, graph, start, end):
"""
Find all the neighbors of the given node, considering only the relations between start and end."""
their original values as defined in the parent meme(s)
and removes custom properties.
Two params: entity, drilldown (optional)"""
# revertPropertyValues(self, drillDown = False)
def execute(self, params):
drillDown = False
try:
if len(params) == 2:
drillDown = params[1]
except: pass
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
entity.entityLock.acquire(True)
try:
entity.revertPropertyValues(drillDown)
entity.removeAllCustomProperties(drillDown)
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exception as e:
ex = "Function addEntityDecimalProperty failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
return None
class revertEntityPropertyValues(object):
""" Reset property values to their original values as defined in the parent meme(s).
It does not affect custom properties or properties from deprecated memes.
Two params: entity, drilldown (optional)"""
# revertPropertyValues(self, drillDown = False)
def execute(self, params):
drillDown = False
try:
if len(params) == 2:
drillDown = params[1]
except: pass
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
entity.entityLock.acquire(True)
try:
entity.revertPropertyValues(drillDown)
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exception as e:
ex = "Function addEntityDecimalProperty failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
return None
class setEntityPropertyValue(object):
''' Three params: entity, name, value'''
def execute(self, params):
returnValue = None
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
entity.entityLock.acquire(True)
try:
returnValue = entity.setPropertyValue(params[1], params[2])
except Exceptions.EventScriptFailure as e:
raise e
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exceptions.EventScriptFailure as e:
raise e
except Exceptions.EntityPropertyValueTypeError as e:
raise Exceptions.EntityPropertyValueTypeError(e)
except Exceptions.EntityPropertyValueOutOfBoundsError as e:
raise Exceptions.EntityPropertyValueOutOfBoundsError(e)
except Exception as e:
ex = "Function addEntityDecimalProperty failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
return returnValue
class setStateEventScript(object):
''' Two params: entity, script'''
def execute(self, params):
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
entity.entityLock.acquire(True)
try:
entity.setStateEventScript(params[1])
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exceptions.EntityPropertyValueTypeError as e:
raise Exceptions.EntityPropertyValueTypeError(e)
except Exceptions.EntityPropertyValueOutOfBoundsError as e:
raise Exceptions.EntityPropertyValueOutOfBoundsError(e)
except Exception as e:
ex = "Function setExecScript failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
return None
class installPythonExecutor(object):
''' two params - the entity and the callable object '''
def execute(self, params):
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
entity.entityLock.acquire(True)
try:
entity.installExecutorObject(params[1])
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exceptions.EntityPropertyValueTypeError as e:
raise Exceptions.EntityPropertyValueTypeError(e)
except Exceptions.EntityPropertyValueOutOfBoundsError as e:
raise Exceptions.EntityPropertyValueOutOfBoundsError(e)
except Exception as e:
ex = "Function setExecScript failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
return None
class getChildMemes(object):
def execute(self, params):
return []
class getParentMemes(object):
def execute(self, params):
return []
class getChildMetaMemes(object):
def execute(self, params):
return []
class getParentMetaMemes(object):
#param[0] = meme ID
def execute(self, params):
meme = templateRepository.resolveTemplateAbsolutely(params[0])
return meme.metaMeme
class getExtendingMetamemes (object):
#MetaMeme.collectExtensions
#param[0] = meme ID
def execute(self, params):
meme = templateRepository.resolveTemplateAbsolutely(params[0])
extendingMetaMemes = meme.collectExtensions()
return extendingMetaMemes
class getTaxonomy(object):
#param[0] = meme ID
def execute(self, params):
fullTypeList = []
meme = templateRepository.resolveTemplateAbsolutely(params[0])
metameme = templateRepository.resolveTemplateAbsolutely(meme.metaMeme)
fullTypeList.append(meme.metaMeme)
extendingMetaMemes = metameme.collectExtensions()
fullTypeList.extend(extendingMetaMemes)
return fullTypeList
class getHasTaxonomy(object):
#params[0] = meme ID
#params[1] = metameme path
def execute(self, params):
meme = templateRepository.resolveTemplateAbsolutely(params[0])
isMetaMemeType = meme.testTaxonomy(params[1])
return isMetaMemeType
class getEnhancingMetamemes (object):
#MetaMeme.collectEnhancements
def execute(self, params):
return []
class getEnhancedMetamemes (object):
#MetaMeme.collectEnhancements
def execute(self, params):
return []
class getEnhanceableMemes (object):
#Meme.collectEnhanceableMemes
def execute(self, params):
return []
class getEnhancedMemes (object):
def execute(self, params):
return []
class getEnhancingMemes (object):
#MetaMeme.collectMemesThatEnhanceSelf
def execute(self, params):
return []
class hotLoadTemplate (object):
#MetaMeme.collectMemesThatEnhanceSelf
def execute(self, params):
return True
class getClusterMembers (object):
''' Three params - entity UUID, link types, halt on singleton'''
def execute(self, params):
bigList = []
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
entity.entityLock.acquire(True)
try:
bigList = entity.getClusterMembers(params[1], params[2], [])
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exceptions.EntityPropertyValueTypeError as e:
raise Exceptions.EntityPropertyValueTypeError(e)
except Exceptions.EntityPropertyValueOutOfBoundsError as e:
raise Exceptions.EntityPropertyValueOutOfBoundsError(e)
except Exception as e:
ex = "Function getClusterMembers failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
return bigList
class getCluster(object):
''' Three params - entity UUID, link types, halt on singleton
Returns a python dict corresponding to the following JSON example
{
"nodes": [
{"id": "Myriel", "group": 1},
{"id": "Napoleon", "group": 1}
],
"links": [
{"source": "Napoleon", "target": "Myriel", "value": 1},
{"source": "Mlle.Baptistine", "target": "Myriel", "value": 8}
]
}
'''
def execute(self, params):
nodesDict = {}
nodes = []
links = []
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
selfMeme = api.getEntityMemeType(params[0])
selfMetaMeme = api.getEntityMetaMemeType(params[0])
entityString = getUUIDAsString(params[0])
nodeData = {"id": entityString, "meme": selfMeme, "metaMeme": selfMetaMeme, "position" : 0}
nodesDict[entityString] = nodeData
entity.entityLock.acquire(True)
try:
#bigList = entity.getClusterMembers(params[1], params[2], [])
entireCluster = entity.getEntityCluster(params[1], params[2], [])
# Each entry in bigList looks like [start.uuid, end.uuid, member.memePath.fullTemplatePath, member.metaMeme]
for bigListEntry in entireCluster:
sourceID = getUUIDAsString(bigListEntry[0])
targetID = getUUIDAsString(bigListEntry[1])
linkdata ={"source": sourceID, "target": targetID, "value": 1}
links.append(linkdata)
#We add the node data into a dict, to ensure that each node comes up exactly once
nodeData = {"id": targetID, "meme": bigListEntry[2], "metaMeme": bigListEntry[3], "position" : bigListEntry[4]}
nodesDict[targetID] = nodeData
for nodesDictKey in nodesDict.keys():
nodes.append(nodesDict[nodesDictKey])
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exceptions.EntityPropertyValueTypeError as e:
raise Exceptions.EntityPropertyValueTypeError(e)
except Exceptions.EntityPropertyValueOutOfBoundsError as e:
raise Exceptions.EntityPropertyValueOutOfBoundsError(e)
except Exception as e:
ex = "Function getClusterMembers failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
cluster = {"nodes": nodes, "links": links}
return cluster
class getClusterJSON(object):
''' Three params - entity UUID, link types, halt on singleton
Returns a python dict corresponding to the following JSON example
{
"nodes": [
{"id": "Myriel", "group": 1},
{"id": "Napoleon", "group": 1}
],
"links": [
{"source": "Napoleon", "target": "Myriel", "value": 1},
{"source": "Mlle.Baptistine", "target": "Myriel", "value": 8}
]
}
'''
def execute(self, params):
nodesDict = {}
nodes = []
links = []
try:
entity = entityRepository.getEntity(params[0])
if entity.depricated != True:
selfMeme = api.getEntityMemeType(params[0])
selfMetaMeme = api.getEntityMetaMemeType(params[0])
entityString = getUUIDAsString(params[0])
nodeData = {"id": entityString, "meme": selfMeme, "metaMeme": selfMetaMeme}
nodesDict[entityString] = nodeData
entity.entityLock.acquire(True)
try:
#bigList = entity.getClusterMembers(params[1], params[2], [])
entireCluster = entity.getEntityCluster(params[1], params[2], [])
# Each entry in bigList looks like [start.uuid, end.uuid, member.memePath.fullTemplatePath, member.metaMeme]
for bigListEntry in entireCluster:
sourceID = getUUIDAsString(bigListEntry[0])
targetID = getUUIDAsString(bigListEntry[1])
linkdata ={"source": sourceID, "target": targetID, "value": 1}
links.append(linkdata)
#We add the node data into a dict, to ensure that each node comes up exactly once
nodeData = {"id": targetID, "meme": bigListEntry[2], "metaMeme": bigListEntry[3], "position" : bigListEntry[4]}
nodesDict[targetID] = nodeData
for nodesDictKey in nodesDict.keys():
nodes.append(nodesDict[nodesDictKey])
except Exception as e:
raise e
finally:
entity.entityLock.release()
else:
ex = "Entity %s has been archived and is no longer available" %params[0]
raise Exceptions.ScriptError(ex)
except Exceptions.EntityPropertyValueTypeError as e:
raise Exceptions.EntityPropertyValueTypeError(e)
except Exceptions.EntityPropertyValueOutOfBoundsError as e:
raise Exceptions.EntityPropertyValueOutOfBoundsError(e)
except Exception as e:
ex = "Function getClusterMembers failed. Traceback = %s" %e
raise Exceptions.ScriptError(ex)
cluster = {"nodes": nodes, "links": links}
clusterJSON = json.dumps(cluster)
return clusterJSON
class sourceMemeCreate (object):
''' Three params - modulePath, memeName, metamemePath'''
className = "sourceMemeCreate"
def execute(self, params):
method = moduleName + '.' + self.className + '.execute'
validationResults = []
try:
sourceTemplateRepository.lock.acquire(True)
path = TemplatePath(params[0], params[1])
metaMeme = templateRepository.resolveTemplate(path, params[2])
meme = SourceMeme(path, metaMeme)
try:
templateRepository.lock.acquire(True)
sourceTemplateRepository.catalogTemplate(meme.path, meme)
validationResults = meme.compile(True, False)
except Exception as e:
fullerror = sys.exc_info()
errorID = str(fullerror[0])
nestEerrorMsg = str(fullerror[1])
errorMsg = "Error while trying to create compile meme %s, Nested Traceback = %s: %s" %(path.fullTemplatePath, errorID, nestEerrorMsg)
logQ.put( [logType , logLevel.WARNING , method , errorMsg])
raise Exceptions.SourceMemeManipulationError(errorMsg)
# compute left/right vacancy
occupied = map(lambda x: x > 0, _tile_occ['c'])
left = occupied.index(True)
right = occupied[::-1].index(True)
# compute bottom/top vacancy
occupied = map(lambda x: x > 0, _tile_occ['r'])
bot = occupied.index(True)
top = occupied[::-1].index(True)
_vacancy = [left, right, bot, top]
# checked, complete
def firstCell(M1=False):
'''returns the first cell to be placed'''
global _numAdj, _numAdj2
log('Selecting first cell\n')
if len(_numAdj.keys()) == 1:
log('\tselected cell 0\n\n')
return 0
# create adjacency worths for each cell
worth = {key: (_numAdj[key], _numAdj2[key]) for key in _numAdj}
# sort by decreasing worth
order = sorted(_numAdj.keys(), key=lambda x: worth[x])[::-1]
### method 1: max adj
if M1:
log('\trunning method 1: max adjacency...')
# determine how many cells have the maximum worth
num_max = worth.values().count(worth[order[0]])
# randomly select one of these cells
i = int(rand.random()*num_max)
cell = order[i]
log('done\n')
### method 2: fully probabilistic
# probability is ~ numAdj**POW for some power
else:
log('\trunning method 2: probabilistic...')
# give a probability score for each cell
probs = {key: pow(worth[key][0], FIRST_PROB_POW) for key in worth}
# normalise and compute comparison values
total_prob = sum(probs.values())
comps = [0]
for key in order:
probs[key] /= total_prob
comps.append(comps[-1]+probs[key])
# randomly select starting key
i = max(bisect(comps, rand.random()), 1)
cell = order[i-1]
log('done\n')
log('\tselected cell %d\n\n' % cell)
return cell
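# In method 2 the selection probability of a cell is proportional to its adjacency
# count raised to FIRST_PROB_POW, i.e.
#   P(cell) = numAdj(cell)**FIRST_PROB_POW / sum_k numAdj(k)**FIRST_PROB_POW,
# sampled via the cumulative comparison values built above.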
# checked, complete
def firstQubit(cell, M1=False):
'''Selects the qubit corresponding to the first cell'''
global _qbitAdj, _numAdj
log('Selecting first Qubit\n')
qb = None
adj = _numAdj[cell]
### method 1: middle cell
if M1:
log('\trunning method 1: middle tile... ')
# select candidate tile(s)
n, m = [N//2], [M//2]
if N % 2 == 0:
n.append(N//2-1)
if M % 2 == 0:
m.append(M//2-1)
tiles = [(_n, _m) for _n in n for _m in m]
# shuffle tiles
rand.shuffle(tiles)
for tile in tiles:
r, c = tile
# try to find suitable qubit
order = [(h, i) for h in xrange(2) for i in xrange(L)]
rand.shuffle(order)
for h, i in order:
qbit = (r, c, h, i)
if len(_qbitAdj[qbit]) >= adj:
qb = qbit
break
else:
continue
break
log('done\n')
### method 2: Gaussian dist
else:
log('\trunning method 2: gaussian dist... ')
if N % 2: # if odd rows
Y = np.arange(-(N//2), N//2+1)
else:
Y = .5+np.arange(-(N//2), N//2)
if M % 2: # if odd rows
X = np.arange(-(M//2), M//2+1)
else:
X = .5+np.arange(-(M//2), M//2)
# generate probabilities
CDF = []
for ax in [X, Y]:
Z = np.exp(-ax*ax/(2*FIRST_QBIT_SIG))
Z /= np.sum(Z)
cdf = [0.]
for z in Z:
cdf.append(cdf[-1]+z)
CDF.append(cdf)
# attempt to find qubit
attempt = 0
while attempt < FIRST_QBIT_ATTEMPTS:
attempt += 1
# pick tile
r = max(bisect(CDF[0], rand.random()), 1)-1
c = max(bisect(CDF[1], rand.random()), 1)-1
# pick qubit
order = [(h, i) for h in xrange(2) for i in xrange(L)]
rand.shuffle(order)
for h, i in order:
qbit = (r, c, h, i)
if len(_qbitAdj[qbit]) >= adj:
qb = qbit
break
else:
continue
break
log('done\n')
if qb is None:
log('\n***Failed to identify a suitable qubit')
return None
log('\tselected qbit: %s\n\n' % (str(qb)))
return qb
#######################################################################
#######################################################################
### UPDATE METHODS ###
# checked
def assignQubit(cell, qbit):
'''Assign a qubit to QCA cell'''
global _numAdj, _source, _qubits, _cells
global _cell_flags, _qbit_flags, _qbitAdj
# decrement numAdj for each edjacent cell
for adj in _source[cell]:
_numAdj[adj] -= 1
# set _qubits and _cells
_qubits[cell] = qbit
_cells[qbit] = cell
# update tile_occ and vacancy
if not _qbit_flags[qbit]['reserved']:
setTileOcc([qbit])
setVacancy()
# update flags
_cell_flags[cell]['placed'] = True
_qbit_flags[qbit]['taken'] = True
_qbit_flags[qbit]['assigned'] = True
# make adjacent qubits aware of the placed cell (for reserved check)
for qb in _qbitAdj[qbit]:
_qbit_flags[qb]['prox'].add(qbit)
Routing.disableQubits([qbit])
# unchecked
def assignPaths(paths):
'''Flag and assign routed paths'''
global _qbit_flags, _qbit_paths, _paths, _cells
reserve_check = set()
for path in paths:
# get end points as key
key = tuple(map(lambda x: _cells[x], [path[0], path[-1]]))
for qbit in path:
# take qbit
_qbit_flags[qbit]['taken'] = True
_qbit_paths[qbit].add(key)
# if qbit is prox, flag for later reserved check
if _qbit_flags[qbit]['prox']:
reserve_check.update(_qbit_flags[qbit]['prox'])
# update tile_occ
setTileOcc(path[1:-1])
Routing.disableQubits(path[1:-1])
_paths[key] = path
# update vacancy
setVacancy()
# check for reservations
try:
reserveQubits(list(reserve_check))
except KeyError:
raise KeyError('Qubit reservation failed during path assignment')
# unchecked
def reserveQubits(qbits):
'''for each qbit in qbits, check if adjacent qubits should be
reserved. Reserve if appropriate.
'''
global _cells, _numAdj, _qbit_flags, _reserved
if not qbits:
return
#log('\n\nReserving locals qbits for: %s\n' % map(str, qbits))
for qbit in qbits:
#log('\nchecking qbit %s\n' % str(qbit))
# cell properties
try:
cell = _cells[qbit]
if cell is None:
raise KeyError
except KeyError:
raise KeyError('Qbit %s is not assigned to a cell...'
% str(qbit))
num_adj = _numAdj[cell]
#log('Required adjacency: %d\n' % num_adj)
# wipe old reservations
old_res = set()
if _reserved[qbit]:
#log('releasing qbits: %s: \n' % map(str, _reserved[qbit]))
for qb in _reserved[qbit]:
_qbit_flags[qb]['reserved'] = False
setTileOcc(_reserved[qbit], dec=True)
old_res = cp(_reserved[qbit])
_reserved[qbit].clear()
# get list of all adjacent unreserved qubits and count qbit type
qbs = []
_qbit_flags[qbit]['c_in'] = set()
_qbit_flags[qbit]['c_out'] = 0
for q in _qbitAdj[qbit]:
if not (_qbit_flags[q]['taken'] or _qbit_flags[q]['reserved']):
qbs.append(q)
if q[0:2] == qbit[0:2]:
_qbit_flags[qbit]['c_in'].add(q)
else:
_qbit_flags[qbit]['c_out'] += 1
# if exact amount of adjacent qubits available, reserve
if num_adj == len(qbs):
# log('Reserving all free qbits for qbit %s\n' % str(qbit))
# reserve all adjacent qubits
res_check = set()
for qb in qbs:
_qbit_flags[qb]['reserved'] = True
_reserved[qbit].add(qb)
res_check.update(_qbit_flags[qb]['prox'])
setTileOcc(qbs)
# if reserved qubits changed, check local qubits for reservations
if old_res == _reserved[qbit]:
reserveQubits(res_check-set(qbits))
# check for insufficient qubits
elif num_adj > len(qbs):
            raise KeyError('Insufficient free qubits for cell %s' % str(cell))
setVacancy()
# unchecked
def forgetQubit(qbit, check=True):
''' release a given qubit. Returns a list of all connected paths which
should be forgotten before continuing with the embedding
'''
global _cells, _cell_flags, _numAdj, _source, _qubits
global _qbit_flags, _reserved, _qbit_paths, _qbitAdj
try:
cell = _cells[qbit]
except KeyError:
log('Qbit has not been assigned to any cell')
raise KeyError('Qbit has not been assigned to any cell')
qbs = set([qbit]) # set of qbits to decrement from _tile_occ
_cells[qbit] = None
_qubits[cell] = None
# update flags
_cell_flags[cell]['placed'] = False
_qbit_flags[qbit]['assigned'] = False
_qbit_flags[qbit]['taken'] = False
# refresh source parameters
for adj in _source[cell]:
_numAdj[adj] += 1
# clear reserved list
for qb in _reserved[qbit]:
_qbit_flags[qb]['reserved'] = False
qbs.update(_reserved[qbit])
_reserved[qbit].clear()
# get list of paths connected to qbit
paths = cp(_qbit_paths[qbit])
_qbit_paths[qbit].clear()
# update _tile_occ
setTileOcc(qbs, dec=True)
if check:
setVacancy()
for qb in _qbitAdj[qbit]:
_qbit_flags[qb]['prox'].remove(qbit)
Routing.enableQubits([qbit])
return paths
# unchecked
def forgetPath(key, check=True):
'''Free up qubits of the path with the given key and update appropriate
flags. If check==True, also check qubit reservations for nearby qubits.
'''
global _paths, _qbit_flags, _qbit_paths
reserve_check = set()
path = cp(_paths[key])
_paths.pop(key)
if key in _qbit_paths[path[0]]:
_qbit_paths[path[0]].remove(key)
if key in _qbit_paths[path[-1]]:
_qbit_paths[path[-1]].remove(key)
for qbit in path[1:-1]:
_qbit_flags[qbit]['taken'] = False
_qbit_paths[qbit].clear()
if _qbit_flags[qbit]['prox']:
reserve_check.update(_qbit_flags[qbit]['prox'])
Routing.enableQubits(path[1:-1])
setTileOcc(path[1:-1], dec=True)
if check:
reserveQubits(reserve_check)
setVacancy()
#######################################################################
#######################################################################
### MULTI-SOURCE SEARCH ###
# checked, modify cost scheme if necessary
def extend_Dijkstra(src):
'''Generator for Dijkstra search extension'''
global _qbitAdj, _qbit_flags, M, N
BIG_VAL = 2*len(_qbitAdj) # large value for initial node cost
# initialise
visited = {}
for qbit in _qbitAdj:
if _qbit_flags[qbit]['taken'] or _qbit_flags[qbit]['reserved']:
visited[qbit] = True
else:
visited[qbit] = False
costs = {qbit: BIG_VAL for qbit in _qbitAdj}
next_qb = set()
next_qb.add(src)
costs[src] = 0
# tree growth loop
while next_qb:
# pick lowest cost qbit and yield
qbit = sorted(next_qb, key=lambda x: costs[x])[0]
next_qb.remove(qbit)
yield qbit
# mark as visited
visited[qbit] = True
# update costs of all unvisited adjacent nodes
for qb in _qbitAdj[qbit]:
if not visited[qb]:
# add cost increment
dcost = IN_TILE_COST if qb[0:2] == qbit[0:2] else OUT_TILE_COST
# include edge repulsion
dcost += EDGE_REP_COST*max(map(abs, [qb[0]-.5*(M-1),
qb[1]-.5*(N-1)]))
costs[qb] = min(costs[qb], costs[qbit]+dcost)
next_qb.add(qb)
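# Illustrative usage sketch (not part of the original code): the generator
# yields free qubits in increasing cost order, so callers typically advance it
# until a qubit meets some requirement. 'src' and 'adj' below are placeholders
# for a seed qubit and a required number of free neighbours.
#
#   gen = extend_Dijkstra(src)
#   for qbit in gen:
#       if len(_qbitAdj[qbit]) >= adj:
#           break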
# unchecked...implement later
def extend_Astar():
'''
'''
pass
# checked, complete
def multiSourceSearch(srcs, adj, forb=set(), typ='Dijkstra'):
'''
Attempt to find the lowest cost suitable point with free paths to
the given sources
'''
global _qbitAdj, _reserved
# create path extension generator
if typ.upper() == 'DIJKSTRA':
extend_func = extend_Dijkstra
else:
extend_func = extend_Astar
extend = {}
# initialise generator for each source
"""
Created on 2020-10-11
@author: cheng.li
"""
from sqlalchemy import (
Column,
INT,
FLOAT,
Date,
Index,
Text,
text
)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class _StkDailyPricePro(Base):
__tablename__ = 'stk_daily_price_pro'
__table_args__ = (
Index('unique_stk_daily_price_pro_index', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
code = Column("security_code", Text)
chgPct = Column("change_pct", FLOAT)
secShortName = Column("name", Text)
is_valid = Column(INT, nullable=False)
flag = Column(INT)
is_verify = Column(INT)
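# Illustrative usage sketch (not part of the original module): the declarative
# models defined here can be bound to a database engine and queried through a
# session. The connection string, table creation call and query below are
# hypothetical examples only.
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine("mysql+pymysql://user:pwd@localhost/market")
#   Base.metadata.create_all(engine)   # create the tables declared on Base
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   rows = (session.query(_StkDailyPricePro)
#           .filter(_StkDailyPricePro.trade_date == "2020-10-09")
#           .all())
#   session.close()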
class _IndexDailyPrice(Base):
__tablename__ = 'index_daily_price'
__table_args__ = (
Index('unique_index_daily_price_index', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
indexCode = Column("security_code", Text)
chgPct = Column("change_pct", FLOAT)
secShortName = Column("name", Text)
is_valid = Column(INT, nullable=False)
flag = Column(INT)
is_verify = Column(INT)
class _Index(Base):
__tablename__ = "index"
__table_args__ = (
Index('unique_index_index', 'trade_date', 'isymbol', 'symbol', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
indexSymbol = Column("isymbol", Text)
symbol = Column(Text)
weight = Column("weighing", FLOAT)
flag = Column(INT)
class _IndexComponent(Base):
__tablename__ = "index_component"
__table_args__ = (
Index('unique_index_index', 'trade_date', 'isecurity_code', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
indexSymbol = Column("isymbol", Text)
symbol = Column(Text)
indexCode = Column("isecurity_code", Text)
code = Column("security_code", Text)
flag = Column(INT)
class _StkUniverse(Base):
__tablename__ = 'stk_universe'
__table_args__ = (
Index('unique_stk_universe_index', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
aerodef = Column(INT, server_default=text("'0'"))
agriforest = Column(INT, server_default=text("'0'"))
auto = Column(INT, server_default=text("'0'"))
bank = Column(INT, server_default=text("'0'"))
builddeco = Column(INT, server_default=text("'0'"))
chem = Column(INT, server_default=text("'0'"))
conmat = Column(INT, server_default=text("'0'"))
commetrade = Column(INT, server_default=text("'0'"))
computer = Column(INT, server_default=text("'0'"))
conglomerates = Column(INT, server_default=text("'0'"))
eleceqp = Column(INT, server_default=text("'0'"))
electronics = Column(INT, server_default=text("'0'"))
foodbever = Column(INT, server_default=text("'0'"))
health = Column(INT, server_default=text("'0'"))
houseapp = Column(INT, server_default=text("'0'"))
ironsteel = Column(INT, server_default=text("'0'"))
leiservice = Column(INT, server_default=text("'0'"))
lightindus = Column(INT, server_default=text("'0'"))
machiequip = Column(INT, server_default=text("'0'"))
media = Column(INT, server_default=text("'0'"))
mining = Column(INT, server_default=text("'0'"))
nonbankfinan = Column(INT, server_default=text("'0'"))
nonfermetal = Column(INT, server_default=text("'0'"))
realestate = Column(INT, server_default=text("'0'"))
telecom = Column(INT, server_default=text("'0'"))
textile = Column(INT, server_default=text("'0'"))
transportation = Column(INT, server_default=text("'0'"))
utilities = Column(INT, server_default=text("'0'"))
ashare = Column(INT, server_default=text("'0'"))
ashare_ex = Column(INT, server_default=text("'0'"))
cyb = Column(INT, server_default=text("'0'"))
hs300 = Column(INT, server_default=text("'0'"))
sh50 = Column(INT, server_default=text("'0'"))
zxb = Column(INT, server_default=text("'0'"))
zz1000 = Column(INT, server_default=text("'0'"))
zz500 = Column(INT, server_default=text("'0'"))
zz800 = Column(INT, server_default=text("'0'"))
flag = Column(INT)
is_verify = Column(INT)
class _SwIndustryDaily(Base):
__tablename__ = 'sw_industry_daily'
__table_args__ = (
Index('sw_industry_daily_uindex', 'trade_date', 'industry_code1', 'symbol', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
symbol = Column(Text, nullable=False)
company_id = Column(Text, nullable=False)
code = Column("security_code", Text, nullable=False)
sname = Column(Text, nullable=False)
industry_code1 = Column(Text, nullable=False)
industry_name1 = Column(Text)
industry_code2 = Column(Text)
industry_name2 = Column(Text)
industry_code3 = Column(Text)
industry_name3 = Column(Text)
Industry_code4 = Column(Text)
Industry_name4 = Column(Text)
flag = Column(INT)
is_verify = Column(INT)
class _RiskExposure(Base):
__tablename__ = 'risk_exposure'
__table_args__ = (
Index('risk_exposure_idx', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(INT)
RealEstate = Column(INT)
Health = Column(INT)
Transportation = Column(INT)
Mining = Column(INT)
NonFerMetal = Column(INT)
HouseApp = Column(INT)
LeiService = Column(INT)
MachiEquip = Column(INT)
BuildDeco = Column(INT)
CommeTrade = Column(INT)
CONMAT = Column(INT)
Auto = Column(INT)
Textile = Column(INT)
FoodBever = Column(INT)
Electronics = Column(INT)
Computer = Column(INT)
LightIndus = Column(INT)
Utilities = Column(INT)
Telecom = Column(INT)
AgriForest = Column(INT)
CHEM = Column(INT)
Media = Column(INT)
IronSteel = Column(INT)
NonBankFinan = Column(INT)
ELECEQP = Column(INT)
AERODEF = Column(INT)
Conglomerates = Column(INT)
COUNTRY = Column(INT)
flag = Column(INT)
class _RiskCovDay(Base):
__tablename__ = 'risk_cov_day'
__table_args__ = (
Index('risk_cov_day_idx', 'trade_date', 'FactorID', 'Factor', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
FactorID = Column(INT, nullable=False)
Factor = Column(Text, nullable=False)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(FLOAT)
RealEstate = Column(FLOAT)
Health = Column(FLOAT)
Transportation = Column(FLOAT)
Mining = Column(FLOAT)
NonFerMetal = Column(FLOAT)
HouseApp = Column(FLOAT)
LeiService = Column(FLOAT)
MachiEquip = Column(FLOAT)
BuildDeco = Column(FLOAT)
CommeTrade = Column(FLOAT)
CONMAT = Column(FLOAT)
Auto = Column(FLOAT)
Textile = Column(FLOAT)
FoodBever = Column(FLOAT)
Electronics = Column(FLOAT)
Computer = Column(FLOAT)
LightIndus = Column(FLOAT)
Utilities = Column(FLOAT)
Telecom = Column(FLOAT)
AgriForest = Column(FLOAT)
CHEM = Column(FLOAT)
Media = Column(FLOAT)
IronSteel = Column(FLOAT)
NonBankFinan = Column(FLOAT)
ELECEQP = Column(FLOAT)
AERODEF = Column(FLOAT)
Conglomerates = Column(FLOAT)
COUNTRY = Column(FLOAT)
flag = Column(INT)
class _RiskCovLong(Base):
__tablename__ = 'risk_cov_long'
__table_args__ = (
Index('risk_cov_long_Date_Factor_uindex', 'trade_date', 'Factor', unique=True),
Index('risk_cov_long_Date_FactorID_uindex', 'trade_date', 'FactorID', unique=True)
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
FactorID = Column(INT)
Factor = Column(Text, nullable=False)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(FLOAT)
RealEstate = Column(FLOAT)
Health = Column(FLOAT)
Transportation = Column(FLOAT)
Mining = Column(FLOAT)
NonFerMetal = Column(FLOAT)
HouseApp = Column(FLOAT)
LeiService = Column(FLOAT)
MachiEquip = Column(FLOAT)
BuildDeco = Column(FLOAT)
CommeTrade = Column(FLOAT)
CONMAT = Column(FLOAT)
Auto = Column(FLOAT)
Textile = Column(FLOAT)
FoodBever = Column(FLOAT)
Electronics = Column(FLOAT)
Computer = Column(FLOAT)
LightIndus = Column(FLOAT)
Utilities = Column(FLOAT)
Telecom = Column(FLOAT)
AgriForest = Column(FLOAT)
CHEM = Column(FLOAT)
Media = Column(FLOAT)
IronSteel = Column(FLOAT)
NonBankFinan = Column(FLOAT)
ELECEQP = Column(FLOAT)
AERODEF = Column(FLOAT)
Conglomerates = Column(FLOAT)
COUNTRY = Column(FLOAT)
flag = Column(INT)
class _RiskCovShort(Base):
__tablename__ = 'risk_cov_short'
__table_args__ = (
Index('risk_cov_short_Date_FactorID_uindex', 'trade_date', 'FactorID', unique=True),
Index('risk_cov_short_Date_Factor_uindex', 'trade_date', 'Factor', unique=True)
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
FactorID = Column(INT)
Factor = Column(Text, nullable=False)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(FLOAT)
RealEstate = Column(FLOAT)
Health = Column(FLOAT)
Transportation = Column(FLOAT)
Mining = Column(FLOAT)
NonFerMetal = Column(FLOAT)
HouseApp = Column(FLOAT)
LeiService = Column(FLOAT)
MachiEquip = Column(FLOAT)
BuildDeco = Column(FLOAT)
CommeTrade = Column(FLOAT)
CONMAT = Column(FLOAT)
Auto = Column(FLOAT)
Textile = Column(FLOAT)
FoodBever = Column(FLOAT)
Electronics = Column(FLOAT)
Computer = Column(FLOAT)
LightIndus = Column(FLOAT)
Utilities = Column(FLOAT)
Telecom = Column(FLOAT)
AgriForest = Column(FLOAT)
CHEM = Column(FLOAT)
Media = Column(FLOAT)
IronSteel = Column(FLOAT)
NonBankFinan = Column(FLOAT)
ELECEQP = Column(FLOAT)
AERODEF = Column(FLOAT)
Conglomerates = Column(FLOAT)
COUNTRY = Column(FLOAT)
flag = Column(INT)
class _SpecificRiskDay(Base):
__tablename__ = 'specific_risk_day'
__table_args__ = (
Index('specific_risk_day_Date_Code_uindex', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
exchangeCD = Column(Text)
secShortName = Column(Text)
SRISK = Column(FLOAT)
flag = Column(INT)
class _SpecificRiskLong(Base):
__tablename__ = 'specific_risk_long'
__table_args__ = (
Index('specific_risk_long_Date_Code_uindex', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
exchangeCD = Column(Text)
secShortName = Column(Text)
SRISK = Column(FLOAT)
flag = Column(INT)
class _SpecificRiskShort(Base):
__tablename__ = 'specific_risk_short'
__table_args__ = (
Index('specific_risk_short_Date_Code_uindex', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
exchangeCD = Column(Text)
secShortName = Column(Text)
SRISK = Column(FLOAT)
flag = Column(INT)
# Factor tables
class _FactorMomentum(Base):
__tablename__ = 'factor_momentum'
__table_args__ = (
Index('factor_momentum_uindex', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
code = Column("security_code", Text, nullable=False)
trade_date = Column(Date, nullable=False)
ADX14D = Column(FLOAT)
ADXR14D = Column(FLOAT)
APBMA5D = Column(FLOAT)
ARC50D = Column(FLOAT)
BBI = Column(FLOAT)
BIAS10D = Column(FLOAT)
BIAS20D = Column(FLOAT)
BIAS5D = Column(FLOAT)
BIAS60D = Column(FLOAT)
CCI10D = Column(FLOAT)
CCI20D = Column(FLOAT)
CCI5D = Column(FLOAT)
CCI88D = Column(FLOAT)
ChgTo1MAvg = Column(FLOAT)
ChgTo1YAvg = Column(FLOAT)
ChgTo3MAvg = Column(FLOAT)
ChkOsci3D10D = Column(FLOAT)
ChkVol10D = Column(FLOAT)
DEA = Column(FLOAT)
EMA10D = Column(FLOAT)
EMA120D = Column(FLOAT)
EMA12D = Column(FLOAT)
EMA20D = Column(FLOAT)
EMA26D = Column(FLOAT)
EMA5D = Column(FLOAT)
EMA60D = Column(FLOAT)
EMV14D = Column(FLOAT)
EMV6D = Column(FLOAT)
Fiftytwoweekhigh = Column(FLOAT)
HT_TRENDLINE = Column(FLOAT)
KAMA10D = Column(FLOAT)
MA10Close = Column(FLOAT)
MA10D = Column(FLOAT)
MA10RegressCoeff12 = Column(FLOAT)
MA10RegressCoeff6 = Column(FLOAT)
MA120D = Column(FLOAT)
MA20D = Column(FLOAT)
MA5D = Column(FLOAT)
MA60D = Column(FLOAT)
is different from provided value list('+str(len(value_list))+')')
print('Cannot update field:', field_name)
else:
print('Updating values for field:', field_name)
for row in range(len(db1)):
rec = db1[row]
rec[field_name] = value_list[row]
rec.store()
db1.close()
updated = True
except:
db1.close()
add_field(shapefile,field_name,datatype,fieldWidth)
########################################################################################################################
#Uses the epv function in the rRsac_Tools.r library to extract the point values from a raster
#Uses the r libraries raster and maptools
#Returns a list of values
class looker(object):
"""let you look up pixel value"""
def __init__(self, tifname,ptEPSG = 4326):
"""Give name of tif file (or other raster data?)"""
# open the raster and its spatial reference
self.ds = gdal.Open(tifname)
srRaster = osr.SpatialReference(self.ds.GetProjection())
srRaster.ImportFromEPSG(ptEPSG)
# get the WGS84 spatial reference
srPoint = osr.SpatialReference()
srPoint.ImportFromEPSG(ptEPSG) # WGS84
# coordinate transformation
self.ct = osr.CoordinateTransformation(srPoint, srRaster)
# geotranformation and its inverse
gt = self.ds.GetGeoTransform()
dev = (gt[1]*gt[5] - gt[2]*gt[4])
gtinv = ( gt[0] , gt[5]/dev, -gt[2]/dev,
gt[3], -gt[4]/dev, gt[1]/dev)
self.gt = gt
self.gtinv = gtinv
# band as array
b = self.ds.GetRasterBand(1)
self.arr = b.ReadAsArray()
def lookup(self, x, y):
"""look up value at lon, lat"""
# get coordinate of the raster
xgeo,ygeo,zgeo = self.ct.TransformPoint(x, y, 0)
# convert it to pixel/line on band
u = xgeo - self.gtinv[0]
v = ygeo - self.gtinv[3]
        # FIXME: truncating with int() here is probably a bad idea; a
        # half-cell-size offset may be needed
xpix = int(self.gtinv[1] * u + self.gtinv[2] * v)
ylin = int(self.gtinv[4] * u + self.gtinv[5] * v)
# look the value up
return self.arr[ylin,xpix]
def batch_lookup(self,xys):
out_list = []
for x,y in xys:
out_list.append(self.lookup(x,y))
return out_list
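##Example usage of looker (illustrative only; the raster path and coordinates
##below are hypothetical). Points are given in the EPSG code passed as ptEPSG:
##l = looker('/data/dem_utm.tif', ptEPSG = 4326)
##value = l.lookup(-120.5, 38.2)
##values = l.batch_lookup([(-120.5, 38.2), (-120.6, 38.3)])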
def extract_value_to_list(shapefile, raster, rscript = cwd + '/rRsac_Tools.r'):
r_library_loader(['rgdal','raster','maptools'])
r('rast = raster("'+raster+'")')
r('pt = readShapePoints("'+shapefile+'")')
r('coords = (coordinates(pt)[,1:2])')
values = r('extract(rast, coords)')
## r1 = R()
## r1.Rimport(rscript)
## r1.r('print("hello")')
## r2 = R(['raster', 'maptools', 'rgdal'])
## values = r2.r('epv("' + shapefile + '", "' + raster + '")', True)
return values
########################################################################################################################
#Extracts the raster value of a point shapefile and updates a specified field with the value in the shapefile
#Will add the field if it does not already exist using the following datatypes:
#Datatypes: Integer, Real, String, Float
def extract_value_to_shapefile(shapefile, field_name, raster, datatype = 'Integer'):
add_field(shapefile, field_name, datatype)
value_list = extract_value_to_list(shapefile, raster)
update_field(shapefile, field_name, value_list)
return value_list
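##Example usage (illustrative only; paths and field name are hypothetical):
##vals = extract_value_to_shapefile('/data/plots.shp', 'ELEV', '/data/dem.tif', datatype = 'Real')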
########################################################################################################################
def extract_xy_value(in_raster_object, x,y,band = 1, c_dt = 'f',dt = 'Float32'):
import struct
rb = in_raster_object.GetRasterBand(band)
try:
raster_value = rb.ReadRaster(x, y, 1, 1, buf_type = eval('gdal.GDT_Int16'))#' + dt))#' + ri['dt']))
value = struct.unpack(c_dt, raster_value)[0]
except:
try:
raster_value = rb.ReadRaster(x, y, 1, 1, buf_type = eval('gdal.' + dt))#' + ri['dt']))
value = struct.unpack(c_dt, raster_value)[0]
except:
value = 0
rb = None
return value
def new_epv(in_point_shp, in_raster,pt_epsg):
#Extract coordinates from shapefile
xys = xy_coords(in_point_shp, False)
l = looker(in_raster,pt_epsg)
return l.batch_lookup(xys)
#Equivalent function as extract_value_to_list, but does not use R
def epv(in_point_shp, in_raster, band = 1,number_samples = None):
import struct
out_list = []
#Extract coordinates from shapefile
xys = xy_coords(in_point_shp, False)
    gdal_dt_to_c_dt = {
        'Float32': 'f',  # struct format codes: f = 4-byte float
        'Float64': 'd',  # d = 8-byte double
        'Byte': 'B',     # B = unsigned 8-bit
        'UInt16': 'H',   # H = unsigned 16-bit
        'Int16': 'h'}    # h = signed 16-bit
#Get the raster info
ri = raster_info(in_raster)
dt = ri['dt']
if numpy_or_gdal(dt) == 'numpy':
dt = dt_converter(dt)
try:
c_dt = gdal_dt_to_c_dt[dt]
except:
c_dt = 'f'
raster_coord_list = []
#Set up a raster object
src_ds = gdal.Open(in_raster)
rb = src_ds.GetRasterBand(band)
eoffset = 0
## print 'Extracting', len(xys), 'values from', in_raster
current_index = 0
if number_samples == None:
list_length = len(xys)
else:
list_length = number_samples
last = 0
for xy in xys[:list_length]:
#print xy
#Convert shape coord to raster coordinate
raster_coords = proj_coord_to_array_coord(xy, ri['coords'], ri['res'])
#Extract raster value
try:
raster_value = rb.ReadRaster(raster_coords[0]-eoffset, raster_coords[1]-eoffset, 1, 1, buf_type = eval('gdal.GDT_Int16'))#' + dt))#' + ri['dt']))
value = struct.unpack(c_dt, raster_value)[0]
except:
try:
raster_value = rb.ReadRaster(raster_coords[0]-eoffset, raster_coords[1]-eoffset, 1, 1, buf_type = eval('gdal.' + dt))#' + ri['dt']))
value = struct.unpack(c_dt, raster_value)[0]
except:
value = -9999
#print value
out_list.append(value)
last = status_bar(current_index, list_length, percent_interval = 5,last = last)
current_index += 1
return out_list
#return [1,2]
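##Example usage of epv (illustrative only; paths are hypothetical). Band 1 is
##read for every point in the shapefile and unreadable pixels come back as -9999:
##values = epv('/data/plots.shp', '/data/landsat_stack.img', band = 1)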
def epv_brick(in_point_shp, in_raster, bands = []):
if bands == [] or bands == '' or bands == None:
ri= raster_info(in_raster)
bands = list(range(1, ri['bands'] + 1))
out_list = []
for band in bands:
print('Extracting values from band:',band)
out_list.append(epv(in_point_shp,in_raster, band))
out_list = transpose(out_list)
return out_list
def bat_single_epv(x,y, images, bands = 'All'):
#Get the raster info
out_list = []
for image in images:
ri = raster_info(images[0])
dt = ri['dt']
        gdal_dt_to_c_dt = {
            'Float32': 'f',  # struct format codes: f = 4-byte float
            'Float64': 'd',  # d = 8-byte double
            'Byte': 'B',     # B = unsigned 8-bit
            'UInt16': 'H',   # H = unsigned 16-bit
            'Int16': 'h'}    # h = signed 16-bit
if numpy_or_gdal(dt) == 'numpy':
dt = dt_converter(dt)
try:
c_dt = gdal_dt_to_c_dt[dt]
except:
c_dt = 'f'
if bands == 'All':
bands = list(range(1,ri['bands'] + 1))
#print 'Extracting value',x,y, 'from', base(image)
tl = []
#Set up a raster object
src_ds = gdal.Open(image)
#tl= list(brick(image,'',x,y,1,1).flatten())
for band in bands:
tl.append(extract_xy_value(src_ds,x,y,band,c_dt,dt))
out_list.append(tl)
src_ds = None
return out_list
#Updated EPV
##s = 'R:/NAFD3/timesync_setup/test_sampled_new_sample3/p035r032_1999_2009_union_lfd_use_sampled_pts.shp'
##images = glob('R:/NAFD3/timesync_setup/imagery/3532/refls/','.img')
##ri = raster_info(images[0])
##xys = xy_coords(s, False)
##rcs = proj_coord_to_array_coord(xys[50], ri['coords'], ri['res'])
##x = rcs[0]
##y = rcs[1]
##print x,y
##t1 = time.time()
##bat_single_epv(x,y, images)
##t2 = time.time()
##print t1-t2
##for image in images:
## print epv_brick(s,image)
def epv_single(x, y, in_raster, band = 1, convert_to_raster_coords = False):
    import struct
    xy = [x, y]
    #Set up a raster object
    src_ds = gdal.Open(in_raster)
    rb = src_ds.GetRasterBand(band)
    #Get the raster info and map its datatype to a struct format code
    ri = raster_info(in_raster)
    dt = ri['dt']
    if numpy_or_gdal(dt) == 'numpy':
        dt = dt_converter(dt)
    gdal_dt_to_c_dt = {
        'Float32': 'f',
        'Float64': 'd',
        'Byte': 'B',
        'UInt16': 'H',
        'Int16': 'h'}
    try:
        c_dt = gdal_dt_to_c_dt[dt]
    except:
        c_dt = 'f'
    if convert_to_raster_coords:
        #Convert shape coord to raster coordinate
        raster_coords = proj_coord_to_array_coord(xy, ri['coords'], ri['res'])
    else:
        raster_coords = xy
    #Extract raster value
    try:
        raster_value = rb.ReadRaster(raster_coords[0]-1, raster_coords[1], 1, 1, buf_type = eval('gdal.GDT_Int16'))
        value = struct.unpack(c_dt, raster_value)[0]
    except:
        try:
            raster_value = rb.ReadRaster(raster_coords[0]-1, raster_coords[1], 1, 1, buf_type = eval('gdal.' + dt))
            value = struct.unpack(c_dt, raster_value)[0]
        except:
            value = 0
    #print value
    return value
########################################################################################################################
#Equivalent function to extract_value_to_shapefile, but does not use r
def epv_to_shp(in_point_shp, in_raster, field_name, band = 1, datatype = 'Real'):
print('Extracting values from', base(in_raster), 'to', base(in_point_shp))
values = epv(in_point_shp, in_raster, band)
try:
update_field(in_point_shp, field_name, values)
except:
add_field(in_point_shp, field_name,datatype = datatype)
update_field(in_point_shp, field_name, values)
########################################################################################################################
#Composites a list of images into a single raster
#If there are multiple images with data for a pixel, the value of the latest image in the list will be used
def composite(image_list, output, snap_raster, no_data = 0.0, dt = '', overwrite = True):
if dt == '':
info = raster_info(image_list[0])
dt = info['dt']
if os.path.exists(output) == False or overwrite == True:
base = empty_raster(snap_raster, dt = dt)
info = raster_info(snap_raster)
height = info['height']
width = info['width']
counter = 100
while counter > 0:
for image in image_list:
array = raster(image, dt = dt)
for row in range(0, height, 1):
for column in range(0, width, 1):
pixel = array[row][column]
if pixel != no_data:
base[row][column] = pixel
counter = counter -1
write_raster(base, output, snap_raster, dt = dt)
array = None
base = None
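##Example usage of composite (illustrative only; paths are hypothetical). Later
##images in the list overwrite earlier ones wherever they contain data:
##composite(['/data/t1.img', '/data/t2.img'], '/data/composite.img', '/data/snap.img', no_data = 0.0)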
########################################################################################################################
#Takes image list with the same extent, and places the value of the image in the order it is given if the value is
# greater than the data_range_min and less than the data_range_max
def smart_composite(image_list, output, data_range_min = 2, data_range_max = 253, dt = 'Int16'):
info = raster_info(image_list[0])
if dt == '':
dt = info['dt']
band_length = []
for image in image_list:
band_length.append(raster_info(image)['bands'])
band_list = list(range(1, min(band_length) + 1))
#base = empty_raster(image_list[0], dt = dt)
base = brick(image_list[0], dt = dt, band_list = band_list)
for image in image_list[1:]:
array = brick(image, dt = dt, band_list = band_list)
for bi in range(len(array)):
band = array[bi]
band[band < data_range_min] = -100
band[band > data_range_max] = -100
print(numpy.maximum(band, base[bi]))
base[bi] = numpy.maximum(band, base[bi])
write_raster(base, output, image_list[0], bands = min(band_length))
array = None
base = None
band = None
########################################################################################################################
#Downloads, snaps, and clips a dem to the extent of a provided raster
source1 = '//192.168.3.11/Data/National/Terrain/NED/grid/'
source2 = '//192.168.3.11/ned13/grid/'
def dem_clip(extent_raster, output, dem_source = source2,
res = '10', zone = '18', datum = 'WGS84', Buffer = 1000, create_mosaic = True, mask_output = True,
dt = '', n_s_hemisphere = 'n', e_w_hemisphere = 'w', image_prefix = 'grd', overwrite = False):
try:
os.listdir(dem_source)
except:
print('Please log onto:', dem_source)
input('Press enter to continue')
if os.path.splitext(extent_raster)[1] == '.shp':
info = shape_info(extent_raster, False, small = True)
cutline = extent_raster
else:
info = raster_info(extent_raster)
cutline = ''
zone = float(info['zone'])
coords = info['coords']
coords = buffer_coords(coords, Buffer)
gdal_coords = coords_to_gdal(coords)
print(zone)
print(gdal_coords)
res = str(res)
lat_lon_max = utm_to_geog(zone, coords[0], coords[3])
lat_lon_min = utm_to_geog(zone, coords[2], coords[1])
print('min',lat_lon_min)
print('max',lat_lon_max)
lat_range = list(range(math.floor(float(lat_lon_min[0])), math.ceil(float(lat_lon_max[0])) + 1))
lon_range = list(range(abs(math.ceil(float(lat_lon_min[1]))), abs(math.floor(float(lat_lon_max[1]))) + 1))
| |
error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#20:Check:Controllers:vEdge list sync
print(' #20:Checking:Controllers:vEdge list sync')
log_file_logger.info('#20:Check:Controllers:vEdge list sync')
writeFile(report_file, '#20:Check:Controllers:vEdge list sync\n\n')
try:
state_vedgeList,check_result, check_analysis, check_action = warningChecknine(controllers_info)
if check_result == 'Failed':
                warning_checks['#20:Check:Controllers:vEdge list sync'] = [ check_analysis, check_action]
log_file_logger.error('#20: Check result: {}'.format(check_result))
log_file_logger.error('#20: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#20: Controllers with inconsistent state_vedgeList: {}\n'.format(state_vedgeList))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#20: Check result: {}'.format(check_result))
log_file_logger.info('#20: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #20:Check:Controllers:vEdge list sync. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#21:Check:Controllers: Confirm control connections
print(' #21:Checking:Controllers: Confirm control connections')
log_file_logger.info('#21:Check:Controllers: Confirm control connections')
writeFile(report_file, '#21:Check:Controllers: Confirm control connections\n\n')
try:
control_sum_tab, discrepancy,check_result, check_analysis, check_action = warningCheckten(vsmart_count, vbond_count)
if check_result == 'Failed':
warning_checks['#21:Check:Controllers: Confirm control connections'] = [ check_analysis, check_action]
log_file_logger.error('#21: Check result: {}'.format(check_result))
log_file_logger.error('#21: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#21: Control Connections Summary: \n{}\n'.format(control_sum_tab))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#21: Check result: {}'.format(check_result))
log_file_logger.info('#21: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #21:Check:Controllers: Confirm control connections. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#Informational Checks
print('\n**** Performing Informational checks\n')
log_file_logger.info('*** Performing Informational Checks')
#22:Check:vManage:Disk controller type
print(' #22:Check:vManage:Disk controller type')
log_file_logger.info('#22:Check:vManage:Disk controller type')
writeFile(report_file, '#22:Check:vManage:Disk controller type\n\n')
try:
check_result, check_analysis, check_action = infoCheckone(server_type, disk_controller)
if check_result == 'Failed':
warning_checks['#22:Check:vManage:Disk controller type'] = [ check_analysis, check_action]
log_file_logger.error('#22: Check result: {}'.format(check_result))
log_file_logger.error('#22: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#22: Disk Controller type: {}\n'.format(disk_controller))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#22: Check result: {}'.format(check_result))
log_file_logger.info('#22: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#22: Disk Controller type: {}\n'.format(disk_controller))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #22:Check:vManage:Disk controller type. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#23:Check:Controllers:Validate there is at minimum vBond, vSmart present
print(' #23:Check:Controllers:Validate there is at minimum vBond, vSmart present ')
log_file_logger.info('#23:Check:Controllers:Validate there is at minimum vBond, vSmart present')
writeFile(report_file, '#23:Check:Controllers:Validate there is at minimum vBond, vSmart present\n\n')
try:
check_result, check_analysis, check_action = infoChecktwo(vsmart_count,vbond_count)
if check_result == 'Failed':
warning_checks['#23:Check:Controllers:Validate there is at minimum vBond, vSmart present'] = [ check_analysis, check_action]
log_file_logger.error('#23: Check result: {}'.format(check_result))
log_file_logger.error('#23: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#23: vSmart Count: {}'.format(vsmart_count))
log_file_logger.error('#23: vBond Count: {}\n'.format(vbond_count))
writeFile(report_file, 'Result: WARNING - {}\n\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#23: Check result: {}'.format(check_result))
log_file_logger.info('#23: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#23: vSmart Count: {}'.format(vsmart_count))
log_file_logger.info('#23: vBond Count: {}\n'.format(vbond_count))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #23:Check:Controllers:Validate there is at minimum vBond, vSmart present. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#24:Check:Controllers:Validate all controllers are reachable
print(' #24:Check:Controllers:Validate all controllers are reachable')
log_file_logger.info('#24:Check:Controllers:Validate all controllers are reachable')
writeFile(report_file, '#24:Check:Controllers:Validate all controllers are reachable\n\n')
try:
unreach_controllers,check_result, check_analysis, check_action = infoChecktthree(controllers_info)
if check_result == 'Failed':
warning_checks['#24:Check:Controllers:Validate all controllers are reachable'] = [ check_analysis, check_action]
log_file_logger.error('#24: Check result: {}'.format(check_result))
log_file_logger.error('#24: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#24: Unreachable Controllers: {}\n'.format(unreach_controllers))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#24: Check result: {}'.format(check_result))
log_file_logger.info('#24: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #24:Check:Controllers:Validate all controllers are reachable. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
if cluster_size>1:
cluster_checks = {}
log_file_logger.info('*** Performing Cluster Checks')
print('\n**** Performing Cluster checks\n')
#25:Check:Cluster:Version consistency
print(' #25:Checking:Cluster:Version consistency')
log_file_logger.info('#25:Check:Cluster:Version consistency')
writeFile(report_file, '#25:Check:Cluster:Version consistency\n\n')
try:
check_result,check_analysis, check_action = criticalChecktwelve(vmanage_info)
if check_result == 'Failed':
cluster_checks['#25:Check:Cluster:Version consistency'] = [ check_analysis, check_action]
log_file_logger.error('#25: Check result: {}'.format(check_result))
log_file_logger.error('#25: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#25: vManage info: {}\n'.format(vmanage_info))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#25: Check result: {}'.format(check_result))
log_file_logger.info('#25: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#25: vManage info: {}\n'.format(vmanage_info))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #25:Check:Cluster:Version consistency. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#26:Check:Cluster:Cluster health
print(' #26:Checking:Cluster:Cluster health')
log_file_logger.info('#26:Check:Cluster:Cluster health')
writeFile(report_file, '#26:Check:Cluster:Cluster health\n\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid,'clusterManagement/list', args.vmanage_port, tokenid))
services_down, check_result, check_analysis, check_action = criticalCheckthirteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#26:Check:Cluster:Cluster health'] = [ check_analysis, check_action]
log_file_logger.error('#26: Check result: {}'.format(check_result))
log_file_logger.error('#26: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#26: Relevant cluster services that are down: {}\n'.format(services_down))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#26: Check result: {}'.format(check_result))
log_file_logger.info('#26: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #26:Check:Cluster:Cluster health. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#27:Check:Cluster:Cluster ConfigDB topology
print(' #27:Checking:Cluster:Cluster ConfigDB topology')
log_file_logger.info('#27:Check:Cluster:Cluster ConfigDB topology')
writeFile(report_file, '#27:Check:Cluster:Cluster ConfigDB topology\n\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid,'clusterManagement/list', args.vmanage_port, tokenid))
configDB_count, check_result, check_analysis, check_action = criticalCheckfourteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#27:Check:Cluster:Cluster ConfigDB topology'] = [ check_analysis, check_action]
log_file_logger.error('#27: Check result: {}'.format(check_result))
log_file_logger.error('#27: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#27: : No. of configDB servers in the cluster: {}\n'.format(configDB_count))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#27: Check result: {}'.format(check_result))
log_file_logger.info('#27: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#27: : No. of configDB servers in the cluster: {}\n'.format(configDB_count))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #27:Check:Cluster:Cluster ConfigDB topology. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#28:Check:Cluster:Messaging server
print(' #28:Checking:Cluster:Messaging server')
log_file_logger.info('#28:Check:Cluster:Messaging server')
writeFile(report_file, '#28:Check:Cluster:Messaging server\n\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid,'clusterManagement/list', args.vmanage_port, tokenid))
cluster_msdown,check_result,check_analysis, check_action = criticalCheckfifteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#28:Check:Cluster:Messaging server'] = [ check_analysis, check_action]
log_file_logger.error('#28: Check result: {}'.format(check_result))
log_file_logger.error('#28: Check Analysis: {}'.format(check_analysis))
                    log_file_logger.error('#28: Relevant cluster services that are down: {}\n'.format(cluster_msdown))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#28: Check result: {}'.format(check_result))
log_file_logger.info('#28: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #28:Check:Cluster:Messaging server. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#29:Check:Cluster:DR replication status
print(' #29:Checking:Cluster:DR replication status')
log_file_logger.info('#29:Check:Cluster:DR replication status')
writeFile(report_file, '#29:Check:Cluster:DR replication status\n\n')
try:
dr_data = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid, 'disasterrecovery/details', args.vmanage_port, tokenid))
dr_status, check_action, check_analysis, check_result = criticalChecksixteen(dr_data)
if check_result == 'Failed':
cluster_checks['#29:Check:Cluster:DR replication status'] = [ check_analysis, check_action]
log_file_logger.error('#29: Check result: {}'.format(check_result))
log_file_logger.error('#29: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#29: DR Replication status: {}\n'.format(dr_status))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#29: Check result: {}'.format(check_result))
log_file_logger.info('#29: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #29:Check:Cluster:DR replication status. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#30:Check:Cluster:Intercluster communication
print(' #30:Checking:Cluster:Intercluster communication')
log_file_logger.info('#30:Check:Cluster:Intercluster communication')
writeFile(report_file, '#30:Check:Cluster:Intercluster communication\n\n')
try:
if criticalCheckseventeen.isAlive():
criticalCheckseventeen.join(10)
if not criticalCheckseventeen.result_queue.empty():
ping_output, ping_output_failed, ping_check_result, ping_check_analysis, ping_check_action = criticalCheckseventeen.result_queue.get()
if ping_check_result == 'Failed':
cluster_checks['#30:Check:Cluster:Intercluster communication'] = [ ping_check_analysis, ping_check_action]
log_file_logger.error('#30: Check result: {}'.format(ping_check_result))
log_file_logger.error('#30: Check Analysis: {}'.format(ping_check_analysis))
log_file_logger.error('#30: Cluster nodes with ping failure: {}\n'.format(ping_output_failed))
writeFile(report_file, 'Result: ERROR - {}\n'.format(ping_check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(ping_check_action))
else:
log_file_logger.info('#30: Check result: {}'.format(ping_check_result))
log_file_logger.info('#30: Check Analysis: {}'.format(ping_check_analysis))
log_file_logger.info('#30: Cluster nodes details: {}\n'.format(ping_output))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(ping_check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #30:Check:Cluster:Intercluster communication. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#Logging out of the Session using jsessionid
log_file_logger.info('Logging out of the Session')
sessionLogout(vmanage_lo_ip, jsessionid, args.vmanage_port)
log_file_logger.info('Successfully closed the connection')
#version equal to or above 20.5
elif version_tuple[0:2] >= ('20','5'):
try:
log_file_logger.info('Generating a JSessionID')
jsessionid = generateSessionIDpy3(vmanage_lo_ip, args.username, password, args.vmanage_port)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
raise SystemExit('\033[1;31m ERROR: Error generating JSessionID, make sure that the username and password entered is correct. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
try:
log_file_logger.info('Generating CSRF Token')
tokenid = CSRFTokenpy3(vmanage_lo_ip,jsessionid,args.vmanage_port)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
raise SystemExit('\033[1;31m ERROR: Error generating CSRF Token. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
#Preliminary data
log_file_logger.info('****Collecting Preliminary Data\n')
print ('****Collecting Preliminary Data\n')
try:
controllers = json.loads(getRequestpy3(version_tuple, vmanage_lo_ip, jsessionid , 'system/device/controllers', args.vmanage_port, tokenid))
controllers_info = controllersInfo(controllers)
log_file_logger.info('Collected controllers information: {}'.format(controllers_info))
system_ip_data = json.loads(getRequestpy3(version_tuple, vmanage_lo_ip, jsessionid ,'device/vmanage', args.vmanage_port, tokenid))
system_ip = system_ip_data['data']['ipAddress']
#system_ip = controllers_info[hostname][1]
log_file_logger.info('Collected vManage System IP address: {}'.format(system_ip))
cpu_speed = cpuSpeed()
log_file_logger.info('Collected vManage CPU Speed GHz: {}'.format(cpu_speed))
cpu_count = cpuCount()
log_file_logger.info('Collected vManage CPU Count: {}'.format(cpu_count))
vedges = json.loads(getRequestpy3(version_tuple, vmanage_lo_ip, jsessionid , 'system/device/vedges', args.vmanage_port , tokenid))
vedge_count,vedge_count_active, vedge_info = vedgeCount(vedges)
log_file_logger.info('Collected xEdge Count: {}'.format(vedge_count))
cluster_size, server_mode, vmanage_info = serverMode(controllers_info)
log_file_logger.info('Collected vManage Cluster Size: {}'.format(cluster_size))
log_file_logger.info('Collected vManage Server Mode: {}'.format(server_mode))
disk_controller = diskController()
log_file_logger.info('Collected vManage Disk Controller Type: {}'.format(disk_controller))
dpi_stats = json.loads(getRequestpy3(version_tuple, vmanage_lo_ip, jsessionid , 'statistics/settings/status', args.vmanage_port, tokenid))
dpi_status = dpiStatus(dpi_stats)
log_file_logger.info('Collected DPI Status: {}'.format(dpi_status))
server_type = serverType()
log_file_logger.info('Collected | |
over the given axis, ignoring NaNs.
"""
y = array(a,subok=True)
if not issubclass(y.dtype.type, _nx.integer):
y[isnan(a)] = _nx.inf
return y.min(axis)
def nanargmin(a, axis=None):
"""Find the indices of the minimium over the given axis ignoring NaNs.
"""
y = array(a, subok=True)
if not issubclass(y.dtype.type, _nx.integer):
y[isnan(a)] = _nx.inf
return y.argmin(axis)
def nanmax(a, axis=None):
"""Find the maximum over the given axis ignoring NaNs.
"""
y = array(a, subok=True)
if not issubclass(y.dtype.type, _nx.integer):
y[isnan(a)] = -_nx.inf
return y.max(axis)
def nanargmax(a, axis=None):
"""Find the maximum over the given axis ignoring NaNs.
"""
y = array(a,subok=True)
if not issubclass(y.dtype.type, _nx.integer):
y[isnan(a)] = -_nx.inf
return y.argmax(axis)
def disp(mesg, device=None, linefeed=True):
"""Display a message to the given device (default is sys.stdout)
with or without a linefeed.
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
# return number of input arguments and
# number of default arguments
import re
def _get_nargs(obj):
if not callable(obj):
raise TypeError, "Object is not callable."
if hasattr(obj,'func_code'):
fcode = obj.func_code
nargs = fcode.co_argcount
if obj.func_defaults is not None:
ndefaults = len(obj.func_defaults)
else:
ndefaults = 0
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
terr = re.compile(r'.*? takes exactly (?P<exargs>\d+) argument(s|) \((?P<gargs>\d+) given\)')
try:
obj()
return 0, 0
except TypeError, msg:
m = terr.match(str(msg))
if m:
nargs = int(m.group('exargs'))
ndefaults = int(m.group('gargs'))
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
raise ValueError, 'failed to determine the number of arguments for %s' % (obj)
class vectorize(object):
"""
vectorize(somefunction, otypes=None, doc=None)
Generalized function class.
Define a vectorized function which takes nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output, evaluating the function over successive
tuples of the input arrays like the python map function except it uses
the broadcasting rules of numpy.
Data-type of output of vectorized is determined by calling the function
with the first element of the input. This can be avoided by specifying
the otypes argument as either a string of typecode characters or a list
of data-types specifiers. There should be one data-type specifier for
each output.
Parameters
----------
f : callable
A Python function or method.
Examples
--------
>>> def myfunc(a, b):
... if a > b:
... return a-b
... else:
... return a+b
>>> vfunc = vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
"""
def __init__(self, pyfunc, otypes='', doc=None):
self.thefunc = pyfunc
self.ufunc = None
nin, ndefault = _get_nargs(pyfunc)
if nin == 0 and ndefault == 0:
self.nin = None
self.nin_wo_defaults = None
else:
self.nin = nin
self.nin_wo_defaults = nin - ndefault
self.nout = None
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError, "invalid otype specified"
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError, "output types must be a string of typecode characters or a list of data-types"
self.lastcallargs = 0
def __call__(self, *args):
# get number of outputs and output types by calling
# the function on the first entries of args
nargs = len(args)
if self.nin:
if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
raise ValueError, "mismatch between python function inputs"\
" and received arguments"
# we need a new ufunc if this is being called with more arguments.
if (self.lastcallargs != nargs):
self.lastcallargs = nargs
self.ufunc = None
self.nout = None
if self.nout is None or self.otypes == '':
newargs = []
for arg in args:
newargs.append(asarray(arg).flat[0])
theout = self.thefunc(*newargs)
if isinstance(theout, tuple):
self.nout = len(theout)
else:
self.nout = 1
theout = (theout,)
if self.otypes == '':
otypes = []
for k in range(self.nout):
otypes.append(asarray(theout[k]).dtype.char)
self.otypes = ''.join(otypes)
# Create ufunc if not already created
if (self.ufunc is None):
self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
# Convert to object arrays first
newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
if self.nout == 1:
_res = array(self.ufunc(*newargs),copy=False,
subok=True,dtype=self.otypes[0])
else:
_res = tuple([array(x,copy=False,subok=True,dtype=c) \
for x, c in zip(self.ufunc(*newargs), self.otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0):
"""Estimate the covariance matrix.
If m is a vector, return the variance. For matrices return the
covariance matrix.
If y is given it is treated as an additional (set of)
variable(s).
Normalization is by (N-1) where N is the number of observations
(unbiased estimate). If bias is 1 then normalization is by N.
If rowvar is non-zero (default), then each row is a variable with
observations in the columns, otherwise each column
is a variable and the observations are in the rows.
"""
X = array(m, ndmin=2, dtype=float)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y),axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if bias:
fact = N*1.0
else:
fact = N-1.0
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0):
"""The correlation coefficients
"""
c = cov(x, y, rowvar, bias)
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
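# Worked example (illustrative, not from the original source): two variables
# observed three times each, stored row-wise as in the default rowvar=1 case.
# With N-1 normalization both variances are 1 and the variables are perfectly
# anti-correlated:
#
#   >>> m = array([[0., 1., 2.], [2., 1., 0.]])
#   >>> cov(m)
#   array([[ 1., -1.],
#          [-1.,  1.]])
#   >>> corrcoef(m)
#   array([[ 1., -1.],
#          [-1.,  1.]])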
def blackman(M):
"""blackman(M) returns the M-point Blackman window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, normalized to one (the value one
appears only if the number of samples is odd), with the first
and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
    .. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function.
References
----------
.. [1] <NAME>, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] <NAME> and <NAME>, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] <NAME>, <NAME>, <NAME>, and <NAME>,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> from numpy import bartlett
>>> bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response:
    >>> from numpy import clip, log10, array, bartlett, linspace
    >>> from scipy.fftpack import fft, fftshift
>>> from matplotlib import pyplot as plt
>>> window = bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.show()
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
>>> plt.title("Frequency response of Bartlett window")
>>> plt.ylabel("Magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.axis('tight'); plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""hanning(M) returns the M-point Hanning window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
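# Hedged sanity check (illustrative, not part of the original module):
# >>> hanning(5)
# array([ 0. ,  0.5,  1. ,  0.5,  0. ])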
def hamming(M):
"""hamming(M) returns the M-point Hamming window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
    return 0.54-0.46*cos(2.0*pi*n/(M-1))
# 2021.05.07-Changed for IPT
# Huawei Technologies Co., Ltd. <<EMAIL>>
import os
from importlib import import_module
import torch
import torch.nn as nn
import torch.nn.parallel as P
import torch.utils.model_zoo
class Model(nn.Module):
def __init__(self, args, ckp):
super(Model, self).__init__()
print('Making model...')
self.args = args
self.scale = args.scale
self.patch_size = args.patch_size
self.idx_scale = 0
self.input_large = (args.model == 'VDSR')
self.self_ensemble = args.self_ensemble
self.precision = args.precision
self.cpu = args.cpu
self.device = torch.device('cpu' if args.cpu else 'cuda')
self.n_GPUs = args.n_GPUs
self.save_models = args.save_models
module = import_module('model.' + args.model.lower())
self.model = module.make_model(args).to(self.device)
if args.precision == 'half':
self.model.half()
self.load(
ckp.get_path('model'),
resume=args.resume,
cpu=args.cpu
)
print(self.model, file=ckp.log_file)
def forward(self, x, idx_scale, opt=None, loss=None, output=None):
self.idx_scale = idx_scale
# Define our training optimizer and loss
self.optimizer = opt
self.loss = loss
if hasattr(self.model, 'set_scale'):
self.model.set_scale(idx_scale)
if self.training:
if self.n_GPUs > 1:
return P.data_parallel(self.model, x, range(self.n_GPUs))
else:
# return self.model(x)
return self.forward_chop_train(x, output=output)
else:
forward_function = self.forward_chop
if self.self_ensemble:
return self.forward_x8(x, forward_function=forward_function)
else:
return forward_function(x)
def save(self, apath, epoch, is_best=False):
save_dirs = [os.path.join(apath, 'model_latest.pt')]
if is_best:
save_dirs.append(os.path.join(apath, 'model_best.pt'))
if self.save_models:
save_dirs.append(
os.path.join(apath, 'model_{}.pt'.format(epoch))
)
for s in save_dirs:
torch.save(self.model.state_dict(), s)
def load(self, apath, pre_train='', resume=-1, cpu=False):
load_from = None
kwargs = {}
if cpu:
kwargs = {'map_location': lambda storage, loc: storage}
if resume == -1:
load_from = torch.load(
os.path.join(apath, 'model_latest.pt'),
**kwargs
)
elif resume == 0:
if pre_train == 'download':
print('Download the model')
dir_model = os.path.join('..', 'models')
os.makedirs(dir_model, exist_ok=True)
load_from = torch.utils.model_zoo.load_url(
self.model.url,
model_dir=dir_model,
**kwargs
)
else:
load_from = torch.load(
os.path.join(apath, 'model_{}.pt'.format(resume)),
**kwargs
)
if load_from:
self.model.load_state_dict(load_from, strict=False)
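    # Hedged note on the load() branches above (added comment, not in the
    # original source): resume == -1 restores model_latest.pt, resume == 0 with
    # pre_train == 'download' fetches pretrained weights from self.model.url,
    # and any other resume value loads model_{resume}.pt; the state dict is
    # applied with strict=False, so partially matching checkpoints still load.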
def forward_x8(self, *args, forward_function=None):
def _transform(v, op):
if self.precision != 'single': v = v.float()
v2np = v.data.cpu().numpy()
if op == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif op == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif op == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half': ret = ret.half()
return ret
list_x = []
for a in args:
x = [a]
for tf in 'v', 'h', 't': x.extend([_transform(_x, tf) for _x in x])
list_x.append(x)
list_y = []
for x in zip(*list_x):
y = forward_function(*x)
if not isinstance(y, list): y = [y]
if not list_y:
list_y = [[_y] for _y in y]
else:
for _list_y, _y in zip(list_y, y): _list_y.append(_y)
for _list_y in list_y:
for i in range(len(_list_y)):
if i > 3:
_list_y[i] = _transform(_list_y[i], 't')
if i % 4 > 1:
_list_y[i] = _transform(_list_y[i], 'h')
if (i % 4) % 2 == 1:
_list_y[i] = _transform(_list_y[i], 'v')
y = [torch.cat(_y, dim=0).mean(dim=0, keepdim=True) for _y in list_y]
if len(y) == 1: y = y[0]
return y
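    # Hedged note on forward_x8 above (added comment, not in the original
    # source): this is the usual x8 self-ensemble. Each input is expanded into
    # 8 variants (identity, vertical flip, horizontal flip, transpose and their
    # combinations), each variant is passed through forward_function, the
    # inverse transforms are applied to the outputs, and the 8 results are
    # averaged along the batch dimension.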
def train_step(self, sr, norain):
loss = self.loss(sr.cuda(), norain.cuda())
loss.backward()
self.optimizer.step()
def forward_chop(self, x, shave=12):
x.cpu()
batchsize = self.args.crop_batch_size
self.batchsize_te = self.args.test_batch_size
h, w = x.size()[-2:]
padsize = int(self.patch_size)
shave = int(self.patch_size/2)
scale = self.scale[self.idx_scale]
h_cut = (h-padsize) % (int(shave/2))
w_cut = (w-padsize) % (int(shave/2))
x_unfold = torch.nn.functional.unfold(
x, padsize, stride=int(shave/2)).transpose(0, 2).contiguous()
x_hw_cut = x[..., (h-padsize):, (w-padsize):]
y_hw_cut = self.model.forward(x_hw_cut.cuda()).cpu()
x_h_cut = x[..., (h-padsize):, :]
x_w_cut = x[..., :, (w-padsize):]
y_h_cut = self.cut_h(x_h_cut, h, w, h_cut, w_cut,
padsize, shave, scale, batchsize)
y_w_cut = self.cut_w(x_w_cut, h, w, h_cut, w_cut,
padsize, shave, scale, batchsize)
x_h_top = x[..., :padsize, :]
x_w_top = x[..., :, :padsize]
y_h_top = self.cut_h(x_h_top, h, w, h_cut, w_cut,
padsize, shave, scale, batchsize)
y_w_top = self.cut_w(x_w_top, h, w, h_cut, w_cut,
padsize, shave, scale, batchsize)
x_unfold = x_unfold.view(x_unfold.size(0)*self.batchsize_te, -1, padsize, padsize)
y_unfold = []
x_range = x_unfold.size(0)//batchsize + \
(x_unfold.size(0) % batchsize != 0)
x_unfold.cuda()
for i in range(x_range):
y_unfold.append(P.data_parallel(
self.model, x_unfold[i*batchsize:(i+1)*batchsize, ...], range(self.n_GPUs)).cpu())
y_unfold = torch.cat(y_unfold, dim=0)
untr_y = y_unfold.view(
int(y_unfold.size(0)/self.batchsize_te), -1, self.batchsize_te)
input_y = untr_y.transpose(0, 2).contiguous()
y = torch.nn.functional.fold(input_y, ((
h-h_cut)*scale, (w-w_cut)*scale), padsize*scale, stride=int(shave/2*scale))
y[..., :padsize*scale, :] = y_h_top
y[..., :, :padsize*scale] = y_w_top
y_unfold = y_unfold[..., int(shave/2*scale):padsize*scale-int(shave/2*scale), int(
shave/2*scale):padsize*scale-int(shave/2*scale)].contiguous()
untr_y = y_unfold.view(
int(y_unfold.size(0)/self.batchsize_te), -1, self.batchsize_te)
input_y = untr_y.transpose(0, 2).contiguous()
y_inter = torch.nn.functional.fold(input_y, ((
h-h_cut-shave)*scale, (w-w_cut-shave)*scale), padsize*scale-shave*scale, stride=int(shave/2*scale))
y_ones = torch.ones(y_inter.shape, dtype=y_inter.dtype)
divisor = torch.nn.functional.fold(torch.nn.functional.unfold(y_ones, padsize*scale-shave*scale, stride=int(
shave/2*scale)), ((h-h_cut-shave)*scale, (w-w_cut-shave)*scale), padsize*scale-shave*scale, stride=int(shave/2*scale))
y_inter = y_inter/divisor
y[..., int(shave/2*scale):(h-h_cut)*scale-int(shave/2*scale),
int(shave/2*scale):(w-w_cut)*scale-int(shave/2*scale)] = y_inter
y = torch.cat([y[..., :y.size(2)-int((padsize-h_cut)/2*scale), :],
y_h_cut[..., int((padsize-h_cut)/2*scale+0.5):, :]], dim=2)
y_w_cat = torch.cat([y_w_cut[..., :y_w_cut.size(2)-int((padsize-h_cut)/2*scale), :],
y_hw_cut[..., int((padsize-h_cut)/2*scale+0.5):, :]], dim=2)
y = torch.cat([y[..., :, :y.size(3)-int((padsize-w_cut)/2*scale)],
y_w_cat[..., :, int((padsize-w_cut)/2*scale+0.5):]], dim=3)
return y.cuda()
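    # Hedged worked example for forward_chop above (illustrative numbers, not
    # from the original code): with patch_size = 48 the tiles are 48x48,
    # shave = 24 and the unfold stride is shave/2 = 12; for a 100x100 input,
    # h_cut = w_cut = (100 - 48) % 12 = 4, so the folded interior covers
    # 96x96 (times the scale factor) and the remaining border is supplied by
    # the dedicated edge passes (cut_h, cut_w) and the x_hw_cut corner crop.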
def cut_h(self, x_h_cut, h, w, h_cut, w_cut, padsize, shave, scale, batchsize):
x_h_cut_unfold = torch.nn.functional.unfold(
x_h_cut, padsize, stride=int(shave/2)).transpose(0, 2).contiguous()
x_h_cut_unfold = x_h_cut_unfold.view(
x_h_cut_unfold.size(0)*self.batchsize_te, -1, padsize, padsize)
x_range = x_h_cut_unfold.size(
0)//batchsize + (x_h_cut_unfold.size(0) % batchsize != 0)
y_h_cut_unfold = []
x_h_cut_unfold.cuda()
for i in range(x_range):
y_h_cut_unfold.append(P.data_parallel(
self.model, x_h_cut_unfold[i*batchsize:(i+1)*batchsize, ...], range(self.n_GPUs)).cpu())
y_h_cut_unfold = torch.cat(y_h_cut_unfold, dim=0)
untr_y = y_h_cut_unfold.view(
int(y_h_cut_unfold.size(0)/self.batchsize_te), -1, self.batchsize_te)
input_y = untr_y.transpose(0, 2).contiguous()
y_h_cut = torch.nn.functional.fold(
input_y, (padsize*scale, (w-w_cut)*scale), padsize*scale, stride=int(shave/2*scale))
y_h_cut_unfold = y_h_cut_unfold[..., :, int(
shave/2*scale):padsize*scale-int(shave/2*scale)].contiguous()
untr_y = y_h_cut_unfold.view(
int(y_h_cut_unfold.size(0)/self.batchsize_te), -1, self.batchsize_te)
input_y = untr_y.transpose(0, 2).contiguous()
y_h_cut_inter = torch.nn.functional.fold(
input_y, (padsize*scale, (w-w_cut-shave)*scale), (padsize*scale, padsize*scale-shave*scale), stride=int(shave/2*scale))
y_ones = torch.ones(y_h_cut_inter.shape, dtype=y_h_cut_inter.dtype)
divisor = torch.nn.functional.fold(torch.nn.functional.unfold(y_ones, (padsize*scale, padsize*scale-shave*scale), stride=int(
shave/2*scale)), (padsize*scale, (w-w_cut-shave)*scale), (padsize*scale, padsize*scale-shave*scale), stride=int(shave/2*scale))
y_h_cut_inter = y_h_cut_inter/divisor
y_h_cut[..., :, int(shave/2*scale):(w-w_cut)*scale -
int(shave/2*scale)] = y_h_cut_inter
return y_h_cut
def cut_w(self, x_w_cut, h, w, h_cut, w_cut, padsize, shave, scale, batchsize):
x_w_cut_unfold = torch.nn.functional.unfold(
x_w_cut, padsize, stride=int(shave/2)).transpose(0, 2).contiguous()
x_w_cut_unfold = x_w_cut_unfold.view(
x_w_cut_unfold.size(0)*self.batchsize_te, -1, padsize, padsize)
x_range = x_w_cut_unfold.size(
0)//batchsize + (x_w_cut_unfold.size(0) % batchsize != 0)
y_w_cut_unfold = []
x_w_cut_unfold.cuda()
for i in range(x_range):
y_w_cut_unfold.append(P.data_parallel(
self.model, x_w_cut_unfold[i*batchsize:(i+1)*batchsize, ...], range(self.n_GPUs)).cpu())
y_w_cut_unfold = torch.cat(y_w_cut_unfold, dim=0)
untr_y = y_w_cut_unfold.view(
int(y_w_cut_unfold.size(0)/self.batchsize_te), -1, self.batchsize_te)
input_y = untr_y.transpose(0, 2).contiguous()
y_w_cut = torch.nn.functional.fold(input_y, ((
h-h_cut)*scale, padsize*scale), padsize*scale, stride=int(shave/2*scale))
y_w_cut_unfold = y_w_cut_unfold[..., int(
shave/2*scale):padsize*scale-int(shave/2*scale), :].contiguous()
untr_y = y_w_cut_unfold.view(
int(y_w_cut_unfold.size(0)/self.batchsize_te), -1, self.batchsize_te)
input_y = untr_y.transpose(0, 2).contiguous()
y_w_cut_inter = torch.nn.functional.fold(input_y, ((
h-h_cut-shave)*scale, padsize*scale), (padsize*scale-shave*scale, padsize*scale), stride=int(shave/2*scale))
y_ones = torch.ones(y_w_cut_inter.shape, dtype=y_w_cut_inter.dtype)
divisor = torch.nn.functional.fold(torch.nn.functional.unfold(y_ones, (padsize*scale-shave*scale, padsize*scale), stride=int(
shave/2*scale)), ((h-h_cut-shave)*scale, padsize*scale), (padsize*scale-shave*scale, padsize*scale), stride=int(shave/2*scale))
y_w_cut_inter = y_w_cut_inter/divisor
y_w_cut[..., int(shave/2*scale):(h-h_cut)*scale -
int(shave/2*scale), :] = y_w_cut_inter
return y_w_cut
def forward_chop_train(self, x, shave=12, output=None):
x.cpu()
output.cpu()
batchsize = self.args.crop_batch_size
self.batchsize_tr = self.args.batch_size
# print("\n\n\n x_shape ==== ", x.shape)
h, w = x.size()[-2:]
padsize = int(self.patch_size)
shave = int(self.patch_size/2)
scale = self.scale[self.idx_scale]
h_cut = (h-padsize)%(int(shave/2))
w_cut = (w-padsize)%(int(shave/2))
x_unfold = torch.nn.functional.unfold(x, padsize, stride=int(shave/2)).transpose(0,2).contiguous()
x_hw_cut = x[...,(h-padsize):,(w-padsize):]
output_unfold = torch.nn.functional.unfold(
output, padsize, stride=int(shave/2)).transpose(0, 2).contiguous()
output_hw_cut = output[..., (h-padsize):, (w-padsize):]
self.optimizer.zero_grad()
pred = self.model.forward(x_hw_cut.cuda())
if self.training:
assert output is not None
self.train_step(pred, output_hw_cut)
y_hw_cut = pred.cpu()
x_h_cut = x[...,(h-padsize):,:]
x_w_cut = x[...,:,(w-padsize):]
output_h_cut = output[..., (h-padsize):, :]
output_w_cut = output[..., :, (w-padsize):]
y_h_cut = self.cut_h_train(x_h_cut, h, w, h_cut, w_cut, padsize,
shave, scale, batchsize, output_h_cut=output_h_cut)
y_w_cut = self.cut_w_train(x_w_cut, h, w, h_cut, w_cut, padsize,
shave, scale, batchsize, output_w_cut=output_w_cut)
x_h_top = x[...,:padsize,:]
x_w_top = x[...,:,:padsize]
output_h_top = output[..., :padsize, :]
output_w_top = output[..., :, :padsize]
y_h_top = self.cut_h_train(x_h_top, h, w, h_cut, w_cut, padsize,
shave, scale, batchsize, output_h_cut=output_h_top)
y_w_top = self.cut_w_train(x_w_top, h, w, h_cut, w_cut, padsize,
shave, scale, batchsize, output_w_cut=output_w_top)
# print("\n\n\n x_unfold_shape === ", x_unfold.shape)
x_unfold = x_unfold.view(x_unfold.size(0)*self.batchsize_tr,-1,padsize,padsize)
# print("\n\n\n x_unfold__view_shape === ", x_unfold.shape)
output_unfold = output_unfold.view(output_unfold.size(
0)*self.batchsize_tr, -1, padsize, padsize)
y_unfold = []
x_range = x_unfold.size(0)//batchsize + (x_unfold.size(0)%batchsize !=0)
x_unfold.cuda()
for i in range(x_range):
self.optimizer.zero_grad()
pred = P.data_parallel(
self.model, x_unfold[i*batchsize:(i+1)*batchsize, ...], range(self.n_GPUs))
if self.training:
assert output is not None
self.train_step(
pred, output_unfold[i*batchsize:(i+1)*batchsize, ...])
y_unfold.append(pred.cpu())
y_unfold = torch.cat(y_unfold,dim=0)
untr_y = y_unfold.view(
int(y_unfold.size(0)/self.batchsize_tr), -1, self.batchsize_tr)
input_y = untr_y.transpose(0, 2).contiguous()
y = torch.nn.functional.fold(input_y, ((
h-h_cut)*scale, (w-w_cut)*scale), padsize*scale, stride=int(shave/2*scale))
y[...,:padsize*scale,:] = y_h_top
y[...,:,:padsize*scale] = y_w_top
y_unfold = y_unfold[...,int(shave/2*scale):padsize*scale-int(shave/2*scale),int(shave/2*scale):padsize*scale-int(shave/2*scale)].contiguous()
untr_y = y_unfold.view(
int(y_unfold.size(0)/self.batchsize_tr), -1, self.batchsize_tr)
input_y = untr_y.transpose(0, 2).contiguous()
y_inter = torch.nn.functional.fold(input_y, ((
h-h_cut-shave)*scale, (w-w_cut-shave)*scale), padsize*scale-shave*scale, stride=int(shave/2*scale))
y_ones = torch.ones(y_inter.shape, dtype=y_inter.dtype)
divisor = torch.nn.functional.fold(torch.nn.functional.unfold(y_ones, padsize*scale-shave*scale, stride=int(shave/2*scale)),((h-h_cut-shave)*scale,(w-w_cut-shave)*scale), padsize*scale-shave*scale, stride=int(shave/2*scale))
y_inter = y_inter/divisor
y[...,int(shave/2*scale):(h-h_cut)*scale-int(shave/2*scale),int(shave/2*scale):(w-w_cut)*scale-int(shave/2*scale)] = y_inter
y = torch.cat([y[...,:y.size(2)-int((padsize-h_cut)/2*scale),:],y_h_cut[...,int((padsize-h_cut)/2*scale+0.5):,:]],dim=2)
y_w_cat = torch.cat([y_w_cut[...,:y_w_cut.size(2)-int((padsize-h_cut)/2*scale),:],y_hw_cut[...,int((padsize-h_cut)/2*scale+0.5):,:]],dim=2)
y = torch.cat([y[...,:,:y.size(3)-int((padsize-w_cut)/2*scale)],y_w_cat[...,:,int((padsize-w_cut)/2*scale+0.5):]],dim=3)
return y.cuda()
def cut_h_train(self, x_h_cut, h, w, h_cut, w_cut, padsize, shave, scale, batchsize, output_h_cut=None):
# print("\n\n\n x_h_cut_shape ==== ", x_h_cut.shape)
unfold = torch.nn.functional.unfold(
x_h_cut, padsize, stride=int(shave/2))
# print("\n\n\n x_unfold_shape ==== ", unfold.shape)
x_h_cut_unfold = unfold.transpose(0,2).contiguous()
# print("\n\n\n x_h_cut_unfold_shape ==== ", x_h_cut_unfold.shape)
x_h_cut_unfold = x_h_cut_unfold.view(x_h_cut_unfold.size(-1)*x_h_cut_unfold.size(0),-1,padsize,padsize)
# print("\n\n\n x_h_cut_unfold_shape ==== ", x_h_cut_unfold.shape)
output_h_cut_unfold = torch.nn.functional.unfold(
            output_h_cut, padsize, stride=int(shave/2)).transpose(0, 2).contiguous()
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Determine functional groups present in a Molecule.
"""
import copy
from pymatgen.analysis.graphs import MoleculeGraph
from pymatgen.analysis.local_env import OpenBabelNN
from pymatgen.core.structure import Molecule
from pymatgen.io.babel import BabelMolAdaptor
try:
import networkx as nx
import networkx.algorithms.isomorphism as iso
except ImportError:
raise ImportError("pymatgen.analysis.functional_groups requires the NetworkX graph library to be installed.")
__author__ = "<NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
__date__ = "July 2018"
__credit__ = "<NAME>"
class FunctionalGroupExtractor:
"""
This class is used to algorithmically parse a molecule (represented by an
instance of pymatgen.analysis.graphs.MoleculeGraph) and determine arbitrary
functional groups.
"""
def __init__(self, molecule, optimize=False):
"""
Instantiation method for FunctionalGroupExtractor.
:param molecule: Either a filename, a pymatgen.core.structure.Molecule
object, or a pymatgen.analysis.graphs.MoleculeGraph object.
:param optimize: Default False. If True, then the input molecule will be
modified, adding Hydrogens, performing a simple conformer search,
etc.
"""
self.molgraph = None
if isinstance(molecule, str):
try:
if optimize:
obmol = BabelMolAdaptor.from_file(molecule, file_format="mol")
# OBMolecule does not contain pymatgen Molecule information
# So, we need to wrap the obmol in a BabelMolAdapter
obmol.add_hydrogen()
obmol.make3d()
obmol.localopt()
self.molecule = obmol.pymatgen_mol
else:
self.molecule = Molecule.from_file(molecule)
except OSError:
raise ValueError("Input must be a valid molecule file, a Molecule object, or a MoleculeGraph object.")
elif isinstance(molecule, Molecule):
if optimize:
obmol = BabelMolAdaptor(molecule)
obmol.add_hydrogen()
obmol.make3d()
obmol.localopt()
self.molecule = obmol.pymatgen_mol
else:
self.molecule = molecule
elif isinstance(molecule, MoleculeGraph):
if optimize:
obmol = BabelMolAdaptor(molecule.molecule)
obmol.add_hydrogen()
obmol.make3d()
obmol.localopt()
self.molecule = obmol.pymatgen_mol
else:
self.molecule = molecule.molecule
self.molgraph = molecule
else:
raise ValueError("Input to FunctionalGroupExtractor must be str, Molecule, or MoleculeGraph.")
if self.molgraph is None:
self.molgraph = MoleculeGraph.with_local_env_strategy(self.molecule, OpenBabelNN())
# Assign a specie and coordinates to each node in the graph,
# corresponding to the Site in the Molecule object
self.molgraph.set_node_attributes()
self.species = nx.get_node_attributes(self.molgraph.graph, "specie")
def get_heteroatoms(self, elements=None):
"""
Identify non-H, non-C atoms in the MoleculeGraph, returning a list of
their node indices.
:param elements: List of elements to identify (if only certain
functional groups are of interest).
:return: set of ints representing node indices
"""
heteroatoms = set()
for node in self.molgraph.graph.nodes():
if elements is not None:
if str(self.species[node]) in elements:
heteroatoms.add(node)
else:
if str(self.species[node]) not in ["C", "H"]:
heteroatoms.add(node)
return heteroatoms
def get_special_carbon(self, elements=None):
"""
Identify Carbon atoms in the MoleculeGraph that fit the characteristics
        defined by Ertl (2017), returning a list of their node indices.
The conditions for marking carbon atoms are (quoted from Ertl):
"- atoms connected by non-aromatic double or triple bond to any
heteroatom
- atoms in nonaromatic carbon–carbon double or triple bonds
- acetal carbons, i.e. sp3 carbons connected to two or more oxygens,
nitrogens or sulfurs; these O, N or S atoms must have only single bonds
- all atoms in oxirane, aziridine and thiirane rings"
:param elements: List of elements that will qualify a carbon as special
(if only certain functional groups are of interest).
Default None.
:return: set of ints representing node indices
"""
specials = set()
# For this function, only carbons are considered
carbons = [n for n in self.molgraph.graph.nodes if str(self.species[n]) == "C"]
# Condition one: double/triple bonds to heteroatoms
for node in carbons:
neighbors = self.molgraph.graph[node]
for neighbor, attributes in neighbors.items():
if elements is not None:
if str(self.species[neighbor]) in elements and int(attributes[0]["weight"]) in [2, 3]:
specials.add(node)
else:
if str(self.species[neighbor]) not in ["C", "H"] and int(attributes[0]["weight"]) in [2, 3]:
specials.add(node)
# Condition two: carbon-carbon double & triple bonds
for node in carbons:
neighbors = self.molgraph.graph[node]
for neighbor, attributes in neighbors.items():
if str(self.species[neighbor]) == "C" and int(attributes[0]["weight"]) in [2, 3]:
specials.add(node)
specials.add(neighbor)
# Condition three: Acetal carbons
for node in carbons:
neighbors = self.molgraph.graph[node]
neighbor_spec = [str(self.species[n]) for n in neighbors.keys()]
ons = len([n for n in neighbor_spec if n in ["O", "N", "S"]])
if len(neighbors.keys()) == 4 and ons >= 2:
specials.add(node)
# Condition four: oxirane/aziridine/thiirane rings
rings = self.molgraph.find_rings()
rings_indices = [set(sum(ring, ())) for ring in rings]
for ring in rings_indices:
ring_spec = sorted(str(self.species[node]) for node in ring)
# All rings of interest are three-member rings
if len(ring) == 3 and ring_spec in [
["C", "C", "O"],
["C", "C", "N"],
["C", "C", "S"],
]:
for node in ring:
if node in carbons:
specials.add(node)
return specials
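    # Hedged illustration (added comment, not from the original class): for
    # acetone (CH3-CO-CH3) the carbonyl carbon is marked by condition one,
    # since it carries a non-aromatic double bond to an oxygen heteroatom,
    # while the two methyl carbons remain unmarked.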
def link_marked_atoms(self, atoms):
"""
Take a list of marked "interesting" atoms (heteroatoms, special carbons)
and attempt to connect them, returning a list of disjoint groups of
special atoms (and their connected hydrogens).
:param atoms: set of marked "interesting" atoms, presumably identified
using other functions in this class.
:return: list of sets of ints, representing groups of connected atoms
"""
# We will add hydrogens to functional groups
hydrogens = {n for n in self.molgraph.graph.nodes if str(self.species[n]) == "H"}
# Graph representation of only marked atoms
subgraph = self.molgraph.graph.subgraph(list(atoms)).to_undirected()
func_grps = []
for func_grp in nx.connected_components(subgraph):
grp_hs = set()
for node in func_grp:
neighbors = self.molgraph.graph[node]
for neighbor in neighbors.keys():
# Add all associated hydrogens into the functional group
if neighbor in hydrogens:
grp_hs.add(neighbor)
func_grp = func_grp.union(grp_hs)
func_grps.append(func_grp)
return func_grps
def get_basic_functional_groups(self, func_groups=None):
"""
Identify functional groups that cannot be identified by the Ertl method
of get_special_carbon and get_heteroatoms, such as benzene rings, methyl
groups, and ethyl groups.
TODO: Think of other functional groups that are important enough to be
added (ex: do we need ethyl, butyl, propyl?)
:param func_groups: List of strs representing the functional groups of
interest. Default to None, meaning that all of the functional groups
defined in this function will be sought.
:return: list of sets of ints, representing groups of connected atoms
"""
strat = OpenBabelNN()
hydrogens = {n for n in self.molgraph.graph.nodes if str(self.species[n]) == "H"}
carbons = [n for n in self.molgraph.graph.nodes if str(self.species[n]) == "C"]
if func_groups is None:
func_groups = ["methyl", "phenyl"]
results = []
if "methyl" in func_groups:
for node in carbons:
neighbors = strat.get_nn_info(self.molecule, node)
hs = {n["site_index"] for n in neighbors if n["site_index"] in hydrogens}
# Methyl group is CH3, but this will also catch methane
if len(hs) >= 3:
hs.add(node)
results.append(hs)
if "phenyl" in func_groups:
rings_indices = [set(sum(ring, ())) for ring in self.molgraph.find_rings()]
possible_phenyl = [r for r in rings_indices if len(r) == 6]
for ring in possible_phenyl:
# Phenyl group should have only one (0 for benzene) member whose
# neighbors are not two carbons and one hydrogen
num_deviants = 0
for node in ring:
neighbors = strat.get_nn_info(self.molecule, node)
neighbor_spec = sorted(str(self.species[n["site_index"]]) for n in neighbors)
if neighbor_spec != ["C", "C", "H"]:
num_deviants += 1
if num_deviants <= 1:
for node in ring:
ring_group = copy.deepcopy(ring)
neighbors = self.molgraph.graph[node]
# Add hydrogens to the functional group
for neighbor in neighbors.keys():
if neighbor in hydrogens:
ring_group.add(neighbor)
results.append(ring_group)
return results
def get_all_functional_groups(self, elements=None, func_groups=None, catch_basic=True):
"""
Identify all functional groups (or all within a certain subset) in the
molecule, combining the methods described above.
:param elements: List of elements that will qualify a carbon as special
(if only certain functional groups are of interest).
Default None.
:param func_groups: List of strs representing the functional groups of
interest. Default to None, meaning that all of the functional groups
defined in this function will be sought.
:param catch_basic: bool. If True, use get_basic_functional_groups and
other methods
:return: list of sets of ints, representing groups of connected atoms
"""
heteroatoms = self.get_heteroatoms(elements=elements)
special_cs = self.get_special_carbon(elements=elements)
groups = self.link_marked_atoms(heteroatoms.union(special_cs))
if catch_basic:
groups += self.get_basic_functional_groups(func_groups=func_groups)
return groups
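    # Hedged usage sketch ("mol.xyz" is a hypothetical input file, not part of
    # the original module):
    # >>> extractor = FunctionalGroupExtractor("mol.xyz")
    # >>> groups = extractor.get_all_functional_groups(elements=["O", "N"])
    # >>> extractor.categorize_functional_groups(groups)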
def categorize_functional_groups(self, groups):
"""
Determine classes of functional groups present in a set.
:param groups: Set of functional groups.
:return: dict containing representations of the groups, the indices of
where the group occurs in the MoleculeGraph, and how many of each
type of group there is.
"""
categories = {}
em = iso.numerical_edge_match("weight", 1) # pylint: disable=E1102
nm = iso.categorical_node_match("specie", "C")
for group in groups:
atoms = [self.molecule[a] for a in group]
species = [a.specie for a in atoms]
coords = [a.coords for a in atoms]
adaptor = BabelMolAdaptor(Molecule(species, coords))
# Use Canonical SMILES to ensure uniqueness
            smiles
0.53958),
(0.00000, 0.47610, 0.52371),
(0.00000, 0.49197, 0.50784),
(0.00000, 0.50784, 0.49197),
(0.00000, 0.52371, 0.47610),
(0.00000, 0.53958, 0.46023),
(0.00000, 0.55545, 0.44436),
(0.00000, 0.57132, 0.42849),
(0.00000, 0.58719, 0.41262),
(0.00000, 0.60306, 0.39675),
(0.00000, 0.61893, 0.38088),
(0.00000, 0.63480, 0.36501),
(0.00000, 0.65067, 0.34914),
(0.00000, 0.66654, 0.33327),
(0.00000, 0.68241, 0.31740),
(0.00000, 0.69828, 0.30153),
(0.00000, 0.71415, 0.28566),
(0.00000, 0.73002, 0.26979),
(0.00000, 0.74589, 0.25392),
(0.00000, 0.76176, 0.23805),
(0.00000, 0.77763, 0.22218),
(0.00000, 0.79350, 0.20631),
(0.00000, 0.80937, 0.19044),
(0.00000, 0.82524, 0.17457),
(0.00000, 0.84111, 0.15870),
(0.00000, 0.85698, 0.14283),
(0.00000, 0.87285, 0.12696),
(0.00000, 0.88872, 0.11109),
(0.00000, 0.90459, 0.09522),
(0.00000, 0.92046, 0.07935),
(0.00000, 0.93633, 0.06348),
(0.00000, 0.95220, 0.04761),
(0.00000, 0.96807, 0.03174),
(0.00000, 0.98394, 0.01587),
(0.00000, 0.99981, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.01587, 1.00000, 0.00000),
(0.03174, 1.00000, 0.00000),
(0.04761, 1.00000, 0.00000),
(0.06348, 1.00000, 0.00000),
(0.07935, 1.00000, 0.00000),
(0.09522, 1.00000, 0.00000),
(0.11109, 1.00000, 0.00000),
(0.12696, 1.00000, 0.00000),
(0.14283, 1.00000, 0.00000),
(0.15870, 1.00000, 0.00000),
(0.17457, 1.00000, 0.00000),
(0.19044, 1.00000, 0.00000),
(0.20631, 1.00000, 0.00000),
(0.22218, 1.00000, 0.00000),
(0.23805, 1.00000, 0.00000),
(0.25392, 1.00000, 0.00000),
(0.26979, 1.00000, 0.00000),
(0.28566, 1.00000, 0.00000),
(0.30153, 1.00000, 0.00000),
(0.31740, 1.00000, 0.00000),
(0.33327, 1.00000, 0.00000),
(0.34914, 1.00000, 0.00000),
(0.36501, 1.00000, 0.00000),
(0.38088, 1.00000, 0.00000),
(0.39675, 1.00000, 0.00000),
(0.41262, 1.00000, 0.00000),
(0.42849, 1.00000, 0.00000),
(0.44436, 1.00000, 0.00000),
(0.46023, 1.00000, 0.00000),
(0.47610, 1.00000, 0.00000),
(0.49197, 1.00000, 0.00000),
(0.50784, 1.00000, 0.00000),
(0.52371, 1.00000, 0.00000),
(0.53958, 1.00000, 0.00000),
(0.55545, 1.00000, 0.00000),
(0.57132, 1.00000, 0.00000),
(0.58719, 1.00000, 0.00000),
(0.60306, 1.00000, 0.00000),
(0.61893, 1.00000, 0.00000),
(0.63480, 1.00000, 0.00000),
(0.65067, 1.00000, 0.00000),
(0.66654, 1.00000, 0.00000),
(0.68241, 1.00000, 0.00000),
(0.69828, 1.00000, 0.00000),
(0.71415, 1.00000, 0.00000),
(0.73002, 1.00000, 0.00000),
(0.74589, 1.00000, 0.00000),
(0.76176, 1.00000, 0.00000),
(0.77763, 1.00000, 0.00000),
(0.79350, 1.00000, 0.00000),
(0.80937, 1.00000, 0.00000),
(0.82524, 1.00000, 0.00000),
(0.84111, 1.00000, 0.00000),
(0.85698, 1.00000, 0.00000),
(0.87285, 1.00000, 0.00000),
(0.88872, 1.00000, 0.00000),
(0.90459, 1.00000, 0.00000),
(0.92046, 1.00000, 0.00000),
(0.93633, 1.00000, 0.00000),
(0.95220, 1.00000, 0.00000),
(0.96807, 1.00000, 0.00000),
(0.98394, 1.00000, 0.00000),
(0.99981, 1.00000, 0.00000),
(1.00000, 0.99981, 0.00000),
(1.00000, 0.98394, 0.00000),
(1.00000, 0.96807, 0.00000),
(1.00000, 0.95220, 0.00000),
(1.00000, 0.93633, 0.00000),
(1.00000, 0.92046, 0.00000),
(1.00000, 0.90459, 0.00000),
(1.00000, 0.88872, 0.00000),
(1.00000, 0.87285, 0.00000),
(1.00000, 0.85698, 0.00000),
(1.00000, 0.84111, 0.00000),
(1.00000, 0.82524, 0.00000),
(1.00000, 0.80937, 0.00000),
(1.00000, 0.79350, 0.00000),
(1.00000, 0.77763, 0.00000),
(1.00000, 0.76176, 0.00000),
(1.00000, 0.74589, 0.00000),
(1.00000, 0.73002, 0.00000),
(1.00000, 0.71415, 0.00000),
(1.00000, 0.69828, 0.00000),
(1.00000, 0.68241, 0.00000),
(1.00000, 0.66654, 0.00000),
(1.00000, 0.65067, 0.00000),
(1.00000, 0.63480, 0.00000),
(1.00000, 0.61893, 0.00000),
(1.00000, 0.60306, 0.00000),
(1.00000, 0.58719, 0.00000),
(1.00000, 0.57132, 0.00000),
(1.00000, 0.55545, 0.00000),
(1.00000, 0.53958, 0.00000),
(1.00000, 0.52371, 0.00000),
(1.00000, 0.50784, 0.00000),
(1.00000, 0.49197, 0.00000),
(1.00000, 0.47610, 0.00000),
(1.00000, 0.46023, 0.00000),
(1.00000, 0.44436, 0.00000),
(1.00000, 0.42849, 0.00000),
(1.00000, 0.41262, 0.00000),
(1.00000, 0.39675, 0.00000),
(1.00000, 0.38088, 0.00000),
(1.00000, 0.36501, 0.00000),
(1.00000, 0.34914, 0.00000),
(1.00000, 0.33327, 0.00000),
(1.00000, 0.31740, 0.00000),
(1.00000, 0.30153, 0.00000),
(1.00000, 0.28566, 0.00000),
(1.00000, 0.26979, 0.00000),
(1.00000, 0.25392, 0.00000),
(1.00000, 0.23805, 0.00000),
(1.00000, 0.22218, 0.00000),
(1.00000, 0.20631, 0.00000),
(1.00000, 0.19044, 0.00000),
(1.00000, 0.17457, 0.00000),
(1.00000, 0.15870, 0.00000),
(1.00000, 0.14283, 0.00000),
(1.00000, 0.12696, 0.00000),
(1.00000, 0.11109, 0.00000),
(1.00000, 0.09522, 0.00000),
(1.00000, 0.07935, 0.00000),
(1.00000, 0.06348, 0.00000),
(1.00000, 0.04761, 0.00000),
(1.00000, 0.03174, 0.00000),
(1.00000, 0.01587, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_idl12 = (
(0.00000, 0.00000, 0.00000), # noqa
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87451, 0.74510, 0.74510),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow1 = (
(0.00000, 0.00000, 0.16471),
(0.02745, 0.00000, 0.18431),
(0.05882, 0.00000, 0.20000),
(0.08627, 0.00000, 0.21961),
(0.11373, 0.00000, 0.23922),
    (0.14510, 0.00000,
import torch
import numpy as np
import copy
import itertools
from offpolicy.utils.util import huber_loss, mse_loss, to_torch
from offpolicy.utils.popart import PopArt
from offpolicy.algorithms.base.trainer import Trainer
class R_MADDPG(Trainer):
def __init__(self, args, num_agents, policies, policy_mapping_fn, device=None, episode_length=None, actor_update_interval=1):
"""
Trainer class for recurrent MADDPG/MATD3. See parent class for more information.
:param episode_length: (int) maximum length of an episode.
:param actor_update_interval: (int) number of critic updates to perform between every update to the actor.
"""
self.args = args
self.use_popart = self.args.use_popart
self.use_value_active_masks = self.args.use_value_active_masks
self.use_per = self.args.use_per
self.per_eps = self.args.per_eps
self.use_huber_loss = self.args.use_huber_loss
self.huber_delta = self.args.huber_delta
self.tpdv = dict(dtype=torch.float32, device=device)
if episode_length is None:
self.episode_length = self.args.episode_length
else:
self.episode_length = episode_length
self.num_agents = num_agents
self.policies = policies
self.policy_mapping_fn = policy_mapping_fn
self.policy_ids = sorted(list(self.policies.keys()))
self.policy_agents = {policy_id: sorted(
[agent_id for agent_id in range(self.num_agents) if self.policy_mapping_fn(agent_id) == policy_id]) for
policy_id in
self.policies.keys()}
if self.use_popart:
self.value_normalizer = {policy_id: PopArt(1) for policy_id in self.policies.keys()}
self.actor_update_interval = actor_update_interval
self.num_updates = {p_id : 0 for p_id in self.policy_ids}
self.use_same_share_obs = self.args.use_same_share_obs
def get_update_info(self, update_policy_id, obs_batch, act_batch, avail_act_batch):
"""
Form centralized observation and action info.
:param update_policy_id: (str) id of policy being updated.
:param obs_batch: (np.ndarray) batch of observation sequences sampled from buffer.
:param act_batch: (np.ndarray) batch of action sequences sampled from buffer.
:param avail_act_batch: (np.ndarray) batch of available action sequences sampled from buffer. None if environment does not limit actions.
:return cent_act_sequence_critic: (np.ndarray) batch of centralized action sequences for critic input.
:return act_sequences: (list) list of action sequences corresponding to each agent.
:return act_sequence_replace_ind_start: (int) index of act_sequences from which to replace actions for actor update.
        :return cent_nact_sequence: (np.ndarray) batch of centralized next step action sequences.
"""
act_sequences = []
nact_sequences = []
act_sequence_replace_ind_start = None
# iterate through policies to get the target acts and other centralized info
ind = 0
for p_id in self.policy_ids:
policy = self.policies[p_id]
if p_id == update_policy_id:
# where to start replacing actor actions from during actor update
act_sequence_replace_ind_start = ind
num_pol_agents = len(self.policy_agents[p_id])
act_sequences.append(list(act_batch[p_id]))
batched_obs_seq = np.concatenate(obs_batch[p_id], axis=1)
# same with buffer actions and available actions
batched_act_seq = np.concatenate(act_batch[p_id], axis=1)
if avail_act_batch[p_id] is not None:
batched_avail_act_seq = np.concatenate(avail_act_batch[p_id], axis=1)
else:
batched_avail_act_seq = None
total_batch_size = batched_obs_seq.shape[1]
batch_size = total_batch_size // num_pol_agents
# no gradient tracking is necessary for target actions
with torch.no_grad():
# step target actor through the first actions
if isinstance(policy.act_dim, np.ndarray):
# multidiscrete case
sum_act_dim = int(sum(policy.act_dim))
else:
sum_act_dim = policy.act_dim
batched_prev_act_seq = np.concatenate((np.zeros((1, total_batch_size, sum_act_dim), dtype=np.float32), batched_act_seq[:-1]))
pol_nact_seq, _, _ = policy.get_actions(batched_obs_seq,
batched_prev_act_seq,
policy.init_hidden(-1, total_batch_size),
available_actions=batched_avail_act_seq, use_target=True)
# remove the first timestep for next actions
pol_nact_seq = pol_nact_seq[1:]
# separate the actions into individual agent actions
agent_nact_seqs = pol_nact_seq.cpu().split(split_size=batch_size, dim=1)
# cat to form centralized next step action
nact_sequences.append(torch.cat(agent_nact_seqs, dim=-1))
# increase ind by number agents just processed
ind += num_pol_agents
# form centralized observations and actions by concatenating
# flatten list of lists
act_sequences = list(itertools.chain.from_iterable(act_sequences))
cent_act_sequence_critic = np.concatenate(act_sequences, axis=-1)
cent_nact_sequence = np.concatenate(nact_sequences, axis=-1)
return cent_act_sequence_critic, act_sequences, act_sequence_replace_ind_start, cent_nact_sequence
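    # Hedged note on get_update_info above (added comment, not in the original
    # source): act_sequences keeps one per-agent action sequence per entry, so
    # the actor update can swap in the updated policy's own actions starting at
    # act_sequence_replace_ind_start, while cent_act_sequence_critic and
    # cent_nact_sequence concatenate the per-agent actions along the last
    # dimension for the centralized critic.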
def train_policy_on_batch(self, update_policy_id, batch):
"""See parent class."""
if self.use_same_share_obs:
return self.shared_train_policy_on_batch(update_policy_id, batch)
else:
            return self.cent_train_policy_on_batch(update_policy_id, batch)
def shared_train_policy_on_batch(self, update_policy_id, batch):
"""Training function when all agents share the same centralized observation. See train_policy_on_batch."""
# unpack the batch
obs_batch, cent_obs_batch, \
act_batch, rew_batch, \
dones_batch, dones_env_batch, \
avail_act_batch, \
importance_weights, idxes = batch
train_info = {}
update_actor = self.num_updates[update_policy_id] % self.actor_update_interval == 0
# number of agents controlled by update policy
num_update_agents = len(self.policy_agents[update_policy_id])
update_policy = self.policies[update_policy_id]
batch_size = obs_batch[update_policy_id].shape[2]
total_batch_size = batch_size * num_update_agents
pol_act_dim = int(sum(update_policy.act_dim)) if isinstance(update_policy.act_dim, np.ndarray) else update_policy.act_dim
rew_sequence = to_torch(rew_batch[update_policy_id][0]).to(**self.tpdv)
# use numpy
env_done_sequence = to_torch(dones_env_batch[update_policy_id]).to(**self.tpdv)
# mask the Q and target Q sequences with shifted dones (assume the first obs in episode is valid)
first_step_dones = torch.zeros((1, env_done_sequence.shape[1], env_done_sequence.shape[2])).to(**self.tpdv)
next_steps_dones = env_done_sequence[: self.episode_length - 1, :, :]
curr_env_dones = torch.cat((first_step_dones, next_steps_dones), dim=0)
# last time step does not matter for current observations
cent_obs_sequence = cent_obs_batch[update_policy_id][:-1]
# first time step does not matter for next step observations
cent_nobs_sequence = cent_obs_batch[update_policy_id][1:]
# group data from agents corresponding to one policy into one larger batch
pol_agents_obs_seq = np.concatenate(obs_batch[update_policy_id], axis=1)[:-1]
if avail_act_batch[update_policy_id] is not None:
pol_agents_avail_act_seq = np.concatenate(avail_act_batch[update_policy_id], axis=1)[:-1]
else:
pol_agents_avail_act_seq = None
pol_prev_buffer_act_seq = np.concatenate((np.zeros((1, total_batch_size, pol_act_dim), dtype=np.float32), np.concatenate(act_batch[update_policy_id][:, :-1], axis=1)))
# get centralized sequence information
cent_act_sequence_buffer, act_sequences, act_sequence_replace_ind_start, cent_nact_sequence = \
self.get_update_info(update_policy_id, obs_batch, act_batch, avail_act_batch)
# Critic update:
predicted_Q_sequences, _ = update_policy.critic(cent_obs_sequence, cent_act_sequence_buffer,
update_policy.init_hidden(-1, batch_size))
# iterate over time to get target Qs since the history at each step should be formed from the buffer sequence
next_Q_sequence = []
# detach gradients since no gradients go through target critic
with torch.no_grad():
target_critic_rnn_state = update_policy.init_hidden(-1, batch_size)
for t in range(self.episode_length):
# update the RNN states based on the buffer sequence
_, target_critic_rnn_state = update_policy.target_critic(cent_obs_sequence[t],
cent_act_sequence_buffer[t],
target_critic_rnn_state)
# get the Q value using the next action taken by the target actor, but don't store the RNN state
next_Q_ts, _ = update_policy.target_critic(cent_nobs_sequence[t],
cent_nact_sequence[t],
target_critic_rnn_state)
next_Q_t = torch.cat(next_Q_ts, dim=-1)
# take min to prevent overestimation bias
next_Q_t, _ = torch.min(next_Q_t, dim=-1, keepdim=True)
next_Q_sequence.append(next_Q_t)
# stack over time
next_Q_sequence = torch.stack(next_Q_sequence)
# mask the next step Qs and form targets; use the env dones as the mask since reward can accumulate even after 1 agent dies
next_Q_sequence = (1 - env_done_sequence) * next_Q_sequence
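        # Hedged note (added comment): the TD target below is
        # r_t + gamma * (1 - done_env_t) * min_i Q_target_i(s_{t+1}, a'_{t+1}),
        # where the min over the target critic ensemble (taken above) curbs
        # overestimation, as in the MATD3-style setup.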
if self.use_popart:
target_Q_sequence = rew_sequence + self.args.gamma * self.value_normalizer[update_policy_id].denormalize(
next_Q_sequence)
nodones_target_Q_sequence = target_Q_sequence[curr_env_dones == 0]
target_Q_sequence[curr_env_dones == 0] = self.value_normalizer[update_policy_id](nodones_target_Q_sequence)
else:
target_Q_sequence = rew_sequence + self.args.gamma * next_Q_sequence
predicted_Q_sequences = [Q_seq * (1 - curr_env_dones) for Q_seq in predicted_Q_sequences]
target_Q_sequence = target_Q_sequence * (1 - curr_env_dones)
# make sure to detach the targets! Loss is MSE loss, but divide by the number of unmasked elements
# Mean bellman error for each timestep
errors = [Q_seq - target_Q_sequence.detach() for Q_seq in predicted_Q_sequences]
if self.use_per:
importance_weights = to_torch(importance_weights).to(**self.tpdv)
# prioritized experience replay
if self.use_huber_loss:
per_batch_critic_loss = [huber_loss(error, self.huber_delta).sum(dim=0).flatten() for error in errors]
else:
per_batch_critic_loss = [mse_loss(error).sum(dim=0).flatten() for error in errors]
# weight each loss element by their importance sample weight
importance_weight_critic_loss = [loss * importance_weights for loss in per_batch_critic_loss]
critic_loss = [loss.sum() / (1 - curr_env_dones).sum() for loss in importance_weight_critic_loss]
critic_loss = torch.stack(critic_loss).sum(dim=0)
# new priorities are a combination of the maximum TD error across sequence and the mean TD error across sequence
td_errors = [error.abs().cpu().detach().numpy() for error in errors]
new_priorities = [((1 - self.args.per_nu) * td_error.mean(axis=0) + self.args.per_nu * td_error.max(axis=0)).flatten() + self.per_eps for td_error in td_errors]
new_priorities = np.stack(new_priorities).mean(axis=0) + self.per_eps
else:
if self.use_huber_loss:
critic_loss = [huber_loss(error, self.huber_delta).sum() / (1 - curr_env_dones).sum() for error in errors]
else:
critic_loss = [mse_loss(error).sum() / (1 - curr_env_dones).sum() for error in errors]
critic_loss = torch.stack(critic_loss).sum(dim=0)
new_priorities = None
update_policy.critic_optimizer.zero_grad()
critic_loss.backward()
critic_grad_norm = torch.nn.utils.clip_grad_norm_(update_policy.critic.parameters(),
self.args.max_grad_norm)
update_policy.critic_optimizer.step()
train_info['critic_loss'] = critic_loss
train_info['critic_grad_norm'] = critic_grad_norm
if update_actor:
# Actor update
# freeze Q-networks
for p in update_policy.critic.parameters():
p.requires_grad = False
agent_Q_sequences = []
# formulate mask to determine how to combine actor output actions with batch output actions
mask_temp = []
for p_id in self.policy_ids:
if isinstance(self.policies[p_id].act_dim, np.ndarray):
# multidiscrete case
sum_act_dim = int(sum(self.policies[p_id].act_dim))
else:
sum_act_dim = self.policies[p_id].act_dim
for _ in self.policy_agents[p_id]:
mask_temp.append(np.zeros(sum_act_dim, dtype=np.float32))
masks = []
done_mask = []
# need to iterate through agents, but only formulate masks at each step
for i in range(num_update_agents):
curr_mask_temp = copy.deepcopy(mask_temp)
curr_mask_temp[act_sequence_replace_ind_start + i] = np.ones(pol_act_dim, dtype=np.float32)
curr_mask_vec = np.concatenate(curr_mask_temp)
# expand this mask into the proper size
curr_mask = np.tile(curr_mask_vec, (batch_size, 1))
masks.append(curr_mask)
# now collect agent dones
# ! use numpy
agent_done_sequence = to_torch(dones_batch[update_policy_id][i])
agent_first_step_dones = torch.zeros((1, agent_done_sequence.shape[1], agent_done_sequence.shape[2]))
agent_next_steps_dones = agent_done_sequence[: self.episode_length - 1, :, :]
curr_agent_dones = torch.cat((agent_first_step_dones, agent_next_steps_dones), dim=0)
done_mask.append(curr_agent_dones)
# cat masks and form into torch tensors
mask = to_torch(np.concatenate(masks)).to(**self.tpdv)
done_mask = torch.cat(done_mask, dim=1).to(**self.tpdv)
# get all the actions from actor, with gumbel softmax to differentiate through the samples
policy_act_seq, _, _ = update_policy.get_actions(pol_agents_obs_seq, pol_prev_buffer_act_seq,
update_policy.init_hidden(-1, total_batch_size),
available_actions=pol_agents_avail_act_seq,
use_gumbel=True)
# separate the output into individual agent act sequences
agent_actor_seqs = policy_act_seq.split(split_size=batch_size, dim=1)
            # convert act sequences to torch, formulate centralized
#Function for writing a numpy array to a raster
#Byte, UInt16, Int16, UInt32, Int32, Float32, Float64
def write_raster(numpy_array,output_name, template = '', df = 'HFA', dt = 'Int16', width = '', height = '', bands = 1, projection = '', transform = '', ct = '', names = '', out_no_data = '',assume_ct_names = True, compress = False):
df = format_dict[os.path.splitext(output_name)[1]]
if numpy_or_gdal(dt) == 'numpy':
dt = dt_converter(dt)
dt = 'gdal.GDT_' + dt
if out_no_data == '' and template != '':
if raster_info(template)['no_data'] != None:
out_no_data = raster_info(template)['no_data']
if template != '' and transform == '':
rast = gdal.Open(template)
width = rast.RasterXSize
height = rast.RasterYSize
#bands = rast.RasterCount
projection = rast.GetProjection()
rast = None
if assume_ct_names and template != '' and (names == '' or names == None) and (ct == '' or ct == None) :
ct, names, b111, rast111 = color_table_and_names(template, band = 1)
if transform == '':
rast = gdal.Open(template)
transform = rast.GetGeoTransform()
driver = gdal.GetDriverByName(df)
if not compress:
ds = driver.Create(output_name, width, height, bands, eval(dt))
else:
ds = driver.Create(output_name, width, height, bands, eval(dt),['COMPRESS=LZW'])
ds.SetProjection(projection)
ds.SetGeoTransform(transform)
print(('Writing: ' + output_name.split('/')[-1]))
print(('Datatype of ' + output_name.split('/')[-1] + ' is: ' + dt))
if bands > 1:
for band in range(1,bands + 1):
ds.GetRasterBand(band).WriteArray(numpy_array[band-1])
if ct != '' and ct != None:
ds.GetRasterBand(band).SetRasterColorTable(ct)
if names != '' and names != None:
try:
ds.GetRasterBand(1).SetRasterCategoryNames(names)
except:
print ('Could not write category names')
if out_no_data != '':
ds.GetRasterBand(band).SetNoDataValue(out_no_data)
else:
if ct != '' and ct != None:
ds.GetRasterBand(1).SetRasterColorTable(ct)
if names != '' and names != None:
try:
ds.GetRasterBand(1).SetRasterCategoryNames(names)
except:
print ('Could not write category names')
if out_no_data != '':
try:
ds.GetRasterBand(1).SetNoDataValue(out_no_data)
except:
print(('Could not set no data value', out_no_data))
ds.GetRasterBand(1).WriteArray(numpy_array)
    ds = None
    numpy_array = None
    rast = None
    return output_name
##ti = 'W:/03_Data-Archive/02_Inputs/Landsat_Data/4126/Scenes/glovis/p041r026_distbYear_flt'
##to = 'W:/03_Data-Archive/02_Inputs/Landsat_Data/VCT_Outputs/Mosaicked_Outputs/distbYear_flt_first_distb_mosaic_near_w_color2.img'
##r = raster(ti)
###ct, nms, b, r1 = color_table_and_names(ti)
##ct = gdal.ColorTable()
###ct.SetColorEntry(0, (0, 0, 0, 255))
###ct.SetColorEntry(1, (0, 211, 0, 255))
###ct.SetColorEntry(2, (211, 0, 0, 255))
##ct.CreateColorRamp(1, (0, 211, 0, 255), 45, (211, 0, 0, 255))
###names = ['Unclassified','Nonforest', 'Forest']
##write_raster(r, to, ti, ct = ct)
##
##r = None
##del r
######################################################################################
def color_table_and_names(image, band = 1):
rast = gdal.Open(image)
b1 = rast.GetRasterBand(1)
ct = b1.GetRasterColorTable()
names = b1.GetRasterCategoryNames()
return ct, names, b1, rast
color_dict = {'green' : (0, 200, 0, 255),
'red' : (200, 0, 0, 255),
'blue' : (0, 0, 200, 255),
'light_blue':(0, 102, 255,255),
'light_purple':(235, 153, 235),
'orange' : (255, 128, 0, 255),
'yellow' : (255, 255, 0, 255),
'gray' : (224, 224, 224, 255)
}
######################################################################################
#Function for creating a gdal color ramp
#Uses a numpy array to find the min and max and then creates a color table using those values
#The array must be positive
def min_max_color_ramp(array, cmin = 'red', cmax = 'green', gray_zero = True):
print ('Creating min max color table')
Min = int(numpy.amin(array))
if Min <= 0:
Min = 1
Max = int(numpy.amax(array))
ct = gdal.ColorTable()
ct.CreateColorRamp(Min, color_dict[cmin], Max, color_dict[cmax])
if gray_zero:
ct.SetColorEntry(0, color_dict['gray'])
return ct
##############################################################
##############################################################
# color functions adapted from bsou.io/posts/color-gradients-with-python
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
##############################################################
def color_dict_maker(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
##############################################################
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
    including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
    s = hex_to_RGB(start_hex)
    f = hex_to_RGB(finish_hex)
    # Initialize a list of the output colors with the starting color
RGB_list = [s]
    # Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
print(RGB_list)
return color_dict_maker(RGB_list)
def polylinear_gradient(colors, n):
''' returns a list of colors forming linear gradients between
all sequential pairs of colors. "n" specifies the total
number of desired output colors '''
# The number of colors per individual linear gradient
n_out = int(float(n) / (len(colors) - 1)) + 1
print(('n',n))
print(('n_out',n_out))
# If we don't have an even number of color values, we will remove equally spaced values at the end.
apply_offset = False
if n%n_out != 0:
apply_offset = True
n_out = n_out + 1
print(('new n_out',n_out))
# returns the dictionary structure produced by color_dict_maker()
gradient_dict = linear_gradient(colors[0], colors[1], n_out)
if len(colors) > 1:
for col in range(1, len(colors) - 1):
next = linear_gradient(colors[col], colors[col+1], n_out)
for k in ("hex", "r", "g", "b"):
# Exclude first point to avoid duplicates
gradient_dict[k] += next[k][1:]
# Remove equally spaced values here.
if apply_offset:
#indList = list(range(len(gradient_dict['hex'])))
offset = len(gradient_dict['hex'])-n
sliceval = []
print(('len(gradient_dict)',len(gradient_dict['hex'])))
print(('offset',offset))
for i in range(1, offset+1):
sliceval.append(int(len(gradient_dict['hex'])*i/float(offset+2)))
print(('sliceval',sliceval))
for k in ("hex", "r", "g", "b"):
gradient_dict[k] = [i for j, i in enumerate(gradient_dict[k]) if j not in sliceval]
print(('new len dict', len(gradient_dict['hex'])))
return gradient_dict
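## Example (sketch): a 10-step gradient through three arbitrary hex colors.
## The colors and step count here are illustrative only.
##grad = polylinear_gradient(["#00D300", "#FFFF00", "#D30000"], 10)
##print(grad['hex']) # 10 hex strings interpolated green -> yellow -> red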
######################################################################################
#Function for creating a gdal color ramp
#Uses a numpy array to find the min and max and then creates a color table using those values
#The array must be positive
def min_max_color_ramp2(Min,Max, cmin = 'red', cmax = 'green', gray_zero = True):
print ('Creating min max color table')
## Min = int(numpy.amin(array))
## if Min <= 0:
## Min = 1
## Max = int(numpy.amax(array))
ct = gdal.ColorTable()
ct.CreateColorRamp(Min, color_dict[cmin], Max, color_dict[cmax])
if gray_zero:
ct.SetColorEntry(0, color_dict['gray'])
return ct
##############################################################
def stack_report(image_list, output_name):
report_name = os.path.splitext(output_name)[0] + '_report.txt'
if os.path.exists(report_name) == False:
report_lines = 'Stack report for: ' + output_name + '\n'
report_lines += 'Created: ' + now()+ '\n\n'
report_lines += 'Stacked images:\n'
for image in image_list:
report_lines += image + '\n'
ro = open(report_name, 'w')
ro.writelines(report_lines)
ro.close()
##############################################################
#Stacks a list of rasters
#All rasters must be of the exact same extent
#Should use the template argument when stacking numpy arrays so the output inherits its extent and projection
def stack(image_list = [], output_name = '', template = '', df = 'HFA', dt = '', width = '', height = '', projection = '', transform = '', array_list = False, color_table = '', category_names = '', out_no_data = '', report = True,guiable = True,compress = True):
if image_list == []:
image_list = str(askopenfilenames(title = 'Select Rasters to stack',filetypes=[("IMAGINE","*.img"),("tif","*.tif")])).split(' ')
print( image_list)
if output_name == '':
output_name = str(asksaveasfilename(title = 'Select output image name',initialdir = cwd,filetypes=[("IMAGINE","*.img"),("tif","*.tif")]))
if array_list:
image_list = numpy.array(image_list)
if image_list.ndim == 2:
image_list = numpy.array([image_list])
if dt == '' and array_list == False:
info = raster_info(image_list[0])
dt = info['dt']
elif dt == '' and array_list == True:
info = raster_info(template)
dt = info['dt']
if out_no_data == '' and template != '':
if raster_info(template)['no_data'] != None:
out_no_data = raster_info(template)['no_data']
if numpy_or_gdal(dt) == 'numpy':
dt = dt_converter(dt)
numpy_dt = dt_converter(dt)
gdal_dt = 'gdal.GDT_' + dt
print (gdal_dt)
if template != '':
rast = gdal.Open(template)
width = rast.RasterXSize
height = rast.RasterYSize
if projection == '':
projection = rast.GetProjection()
elif width == '':
info = raster_info(image_list[0])
width = info['width']
height = info['height']
projection = info['projection']
transform = info['transform']
bands = len(image_list)
if transform == '':
transform = rast.GetGeoTransform()
if (category_names == '' or category_names == None) and (color_table == '' or color_table == None) and template != '':
color_table, category_names, b111, rast111 = color_table_and_names(template, band = 1)
df = format_dict[os.path.splitext(output_name)[1]]
driver = gdal.GetDriverByName(df)
print (bands)
print(('df',df))
if not compress:
ds = driver.Create(output_name, width, height, bands, eval(gdal_dt))
else:
ds = driver.Create(output_name, width, height, bands, eval(gdal_dt),['COMPRESS=LZW'])
try:
ds.SetProjection(projection)
ds.SetGeoTransform(transform)
except:
print ('Could not set spatial info')
if color_table != '' and color_table != None:
try:
ds.GetRasterBand(1).SetRasterColorTable(color_table)
except:
print ('Could not write color table')
# Repository: tandriamil/BrFAST
#!/usr/bin/python3
"""Module containing the FPSelect exploration algorithm."""
from datetime import datetime
from math import ceil
from multiprocessing import Pool
from multiprocessing.managers import ListProxy
from os import cpu_count
from typing import Any, Dict, List, Set, Tuple
from loguru import logger
from brfast.config import params
from brfast.data.attribute import AttributeSet
from brfast.data.dataset import FingerprintDataset
from brfast.exploration import (Exploration, ExplorationParameters, State,
TraceData)
from brfast.measures import SensitivityMeasure, UsabilityCostMeasure
class FPSelect(Exploration):
"""The implementation of the FPSelect exploration algorithm."""
def __init__(self, sensitivity_measure: SensitivityMeasure,
usability_cost_measure: UsabilityCostMeasure,
dataset: FingerprintDataset, sensitivity_threshold: float,
explored_paths: int, pruning: bool):
"""Initialize the FPSelect exploration algorithm.
Args:
sensitivity_measure: The sensitivity measure.
usability_cost_measure: The usability cost.
dataset: The fingerprint dataset.
sensitivity_threshold: The sensitivity threshold.
explored_paths: The number of paths explored by FPSelect.
pruning: Use the pruning methods.
"""
# Initialize using the __init__ function of Exploration
super().__init__(sensitivity_measure, usability_cost_measure, dataset,
sensitivity_threshold)
# Check the number of explored paths
if explored_paths < 1:
raise AttributeError('The number of explored paths is required to '
'be a positive number.')
# Initialize the specific parameters of FPSelect
self._explored_paths = explored_paths
self._pruning = pruning
logger.info(f'Initialized FPSelect with {explored_paths} paths to '
'explore.')
if pruning:
logger.info('Pruning methods are activated.')
else:
logger.info('Pruning methods are ignored.')
# Initialize the minimum cost currently found
self._solution.append(float('inf')) # Stored in self._solution[1]
# The set S of the attributes set to expand at each step, initialized
# to k empty sets
self._attribute_sets_to_expand = set({AttributeSet()})
# The set I of the attribute sets which supersets are to ignore
self._attribute_sets_ignored_supersets = set()
@property
def parameters(self) -> Dict[str, Any]:
"""Give the parameters of the exploration as a dictionary.
Returns:
The parameters as a dictionary of their name and their value.
"""
parameters = self._default_parameters()
parameters.update({
FPSelectParameters.EXPLORED_PATHS: self._explored_paths,
FPSelectParameters.PRUNING: self._pruning
})
return parameters
def _search_for_solution(self):
"""Search for a solution using the FPSelect exploration algorithm.
This function has to
- Set the best solution currently found (AttributeSet).
- Update the set of the attribute sets that satisfy the sensitivity
threshold (Set[AttributeSet]).
- Update the list of the explored attributes which is the trace of the
execution. The information regarding an explored attribute is stored
as a dictionary with the following key/values:
* time (float): The time spent since the starting of the exploration
in seconds (use timedelta.total_seconds()).
* attributes (Set[int]): The set of the ids of the attributes.
* sensitivity (float): The sensitivity of the attribute set.
* usability_cost (float): The usability cost of the attribute set.
* cost_explanation (Dict[str: float]): The explanation of the cost of
the attribute set.
* state (State): The state of this attribute set (see State class).
- Log the explored attribute sets for debugging purposes using loguru.
Note:
We use the ids of the attributes instead of their name to reduce
the size of the trace in memory and when saved in json format.
"""
# While the set S is not empty, we continue the exploration. Note that
# it is initialized to k empty sets.
stage = 1
while self._attribute_sets_to_expand:
logger.debug('---------------------------------------------------')
logger.debug(f'Starting the stage {stage}.')
logger.debug(f'The {len(self._attribute_sets_to_expand)} attribute'
f' sets to expand: {self._attribute_sets_to_expand}.')
logger.debug(f'The {len(self._attribute_sets_ignored_supersets)} '
'attribute sets which supersets are ignored: '
f'{self._attribute_sets_ignored_supersets}.')
# Generate the attribute sets to explore by expanding the set S
sets_to_explore = self._expand_s()
logger.debug(f'The {len(sets_to_explore)} attribute sets to '
f'explore: {sets_to_explore}.')
# Explore the level and retrieve the next attribute sets to expand
# sorted by their efficiency (the most efficient are firsts)
next_attr_sets_to_expand = self._explore_level(sets_to_explore)
logger.debug(f'After exploring the level {stage}, we obtain '
f'{len(next_attr_sets_to_expand)} attribute sets to '
'expand.')
# Clear the sets to expand so the next ones can be stored, if any
self._attribute_sets_to_expand.clear()
# After having explored all the attribute sets, get the k most
# efficient attr. sets that are the next to explore
for best_atset in sorted(next_attr_sets_to_expand,
key=next_attr_sets_to_expand.get,
reverse=True)[:self._explored_paths]:
self._attribute_sets_to_expand.add(best_atset)
# Next stage
stage += 1
def _explore_level(self, attribute_sets_to_explore: Set[AttributeSet]
) -> Dict[AttributeSet, float]:
"""Explore the attribute sets of a level (i.e., having the same size).
Args:
attribute_sets_to_explore: The attribute sets of this level to
explore.
Returns:
The resulting attribute sets that can be explored with their
efficiency. Only a subset of these will actually be explored.
"""
# Dictionary of { AttributeSet => efficiency } to build the list of the
# next attribute sets to explore that will be sorted at the end
attribute_sets_efficiency = {}
def update_after_exploration(result: Tuple[Dict[AttributeSet, float],
Set[AttributeSet],
Set[AttributeSet]]):
"""Update the informations after exploring a level.
Args:
result: A triplet with
- The attribute sets that could be explored afterwards and
their efficiency.
- The attribute sets that satisfy the threshold.
- The attribute sets which supersets are to be ignored.
Note: This is executed by the main thread and does not pose any
concurrency or synchronization problem.
"""
attr_sets_eff, satisf_attr_sets, attr_sets_ign_sups = result
attribute_sets_efficiency.update(attr_sets_eff)
self._attribute_sets_ignored_supersets.update(attr_sets_ign_sups)
self._satisfying_attribute_sets.extend(satisf_attr_sets)
# If we execute on a single process
if not params.getboolean('Multiprocessing', 'explorations'):
logger.debug('Exploring the attribute sets of this level on a '
'single process...')
update_after_exploration(
_explore_attribute_sets(
attribute_sets_to_explore, self._sensitivity,
self._usability_cost, self._sensitivity_threshold,
self._max_cost, self._solution, self._explored_attr_sets,
self._start_time, self._pruning))
return attribute_sets_efficiency
# Infer the number of cores to use
free_cores = params.getint('Multiprocessing', 'free_cores')
nb_cores = max(cpu_count() - free_cores, 1)
attribute_sets_per_core = int(ceil(len(attribute_sets_to_explore)
/ nb_cores))
logger.debug(f'Sharing {len(attribute_sets_to_explore)} attribute sets'
f' to explore over {nb_cores}(+{free_cores}) cores, hence'
f' {attribute_sets_per_core} attribute sets per core.')
# Spawn a number of processes equal to the number of cores
attribute_sets_to_explore_list = list(attribute_sets_to_explore)
async_results = []
with Pool(processes=nb_cores) as pool:
for process_id in range(nb_cores):
# Generate the attribute sets to explore for this process
start_id = process_id * attribute_sets_per_core
end_id = (process_id + 1) * attribute_sets_per_core
subset_attr_sets_to_explore = (
attribute_sets_to_explore_list[start_id:end_id])
async_result = pool.apply_async(
_explore_attribute_sets,
args=(subset_attr_sets_to_explore, self._sensitivity,
self._usability_cost, self._sensitivity_threshold,
self._max_cost, self._solution,
self._explored_attr_sets, self._start_time,
self._pruning),
callback=update_after_exploration)
async_results.append(async_result)
# Wait for all the processes to finish (otherwise we would exit
# before collecting their result)
for async_result in async_results:
async_result.wait()
return attribute_sets_efficiency
def _expand_s(self) -> Set[AttributeSet]:
"""Expand the set S to obtain the attribute sets to explore.
For each S_i of S, we generate the attribute sets to explore that are
composed of each S_i with one more attribute that is not in S_i.
E <-- {C = S_i Union {a} :
For all S_i in S, For all a in A, a Not in S_i}
We do not hold C in E if:
- It is a superset of an attr. set of T (i.e., if it is a superset of
an attr. set that satisfies the sensitivity threshold).
- The pruning methods are used and C is a superset of the attr. sets
whose supersets are to be ignored.
Returns:
The set E of the next attribute sets to explore.
"""
# The set E of the next attribute sets to explore
next_attr_sets_to_explore = set()
# If we execute on a single process
if not params.getboolean('Multiprocessing', 'explorations'):
logger.debug('Expanding the next attribute sets to explore on a '
'single process...')
return _expand_attribute_sets(
self._attribute_sets_to_expand,
self._dataset.candidate_attributes,
self.get_satisfying_attribute_sets(),
self._attribute_sets_ignored_supersets, self._pruning)
# Infer the number of cores to use
free_cores = params.getint('Multiprocessing', 'free_cores')
nb_cores = max(cpu_count() - free_cores, 1)
attribute_sets_per_core = int(ceil(len(self._attribute_sets_to_expand)
/ nb_cores))
logger.debug(f'Sharing {len(self._attribute_sets_to_expand)} attribute'
f' sets to expand over {nb_cores}(+{free_cores}) cores, '
f'hence {attribute_sets_per_core} attribute sets per '
'core.')
def update_next_attribute_sets_to_explore(result: Set[AttributeSet]):
"""Update the complete set of the next attribute sets to explore.
Args:
result: The next attribute sets to explore given by a process.
Note: This is executed by the main thread and does not pose any
concurrency or synchronization problem.
"""
next_attr_sets_to_explore.update(result)
# Spawn a number of processes equal to the number of cores
satisfying_attribute_sets = self.get_satisfying_attribute_sets()
attribute_sets_to_expand_as_list = list(self._attribute_sets_to_expand)
async_results = []
with Pool(processes=nb_cores) as pool:
for process_id in range(nb_cores):
# Generate the attribute sets to expand for this process
start_id = process_id * attribute_sets_per_core
end_id = (process_id + 1) * attribute_sets_per_core
process_attr_sets_to_expand = (
attribute_sets_to_expand_as_list[start_id:end_id])
async_result = pool.apply_async(
_expand_attribute_sets,
args=(process_attr_sets_to_expand,
self._dataset.candidate_attributes,
satisfying_attribute_sets,
self._attribute_sets_ignored_supersets,
self._pruning),
callback=update_next_attribute_sets_to_explore)
async_results.append(async_result)
# Wait for all the processes to finish (otherwise we would exit
# before collecting their result)
for async_result in async_results:
async_result.wait()
return next_attr_sets_to_explore
| |
"""
main.py
main.py is the main program of SonaScan and contains most of the GUI functions.
"""
# Global includes
import asyncio
import shutil
import time
import urllib
import webbrowser
from datetime import datetime
from os.path import basename
from src.forge import process_scan, monitor_scan_dir, get_obj, get_forge_token
from glob import glob
import os
import PySimpleGUI as sg  # pip3 install --upgrade PySimpleGUI; PySimpleGUI creates our GUI
import tkinter as tk
from tkinter import filedialog
from src.CONSTANTS import syscmd, images_dir_path, SCANNER_ID, last_saved_value_file, LOCAL_UPLOAD_DIR_ROOT_PATH, \
SERVER_SCAN_DIR_ROOT_PATH, LOCAL_SCAN_DIR_ROOT_PATH, LOCAL_MODEL_ROOT_PATH, LOCAL_FAILED_ROOT_PATH, status_ready, \
PATH_TO_MODEL_VIEWER_APP, LOCAL_VIRTUAL_SCAN_DIR_ROOT_PATH, LOCAL_SCAN_ID_DIR_PATH, SERVER_UPLOADED_DIR_ROOT_PATH
from src.new_scan_id import get_new_scan_id, create_scan_dirs
# print(get_new_scan_id())
# create_scan_dirs()
print(sg)
print(sg.version)
import re
import subprocess
# local includes
# from debugging import get_line_number
from src.capture_images import list_devices, take_image, manifest_maker, VIRTUAL_CAMERA_LIST, show_image
from src.index_maker import scandir_index_maker
from src.new_scan_id import get_new_scan_id
from src.last_saved_values import from_email
from src.last_saved_values import to_email
from src.last_saved_values import consumer_id
from src.last_saved_values import order_details
from src.upload import scandir_upload, move_scandir_to_upload_dir_on_server, scan_root_sync
short_scan_id, full_scan_id = get_new_scan_id()
create_scan_dirs()
from src.GUI_Layouts import settings, info_panel, layout
def show_splashscreen(seconds_to_display):
sg.popup_non_blocking("SonaScan", font=('Raleway', 100), button_type=sg.POPUP_BUTTONS_NO_BUTTONS,
background_color='#ffffff', auto_close=True, auto_close_duration=seconds_to_display,
title="SONASCAN is starting...")
return True
def get_screen_width_and_height():
results = str(
subprocess.Popen(['system_profiler SPDisplaysDataType'], stdout=subprocess.PIPE, shell=True).communicate()[
0])
res = re.search(r'Resolution: \d* x \d*', results).group(0).split(' ')
width, height = int(res[1]), int(res[3])
return width, height
def is_internet_available():
ping_output = syscmd("ping -c 1 8.8.8.8 | grep '0.0% packet loss' ")
if (ping_output == ""):
print("Internet isn't present ", ping_output)
return False
else:
# print("Internet is present")
return True
def is_server_reachable():
ping_output = syscmd("ping -c 1 cloud1.tri-di.com | grep '0.0% packet loss' ")
if (ping_output == ""):
print("There is NO route to Sonautics server: cloud1.tri-di.com")
return False
else:
# print("There is a route to Sonautics server: cloud1.tri-di.com")
return True
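# Minimal usage sketch of the two connectivity checks above (output text is illustrative):
# if is_internet_available() and is_server_reachable():
#     print("Online and able to reach cloud1.tri-di.com")
# else:
#     print("Running offline; scans will wait in the local upload queue")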
def save_last_form_values(last_saved_value_file, from_email, to_email, consumer_id, consumer_name, order_details,
monitor_scanning_flag, monitor_modeling_flag, monitor_upload_flag):
if monitor_scanning_flag:
monitor_scanning_str = "True"
else:
monitor_scanning_str = "False"
if monitor_modeling_flag:
monitor_modeling_str = "True"
else:
monitor_modeling_str = "False"
if monitor_upload_flag:
monitor_uploads_str = "True"
else:
monitor_uploads_str = "False"
f = open(last_saved_value_file, "w+")
f.write("from_email = '" + from_email + "'\n")
f.write("to_email = '" + to_email + "'\n")
f.write("consumer_id = '" + consumer_id + "'\n")
f.write("consumer_name = '" + consumer_name + "'\n")
f.write("order_details = '" + order_details + "'\n")
f.write("monitor_scanning_flag = '" + monitor_scanning_str + "'\n")
f.write("monitor_modeling_flag = '" + monitor_modeling_str + "'\n")
f.write("monitor_upload_flag = '" + monitor_uploads_str + "'\n")
f.close()
return last_saved_value_file
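# Usage sketch for save_last_form_values (all values below are placeholders, not real
# settings); the saved file is re-imported as src.last_saved_values on the next launch.
# save_last_form_values(last_saved_value_file, "from@example.com", "to@example.com",
#                       "consumer-001", "Jane Doe", "order details",
#                       False, False, False)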
# window = sg.Window("SonaScan");
# sg.preview_all_look_and_feel_themes()
# sg.set_options(ttk_theme='default')
# sg.ChangeLookAndFeel('DarkTeal7')
sg.ChangeLookAndFeel('Default1')
# short_scan_id, full_scan_id = get_new_scan_id()
scan_start_time = datetime.now()
dt_string = scan_start_time.strftime("%d-%b-%Y %H:%M:%S")
number_scans_waiting = 0
internet_status = False
route_to_server_status = False
async def scan_with_GUI(camera_list):
from src.last_saved_values import consumer_id
from src.last_saved_values import consumer_name
from src.last_saved_values import order_details
tic = time.perf_counter()
upload_tasks = []
process_tasks = []
# seconds_to_display = 5
# show_splashscreen(seconds_to_display)
width, height = get_screen_width_and_height()
window_width = int(width / 2)
window_height = int(height / 2)
# short_scan_id, full_scan_id = get_new_scan_id()
scan_start_time = datetime.now()
dt_string = scan_start_time.strftime("%d-%b-%Y %H:%M:%S")
settings_window = sg.Window('Settings', settings, font=('Raleway', 30), resizable=True, grab_anywhere=True,
ttk_theme="default", use_ttk_buttons=True, finalize=True, location=(0, 0))
info_window = sg.Window('Info', info_panel, font=('Raleway', 30), resizable=True, grab_anywhere=True,
ttk_theme="default", use_ttk_buttons=True, finalize=True, location=(0, 0))
window = sg.Window('SonaScan', layout, font=('Raleway', 40), resizable=True, grab_anywhere=True,
ttk_theme="default",
use_ttk_buttons=True, finalize=True, location=(0, 0), size=(window_width, window_height))
# no_titlebar=True,
# window.Maximize()
settings_window.Hide()
info_window.Hide()
scans_awaiting_upload_list = glob(os.path.join(LOCAL_SCAN_DIR_ROOT_PATH, SCANNER_ID, "*/"))
number_of_scans_awaiting_upload = len(scans_awaiting_upload_list)
uploaded_scans_list = glob(os.path.join(LOCAL_UPLOAD_DIR_ROOT_PATH, SCANNER_ID, "*/"))
number_of_uploaded_scans = len(uploaded_scans_list)
modeled_scans_list = glob(os.path.join(LOCAL_MODEL_ROOT_PATH, SCANNER_ID, "*/"))
number_of_modeled_scans = len(modeled_scans_list)
failed_scans_list = glob(os.path.join(LOCAL_FAILED_ROOT_PATH, SCANNER_ID, "*/"))
number_of_failed_scans = len(failed_scans_list)
window.FindElement('_UPLOADS_NUMBER_').Update(number_of_scans_awaiting_upload)
print(scans_awaiting_upload_list)
while True: # Event Loop
if is_internet_available():
internet_status = "Online"
else:
internet_status = "Offline"
# window.FindElement('_INTERNET_STATUS_').Update(internet_status)
if is_server_reachable():
route_to_server_status = "Available"
else:
route_to_server_status = "Unavailable"
# window.FindElement('_ROUTE_TO_SERVER_STATUS_').Update(route_to_server_status)
# RESET VARIABLES IN FORM FOR NEXT DISPLAY
print("******************************************** NEW SCAN *************************************************")
from src.last_saved_values import from_email
from src.last_saved_values import to_email
short_scan_id, full_scan_id = get_new_scan_id()
window.FindElement('_DATE_').Update(dt_string)
window.FindElement('_SCAN_ID_').Update(full_scan_id)
# ************************** THIS IS WHAT REPAINTS THE MAIN WINDOW!!! **************************************
event, values = window.Read() # check return status from latest window event
print("event=",event, "values=", values)
# number_scans_waiting = len(scans_waiting(scan_dir_root_path))
# window.FindElement('_SCANS_WAITING_').Update(number_scans_waiting)
# we may need this to make sure that the window is full size on the small screen
# w_size, h_size = sg.Window.get_screen_size(window)
# print("screen size = ", w_size, h_size)
# w_dimension, h_dimension = sg.Window.get_screen_dimensions(window)
# print("screen w_dimension = ", w_dimension, h_dimension)
# event finds out which button was pressed.
if event is None or event == 'Exit': # If event = None, the close box was clicked.
return () # THIS EXITS THE ENTIRE MAIN PROGRAM!
if event == '_SETTINGS_ICON_': # ***************** Settings Button pressed! ******************************
settings_window.UnHide()
settings_event, settings_values = settings_window.Read()
if settings_event == 'Save':
# save_last_form_values() expects the three monitor_* flags as well; the related
# checkboxes are commented out below, so pass False for each (assumed defaults).
save_last_form_values(last_saved_value_file, from_email, to_email, consumer_id, consumer_name,
order_details, False, False, False)
settings_window.Hide()
if event == '_INFO_ICON_': # ***************** Info Button pressed! ********************************************
info_window.UnHide()
if event == '_UPLOADS_ICON_': # ***************** Uploads Icon Button pressed! ********************************************
initial_folder_path = os.path.abspath(os.path.join(LOCAL_SCAN_DIR_ROOT_PATH, SCANNER_ID))
print(f"view of scan directory: {initial_folder_path} requested")
selected_scan = tk.filedialog.askdirectory(initialdir=initial_folder_path)
# selected_scan = sg.popup_get_folder('', no_window=True, initial_folder=initial_folder_path)
print(f'selected_scan ={selected_scan}')
if selected_scan:
webbrowser.open_new_tab("file://"+os.path.join(selected_scan,"index.html"))
elif event == 'SCAN': # ***************** SCAN Button pressed! ********************************************
window.FindElement('_ACTION_STATUS_LINE_1_').Update("Scanning...")
window.FindElement('_ACTION_STATUS_LINE_2_').Update(" ")
last_image_captured_time = scan_start_time
consumer_id = values['_CONSUMER_ID_']
consumer_name = values['_CONSUMER_NAME_']
order_details = values['_ORDER_DETAILS_']
# monitor_scanning_flag = values['_MONITOR_SCANNING_CB_']
# monitor_scanning_flag = False
# monitor_upload_flag = values['_MONITOR_UPLOADS_CB_']
# monitor_upload_flag = False
# monitor_modeling_flag = values['_MONITOR_PROCESSING_CB_']
# monitor_modeling_flag = False
# ***************** capturing images ********************************************
LOCAL_SCAN_ID_DIR_PATH = os.path.abspath(os.path.join(LOCAL_SCAN_DIR_ROOT_PATH, full_scan_id))
if not os.path.isdir(LOCAL_SCAN_ID_DIR_PATH):
os.makedirs(LOCAL_SCAN_ID_DIR_PATH)
image_counter = 1
tasks = []
number_of_scanner_cameras = len(camera_list)
if number_of_scanner_cameras < 12:
# Not enough images! Use Virtual Cameras instead
print(number_of_scanner_cameras, " cameras responded. Using Virtual Scanner Instead!")
number_of_scanner_cameras =len(VIRTUAL_CAMERA_LIST)
for camera_pair in VIRTUAL_CAMERA_LIST:
camera_id, camera_name = camera_pair
image_file_name = 'img' + str(image_counter).zfill(2) \
+ "-cam" + str(camera_id).zfill(2) \
+ "-" + urllib.parse.quote_plus(camera_name.replace('#', '')) + '.jpg'
image_file_path = os.path.join(LOCAL_SCAN_ID_DIR_PATH, image_file_name)
print(camera_id, camera_name, image_file_path, image_counter)
virtual_file_path = os.path.join(LOCAL_VIRTUAL_SCAN_DIR_ROOT_PATH, image_file_name)
shutil.copyfile(virtual_file_path, image_file_path)
task = asyncio.create_task(
take_image(camera_id, camera_name, image_file_path,
image_counter, number_of_scanner_cameras, window),
name=image_file_name)
tasks.append(task)
image_counter = image_counter + 1
number_of_scanner_cameras = 0
await asyncio.gather(*tasks)
else:
tic = time.perf_counter()
for camera_pair in camera_list:
camera_id, camera_name = camera_pair
image_file_name = 'img' + str(image_counter).zfill(2) \
+ "-cam" + str(camera_id).zfill(2) \
+ "-" + urllib.parse.quote_plus(camera_name.replace('#', '')) + '.jpg'
image_file_path = os.path.join(LOCAL_SCAN_ID_DIR_PATH, image_file_name)
print(camera_id, camera_name, image_file_path, image_counter)
# schedule up to 24 camera captures *concurrently*:
task = asyncio.create_task(
take_image(camera_id, camera_name, image_file_path,
image_counter, number_of_scanner_cameras, window),
name=image_file_name)
tasks.append(task)
image_counter = image_counter + 1
# elapsed = default_timer - start
# Gather all the tasks from the event loop once they finish running
# (This command blocks.)
await asyncio.gather(*tasks)
# ************************************ DONE IMAGING ****************************************************
window.FindElement('_ACTION_STATUS_LINE_1_').Update("Scanning Completed.")
window.FindElement('_ACTION_STATUS_LINE_2_').Update(f"{image_counter-1} images.")
thumbnail = os.path.join(images_dir_path, status_ready)
window.FindElement('_IMAGE_ELEMENT_').Update(filename=thumbnail)
window.Refresh()
# Make a manifest.xml file for this scan containing all the auxiliary information
# other than images that we need.
manifest_maker(full_scan_id, SCANNER_ID, from_email, to_email, LOCAL_SCAN_ID_DIR_PATH)
the_png_files = glob(os.path.join(LOCAL_SCAN_ID_DIR_PATH, "img*.png"))
for filename in the_png_files:
os.remove(filename)
# Make an index file that displays the images nicely
scandir_index_maker(LOCAL_SCAN_ID_DIR_PATH, full_scan_id)
window.FindElement('_ACTION_STATUS_LINE_1_').Update("Ready")
window.FindElement('_ACTION_STATUS_LINE_2_').Update("")
show_image(os.path.join(images_dir_path, status_ready), window)
# window.FindElement('_IMAGE_ELEMENT_').Update()
window.Refresh()
# Update number of scans waiting for upload, and list of waiting scanIDs
scans_awaiting_upload_list = glob(os.path.join(LOCAL_SCAN_DIR_ROOT_PATH, SCANNER_ID, "*/"))
number_of_scans_awaiting_upload = len(scans_awaiting_upload_list)
uploaded_scans_list = glob(os.path.join(LOCAL_UPLOAD_DIR_ROOT_PATH, SCANNER_ID, "*/"))
number_of_uploaded_scans = len(uploaded_scans_list)
modeled_scans_list = glob(os.path.join(LOCAL_MODEL_ROOT_PATH, SCANNER_ID, "*/"))
number_of_modeled_scans = len(modeled_scans_list)
failed_scans_list = glob(os.path.join(LOCAL_FAILED_ROOT_PATH, SCANNER_ID, "*/"))
number_of_failed_scans = len(failed_scans_list)
print(number_of_scans_awaiting_upload, "scans waiting", number_of_uploaded_scans, "uploaded",
number_of_modeled_scans, "modeled", number_of_failed_scans, "failed." )
window.FindElement('_UPLOADS_NUMBER_').Update(number_of_uploaded_scans)
window.Refresh()
toc = time.perf_counter()
print(f"Captured {image_counter - 1} cameras in: {toc - tic:0.0f} seconds")
duration_in_min = (toc - tic) / 60
print(f"Completed Scan in: {duration_in_min:0.2f} minutes")
window.FindElement('_ACTION_STATUS_LINE_3_').Update(
f"Completed Scan in: {duration_in_min:0.2f} minutes")
window.Refresh()
# Spawn off async task to upload asynchronously
SERVER_SCAN_DIR_PATH = SERVER_SCAN_DIR_ROOT_PATH + SCANNER_ID
upload_task = asyncio.create_task(
async_upload(LOCAL_SCAN_ID_DIR_PATH, SERVER_SCAN_DIR_PATH, LOCAL_UPLOAD_DIR_ROOT_PATH, SCANNER_ID,
short_scan_id, full_scan_id, window), name= full_scan_id)
upload_tasks.append(upload_task)
window.FindElement('_ACTION_STATUS_LINE_3_').Update(f"Uploading {short_scan_id}")
window.Refresh()
# await asyncio.gather(*upload_tasks)
await upload_task
# Update number of scans waiting for upload, and list of waiting scanIDs
scans_awaiting_upload_list = glob(os.path.join(LOCAL_SCAN_DIR_ROOT_PATH, SCANNER_ID, "*/"))
number_of_scans_awaiting_upload = len(scans_awaiting_upload_list)
uploaded_scans_list = glob(os.path.join(LOCAL_UPLOAD_DIR_ROOT_PATH, SCANNER_ID, "*/"))
number_of_uploaded_scans = len(uploaded_scans_list)
modeled_scans_list = glob(os.path.join(LOCAL_MODEL_ROOT_PATH, SCANNER_ID, "*/"))
number_of_modeled_scans = len(modeled_scans_list)
failed_scans_list = glob(os.path.join(LOCAL_FAILED_ROOT_PATH, SCANNER_ID, "*/"))
number_of_failed_scans = len(failed_scans_list)
print(number_of_scans_awaiting_upload, "scans waiting", number_of_uploaded_scans, "uploaded",
number_of_modeled_scans, "modeled", number_of_failed_scans, "failed." )
window.FindElement('_ACTION_STATUS_LINE_3_').Update(
f"Upload Completed for {short_scan_id}")
window.FindElement('_UPLOADS_NUMBER_').Update(number_of_uploaded_scans)
window.Refresh()
# return
async def async_upload(LOCAL_SCAN_ID_DIR_PATH,SERVER_SCAN_DIR_PATH, LOCAL_UPLOAD_DIR_ROOT_PATH, SCANNER_ID, short_scan_id, full_scan_id, window):
tic = time.perf_counter()
process_tasks =[]
uploaded_dest_dir = scandir_upload(LOCAL_SCAN_ID_DIR_PATH, SERVER_SCAN_DIR_PATH, LOCAL_UPLOAD_DIR_ROOT_PATH, SCANNER_ID, window)
if uploaded_dest_dir != False:
print(uploaded_dest_dir, 'was successfully uploaded.')
move_scandir_to_upload_dir_on_server(full_scan_id)
access_token = get_forge_token()
# Spawn off an async task to submit models to Forge
window.FindElement('_ACTION_STATUS_LINE_3_').Update(
f"Submitting {short_scan_id} for processing on SonaServer")
window.Refresh()
print("Submitting " + short_scan_id)
photoscene_website = SERVER_UPLOADED_DIR_ROOT_PATH + full_scan_id
# photoscene_website | |
`Maximum Setpoint Humidity Ratio`"""
self["Maximum Setpoint Humidity Ratio"] = value
@property
def reference_setpoint_node_name(self):
"""field `Reference Setpoint Node Name`
| The current setpoint at this node is the
| desired condition for the Mixed Air Node
| This node must have a valid setpoint
| which has been set by another setpoint manager
Args:
value (str): value for IDD Field `Reference Setpoint Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `reference_setpoint_node_name` or None if not set
"""
return self["Reference Setpoint Node Name"]
@reference_setpoint_node_name.setter
def reference_setpoint_node_name(self, value=None):
"""Corresponds to IDD field `Reference Setpoint Node Name`"""
self["Reference Setpoint Node Name"] = value
@property
def mixed_air_stream_node_name(self):
"""field `Mixed Air Stream Node Name`
| Name of Mixed Air Node
Args:
value (str): value for IDD Field `Mixed Air Stream Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `mixed_air_stream_node_name` or None if not set
"""
return self["Mixed Air Stream Node Name"]
@mixed_air_stream_node_name.setter
def mixed_air_stream_node_name(self, value=None):
"""Corresponds to IDD field `Mixed Air Stream Node Name`"""
self["Mixed Air Stream Node Name"] = value
@property
def outdoor_air_stream_node_name(self):
"""field `Outdoor Air Stream Node Name`
| Name of Outdoor Air Stream Node
Args:
value (str): value for IDD Field `Outdoor Air Stream Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `outdoor_air_stream_node_name` or None if not set
"""
return self["Outdoor Air Stream Node Name"]
@outdoor_air_stream_node_name.setter
def outdoor_air_stream_node_name(self, value=None):
"""Corresponds to IDD field `Outdoor Air Stream Node Name`"""
self["Outdoor Air Stream Node Name"] = value
@property
def return_air_stream_node_name(self):
"""field `Return Air Stream Node Name`
| Name of Return Air Stream Node
Args:
value (str): value for IDD Field `Return Air Stream Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `return_air_stream_node_name` or None if not set
"""
return self["Return Air Stream Node Name"]
@return_air_stream_node_name.setter
def return_air_stream_node_name(self, value=None):
"""Corresponds to IDD field `Return Air Stream Node Name`"""
self["Return Air Stream Node Name"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which the temperature or humidity
| ratio will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerWarmest(DataObject):
""" Corresponds to IDD object `SetpointManager:Warmest`
This SetpointManager resets the cooling supply air temperature
of a central forced air HVAC system according to the
cooling demand of the warmest zone.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'hvac air loop name',
{'name': u'HVAC Air Loop Name',
'pyname': u'hvac_air_loop_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'minimum setpoint temperature',
{'name': u'Minimum Setpoint Temperature',
'pyname': u'minimum_setpoint_temperature',
'default': 12.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'maximum setpoint temperature',
{'name': u'Maximum Setpoint Temperature',
'pyname': u'maximum_setpoint_temperature',
'default': 18.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'strategy',
{'name': u'Strategy',
'pyname': u'strategy',
'default': u'MaximumTemperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'MaximumTemperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 0,
'name': u'SetpointManager:Warmest',
'pyname': u'SetpointManagerWarmest',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
| Default value: Temperature
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value="Temperature"):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def hvac_air_loop_name(self):
"""field `HVAC Air Loop Name`
| Enter the name of an AirLoopHVAC object
Args:
value (str): value for IDD Field `HVAC Air Loop Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `hvac_air_loop_name` or None if not set
"""
return self["HVAC Air Loop Name"]
@hvac_air_loop_name.setter
def hvac_air_loop_name(self, value=None):
"""Corresponds to IDD field `HVAC Air Loop Name`"""
self["HVAC Air Loop Name"] = value
@property
def minimum_setpoint_temperature(self):
"""field `Minimum Setpoint Temperature`
| Units: C
| Default value: 12.0
Args:
value (float): value for IDD Field `Minimum Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_setpoint_temperature` or None if not set
"""
return self["Minimum Setpoint Temperature"]
@minimum_setpoint_temperature.setter
def minimum_setpoint_temperature(self, value=12.0):
"""Corresponds to IDD field `Minimum Setpoint Temperature`"""
self["Minimum Setpoint Temperature"] = value
@property
def maximum_setpoint_temperature(self):
"""field `Maximum Setpoint Temperature`
| Units: C
| Default value: 18.0
Args:
value (float): value for IDD Field `Maximum Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_setpoint_temperature` or None if not set
"""
return self["Maximum Setpoint Temperature"]
@maximum_setpoint_temperature.setter
def maximum_setpoint_temperature(self, value=18.0):
"""Corresponds to IDD field `Maximum Setpoint Temperature`"""
self["Maximum Setpoint Temperature"] = value
@property
def strategy(self):
"""field `Strategy`
| Default value: MaximumTemperature
Args:
value (str): value for IDD Field `Strategy`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `strategy` or None if not set
"""
return self["Strategy"]
@strategy.setter
def strategy(self, value="MaximumTemperature"):
"""Corresponds to IDD field `Strategy`"""
self["Strategy"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which the temperature will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerColdest(DataObject):
""" Corresponds to IDD object `SetpointManager:Coldest`
This SetpointManager is used in dual duct systems to reset
the setpoint temperature of the air in the heating supply duct.
Usually it is used in conjunction with a SetpointManager:Warmest
resetting the temperature of the air in the cooling supply duct.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'hvac air loop name',
{'name': u'HVAC Air Loop Name',
'pyname': u'hvac_air_loop_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'minimum setpoint temperature',
{'name': u'Minimum Setpoint Temperature',
'pyname': u'minimum_setpoint_temperature',
'default': 20.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'maximum setpoint temperature',
{'name': u'Maximum Setpoint Temperature',
'pyname': u'maximum_setpoint_temperature',
'default': 50.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'strategy',
{'name': u'Strategy',
'pyname': u'strategy',
'default': u'MinimumTemperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'MinimumTemperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 0,
'name': u'SetpointManager:Coldest',
'pyname': u'SetpointManagerColdest',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to | |
# File: packages/pegasus-python/src/Pegasus/db/schema.py
#!/usr/bin/env python
#
# Copyright 2018-2020 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "<NAME>"
__author__ = "<NAME>"
__author__ = "<NAME>"
__author__ = "<NAME>"
import logging
import time
import warnings
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy.exc import OperationalError, ProgrammingError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import foreign, mapper, relation
from sqlalchemy.schema import Column, ForeignKey, Index, MetaData, UniqueConstraint
from sqlalchemy.sql.expression import and_
from sqlalchemy.types import (
BigInteger,
Boolean,
DateTime,
Enum,
Integer,
Numeric,
String,
Text,
)
from Pegasus.db.ensembles import Ensemble as _Ensemble
from Pegasus.db.ensembles import EnsembleStates
from Pegasus.db.ensembles import EnsembleWorkflow as _EnsembleWorkflow
from Pegasus.db.ensembles import EnsembleWorkflowStates, TriggerStates, TriggerType
__all__ = (
"DBVersion",
"Workflow",
"Workflowstate",
"WorkflowMeta",
"Host",
"Job",
"JobEdge",
"JobInstance",
"Jobstate",
"Tag",
"Task",
"TaskEdge",
"TaskMeta",
"Invocation",
"WorkflowFiles",
"IntegrityMetrics",
"MasterWorkflow",
"MasterWorkflowstate",
"Ensemble",
"EnsembleWorkflow",
"Trigger",
"RCLFN",
"RCPFN",
"RCMeta",
)
log = logging.getLogger(__name__)
# for SQLite
warnings.filterwarnings("ignore", r".*does \*not\* support Decimal*.")
# These are keywords that all tables should have
table_keywords = {"mysql_charset": "utf8mb4", "mysql_engine": "InnoDB"}
KeyInteger = BigInteger()
KeyInteger = KeyInteger.with_variant(postgresql.BIGINT(), "postgresql")
KeyInteger = KeyInteger.with_variant(mysql.BIGINT(), "mysql")
KeyInteger = KeyInteger.with_variant(sqlite.INTEGER(), "sqlite")
TimestampType = Numeric(precision=16, scale=6)
DurationType = Numeric(precision=10, scale=3)
# --------------------------------------------------------------------
class SABase:
"""
Base class for all the DB mapper objects.
"""
def _commit(self, session, batch, merge=False):
if merge:
session.merge(self)
else:
session.add(self)
if batch:
return
session.flush()
session.commit()
def commit_to_db(self, session, batch=False):
"""
Commit the DB object/row to the database.
@type session: sqlalchemy.orm.scoping.ScopedSession object
@param session: SQLAlch session to commit row to.
"""
self._commit(session, batch)
def merge_to_db(self, session, batch=False):
"""
Merge the DB object/row with an existing row in the database.
@type session: sqlalchemy.orm.scoping.ScopedSession object
@param session: SQLAlch session to merge row with.
Using this method pre-supposes that the developer has already
assigned any primary key information to the object before
calling.
"""
self._commit(session, batch, merge=True)
def __repr__(self):
retval = "%s:\n" % self.__class__
for k, v in self.__dict__.items():
if k == "_sa_instance_state":
continue
retval += " * {} : {}\n".format(k, v)
return retval
metadata = MetaData()
Base = declarative_base(cls=SABase, metadata=metadata)
# ----------------------------------------------------------------
# Method to verify if tables exist or are according to the schema
# ----------------------------------------------------------------
def get_missing_tables(db):
tables = (
DBVersion,
# WORKFLOW
Workflow,
Workflowstate,
WorkflowMeta,
WorkflowFiles,
Host,
Job,
JobEdge,
JobInstance,
Jobstate,
Tag,
Task,
TaskEdge,
TaskMeta,
Invocation,
IntegrityMetrics,
# MASTER
MasterWorkflow,
MasterWorkflowstate,
Ensemble,
EnsembleWorkflow,
# JDBCRC
RCLFN,
RCPFN,
RCMeta,
)
missing_tables = []
for table in tables:
if not check_table_exists(db, table):
missing_tables.append(table.__tablename__)
return missing_tables
def check_table_exists(engine, table):
try:
engine.execute(table.__table__.select().limit(1))
return True
except OperationalError as e:
if (
"no such table" in str(e).lower()
or "unknown" in str(e).lower()
or "no such column" in str(e).lower()
):
return False
raise
except ProgrammingError as e:
if "doesn't exist" in str(e).lower():
return False
raise
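# Usage sketch (the engine URL is a placeholder): list the schema tables that are
# still missing from a target database.
#
#   from sqlalchemy import create_engine
#   engine = create_engine("sqlite:///workflow.db")
#   missing = get_missing_tables(engine)
#   if missing:
#       print("Missing tables:", ", ".join(missing))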
# ---------------------------------------------
# DB ADMIN
# ---------------------------------------------
class DBVersion(Base):
"""."""
__tablename__ = "dbversion"
__table_args__ = (
{
"mysql_charset": "utf8mb4",
"mysql_engine": "InnoDB",
"sqlite_autoincrement": True,
},
)
id = Column("id", KeyInteger, primary_key=True, autoincrement=True)
version_number = Column("version_number", Integer, default=5)
version = Column("version", String(50), nullable=False)
version_timestamp = Column("version_timestamp", Integer, nullable=False)
# ---------------------------------------------
# STAMPEDE
# ---------------------------------------------
class Workflow(Base):
"""."""
__tablename__ = "workflow"
# ==> Information comes from braindump.txt file
wf_id = Column("wf_id", KeyInteger, primary_key=True)
wf_uuid = Column("wf_uuid", String(255), nullable=False)
dag_file_name = Column("dag_file_name", String(255))
timestamp = Column("timestamp", TimestampType)
submit_hostname = Column("submit_hostname", String(255))
submit_dir = Column("submit_dir", Text)
planner_arguments = Column("planner_arguments", Text)
user = Column("user", String(255))
grid_dn = Column("grid_dn", String(255))
planner_version = Column("planner_version", String(255))
dax_label = Column("dax_label", String(255))
dax_version = Column("dax_version", String(255))
dax_file = Column("dax_file", String(255))
db_url = Column("db_url", Text)
parent_wf_id = Column(
"parent_wf_id", KeyInteger, ForeignKey(wf_id, ondelete="CASCADE"),
)
# not marked as FK to not screw up the cascade.
root_wf_id = Column("root_wf_id", KeyInteger)
# Relationships
root_wf = relation(
lambda: Workflow,
cascade="all, delete-orphan",
single_parent=True,
remote_side=(wf_id,),
)
parent_wf = relation(
lambda: Workflow,
cascade="all, delete-orphan",
single_parent=True,
remote_side=(wf_id,),
)
states = relation(
lambda: Workflowstate,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
order_by=lambda: Workflowstate.timestamp,
)
jobs = relation(
lambda: Job,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
job_edges = relation(
lambda: JobEdge,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
tasks = relation(
lambda: Task,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
task_edges = relation(
lambda: TaskEdge,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
invocations = relation(
lambda: Invocation,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
hosts = relation(
lambda: Host,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
workflow_files = relation(
lambda: WorkflowFiles,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
files = relation(
lambda: RCLFN,
secondary=lambda: WorkflowFiles.__table__,
primaryjoin=lambda: Workflow.wf_id == WorkflowFiles.wf_id,
secondaryjoin=lambda: WorkflowFiles.lfn_id == RCLFN.lfn_id,
)
integrity_metrics = relation(
lambda: IntegrityMetrics,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
tags = relation(
lambda: Tag,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
meta = relation(
lambda: WorkflowMeta,
backref="workflow",
cascade="all, delete-orphan",
passive_deletes=True,
)
Workflow.__table_args__ = (
UniqueConstraint(Workflow.wf_uuid, name="UNIQUE_WF_UUID"),
table_keywords,
)
class Workflowstate(Base):
"""."""
__tablename__ = "workflowstate"
__table_args__ = (table_keywords,)
# All three columns are marked as primary key to produce the desired
# effect - ie: it is the combo of the three columns that make a row
# unique.
wf_id = Column(
"wf_id",
KeyInteger,
ForeignKey(Workflow.wf_id, ondelete="CASCADE"),
primary_key=True,
)
state = Column(
"state",
Enum("WORKFLOW_STARTED", "WORKFLOW_TERMINATED", name="workflow_state"),
primary_key=True,
)
timestamp = Column(
"timestamp", TimestampType, primary_key=True, default=time.time(),
)
restart_count = Column("restart_count", Integer, nullable=False)
status = Column("status", Integer)
reason = Column("reason", Text)
Index("workflowstate_timestamp_COL", Workflowstate.timestamp)
class WorkflowMeta(Base):
"""."""
__tablename__ = "workflow_meta"
__table_args__ = (table_keywords,)
wf_id = Column(
"wf_id",
KeyInteger,
ForeignKey(Workflow.wf_id, ondelete="CASCADE"),
primary_key=True,
)
key = Column("key", String(255), primary_key=True)
value = Column("value", String(255), nullable=False)
# Host definition
# ==> Information from kickstart output file
#
# site_name = <resource, from invocation element>
# hostname = <hostname, from invocation element>
# ip_address = <hostaddr, from invocation element>
# uname = <combined (system, release, machine) from machine element>
# total_ram = <ram_total from machine element>
class Host(Base):
"""."""
__tablename__ = "host"
host_id = Column("host_id", KeyInteger, primary_key=True)
wf_id = Column(
"wf_id",
KeyInteger,
ForeignKey(Workflow.wf_id, ondelete="CASCADE"),
nullable=False,
)
site = Column("site", String(255), nullable=False)
hostname = Column("hostname", String(255), nullable=False)
ip = Column("ip", String(255), nullable=False)
uname = Column("uname", String(255))
total_memory = Column("total_memory", Integer)
Host.__table_args__ = (
UniqueConstraint(Host.wf_id, Host.site, Host.hostname, Host.ip, name="UNIQUE_HOST"),
table_keywords,
)
class Job(Base):
"""."""
__tablename__ = "job"
job_id = Column("job_id", KeyInteger, primary_key=True)
wf_id = Column(
"wf_id",
KeyInteger,
ForeignKey(Workflow.wf_id, ondelete="CASCADE"),
nullable=False,
)
exec_job_id = Column("exec_job_id", String(255), nullable=False)
submit_file = Column("submit_file", String(255), nullable=False)
type_desc = Column(
"type_desc",
Enum(
"unknown",
"compute",
"stage-in-tx",
"stage-out-tx",
"registration",
"inter-site-tx",
"create-dir",
"staged-compute",
"cleanup",
"chmod",
"dax",
"dag",
name="job_type_desc",
),
nullable=False,
)
clustered = Column("clustered", Boolean, nullable=False)
max_retries = Column("max_retries", Integer, nullable=False)
executable = Column("executable", Text, nullable=False)
argv = Column("argv", Text)
task_count = Column("task_count", Integer, nullable=False)
# Relationships
parents = relation(
lambda: Job,
backref="children",
cascade="all",
secondary=lambda: JobEdge.__table__,
primaryjoin=lambda: and_(
Job.wf_id == JobEdge.wf_id,
Job.exec_job_id == foreign(JobEdge.child_exec_job_id),
),
secondaryjoin=lambda: and_(
Job.wf_id == JobEdge.wf_id,
Job.exec_job_id == foreign(JobEdge.parent_exec_job_id),
),
)
tasks = relation(
lambda: Task, backref="job", cascade="all, delete-orphan", passive_deletes=True,
)
job_instances = relation(
lambda: JobInstance,
backref="job",
cascade="all, delete-orphan",
passive_deletes=True,
)
Job.__table_args__ = (
UniqueConstraint(Job.wf_id, Job.exec_job_id, name="UNIQUE_JOB"),
table_keywords,
)
Index("job_type_desc_COL", Job.type_desc)
Index("job_exec_job_id_COL", Job.exec_job_id)
class JobEdge(Base):
"""."""
__tablename__ = "job_edge"
__table_args__ = (table_keywords,)
wf_id = Column(
"wf_id",
KeyInteger,
ForeignKey(Workflow.wf_id, ondelete="CASCADE"),
primary_key=True,
)
parent_exec_job_id = Column("parent_exec_job_id", String(255), primary_key=True)
child_exec_job_id = Column("child_exec_job_id", String(255), primary_key=True)
class JobInstance(Base):
"""."""
__tablename__ = "job_instance"
job_instance_id = Column("job_instance_id", KeyInteger, primary_key=True)
job_id = Column(
"job_id",
KeyInteger,
ForeignKey(Job.job_id, ondelete="CASCADE"),
nullable=False,
)
host_id = Column(
"host_id", KeyInteger, ForeignKey(Host.host_id, ondelete="SET NULL"),
)
job_submit_seq = Column("job_submit_seq", Integer, nullable=False)
sched_id = Column("sched_id", String(255))
site = Column("site", String(255))
user = Column("user", String(255))
work_dir = Column("work_dir", Text)
cluster_start = Column("cluster_start", TimestampType)
cluster_duration = Column("cluster_duration", DurationType)
local_duration = Column("local_duration", DurationType)
subwf_id = Column(
"subwf_id", KeyInteger, ForeignKey(Workflow.wf_id, ondelete="SET NULL"),
)
stdout_file = Column("stdout_file", String(255))
stdout_text = Column("stdout_text", Text)
stderr_file = Column("stderr_file", String(255))
stderr_text = Column("stderr_text", Text)
stdin_file = Column("stdin_file", String(255))
multiplier_factor = Column("multiplier_factor", Integer, nullable=False, default=1)
exitcode = Column("exitcode", Integer)
# Relationships
# PM-712 don't want merges to happen to invocation table .
# setting lazy = false leads to a big join query when a job_instance is updated
# with the postscript status.
invocations = relation(
lambda: Invocation,
backref="job_instance",
cascade="all, delete-orphan",
passive_deletes=True,
)
states = relation(
lambda: Jobstate,
backref="job_instance",
cascade="all, delete-orphan",
passive_deletes=True,
)
sub_workflow = relation(
lambda: Workflow,
backref="job_instance",
cascade="all, delete-orphan",
single_parent=True,
)
host = relation(
lambda: Host,
backref="job_instance",
cascade="all, delete-orphan",
| |
import numpy as np
import scipy.linalg as linalg
from pyriemann.estimation import Covariances
from pyriemann.utils.mean import mean_covariance
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.cross_decomposition import CCA
from sklearn.utils.validation import check_is_fitted
from .utils import filterbank
class SSVEP_CCA(BaseEstimator, ClassifierMixin):
"""Classifier based on Canonical Correlation Analysis for SSVEP
A CCA is computed from the set of training signals and some pure
sinusoids to act as reference.
Classification is made by taking the frequency with the max correlation,
as proposed in [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2009). An online
multi-channel SSVEP-based brain-computer interface using a
canonical correlation analysis method. Journal of neural
engineering, 6(4), 046002.
https://doi.org/10.1088/1741-2560/6/4/046002
"""
def __init__(self, interval, freqs, n_harmonics=3):
self.Yf = dict()
self.cca = CCA(n_components=1)
self.interval = interval
self.slen = interval[1] - interval[0]
self.freqs = freqs
self.n_harmonics = n_harmonics
self.one_hot = {}
for i, k in enumerate(freqs.keys()):
self.one_hot[k] = i
def fit(self, X, y, sample_weight=None):
"""Compute reference sinusoid signal
These sinusoid are generated for each frequency in the dataset
"""
n_times = X.shape[2]
for f in self.freqs:
if f.replace(".", "", 1).isnumeric():
freq = float(f)
yf = []
for h in range(1, self.n_harmonics + 1):
yf.append(
np.sin(2 * np.pi * freq * h * np.linspace(0, self.slen, n_times))
)
yf.append(
np.cos(2 * np.pi * freq * h * np.linspace(0, self.slen, n_times))
)
self.Yf[f] = np.array(yf)
return self
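# For each numeric frequency key f, self.Yf[f] is the CCA reference matrix of
# shape (2 * n_harmonics, n_times): a sine and a cosine at f, 2f, ...,
# n_harmonics * f Hz, sampled over the epoch duration self.slen.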
def predict(self, X):
"""Predict is made by taking the maximum correlation coefficient"""
y = []
for x in X:
corr_f = {}
for f in self.freqs:
if f.replace(".", "", 1).isnumeric():
S_x, S_y = self.cca.fit_transform(x.T, self.Yf[f].T)
corr_f[f] = np.corrcoef(S_x.T, S_y.T)[0, 1]
y.append(self.one_hot[max(corr_f, key=lambda k: corr_f[k])])
return y
def predict_proba(self, X):
"""Probabilty could be computed from the correlation coefficient"""
P = np.zeros(shape=(len(X), len(self.freqs)))
for i, x in enumerate(X):
for j, f in enumerate(self.freqs):
if f.replace(".", "", 1).isnumeric():
S_x, S_y = self.cca.fit_transform(x.T, self.Yf[f].T)
P[i, j] = np.corrcoef(S_x.T, S_y.T)[0, 1]
return P / np.resize(P.sum(axis=1), P.T.shape).T
class SSVEP_TRCA(BaseEstimator, ClassifierMixin):
"""Classifier based on the Task-Related Component Analysis method [1]_ for SSVEP.
Parameters
----------
interval : list of length 2
Epoch start and end times in seconds. Together with the number of samples,
this determines the sampling frequency of the data to be analyzed at fit time.
freqs : dict with n_classes keys
Frequencies corresponding to the SSVEP components. These are
necessary to design the filterbank bands.
n_fbands : int, default=5
Number of sub-bands considered for filterbank analysis.
downsample: int, default=1
Factor by which to downsample the data. A downsample value of N results in
a sampling frequency of (sfreq // N), taking one sample out of every N in
the original data. In the original TRCA paper [1]_ the data are at 250 Hz.
is_ensemble: bool, default=True
If True, predict on new data using the Ensemble-TRCA method described
in [1]_.
method: str, default='original'
'original' computes euclidean mean for S as in the original paper [1]_.
'riemann' variation computes the geodesic mean instead. This geodesic
mean is more robust to outliers but negatively impacted by ill-conditioned
matrices (for instance when only a few samples are available for training).
If the geodesic mean can't be estimated, consider trying 'logeuclid', which
computes the log-Euclidean mean instead of the affine-invariant one and is
computationally more robust.
'riemann' and 'logeuclid' variations are useful when lots of noisy
training data are available. With little training data, 'original' is more
appropriate.
estimator: str
For both methods, the covariance estimator (and thus the regularization) to use.
Consider 'schaefer', 'lwf' or 'oas' for regularized estimation, or 'scm' for no
regularization. The original implementation from the TRCA paper [1]_ uses no
regularization, so method='original' with estimator='scm' is closest to the
original implementation.
Attributes
----------
fb_coefs : list of len (n_fbands)
Alpha coefficients for the fusion of the filterbank sub-bands.
classes_ : ndarray of shape (n_classes,)
Array with the class labels extracted at fit time.
n_classes: int
Number of unique labels/classes.
templates_ : ndarray of shape (n_classes, n_bands, n_channels, n_samples)
Template data obtained by averaging all training trials for a given
class. Each class templates is divided in n_fbands sub-bands extracted
from the filterbank approach.
weights_ : ndarray of shape (n_fbands, n_classes, n_channels)
Weight coefficients for the different electrodes which are used
as spatial filters for the data.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
"Enhancing detection of SSVEPs for a high-speed brain speller using
task-related component analysis",
IEEE Trans. Biomed. Eng, 65(1):104-112, 2018.
Code based on the Matlab implementation from authors of [1]_
(https://github.com/mnakanishi/TRCA-SSVEP).
"""
def __init__(
self,
interval,
freqs,
n_fbands=5,
downsample=1,
is_ensemble=True,
method="original",
estimator="scm",
):
self.freqs = freqs
self.peaks = np.array([float(f) for f in freqs.keys()])
self.n_fbands = n_fbands
self.downsample = downsample
self.interval = interval
self.slen = interval[1] - interval[0]
self.is_ensemble = is_ensemble
self.fb_coefs = [(x + 1) ** (-1.25) + 0.25 for x in range(self.n_fbands)]
self.estimator = estimator
self.method = method
def _Q_S_estim(self, data):
# Check if X is a single trial (test data) or not
if data.ndim == 2:
data = data[np.newaxis, ...]
# Get data shape
n_trials, n_channels, n_samples = data.shape
X = np.concatenate((data, data), axis=1)
# Initialize S matrix
S = np.zeros((n_channels, n_channels))
# Estimate covariance between every trial and the rest of the trials (excluding itself)
for trial_i in range(n_trials - 1):
x1 = np.squeeze(data[trial_i, :, :])
# Mean centering for the selected trial
x1 -= np.mean(x1, 0)
# Select a second trial that is different
for trial_j in range(trial_i + 1, n_trials):
x2 = np.squeeze(data[trial_j, :, :])
# Mean centering for the selected trial
x2 -= np.mean(x2, 0)
# Put the two trials together
X = np.concatenate((x1, x2))
if n_channels == 1:
X = X.reshape((n_channels, len(X)))
# Regularized covariance estimate
cov = Covariances(estimator=self.estimator).fit_transform(
X[np.newaxis, ...]
)
cov = np.squeeze(cov)
# Compute empirical covariance between the two selected trials and sum it
if n_channels > 1:
S = S + cov[:n_channels, n_channels:] + cov[n_channels:, :n_channels]
else:
S = S + cov + cov
# Concatenate all the trials
UX = np.empty((n_channels, n_samples * n_trials))
for trial_n in range(n_trials):
UX[:, trial_n * n_samples : (trial_n + 1) * n_samples] = data[trial_n, :, :]
# Mean centering
UX -= np.mean(UX, 1)[:, None]
cov = Covariances(estimator=self.estimator).fit_transform(UX[np.newaxis, ...])
Q = np.squeeze(cov)
return S, Q
def _Q_S_estim_riemann(self, data):
# Check if X is a single trial (test data) or not
if data.ndim == 2:
data = data[np.newaxis, ...]
# Get data shape
n_trials, n_channels, n_samples = data.shape
X = np.concatenate((data, data), axis=1)
# Concatenate all the trials
UX = np.empty((n_channels, n_samples * n_trials))
for trial_n in range(n_trials):
UX[:, trial_n * n_samples : (trial_n + 1) * n_samples] = data[trial_n, :, :]
# Mean centering
UX -= np.mean(UX, 1)[:, None]
# Compute empirical variance of all data (to be bounded)
cov = Covariances(estimator=self.estimator).fit_transform(UX[np.newaxis, ...])
Q = np.squeeze(cov)
cov = Covariances(estimator=self.estimator).fit_transform(X)
S = cov[:, :n_channels, n_channels:] + cov[:, n_channels:, :n_channels]
S = mean_covariance(S, metric=self.method)
return S, Q
def _compute_trca(self, X):
"""Computation of TRCA spatial filters.
Parameters
----------
X: ndarray of shape (n_trials, n_channels, n_samples)
Training data
Returns
-------
W: ndarray of shape (n_channels)
Weight coefficients for electrodes which can be used as
a spatial filter.
"""
if self.method == "original":
S, Q = self._Q_S_estim(X)
elif self.method == "riemann" or self.method == "logeuclid":
S, Q = self._Q_S_estim_riemann(X)
else:
raise ValueError(
"Method should be either 'original', 'riemann' or 'logeuclid'."
)
# Compute eigenvalues and vectors
lambdas, W = linalg.eig(S, Q, left=True, right=False)
# Sort eigenvectors by eigenvalue
arr1inds = lambdas.argsort()
W = W[:, arr1inds[::-1]]
return W[:, 0], W
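# In other words, the returned filters solve the TRCA objective
#   w* = argmax_w (w^T S w) / (w^T Q w),
# where S accumulates the covariance that is reproducible across trials (the
# task-related part) and Q is the covariance of the concatenated data. The
# generalized eigendecomposition yields one candidate filter per channel,
# sorted by how task-related the extracted component is; W[:, 0] is the
# strongest one.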
def fit(self, X, y):
"""Extract spatial filters and templates from the given calibration data.
Parameters
----------
X : ndarray of shape (n_trials, n_channels, n_samples)
Training data. Trials are grouped by class, divided in n_fbands bands by
the filterbank approach and then used to calculate weight vectors and
templates for each class and band.
y : ndarray of shape (n_trials,)
Label vector in respect to X.
Returns
-------
self: SSVEP_TRCA object
Instance of classifier.
"""
# Downsample data
X = X[:, :, :: self.downsample]
# Get shape of X and labels
n_trials, n_channels, n_samples = X.shape
self.sfreq = int(n_samples / self.slen)
self.sfreq
n_bits = self._expect_num()
if n_bits not in {8, 16, 32, 64}:
self._parse_error("expected 8, 16, 32, or 64")
self._expect_token("<")
self._parse_cells(prop, n_bits//8)
elif tok.val == "[":
self._parse_bytes(prop)
elif tok.id is _T_STRING:
prop._add_marker(_TYPE_STRING)
prop.value += self._unescape(tok.val.encode("utf-8")) + b"\0"
elif tok.id is _T_REF:
prop._add_marker(_REF_PATH, tok.val)
elif tok.id is _T_INCBIN:
self._parse_incbin(prop)
else:
self._parse_error("malformed value")
# Parse labels after the value (e.g., '< 0 > label:, ...')
self._parse_value_labels(prop)
tok = self._next_token()
if tok.val == ";":
return
if tok.val == ",":
continue
self._parse_error("expected ';' or ','")
def _parse_cells(self, prop, n_bytes):
# Parses '<...>'
prop._add_marker(_N_BYTES_TO_TYPE[n_bytes])
while True:
tok = self._peek_token()
if tok.id is _T_REF:
self._next_token()
if n_bytes != 4:
self._parse_error("phandle references are only allowed in "
"arrays with 32-bit elements")
prop._add_marker(_REF_PHANDLE, tok.val)
elif tok.id is _T_LABEL:
prop._add_marker(_REF_LABEL, tok.val)
self._next_token()
elif self._check_token(">"):
return
else:
# Literal value
num = self._eval_prim()
try:
prop.value += num.to_bytes(n_bytes, "big")
except OverflowError:
try:
# Try again as a signed number, in case it's negative
prop.value += num.to_bytes(n_bytes, "big", signed=True)
except OverflowError:
self._parse_error("{} does not fit in {} bits"
.format(num, 8*n_bytes))
def _parse_bytes(self, prop):
# Parses '[ ... ]'
prop._add_marker(_TYPE_UINT8)
while True:
tok = self._next_token()
if tok.id is _T_BYTE:
prop.value += tok.val.to_bytes(1, "big")
elif tok.id is _T_LABEL:
prop._add_marker(_REF_LABEL, tok.val)
elif tok.val == "]":
return
else:
self._parse_error("expected two-digit byte or ']'")
def _parse_incbin(self, prop):
# Parses
#
# /incbin/ ("filename")
#
# and
#
# /incbin/ ("filename", <offset>, <size>)
prop._add_marker(_TYPE_UINT8)
self._expect_token("(")
tok = self._next_token()
if tok.id is not _T_STRING:
self._parse_error("expected quoted filename")
filename = tok.val
tok = self._next_token()
if tok.val == ",":
offset = self._eval_prim()
self._expect_token(",")
size = self._eval_prim()
self._expect_token(")")
else:
if tok.val != ")":
self._parse_error("expected ',' or ')'")
offset = None
try:
with self._open(filename, "rb") as f:
if offset is None:
prop.value += f.read()
else:
f.seek(offset)
prop.value += f.read(size)
except OSError as e:
self._parse_error("could not read '{}': {}"
.format(filename, e))
def _parse_value_labels(self, prop):
# _parse_assignment() helper for parsing labels before/after each
# comma-separated value
while True:
tok = self._peek_token()
if tok.id is not _T_LABEL:
return
prop._add_marker(_REF_LABEL, tok.val)
self._next_token()
def _node_phandle(self, node):
# Returns the phandle for Node 'node', creating a new phandle if the
# node has no phandle, and fixing up the value for existing
# self-referential phandles (which get set to b'\0\0\0\0' initially).
# Self-referential phandles must be rewritten instead of recreated, so
# that labels are preserved.
if "phandle" in node.props:
phandle_prop = node.props["phandle"]
else:
phandle_prop = Property(node, "phandle")
phandle_prop._add_marker(_TYPE_UINT32) # For displaying
phandle_prop.value = b'\0\0\0\0'
if phandle_prop.value == b'\0\0\0\0':
phandle_i = 1
while phandle_i in self.phandle2node:
phandle_i += 1
self.phandle2node[phandle_i] = node
phandle_prop.value = phandle_i.to_bytes(4, "big")
node.props["phandle"] = phandle_prop
return phandle_prop.value
# Expression evaluation
def _eval_prim(self):
tok = self._peek_token()
if tok.id in (_T_NUM, _T_CHAR_LITERAL):
return self._next_token().val
tok = self._next_token()
if tok.val != "(":
self._parse_error("expected number or parenthesized expression")
val = self._eval_ternary()
self._expect_token(")")
return val
def _eval_ternary(self):
val = self._eval_or()
if self._check_token("?"):
if_val = self._eval_ternary()
self._expect_token(":")
else_val = self._eval_ternary()
return if_val if val else else_val
return val
def _eval_or(self):
val = self._eval_and()
while self._check_token("||"):
val = 1 if self._eval_and() or val else 0
return val
def _eval_and(self):
val = self._eval_bitor()
while self._check_token("&&"):
val = 1 if self._eval_bitor() and val else 0
return val
def _eval_bitor(self):
val = self._eval_bitxor()
while self._check_token("|"):
val |= self._eval_bitxor()
return val
def _eval_bitxor(self):
val = self._eval_bitand()
while self._check_token("^"):
val ^= self._eval_bitand()
return val
def _eval_bitand(self):
val = self._eval_eq()
while self._check_token("&"):
val &= self._eval_eq()
return val
def _eval_eq(self):
val = self._eval_rela()
while True:
if self._check_token("=="):
val = 1 if val == self._eval_rela() else 0
elif self._check_token("!="):
val = 1 if val != self._eval_rela() else 0
else:
return val
def _eval_rela(self):
val = self._eval_shift()
while True:
if self._check_token("<"):
val = 1 if val < self._eval_shift() else 0
elif self._check_token(">"):
val = 1 if val > self._eval_shift() else 0
elif self._check_token("<="):
val = 1 if val <= self._eval_shift() else 0
elif self._check_token(">="):
val = 1 if val >= self._eval_shift() else 0
else:
return val
def _eval_shift(self):
val = self._eval_add()
while True:
if self._check_token("<<"):
val <<= self._eval_add()
elif self._check_token(">>"):
val >>= self._eval_add()
else:
return val
def _eval_add(self):
val = self._eval_mul()
while True:
if self._check_token("+"):
val += self._eval_mul()
elif self._check_token("-"):
val -= self._eval_mul()
else:
return val
def _eval_mul(self):
val = self._eval_unary()
while True:
if self._check_token("*"):
val *= self._eval_unary()
elif self._check_token("/"):
denom = self._eval_unary()
if not denom:
self._parse_error("division by zero")
val //= denom
elif self._check_token("%"):
denom = self._eval_unary()
if not denom:
self._parse_error("division by zero")
val %= denom
else:
return val
def _eval_unary(self):
if self._check_token("-"):
return -self._eval_unary()
if self._check_token("~"):
return ~self._eval_unary()
if self._check_token("!"):
return 0 if self._eval_unary() else 1
return self._eval_prim()
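# Worked example (illustrative): for a cell value like ((1 + 2 * 3) << 1),
# _eval_prim() consumes the outer parentheses and defers to _eval_ternary(),
# which descends the precedence chain; the inner (1 + 2 * 3) comes back up as
# 7 because _eval_mul() binds 2 * 3 before _eval_add() adds 1, and
# _eval_shift() then applies << 1 on the way out, so the cell evaluates to 14
# -- the same precedence ordering as a C expression.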
#
# Lexing
#
def _check_token(self, val):
if self._peek_token().val == val:
self._next_token()
return True
return False
def _peek_token(self):
if not self._saved_token:
self._saved_token = self._next_token()
return self._saved_token
def _next_token(self):
if self._saved_token:
tmp = self._saved_token
self._saved_token = None
return tmp
while True:
tok_id = None
match = _token_re.match(self._file_contents, self._tok_end_i)
if match:
tok_id = match.lastindex
if tok_id is _T_CHAR_LITERAL:
val = self._unescape(match.group(tok_id).encode("utf-8"))
if len(val) != 1:
self._parse_error("character literals must be length 1")
tok_val = ord(val)
else:
tok_val = match.group(tok_id)
elif self._lexer_state is _DEFAULT:
match = _num_re.match(self._file_contents, self._tok_end_i)
if match:
tok_id = _T_NUM
num_s = match.group(1)
tok_val = int(num_s,
16 if num_s.startswith(("0x", "0X")) else
8 if num_s[0] == "0" else
10)
elif self._lexer_state is _EXPECT_PROPNODENAME:
match = _propnodename_re.match(self._file_contents,
self._tok_end_i)
if match:
tok_id = _T_PROPNODENAME
tok_val = match.group(1)
self._lexer_state = _DEFAULT
else: # self._lexer_state is _EXPECT_BYTE
match = _byte_re.match(self._file_contents, self._tok_end_i)
if match:
tok_id = _T_BYTE
tok_val = int(match.group(), 16)
if not tok_id:
match = _misc_re.match(self._file_contents, self._tok_end_i)
if match:
tok_id = _T_MISC
tok_val = match.group()
else:
self._tok_i = self._tok_end_i
# Could get here due to a node/property name appearing in
# an unexpected context as well as for bad characters in
# files. Generate a token for it so that the error can
# trickle up to some context where we can give a more
# helpful error message.
return _Token(_T_BAD, "<unknown token>")
self._tok_i = match.start()
self._tok_end_i = match.end()
if tok_id is _T_SKIP:
self._lineno += tok_val.count("\n")
continue
# /include/ is handled in the lexer in the C tools as well, and can
# appear anywhere
if tok_id is _T_INCLUDE:
# Can have newlines between /include/ and the filename
self._lineno += tok_val.count("\n")
# Do this manual extraction instead of doing it in the regex so
# that we can properly count newlines
filename = tok_val[tok_val.find('"') + 1:-1]
self._enter_file(filename)
continue
if tok_id is _T_LINE:
# #line directive
self._lineno = int(tok_val.split()[0]) - 1
self.filename = tok_val[tok_val.find('"') + 1:-1]
continue
if tok_id is _T_EOF:
if self._filestack:
self._leave_file()
continue
return _Token(_T_EOF, "<EOF>")
# State handling
if tok_id in (_T_DEL_PROP, _T_DEL_NODE, _T_OMIT_IF_NO_REF) or \
tok_val in ("{", ";"):
self._lexer_state = _EXPECT_PROPNODENAME
elif tok_val == "[":
self._lexer_state = _EXPECT_BYTE
elif tok_id in (_T_MEMRESERVE, _T_BITS) or tok_val == "]":
self._lexer_state = _DEFAULT
return _Token(tok_id, tok_val)
def _expect_token(self, tok_val):
# Raises an error if the next token does not have the string value
# 'tok_val'. Returns the token.
tok = self._next_token()
if tok.val != tok_val:
self._parse_error("expected '{}', not '{}'"
.format(tok_val, tok.val))
return tok
def _expect_num(self):
# Raises an error if the next token is not a number. Returns the token.
tok = self._next_token()
if tok.id is not _T_NUM:
self._parse_error("expected number")
return tok.val
def _parse_error(self, s):
_err("{}:{} (column {}): parse error: {}".format(
self.filename, self._lineno,
# This works out for the first line of the file too, where rfind()
# returns -1
self._tok_i - self._file_contents.rfind("\n", 0, self._tok_i + 1),
s))
def _enter_file(self, filename):
# Enters the /include/d file 'filename', remembering the position in
# the /include/ing file for later
self._filestack.append((self.filename, self._lineno,
self._file_contents, self._tok_end_i))
# Handle escapes in filenames, just for completeness
filename = self._unescape(filename.encode("utf-8"))
try:
filename = filename.decode("utf-8")
except UnicodeDecodeError:
self._parse_error("filename is not valid UTF-8")
with self._open(filename, encoding="utf-8") as f:
try:
self._file_contents = f.read()
except OSError as e:
self._parse_error(e)
# Check for recursive /include/
for i, parent in enumerate(self._filestack):
if filename == parent[0]:
self._parse_error("recursive /include/:\n" + " ->\n".join(
["{}:{}".format(parent[0], parent[1])
for parent in self._filestack[i:]] + [filename]))
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import (
LearnedPositionalEmbedding, MultiheadAttention,
SinusoidalPositionalEmbedding,
)
from ncc.modules.seq2seq.ncc_incremental_decoder import NccIncrementalDecoder
from ncc.modules.code2vec.ncc_encoder import NccEncoder
from ncc.models.ncc_model import NccModel
from ncc.models import register_model
# from . import (
# FairseqIncrementalDecoder, NccEncoder, NccModel,
# register_model, register_model_architecture,
# )
@register_model('doc_pretrain_transformer')
class DocPretrainTransformerModel(NccModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
# @staticmethod
# def add_args(parser):
# """Add model-specific arguments to the parser."""
# parser.add_argument('--dropout', type=float, metavar='D',
# help='dropout probability')
# parser.add_argument('--attention-dropout', type=float, metavar='D',
# help='dropout probability for attention weights')
# parser.add_argument('--relu-dropout', type=float, metavar='D',
# help='dropout probability after ReLU in FFN')
# parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
# help='path to pre-trained encoder embedding')
# parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
# help='encoder embedding dimension')
# parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
# help='encoder embedding dimension for FFN')
# parser.add_argument('--encoder-layers', type=int, metavar='N',
# help='num encoder layers')
# parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
# help='num encoder attention heads')
# parser.add_argument('--encoder-normalize-before', default=False, action='store_true',
# help='apply layernorm before each encoder block')
# parser.add_argument('--encoder-learned-pos', default=False, action='store_true',
# help='use learned positional embeddings in the encoder')
# parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
# help='path to pre-trained decoder embedding')
# parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
# help='decoder embedding dimension')
# parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
# help='decoder embedding dimension for FFN')
# parser.add_argument('--decoder-layers', type=int, metavar='N',
# help='num decoder layers')
# parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
# help='num decoder attention heads')
# parser.add_argument('--decoder-learned-pos', default=False, action='store_true',
# help='use learned positional embeddings in the decoder')
# parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
# help='apply layernorm before each decoder block')
# parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
# help='share decoder input and output embeddings')
# parser.add_argument('--share-all-embeddings', default=False, action='store_true',
# help='share encoder, decoder and output embeddings'
# ' (requires shared dictionary and embed dim)')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
# base_architecture(args)
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise RuntimeError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise RuntimeError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
# note the decoder has the same vocabulary size
decoder_embed_tokens = build_embedding(
src_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = TransformerEncoder(args, src_dict, encoder_embed_tokens)
decoder = TransformerDecoder(args, src_dict, tgt_dict, decoder_embed_tokens)
return DocPretrainTransformerModel(encoder, decoder)
def forward(self, src_tokens, doc_pad_mask, doc_pos_tok, masked_sent_positions, prev_output_tokens):
encoder_out = self.encoder(src_tokens, doc_pad_mask, doc_pos_tok)
decoder_out = self.decoder(encoder_out, masked_sent_positions, prev_output_tokens)
return decoder_out
def get_normalized_probs(self, net_output, log_probs, idx=0):
"""Get normalized probabilities (or log probs) from a net's output."""
assert idx == 0 or idx == 1
logits = net_output[idx].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
def get_targets(self, sample, net_output, key='target'):
"""Get targets from either the sample or the net's output."""
return sample[key]
class TransformerEncoder(NccEncoder):
"""Transformer encoder."""
def __init__(self, args, dictionary, embed_tokens, left_pad=True):
super().__init__(dictionary)
self.dropout = args.dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
1024, embed_dim, self.padding_idx,
left_pad=left_pad,
learned=args.encoder_learned_pos,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
self.sent_embed_positions = PositionalEmbedding(
1024, embed_dim, self.padding_idx,
left_pad=False,
learned=args.encoder_learned_pos,
)
self.doc_layers = nn.ModuleList([])
self.doc_layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
def forward(self, src_tokens, doc_pad_mask, doc_pos_tok):
bsz, n_sent, seqlen = src_tokens.size()
src_tokens = src_tokens.view(bsz * n_sent, seqlen)
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
doc_pos = self.sent_embed_positions(doc_pos_tok)
sent_repr = x[-1].view(bsz, n_sent, -1)
sent_repr = sent_repr + doc_pos
# n_sent x bsz x C
sent_repr = sent_repr.transpose(0, 1)
for doc_layer in self.doc_layers:
sent_repr = doc_layer(sent_repr, doc_pad_mask)
return {
'encoder_out': sent_repr, # n_sent x bsz x C
'encoder_padding_mask': doc_pad_mask, # bsz x n_sent
}
def reorder_encoder_out(self, encoder_out_dict, new_order):
if encoder_out_dict['encoder_out'] is not None:
encoder_out_dict['encoder_out'] = \
encoder_out_dict['encoder_out'].index_select(1, new_order)
if encoder_out_dict['encoder_padding_mask'] is not None:
encoder_out_dict['encoder_padding_mask'] = \
encoder_out_dict['encoder_padding_mask'].index_select(0, new_order)
return encoder_out_dict
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions()
def upgrade_state_dict(self, state_dict):
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
if 'encoder.embed_positions.weights' in state_dict:
del state_dict['encoder.embed_positions.weights']
if 'encoder.embed_positions._float_tensor' not in state_dict:
state_dict['encoder.embed_positions._float_tensor'] = torch.FloatTensor()
return state_dict
def get_sent_end_repr(src_emb, sent_ends):
bsz, nsent = sent_ends.size()
assert bsz == src_emb.size(0)
seqlen = src_emb.size(1)
offset = torch.linspace(0, (bsz-1)*seqlen, bsz).type(sent_ends.type())
sent_ends_abs = sent_ends + offset.view(-1, 1)
sent_ends_repr = src_emb.contiguous().view(bsz*seqlen, -1)[sent_ends_abs]
sent_ends_repr = sent_ends_repr.view(bsz, nsent, -1)
return sent_ends_repr
class TransformerDecoder(NccIncrementalDecoder):
"""Transformer decoder."""
def __init__(self, args, src_dictionary, dictionary, embed_tokens, left_pad=False):
'''
The decoder has two parts:
1) a transformer decoder to predict masked sentences
2) a decoder to predict sentence labels
'''
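# Concretely, forward() below returns a pair: `masked_sent`, token logits over
# the (source) vocabulary for every masked sentence to be reconstructed, and
# `sent_label`, one logit vector of size len(dictionary) per input sentence.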
super().__init__(src_dictionary)
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
embed_dim = embed_tokens.embedding_dim
padding_idx = embed_tokens.padding_idx
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
1024, embed_dim, padding_idx,
left_pad=left_pad,
learned=args.decoder_learned_pos,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(args)
for i in range(args.decoder_layers)
])
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(src_dictionary), embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=embed_dim ** -0.5)
# print('embed_out size', self.embed_out.size())
# this part is for sentence label prediction
self.out_proj = Linear(embed_dim, len(dictionary))
def forward_sent_label(self, x):
x = F.dropout(x, p=self.dropout, training=self.training)
return self.out_proj(x)
def forward_masked_sent(self, encoder_sent_repr, masked_sent_positions, prev_output_tokens, incremental_state=None):
# encoder_sent_repr: bsz x n_sent x C
# masked_sent_positions: bsz x max_num_mask
# masked_encoder_sent_repr: bsz x max_num_mask x C
masked_encoder_sent_repr = get_sent_end_repr(encoder_sent_repr, masked_sent_positions)
masked_encoder_sent_repr_2d = masked_encoder_sent_repr.view(
masked_encoder_sent_repr.size(0)*masked_encoder_sent_repr.size(1),
masked_encoder_sent_repr.size(2) )
# prev_output_tokens: bsz x max_n_sent x T --> (bsz x max_n_sent) x T
prev_output_tokens = prev_output_tokens.view(prev_output_tokens.size(0)*prev_output_tokens.size(1),
prev_output_tokens.size(2))
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# decoder layers
for layer in self.layers:
x = layer(
x,
masked_encoder_sent_repr_2d,
incremental_state,
)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
# project back to size of vocabulary
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
else:
x = F.linear(x, self.embed_out)
return x
def forward(self, encoder_out, masked_sent_positions, prev_output_tokens, incremental_state=None):
# n_sent x bsz x C
x = encoder_out['encoder_out']
# n_sent x bsz x C -> bsz x n_sent x C
x = x.transpose(0, 1)
# predict sentence label
sent_label = self.forward_sent_label(x)
# predict masked sentence
masked_sent = self.forward_masked_sent(x, masked_sent_positions, prev_output_tokens, incremental_state)
return masked_sent, sent_label
def max_positions(self):
"""Maximum output length supported by the decoder."""
return 1024
def upgrade_state_dict(self, state_dict):
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
if 'decoder.embed_positions.weights' in state_dict:
del state_dict['decoder.embed_positions.weights']
if 'decoder.embed_positions._float_tensor' not in state_dict:
state_dict['decoder.embed_positions._float_tensor'] = torch.FloatTensor()
return state_dict
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: dropout -> add residual -> layernorm.
In the tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
dropout -> add residual.
We default to the approach in the paper, but the tensor2tensor approach can
be enabled by setting `normalize_before=True`.
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
dropout=args.attention_dropout,
)
self.dropout = args.dropout
self.relu_dropout = args.relu_dropout
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for i in range(2)])
def forward(self, x, encoder_padding_mask):
residual = x
x = self.maybe_layer_norm(0, x, before=True)
x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_modulated')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_modulated')
_modulated = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_modulated', [dirname(__file__)])
except ImportError:
import _modulated
return _modulated
try:
_mod = imp.load_module('_modulated', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_modulated = swig_import_helper()
del swig_import_helper
else:
import _modulated
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
import btk20
from btk20 import stream
oldimport = """
import btk20.stream
"""
class NormalFFTAnalysisBankPtr(btk20.stream.VectorComplexFeatureStreamPtr):
__swig_setmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, NormalFFTAnalysisBankPtr, name, value)
__swig_getmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, NormalFFTAnalysisBankPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _modulated.new_NormalFFTAnalysisBankPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _modulated.NormalFFTAnalysisBankPtr___iter__(self)
def __deref__(self):
return _modulated.NormalFFTAnalysisBankPtr___deref__(self)
__swig_destroy__ = _modulated.delete_NormalFFTAnalysisBankPtr
__del__ = lambda self: None
def next(self, frameX=-5):
return _modulated.NormalFFTAnalysisBankPtr_next(self, frameX)
def reset(self):
return _modulated.NormalFFTAnalysisBankPtr_reset(self)
def fftlen(self):
return _modulated.NormalFFTAnalysisBankPtr_fftlen(self)
def name(self):
return _modulated.NormalFFTAnalysisBankPtr_name(self)
def size(self):
return _modulated.NormalFFTAnalysisBankPtr_size(self)
def current(self):
return _modulated.NormalFFTAnalysisBankPtr_current(self)
NormalFFTAnalysisBankPtr_swigregister = _modulated.NormalFFTAnalysisBankPtr_swigregister
NormalFFTAnalysisBankPtr_swigregister(NormalFFTAnalysisBankPtr)
class OverSampledDFTAnalysisBankPtr(btk20.stream.VectorComplexFeatureStreamPtr):
__swig_setmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, OverSampledDFTAnalysisBankPtr, name, value)
__swig_getmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, OverSampledDFTAnalysisBankPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _modulated.new_OverSampledDFTAnalysisBankPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _modulated.OverSampledDFTAnalysisBankPtr___iter__(self)
def __deref__(self):
return _modulated.OverSampledDFTAnalysisBankPtr___deref__(self)
__swig_destroy__ = _modulated.delete_OverSampledDFTAnalysisBankPtr
__del__ = lambda self: None
def polyphase(self, m, n):
return _modulated.OverSampledDFTAnalysisBankPtr_polyphase(self, m, n)
def next(self, frameX=-5):
return _modulated.OverSampledDFTAnalysisBankPtr_next(self, frameX)
def reset(self):
return _modulated.OverSampledDFTAnalysisBankPtr_reset(self)
def is_end(self):
return _modulated.OverSampledDFTAnalysisBankPtr_is_end(self)
def fftlen(self):
return _modulated.OverSampledDFTAnalysisBankPtr_fftlen(self)
def shiftlen(self):
return _modulated.OverSampledDFTAnalysisBankPtr_shiftlen(self)
def frame_no(self):
return _modulated.OverSampledDFTAnalysisBankPtr_frame_no(self)
def name(self):
return _modulated.OverSampledDFTAnalysisBankPtr_name(self)
def size(self):
return _modulated.OverSampledDFTAnalysisBankPtr_size(self)
def current(self):
return _modulated.OverSampledDFTAnalysisBankPtr_current(self)
OverSampledDFTAnalysisBankPtr_swigregister = _modulated.OverSampledDFTAnalysisBankPtr_swigregister
OverSampledDFTAnalysisBankPtr_swigregister(OverSampledDFTAnalysisBankPtr)
class OverSampledDFTSynthesisBankPtr(btk20.stream.VectorFloatFeatureStreamPtr):
__swig_setmethods__ = {}
for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, OverSampledDFTSynthesisBankPtr, name, value)
__swig_getmethods__ = {}
for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, OverSampledDFTSynthesisBankPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _modulated.new_OverSampledDFTSynthesisBankPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _modulated.OverSampledDFTSynthesisBankPtr___iter__(self)
def __deref__(self):
return _modulated.OverSampledDFTSynthesisBankPtr___deref__(self)
__swig_destroy__ = _modulated.delete_OverSampledDFTSynthesisBankPtr
__del__ = lambda self: None
def polyphase(self, m, n):
return _modulated.OverSampledDFTSynthesisBankPtr_polyphase(self, m, n)
def next(self, frameX=-5):
return _modulated.OverSampledDFTSynthesisBankPtr_next(self, frameX)
def reset(self):
return _modulated.OverSampledDFTSynthesisBankPtr_reset(self)
def input_source_vector(self, block):
return _modulated.OverSampledDFTSynthesisBankPtr_input_source_vector(self, block)
def no_stream_feature(self, flag=True):
return _modulated.OverSampledDFTSynthesisBankPtr_no_stream_feature(self, flag)
def name(self):
return _modulated.OverSampledDFTSynthesisBankPtr_name(self)
def size(self):
return _modulated.OverSampledDFTSynthesisBankPtr_size(self)
def is_end(self):
return _modulated.OverSampledDFTSynthesisBankPtr_is_end(self)
def current(self):
return _modulated.OverSampledDFTSynthesisBankPtr_current(self)
OverSampledDFTSynthesisBankPtr_swigregister = _modulated.OverSampledDFTSynthesisBankPtr_swigregister
OverSampledDFTSynthesisBankPtr_swigregister(OverSampledDFTSynthesisBankPtr)
class PerfectReconstructionFFTAnalysisBankPtr(btk20.stream.VectorComplexFeatureStreamPtr):
__swig_setmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, PerfectReconstructionFFTAnalysisBankPtr, name, value)
__swig_getmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, PerfectReconstructionFFTAnalysisBankPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _modulated.new_PerfectReconstructionFFTAnalysisBankPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr___iter__(self)
def __deref__(self):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr___deref__(self)
__swig_destroy__ = _modulated.delete_PerfectReconstructionFFTAnalysisBankPtr
__del__ = lambda self: None
def polyphase(self, m, n):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr_polyphase(self, m, n)
def next(self, frameX=-5):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr_next(self, frameX)
def reset(self):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr_reset(self)
def name(self):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr_name(self)
def size(self):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr_size(self)
def current(self):
return _modulated.PerfectReconstructionFFTAnalysisBankPtr_current(self)
PerfectReconstructionFFTAnalysisBankPtr_swigregister = _modulated.PerfectReconstructionFFTAnalysisBankPtr_swigregister
PerfectReconstructionFFTAnalysisBankPtr_swigregister(PerfectReconstructionFFTAnalysisBankPtr)
class PerfectReconstructionFFTSynthesisBankPtr(btk20.stream.VectorFloatFeatureStreamPtr):
__swig_setmethods__ = {}
for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, PerfectReconstructionFFTSynthesisBankPtr, name, value)
__swig_getmethods__ = {}
for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, PerfectReconstructionFFTSynthesisBankPtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _modulated.new_PerfectReconstructionFFTSynthesisBankPtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr___iter__(self)
def __deref__(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr___deref__(self)
__swig_destroy__ = _modulated.delete_PerfectReconstructionFFTSynthesisBankPtr
__del__ = lambda self: None
def next(self, frameX=-5):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_next(self, frameX)
def reset(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_reset(self)
def polyphase(self, m, n):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_polyphase(self, m, n)
def name(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_name(self)
def size(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_size(self)
def is_end(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_is_end(self)
def current(self):
return _modulated.PerfectReconstructionFFTSynthesisBankPtr_current(self)
PerfectReconstructionFFTSynthesisBankPtr_swigregister = _modulated.PerfectReconstructionFFTSynthesisBankPtr_swigregister
PerfectReconstructionFFTSynthesisBankPtr_swigregister(PerfectReconstructionFFTSynthesisBankPtr)
class DelayFeaturePtr(btk20.stream.VectorComplexFeatureStreamPtr):
__swig_setmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DelayFeaturePtr, name, value)
__swig_getmethods__ = {}
for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DelayFeaturePtr, name)
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
this = _modulated.new_DelayFeaturePtr(*args, **kwargs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __iter__(self):
return _modulated.DelayFeaturePtr___iter__(self)
def __deref__(self):
return _modulated.DelayFeaturePtr___deref__(self)
__swig_destroy__ = _modulated.delete_DelayFeaturePtr
__del__ = lambda self: None
def set_time_delay(self, time_delay):
return _modulated.DelayFeaturePtr_set_time_delay(self, time_delay)
def next(self, frameX=-5):
return _modulated.DelayFeaturePtr_next(self, frameX)
def reset(self):
return _modulated.DelayFeaturePtr_reset(self)
def name(self):
return _modulated.DelayFeaturePtr_name(self)
def size(self):
return _modulated.DelayFeaturePtr_size(self)
def current(self):
return _modulated.DelayFeaturePtr_current(self)
DelayFeaturePtr_swigregister = _modulated.DelayFeaturePtr_swigregister
DelayFeaturePtr_swigregister(DelayFeaturePtr)
class CosineModulatedPrototypeDesign(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CosineModulatedPrototypeDesign, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CosineModulatedPrototypeDesign, name)
__repr__ = _swig_repr
def __init__(self, M=256, N=3072, fs=1.0):
this = _modulated.new_CosineModulatedPrototypeDesign(M, N, fs)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _modulated.delete_CosineModulatedPrototypeDesign
__del__ = lambda self: None
def fcn(self, x, f):
return _modulated.CosineModulatedPrototypeDesign_fcn(self, x, f)
def grad(self, x, g):
return _modulated.CosineModulatedPrototypeDesign_grad(self, x, g)
def M(self):
return _modulated.CosineModulatedPrototypeDesign_M(self)
def N(self):
return _modulated.CosineModulatedPrototypeDesign_N(self)
def m(self):
return _modulated.CosineModulatedPrototypeDesign_m(self)
def J(self):
return _modulated.CosineModulatedPrototypeDesign_J(self)
def proto(self):
return _modulated.CosineModulatedPrototypeDesign_proto(self)
CosineModulatedPrototypeDesign_swigregister = _modulated.CosineModulatedPrototypeDesign_swigregister
CosineModulatedPrototypeDesign_swigregister(CosineModulatedPrototypeDesign)
def design_f(v, params):
return _modulated.design_f(v, params)
design_f = _modulated.design_f
def design_df(v, params, df):
return _modulated.design_df(v, params, df)
design_df = _modulated.design_df
def design_fdf(v, params, f, df):
return _modulated.design_fdf(v, params, f, df)
design_fdf = _modulated.design_fdf
def write_gsl_format(fileName, prototype):
return _modulated.write_gsl_format(fileName, prototype)
write_gsl_format = _modulated.write_gsl_format
class AnalysisOversampledDFTDesignPtr(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, AnalysisOversampledDFTDesignPtr, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, AnalysisOversampledDFTDesignPtr, name)
__repr__ = _swig_repr
def __init__(self, M=256, m=4, r=1, wp=1.0, tau_h=-1):
this = _modulated.new_AnalysisOversampledDFTDesignPtr(M, m, r, wp, tau_h)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __deref__(self):
return _modulated.AnalysisOversampledDFTDesignPtr___deref__(self)
__swig_destroy__ = _modulated.delete_AnalysisOversampledDFTDesignPtr
__del__ = lambda self: None
def design(self, tolerance=2.2204e-16):
return _modulated.AnalysisOversampledDFTDesignPtr_design(self, tolerance)
def save(self, fileName):
return _modulated.AnalysisOversampledDFTDesignPtr_save(self, fileName)
def calcError(self, doPrint=True):
return _modulated.AnalysisOversampledDFTDesignPtr_calcError(self, doPrint)
AnalysisOversampledDFTDesignPtr_swigregister = _modulated.AnalysisOversampledDFTDesignPtr_swigregister
AnalysisOversampledDFTDesignPtr_swigregister(AnalysisOversampledDFTDesignPtr)
class SynthesisOversampledDFTDesignPtr(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SynthesisOversampledDFTDesignPtr, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SynthesisOversampledDFTDesignPtr, name)
__repr__ = _swig_repr
def __init__(self, h, M=256, m=4, r=1, v=1.0, wp=1.0, tau_T=-1):
this = _modulated.new_SynthesisOversampledDFTDesignPtr(h, M, m, r, v, wp, tau_T)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __deref__(self):
return _modulated.SynthesisOversampledDFTDesignPtr___deref__(self)
__swig_destroy__ = _modulated.delete_SynthesisOversampledDFTDesignPtr
__del__ = lambda self: None
def design(self, tolerance=2.2204e-16):
return _modulated.SynthesisOversampledDFTDesignPtr_design(self, tolerance)
def save(self, fileName):
return _modulated.SynthesisOversampledDFTDesignPtr_save(self, fileName)
def calcError(self, doPrint=True):
return _modulated.SynthesisOversampledDFTDesignPtr_calcError(self, doPrint)
SynthesisOversampledDFTDesignPtr_swigregister = _modulated.SynthesisOversampledDFTDesignPtr_swigregister
SynthesisOversampledDFTDesignPtr_swigregister(SynthesisOversampledDFTDesignPtr)
class AnalysisNyquistMDesignPtr(AnalysisOversampledDFTDesignPtr):
__swig_setmethods__ = {}
for _s in [AnalysisOversampledDFTDesignPtr]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, AnalysisNyquistMDesignPtr, name, value)
__swig_getmethods__ = {}
for _s in [AnalysisOversampledDFTDesignPtr]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, AnalysisNyquistMDesignPtr, name)
__repr__ = _swig_repr
def __init__(self, M=512, m=2, r=1, wp=1.0):
this = _modulated.new_AnalysisNyquistMDesignPtr(M, m, r, wp)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
#!/usr/bin/python3
"""This script wraps up running clang tidy to acheive the goals listed below.
1. Support specifying a yaml configuration file on the command line (-config-file)
2. Optionally run the `run-clang-tidy` python wrapper, which supports job threads
3. Filter redundant output from `run-clang-tidy`
4. Mutate paths to deal with several path reporting issues, which come from how compile_commands.json is configured
5. Optionally report project relative paths.
These goals are described in more detail below.
1. Command line config file
Clang-tidy frustratingly only supports a specific .clang-tidy file which is recursively searched, or command line
configuration. Configuration can be very long, so file configuration is preferable, but we want to support different
analysis modes. So we add support to load a .clang-tidy yaml file as specified by `-config-file` and pass this to
clang-tidy via the command line.
2. Use `run-clang-tidy`
clang-tidy ships with a companion python script `run-clang-tidy`. This script is currently the only way to run
clang-tidy and use multiple threads. The script essentially splits the files listed and sets up separate processes
for each file.
3. Fix redundant output from `run-clang-tidy`
When using `run-clang-tidy` it outputs the entire configuration used to stdout. This makes for a lot of redundant
output especially when processing many files. We filter and suppress this output.
4. Mutate paths
An interesting quirk was discovered in path reporting when using ninja to generate the compile_commands.json. Ninja can
add relative include paths to the json file. This in turn exposes an undefined-behaviour bug in clang-tidy.
Essentially, depending on which checks actually report issues or not, the output can include a mix of absolute and
relative paths - relative to the compile_commands.json file. Specifically, `google-readability-casting` would report
relative paths, unless `readability-identifier-naming` also had issues to report (this is the only known case).
TODO(KS): insert bug report reference
We detect these relative paths, assume they are relative to compile_commands.json and make them absolute.
5. Project relative paths
Project relative paths can be reported by specifying the argument `-relative-to`. Reported paths are made relative to
this path. The intention is to report portable paths which can, for example, be generated on a CI server, but handled
on another computer.
"""
from __future__ import print_function
import argparse
import os
import platform
import re
import subprocess
import sys
import threading
if sys.version_info[0] >= 3:
from queue import Queue
else:
from Queue import Queue
# Command line parser setup.
def setup_args():
"""Setup and return the command line argument parser"""
parser = argparse.ArgumentParser(description='')
# parser.add_argument('csv', type=str, help='CSV file to load')
parser.add_argument(
'-clang-tidy-binary', help='Path to the clang-tidy executable.', metavar='PATH', required=True)
parser.add_argument('-clang-apply-replacements-binary',
help='Path to the clang-apply-replacements binary. Required when using -fix and -runner-py' +
' arguments.')
parser.add_argument(
'-runner-py', help='Python script wrapping clang-tidy with support for multiple jobs. run-clang-tidy.py ships' +
' with clang-tidy. Without this clang-tidy is run directly.', metavar='PATH')
parser.add_argument('-fix', action='store_true',
help='Apply automatic fixes. Passes -fix to clang-tidy. When using -runner-py' +
' (run-clang-tidy.py), the argument -clang-apply-replacements-binary must also be set to the' +
' clang-apply-fixes binary.')
parser.add_argument(
'-config-file', help='clang-tidy configuration file. Extracted and passed as the -config argument to' +
' clang-tidy.')
parser.add_argument(
'-p', help='clang-tidy build path (path to compile_commands.json). Extracted and passed as the -p argument to' +
' clang-tidy.', required=False)
parser.add_argument(
'-j', help='Number of parallel jobs to run. Only supported when using the -runner-py script. Ignored ' +
'otherwise.', required=False)
parser.add_argument(
'-relative-to', help='Modify clang-tidy message paths to be relative to this directory. Intended for CI' +
' builds to report portable paths.', required=False)
return parser
def resolve_build_path(args):
"""Resolve the compile_commands.json directory if specified. None when not present."""
# Use argparse to resolve the -p option so we support '-p <path>' (space)
# and '-p=<path>' (equals)
build_path = None
if args.p is not None:
# Strip 'compile_commands.json' if present
build_path = args.p
if build_path.endswith('compile_commands.json'):
build_path = build_path[:len(
build_path) - len('compile_commands.json')]
return build_path
def escape_path(path):
# Need to escape back slashes in args for Windows.
if platform.system() == 'Windows':
return path.replace('\\', '\\\\')
return path
class ProcessMessagePump:
"""A helper class for handling subprocess output and attempting to maintain output order.
Starts a thread each for stdout and stderr then collates on the main thread.
Usage:
- Create a subprocess with both stdout and stderr set to subprocess.PIPE
- Create a ProcessMessagePump around the process
- Call ProcessMessagePump.pump() with an appropriate logging function.
"""
def __init__(self, process):
"""Create a piper around process"""
self.process = process
self.log_queue = Queue()
self.pipes_running = 0
def pump(self, log):
"""Start logging using the log function until the process is done.
The log function signature must be log(process, pipe, line)
"""
threading.Thread(target=ProcessMessagePump._pump, args=[
self, self.process.stdout]).start()
threading.Thread(target=ProcessMessagePump._pump, args=[
self, self.process.stderr]).start()
self.pipes_running += 2
while self.pipes_running > 0 or not self.log_queue.empty():
pipe_source, line = self.log_queue.get()
if pipe_source is None or line is None:
continue
log(self.process, pipe_source, line)
def _pump(self, pipe):
"""Thread pump function"""
try:
# Keep going until the process ends
while self.process.poll() is None:
# Read a line each loop and add to the queue
line = pipe.readline()
if line:
self.log_queue.put((pipe, line))
# Final flush
try:
for line in iter(pipe.readline, ''):
self.log_queue.put((pipe, line))
except:
# Ok to have an I/O operation failure on this call. The pipe may have been closed
pass
finally:
# Ensure we note completion of this thread in the queue and class.
self.log_queue.put((pipe, None))
self.pipes_running -= 1
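# Minimal usage sketch for ProcessMessagePump. The command and log function are
# assumptions for illustration only; the real script wires this up in __main__ below.
def _example_pump_usage():
    """Run a child process and collate its stdout/stderr in order of arrival."""
    import subprocess
    proc = subprocess.Popen(['clang-tidy', '--version'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)

    def log(process, pipe, line):
        # Prefix stderr lines so the two streams can be told apart.
        prefix = 'stderr: ' if pipe is process.stderr else ''
        sys.stdout.write(prefix + line)

    ProcessMessagePump(proc).pump(log)
    return proc.wait()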
if __name__ == '__main__':
# ---------------------------------------------------------------------------
# Parse arguments.
arg_parse = setup_args()
args = arg_parse.parse_known_args(sys.argv[1:])
# ---------------------------------------------------------------------------
# Start building process arguments in tidy_args
tidy_args = []
using_runner = False
# Handle using run-clang-tidy or clang-tidy directly
if args[0].runner_py:
using_runner = True
if platform.system() == 'Windows':
# The runner will be an executable on platforms *other* than Windows. For Windows, run via python.
tidy_args.append(sys.executable)
tidy_args.append(args[0].runner_py)
if args[0].clang_tidy_binary:
tidy_args.append('-clang-tidy-binary=' +
escape_path(args[0].clang_tidy_binary))
if args[0].clang_apply_replacements_binary:
tidy_args.append('-clang-apply-replacements-binary=' +
escape_path(args[0].clang_apply_replacements_binary))
if args[0].j:
tidy_args.append('-j')
tidy_args.append(args[0].j)
else:
# We explicitly specify the number of jobs to run. The parallel run script fully loads the CPUs when running
# in parallel, so we limit the job count to keep any UI and OS tasks responsive.
try:
import psutil
job_threads = psutil.cpu_count() - 2
if job_threads < 2:
job_threads = 2
tidy_args.append('-j')
tidy_args.append(str(job_threads))
except ImportError:
pass
else:
tidy_args.append(escape_path(args[0].clang_tidy_binary))
# Resolve the compile_commands.json path. Note this must be the directory containing the file, excluding the file name.
# This is peculiar to using run-clang-tidy; clang-tidy itself is ok with the file name.
build_path = None
if args[0].p is not None:
tidy_args.append('-p={}'.format(args[0].p))
build_path = resolve_build_path(args[0])
# Apply fixes?
if args[0].fix:
tidy_args.append('-fix')
# Use command line specified .clang-tidy yaml file and extract content to the command line
config_lines = []
if args[0].config_file:
# Read the config file to use.
with open(args[0].config_file) as config_file:
config = config_file.read()
# # Replace line endings with the character sequence '\' 'n' (2 characters) in a way which deals with
# # any line ending setup.
# # Replace microsoft line endings
# config = config.replace('\r\n', '\\n')
# # Replace MacOS line endings
# config = config.replace('\r', '\\n')
# # Replace Unix line endings
# config = config.replace('\n', '\\n')
tidy_args.append('-config={}'.format(config))
# Build the filter for goal "Fix redundant output from `run-clang-tidy`"
config_lines = config.splitlines() if using_runner else config_lines
# Add -quiet to suppress a message about which checks are being used (run-clang-tidy)
tidy_args.append('-quiet')
# Add other arguments - like the file list
tidy_args.extend(args[1])
# ---------------------------------------------------------------------------
# Setup running the process.
# Build a regular expression for path fix parsing.
# Groups:
# 0: file path (to normalise)
# 1: the rest of the line (line/column numbers and message text)
error_msg_re = re.compile(r'^(.*?):(.*)')
def fix_path(line):
"""Fix certain aspects of paths.
Firstly clang-tidy with ninja generated compile_commands.json can report relative
paths (to the compile_commands.json path). We ensure these paths are made absolute.
Secondly we normalise paths.
Lastly we modify paths to be relative to args.relative_to if specified.
"""
match = error_msg_re.match(line)
if match:
path = match.groups()[0]
if build_path is not None:
if not os.path.isabs(path):
# Relative path reported. Make relative to compile_commands.json
path = os.path.join(build_path, path)
# Normalise the path.
path = os.path.abspath(path)
if args[0].relative_to:
"/" + version + "/jsonFile")
self.projectDict = {
"projectPath": self.projectPath,
"version": [version],
"groupIDStart": 1,
"taskIDStart": 1,
"elementIDStart": 1,
"templateIDStart": 1,
"TaskImagePath": {
"element": {},
"template": {}
},
"GameOverIDStart": 1,
"CloseIconIDStart": 1,
"DeviceCloseIDStart":1,
"UIStatesIDStart": 1,
"UIImagePath": {
"gameOver": {},
"closeIcons": {},
"devicesCloseIcons": {},
"uiStates": {}
},
"ReferImagePath": {
},
"ActionSampleImagePath": {
}
}
# Pass the contents of project.json to each module so that each module's functions can use it
self.mapPath.SetProjectInfo(self.projectName, self.projectPath)
self.taskConfig.SetProjectDict(self.projectDict)
self.uiConfig.SetProjectDict(self.projectDict)
self.actionSample.SetProjectDict(self.projectDict)
self.projectVersion = self.projectDict["version"]
self.writeProjectFile()
# Copy the files
self.copyFile(directoryName, self.projectPath + "/" + version + "/data")
taskJsonFileDict = {
"allTask": []
}
# Initialise the dictionaries for the configuration files
self.taskJsonFile[version] = taskJsonFileDict
self.writeTaskJsonFile()
self.referJsonFile[version] = taskJsonFileDict
self.writeReferJsonFile()
self.UIJsonFile[version] = {}
self.writeUIJsonFile()
self.actionSampleJsonFile[version] = {}
self.writeActionSampleJsonFile()
self.mapPathJsonDict[version] = {}
self.writeMapPathJsonFile()
self.mapGraphPathJsonDict[version] = {}
self.writeMapGraphPathJsonFile()
# Build the project tree structure
self.ui.treeWidget.clear()
root = QTreeWidgetItem(self.ui.treeWidget)
root.setText(0, self.projectName)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/menu/floder.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
root.setIcon(0, icon)
self.GetFileList(self.projectPath, root, 1)
root.setText(2, ITEM_TYPE_PROJECT)
for itemIdx in range(root.childCount()):
treeItemVersion = root.child(itemIdx)
version = treeItemVersion.text(0)
if version not in ["project.json", "project.json~"]:
treeItemVersion.setText(2, ITEM_TYPE_VERSION)
# Import the task configuration file and build the corresponding tree structure
self.taskConfig.LoadTaskJson(self.taskJsonFile[version], treeItemVersion)
treeItemVersion.setExpanded(True)
root.setExpanded(True)
self.ui.pushButton_prev.setEnabled(False)
self.ui.pushButton_next.setEnabled(False)
self.canvas.resetState()
'''
Write the project.json file
'''
def writeProjectFile(self):
if self.projectDict is None:
self.__logger.error('writeProjectFile failed, projectDict is None')
return
self.projectDict["groupIDStart"] = self.taskConfig.groupIDIndex
self.projectDict["taskIDStart"] = self.taskConfig.taskIDIndex
self.projectDict["elementIDStart"] = self.taskConfig.elementIDIndex
self.projectDict["templateIDStart"] = self.taskConfig.templateIDIndex
self.projectDict["GameOverIDStart"] = self.uiConfig.UIGameOverIDIndex
self.projectDict["CloseIconIDStart"] = self.uiConfig.UICloseIconsIDIndex
self.projectDict["UIStatesIDStart"] = self.uiConfig.UIStatesIDIndex
self.projectDict["ActionIDStart"] = self.actionSample.ActionIDIndex
with open(self.projectPath + "/project.json", "w") as f:
json.dump(self.projectDict, f, indent = 4, separators=(',', ':'))
'''
Write the task.json file
'''
def writeTaskJsonFile(self):
for version in self.projectVersion:
with open(self.projectPath + "/" + version + "/jsonFile/task.json", "w") as f:
json.dump(self.taskJsonFile[version], f, indent=4, separators=(',', ':'))
'''
Write the UIConfig.json file
'''
def writeUIJsonFile(self):
for version in self.projectVersion:
with open(self.projectPath + "/" + version + "/jsonFile/UIConfig.json", "w") as f:
json.dump(self.UIJsonFile[version], f, indent=4, separators=(',', ':'))
'''
Write the refer.json file
'''
def writeReferJsonFile(self):
for version in self.projectVersion:
with open(self.projectPath + "/" + version + "/jsonFile/refer.json", "w") as f:
json.dump(self.referJsonFile[version], f, indent=4, separators=(',', ":"))
'''
Write the action sample configuration files
'''
def writeActionSampleJsonFile(self):
for version in self.projectVersion:
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/actionSample") is False:
os.mkdir(self.projectPath + "/" + version + "/jsonFile/actionSample")
with open(self.projectPath + "/" + version + "/jsonFile/actionSample/cfg.json", "w") as f:
cfgJson = self.actionSampleJsonFile[version].copy()
if "ActionCfgFile" in cfgJson.keys():
cfgJson['ActionCfgFile'] += '.json'
if "ActionSample" in cfgJson.keys():
del cfgJson['ActionSample']
json.dump(cfgJson, f, indent=4, separators=(',', ":"))
# shutil.copy(self.projectPath + "/" + version + "/jsonFile/actionSample/cfg.json", "bin/ActionSampler/cfg/cfg.json")
if "ActionSample" in self.actionSampleJsonFile[version].keys():
for actionSample in self.actionSampleJsonFile[version]['ActionSample']:
fileName = actionSample['fileName']
del actionSample['fileName']
with open(self.projectPath + "/" + version + "/jsonFile/actionSample/" + fileName + ".json", "w") as f:
json.dump(actionSample, f, indent=4, separators=(',', ":"))
# shutil.copy(self.projectPath + "/" + version + "/jsonFile/actionSample/" + fileName + ".json", "bin/ActionSampler/cfg/" + fileName + ".json")
'''
Write the map path configuration file
'''
def writeMapPathJsonFile(self):
for version in self.projectVersion:
with open(self.projectPath + "/" + version + "/jsonFile/mapPathTemp.json", "w") as f:
json.dump(self.mapPathJsonDict[version], f, indent=4, separators=(',', ":"))
fPre = open(self.projectPath + "/" + version + "/jsonFile/mapPathTemp.json", "r")
fCur = open(self.projectPath + "/" + version + "/jsonFile/mapPath.json", "w")
for line in fPre.readlines():
lineOut = line.replace('\"[', '[')
lineOut = lineOut.replace(']\"', ']')
fCur.write(lineOut)
fCur.close()
fPre.close()
os.remove(self.projectPath + "/" + version + "/jsonFile/mapPathTemp.json")
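# Note on the rewrite above: the values in mapPathTemp.json hold point lists as
# JSON strings, e.g. "[[10, 20], [30, 40]]". Replacing '"[' with '[' and ']"'
# with ']' turns them into real JSON arrays in mapPath.json:
#   before: "path": "[[10, 20], [30, 40]]"
#   after:  "path": [[10, 20], [30, 40]]
# (the key name and coordinates here are illustrative only)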
'''
Write the graph-structured map path configuration file
'''
def writeMapGraphPathJsonFile(self):
for version in self.projectVersion:
with open(self.projectPath + "/" + version + "/jsonFile/GraphPath.json", "w") as f:
json.dump(self.mapGraphPathJsonDict[version], f, indent=4, separators=(',', ":"))
'''
Copy files
'''
def copyFile(self, sourcePath, targetPath):
for file in os.listdir(sourcePath):
sourceFile = os.path.join(sourcePath, file)
targetFile = os.path.join(targetPath, file)
if os.path.isdir(sourceFile):
if os.path.exists(targetFile) is False:
os.makedirs(targetFile)
self.copyFile(sourceFile, targetFile)
if os.path.isfile(sourceFile):
extension = os.path.splitext(file)[1]
# if extension in ['.jpg', '.png', '.bmp', '.jpeg']:
# try:
# NormaImage(sourceFile, targetFile)
#
# except Exception as error:
# dlg = CommonDialog(title="ImportError", text="error {}".format(error))
# dlg.popUp()
# elif extension in [".names", ".cfg", ".weights"]:
# shutil.copy(sourceFile, targetFile)
if extension in [".jpg", ".png", ".bmp", ".jpeg", ".names", ".cfg", ".weights"]:
shutil.copy(sourceFile, targetFile)
'''
Import an existing project
'''
def ImportProject(self):
self.labelModel = False
# self.__uiExplore = None
# self.ClearTreeWidget()
if self.__uiExplore is not None:
self.__uiExplore.Finish()
self.__uiExplore = None
self.mode = None
self.canvas.update()
projectFileName, Type = QFileDialog.getOpenFileName(None, "Open project file", "", "*.json;;*.json;;All Files(*)")
if projectFileName == "":
self.__logger.info('project file path is empty')
return
# Read the contents of the project file project.json
with open(projectFileName, "r") as f:
self.projectDict = json.load(f)
if "groupIDStart" not in self.projectDict.keys():
self.projectDict["groupIDStart"] = 1
if "taskIDStart" not in self.projectDict.keys():
self.projectDict["taskIDStart"] = 1
if "elementIDStart" not in self.projectDict.keys():
self.projectDict["elementIDStart"] = 1
if "templateIDStart" not in self.projectDict.keys():
self.projectDict["templateIDStart"] = 1
if "GameOverIDStart" not in self.projectDict.keys():
self.projectDict["GameOverIDStart"] = 1
if "CloseIconIDStart" not in self.projectDict.keys():
self.projectDict["CloseIconIDStart"] = 1
if "UIStatesIDStart" not in self.projectDict.keys():
self.projectDict["UIStatesIDStart"] = 1
if "TaskImagePath" not in self.projectDict.keys():
self.projectDict["TaskImagePath"] = {
"element": {},
"template": {}
}
if "UIImagePath" not in self.projectDict.keys():
self.projectDict["UIImagePath"] = {
"gameOver": {},
"closeIcons": {},
"uiStates": {}
}
if "ReferImagePath" not in self.projectDict.keys():
self.projectDict["ReferImagePath"] = {}
if "ActionSampleImagePath" not in self.projectDict.keys():
self.projectDict["ActionSampleImagePath"] = {}
# Pass the project.json contents to each module so that their member functions can use it
self.projectVersion = self.projectDict["version"]
self.projectPath = self.projectDict["projectPath"]
self.taskConfig.SetProjectDict(self.projectDict)
self.uiConfig.SetProjectDict(self.projectDict)
self.actionSample.SetProjectDict(self.projectDict)
# Import all existing configuration files
for version in self.projectDict["version"]:
# Read the task.json configuration file
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/task.json"):
with open(self.projectPath + "/" + version + "/jsonFile/task.json", "r") as taskfile:
self.taskJsonFile[version] = json.load(taskfile)
else:
self.taskJsonFile[version] = {"allTask": []}
self.writeTaskJsonFile()
# Read the UIConfig.json configuration file
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/UIConfig.json"):
with open(self.projectPath + "/" + version + "/jsonFile/UIConfig.json", "r") as UIFile:
self.UIJsonFile[version] = json.load(UIFile)
elif os.path.exists(self.projectPath + "/" + version + "/jsonFile/UI.json"):
with open(self.projectPath + "/" + version + "/jsonFile/UI.json", "r") as UIFile:
self.UIJsonFile[version] = json.load(UIFile)
else:
self.UIJsonFile[version] = OrderedDict()
self.writeUIJsonFile()
# Read the refer.json configuration file
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/refer.json"):
with open(self.projectPath + "/" + version + "/jsonFile/refer.json", "r") as referFile:
self.referJsonFile[version] = json.load(referFile)
else:
self.referJsonFile[version] = {"allTask": []}
self.writeReferJsonFile()
# Read the action sample related configuration files
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/actionSample/cfg.json"):
with open(self.projectPath + "/" + version + "/jsonFile/actionSample/cfg.json", "r") as actionFile:
self.actionSampleJsonFile[version] = json.load(actionFile)
if self.actionSampleJsonFile[version] != {}:
if "ActionCfgFile" in self.actionSampleJsonFile[version].keys():
fileName = self.actionSampleJsonFile[version]['ActionCfgFile']
self.actionSampleJsonFile[version]['ActionCfgFile'] = os.path.splitext(fileName)[0]
self.actionSampleJsonFile[version]['ActionSample'] = list()
for fileName in os.listdir(self.projectPath + "/" + version + "/jsonFile/actionSample"):
if fileName == "cfg.json":
continue
actionSampleDict = OrderedDict()
with open(self.projectPath + "/" + version + "/jsonFile/actionSample/" + fileName, "r") as actionFile:
actionSampleDict = json.load(actionFile)
actionSampleDict["fileName"] = os.path.splitext(fileName)[0]
self.actionSampleJsonFile[version]['ActionSample'].append(actionSampleDict)
else:
self.actionSampleJsonFile[version] = {}
self.writeActionSampleJsonFile()
# Read the mapPath.json configuration file
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/mapPath.json"):
with open(self.projectPath + "/" + version + "/jsonFile/mapPath.json", "r") as mapPathFile:
self.mapPathJsonDict[version] = json.load(mapPathFile)
else:
self.mapPathJsonDict[version] = {}
self.writeMapPathJsonFile()
# Read the GraphPath.json configuration file
if os.path.exists(self.projectPath + "/" + version + "/jsonFile/GraphPath.json"):
with open(self.projectPath + "/" + version + "/jsonFile/GraphPath.json", "r") as graphPathFile:
self.mapGraphPathJsonDict[version] = json.load(graphPathFile)
else:
self.mapGraphPathJsonDict[version] = {}
self.writeMapGraphPathJsonFile()
_, self.projectName = os.path.split(self.projectPath)  # Get the project name
self.mapPath.SetProjectInfo(self.projectName, self.projectPath)
# Build the tree structure corresponding to each configuration file that was read
self.ui.treeWidget.clear()
root = QTreeWidgetItem(self.ui.treeWidget)
root.setText(0, self.projectName)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/menu/floder.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
root.setIcon(0, icon)
self.GetFileList(self.projectPath, root, 1)
root.setText(2, ITEM_TYPE_PROJECT)
for itemIdx in range(root.childCount()):
treeItemVersion = root.child(itemIdx)
version = treeItemVersion.text(0)
if version not in ["project.json", "project.json~"]:
treeItemVersion.setText(2, ITEM_TYPE_VERSION)
# Build the tree structure for task items
self.taskConfig.LoadTaskJson(self.taskJsonFile[version], treeItemVersion)
# Build the tree structure for UI items
if self.UIJsonFile[version] != {}:
self.uiConfig.LoadUIJson(self.UIJsonFile[version], treeItemVersion)
# Build the tree structure for refer items
self.taskConfig.LoadReferJson(self.referJsonFile[version], treeItemVersion)
# Build the tree structure for action sample items
if self.actionSampleJsonFile[version] != {}:
self.actionSample.LoadActionSample(self.actionSampleJsonFile[version], treeItemVersion)
# Build the tree structure for map path items
if self.mapPathJsonDict[version] != {}:
self.mapPath.CreateMapPathTree(self.mapPathJsonDict[version], treeItemVersion)
# Build the tree structure for graph-structured map path items
if self.mapGraphPathJsonDict[version] != {}:
self.graphPath.CreateMapGraphPathTree(self.mapGraphPathJsonDict[version], treeItemVersion)
treeItemVersion.setExpanded(True)
root.setExpanded(True)
self.ui.pushButton_prev.setEnabled(False)
self.ui.pushButton_next.setEnabled(False)
self.canvas.resetState()
'''
Get the child item matching the given key
'''
def GetChildItem(self, treeItem, itemKey):
if treeItem is None:
self.__logger.error("GetChildItem failed, treeItem is None")
return
for itemIdx in range(treeItem.childCount()):
childItem = treeItem.child(itemIdx)
if childItem.text(0) == itemKey:
return childItem
return None
'''
Slot function for adding a version
'''
def AddVersion(self):
version = self.versionDialog.popUp()
if version == "":
self.__logger.info('version is empty')
return
self.LoadImgDir(version, False)
'''
Slot function for deleting a version
'''
def DeletVersion(self):
treeItemVersion = self.ui.treeWidget.currentItem()
if treeItemVersion is None:
self.__logger.error('DeletVersion failed, treeItemVersion is None')
return
version = treeItemVersion.text(0)
# Pop up the "confirm deletion" dialog
confirmDia = confirmDialog("Confirm deletion")
confirmFlag = confirmDia.popUp()
if confirmFlag == True:
# Delete the corresponding files
treeItemVersion.parent().removeChild(treeItemVersion)
self.taskJsonFile.pop(version)
self.projectDict["version"].remove(version)
self.writeProjectFile()
if os.path.exists(self.projectPath + "/" + version):
shutil.rmtree(self.projectPath + "/" + version)
'''
Delete an item
'''
def DelItem(self):
treeItem = self.ui.treeWidget.currentItem()
if treeItem is None:
self.__logger.error('DelItem failed, treeItem is None')
return
treeItem.parent().removeChild(treeItem)
'''
Create a QTreeWidgetItem
Input parameter key: the item's key, i.e. the value shown in the first column
Input parameter value: the item's value, i.e. the value shown in the second column
Input parameter type: the item's type, i.e. the value shown in the third column; all types are defined in define.py
Input parameter edit: whether the item is editable
'''
def CreateTreeItem(self, key, value=None, type=None, edit=False):
child = QTreeWidgetItem()
child.setText(0, str(key))
if value is not None:
child.setText(1, str(value))
if type is not None:
child.setText(2, type)
child.setIcon(0, self.treeIcon)
if edit is True:
child.setFlags(child.flags() | Qt.ItemIsEditable)
return child
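# Illustrative use of CreateTreeItem (the key and value shown are made up; type uses
# one of the constants from define.py, e.g. ITEM_TYPE_VERSION seen above):
#   child = self.CreateTreeItem(key="1.0.0", value="", type=ITEM_TYPE_VERSION, edit=False)
#   parentItem.addChild(child)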
'''
Change the image file path
'''
def ChangeFilePath(self):
treeItem = self.ui.treeWidget.currentItem()
if treeItem is None:
self.__logger.error('ChangeFilePath failed, treeItem is None')
return
# Check whether the current item's key is in the following set; return if it is not
keyName = treeItem.text(0)
first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def blockcos_bulkpost(self, ids):
return self.__blockcos_bulkget_reps(ids)
def __blockcos_bulkget_ids(self):
return self.api('GET', URI_BLOCKVPOOLS_BULKGET)
def __blockcos_bulkget_reps(self, ids):
return self.api('POST', URI_BLOCKVPOOLS_BULKGET, ids)
def filecos_bulkgetids(self):
ids = self.__filecos_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def filecos_bulkpost(self, ids):
return self.__filecos_bulkget_reps(ids)
def __filecos_bulkget_ids(self):
return self.api('GET', URI_FILEVPOOLS_BULKGET)
def __filecos_bulkget_reps(self, ids):
return self.api('POST', URI_FILEVPOOLS_BULKGET, ids)
def smisprovider_bulkgetids(self):
ids = self.__smisprovider_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def smisprovider_bulkpost(self, ids):
return self.__smisprovider_bulkget_reps(ids)
def __smisprovider_bulkget_ids(self):
return self.api('GET', URI_SMISPROVIDER_BULKGET)
def __smisprovider_bulkget_reps(self, ids):
return self.api('POST', URI_SMISPROVIDER_BULKGET, ids)
def blocksnapshot_bulkgetids(self):
ids = self.__blocksnapshot_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def blocksnapshot_bulkpost(self, ids):
return self.__blocksnapshot_bulkget_reps(ids)
def __blocksnapshot_bulkget_ids(self):
return self.api('GET', URI_BLOCKSNAPSHOT_BULKGET)
def __blocksnapshot_bulkget_reps(self, ids):
return self.api('POST', URI_BLOCKSNAPSHOT_BULKGET, ids)
def filesnapshot_bulkgetids(self):
ids = self.__filesnapshot_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def filesnapshot_bulkpost(self, ids):
return self.__filesnapshot_bulkget_reps(ids)
def __filesnapshot_bulkget_ids(self):
return self.api('GET', URI_FILESNAPSHOT_BULKGET)
def __filesnapshot_bulkget_reps(self, ids):
return self.api('POST', URI_FILESNAPSHOT_BULKGET, ids)
def exportgroup_bulkgetids(self):
ids = self.__exportgroup_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def exportgroup_bulkpost(self, ids):
return self.__exportgroup_bulkget_reps(ids)
def __exportgroup_bulkget_ids(self):
return self.api('GET', URI_EXPORTGROUP_BULKGET)
def __exportgroup_bulkget_reps(self, ids):
return self.api('POST', URI_EXPORTGROUP_BULKGET, ids)
def update_chunkinfo(self, primaryZone, message):
self._headers['x-emc-primaryzone'] = primaryZone
self._port = GEO_PORT
return self.coreapi('POST', URI_CHUNKINFO, message, None, content_type=CONTENT_TYPE_OCTET)
@resetHeaders
def send_chunkdata(self, chunkId, primaryZone, secondaryZone, repGroup, data):
if len(chunkId) != 36:
raise Exception('wrong chunkId format (must be uuid) ' + chunkId)
all_data = ""
for d in data:
d = self.getDataValueFromCli(d)
d = self._addChecksum(d, chunkId)
all_data += d
length = len(all_data)
self._headers['x-emc-primaryzone'] = primaryZone
self._headers['x-emc-secondaryzone'] = secondaryZone
self._headers['x-emc-dataservice-vpool'] = repGroup
self._headers['x-emc-chunklength'] = str(length)
# TODO: just REPO
self._headers['x-emc-chunk-datatype'] = "0"
self._port = GEO_PORT
return self.coreapi('POST', URI_CHUNKDATA.format(chunkId), all_data, None, content_type=CONTENT_TYPE_OCTET)
@resetHeaders
def delete_chunkdata(self, chunkId, repGroup):
self._headers['x-emc-dataservice-vpool'] = repGroup
self._port = GEO_PORT
return self.coreapi('DELETE', URI_CHUNKDATA.format(chunkId), None, None, content_type=CONTENT_TYPE_OCTET)
def repgroup_create(self, repGrpId, name, cos_list, isAllowAllNamespaces):
parms = dict()
parms['id'] = repGrpId
parms['name'] = name
parms['description'] = name
parms['isAllowAllNamespaces'] = isAllowAllNamespaces
parms['zone_mappings'] = []
for cos in cos_list.split(','):
pair = cos.split('.')
zone_uuid = self.vdcinfo_query(pair[0])
cos_uuid = self.neighborhood_query(pair[1])
parms['zone_mappings'].append({"name" : zone_uuid, "value" : cos_uuid})
return self.coreapi('POST', URI_REPLICATION_GROUPS, parms)
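# Example cos_list value for repgroup_create/repgroup_add/repgroup_remove
# (the names are hypothetical): "vdc1.varray1,vdc2.varray2".
# Each comma-separated entry is split on '.'; the first part is resolved with
# vdcinfo_query() and the second with neighborhood_query() before being added
# to the zone mappings.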
def repgroup_add(self, repGrpId, cos_list):
parms = dict()
parms['mappings'] = []
for cos in cos_list.split(','):
pair = cos.split('.')
zone_uuid = self.vdcinfo_query(pair[0])
cos_uuid = self.neighborhood_query(pair[1])
parms['mappings'].append({"name" : zone_uuid, "value" : cos_uuid})
return self.coreapi('PUT', URI_REPLICATION_EXTEND.format(repGrpId), parms)
def repgroup_remove(self, repGrpId, cos_list):
parms = dict()
parms['mappings'] = []
for cos in cos_list.split(','):
pair = cos.split('.')
zone_uuid = self.vdcinfo_query(pair[0])
cos_uuid = self.neighborhood_query(pair[1])
parms['mappings'].append({"name" : zone_uuid, "value" : cos_uuid})
return self.coreapi('PUT', URI_REPLICATION_COMPRESS.format(repGrpId), parms)
def repgroup_show(self, grpId):
o = self.api('GET', URI_REPLICATION_GROUP.format(grpId))
if (not o):
return None
else:
return o;
def repgroup_list(self):
o = self.api('GET', URI_REPLICATION_GROUPS)
if (not o):
return {};
else:
return o;
# replication group name from uuid
def repgroup_query(self, name):
if (self.__is_uri(name)):
return name
rg_res = self.repgroup_list()
rg_list = rg_res['data_service_vpool']
for rg_iter in rg_list :
rg = self.repgroup_show(rg_iter['id'])
if (rg['name'] == name):
return rg['id']
return None
#
# Encode HashSet of VPOOL parameters as a list
#
def encode_cos(self,stringmap):
cos_params = {'vpool_param' : self.__encode_list(stringmap)}
return cos_params
def cos_list(self, type):
o = self.api('GET', URI_VPOOLS.format(type))
if (not o):
return {};
return o['virtualpool']
def cos_create(self, type, name, description, useMatchedPools,
protocols, numpaths, minpaths, pathsperinitiator, systemtype,
highavailability, haNhUri, haCosUri, activeProtectionAtHASite, metropoint, file_cos, provisionType,
mirrorCosUri, neighborhoods, expandable, sourceJournalSize, journalVarray, journalVpool, standbyJournalVarray,
standbyJournalVpool, rp_copy_mode, rp_rpo_value, rp_rpo_type, protectionCoS,
multiVolumeConsistency, max_snapshots, max_mirrors, thin_volume_preallocation_percentage,
long_term_retention, drive_type, system_type, srdf, auto_tiering_policy_name, host_io_limit_bandwidth, host_io_limit_iops,
auto_cross_connect, placement_policy, compressionEnabled, snapshot_schedule, replication_support,
filepolicy_at_project, filepolicy_at_fs):
if (type != 'block' and type != 'file' and type != "object" ):
raise Exception('wrong type for vpool: ' + str(type))
parms = dict()
if (name):
parms['name'] = name
if (description):
parms['description'] = description
if (useMatchedPools):
parms['use_matched_pools'] = useMatchedPools
if (protocols):
parms['protocols'] = protocols
if (drive_type):
parms['drive_type'] = drive_type
if (system_type):
parms['system_type'] = system_type
if (numpaths):
parms['num_paths'] = numpaths
if (minpaths):
parms['min_paths'] = minpaths
if (pathsperinitiator):
parms['paths_per_initiator'] = pathsperinitiator
if (systemtype):
parms['system_type'] = systemtype
if (compressionEnabled):
parms['compression_enabled'] = compressionEnabled
if (highavailability):
if (highavailability == 'vplex_local'):
parms['high_availability'] = {'type' : highavailability, 'autoCrossConnectExport' : auto_cross_connect}
else:
parms['high_availability'] = {'type' : highavailability, 'metroPoint' : metropoint, 'ha_varray_vpool' : {'varray' : haNhUri, 'vpool' : haCosUri, 'activeProtectionAtHASite' : activeProtectionAtHASite}, 'autoCrossConnectExport' : auto_cross_connect}
if (file_cos):
parms['file_vpool'] = file_cos
if (provisionType):
parms['provisioning_type'] = provisionType
if (expandable):
parms['expandable'] = expandable
if(multiVolumeConsistency):
parms['multi_volume_consistency'] = multiVolumeConsistency
if (thin_volume_preallocation_percentage):
parms['thin_volume_preallocation_percentage'] = thin_volume_preallocation_percentage;
if (auto_tiering_policy_name):
parms['auto_tiering_policy_name'] = auto_tiering_policy_name;
if (long_term_retention):
parms['long_term_retention'] = long_term_retention;
if (type == 'block' and placement_policy):
parms['placement_policy'] = placement_policy;
if (max_snapshots or max_mirrors or protectionCoS or srdf):
cos_protection_params = dict()
if (type == 'block'):
if (srdf):
cos_protection_srdf_params = dict()
copies = srdf.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copy['remote_copy_mode'] = copyParam[2]
except:
pass
copyEntries.append(copy)
cos_protection_srdf_params['remote_copy_settings'] = copyEntries
cos_protection_params['remote_copies'] = cos_protection_srdf_params
if (max_mirrors):
cos_protection_mirror_params = dict()
cos_protection_mirror_params['max_native_continuous_copies'] = max_mirrors
if (mirrorCosUri):
cos_protection_mirror_params['protection_mirror_vpool'] = mirrorCosUri
cos_protection_params['continuous_copies'] = cos_protection_mirror_params
if (protectionCoS):
cos_protection_rp_params = dict()
if (sourceJournalSize or rp_copy_mode or rp_rpo_value or standbyJournalVarray or standbyJournalVpool or journalVarray or journalVpool):
sourcePolicy = dict();
if (sourceJournalSize):
sourcePolicy['journal_size'] = sourceJournalSize
if (rp_copy_mode):
sourcePolicy['remote_copy_mode'] = rp_copy_mode;
if (rp_rpo_value):
sourcePolicy['rpo_value'] = rp_rpo_value;
if (rp_rpo_type):
sourcePolicy['rpo_type'] = rp_rpo_type;
if (journalVarray):
sourcePolicy['journal_varray'] = self.neighborhood_query(journalVarray);
if (journalVpool):
sourcePolicy['journal_vpool'] = self.cos_query("block", journalVpool);
if (standbyJournalVarray):
sourcePolicy['standby_journal_varray'] = self.neighborhood_query(standbyJournalVarray);
if (standbyJournalVpool):
sourcePolicy['standby_journal_vpool'] = self.cos_query("block", standbyJournalVpool);
cos_protection_rp_params['source_policy'] = sourcePolicy
copies = protectionCoS.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copyPolicy = dict()
copyPolicy['journal_size'] = copyParam[2]
copyPolicy['journal_varray'] = self.neighborhood_query(copyParam[3])
copyPolicy['journal_vpool'] = self.cos_query("block", copyParam[4])
copy['policy'] = copyPolicy
except:
pass
copyEntries.append(copy)
cos_protection_rp_params['copies'] = copyEntries
cos_protection_params['recoverpoint'] = cos_protection_rp_params
if (max_snapshots):
cos_protection_snapshot_params = dict()
cos_protection_snapshot_params['max_native_snapshots'] = max_snapshots
cos_protection_params['snapshots'] = cos_protection_snapshot_params
if(snapshot_schedule is not None):
cos_protection_params['schedule_snapshots'] = snapshot_schedule
if(replication_support is not None):
cos_protection_params['replication_supported'] = replication_support
if(filepolicy_at_project is not None):
cos_protection_params['allow_policy_at_project_level'] = filepolicy_at_project
if(filepolicy_at_fs is not None):
cos_protection_params['allow_policy_at_fs_level'] = filepolicy_at_fs
parms['protection'] = cos_protection_params
nhs = list()
if(neighborhoods):
for n in neighborhoods:
nhs.append(self.neighborhood_query(n))
parms['varrays'] = nhs
if (host_io_limit_bandwidth):
parms['host_io_limit_bandwidth'] = host_io_limit_bandwidth
if (host_io_limit_iops):
parms['host_io_limit_iops'] = host_io_limit_iops
if (type == 'object'):
del parms['protection']
print "VPOOL CREATE Params = ", parms
return self.api('POST', URI_VPOOLS.format(type), parms)
def cos_match(self, type, useMatchedPools,
protocols, numpaths, highavailability, haNhUri, haCosUri, activeProtectionAtHASite, metropoint, file_cos, provisionType,
mirrorCosUri, neighborhoods, expandable, sourceJournalSize, journalVarray, journalVpool, standbyJournalVarray,
standbyJournalVpool, rp_copy_mode, rp_rpo_value, rp_rpo_type, protectionCoS,
multiVolumeConsistency, max_snapshots, max_mirrors, thin_volume_preallocation_percentage, drive_type,
system_type, srdf, compressionEnabled):
if (type != 'block' and type != 'file' and type != "object" ):
raise Exception('wrong type for vpool: ' + str(type))
parms = dict()
if (useMatchedPools):
parms['use_matched_pools'] = useMatchedPools
if (protocols):
parms['protocols'] = protocols
if (drive_type):
parms['drive_type'] = drive_type
if (system_type):
parms['system_type'] = system_type
if (numpaths):
parms['num_paths'] = numpaths
if (compressionEnabled):
parms['compression_enabled'] = compressionEnabled
if (highavailability):
if (highavailability == 'vplex_local'):
parms['high_availability'] = {'type' : highavailability}
else:
parms['high_availability'] = {'type' : highavailability, 'metroPoint' : metropoint, 'ha_varray_vpool' : {'varray' : haNhUri, 'vpool' : haCosUri, 'activeProtectionAtHASite' : activeProtectionAtHASite}}
if (file_cos):
parms['file_vpool'] = file_cos
if (provisionType):
parms['provisioning_type'] = provisionType
if (expandable):
parms['expandable'] = expandable
if(multiVolumeConsistency):
parms['multi_volume_consistency'] = multiVolumeConsistency
if (thin_volume_preallocation_percentage):
parms['thin_volume_preallocation_percentage'] = thin_volume_preallocation_percentage;
if (max_snapshots or max_mirrors or protectionCoS or srdf):
cos_protection_params = dict()
if (type == 'block'):
if (srdf):
cos_protection_srdf_params = dict()
copies = srdf.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copy['remote_copy_mode'] = copyParam[2]
except:
pass
copyEntries.append(copy)
cos_protection_srdf_params['remote_copy_settings'] = copyEntries
cos_protection_params['remote_copies'] = cos_protection_srdf_params
if (max_mirrors):
cos_protection_mirror_params = dict()
cos_protection_mirror_params['max_native_continuous_copies'] = max_mirrors
if (mirrorCosUri):
cos_protection_mirror_params['protection_mirror_vpool'] = mirrorCosUri
cos_protection_params['continuous_copies'] = cos_protection_mirror_params
if (protectionCoS):
cos_protection_rp_params = dict()
if (sourceJournalSize):
sourcePolicy = dict()
sourcePolicy['journal_size'] = sourceJournalSize
sourcePolicy['journal_varray'] = journalVarray
sourcePolicy['journal_vpool'] = journalVpool
sourcePolicy['standby_journal_varray'] = standbyJournalVarray
sourcePolicy['standby_journal_vpool'] = standbyJournalVpool
cos_protection_rp_params['source_policy'] = sourcePolicy
copies = protectionCoS.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copyPolicy = dict()
copyPolicy['journal_size'] = copyParam[2]
copyPolicy['journal_varray'] = self.neighborhood_query(copyParam[3])
copyPolicy['journal_vpool'] = self.cos_query("block", copyParam[4])
copy['policy'] = copyPolicy
except:
pass
copyEntries.append(copy)
cos_protection_rp_params['copies'] = copyEntries
cos_protection_params['recoverpoint'] = cos_protection_rp_params
None:
return []
written_methods.add((self.version, self.op_id))
parameters = self.prep_inbound_params()
if self.is_staticmethod and self.self_param:
parameters.append(self.self_param)
lines = []
lines.extend(self.get_meth_decorators())
lines.append(self.get_meth_defline(parameters=parameters))
ds = self.make_docstring(parameters=parameters)
if ds:
lines.extend(ds)
lines.extend(self.get_meth_body(parameters=self.prep_outbound_params(),
cd=cd))
lines.extend(self.as_crud_python_method(cd))
return lines
def register_crud_class(verb: str):
def rcc(cls):
Operation.crud_registry[verb] = cls
return cls
return rcc
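# register_crud_class is a parameterised class decorator: it records the decorated
# Operation subclass in Operation.crud_registry keyed by the CRUD verb, as used by
# the @register_crud_class('create'/'update'/'delete'/'read') declarations below.
# A later lookup would then be along the lines of (illustration only, not a quote
# from this module):
#   crud_cls = Operation.crud_registry['create']   # -> CreateOperation
#   synthetic = crud_cls(base_op)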
class SyntheticOperation(Operation):
op_name = 'noop' # must be overridden by derived classes for the real op name
def __init__(self, base_op: Operation):
gvk = {'group': base_op.group,
'version': base_op.gvk_version,
'kind': base_op.kind}
super(SyntheticOperation, self).__init__(base_op.verb, base_op.op_path,
self.op_name,
base_op.description,
gvk)
for p in base_op.parameters:
self.add_parameter(p.name, p.ptype, p.description, p.required)
for r in base_op.returns.values():
assert isinstance(r, OpResponse)
self.add_return(str(r.code), r.ref, r.description)
self.base_op = base_op
def get_effective_op_id(self) -> str:
return self.base_op.op_id
def make_return_doc(self) -> List[str]:
doc = list()
doc.append(' :return: returns self; the state of self may be '
'permuted with a returned')
doc.append(' HikaruDocumentBase object, whose values will be '
'merged into self ')
doc.append('(if of the same type).')
doc.append(' :raises: KubernetesException. Raised only by the CRUD '
'methods to signal ')
doc.append(' that a return code of 400 or higher was returned by the '
'underlying ')
doc.append(' Kubernetes library.')
return doc
def get_meth_return(self) -> str:
return f"'{self.base_op.kind}'"
def get_async_param(self) -> List[str]:
return []
def get_async_doc(self) -> List[str]:
return []
def as_python_method(self, cd: Optional['ClassDescriptor'] = None) -> List[str]:
code = super(SyntheticOperation, self).as_python_method(cd=cd)
code.extend(self.post_method_code(cd=cd))
return code
def post_method_code(self, cd: Optional['ClassDescriptor'] = None) -> List[str]:
return []
_create_body_with_namespace = \
"""
# noinspection PyDataclass
client = client or self.client
if namespace is not None:
effective_namespace = namespace
elif not self.metadata or not self.metadata.namespace:
raise RuntimeError("There must be a namespace supplied in either "
"the arguments to {op_name}() or in a "
"{classname}'s metadata")
else:
effective_namespace = self.metadata.namespace
res = self.{methname}({paramlist})
if not 200 <= res.code <= 299:
raise KubernetesException("Kubernetes returned error " + str(res.code))
if self.__class__.__name__ == res.obj.__class__.__name__:
self.merge(res.obj, overwrite=True)
return self
"""
_create_body_no_namespace = \
"""
# noinspection PyDataclass
client = client or self.client
res = self.{methname}({paramlist})
if not 200 <= res.code <= 299:
raise KubernetesException("Kubernetes returned error " + str(res.code))
if self.__class__.__name__ == res.obj.__class__.__name__:
self.merge(res.obj, overwrite=True)
return self
"""
@register_crud_class('create')
class CreateOperation(SyntheticOperation):
"""
A synthetic operation; making a synonym named 'create()' for whatever the
actual create method is
"""
op_name = 'create'
def get_meth_decorators(self) -> List[str]:
return []
def prep_inbound_params(self) -> List['OpParameter']:
params = [p for p in self.prep_outbound_params()
if p.name not in ('name', 'async_req')]
return params
def prep_outbound_params(self) -> List['OpParameter']:
params = []
for p in self.parameters:
if p.name == 'namespace':
p.required = False
p.description = f"{p.description}. NOTE: if you leave out the " \
f"namespace from the arguments you *must* have " \
f"filled in the namespace attribute in the metadata " \
f"for the resource!"
if p.name == "async_req":
continue
params.append(p)
return params
def _with_namespace_template(self):
return _create_body_with_namespace
def _without_namespace_template(self):
return _create_body_no_namespace
def namespace_name(self):
return 'effective_namespace'
def name_name(self):
return 'self.metadata.name'
def get_meth_body(self, parameters: Optional[List['OpParameter']] = None,
cd: Optional['ClassDescriptor'] = None) -> List[str]:
required = [p for p in parameters if p.required]
optional = [p for p in parameters if not p.required]
param_assignments = []
seen_namespace = False
for p in chain(required, optional):
assert isinstance(p, OpParameter)
if p.name == "namespace":
seen_namespace = True
local_name = self.namespace_name()
param_name = camel_to_pep8(p.name)
elif p.name == 'name':
local_name = self.name_name()
param_name = camel_to_pep8(p.name)
else:
local_name = param_name = camel_to_pep8(p.name)
param_assignments.append(f"{param_name}={local_name}")
param_assignments.append("client=client")
body_str = (self._with_namespace_template()
if seen_namespace else
self._without_namespace_template())
fdict = {"classname": cd.short_name if cd else 'UNKNOWN',
"methname": self.base_op.meth_name,
'paramlist': ", ".join(param_assignments),
'op_name': self.op_name}
body = body_str.format(**fdict)
return body.split("\n")
_update_context_manager = \
"""
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
passed = ex_type is None and ex_value is None and ex_traceback is None
has_rollback = hasattr(self, "__rollback")
if passed:
try:
self.update()
except Exception:
if has_rollback:
self.merge(getattr(self, "__rollback"), overwrite=True)
delattr(self, "__rollback")
raise
if has_rollback:
if not passed:
self.merge(getattr(self, "__rollback"), overwrite=True)
delattr(self, "__rollback")
return False
"""
@register_crud_class('update')
class UpdateOperation(CreateOperation):
"""
A synthetic operation to make an 'update()' crud method to provide a synonym
to the patch method
"""
op_name = 'update'
def post_method_code(self, cd: Optional['ClassDescriptor'] = None) -> List[str]:
return _update_context_manager.split("\n")
_delete_body_with_namespace = \
"""
# noinspection PyDataclass
client = client or self.client
if namespace is not None:
effective_namespace = namespace
elif not self.metadata or not self.metadata.namespace:
raise RuntimeError("There must be a namespace supplied in either "
"the arguments to {op_name}() or in a "
"{classname}'s metadata")
else:
effective_namespace = self.metadata.namespace
if name is not None:
effective_name = name
elif not self.metadata or not self.metadata.name:
raise RuntimeError("There must be a name supplied in either "
"the arguments to {op_name}() or in a "
"{classname}'s metadata")
else:
effective_name = self.metadata.name
res = self.{methname}({paramlist})
if not 200 <= res.code <= 299:
raise KubernetesException("Kubernetes returned error " + str(res.code))
if self.__class__.__name__ == res.obj.__class__.__name__:
self.merge(res.obj, overwrite=True)
return self
"""
_delete_body_without_namespace = \
"""
# noinspection PyDataclass
client = client or self.client
if name is not None:
effective_name = name
elif not self.metadata or not self.metadata.name:
raise RuntimeError("There must be a name supplied in either "
"the arguments to {op_name}() or in a "
"{classname}'s metadata")
else:
effective_name = self.metadata.name
res = self.{methname}({paramlist})
if not 200 <= res.code <= 299:
raise KubernetesException("Kubernetes returned error " + str(res.code))
if self.__class__.__name__ == res.obj.__class__.__name__:
self.merge(res.obj, overwrite=True)
return self
"""
@register_crud_class('delete')
class DeleteOperation(CreateOperation):
op_name = 'delete'
def get_meth_decorators(self) -> List[str]:
return []
def prep_inbound_params(self) -> List['OpParameter']:
return self.prep_outbound_params()
def prep_outbound_params(self) -> List['OpParameter']:
params = []
for p in self.parameters:
if p.name in ('namespace', 'name'):
p.required = False
p.description = f"{p.description}. NOTE: if you leave out the " \
f"{p.name} from the arguments you *must* have " \
f"filled in the {p.name} attribute in the metadata " \
f"for the resource!"
if p.name == 'async_req':
continue
params.append(p)
return params
def name_name(self):
return 'effective_name'
def _with_namespace_template(self):
return _delete_body_with_namespace
def _without_namespace_template(self):
return _delete_body_without_namespace
@register_crud_class('read')
class ReadOperation(DeleteOperation):
"""
A synthetic operation; simple read() method for a more complex class
"""
op_name = 'read'
# maybe later...
# class ListCreateOperation(Operation):
# def __init__(self, contained_class: 'ClassDescriptor'):
# super(ListCreateOperation, self).__init__(self, "put", "-created, no path-"
# 'deferred', 'deferred',
# {'version': contained_class.version,
# 'group': contained_class.group,
# 'kind': f'{contained_class.kind}List'})
# self.is_staticmethod = False
# self.contained_class = contained_class
# for k in self.contained_class.operations:
# if k.startswith('create'):
# self._op_id = f'{k}List'
# break
# else:
# self._op_id = f'{self.contained_class.short_name}List'
#
# @property
# def op_id(self):
# for k in self.contained_class.operations:
# if k.startswith('create'):
# self._op_id = f'{k}List'
# break
# else:
# self._op_id = f'{self.contained_class.short_name}List'
# return self._op_id
#
# @op_id.setter
# def op_id(self, name):
# self._op_id = name
class ClassDescriptor(object):
def __init__(self, key, d):
self.full_name = full_swagger_name(key)
group, version, name = process_swagger_name(self.full_name)
self.short_name = name
self.group = None
self.kind = None
self.version = version
self.description = "None"
self.all_properties = {}
self.required = []
self.type = None
self.is_subclass_of = None
self.operations: Dict[str, Operation] = {}
self.has_gvk_dict = False
self.has_doc_markers = False
self.required_props = []
self.optional_props = []
self.update(d)
# crud_ops_created is a set of SyntheticOperation.op_name values
# for operations already created on this object. The contents
# of this set are managed by the Operation instances so they can
# see when an operation has already been generated for this object
self.crud_ops_created = set()
# flag re: if watch operations should be supported
self.watchable = False
# OK, now a hack for Kubernetes:
# Although K8s defines top-level objects that are collections
# of other objects, and there are API calls that fetch these collection
# objects, there are no API calls in the swagger that accept these
# objects as input for the purposes of creation. The K8s client, nonetheless,
# provides support for creation of these things directly from YAML (only)
# by iterating over them in the local client and just doing repeated calls
# to the singleton creation method. Since there's no spec'd version of this,
# we have to detect such a case when we have one of these and generate a special
# Operation ourselves that does the same iteration and calls the underlying
# operation on another class. I'll refrain from saying what I think about
# having to do this...
if self.short_name.endswith('List'):
# OK, invent an operation for these that can create a list
contained_class_name = self.short_name.replace('List', '')
mod = get_module_def(self.version)
cd = mod.get_class_desc_from_full_name(contained_class_name)
if cd is None:
raise NotImplementedError(f"Need
import warnings
warnings.filterwarnings('ignore')
import argparse
import math
from math import log10
import os
from collections import defaultdict
import itertools
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
#from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
import numpy as np
from console_progressbar import ProgressBar
import dataset
import models
import losses
import utils
if __name__ == "__main__":
no_summary = False
try:
from torchsummary import summary
except ModuleNotFoundError:
no_summary = True
print("No summary writer found")
#########################################################
# ARGUMENT PARSING
#########################################################
parser = argparse.ArgumentParser(
description='Superresolution for Isosurface Raytracing - Sparse',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Dataset
parser_group = parser.add_argument_group("Dataset")
parser_group.add_argument('--dataset', type=str, default=None, help="""
Semicolon-separated list of directories with the dataset of numpy images.\n
Specify either '--dataset' or '--hdf5'""")
parser_group.add_argument('--hdf5', type=str, default=None, help="""
Semicolon-separated list of HDF5 files with the dataset.
The HDF5 files are created with a specific crop size, so all crop settings are ignored.
Specify either '--dataset' or '--hdf5'""")
parser_group.add_argument('--numberOfImages', type=int, default=-1,
help="Number of images taken from the inpt dataset. Default: -1 = unlimited. Ignored for HDF5-dataset")
parser_group.add_argument('--testFraction', type=float, default=0.2,
help="The fraction of samples used for testing")
parser_group.add_argument('--trainCropsPerImage', type=int, default=51,
help="The number of crops per image. Ignored for HDF5-dataset")
parser_group.add_argument('--testCropsPerImage', type=int, default=23,
help="The number of crops per image. Ignored for HDF5-dataset")
parser_group.add_argument('--trainCropSize', type=int, default=64,
help="The size of the crops used for training. Ignored for HDF5-dataset")
parser_group.add_argument('--testCropSize', type=int, default=256,
help="The size of the crops used for testing. Ignored for HDF5-dataset")
parser_group.add_argument('--interpolateInput', action='store_true', help=
"""Use interpolated input from the sparse samples as input instead of only the samples.
This will append an extra channel to the network input with the plain sample mask.""")
parser_group.add_argument('--bufferSize', type=int, default=5,
help="The number of images that are loaded and buffered asynchronously. Ignored for HDF5-dataset")
parser_group.add_argument('--cropFillRate', type=float, default=0.4,
help="crops are only taken if at least cropFillRate*100%% pixels in the dense image are filled. Ignored for HDF5-dataset")
parser_group.add_argument('--smoothDense', action='store_true',
help="Set to true to use smoothed target images")
parser_group.add_argument('--externalFlow', action='store_true', help="""
False (old, default): the network gets the flow also as input an has to estimate it. The warping is done with the previous flow.
True (new): the optical flow for the warping is taken from the interpolated sparse samples and not passed to the network.
""")
# Model
parser_group = parser.add_argument_group("Model")
parser_group.add_argument('--architecture', type=str, default='UNet', choices=['UNet', 'DeepFovea'],
help="The network architecture, supported are 'UNet' and 'DeepFovea'")
parser_group.add_argument('--depth', type=int, default=6, help="The depth of the network")
#parser_group.add_argument('--filters', type=int, default=6, nargs='+', help="""
parser_group.add_argument('--filters', type=int, default=6, help="""
UNet: an int, the number of filters in the first layer of the UNet is 2**this_value.
DeepFovea: n integers with n=depth specifying the number of features per layer.""")
parser_group.add_argument('--padding', type=str, default="partial", choices=['off','zero','partial'],
help="UNet: The padding mode for the UNet")
parser_group.add_argument('--batchNorm', action='store_true', help="UNet: Use batch normalization in the network")
parser_group.add_argument('--residual', action='store_true', help="Use residual connections from input to output")
parser_group.add_argument('--hardInput', action='store_true', help="""
UNet:
If true, the valid input pixels are directly copied to the output.
This hard-enforces that the sparse input samples are preserved in the output,
instead of relying on the network and loss function to not change them.""")
parser_group.add_argument('--upMode', type=str, default='upsample', choices=['upconv', 'upsample'],
help="UNet: The upsample mode")
parser_group.add_argument('--disableTemporal', action='store_true', help='Disables temporal consistency')
parser_group.add_argument('--initialImage', type=str, default='zero',
choices=['zero','unshaded','input'], help="""
Specifies what should be used as the previous high res frame for the first frame of the sequence,
when no previous image is available from the previous prediction.
Available options:
- zero: fill everything with zeros (default)
- unshaded: Special defaults for unshaded mode: mask=-1, normal=[0,0,1], depth=0.5, ao=1
- input: Use the interpolated input
""")
parser_group.add_argument('--warpSpecialMask', action='store_true',
help="if True, the mask is filled with -1 instead of 0 at the borders")
# Losses
parser_group = parser.add_argument_group("Loss")
parser_group.add_argument('--losses', type=str, required=True, help="""
comma-separated list of loss terms with weighting as string
Format: <loss>:<target>:<weighting>
with: loss in {l1, l2, tl2}
target in {mask, normal, depth, color, ao, flow}
weighting a positive number
""")
parser_group.add_argument('--lossBorderPadding', type=int, default=24, help="""
Because flow + warping can't be accurately estimated at the borders of the image,
the border of the input images to the loss (ground truth, low res input, prediction)
are overwritten with zeros. The size of the border is specified by this parameter.
Pass zero to disable this padding. Default=24 (the TecoGAN paper uses 16).
""")
parser_group.add_argument('--lossAO', type=float, default=1.0,
help="Strength of ambient occlusion in the loss function. Default=1")
parser_group.add_argument('--lossAmbient', type=float, default=0.1,
help="Strength of the ambient light color in the loss function's shading. Default=0.1")
parser_group.add_argument('--lossDiffuse', type=float, default=0.1,
help="Strength of the diffuse light color in the loss function's shading. Default=1.0")
parser_group.add_argument('--lossSpecular', type=float, default=0.0,
help="Strength of the specular light color in the loss function's shading. Default=0.0")
# Training
parser_group = parser.add_argument_group("Training")
parser_group.add_argument('--trainBatchSize', type=int, default=4, help='training batch size')
parser_group.add_argument('--testBatchSize', type=int, default=4, help='testing batch size')
parser_group.add_argument('--nEpochs', type=int, default=1000, help='number of epochs to train for')
parser_group.add_argument('--lr', type=float, default=0.0001, help='Learning Rate. Default=0.0001')
parser_group.add_argument('--lrGamma', type=float, default=0.5, help='The learning rate decays every lrStep-epochs by this factor')
parser_group.add_argument('--lrStep', type=int, default=500, help='The learning rate decays every lrStep-epochs (this parameter) by lrGamma factor')
parser_group.add_argument('--weightDecay', type=float, default=0, help="Weight decay (L2 penalty), if supported by the optimizer. Default=0")
parser_group.add_argument('--optim', type=str, default="Adam", choices=['RMSprop', 'Rprop', 'Adam', 'LBFGS'],
help="The optimizer")
parser_group.add_argument('--noCuda', action='store_true', help='Disable cuda')
parser_group.add_argument('--seed', type=int, default=123, help='random seed to use')
parser_group.add_argument('--checkpointFreq', type=int, default=10,
help='checkpoints are saved every "checkpointFreq" epoch')
# Restore
parser_group = parser.add_argument_group("Restore")
parser_group.add_argument('--restore', type=int, default=-1, help="Restore training from the specified run index")
parser_group.add_argument('--restoreEpoch', type=int, default=-1, help="In combination with '--restore', specify the epoch from which to recover. Default: last epoch")
parser_group.add_argument('--pretrained', type=str, default=None, help="Path to a pretrained generator")
# Output
parser_group = parser.add_argument_group("Output")
parser_group.add_argument('--logdir', type=str, required=True,
help='directory for tensorboard logs')
parser_group.add_argument('--modeldir', type=str, required=True,
help='Output directory for the checkpoints')
parser_group.add_argument('--numVisImages', type=int, default=4,
help="The number of test images (see testBatchSize) that are saved for visualization, ")
# Parse it
opt = parser.parse_args()
opt_dict = vars(opt)
opt_dict['type'] = 'sparse1'
if not opt.noCuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run with --noCuda")
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
device = torch.device("cpu" if opt.noCuda else "cuda")
torch.set_num_threads(4)
print("Device:", device)
#########################################################
# RESERVE OUTPUT DIRECTORY
#########################################################
if not os.path.exists(opt.modeldir):
os.mkdir(opt.modeldir)
if not os.path.exists(opt.logdir):
os.mkdir(opt.logdir)
# Run directory
def findNextRunNumber(folder):
files = os.listdir(folder)
files = sorted([f for f in files if f.startswith('run')])
if len(files)==0:
return 0
return int(files[-1][3:])
nextRunNumber = max(findNextRunNumber(opt.logdir), findNextRunNumber(opt.modeldir)) + 1
if opt.restore == -1:
print('Current run: %05d'%nextRunNumber)
runName = 'run%05d'%nextRunNumber
logdir = os.path.join(opt.logdir, runName)
modeldir = os.path.join(opt.modeldir, runName)
os.makedirs(logdir)
os.makedirs(modeldir)
startEpoch = 1
else:
# prepare restoring
nextRunNumber = opt.restore
runName = 'run%05d'%nextRunNumber
modeldir = os.path.join(opt.modeldir, runName)
if opt.restoreEpoch == -1:
restoreEpoch = 0
while True:
modelInName = os.path.join(modeldir, "model_epoch_{}.pth".format(restoreEpoch+1))
if not os.path.exists(modelInName):
                break
restoreEpoch += 1
else:
restoreEpoch = opt.restoreEpoch
modelInName = os.path.join(modeldir, "model_epoch_{}.pth".format(restoreEpoch))
startEpoch = restoreEpoch
print('Current run: %05d'%nextRunNumber)
runName = 'run%05d'%nextRunNumber
logdir = os.path.join(opt.logdir, runName)
modeldir = os.path.join(opt.modeldir, runName)
# write settings and open tensorboard logger
optStr = str(opt)
print(optStr)
with open(os.path.join(modeldir, 'info.txt'), "w") as text_file:
text_file.write(optStr)
writer = SummaryWriter(logdir)
writer.add_text('info', optStr, 0)
#########################################################
# CREATE DATASETS
#########################################################
print('===> Loading datasets')
crop_size = None
if opt.dataset is not None:
dataset_directories = opt.dataset.split(';')
locator = dataset.SparseDatasetLocator(dataset_directories, opt.testFraction, opt.numberOfImages)
training_samples = locator.get_training_samples()
    test_samples = locator.get_test_samples()
print("Number of training sample files:", len(training_samples))
print("Number of test sample files:", len(test_samples))
print("Number of crops per image: ", opt.trainCropsPerImage, opt.testCropsPerImage)
crop_size = opt.trainCropSize
train_set = dataset.SparseDataset(
samples=training_samples,
crops_per_sample=opt.trainCropsPerImage,
crop_size=opt.trainCropSize,
fill_ratio=opt.cropFillRate,
buffer_size=opt.bufferSize)
test_set = dataset.SparseDataset(
        samples=test_samples,  # evaluate on the held-out test split, not the training samples
crops_per_sample=opt.testCropsPerImage,
crop_size=opt.testCropSize,
fill_ratio=opt.cropFillRate,
buffer_size=opt.bufferSize)
training_data_loader = DataLoader(
dataset=train_set,
batch_size=opt.trainBatchSize,
shuffle=False)
testing_data_loader = DataLoader(
dataset=test_set,
batch_size=opt.testBatchSize,
shuffle=False)
elif opt.hdf5 is not None:
dataset_directories = opt.hdf5.split(';')
locator = dataset.SparseDatasetLocatorHDF(
dataset_directories, opt.testFraction, opt.smoothDense)
crop_size = locator.crop_size()
train_set = dataset.SparseDatasetHDF5(locator, False)
test_set = dataset.SparseDatasetHDF5(locator, True)
training_data_loader = DataLoader(
dataset=train_set,
batch_size=opt.trainBatchSize,
shuffle=True)
testing_data_loader = DataLoader(
dataset=test_set,
batch_size=opt.testBatchSize,
shuffle=False)
else:
print("You must specify either '--dataset' or '--hdf5'!")
exit(-1)
"""
The DataLoader will return a tuple with:
- sparse, the input of shape B*T*9*H*W
- dense, the output of shape B*T*8*H*W
for the channels, see mainSparseDatasetGenerator.py
"""
#########################################################
# CREATE MODEL
#########################################################
print('===> Building model')
"""
interpolateInput:
- True:
# coding: utf-8
# SQLAlchemy Mappings generated from Rfam 14.0 using sqlacodegen (https://pypi.org/project/sqlacodegen/)
# sqlacodegen mysql+mysqlconnector://rfamro@mysql-rfam-public.ebi.ac.uk:4497/Rfam --outfile rfam_db.py
import sqlalchemy.orm
import sqlalchemy.engine.url
from sqlalchemy import CHAR, Column, DECIMAL, Date, DateTime, Enum, Float, ForeignKey, Index, String, TIMESTAMP, Table, Text, text
from sqlalchemy.dialects.mysql import BIGINT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMINT, MEDIUMTEXT, TINYINT, TINYTEXT, VARCHAR
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from dotenv import load_dotenv, find_dotenv
import os
Base = declarative_base()
metadata = Base.metadata
def rfam_session():
"""
Create a new connection to the Rfam database.
:return:
"""
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
# Set up a connection to a local docker copy of Rfam if appropriate environment variables have been set, otherwise
# connect to the public Rfam database
database = os.environ.get("MYSQL_DATABASE", default='Rfam')
host = os.environ.get("MYSQL_HOST", default='mysql-rfam-public.ebi.ac.uk')
port = os.environ.get("MYSQL_PORT", default=4497)
username = os.environ.get("MYSQL_USER", default='rfamro')
password = os.environ.get("MYSQL_PASSWORD")
# Set up the connection to the MySQL server and create a session
database_url = sqlalchemy.engine.url.URL("mysql+mysqlconnector", username=username, password=password,
host=host, port=port, database=database,)
engine = sqlalchemy.create_engine(database_url, encoding='utf8')
SessionMaker = sqlalchemy.orm.sessionmaker(bind=engine)
session = SessionMaker()
return session
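# Example usage (a minimal sketch, not part of the generated mappings): open a read-only
# session against the public Rfam server (the defaults above) and list a few clans once
# the mapped classes below have been defined:
#
#     session = rfam_session()
#     for clan in session.query(Clan).limit(5):
#         print(clan.clan_acc, clan.description)
#     session.close()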
class Author(Base):
__tablename__ = 'author'
author_id = Column(INTEGER(11), primary_key=True, unique=True)
name = Column(String(20), nullable=False)
last_name = Column(String(50))
initials = Column(String(4))
orcid = Column(String(19))
synonyms = Column(String(100))
class Clan(Base):
__tablename__ = 'clan'
clan_acc = Column(String(7), primary_key=True, unique=True)
id = Column(String(40))
previous_id = Column(TINYTEXT)
description = Column(String(100))
author = Column(TINYTEXT)
comment = Column(LONGTEXT)
created = Column(DateTime, nullable=False)
updated = Column(TIMESTAMP, nullable=False)
family = relationship('Family', secondary='clan_membership')
class DbVersion(Base):
__tablename__ = 'db_version'
rfam_release = Column(Float(4, True), primary_key=True)
rfam_release_date = Column(DateTime, nullable=False)
number_families = Column(INTEGER(10), nullable=False)
embl_release = Column(TINYTEXT, nullable=False)
genome_collection_date = Column(DateTime)
refseq_version = Column(INTEGER(11))
pdb_date = Column(DateTime)
infernal_version = Column(String(45))
t_dead_clan = Table(
'dead_clan', metadata,
Column('clan_acc', String(7), nullable=False, unique=True, server_default=text("''")),
Column('clan_id', String(40), nullable=False),
Column('comment', MEDIUMTEXT),
Column('forward_to', String(7)),
Column('user', TINYTEXT, nullable=False)
)
t_dead_family = Table(
'dead_family', metadata,
Column('rfam_acc', String(7), nullable=False, unique=True, server_default=text("''")),
Column('rfam_id', String(40), nullable=False),
Column('comment', MEDIUMTEXT),
Column('forward_to', String(7)),
Column('title', String(150)),
Column('user', TINYTEXT, nullable=False)
)
class EnsemblName(Base):
__tablename__ = 'ensembl_names'
insdc = Column(String(50), primary_key=True, server_default=text("''"))
ensembl = Column(String(50))
t_family_author = Table(
'family_author', metadata,
Column('rfam_acc', String(7), nullable=False),
Column('author_id', INTEGER(11), nullable=False),
Column('desc_order', INTEGER(4), nullable=False)
)
class Genome(Base):
__tablename__ = 'genome'
upid = Column(String(20), primary_key=True, unique=True, server_default=text("''"))
assembly_acc = Column(String(20))
assembly_version = Column(INTEGER(4))
wgs_acc = Column(String(20))
wgs_version = Column(INTEGER(4))
assembly_name = Column(String(100))
assembly_level = Column(Enum('contig', 'chromosome', 'scaffold', 'complete-genome'))
study_ref = Column(String(20))
description = Column(MEDIUMTEXT)
total_length = Column(BIGINT(20))
ungapped_length = Column(BIGINT(20))
circular = Column(TINYINT(3))
ncbi_id = Column(INTEGER(10), nullable=False, index=True)
scientific_name = Column(String(300))
common_name = Column(String(200))
kingdom = Column(String(50))
num_rfam_regions = Column(INTEGER(10))
num_families = Column(INTEGER(10))
is_reference = Column(TINYINT(1), nullable=False, server_default=text("'1'"))
is_representative = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
created = Column(DateTime, nullable=False)
updated = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
t_genseq = Table(
'genseq', metadata,
Column('upid', String(20), nullable=False, index=True, server_default=text("''")),
Column('rfamseq_acc', String(20), nullable=False, index=True, server_default=text("''")),
Column('chromosome_name', String(100)),
Column('chromosome_type', String(100)),
Column('version', String(6))
)
class Keyword(Base):
__tablename__ = 'keywords'
__table_args__ = (
Index('rfam_kw_idx', 'description', 'rfam_general', 'literature', 'wiki', 'pdb_mappings', 'clan_info'),
)
rfam_acc = Column(String(7), primary_key=True, server_default=text("''"))
rfam_id = Column(String(40))
description = Column(String(100), server_default=text("'NULL'"))
rfam_general = Column(LONGTEXT)
literature = Column(LONGTEXT)
wiki = Column(LONGTEXT)
pdb_mappings = Column(LONGTEXT)
clan_info = Column(LONGTEXT)
class LiteratureReference(Base):
__tablename__ = 'literature_reference'
pmid = Column(INTEGER(10), primary_key=True)
title = Column(TINYTEXT)
author = Column(Text)
journal = Column(TINYTEXT)
class Motif(Base):
__tablename__ = 'motif'
motif_acc = Column(String(7), primary_key=True)
motif_id = Column(String(40), index=True)
description = Column(String(75))
author = Column(TINYTEXT)
seed_source = Column(TINYTEXT)
gathering_cutoff = Column(Float(5, True))
trusted_cutoff = Column(Float(5, True))
noise_cutoff = Column(Float(5, True))
cmbuild = Column(TINYTEXT)
cmcalibrate = Column(TINYTEXT)
type = Column(String(50))
num_seed = Column(BIGINT(20))
average_id = Column(Float(5, True))
average_sqlen = Column(Float(7, True))
ecmli_lambda = Column(Float(10, True))
ecmli_mu = Column(Float(10, True))
ecmli_cal_db = Column(MEDIUMINT(9), server_default=text("'0'"))
ecmli_cal_hits = Column(MEDIUMINT(9), server_default=text("'0'"))
maxl = Column(MEDIUMINT(9), server_default=text("'0'"))
clen = Column(MEDIUMINT(9), server_default=text("'0'"))
match_pair_node = Column(TINYINT(1), server_default=text("'0'"))
hmm_tau = Column(Float(10, True))
hmm_lambda = Column(Float(10, True))
wiki = Column(String(80))
created = Column(DateTime, nullable=False)
updated = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Pdb(Base):
__tablename__ = 'pdb'
pdb_id = Column(String(4), primary_key=True)
keywords = Column(TINYTEXT)
title = Column(MEDIUMTEXT)
date = Column(TINYTEXT)
resolution = Column(DECIMAL(5, 2), server_default=text("'0.00'"))
method = Column(TINYTEXT)
author = Column(MEDIUMTEXT)
class Refseq(Base):
__tablename__ = 'refseq'
refseq_acc = Column(String(14), primary_key=True)
description = Column(MEDIUMTEXT)
species = Column(MEDIUMTEXT)
ncbi_taxid = Column(INTEGER(10))
t_rnacentral_matches = Table(
'rnacentral_matches', metadata,
Column('rfamseq_acc', String(20), nullable=False, index=True, server_default=text("''")),
Column('seq_start', BIGINT(19), nullable=False, index=True, server_default=text("'0'")),
Column('seq_end', BIGINT(19), nullable=False, index=True),
Column('md5', String(32), nullable=False),
Column('rnacentral_id', String(13), index=True)
)
class Taxonomy(Base):
__tablename__ = 'taxonomy'
ncbi_id = Column(INTEGER(10), primary_key=True, server_default=text("'0'"))
species = Column(String(100), nullable=False, index=True, server_default=text("''"))
tax_string = Column(MEDIUMTEXT)
tree_display_name = Column(String(100))
align_display_name = Column(String(50))
t_taxonomy_websearch = Table(
'taxonomy_websearch', metadata,
Column('ncbi_id', INTEGER(10), index=True, server_default=text("'0'")),
Column('species', String(100), index=True),
Column('rgt', INTEGER(10), index=True),
Column('taxonomy', MEDIUMTEXT),
Column('lft', INTEGER(10), index=True),
Column('parent', INTEGER(10), index=True),
Column('level', String(200), index=True),
Column('minimal', TINYINT(1), nullable=False, index=True, server_default=text("'0'")),
Column('rank', String(100))
)
class Version(Base):
__tablename__ = 'version'
rfam_release = Column(Float(4, True), primary_key=True)
rfam_release_date = Column(Date, nullable=False)
number_families = Column(INTEGER(10), nullable=False)
embl_release = Column(TINYTEXT, nullable=False)
class Wikitext(Base):
__tablename__ = 'wikitext'
auto_wiki = Column(INTEGER(10), primary_key=True)
title = Column(VARCHAR(150), nullable=False, unique=True)
t_clan_database_link = Table(
'clan_database_link', metadata,
Column('clan_acc', ForeignKey('clan.clan_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('db_id', TINYTEXT, nullable=False),
Column('comment', TINYTEXT),
Column('db_link', TINYTEXT, nullable=False),
Column('other_params', TINYTEXT)
)
t_clan_literature_reference = Table(
'clan_literature_reference', metadata,
Column('clan_acc', ForeignKey('clan.clan_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('pmid', ForeignKey('literature_reference.pmid', ondelete='CASCADE'), nullable=False, index=True),
Column('comment', TINYTEXT),
Column('order_added', TINYINT(3))
)
class Family(Base):
__tablename__ = 'family'
rfam_acc = Column(String(7), primary_key=True, unique=True)
rfam_id = Column(String(40), nullable=False, index=True)
auto_wiki = Column(ForeignKey('wikitext.auto_wiki'), nullable=False, index=True)
description = Column(String(75))
author = Column(TINYTEXT)
seed_source = Column(TINYTEXT)
gathering_cutoff = Column(Float(5, True))
trusted_cutoff = Column(Float(5, True))
noise_cutoff = Column(Float(5, True))
comment = Column(LONGTEXT)
previous_id = Column(TINYTEXT)
cmbuild = Column(TINYTEXT)
cmcalibrate = Column(TINYTEXT)
cmsearch = Column(TINYTEXT)
num_seed = Column(BIGINT(20))
num_full = Column(BIGINT(20))
num_genome_seq = Column(BIGINT(20))
num_refseq = Column(BIGINT(20))
type = Column(String(50))
structure_source = Column(TINYTEXT)
number_of_species = Column(BIGINT(20))
number_3d_structures = Column(INTEGER(11))
tax_seed = Column(MEDIUMTEXT)
ecmli_lambda = Column(Float(10, True))
ecmli_mu = Column(Float(10, True))
ecmli_cal_db = Column(MEDIUMINT(9), server_default=text("'0'"))
ecmli_cal_hits = Column(MEDIUMINT(9), server_default=text("'0'"))
maxl = Column(MEDIUMINT(9), server_default=text("'0'"))
clen = Column(MEDIUMINT(9), server_default=text("'0'"))
match_pair_node = Column(TINYINT(1), server_default=text("'0'"))
hmm_tau = Column(Float(10, True))
hmm_lambda = Column(Float(10, True))
created = Column(DateTime, nullable=False)
updated = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
wikitext = relationship('Wikitext')
t_motif_database_link = Table(
'motif_database_link', metadata,
Column('motif_acc', ForeignKey('motif.motif_acc', ondelete='CASCADE', onupdate='CASCADE'), nullable=False, index=True),
Column('db_id', TINYTEXT, nullable=False),
Column('comment', TINYTEXT),
Column('db_link', TINYTEXT, nullable=False),
Column('other_params', TINYTEXT)
)
t_motif_file = Table(
'motif_file', metadata,
Column('motif_acc', ForeignKey('motif.motif_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('seed', LONGBLOB, nullable=False),
Column('cm', LONGBLOB, nullable=False)
)
t_motif_literature = Table(
'motif_literature', metadata,
Column('motif_acc', ForeignKey('motif_old.motif_acc', ondelete='CASCADE', onupdate='CASCADE'), nullable=False, index=True),
Column('pmid', ForeignKey('literature_reference.pmid', ondelete='CASCADE', onupdate='CASCADE'), nullable=False, index=True),
Column('comment', TINYTEXT),
Column('order_added', TINYINT(3))
)
t_motif_pdb = Table(
'motif_pdb', metadata,
Column('motif_acc', ForeignKey('motif_old.motif_acc', ondelete='CASCADE', onupdate='CASCADE'), nullable=False, index=True),
Column('pdb_id', String(4), nullable=False, index=True),
Column('chain', String(4)),
Column('pdb_start', MEDIUMINT(9)),
Column('pdb_end', MEDIUMINT(9))
)
class Rfamseq(Base):
__tablename__ = 'rfamseq'
rfamseq_acc = Column(String(20), primary_key=True, unique=True, server_default=text("''"))
accession = Column(String(15), nullable=False)
version = Column(INTEGER(6), nullable=False, index=True)
ncbi_id = Column(ForeignKey('taxonomy.ncbi_id', ondelete='CASCADE'), nullable=False, index=True)
mol_type = Column(Enum('protein', 'genomic DNA', 'DNA', 'ss-DNA', 'RNA', 'genomic RNA', 'ds-RNA', 'ss-cRNA', 'ss-RNA', 'mRNA', 'tRNA', 'rRNA', 'snoRNA', 'snRNA', 'scRNA', 'pre-RNA', 'other RNA', 'other DNA', 'unassigned DNA', 'unassigned RNA', 'viral cRNA', 'cRNA', 'transcribed RNA'), nullable=False)
length = Column(INTEGER(10), server_default=text("'0'"))
description = Column(String(250), nullable=False, server_default=text("''"))
previous_acc = Column(MEDIUMTEXT)
source = Column(CHAR(20), nullable=False)
ncbi = relationship('Taxonomy')
t_alignment_and_tree = Table(
'alignment_and_tree', metadata,
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('type', Enum('seed', 'seedTax', 'genome', 'genomeTax'), nullable=False),
Column('alignment', LONGBLOB),
Column('tree', LONGBLOB),
Column('treemethod', TINYTEXT),
Column('average_length', Float(7, True)),
Column('percent_id', Float(5, True)),
Column('number_of_sequences', BIGINT(20))
)
t_clan_membership = Table(
'clan_membership', metadata,
Column('clan_acc', ForeignKey('clan.clan_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False, unique=True)
)
t_database_link = Table(
'database_link', metadata,
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('db_id', TINYTEXT, nullable=False),
Column('comment', TINYTEXT),
Column('db_link', TINYTEXT, nullable=False),
Column('other_params', TINYTEXT)
)
t_family_literature_reference = Table(
'family_literature_reference', metadata,
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('pmid', ForeignKey('literature_reference.pmid', ondelete='CASCADE'), nullable=False, index=True),
Column('comment', TINYTEXT),
Column('order_added', TINYINT(3))
)
t_family_ncbi = Table(
'family_ncbi', metadata,
Column('ncbi_id', ForeignKey('taxonomy.ncbi_id', ondelete='CASCADE'), nullable=False, index=True),
Column('rfam_id', String(40)),
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False, index=True)
)
t_features = Table(
'features', metadata,
Column('rfamseq_acc', ForeignKey('rfamseq.rfamseq_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('database_id', String(50), nullable=False),
Column('primary_id', String(100), nullable=False),
Column('secondary_id', String(255)),
Column('feat_orient', TINYINT(3), nullable=False, server_default=text("'0'")),
Column('feat_start', BIGINT(19), nullable=False, server_default=text("'0'")),
Column('feat_end', BIGINT(19), nullable=False, server_default=text("'0'")),
Column('quaternary_id', String(150))
)
t_full_region = Table(
'full_region', metadata,
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False),
Column('rfamseq_acc', ForeignKey('rfamseq.rfamseq_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('seq_start', BIGINT(19), nullable=False, server_default=text("'0'")),
Column('seq_end', BIGINT(19), nullable=False),
Column('bit_score', Float(7, True), nullable=False, server_default=text("'0.00'")),
Column('evalue_score', String(15), nullable=False, server_default=text("'0'")),
Column('cm_start', MEDIUMINT(8), nullable=False),
Column('cm_end', MEDIUMINT(8), nullable=False),
Column('truncated', Enum('0', '5', '3', '53'), nullable=False),
Column('type', Enum('seed', 'full'), nullable=False, server_default=text("'full'")),
Column('is_significant', TINYINT(1), nullable=False, index=True),
Index('full_region_acc_sign', 'rfam_acc', 'is_significant')
)
t_html_alignment = Table(
'html_alignment', metadata,
Column('rfam_acc', ForeignKey('family.rfam_acc', ondelete='CASCADE'), nullable=False, index=True),
Column('type', Enum('seed', 'genome', 'seedColorstock', 'genomeColorstock'), nullable=False, index=True),
Column('html', LONGBLOB),
Column('block', INTEGER(6), nullable=False, index=True),
Column('html_alignmentscol', VARCHAR(45))
)
t_matches_and_fasta = Table(
'matches_and_fasta', metadata,
Column('rfam_acc', ForeignKey('family.rfam_acc'), nullable=False, index=True),
    Column('match_list',
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel
from .models import file_storage_type_mapping
missing = Sentinel("Missing")
class FileStorageClient(object):
"""
API for the File Storage service. Use this API to manage file systems, mount targets, and snapshots. For more information, see [Overview of File Storage](/iaas/Content/File/Concepts/filestorageoverview.htm).
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
the dict using :py:meth:`~oci.config.validate_config`
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'timeout': kwargs.get('timeout'),
'base_path': '/20171215',
'service_endpoint_template': 'https://filestorage.{region}.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False)
}
self.base_client = BaseClient("file_storage", config, signer, file_storage_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
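    # Example construction (a sketch, assuming a standard ~/.oci/config profile on disk):
    #
    #     import oci
    #     config = oci.config.from_file()
    #     client = oci.file_storage.FileStorageClient(config)
    #     # or with a client-level retry strategy applied to every call:
    #     client = oci.file_storage.FileStorageClient(
    #         config, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)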
def change_file_system_compartment(self, file_system_id, change_file_system_compartment_details, **kwargs):
"""
Moves a file system and its associated snapshots into a different compartment within the same tenancy. For information about moving resources between compartments, see `Moving Resources to a Different Compartment`__
__ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes
:param str file_system_id: (required)
The OCID of the file system.
:param ChangeFileSystemCompartmentDetails change_file_system_compartment_details: (required)
Details for changing the compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/fileSystems/{fileSystemId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_file_system_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"fileSystemId": file_system_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_file_system_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_file_system_compartment_details)
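    # Example call (a sketch; the OCIDs below are placeholders, not real resources):
    #
    #     from oci.file_storage.models import ChangeFileSystemCompartmentDetails
    #     details = ChangeFileSystemCompartmentDetails(
    #         compartment_id="ocid1.compartment.oc1..exampleuniqueid")
    #     client.change_file_system_compartment(
    #         "ocid1.filesystem.oc1.iad.exampleuniqueid", details)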
def change_mount_target_compartment(self, mount_target_id, change_mount_target_compartment_details, **kwargs):
"""
Moves a mount target and its associated export set into a different compartment within the same tenancy. For information about moving resources between compartments, see `Moving Resources to a Different Compartment`__
__ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes
:param str mount_target_id: (required)
The OCID of the mount target.
:param ChangeMountTargetCompartmentDetails change_mount_target_compartment_details: (required)
Details for changing the compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique identifier for the request.
If you need to contact Oracle about a particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/mountTargets/{mountTargetId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_mount_target_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"mountTargetId": mount_target_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_mount_target_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_mount_target_compartment_details)
def create_export(self, create_export_details, **kwargs):
"""
Creates a new export in the specified export set, path, and
file system.
:param CreateExportDetails create_export_details: (required)
Details for creating a new export.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error without risk of executing that same action again.
"""
Sets the value of the `configuration` property.
"""
Struct._check_type('configuration', value, Configuration)
self._configuration = value
@property
def timezone(self):
"""
Returns the value of the `timezone` property.
"""
return self._timezone
@timezone.setter
def timezone(self, value):
"""
Sets the value of the `timezone` property.
"""
self._timezone = value
@property
def authorized_ssh_keys(self):
"""
Returns the value of the `authorized_ssh_keys` property.
"""
return self._authorized_ssh_keys
@authorized_ssh_keys.setter
def authorized_ssh_keys(self, value):
"""
Sets the value of the `authorized_ssh_keys` property.
"""
self._authorized_ssh_keys = value
@property
def dns_search(self):
"""
Returns the value of the `dns_search` property.
"""
return self._dns_search
@dns_search.setter
def dns_search(self, value):
"""
Sets the value of the `dns_search` property.
"""
self._dns_search = value
@property
def cloud_init_network_protocol(self):
"""
Returns the value of the `cloud_init_network_protocol` property.
"""
return self._cloud_init_network_protocol
@cloud_init_network_protocol.setter
def cloud_init_network_protocol(self, value):
"""
Sets the value of the `cloud_init_network_protocol` property.
"""
Struct._check_type('cloud_init_network_protocol', value, CloudInitNetworkProtocol)
self._cloud_init_network_protocol = value
@property
def system_locale(self):
"""
Returns the value of the `system_locale` property.
"""
return self._system_locale
@system_locale.setter
def system_locale(self, value):
"""
Sets the value of the `system_locale` property.
"""
self._system_locale = value
@property
def user_locale(self):
"""
Returns the value of the `user_locale` property.
"""
return self._user_locale
@user_locale.setter
def user_locale(self, value):
"""
Sets the value of the `user_locale` property.
"""
self._user_locale = value
@property
def active_directory_ou(self):
"""
Returns the value of the `active_directory_ou` property.
"""
return self._active_directory_ou
@active_directory_ou.setter
def active_directory_ou(self, value):
"""
Sets the value of the `active_directory_ou` property.
"""
self._active_directory_ou = value
@property
def org_name(self):
"""
Returns the value of the `org_name` property.
"""
return self._org_name
@org_name.setter
def org_name(self, value):
"""
Sets the value of the `org_name` property.
"""
self._org_name = value
@property
def domain(self):
"""
Returns the value of the `domain` property.
"""
return self._domain
@domain.setter
def domain(self, value):
"""
Sets the value of the `domain` property.
"""
self._domain = value
@property
def ui_language(self):
"""
Returns the value of the `ui_language` property.
"""
return self._ui_language
@ui_language.setter
def ui_language(self, value):
"""
Sets the value of the `ui_language` property.
"""
self._ui_language = value
@property
def windows_license_key(self):
"""
Returns the value of the `windows_license_key` property.
"""
return self._windows_license_key
@windows_license_key.setter
def windows_license_key(self, value):
"""
Sets the value of the `windows_license_key` property.
"""
self._windows_license_key = value
@property
def input_locale(self):
"""
Returns the value of the `input_locale` property.
"""
return self._input_locale
@input_locale.setter
def input_locale(self, value):
"""
Sets the value of the `input_locale` property.
"""
self._input_locale = value
@property
def nic_configurations(self):
"""
Returns the value of the `nic_configurations` property.
"""
return self._nic_configurations
@nic_configurations.setter
def nic_configurations(self, value):
"""
Sets the value of the `nic_configurations` property.
"""
self._nic_configurations = value
@property
def dns_servers(self):
"""
Returns the value of the `dns_servers` property.
"""
return self._dns_servers
@dns_servers.setter
def dns_servers(self, value):
"""
Sets the value of the `dns_servers` property.
"""
self._dns_servers = value
@property
def cloud_init(self):
"""
Returns the value of the `cloud_init` property.
"""
return self._cloud_init
@cloud_init.setter
def cloud_init(self, value):
"""
Sets the value of the `cloud_init` property.
"""
Struct._check_type('cloud_init', value, CloudInit)
self._cloud_init = value
@property
def custom_script(self):
"""
Returns the value of the `custom_script` property.
"""
return self._custom_script
@custom_script.setter
def custom_script(self, value):
"""
Sets the value of the `custom_script` property.
"""
self._custom_script = value
@property
def user_name(self):
"""
Returns the value of the `user_name` property.
"""
return self._user_name
@user_name.setter
def user_name(self, value):
"""
Sets the value of the `user_name` property.
"""
self._user_name = value
@property
def regenerate_ids(self):
"""
Returns the value of the `regenerate_ids` property.
"""
return self._regenerate_ids
@regenerate_ids.setter
def regenerate_ids(self, value):
"""
Sets the value of the `regenerate_ids` property.
"""
self._regenerate_ids = value
@property
def root_password(self):
"""
Returns the value of the `root_password` property.
"""
return self._root_password
@root_password.setter
def root_password(self, value):
"""
Sets the value of the `root_password` property.
"""
self._root_password = value
class Io(Struct):
def __init__(
self,
threads=None,
):
super(Io, self).__init__(
)
self.threads = threads
@property
def threads(self):
"""
Returns the value of the `threads` property.
"""
return self._threads
@threads.setter
def threads(self, value):
"""
Sets the value of the `threads` property.
"""
self._threads = value
class Ip(Struct):
def __init__(
self,
address=None,
gateway=None,
netmask=None,
version=None,
):
super(Ip, self).__init__(
)
self.address = address
self.gateway = gateway
self.netmask = netmask
self.version = version
@property
def version(self):
"""
Returns the value of the `version` property.
"""
return self._version
@version.setter
def version(self, value):
"""
Sets the value of the `version` property.
"""
Struct._check_type('version', value, IpVersion)
self._version = value
@property
def address(self):
"""
Returns the value of the `address` property.
"""
return self._address
@address.setter
def address(self, value):
"""
Sets the value of the `address` property.
"""
self._address = value
@property
def netmask(self):
"""
Returns the value of the `netmask` property.
"""
return self._netmask
@netmask.setter
def netmask(self, value):
"""
Sets the value of the `netmask` property.
"""
self._netmask = value
@property
def gateway(self):
"""
Returns the value of the `gateway` property.
"""
return self._gateway
@gateway.setter
def gateway(self, value):
"""
Sets the value of the `gateway` property.
"""
self._gateway = value
class IpAddressAssignment(Struct):
def __init__(
self,
assignment_method=None,
ip=None,
):
super(IpAddressAssignment, self).__init__(
)
self.assignment_method = assignment_method
self.ip = ip
@property
def ip(self):
"""
Returns the value of the `ip` property.
"""
return self._ip
@ip.setter
def ip(self, value):
"""
Sets the value of the `ip` property.
"""
Struct._check_type('ip', value, Ip)
self._ip = value
@property
def assignment_method(self):
"""
Returns the value of the `assignment_method` property.
"""
return self._assignment_method
@assignment_method.setter
def assignment_method(self, value):
"""
Sets the value of the `assignment_method` property.
"""
Struct._check_type('assignment_method', value, BootProtocol)
self._assignment_method = value
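# Example (a sketch; assumes the BootProtocol and IpVersion enums defined elsewhere in
# this SDK are available alongside these structs):
#
#     ip = Ip(address='192.168.1.10', netmask='255.255.255.0', gateway='192.168.1.1')
#     assignment = IpAddressAssignment(assignment_method=BootProtocol.STATIC, ip=ip)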
class IscsiBond(Identified):
def __init__(
self,
comment=None,
data_center=None,
description=None,
id=None,
name=None,
networks=None,
storage_connections=None,
):
super(IscsiBond, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.data_center = data_center
self.networks = networks
self.storage_connections = storage_connections
@property
def storage_connections(self):
"""
Returns the value of the `storage_connections` property.
"""
return self._storage_connections
@storage_connections.setter
def storage_connections(self, value):
"""
Sets the value of the `storage_connections` property.
"""
self._storage_connections = value
@property
def data_center(self):
"""
Returns the value of the `data_center` property.
"""
return self._data_center
@data_center.setter
def data_center(self, value):
"""
Sets the value of the `data_center` property.
"""
Struct._check_type('data_center', value, DataCenter)
self._data_center = value
@property
def networks(self):
"""
Returns the value of the `networks` property.
"""
return self._networks
@networks.setter
def networks(self, value):
"""
Sets the value of the `networks` property.
"""
self._networks = value
class IscsiDetails(Struct):
def __init__(
self,
address=None,
disk_id=None,
initiator=None,
lun_mapping=None,
        password=None,
paths=None,
port=None,
portal=None,
product_id=None,
serial=None,
size=None,
status=None,
storage_domain_id=None,
target=None,
username=None,
vendor_id=None,
volume_group_id=None,
):
super(IscsiDetails, self).__init__(
)
self.address = address
self.disk_id = disk_id
self.initiator = initiator
self.lun_mapping = lun_mapping
self.password = password
self.paths = paths
self.port = port
self.portal = portal
self.product_id = product_id
self.serial = serial
self.size = size
self.status = status
self.storage_domain_id = storage_domain_id
self.target = target
self.username = username
self.vendor_id = vendor_id
self.volume_group_id = volume_group_id
@property
def storage_domain_id(self):
"""
Returns the value of the `storage_domain_id` property.
"""
return self._storage_domain_id
@storage_domain_id.setter
def storage_domain_id(self, value):
"""
Sets the value of the `storage_domain_id` property.
"""
self._storage_domain_id = value
@property
def vendor_id(self):
"""
Returns the value of the `vendor_id` property.
"""
return self._vendor_id
@vendor_id.setter
def vendor_id(self, value):
"""
Sets the value of the `vendor_id` property.
"""
self._vendor_id = value
@property
def address(self):
"""
Returns the value of the `address` property.
"""
return self._address
@address.setter
def address(self, value):
"""
Sets the value of the `address` property.
"""
self._address = value
@property
def initiator(self):
"""
Returns the value of the `initiator` property.
"""
return self._initiator
@initiator.setter
def initiator(self, value):
"""
Sets the value of the `initiator` property.
"""
self._initiator = value
@property
def product_id(self):
"""
Returns the value of the `product_id` property.
"""
return self._product_id
@product_id.setter
def product_id(self, value):
"""
Sets the value of the `product_id` property.
"""
self._product_id = value
@property
def disk_id(self):
"""
Returns the value of the `disk_id` property.
"""
return self._disk_id
@disk_id.setter
def disk_id(self, value):
"""
Sets the value of the `disk_id` property.
"""
self._disk_id = value
@property
def target(self):
"""
Returns the value of the `target` property.
"""
return self._target
@target.setter
def target(self, value):
"""
Sets the value of the `target` property.
"""
self._target = value
@property
def serial(self):
"""
        Returns the value of the `serial` property.
        """
        return self._serial
# connectomics/utils/evaluate.py
import numpy as np
import scipy.sparse as sparse
import h5py
from scipy import ndimage
__all__ = [
'get_binary_jaccard',
]
def adapted_rand(seg, gt, all_stats=False):
"""Compute Adapted Rand error as defined by the SNEMI3D contest [1]
Formula is given as 1 - the maximal F-score of the Rand index
(excluding the zero component of the original labels). Adapted
from the SNEMI3D MATLAB script, hence the strange style.
Parameters
----------
seg : np.ndarray
the segmentation to score, where each value is the label at that point
gt : np.ndarray, same shape as seg
the groundtruth to score against, where each value is a label
all_stats : boolean, optional
whether to also return precision and recall as a 3-tuple with rand_error
Returns
-------
are : float
The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
where $p$ and $r$ are the precision and recall described below.
prec : float, optional
The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
rec : float, optional
The adapted Rand recall. (Only returned when `all_stats` is ``True``.)
References
----------
[1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
"""
# segA is truth, segB is query
segA = np.ravel(gt)
segB = np.ravel(seg)
n = segA.size
n_labels_A = np.amax(segA) + 1
n_labels_B = np.amax(segB) + 1
ones_data = np.ones(n,int)
p_ij = sparse.csr_matrix((ones_data, (segA[:], segB[:])), shape=(n_labels_A, n_labels_B))
a = p_ij[1:n_labels_A,:]
b = p_ij[1:n_labels_A,1:n_labels_B]
c = p_ij[1:n_labels_A,0].todense()
d = b.multiply(b)
a_i = np.array(a.sum(1))
b_i = np.array(b.sum(0))
sumA = np.sum(a_i * a_i)
sumB = np.sum(b_i * b_i) + (np.sum(c) / n)
sumAB = np.sum(d) + (np.sum(c) / n)
precision = sumAB / sumB
recall = sumAB / sumA
fScore = 2.0 * precision * recall / (precision + recall)
are = 1.0 - fScore
if all_stats:
return (are, precision, recall)
else:
return are
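# Example (a minimal sketch): score a toy segmentation against its ground truth.
#
#     gt  = np.array([[1, 1, 2, 2],
#                     [1, 1, 2, 2]])
#     seg = np.array([[1, 1, 1, 2],
#                     [1, 1, 1, 2]])
#     are, prec, rec = adapted_rand(seg, gt, all_stats=True)  # 0.0 would be a perfect match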
# Evaluation code courtesy of <NAME>, taken from
# https://github.com/janelia-flyem/gala/blob/master/gala/evaluate.py
def voi(reconstruction, groundtruth, ignore_reconstruction=[], ignore_groundtruth=[0]):
"""Return the conditional entropies of the variation of information metric. [1]
Let X be a reconstruction, and Y a ground truth labelling. The variation of
information between the two is the sum of two conditional entropies:
VI(X, Y) = H(X|Y) + H(Y|X).
The first one, H(X|Y), is a measure of oversegmentation, the second one,
H(Y|X), a measure of undersegmentation. These measures are referred to as
the variation of information split or merge error, respectively.
Parameters
----------
seg : np.ndarray, int type, arbitrary shape
A candidate segmentation.
gt : np.ndarray, int type, same shape as `seg`
The ground truth segmentation.
ignore_seg, ignore_gt : list of int, optional
Any points having a label in this list are ignored in the evaluation.
By default, only the label 0 in the ground truth will be ignored.
Returns
-------
(split, merge) : float
The variation of information split and merge error, i.e., H(X|Y) and H(Y|X)
References
----------
[1] <NAME>. (2007). Comparing clusterings - an information based
distance. Journal of Multivariate Analysis 98, 873-895.
"""
(hyxg, hxgy) = split_vi(reconstruction, groundtruth, ignore_reconstruction, ignore_groundtruth)
return (hxgy, hyxg)
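# Example (a sketch): a pure over-segmentation has zero merge (undersegmentation) error.
#
#     gt  = np.array([[1, 1, 2, 2]])
#     seg = np.array([[1, 3, 2, 2]])      # ground-truth label 1 split into {1, 3}
#     split, merge = voi(seg, gt)         # split > 0, merge == 0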
def split_vi(x, y=None, ignore_x=[0], ignore_y=[0]):
"""Return the symmetric conditional entropies associated with the VI.
The variation of information is defined as VI(X,Y) = H(X|Y) + H(Y|X).
If Y is the ground-truth segmentation, then H(Y|X) can be interpreted
as the amount of under-segmentation of Y and H(X|Y) is then the amount
of over-segmentation. In other words, a perfect over-segmentation
will have H(Y|X)=0 and a perfect under-segmentation will have H(X|Y)=0.
If y is None, x is assumed to be a contingency table.
Parameters
----------
x : np.ndarray
Label field (int type) or contingency table (float). `x` is
interpreted as a contingency table (summing to 1.0) if and only if `y`
is not provided.
y : np.ndarray of int, same shape as x, optional
A label field to compare to `x`.
ignore_x, ignore_y : list of int, optional
Any points having a label in this list are ignored in the evaluation.
Ignore 0-labeled points by default.
Returns
-------
sv : np.ndarray of float, shape (2,)
The conditional entropies of Y|X and X|Y.
See Also
--------
vi
"""
_, _, _ , hxgy, hygx, _, _ = vi_tables(x, y, ignore_x, ignore_y)
# false merges, false splits
return np.array([hygx.sum(), hxgy.sum()])
def vi_tables(x, y=None, ignore_x=[0], ignore_y=[0]):
"""Return probability tables used for calculating VI.
If y is None, x is assumed to be a contingency table.
Parameters
----------
x, y : np.ndarray
Either x and y are provided as equal-shaped np.ndarray label fields
(int type), or y is not provided and x is a contingency table
(sparse.csc_matrix) that may or may not sum to 1.
ignore_x, ignore_y : list of int, optional
Rows and columns (respectively) to ignore in the contingency table.
These are labels that are not counted when evaluating VI.
Returns
-------
pxy : sparse.csc_matrix of float
The normalized contingency table.
px, py, hxgy, hygx, lpygx, lpxgy : np.ndarray of float
The proportions of each label in `x` and `y` (`px`, `py`), the
per-segment conditional entropies of `x` given `y` and vice-versa, the
per-segment conditional probability p log p.
"""
if y is not None:
pxy = contingency_table(x, y, ignore_x, ignore_y)
else:
cont = x
total = float(cont.sum())
# normalize, since it is an identity op if already done
pxy = cont / total
# Calculate probabilities
px = np.array(pxy.sum(axis=1)).ravel()
py = np.array(pxy.sum(axis=0)).ravel()
# Remove zero rows/cols
nzx = px.nonzero()[0]
nzy = py.nonzero()[0]
nzpx = px[nzx]
nzpy = py[nzy]
nzpxy = pxy[nzx, :][:, nzy]
# Calculate log conditional probabilities and entropies
lpygx = np.zeros(np.shape(px))
lpygx[nzx] = xlogx(divide_rows(nzpxy, nzpx)).sum(axis=1).ravel()
# \sum_x{p_{y|x} \log{p_{y|x}}}
hygx = -(px*lpygx) # \sum_x{p_x H(Y|X=x)} = H(Y|X)
lpxgy = np.zeros(np.shape(py))
lpxgy[nzy] = xlogx(divide_columns(nzpxy, nzpy)).sum(axis=0).ravel()
hxgy = -(py*lpxgy)
return [pxy] + list(map(np.asarray, [px, py, hxgy, hygx, lpygx, lpxgy]))
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
"""Return the contingency table for all regions in matched segmentations.
Parameters
----------
seg : np.ndarray, int type, arbitrary shape
A candidate segmentation.
gt : np.ndarray, int type, same shape as `seg`
The ground truth segmentation.
ignore_seg : list of int, optional
Values to ignore in `seg`. Voxels in `seg` having a value in this list
will not contribute to the contingency table. (default: [0])
ignore_gt : list of int, optional
Values to ignore in `gt`. Voxels in `gt` having a value in this list
will not contribute to the contingency table. (default: [0])
norm : bool, optional
Whether to normalize the table so that it sums to 1.
Returns
-------
cont : scipy.sparse.csc_matrix
A contingency table. `cont[i, j]` will equal the number of voxels
labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such voxels
if `norm=True`.)
"""
segr = seg.ravel()
gtr = gt.ravel()
    ignored = np.zeros(segr.shape, bool)  # np.bool was removed in newer NumPy; use the builtin
data = np.ones(len(gtr))
for i in ignore_seg:
ignored[segr == i] = True
for j in ignore_gt:
ignored[gtr == j] = True
data[ignored] = 0
cont = sparse.coo_matrix((data, (segr, gtr))).tocsc()
if norm:
cont /= float(cont.sum())
return cont
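# Example (a sketch): cont[i, j] counts voxels labelled i in `seg` and j in `gt`.
#
#     seg = np.array([1, 1, 2, 2])
#     gt  = np.array([1, 1, 1, 2])
#     cont = contingency_table(seg, gt, norm=False)
#     # cont[1, 1] == 2, cont[2, 1] == 1, cont[2, 2] == 1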
def divide_columns(matrix, row, in_place=False):
"""Divide each column of `matrix` by the corresponding element in `row`.
The result is as follows: out[i, j] = matrix[i, j] / row[j]
Parameters
----------
matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
The input matrix.
    row : a 1D np.ndarray, shape (N,)
The row dividing `matrix`.
in_place : bool (optional, default False)
Do the computation in-place.
Returns
-------
out : same type as `matrix`
The result of the row-wise division.
"""
if in_place:
out = matrix
else:
out = matrix.copy()
if type(out) in [sparse.csc_matrix, sparse.csr_matrix]:
if type(out) == sparse.csc_matrix:
convert_to_csc = True
out = out.tocsr()
else:
convert_to_csc = False
row_repeated = np.take(row, out.indices)
nz = out.data.nonzero()
out.data[nz] /= row_repeated[nz]
if convert_to_csc:
out = out.tocsc()
else:
out /= row[np.newaxis, :]
return out
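# Example (a sketch): each column j of the matrix is divided by row[j].
#
#     divide_columns(np.array([[2., 4.], [6., 8.]]), np.array([2., 4.]))
#     # -> [[1., 1.], [3., 2.]]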
def divide_rows(matrix, column, in_place=False):
"""Divide each row of `matrix` by the corresponding element in `column`.
The result is as follows: out[i, j] = matrix[i, j] / column[i]
Parameters
----------
matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
The input matrix.
    column : a 1D np.ndarray, shape (M,)
        The column dividing `matrix`.
    pool_helper = K8sHelper('VirtualMachinePool')
disk_node_name = get_node_name(disk_heler.get(params.vol))
pool_node_name = get_node_name(pool_helper.get(params.pool))
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
disk_info = get_vol_info_from_k8s(params.vol)
old_pool_info = get_pool_info_from_k8s(disk_info['pool'])
check_pool_active(old_pool_info)
prepareInfo = disk_prepare(disk_info['poolname'], params.vol, disk_info['uni'])
# create disk dir and create disk in dir.
disk_dir = '%s/%s' % (old_pool_info['path'], params.vol)
uuid = randomUUID().replace('-', '')
middle_disk_dir = '%s/%s' % (old_pool_info['path'], uuid)
middle_disk_path = '%s/%s' % (middle_disk_dir, params.newname)
clone_disk_dir = '%s/%s' % (pool_info['path'], params.newname)
clone_disk_path = '%s/%s' % (clone_disk_dir, params.newname)
if not os.path.exists(middle_disk_dir):
os.makedirs(middle_disk_dir)
with open('%s/config.json' % disk_dir, "r") as f:
config = load(f)
try:
op1 = Operation('cp -f %s %s' % (config['current'], middle_disk_path), {})
op1.execute()
except:
if os.path.exists(middle_disk_dir):
op3 = Operation('rm -rf %s' % middle_disk_dir, {})
op3.execute()
        raise ExecuteException('', 'Copy %s to middle disk path %s failed, aborting clone.' % (
config['current'], middle_disk_path))
try:
backing_file = DiskImageHelper.get_backing_file(middle_disk_path)
if backing_file:
op2 = Operation('qemu-img rebase -f %s -b "" %s' % (params.format, middle_disk_path), {})
op2.execute()
except:
if os.path.exists(middle_disk_dir):
op3 = Operation('rm -rf %s' % middle_disk_dir, {})
op3.execute()
raise ExecuteException('', 'Execute "qemu-img rebase %s" failed!, aborting clone.' % middle_disk_path)
# write config
config = {}
config['name'] = params.newname
config['dir'] = clone_disk_dir
config['current'] = clone_disk_path
config['pool'] = params.pool
config['poolname'] = pool_info['poolname']
with open('%s/config.json' % middle_disk_dir, "w") as f:
dump(config, f)
if disk_node_name == pool_node_name:
op = Operation('mv %s %s/%s' % (middle_disk_dir, pool_info['path'], params.newname), {})
op.execute()
jsondicts = get_disk_jsondict(params.pool, params.newname)
create_all_jsondict(jsondicts)
else:
ip = get_node_ip_by_node_name(pool_node_name)
op = Operation('scp -r %s root@%s:%s' % (middle_disk_dir, ip, clone_disk_dir), {})
op.execute()
op = Operation('rm -rf %s' % middle_disk_dir, {})
op.execute()
op = Operation('kubesds-adm registerDiskToK8s --pool %s --vol %s' % (params.pool, params.newname), {},
ip=ip, remote=True, with_result=True)
remote_result = op.execute()
if remote_result['result']['code'] != 0:
raise ExecuteException('RunCmdError', 'remote run cmd kubesds-adm registerDiskToK8s error.')
if result:
helper = K8sHelper("VirtualMachineDisk")
helper.create(params.newname, "volume", result)
success_print("success clone disk %s." % params.vol, result)
else:
success_print("success clone disk %s." % params.vol, {})
def registerDiskToK8s(params):
jsondicts = get_disk_jsondict(params.pool, params.vol)
create_all_jsondict(jsondicts)
success_print("success register disk %s to k8s." % params.vol, {})
# only used when migrating a disk to another node
def rebaseDiskSnapshot(params):
rebase_snapshot_with_config(params.pool, params.vol)
disk_info = get_vol_info_from_k8s(params.vol)
disk_prepare(disk_info['poolname'], disk_info['disk'], disk_info['uni'])
success_print("success rebase disk.", {})
def createDiskFromImage(params):
pool_info = get_pool_info_from_k8s(params.targetPool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
dest_dir = '%s/%s' % (pool_info['path'], params.name)
dest = '%s/%s' % (dest_dir, params.name)
dest_config_file = '%s/config.json' % (dest_dir)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir, 0o711)
if os.path.exists(dest_config_file):
raise Exception('Path %s already in use, aborting copy.' % dest_dir)
if params.full_copy:
try:
source_info = get_disk_info(params.source)
if source_info['format'] != 'qcow2':
op = Operation(
'qemu-img convert -f %s %s -O qcow2 %s' % (source_info['format'], params.source, dest), {})
op.execute()
else:
op = Operation('cp -f %s %s' % (params.source, dest), {})
op.execute()
except:
if os.path.exists(dest_dir):
op = Operation('rm -rf %s' % dest_dir, {})
op.execute()
raise Exception('Copy %s to %s failed!' % (params.source, dest))
try:
dest_info = get_disk_info(dest)
if dest_info['format'] == 'qcow2':
op = Operation('qemu-img rebase -f qcow2 %s -b "" -u' % (dest), {})
op.execute()
except:
if os.path.exists(dest_dir):
op = Operation('rm -rf %s' % dest_dir, {})
op.execute()
raise Exception('Execute "qemu-img rebase -f qcow2 %s" failed!' % (dest))
else:
if params.source.find('snapshots') >= 0:
source_disk_dir = os.path.dirname(os.path.dirname(params.source))
else:
source_disk_dir = os.path.dirname(params.source)
config = get_disk_config_by_path('%s/config.json' % source_disk_dir)
disk_info = get_disk_info(config['current'])
op = Operation(
'qemu-img create -f qcow2 -b %s -F %s %s' %
(config['current'], disk_info['format'], dest), {})
op.execute()
write_config(params.name, dest_dir, dest, params.targetPool, poolname)
result = get_disk_info_to_k8s(poolname, params.name)
helper = K8sHelper("VirtualMachineDisk")
helper.update(params.name, "volume", result)
success_print("success createDiskFromImage %s." % params.name, result)
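# Sketch of the two on-disk layouts produced by createDiskFromImage above
# (illustrative only; actual paths depend on the pool configuration):
#
#   full_copy=True   ->  <pool_path>/<name>/<name> is a standalone qcow2
#                        (converted or copied, then flattened with 'qemu-img rebase -b ""').
#   full_copy=False  ->  <pool_path>/<name>/<name> is a thin qcow2 whose backing
#                        file is the source disk's 'current' file ('qemu-img create -b ...').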
def disk_prepare(pool, vol, uni):
# // prepare
logger.debug(pool)
logger.debug(vol)
logger.debug(uni)
dp = None
try:
vol_info = get_vol_info_from_k8s(vol)
dp = vol_info['pool']
except:
ss_info = get_snapshot_info_from_k8s(vol)
dp = ss_info['pool']
# pool_info = get_pool_info_from_k8s(vol_info['pool'])
# op = Operation('vdisk-prepare ', {'poolname': pool, 'name': vol,
# 'uni': uni}, with_result=True)
auto_mount(dp)
def remote_disk_prepare(ip, pool, vol, uni):
# // remote prepare
op = Operation('kubesds-adm prepareDisk ', {'vol': vol}, remote=True, ip=ip, with_result=True)
cstor = op.execute()
if cstor['result']['code'] != 0:
raise ExecuteException('',
'remote disk prepare failed; cstor raised an exception. error code: %d, msg: %s, obj: %s' % (
cstor['result']['code'], cstor['result']['msg'], cstor['obj']))
return cstor
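# Rough shell equivalent of remote_disk_prepare above, assuming Operation(remote=True)
# runs the command on the target node over SSH (an assumption; the transport is
# implemented elsewhere):
#   ssh root@<ip> kubesds-adm prepareDisk --vol <vol>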
def prepareDisk(params):
if params.domain:
disk_paths = list(get_disks_spec(params.domain).keys())
logger.debug(disk_paths)
for path in disk_paths:
prepare_disk_by_path(path)
if params.vol:
prepare_disk_by_metadataname(params.vol)
if params.path:
prepare_disk_by_path(params.path)
success_print("prepare disk successful.", {})
def releaseDisk(params):
if params.domain:
disk_paths = list(get_disks_spec(params.domain).keys())
logger.debug(disk_paths)
for path in disk_paths:
release_disk_by_path(path)
if params.vol:
release_disk_by_metadataname(params.vol)
if params.path:
release_disk_by_path(params.path)
success_print("success release disk %s." % params.vol, {})
def showDisk(params):
pool_info = get_pool_info_from_k8s(params.pool)
poolname = pool_info['poolname']
result = get_disk_info_to_k8s(poolname, params.vol)
success_print("show disk %s success." % params.vol, result)
def showDiskSnapshot(params):
if params.type in ("localfs", "nfs", "glusterfs", "vdiskfs"):
ss_info = get_snapshot_info_from_k8s(params.name)
poolname = ss_info['poolname']
disk_config = get_disk_config(poolname, params.vol)
ss_path = '%s/snapshots/%s' % (disk_config['dir'], params.name)
result = get_snapshot_info_to_k8s(poolname, params.vol, params.name)
success_print("success show disk snapshot %s." % params.name, result)
def createExternalSnapshot(params):
disk_info = get_vol_info_from_k8s(params.vol)
poolname = disk_info['poolname']
disk_prepare(poolname, params.vol, disk_info['uni'])
disk_config = get_disk_config(poolname, params.vol)
if params.domain is None:
if check_disk_in_use(disk_config['current']):
raise ExecuteException('', 'disk in use: current file %s is being used by another process; '
'is a VM using the current file? Please check.' % disk_config['current'])
ss_dir = '%s/snapshots' % disk_config['dir']
if not os.path.exists(ss_dir):
os.makedirs(ss_dir)
ss_path = '%s/%s' % (ss_dir, params.name)
op1 = Operation('qemu-img create -f %s -b %s -F %s %s' %
(params.format, disk_config['current'], params.format, ss_path), {})
op1.execute()
with open('%s/config.json' % disk_config['dir'], "r") as f:
config = load(f)
config['current'] = ss_path
with open('%s/config.json' % disk_config['dir'], "w") as f:
dump(config, f)
else:
specs = get_disks_spec(params.domain)
if disk_config['current'] not in list(specs.keys()):
logger.debug('disk %s current is %s.' % (params.vol, disk_config['current']))
raise ExecuteException('', 'domain %s does not have disk %s' % (params.domain, params.vol))
vm_disk = specs[disk_config['current']]
ss_path = '%s/snapshots/%s' % (disk_config['dir'], params.name)
ss_dir = '%s/snapshots' % disk_config['dir']
if not os.path.exists(ss_dir):
os.makedirs(ss_dir)
not_need_snapshot_spec = ''
for disk_path in list(specs.keys()):
if disk_path == disk_config['current']:
continue
not_need_snapshot_spec = not_need_snapshot_spec + '--diskspec %s,snapshot=no ' % specs[disk_path]
# '/var/lib/libvirt/pooltest3/wyw123/snapshots/wyw123.6'
# 'vdb,snapshot=no'
op = Operation('virsh snapshot-create-as --domain %s --name %s --atomic --disk-only --no-metadata '
'--diskspec %s,snapshot=external,file=%s,driver=%s %s' %
(params.domain, params.name, vm_disk, ss_path, params.format, not_need_snapshot_spec),
{})
op.execute()
config_path = '%s/config.json' % os.path.dirname(ss_dir)
with open(config_path, "r") as f:
config = load(f)
config['current'] = ss_path
with open(config_path, "w") as f:
dump(config, f)
result = get_snapshot_info_to_k8s(poolname, params.vol, params.name)
# modify disk in k8s
modify_disk_info_in_k8s(poolname, params.vol)
vol_helper = K8sHelper('VirtualMachineDiskSnapshot')
vol_helper.create(params.name, 'volume', result)
success_print("success create disk external snapshot %s" % params.name, result)
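# Illustrative backing-chain sketch (not part of the original code): after
# createExternalSnapshot for a disk "vol1" with snapshot "snap1", the chain is roughly
#
#   <disk_dir>/vol1  <--  <disk_dir>/snapshots/snap1   (new writable top)
#
# and config.json's 'current' now points at the snapshot file. The chain can be
# inspected with, e.g., 'qemu-img info --backing-chain <disk_dir>/snapshots/snap1'.
# Paths and names here are hypothetical.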
# create snapshot on params.name, then rename snapshot to current
def revertExternalSnapshot(params):
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
helper = K8sHelper("VirtualMachineDiskSnapshot")
k8s_ss_info = helper.get_data(params.name, "volume")
backing_file = k8s_ss_info['full_backing_filename']
disk_prepare(poolname, params.vol, pool_info['url'])
if params.domain and is_vm_active(params.domain):
raise ExecuteException('', 'domain %s is still active, please stop it first.' % params.domain)
disk_config = get_disk_config(poolname, params.vol)
if check_disk_in_use(disk_config['current']):
raise ExecuteException('', 'error: current disk is in use; please check or set the real domain field.')
ss_path = '%s/snapshots/%s' % (disk_config['dir'], params.name)
if ss_path is None:
raise ExecuteException('', 'error: can not get snapshot backing file.')
uuid = randomUUID().replace('-', '')
new_file_path = '%s/%s' % (os.path.dirname(backing_file), uuid)
op1 = Operation('qemu-img create -f %s -b %s -F %s %s' %
(params.format, backing_file, params.format, new_file_path), {})
op1.execute()
# change vm disk
if params.domain and not change_vm_os_disk_file(params.domain, disk_config['current'], new_file_path):
op2 = Operation('rm -f %s' % new_file_path, {})
op2.execute()
raise ExecuteException('', 'can not change disk source in domain xml')
# modify the json file so that os_event_handler updates the data on the API server.
with open('%s/config.json' % disk_config['dir'], "r") as f:
config = load(f)
config['current'] = new_file_path
with open('%s/config.json' % disk_config['dir'], "w") as f:
dump(config, f)
# modify disk in k8s
modify_disk_info_in_k8s(poolname, params.vol)
# delete lifecycle
helper.delete_lifecycle(params.name)
success_print("success revert disk external snapshot %s." % params.name, {})
def deleteExternalSnapshot(params):
pool_info = get_pool_info_from_k8s(params.pool)
check_pool_active(pool_info)
poolname = pool_info['poolname']
helper = K8sHelper("VirtualMachineDiskSnapshot")
k8s_ss_info = helper.get_data(params.name, "volume")
backing_file = k8s_ss_info['full_backing_filename']
# prepare base
disk_prepare(poolname, params.vol, pool_info['url'])
if params.domain:
specs = get_disks_spec(params.domain)
disk_config = get_disk_config(poolname, params.vol)
if disk_config['current'] not in list(specs.keys()):
raise ExecuteException('', 'domain %s does not have disk %s' % (params.domain, params.vol))
disk_config = get_disk_config(poolname, params.vol)
# Get all snapshots to delete (those whose backing-file chain contains backing_file), except the current one.
snapshots_to_delete = []
files = os.listdir('%s/snapshots' % disk_config['dir'])
for df in files:
try:
bf_paths = get_sn_chain_path('%s/snapshots/%s' % (disk_config['dir'], df))
if backing_file in bf_paths:
snapshots_to_delete.append(df)
except:
continue
# if snapshot to delete | |
pass
try:
os.remove("gauss.emcee")
except:
print("Could not remove file: gauss.emcee")
try:
os.remove("musig.emcee")
except:
print("Could not remove file: musig.emcee")
def sanity_MCMCSampler(self):
# Import some required modules
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
import pymc
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Creating a Gaussian with some noise
# Choose some parameters...
gPar = {"A":-5.0, "sig":10.0, "mu":10.0, "off":1.0, "lin":0.0}
# Calculate profile
x = arange(100) - 50.0
y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
* exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
# Add some noise
y += random.normal(0.0, 0.01, x.size)
# Now let us come to the fitting
# First, we create the Gauss1d fit object
gf = fuf.GaussFit1d()
# See what parameters are available
print("List of available parameters: ", gf.availableParameters())
# Set guess values for the parameters
gf["A"] = -10.0
gf["sig"] = 15.77
gf["off"] = 0.87
gf["mu"] = 7.5
# Let us see whether the assignment worked
print("Parameters and guess values: ", gf.parameters())
# Which parameters shall be variable during the fit?
# 'Thaw' those (the order is irrelevant)
gf.thaw(["A", "sig", "off", "mu"])
# Now start a simplex fit
gf.fit(x,y,yerr=ones(x.size)*0.01)
# Obtain the best-fit values derived by the simplex fit.
# They are to be used as start values for the MCMC sampling.
# Note that 'A' is missing - we will introduce this later.
X0 = {"sig":gf["sig"], "off":gf["off"], "mu":gf["mu"]}
# Now we specify the limits within which the individual parameters
# can be varied (for those parameters listed in the 'X0' dictionary).
Lims = {"sig":[-20.,20.], "off":[0.,2.], "mu":[5.,15.]}
# For the parameters contained in 'X0', define the step widths, which
# are to be used by the MCMC sampler. The steps are specified using
# the same scale/units as the actual parameters.
steps = {"A":0.01, "sig":0.1, "off":0.1, "mu":0.1}
# In this example, we wish to define our ``own'' PyMC variable for the parameter
# 'A'. This can be useful if nonstandard behavior is desired. Note that this
# is an optional parameter, and you could simply include the parameter 'A' in
# the framework of X0, Lims, and steps.
ppa = {}
ppa["A"] = pymc.Uniform("A", value=gf["A"], lower=-20., \
upper=10.0, doc="Amplitude")
# Start the sampling. The resulting Markov chain will be written
# to the file 'mcmcExample.tmp'. In the default configuration, pickle
# is used to write that file.
# To save the chain to a compressed 'hdf5'
# file, you have to specify the dbArgs keyword; e.g., use:
# dbArgs = {"db":"hdf5", "dbname":"mcmcExample.hdf5"}
gf.fitMCMC(x, y, X0, Lims, steps, yerr=ones(x.size)*0.01, \
pymcPars=ppa, iter=2500, burn=0, thin=1, \
dbfile="mcmcExample.tmp")
# Reload the database (here, this is actually not required, but it is
# if the Markov chain is to be analyzed later).
db = pymc.database.pickle.load('mcmcExample.tmp')
# Plot the trace of the amplitude, 'A'.
plt.hist(db.trace("A", 0)[:])
# plt.show()
def sanity_MCMCPriorExample(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
import pymc
# Create a Gauss-fit object
gf = fuf.GaussFit1d()
# Choose some parameters
gf["A"] = -0.65
gf["mu"] = 1.0
gf["lin"] = 0.0
gf["off"] = 1.1
gf["sig"] = 0.2
# Simulate data with noise
x = np.linspace(0., 2., 100)
y = gf.evaluate(x)
y += np.random.normal(0, 0.05, len(x))
gf.thaw(["A", "off", "mu", "sig"])
# Set up a normal prior for the offset parameter
# Note!---The name (first parameter) must correspond to that
# of the parameter.
# The expectation value is set to 0.9 while the width is given
# as 0.01 (tau = 1/sigma**2). The starting value is specified
# as 1.0.
offPar = pymc.Normal("off", mu=0.9, tau=(1./0.01)**2, value=1.0)
# Use a uniform prior for mu.
muPar = pymc.Uniform("mu", lower=0.95, upper=0.97, value=0.96)
# Collect the "extra"-variables in a dictionary using
# their names as keys
pymcPars = {"mu":muPar, "off":offPar}
# Specify starting values, X0, and limits, lims, for
# those parameter distributions not given specifically.
X0 = {"A":gf["A"], "sig":gf["sig"]}
lims = {"A":[-1.0,0.0], "sig":[0., 1.0]}
# Still, the steps dictionary has to contain all
# parameter distributions.
steps = {"A":0.02, "sig":0.02, "mu":0.01, "off":0.01}
# Carry out the MCMC sampling
gf.fitMCMC(x, y, X0, lims, steps, yerr=np.ones(len(x))*0.05, \
pymcPars=pymcPars, burn=1000, iter=3000)
# Setting parameters to mean values
for p in gf.freeParameters():
gf[p] = gf.MCMC.trace(p)[:].mean()
# Show the "data" and model in the upper panel
plt.subplot(2,1,1)
plt.title("Data and model")
plt.errorbar(x, y, yerr=np.ones(len(x))*0.05, fmt="bp")
# Plot lowest deviance solution
plt.plot(x, gf.evaluate(x), 'r--')
# Show the residuals in the lower panel
plt.subplot(2,1,2)
plt.title("Residuals")
plt.errorbar(x, y-gf.evaluate(x), yerr=np.ones(len(x))*0.05, fmt="bp")
plt.plot([min(x), max(x)], [0.0,0.0], 'r-')
#plt.show()
def sanity_autoMCMCExample1(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
x = np.linspace(0,30,1000)
gauss = fuf.GaussFit1d()
gauss["A"] = 1
gauss["mu"] = 23.
gauss["sig"] = 0.5
# Generate some "data" to fit
yerr = np.random.normal(0., 0.05, len(x))
y = gauss.evaluate(x) + yerr
# Thaw the parameters A, mu, and sig
gauss.thaw(["A","mu","sig"])
# Define the ranges, which are used to construct the
# uniform priors and step sizes.
# Note that for "mu", we give only a single value.
# In this case, the limits for the uniform prior will
# be constructed as [m0-1.5, m0+1.5], where m0 is the
# starting value, i.e., the current value of
# mu (23. in this case).
ranges = {"A":[0,10],"mu":3, "sig":[0.1,1.0]}
# Generate default input for X0, lims, and steps
X0, lims, steps = gauss.MCMCautoParameters(ranges)
# Show what happened...
print()
print("Auto-generated input parameters:")
print("X0: ", X0)
print("lims: ", lims)
print("steps: ", steps)
print()
# Call the usual sampler
gauss.fitMCMC(x, y, X0, lims, steps, yerr=yerr, iter=1000)
# and plot the results
plt.plot(x, y, 'k+')
plt.plot(x, gauss.evaluate(x), 'r--')
# plt.show()
def sanity_autoMCMCExample2(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
x = np.linspace(0,30,1000)
gauss = fuf.GaussFit1d()
gauss["A"] = 1
gauss["mu"] = 23.
gauss["sig"] = 0.5
# Generate some "data" to fit
yerr = np.random.normal(0., 0.05, len(x))
y = gauss.evaluate(x) + yerr
# Define the ranges, which are used to construct the
# uniform priors and step sizes.
# Note that for "mu", we give only a single value.
# In this case, the limits for the uniform prior will
# be constructed as [m0-1.5, m0+1.5], where m0 is the
# starting value, i.e., the current value of
# mu (23. in this case).
ranges = {"A":[0,10],"mu":3, "sig":[0.1,1.0]}
# Call the auto-sampler
# Note that we set picky to False here. In this case, the
# parameters specified in ranges will be thawed automatically.
# All parameters not mentioned there will be frozen.
gauss.autoFitMCMC(x, y, ranges, yerr=yerr, picky=False, iter=1000)
# and plot the results
plt.plot(x, y, 'k+')
plt.plot(x, gauss.evaluate(x), 'r--')
# plt.show()
def sanity_TAtut_createTrace(self):
"""
TA tutorial, all examples
"""
import numpy as np
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Starting with a Voigt profile
vp = fuf.Voigt1d()
# Set some values to create a model
vp["A"] = -0.4
vp["al"] = 0.7
vp["mu"] = 5500.
vp["ad"] = 0.3
vp["off"] = 1.0
x = np.linspace(5490., 5510., 200)
# Create our data with some noise
yerr = np.ones(len(x))*0.01
y = vp.evaluate(x) + np.random.normal(0.0, 0.01, len(x))
# Say, we have a guess of the parameters, which is, however,
# not entirely correct
vp["A"] = -0.376
vp["al"] = 0.9
vp["mu"] = 5499.7
vp["ad"] = 0.4
vp["off"] = 1.0
# Plot the data and our guess
plt.errorbar(x, y, yerr=yerr, fmt='b.-')
plt.plot(x, vp.evaluate(x), 'r--')
# plt.show()
# Thaw the parameters, which we wish to vary
# during the sampling
vp.thaw(["A", "al", "mu", "ad"])
# Use current parameters as starting point for the sampling
X0 = vp.freeParameters()
print("Starting point for sampling: ", X0)
# | |
# lib/python/batch_sim/parameter_tables.py
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for writing batch simulations like hover disturbances."""
import copy
import csv
import datetime
import json
import os
import re
import shutil
import tempfile
import textwrap
import gflags
import makani
from makani.config import mconfig
from makani.config import overrides_util
from makani.control import system_types
from makani.lib.python import build_info
from makani.lib.python import dict_util
from makani.lib.python import gsutil
from makani.lib.python.batch_sim import client as client_base
import makani.lib.python.batch_sim.batch_sim_util as batch_sim_util
from makani.lib.python.batch_sim.scoring_functions import plot
import matplotlib
import numpy
gflags.DEFINE_integer('parameter_seed', 12345,
'Seed for use in parameter table generation.')
gflags.DEFINE_boolean('randomize_parameter_seed', False,
'Select a random value for parameter table seed.')
gflags.DEFINE_boolean('upload_overview_data', False,
'Upload overview_data.json to batch sim folder on GCS.')
FLAGS = gflags.FLAGS
class _Random(object):
"""Container for a PRNG that can be seeded using --parameter_seed."""
def __init__(self):
self.seed = None
self._prng = None
def Generator(self):
if self.seed is None:
if FLAGS.randomize_parameter_seed:
self.seed = numpy.random.randint(numpy.iinfo(numpy.uint32).max)
else:
self.seed = FLAGS.parameter_seed
self._prng = numpy.random.RandomState(self.seed)
return self._prng
_random = _Random()
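# Usage sketch (assumes gflags FLAGS have already been parsed, e.g. in a batch-sim
# client's main()): drawing from the shared generator keeps parameter tables
# reproducible for a given --parameter_seed.
#   prng = _random.Generator()
#   perturbation = prng.uniform(-1.0, 1.0)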
class OverridesTable(object):
"""Abstract class for a table of simulations with parameter overrides.
Attributes:
name: The title of this table.
x_label: The x-axis label.
x_values: Numerical labels for the columns of this table.
y_label: The y-axis label.
y_values: Numerical labels for the rows of this table.
base_overrides: Overrides to apply to all table entries.
base_params: Parameter structure with base overrides.
"""
def __init__(self, name, x_label, x_values, y_label, y_values,
base_overrides=None):
if base_overrides is None:
base_overrides = {}
self.name = name
self.base_overrides = copy.deepcopy(base_overrides)
self.base_params = mconfig.MakeParams(
'common.all_params', overrides=copy.deepcopy(base_overrides),
override_method='derived')
self.x_label = x_label
self.x_values = tuple(x_values)
self.y_label = y_label
self.y_values = tuple(y_values)
def GetDimensions(self):
"""Return the dimensions of this table."""
return (len(self.x_values), len(self.y_values))
def GetOverrides(self, x_idx, y_idx):
"""Abstract method for getting the overrides for this simulation."""
raise NotImplementedError
def GetRangeValues(self, x_idx, y_idx):
"""Abstract method for getting the parameter ranges for this simulation."""
raise NotImplementedError
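# Minimal sketch of a concrete table (hypothetical; not part of this module).
# Subclasses map an (x_idx, y_idx) cell to a dict of config overrides; the
# override keys below are illustrative only. Concrete tables used with
# OverridesTableSimClient additionally provide GetOverridesDesc and
# GetOverridesLimits, which the client calls when rendering results.
class _ExampleWindGainTable(OverridesTable):
    """Sweeps a hypothetical controller gain against wind speed."""

    def GetOverrides(self, x_idx, y_idx):
        # Illustrative override paths; real keys depend on the config structure.
        return {
            'sim': {'phys_sim': {'wind_speed': self.x_values[x_idx]}},
            'control': {'example_gain': self.y_values[y_idx]},
        }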
class OverridesTableSimClient(client_base.BatchSimClient):
"""Abstract client for generating a table of tables."""
def __init__(self, output_dir, tables, scoring_functions,
columns=3, title=None, **kwargs):
"""Constructor for a disturbance sim.
Args:
output_dir: Directory in which outputs are to be written.
tables: A list of OverridesTables.
scoring_functions: A list of ScoringFunctions.
columns: Number of columns to use in displaying tables.
title: Title for the generated HTML.
**kwargs: See client_base.BatchSimClient.
"""
super(OverridesTableSimClient, self).__init__(**kwargs)
self._output_dir = output_dir
self._tables = tables
self._scoring_functions = scoring_functions
self._title = title
self._columns = columns
# This array should be populated by _GetConfigs with each
# configuration corresponding to the return value of
# _GetSimParameters.
self._linear_indices = [None for _ in range(self._GetNumTables())]
self._linear_index_to_table_index = []
self._overrides = []
for table_idx in range(self._GetNumTables()):
table_dim = self._tables[table_idx].GetDimensions()
self._linear_indices[table_idx] = [[None for _ in range(table_dim[1])]
for _ in range(table_dim[0])]
for x_idx, y_idx in numpy.ndindex(table_dim):
self._linear_indices[table_idx][x_idx][y_idx] = len(self._overrides)
self._linear_index_to_table_index.append(table_idx)
self._overrides += [
self._tables[table_idx].GetOverrides(x_idx, y_idx)
]
def _GetNumTables(self):
"""Method returning the number of tables.
Returns:
The number of parameter tables to be swept.
"""
return len(self._tables)
def _GetLinearIndex(self, idx):
"""Convert a 3-D index into a linear index."""
return self._linear_indices[idx[0]][idx[1]][idx[2]]
def _GetConfig(self, idx):
table_idx = self._linear_index_to_table_index[idx]
return mconfig.SimpleOverride(
overrides_util.PreprocessOverrides(self._overrides[idx]),
copy.deepcopy(self._tables[table_idx].base_params))
def _GenerateConfigs(self):
for idx in range(len(self._overrides)):
yield self._GetConfig(idx)
@client_base.JsonReducer
def _ReduceWorkerOutput(self, outputs):
matplotlib.use('Agg')
self._GenerateHtml(self._output_dir, outputs)
self._GenerateJson(self._output_dir, outputs)
def _GenerateHtml(self, output_dir, outputs):
# Create directory to place output files.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# File to write each results page.
def _GetOverridesPageFilename(i):
return 'overrides_%d.html' % i
def _GetOverridesFilename(i):
return 'overrides_%d.json' % i
def _GetStatsFilename(table_idx):
return 'table_%d.csv' % table_idx
def _GetScoreCheckboxes():
"""Returns an HTML title bar for selecting which scores to show."""
score_fn_names = [score_fn.GetName()
for score_fn in self._scoring_functions]
lines = ['<div id="score_container" style="text-align: center">'
'<div style="display: inline-block; text-align: left">'
'<h3>Scores</h3>',
'<button onclick="$(\'#score_display\').toggle();"'
'>Hide/show</button>',
'<div id="score_display">',
'<form id="active_scores" style="margin-top: 1rem;">']
for i, name in enumerate(score_fn_names):
elt_name = 'score' + str(i)
line = ''.join([
'<input type="checkbox" name="{0}" value="{0}" checked>'.format(
elt_name),
name,
'</input><br>'])
lines.append(line)
lines += ['</form>',
'<button id="select_all_scores">Select all</button>',
'<button id="clear_all_scores">Clear all</button>',
'</div>', # score_display
'</div>',
'</div>']
return '\n'.join(lines)
# Convert a worker's output into an HTML table cell.
def _GetScoreData(idx, outputs):
"""Get score data and link for a given index.
Args:
idx: Multi-index of this sim (see _GetLinearIndex).
outputs: Array of outputs from the workers.
Returns:
(default_score, all_scores, link) tuple.
"""
i = self._GetLinearIndex(idx)
output = outputs[i]
all_scores = []
default_score = float('nan')
if output['sim_success']:
all_scores = [score_fn.GetScore(output[score_fn.GetName()])
for score_fn in self._scoring_functions]
default_score = numpy.max(all_scores)
return (default_score, all_scores, _GetOverridesPageFilename(i))
def _WriteScoreTables(filename):
"""Write score data tables into HTML string.
Args:
filename: name of file where HTML string will be written.
"""
with open(filename, 'w') as f:
f.write(textwrap.dedent("""
<html>
<head>
<link rel="stylesheet" type="text/css" href="style.css">
<style>
#score_container {
position: fixed;
top: 1rem;
left: 2rem;
background-color: rgba(255, 255, 255, 1);
border-style: solid;
z-index: 10;
padding: 0rem 1rem 1rem 1rem;
overflow: auto;
max-height: 90%;
box-shadow: 4px 4px 3px rgba(120, 120, 120, 1);
}
</style>
</head>
<script src="jquery.js"></script>
<script src="scoring_function_util.js"></script>
<script>
$(document).ready(function() {
var scoreCheckboxes = $("#active_scores").find(":checkbox");
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
scoreCheckboxes.change(function() {
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
});
$("#select_all_scores").click(function() {
scoreCheckboxes.prop("checked", true);
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
});
$("#clear_all_scores").click(function() {
scoreCheckboxes.removeAttr("checked");
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
});
});
</script>
<body>
<center>
<h1>"""))
if self._title is not None:
f.write(self._title + ' ')
f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M %Z'))
f.write(textwrap.dedent("""\
</h1>
%s
%s
<table>""" % (_GetScoreCheckboxes(),
batch_sim_util.GetHtmlScoreTableLegend())))
for table_idx in range(self._GetNumTables()):
table_dim = self._tables[table_idx].GetDimensions()
if (table_idx % self._columns) == 0:
f.write('<tr>\n')
f.write('<td align="right">\n')
# Note that we transpose the array here.
table_values = [
[_GetScoreData([table_idx, x_idx, y_idx], outputs)
for x_idx in range(table_dim[0])]
for y_idx in range(table_dim[1])
]
f.write(batch_sim_util.GetHtmlScoreTable(
table_values,
'<a href="%s">%s</a>' % (_GetStatsFilename(table_idx),
self._tables[table_idx].name),
self._tables[table_idx].x_values,
self._tables[table_idx].x_label,
self._tables[table_idx].y_values,
self._tables[table_idx].y_label))
f.write('</td>\n')
if (table_idx % self._columns) == self._columns - 1:
f.write('</tr>\n')
f.write(textwrap.dedent("""
</center></body>
</html>"""))
def _GcsLink(filename):
url = ('https://storage.cloud.google.com/makani/batch_sim/'
+ self._sim_name + '/h5_logs/' + filename)
return '<a href="%s">%s</a>' % (url, filename)
def _WriteStats(table_idx, outputs):
"""Writes out batch sim output values into a unique csv file.
Args:
table_idx: batch sim number of a given Override table.
outputs: contains all HTML outputs for the batch sim at table_idx.
"""
filename = os.path.join(output_dir, _GetStatsFilename(table_idx))
table = self._tables[table_idx]
with open(filename, 'w') as f:
writer = csv.writer(f, delimiter=' ', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writerow([table.name])
labels = [
score_fn.GetName() for score_fn in self._scoring_functions
] + [
override_name
for override_name, _ in table.GetOverridesDesc(0, 0)
]
writer.writerow(labels)
for x_idx, y_idx in numpy.ndindex(table.GetDimensions()):
idx = self._GetLinearIndex((table_idx, x_idx, y_idx))
if not outputs[idx]['sim_success']:
continue
row = []
for score_fn in self._scoring_functions:
score_fn_output = outputs[idx][score_fn.GetName()]
row.append(score_fn.GetValue(score_fn_output))
for _, value in table.GetOverridesDesc(x_idx, y_idx):
row.append(value)
writer.writerow(row)
# For each output, write an overrides page.
assert len(outputs) == numpy.sum([
numpy.prod(self._tables[i].GetDimensions())
for i in range(self._GetNumTables())
])
for table_idx in range(self._GetNumTables()):
_WriteStats(table_idx, outputs)
table_dims = self._tables[table_idx].GetDimensions()
for x_idx, y_idx in numpy.ndindex(table_dims):
idx = self._GetLinearIndex((table_idx, x_idx, y_idx))
with open(os.path.join(output_dir,
_GetOverridesFilename(idx)), 'w') as f:
f.write(json.dumps(self._GetConfig(idx)))
with open(os.path.join(output_dir,
_GetOverridesPageFilename(idx)), 'w') as f:
header_info = [('Table', self._tables[table_idx].name),
('Overrides',
'<a href="%s">%s</a>' % (_GetOverridesFilename(idx),
_GetOverridesFilename(idx))),
('Full log', _GcsLink('%d.h5' % idx)),
('Sparse log (10 Hz)', _GcsLink('%d_sparse.h5' % idx)),
('Note', 'Log files are overwritten whenever sim name '
'%s is reused.' % self._sim_name)]
overrides = self._tables[table_idx].GetOverridesDesc(x_idx, y_idx)
overrides_lim = (self._tables[table_idx]
.GetOverridesLimits(x_idx, y_idx))
results = []
if outputs[idx]['sim_success']:
for score_fn in self._scoring_functions:
score_fn_output = outputs[idx][score_fn.GetName()]
results += [batch_sim_util.HtmlScoreTableValue(
name=score_fn.GetName(),
severity=score_fn.GetSeverity(),
limits=score_fn.Limits(),
value=score_fn.GetValue(score_fn_output),
score=score_fn.GetScore(score_fn_output)
)]
else:
err = batch_sim_util.EscapeHtml(
outputs[idx]['sim_error_message'])
results += [batch_sim_util.HtmlScoreTableValue(
name='Error Message',
severity=None,
limits=None,
value='<pre>%s</pre>' % err,
score=None
)]
# Need to zip the two overrides tuples together
overrides_info = []
for (description, limits) in zip(overrides, overrides_lim):
overrides_info += [(description + limits)]
command_info = [
('Command line',
'run_sim -o \'%s\'' % json.dumps(dict_util.UpdateNestedDict(
self._tables[table_idx].base_overrides,
self._overrides[idx])))
]
if 'events' in outputs[idx]:
with tempfile.NamedTemporaryFile(
suffix='.html', delete=False) as temp_fp:
plot.PlotEvents(outputs[idx]['events'], temp_fp.name)
with open(temp_fp.name) as fp:
event_html = fp.read()
os.remove(temp_fp.name)
else:
event_html = ''
f.write(batch_sim_util.GetHtmlScoreTableResultPage(
results, header_info, overrides_info, command_info, event_html))
_WriteScoreTables(output_dir + '/old_report.html')
# Copy CSS and JS files to the output directory.
shutil.copyfile(os.path.join(
makani.HOME, 'lib/python/batch_sim/overrides_table_style.css'),
os.path.join(output_dir, 'style.css'))
shutil.copy(os.path.join(
makani.HOME, 'lib/python/batch_sim/scoring_function_util.js'),
output_dir)
shutil.copy(os.path.join(makani.HOME, 'lib/python/batch_sim/jquery.js'),
output_dir)
os.chmod(os.path.join(output_dir, 'jquery.js'), 0o770)
def _GenerateJson(self, output_dir, outputs):
| |
"""
Composition featurizers for compositions with ionic data.
"""
import itertools
import numpy as np
from pymatgen.core.composition import Composition
from matminer.featurizers.base import BaseFeaturizer
from matminer.featurizers.utils.stats import PropertyStats
from matminer.featurizers.utils.oxidation import has_oxidation_states, is_ionic
from matminer.utils.data import (
DemlData,
PymatgenData,
)
from matminer.featurizers.composition.composite import ElementProperty
class CationProperty(ElementProperty):
"""
Features based on properties of cations in a material
Requires that oxidation states have already been determined. Property
statistics weighted by composition.
Features: Based on the statistics of the data_source chosen, computed
by element stoichiometry. The format generally is:
"{data source} {statistic} {property}"
For example:
"DemlData range magn_moment" # Range of magnetic moment via Deml et al. data
For a list of all statistics, see the PropertyStats documentation; for a
list of all attributes available for a given data_source, see the
documentation for the data sources (e.g., PymatgenData, MagpieData,
MatscholarElementData, etc.).
"""
@classmethod
def from_preset(cls, preset_name):
if preset_name == "deml":
data_source = "deml"
features = [
"total_ioniz",
"xtal_field_split",
"magn_moment",
"so_coupling",
"sat_magn",
]
stats = ["minimum", "maximum", "range", "mean", "std_dev"]
else:
raise ValueError('Preset "%s" not found' % preset_name)
return cls(data_source, features, stats)
def feature_labels(self):
return [f + " of cations" for f in super().feature_labels()]
def featurize(self, comp):
# Check if oxidation states are present
if not has_oxidation_states(comp):
raise ValueError("Oxidation states have not been determined")
if not is_ionic(comp):
raise ValueError("Composition is not ionic")
# Prepare to store the attributes
all_attributes = []
# Initialize stats computer
pstats = PropertyStats()
# Get the cation species and fractions
cations, fractions = zip(*[(s, f) for s, f in comp.items() if s.oxi_state > 0])
for attr in self.features:
elem_data = [self.data_source.get_charge_dependent_property_from_specie(c, attr) for c in cations]
for stat in self.stats:
all_attributes.append(pstats.calc_stat(elem_data, stat, fractions))
return all_attributes
def citations(self):
return [
"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density "
"functional theory total energies and enthalpies of formation of metal-nonmetal "
"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, "
"number={44}, journal={ChemInform}, author={<NAME>. and <NAME> and "
"<NAME> and <NAME>}, year={2016}}"
]
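# Hedged usage sketch (not part of the original file): CationProperty requires a
# composition that already carries oxidation states; pymatgen's Composition accepts
# species strings such as "Fe3+" directly. Feature labels follow the
# "{data source} {statistic} {property} of cations" pattern described in the class
# docstring. Never executed here; shown only for illustration.
def _cation_property_example():  # pragma: no cover
    comp = Composition({"Fe3+": 2, "O2-": 3})  # charge-balanced, ionic
    featurizer = CationProperty.from_preset("deml")
    return dict(zip(featurizer.feature_labels(), featurizer.featurize(comp)))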
class OxidationStates(BaseFeaturizer):
"""
Statistics about the oxidation states for each specie.
Features are concentration-weighted statistics of the oxidation states.
"""
def __init__(self, stats=None):
"""
Args:
stats - (list of string), which statistics to compute
"""
self.stats = stats or ["minimum", "maximum", "range", "std_dev"]
@classmethod
def from_preset(cls, preset_name):
if preset_name == "deml":
stats = ["minimum", "maximum", "range", "std_dev"]
else:
raise ValueError('Preset "%s" not found' % preset_name)
return cls(stats=stats)
def featurize(self, comp):
# Check if oxidation states are present
if not has_oxidation_states(comp):
raise ValueError("Oxidation states have not been determined")
# Get the oxidation states and their proportions
oxid_states, fractions = zip(*[(s.oxi_state, f) for s, f in comp.items()])
# Compute statistics
return [PropertyStats.calc_stat(oxid_states, s, fractions) for s in self.stats]
def feature_labels(self):
return ["%s oxidation state" % s for s in self.stats]
def citations(self):
return [
"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density "
"functional theory total energies and enthalpies of formation of metal-nonmetal "
"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, "
"number={44}, journal={ChemInform}, author={<NAME>. and <NAME> and "
"Wolverton, Chris and <NAME>}, year={2016}}"
]
def implementors(self):
return ["<NAME>"]
class IonProperty(BaseFeaturizer):
"""
Ionic property attributes. Similar to ElementProperty.
"""
def __init__(self, data_source=PymatgenData(), fast=False):
"""
Args:
data_source - (OxidationStateMixin) - An AbstractData class that supports
the `get_oxidation_states` method.
fast - (boolean) whether to assume elements exist in a single oxidation state,
which can dramatically accelerate the calculation of whether an ionic compound
is possible, but will miss heterovalent compounds like Fe3O4.
"""
self.data_source = data_source
self.fast = fast
def featurize(self, comp):
"""
Ionic character attributes
Args:
comp: (Composition) Composition to be featurized
Returns:
cpd_possible (bool): Indicates if a neutral ionic compound is possible
max_ionic_char (float): Maximum ionic character between two atoms
avg_ionic_char (float): Average ionic character
"""
elements, fractions = zip(*comp.element_composition.items())
if len(elements) < 2: # Single element
cpd_possible = True
max_ionic_char = 0
avg_ionic_char = 0
else:
# Get magpie data for each element
elec = self.data_source.get_elemental_properties(elements, "X")
# Determine if neutral compound is possible
if has_oxidation_states(comp):
charges, fractions = zip(*[(s.oxi_state, f) for s, f in comp.items()])
cpd_possible = np.isclose(np.dot(charges, fractions), 0)
else:
oxidation_states = [self.data_source.get_oxidation_states(e) for e in elements]
if self.fast:
# Assume each element can have only 1 oxidation state
cpd_possible = False
for ox in itertools.product(*oxidation_states):
if np.isclose(np.dot(ox, fractions), 0):
cpd_possible = True
break
else:
# Use pymatgen's oxidation state checker which
# can detect whether an element takes >1 oxidation state (as in Fe3O4)
oxi_state_dict = dict(zip([e.symbol for e in elements], oxidation_states))
cpd_possible = len(comp.oxi_state_guesses(oxi_states_override=oxi_state_dict)) > 0
# Ionic character attributes
atom_pairs = itertools.combinations(range(len(elements)), 2)
el_frac = list(np.true_divide(fractions, sum(fractions)))
ionic_char = []
avg_ionic_char = 0
for pair in atom_pairs:
XA = elec[pair[0]]
XB = elec[pair[1]]
ionic_char.append(1.0 - np.exp(-0.25 * ((XA - XB) ** 2)))
avg_ionic_char += el_frac[pair[0]] * el_frac[pair[1]] * ionic_char[-1]
max_ionic_char = np.max(ionic_char)
return [cpd_possible, max_ionic_char, avg_ionic_char]
def feature_labels(self):
labels = ["compound possible", "max ionic char", "avg ionic char"]
return labels
def citations(self):
citation = [
"@article{ward_agrawal_choudary_wolverton_2016, title={A general-purpose "
"machine learning framework for predicting properties of inorganic materials}, "
"volume={2}, DOI={10.1038/npjcompumats.2017.28}, number={1}, journal={npj "
"Computational Materials}, author={<NAME> and <NAME> and Choudhary, "
"Alok and <NAME>}, year={2016}}"
]
return citation
def implementors(self):
return ["<NAME>", "<NAME>"]
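# Hedged usage sketch (not part of the original file): IonProperty works on plain
# compositions; with the default fast=False it falls back to pymatgen's
# oxidation-state guesser, so a heterovalent compound such as Fe3O4 can still be
# recognized as charge-balanced. Never executed here; shown only for illustration.
def _ion_property_example():  # pragma: no cover
    featurizer = IonProperty()
    values = featurizer.featurize(Composition("Fe3O4"))
    return dict(zip(featurizer.feature_labels(), values))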
class ElectronAffinity(BaseFeaturizer):
"""
Calculate average electron affinity times formal charge of anion elements.
Note: The formal charges must already be computed before calling `featurize`.
Generates average (electron affinity*formal charge) of anions.
"""
def __init__(self):
self.data_source = DemlData()
def featurize(self, comp):
"""
Args:
comp: (Composition) Composition to be featurized
Returns:
avg_anion_affin (single-element list): average electron affinity*formal charge of anions
"""
# Check if oxidation states have been computed
if not has_oxidation_states(comp):
raise ValueError("Composition lacks oxidation states")
# Get the species and fractions
species, fractions = zip(*comp.items())
# Determine which species are anions
anions, fractions = zip(*[(s, f) for s, f in zip(species, fractions) if s.oxi_state < 0])
# Compute the electron_affinity*formal_charge for each anion
electron_affin = [
self.data_source.get_elemental_property(s.element, "electron_affin") * s.oxi_state for s in anions
]
# Compute the average affinity
avg_anion_affin = PropertyStats.mean(electron_affin, fractions)
return [avg_anion_affin]
def feature_labels(self):
return ["avg anion electron affinity"]
def citations(self):
citation = [
"@article{deml_ohayre_wolverton_stevanovic_2016, title={Predicting density "
"functional theory total energies and enthalpies of formation of metal-nonmetal "
"compounds by linear regression}, volume={47}, DOI={10.1002/chin.201644254}, "
"number={44}, journal={ChemInform}, author={<NAME>. and <NAME> and "
"<NAME> and <NAME>}, year={2016}}"
]
return citation
def implementors(self):
return ["<NAME>", "<NAME>"]
class ElectronegativityDiff(BaseFeaturizer):
"""
Features from electronegativity differences between anions and cations.
These features are computed by first determining the concentration-weighted
average electronegativity of the anions. For example, the average
electronegativity of the anions in CaCoSO is equal to 1/2 of that of S and 1/2 of that of O.
We then compute the difference between the electronegativity of each cation
and the average anion electronegativity.
The feature values are then determined based on the concentration-weighted statistics
in the same manner as ElementProperty features. For example, one value could be
the mean electronegativity difference over all the anions.
Parameters:
stats: Property statistics to compute
Generates average electronegativity difference between cations and anions
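A worked example (sketch): for CaCoSO with species Ca2+, Co2+, S2- and O2-,
the average anion electronegativity is 0.5*X(S) + 0.5*X(O); one difference is
then computed per cation (Ca2+, Co2+) against that average, and the requested
statistics (minimum, maximum, range, ...) are taken over these per-cation
differences, weighted by the cation fractions.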
"""
def __init__(self, stats=None):
if stats is None:
self.stats = ["minimum", "maximum", "range", "mean", "std_dev"]
else:
self.stats = stats
def featurize(self, comp):
"""
Args:
comp: Pymatgen Composition object
Returns:
en_diff_stats (list of floats): Property stats of electronegativity difference
"""
# Check if oxidation states have been determined
if not has_oxidation_states(comp):
raise ValueError("Oxidation states have not yet been determined")
if not is_ionic(comp):
raise ValueError("Composition is not ionic")
# Determine the average anion EN
anions, anion_fractions = zip(*[(s, x) for s, x in comp.items() if s.oxi_state < 0])
# If there are no anions, raise an Exception
if len(anions) == 0:
raise Exception("Features not applicable: Compound contains no anions")
anion_en = [s.element.X for s in anions]
mean_anion_en = PropertyStats.mean(anion_en, anion_fractions)
# Determine the EN difference for each cation
cations, cation_fractions = zip(*[(s, x) for s, x in comp.items() if s.oxi_state > 0])
# If there are no cations, raise an Exception
# It is possible to construct a non-charge-balanced Composition,
# so we have to check for both the presence of anions and cations
if len(cations) == 0:
raise Exception("Features not applicable: Compound contains no cations")
self.A = 2.0e3
self.n = 4.0
self.E = 471e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Quartz_Diorite-Hansen_Carter_1982":
# taken from Carter and Tsenn (1986). Flow properties of continental lithosphere - page 18.
self.A = pow(10,-1.5)
self.n = 2.4
self.E = 212e3
self.V = 0
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Diabase-Caristan_1982":
# Taken from <NAME>'Ars et al./Tectonophysics (1999). Hydrothermalism and Diapirism in the Archaean: gravitational instability constrains. - page 5
self.A = 6e-2
self.n = 3.05
self.E = 276e3
self.V = 1
self.tensorCorrection = "UniAxial"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Tumut_Pond_Serpentinite-Raleigh_Paterson_1965":
# Taken from <NAME>'Ars et al./Tectonophysics (1999). Hydrothermalism and Diapirism in the Archaean: gravitational instability constrains. - page 5
self.A = 6.3e-7
self.n = 2.8
self.E = 66e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Wet_Quarzite-Ranalli_1995":
# used in <NAME>., <NAME>. and <NAME>. (2010),
# Influence of tectonic overpressure on P-T paths of HP-UHP rocks in continental collision zones: thermomechanical modelling.
# Journal of Metamorphic Geology, 28: 227-247. doi: 10.1111/j.1525-1314.2009.00864.x Table 2
# in Ranalli 1995 (page 334 Table 10.3)
self.A = 3.2e-4
self.n = 2.3
self.E = 154e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Quarzite-Ranalli_1995":
# used in LI, <NAME>., <NAME>. and <NAME>. (2010),
# Influence of tectonic overpressure on P-T paths of HP-UHP rocks in continental collision zones: thermomechanical modelling.
# Journal of Metamorphic Geology, 28: 227-247. doi: 10.1111/j.1525-1314.2009.00864.x Table 2
# in Ranalli 1995 (page 334 Table 10.3)
self.A = 6.7e-6
self.n = 2.4
self.E = 156e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Mafic_Granulite-Ranalli_1995":
# used in <NAME>., <NAME>. and <NAME>. (2010),
# Influence of tectonic overpressure on P-T paths of HP-UHP rocks in continental collision zones: thermomechanical modelling.
# Journal of Metamorphic Geology, 28: 227-247. doi: 10.1111/j.1525-1314.2009.00864.x Table 2
# in Ranalli 1995 (page 334 Table 10.3)
self.A = 1.4e4
self.n = 4.2
self.E = 445e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Maryland_strong_diabase-Mackwell_et_al_1998":
# Mackwell, Zimmerman & Kohlstedt (1998). High-temperature deformation
# of dry diabase with application to tectonics on Venus. JGR 103. B1. 975-984. page 980
self.A = 8
self.n = 4.7
self.E = 485e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Wet_Quarzite-Ueda_et_al_2008":
# Parameters used in Ueda et al (PEPI 2008)
self.A = pow(10,-3.5)
self.n = 2.3
self.E = 154e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Diabase-Huismans_et_al_2001":
# parameters used in Huismans et al 2001
self.A = 3.2e-20
self.n = 3.05
self.E = 276e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Granite-Huismans_et_al_2001":
# parameters used in Huismans et al 2001
self.A = 3.16e-26
self.n = 3.3
self.E = 186.5e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Dry_Upper_Crust-Schmalholz_Kaus_Burg_2009":
# granite - <NAME> (1999)
self.A = 3.16e-26
self.n = 3.3
self.E = 190e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Weak_Lower_Crust-Schmalholz_Kaus_Burg_2009":
# diabase - <NAME> (1999)
self.A = 3.2e-20
self.n = 3.0
self.E = 276e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Plagioclase_An75-Ranalli_1995":
self.A = 3.3e-4
self.n = 3.2
self.E = 238e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Wet_Olivine_disl_creep-Hirth_Kohlstedt_2003":
# after <NAME>. & Kohlstedt (2003), D. Rheology of the upper mantle and the mantle wedge: A view from the experimentalists.
# Inside the Subduction Factory, 83-105. Table 1, "wet dislocation" parameters
self.A = 1600
self.n = 3.5
self.E = 520e3
self.V = 22e-6
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.C_OH_0 = 1000
self.r = 1.2
elif flowLaw == "Wet_Olivine_disl_creep-Hirth_Kohlstedt_2003_constant_C_OH":
# after <NAME>. & Kohlstedt (2003), D. Rheology of the upper mantle and the mantle wedge: A view from the experimentalists.
# Inside the Subduction Factory, 83-105. Table 1, "wet dislocation (constant C_OH)" parameters
self.A = 90
self.n = 3.5
self.E = 480e3
self.V = 11e-6
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.C_OH_0 = 1000
self.r = 1.2
elif flowLaw == "Dry_Olivine_disl_creep-Hirth_Kohlstedt_2003":
# after <NAME>. & Kohlstedt (2003), D. Rheology of the upper mantle and the mantle wedge: A view from the experimentalists.
# Inside the Subduction Factory, 83-105. Table 1, "dry dislocation" parameters
self.A = 1.1e5
self.n = 3.5
self.E = 530e3
self.V = 15e-6
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Olivine-Burg_Podladchikov_1999":
# after Burg and Podladchikov 1999
self.A = 7.1e-14
self.n = 3.0
self.E = 510e3
self.V = 0
self.tensorCorrection = "SimpleShear"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Wet_Upper_Mantle-Burg_Schmalholz_2008":
# used in SchmalholzKausBurg(2009), Geology (wet olivine)
self.A = 2e-21
self.n = 4.0
self.E = 471e3
self.V = 0
self.tensorCorrection = "SimpleShear"
self.MPa = False
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Granite-Tirel_et_al_2008":
# used in SchmalholzKausBurg(2009), Geology
self.A = 1.25e-9
self.n = 3.2
self.E = 123e3
self.V = 0
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Ara_rocksalt-Urai_et_al.(2008)":
# Ara rocksalt as published in Urai et al.(2008)
self.A = 1.82e-9
self.n = 5
self.E = 32.4e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Polycrystalline_Anhydrite-Mueller_and_Briegel(1978)":
self.A = 3.16228e1
self.n = 2
self.E = 152.3e3
self.V = 0
self.tensorCorrection = "UniAxial"
self.MPa = True
self.C_OH_0 = 1
self.r = 0
else:
raise ValueError("No such dislocation creep profile: %s! " % flowLaw)
self.correctUnitsAndComputeB()
def correctUnitsAndComputeB(self):
n = self.n
if self.MPa == True:
self.A *= 1e6**(-n)
self.B = self.A * self.C_OH_0**(self.r);
if (self.tensorCorrection == "UniAxial"):
F2 = pow(0.5,(n-1)/n) / pow(3,(n+1)/(2*n)) # F2 = 1/2^((n-1)/n)/3^((n+1)/2/n)
elif (self.tensorCorrection == "SimpleShear"):
F2 = pow(0.5,(2*n-1)/n) # F2 = 1/2^((2*n-1)/n)
elif (self.tensorCorrection == "None"):
F2 = 0.5
else:
raise ValueError("Unknown tensor correction in vDisl!")
self.B *= (2.0*F2)**(-n)
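# Worked unit-conversion example (sketch): for "Wet_Quarzite-Ranalli_1995",
# A = 3.2e-4 MPa^-n s^-1 with n = 2.3. Since MPa is True, correctUnitsAndComputeB
# rescales A to Pa units, A_Pa = 3.2e-4 * (1e6)**(-2.3) ~ 5.1e-18 Pa^-n s^-1,
# and then folds in the uniaxial tensor-correction factor F2 to obtain B.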
class DiffusionCreep(Frozen):
_Frozen__List = ["flowLaw","A","B","E","V","tensorCorrection","MPa","d0","p","C_OH_0","r","isActive"]
def __init__(self,flowLaw="Default",eta0=1.0):
self.isActive = True
self.flowLaw = flowLaw
self.B = 0
if flowLaw == "Off":
self.isActive = False
flowLaw = "Default"
if flowLaw == "Default":
self.A = 0.5/eta0
self.E = 0.0
self.V = 0.0
self.tensorCorrection = "None"
self.MPa = False
self.d0 = 1.0
self.p = 0.0
self.C_OH_0 = 1.0
self.r = 0.0
elif flowLaw == "Dry_Olivine_diff_creep-Hirth_Kohlstedt_2003":
# after <NAME>. & Kohlstedt (2003), D. Rheology of the upper mantle and the mantle wedge: A view from the experimentalists.
self.A = 1.5e9
self.E = 375e3
self.V = 5e-6
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.d0 = 10e3
self.p = 3
self.C_OH_0 = 1
self.r = 0
elif flowLaw == "Wet_Olivine_diff_creep-Hirth_Kohlstedt_2003_constant_C_OH":
# after <NAME>. & Kohlstedt (2003), D. Rheology of the upper mantle and the mantle wedge: A view from the experimentalists.
self.A = 1.0e6
self.E = 335e3
self.V = 4e-6
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.d0 = 10e3
self.p = 3
self.C_OH_0 = 1000
self.r = 1
elif flowLaw == "Wet_Olivine_diff_creep-Hirth_Kohlstedt_2003":
# after <NAME>. & Kohlstedt (2003), D. Rheology of the upper mantle and the mantle wedge: A view from the experimentalists.
self.A = 2.5e7
self.E = 375e3
self.V = 10e-6
self.tensorCorrection = "SimpleShear"
self.MPa = True
self.d0 = 10e3
self.p = 3
self.C_OH_0 = | |
# pycroft/lib/user.py (repo: agdsn/pycroft)
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
"""
pycroft.lib.user
~~~~~~~~~~~~~~~~
This module contains user-management business logic: user creation, moving in,
password and WiFi-password handling, member requests, and related helpers.
:copyright: (c) 2012 by AG DSN.
"""
import os
import re
from datetime import datetime, timedelta, date
from difflib import SequenceMatcher
from typing import Iterable
from sqlalchemy import func, select, Boolean, String
from sqlalchemy.engine import Row
from pycroft import config, property
from pycroft.helpers import user as user_helper, AttrDict, utc
from pycroft.helpers.errorcode import Type1Code, Type2Code
from pycroft.helpers.i18n import deferred_gettext
from pycroft.helpers.interval import closed, closedopen
from pycroft.helpers.printing import generate_user_sheet as generate_pdf
from pycroft.helpers.user import generate_random_str
from pycroft.lib.address import get_or_create_address
from pycroft.lib.exc import PycroftLibException
from pycroft.lib.facilities import get_room
from pycroft.lib.finance import user_has_paid
from pycroft.lib.logging import log_user_event, log_event
from pycroft.lib.mail import MailTemplate, Mail, UserConfirmEmailTemplate, \
UserCreatedTemplate, \
UserMovedInTemplate, MemberRequestPendingTemplate, \
MemberRequestDeniedTemplate, \
MemberRequestMergedTemplate, UserResetPasswordTemplate
from pycroft.lib.membership import make_member_of, remove_member_of
from pycroft.lib.net import get_free_ip, MacExistsException, \
get_subnets_for_room
from pycroft.lib.swdd import get_relevant_tenancies
from pycroft.lib.task import schedule_user_task
from pycroft.model import session
from pycroft.model.address import Address
from pycroft.model.facilities import Room
from pycroft.model.finance import Account
from pycroft.model.host import IP, Host, Interface
from pycroft.model.session import with_transaction
from pycroft.model.task import TaskType, UserTask, TaskStatus
from pycroft.model.task_serialization import UserMoveParams, UserMoveOutParams, \
UserMoveInParams
from pycroft.model.traffic import TrafficHistoryEntry
from pycroft.model.user import User, UnixAccount, PreMember, BaseUser, \
RoomHistoryEntry, \
PropertyGroup
from pycroft.model.webstorage import WebStorage
from pycroft.task import send_mails_async
mail_confirm_url = os.getenv('MAIL_CONFIRM_URL')
password_reset_url = os.getenv('PASSWORD_RESET_URL')
def encode_type1_user_id(user_id):
"""Append a type-1 error detection code to the user_id."""
return f"{user_id:04d}-{Type1Code.calculate(user_id):d}"
type1_user_id_pattern = re.compile(r"^(\d{4,})-(\d)$")
def decode_type1_user_id(string):
"""
If a given string is a type1 user id return a (user_id, code) tuple else
return None.
:param unicode string: Type1 encoded user ID
:returns (number, code) pair or None
:rtype (Integral, Integral) | None
"""
match = type1_user_id_pattern.match(string)
return match.groups() if match else None
def encode_type2_user_id(user_id):
"""Append a type-2 error detection code to the user_id."""
return f"{user_id:04d}-{Type2Code.calculate(user_id):02d}"
type2_user_id_pattern = re.compile(r"^(\d{4,})-(\d{2})$")
def decode_type2_user_id(string):
"""
If a given string is a type2 user id return a (user_id, code) tuple else
return None.
:param unicode string: Type2 encoded user ID
:returns (number, code) pair or None
:rtype (Integral, Integral) | None
"""
match = type2_user_id_pattern.match(string)
return match.groups() if match else None
def check_user_id(string):
"""
Check if the given string is a valid user id (type1 or type2).
:param string: Type1 or Type2 encoded user ID
:returns True if user id was valid, otherwise False
:rtype Boolean
"""
if not string:
return False
idsplit = string.split("-")
if len(idsplit) != 2:
return False
uid = idsplit[0]
code = idsplit[1]
if len(code) == 2:
# Type2 code
verify = encode_type2_user_id(int(uid))
else:
# Type1 code
verify = encode_type1_user_id(int(uid))
if string == verify:
return True
else:
return False
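# Hypothetical usage sketch (not part of the original module): round-trips the
# type-1/type-2 user-id helpers defined above; the concrete id 1234 is arbitrary.
def _user_id_examples():  # pragma: no cover
    uid = 1234
    t1 = encode_type1_user_id(uid)   # "1234-<type-1 check digit>"
    t2 = encode_type2_user_id(uid)   # "1234-<two-digit type-2 code>"
    assert decode_type1_user_id(t1) is not None
    assert decode_type2_user_id(t2) is not None
    assert check_user_id(t1) and check_user_id(t2)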
class HostAliasExists(ValueError):
pass
def setup_ipv4_networking(host):
"""Add suitable ips for every interface of a host"""
subnets = get_subnets_for_room(host.room)
for interface in host.interfaces:
ip_address, subnet = get_free_ip(subnets)
new_ip = IP(interface=interface, address=ip_address,
subnet=subnet)
session.session.add(new_ip)
def store_user_sheet(new_user, wifi, user=None, timeout=15, plain_user_password=None,
generation_purpose='', plain_wifi_password=''):
"""Generate a user sheet and store it in the WebStorage.
Returns the generated `WebStorage` object holding the pdf.
:param bool new_user: generate page with user details
:param bool wifi: generate page with wifi credentials
:param int timeout: The lifetime in minutes
Necessary in every case:
:param User user: A pycroft user
Only necessary if new_user=True:
:param str plain_user_password:
Only necessary if wifi=True:
:param str plain_wifi_password: The password for wifi
Optional:
:param str generation_purpose:
"""
pdf_data = generate_user_sheet(
new_user, wifi, user,
plain_user_password=plain_user_password,
generation_purpose=generation_purpose,
plain_wifi_password=plain_wifi_password,
)
pdf_storage = WebStorage(data=pdf_data,
expiry=session.utcnow() + timedelta(minutes=timeout))
session.session.add(pdf_storage)
return pdf_storage
def get_user_sheet(sheet_id):
"""Fetch the storage object given an id.
If not existent, return None.
"""
WebStorage.auto_expire()
if sheet_id is None:
return None
if (storage := WebStorage.get(sheet_id)) is None:
return None
return storage.data
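# Usage sketch (hypothetical flow, not part of this module): generate a sheet,
# keep only its id, and fetch the PDF bytes later, while the WebStorage row is
# still within its expiry window:
#   storage = store_user_sheet(True, False, user=some_user,
#                              plain_user_password=pw, timeout=15)
#   sheet_id = storage.id   # available once the session has flushed
#   ...
#   pdf_bytes = get_user_sheet(sheet_id)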
@with_transaction
def reset_password(user, processor):
if not can_target(user, processor):
raise PermissionError("cannot reset password of a user with a"
" greater or equal permission level.")
plain_password = user_helper.generate_password(12)
user.password = plain_password
message = deferred_gettext("Password was reset")
log_user_event(author=processor,
user=user,
message=message.to_json())
return plain_password
def can_target(user, processor):
if user != processor:
return user.permission_level < processor.permission_level
else:
return True
@with_transaction
def reset_wifi_password(user: User, processor: User) -> str:
plain_password = generate_wifi_password()
user.wifi_password = plain_password
message = deferred_gettext("WIFI-Password was reset")
log_user_event(author=processor,
user=user,
message=message.to_json())
return plain_password
def maybe_setup_wifi(user: User, processor: User) -> str | None:
"""If wifi is available, sets a wifi password."""
if user.room and user.room.building.wifi_available:
return reset_wifi_password(user, processor)
return None
@with_transaction
def change_password(user, password):
# TODO: verify password complexity
user.password = password
message = deferred_gettext("Password was changed")
log_user_event(author=user,
user=user,
message=message.to_json())
def generate_wifi_password():
return user_helper.generate_password(12)
def create_user(
name: str, login: str, email: str, birthdate: date,
groups: list[PropertyGroup], processor: User | None, address: Address,
passwd_hash: str = None,
send_confirm_mail: bool = False
):
"""Create a new member
Create a new user with a generated password, finance- and unix account, and make him member
of the `config.member_group` and `config.network_access_group`.
:param name: The full name of the user (e.g. <NAME>)
:param login: The unix login for the user
:param email: E-Mail address of the user
:param birthdate: Date of birth
:param groups: The initial groups of the new user
:param processor: The processor
:param address: Where the user lives. May or may not come from a room.
:param passwd_hash: Use password hash instead of generating a new password
    :param send_confirm_mail: If a confirmation mail should be sent to the user
:return:
"""
now = session.utcnow()
plain_password: str | None = user_helper.generate_password(12)
# create a new user
new_user = User(
login=login,
name=name,
email=email,
registered_at=now,
account=Account(name="", type="USER_ASSET"),
        password=plain_password,
wifi_password=generate_wifi_password(),
birthdate=birthdate,
address=address
)
processor = processor if processor is not None else new_user
if passwd_hash:
new_user.passwd_hash = passwd_hash
plain_password = None
account = UnixAccount(home_directory=f"/home/{login}")
new_user.unix_account = account
with session.session.begin_nested():
session.session.add(new_user)
session.session.add(account)
new_user.account.name = deferred_gettext("User {id}").format(
id=new_user.id).to_json()
for group in groups:
make_member_of(new_user, group, processor, closed(now, None))
log_user_event(author=processor,
message=deferred_gettext("User created.").to_json(),
user=new_user)
user_send_mail(new_user, UserCreatedTemplate(), True)
if email is not None and send_confirm_mail:
send_confirmation_email(new_user)
return new_user, plain_password
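# Illustrative call of create_user (all values are made up; the processor,
# groups and Address instance must already exist in the current session):
#   new_user, plain_pw = create_user(
#       name="Test User", login="testuser", email="test@example.org",
#       birthdate=date(2000, 1, 1), groups=[config.pre_member_group],
#       processor=admin_user, address=some_address,
#   )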
@with_transaction
def move_in(
user: User,
building_id: int, level: int, room_number: str,
mac: str | None,
processor: User | None = None,
    birthdate: date | None = None,
host_annex: bool = False,
begin_membership: bool = True,
when: datetime | None = None
):
"""Move in a user in a given room and do some initialization.
The user is given a new Host with an interface of the given mac, a
UnixAccount, a finance Account, and is made member of important
groups. Networking is set up.
:param User user: The user to move in
:param building_id:
:param level:
:param room_number:
    :param mac: The MAC address of the user's PC.
    :param processor:
    :param birthdate: Date of birth
:param host_annex: when true: if MAC already in use,
annex host to new user
:param begin_membership: Starts a membership if true
:param when: The date at which the user should be moved in
:return: The user object.
"""
if when and when > session.utcnow():
task_params = UserMoveInParams(
building_id=building_id, level=level, room_number=room_number,
mac=mac, birthdate=birthdate,
host_annex=host_annex, begin_membership=begin_membership
)
return schedule_user_task(task_type=TaskType.USER_MOVE_IN,
due=when,
user=user,
parameters=task_params,
processor=processor)
if user.room is not None:
raise ValueError("user is already living in a room.")
room = get_room(building_id, level, room_number)
if birthdate:
user.birthdate = birthdate
if begin_membership:
for group in {config.external_group, config.pre_member_group}:
if user.member_of(group):
remove_member_of(user, group, processor, closedopen(session.utcnow(), None))
for group in {config.member_group, config.network_access_group}:
if not user.member_of(group):
make_member_of(user, group, processor, closed(session.utcnow(), None))
if room:
user.room = room
user.address = room.address
if mac and user.birthdate:
interface_existing = Interface.q.filter_by(mac=mac).first()
if interface_existing is not None:
if host_annex:
host_existing = interface_existing.host
host_existing.owner_id = user.id
session.session.add(host_existing)
migrate_user_host(host_existing, user.room, processor)
else:
raise MacExistsException
else:
new_host = Host(owner=user, room=room)
session.session.add(new_host)
session.session.add(Interface(mac=mac, host=new_host))
setup_ipv4_networking(new_host)
user_send_mail(user, UserMovedInTemplate(), True)
msg = deferred_gettext("Moved in: {room}")
log_user_event(author=processor if processor is not None else user,
message=msg.format(room=room.short_name).to_json(),
user=user)
return user
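# Illustrative call of move_in (ids, room and MAC address are made up):
#   move_in(user, building_id=1, level=3, room_number="301",
#           mac="00:de:ad:be:ef:00", processor=admin_user)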
def migrate_user_host(host, new_room, processor):
"""
Migrate a UserHost to a new room and if necessary to a new subnet.
If the host changes subnet, it will get a new IP address.
:param Host host: Host to be migrated
:param Room new_room: new room of the host
:param User processor: User processing the migration
:return:
"""
old_room = host.room
host.room = new_room
subnets_old = get_subnets_for_room(old_room)
subnets = get_subnets_for_room(new_room)
if subnets_old != subnets:
for interface in host.interfaces:
old_ips = tuple(ip for ip in interface.ips)
for old_ip in old_ips:
ip_address, subnet = get_free_ip(subnets)
new_ip = IP(interface=interface, address=ip_address, subnet=subnet)
session.session.add(new_ip)
old_address = old_ip.address
session.session.delete(old_ip)
# <reponame>lixinsu/Lion
#!/usr/bin/env python
# coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import six
import copy
import spacy
import logging
import unicodedata
import collections
from shutil import copyfile
import jieba
import jieba.posseg as pseg
logger = logging.getLogger(__name__)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, opts=None):
self.data = data
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
            as_strings: return each ngram as a string vs a list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
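    # Illustrative behaviour of ngrams() (token values are made up), assuming
    # this Tokens object was built for the text "New York City":
    #   self.words()      -> ['New', 'York', 'City']
    #   self.ngrams(n=2)  -> ['New', 'New York', 'York', 'York City', 'City']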
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def whitespace_tokenize(self, text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
def __del__(self):
self.shutdown()
class JiebaTokenizer(Tokenizer):
def __init__(self, **kwargs):
pass
def tokenize(self, text):
# discard newline
clean_text = text.replace('\n', ' ')
tokens, poss = [], []
for tok, pos in pseg.cut(clean_text):
tokens.append(tok)
poss.append(pos)
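        # Re-align the segmenter output with character offsets: walk
        # clean_text and record the start index of each token, advancing past
        # any characters that do not line up with the next token.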
idxs = []
j = 0
i = 0
while i < len(tokens):
if clean_text[j:j+len(tokens[i])] == tokens[i]:
idxs.append(j)
j += len(tokens[i])
i += 1
else:
j += 1
# print(tokens)
# print(idxs)
data = []
for i in range(len(tokens)):
start_ws = idxs[i]
if i + 1 < len(tokens):
end_ws = idxs[i+1]
else:
end_ws = idxs[i] + len(tokens[i])
data.append((
tokens[i],
text[start_ws:end_ws],
(idxs[i], idxs[i] + len(tokens[i])),
poss[i],
tokens[i],
'fake',
))
return Tokens(data)
class SpacyTokenizer(Tokenizer):
def __init__(self):
"""
Args:
annotators: set that can include pos, lemma, and ner.
model: spaCy model to use (either path, or keyword like 'en').
"""
nlp_kwargs = {'parser': False}
nlp_kwargs['tagger'] = True
nlp_kwargs['entity'] = True
self.nlp = spacy.load("en_core_web_sm")
def tokenize(self, text):
# We don't treat new lines as tokens.
clean_text = text.replace('\n', ' ')
tokens = self.nlp.tokenizer(clean_text)
self.nlp.tagger(tokens)
self.nlp.entity(tokens)
data = []
for i in range(len(tokens)):
# Get whitespace
start_ws = tokens[i].idx
if i + 1 < len(tokens):
end_ws = tokens[i + 1].idx
else:
end_ws = tokens[i].idx + len(tokens[i].text)
data.append((
tokens[i].text,
text[start_ws: end_ws],
(tokens[i].idx, tokens[i].idx + len(tokens[i].text)),
tokens[i].tag_,
tokens[i].lemma_,
tokens[i].ent_type_,
))
# Set special option for non-entity tag: '' vs 'O' in spaCy
return Tokens(data, opts={'non_ent': ''})
class BertTokenizer(Tokenizer):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'".format(vocab_file))
self.vocab = self.load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
# split_tokens.append(sub_token)
split_tokens.append((
sub_token,
None,
None,
None,
None,
None,
))
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
# return split_tokens
# Set special option for non-entity tag: '' vs 'O' in spaCy
return Tokens(split_tokens, opts={'non_ent': ''})
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(
len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def load_vocab(self, vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
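    # Illustrative usage of BertTokenizer (the vocab path and the exact
    # wordpiece split are assumptions that depend on the vocabulary file):
    #   tokenizer = BertTokenizer('vocab.txt', do_lower_case=True)
    #   toks = tokenizer.tokenize('unaffable')   # Tokens over pieces like ['un', '##aff', '##able']
    #   ids = tokenizer.convert_tokens_to_ids(toks.words())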
class BasicTokenizer(Tokenizer):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = self.whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = self.whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if self._is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" | |
#
# $Id: sphinxapi.py 1216 2008-03-14 23:25:39Z shodan $
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006-2008, <NAME>
# Copyright (c) 2006, <NAME>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License. You should have
# received a copy of the GPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
import sys
import select
import socket
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS= 3
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x113
VER_COMMAND_EXCERPT = 0x100
VER_COMMAND_UPDATE = 0x101
VER_COMMAND_KEYWORDS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_MULTI = 0X40000000
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
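# Minimal usage sketch (host, port and index name are assumptions; see the
# SphinxClient methods defined below):
#   cl = SphinxClient()
#   cl.SetServer('localhost', 3312)
#   cl.SetMatchMode(SPH_MATCH_EXTENDED2)
#   result = cl.Query('hello world', index='test1')
#   if result is None:
#       print(cl.GetLastError())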
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 3312 # searchd port (default is 3312)
        self._offset = 0 # how many records to seek from result-set start (default is 0)
        self._limit = 20 # how many records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_ALL # query matching mode (default is SPH_MATCH_ALL)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
        self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0xFFFFFFFF # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._fieldweights = {} # per-field-name weights
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
return
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
assert(isinstance(port, int))
self._host = host
self._port = port
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
try:
sock = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )
sock.connect ( ( self._host, self._port ) )
except socket.error as msg:
if sock:
sock.close()
self._error = 'connection to %s:%s failed (%s)' % ( self._host, self._port, msg )
return 0
        v = unpack('>L', sock.recv(4))[0]
        if v < 1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return 0
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert(isinstance(offset, int) and offset>=0)
assert(isinstance(limit, int) and limit>0)
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
        assert(isinstance(maxquerytime, int) and maxquerytime >= 0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode (self, ranker):
"""
Set ranking mode.
"""
assert(ranker in [SPH_RANK_PROXIMITY_BM25, SPH_RANK_BM25, SPH_RANK_NONE, SPH_RANK_WORDCOUNT])
self._ranker = ranker
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetWeights (self, weights):
"""
Set per-field weights.
WARNING, DEPRECATED; do not use it! use SetFieldWeights() instead
"""
assert(isinstance(weights, list))
for w in weights:
assert(isinstance(w, int))
self._weights = weights
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in list(weights.items()):
assert(isinstance(key,str))
assert(isinstance(val,int))
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in list(weights.items()):
assert(isinstance(key,str))
assert(isinstance(val,int))
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
        Only match records if document ID is between minid and maxid (inclusive).
"""
assert(isinstance(minid, int))
assert(isinstance(maxid, int))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert(isinstance(values, list))
assert(values)
for value in values:
assert(isinstance(value, int))
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
        Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
assert(isinstance(min_, int))
assert(isinstance(max_, int))
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = [pack('>5L', self._offset, self._limit, self._mode, self._ranker, self._sort)]
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,str):
query = query.encode('utf-8')
assert(isinstance(query,bytes))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',0)) # id64 range marker FIXME! IMPLEMENT!
req.append(pack('>L', self._min_id))
req.append(pack('>L', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>L', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2L', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in list(self._indexweights.items()):
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in list(self._fieldweights.items()):
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
req.append ( pack('>L',len(comment)) + comment )
# send query, get response
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+4
req = pack('>HHLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, len(self._reqs))+req
sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = | |
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_vitni_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending nominal phrase
np = match.first_match(". >> { 'vitni' }")
# Find the attached prepositional phrase
pp = match.first_match('P > { "af" }')
if pp is None:
pp = match.first_match('ADVP > { "af" }')
assert np is not None
assert pp is not None
# Calculate the start and end token indices, spanning both phrases
start, end = min(np.span[0], pp.span[0]), max(np.span[1], pp.span[1])
text = "'verða vitni af' á sennilega að vera 'verða vitni að'"
detail = (
"Í samhenginu 'verða vitni að e-u' er notuð "
"forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, "vitni", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
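    # Illustrative effect (sentence is made up): for a matched phrase such as
    # "varð vitni af slysinu", the handler above annotates the combined
    # noun/preposition span and suggests the text with 'af' replaced by 'að',
    # i.e. "varð vitni að slysinu".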
def wrong_preposition_grin_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending verbal and nominal phrases
vp = match.first_match("VP > { 'gera' }")
np = match.first_match("NP >> { 'grín' }")
# Find the attached prepositional phrase
pp = match.first_match('P > { "af" }')
if pp is None:
pp = match.first_match('ADVP > { "af" }')
assert np is not None
assert pp is not None
# Calculate the start and end token indices, spanning both phrases
if vp is None:
start, end = min(np.span[0], pp.span[0]), max(np.span[1], pp.span[1])
else:
start, end = (
min(vp.span[0], np.span[0], pp.span[0]),
max(vp.span[1], np.span[1], pp.span[1]),
)
text = "'gera grín af' á sennilega að vera 'gera grín að'"
detail = (
"Í samhenginu 'gera grín að e-u' er notuð " "forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, "grín", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_leida_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending verbal and nominal phrases
vp = match.first_match("VP > { 'leiða' }")
np = match.first_match("NP >> { ( 'líkur'|'rök'|'rak' ) }")
# Find the attached prepositional phrase
pp = match.first_match('P > { "af" }')
if pp is None:
pp = match.first_match('ADVP > { "af" }')
assert vp is not None
assert np is not None
assert pp is not None
# Calculate the start and end token indices, spanning both phrases
start, end = (
min(vp.span[0], np.span[0], pp.span[0]),
max(vp.span[1], np.span[1], pp.span[1]),
)
text = "'leiða {0} af' á sennilega að vera 'leiða {0} að'".format(np.tidy_text)
detail = (
"Í samhenginu 'leiða {0} af e-u' er notuð "
"forsetningin 'að', ekki 'af'.".format(np.tidy_text)
)
suggest = self.suggestion_complex(match, "leiða", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_marka_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending verbal and nominal phrases
vp = match.first_match("VP > { 'marka' }")
lemma = "marka"
if vp is None:
vp = match.first_match("NP > { 'markaður' }")
lemma = "markaður"
np = match.first_match("NP >> { ( 'upphaf'|'upphafinn' ) }")
if np is None:
np = match.first_match("VP > { 'upphefja' }")
# Find the attached prepositional phrase
pp = match.first_match('P > { "af" }')
if pp is None:
pp = match.first_match('ADVP > { "af" }')
assert vp is not None
assert np is not None
assert pp is not None
# Calculate the start and end token indices, spanning both phrases
start, end = (
min(vp.span[0], np.span[0], pp.span[0]),
max(vp.span[1], np.span[1], pp.span[1]),
)
text = "'marka upphaf af' á sennilega að vera 'marka upphaf að'"
detail = (
"Í samhenginu 'marka upphaf að e-u' er notuð "
"forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, lemma, "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_leggja_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending verbal phrase
vp = match.first_match("VP > { 'leggja' }")
if vp is None:
vp = match.first_match("VP >> { 'leggja' }")
# Find the attached prepositional phrase
pp = match.first_match('P > { "af" }')
if pp is None:
pp = match.first_match('ADVP > { "af" }')
# Find the offending nominal phrase
np = match.first_match('NP >> { "velli" }')
assert vp is not None
assert pp is not None
assert np is not None
# Calculate the start and end token indices, spanning both phrases
start, end = (
min(vp.span[0], pp.span[0], np.span[0]),
max(vp.span[-1], pp.span[-1], np.span[-1]),
)
text = "'leggja af velli' á sennilega að vera 'leggja að velli'"
detail = (
"Í samhenginu 'leggja einhvern að velli' er notuð "
"forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, "leggja", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_utan_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending adverbial phrase
advp = match.first_match("ADVP > { 'utan' }")
if advp is None:
advp = match.first_match("ADVP >> { 'utan' }")
# Find the attached prepositional phrase
pp = match.first_match('ADVP > { "af" }')
assert advp is not None
assert pp is not None
# Calculate the start and end token indices, spanning both phrases
start, end = min(advp.span[0], pp.span[0]), max(advp.span[1], pp.span[1])
text = "'utan af' á sennilega að vera 'utan að'"
detail = (
"Í samhenginu 'kunna eitthvað utan að' er notuð "
"forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, "utan", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_uppvis_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending verbal phrase
vp = match.first_match("VP >> { 'verða' }")
# Find the attached nominal phrase
np = match.first_match("NP >> { 'uppvís' }")
# Find the attached prepositional phrase
pp = match.first_match('PP > { "af" }')
if pp is None:
pp = match.first_match("ADVP > { 'af' }")
assert vp is not None
assert np is not None
assert pp is not None
# Calculate the start and end token indices, spanning both phrases
start, end = (
min(vp.span[0], np.span[0], pp.span[0]),
max(vp.span[1], np.span[1], pp.span[1]),
)
text = "'uppvís af' á sennilega að vera 'uppvís að'"
detail = (
"Í samhenginu 'verða uppvís að einhverju' er notuð "
"forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, "uppvís", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def wrong_preposition_verða_af(self, match: SimpleTree) -> None:
"""Handle a match of a suspect preposition pattern"""
# Find the offending verbal phrase
vp = match.first_match("VP > { 'verða' }")
if vp is None:
vp = match.first_match("VP >> { 'verða' }")
# Find the attached prepositional phrase
pp = match.first_match("P > 'af' ")
if pp is None:
pp = match.first_match("ADVP > 'af' ")
# Find the attached nominal phrase
np = match.first_match("NP > 'ósk' ")
if np is None:
np = match.first_match("NP >> 'ósk' ")
assert vp is not None
assert pp is not None
assert np is not None
# Calculate the start and end token indices, spanning both phrases
start, end = (
min(vp.span[0], pp.span[0], np.span[0]),
            max(vp.span[1], pp.span[1], np.span[1]),
)
text = "'af ósk' á sennilega að vera 'að ósk'"
detail = (
"Í samhenginu 'að verða að ósk' er notuð " "forsetningin 'að', ekki 'af'."
)
suggest = self.suggestion_complex(match, "verða", "af")
self._ann.append(
Annotation(
start=start,
end=end,
code="P_WRONG_PREP_AF",
text=text,
detail=detail,
original=match.tidy_text,
suggest=suggest,
)
)
def suggestion_complex(self, match: SimpleTree, lemma: str, prep: str) -> str:
"""Find the preposition to correct for the suggestion"""
p_ter = match.first_match(f"'{lemma}'")
assert p_ter is not None
# The instance of the preposition which comes right after the phrase terminal is substituted
all_m = match.all_matches(f"@'{prep}'")
subtree = None
for m in all_m:
assert m is not None
if m.span[0] > p_ter.span[-1]:
subtree = m
break
assert subtree is not None
suggest = ""
if prep == "að":
suggest = match.substituted_text(subtree, "af")
elif prep == "af":
suggest = match.substituted_text(subtree, "að")
assert suggest != "" # All cases should be | |
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.knowledge_interfaces.angle import Angle
from pycatia.knowledge_interfaces.length import Length
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeHealing(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeHealing
|
| Represents the hybrid shape healing feature object.
| Role: Allows to access to the body to process for a Healing feature. Use the
| CATIAHybridShapeFactory to create HybridShapeFeature object.
|
| See also:
| HybridShapeFactory.AddNewHealing
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_healing = com_object
@property
def canonic_free_mode(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property CanonicFreeMode(long iMode)
|
| Returns or sets the Canonic Free Mode of the healing.
|
| Parameters:
|
| oMode
| (For get_CanonicFreeMode) Long parameter for retrieving the
| CanonicFreeMode.
| iMode
                |         (For set_CanonicFreeMode) Long parameter for setting the
| CanonicFreeMode.
|
| Example:
| This example sets and retrieves the CanonicFreeMode of the
| healing of the HybShpHealing hybrid shape
| healing.
|
| Dim HybShpHealMode As Long
| HybShpHealMode = ..set appropriate value
| HybShpHealing.CanonicFreeMode = HybShpHealMode
| HybShpHealCont = HybShpHealing.CanonicFreeMode
:return: int
:rtype: int
"""
        return self.hybrid_shape_healing.CanonicFreeMode
@canonic_free_mode.setter
def canonic_free_mode(self, mode: int):
"""
:param int mode:
"""
self.hybrid_shape_healing.CanonicFreeMode = mode
@property
def continuity(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Continuity(long iContinuity)
|
| Returns or sets the continuity type of the healing.
|
| Parameters:
|
| Continuity
| Parameter for the continuity. Legal values are 0 and
| 1
|
| Example:
| This example sets and retrieves the Continuity of the healing
| of the HybShpHealing hybrid shape healing.
|
| Dim HybShpHealCont As Long
| HybShpHealCont = ..set appropriate value
| HybShpHealing.Continuity = HybShpHealCont
| HybShpHealCont = HybShpHealing.Continuity
        :return: int
        :rtype: int
"""
return self.hybrid_shape_healing.Continuity
@continuity.setter
def continuity(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_healing.Continuity = value
@property
def distance_objective(self) -> Length:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property DistanceObjective() As Length (Read Only)
|
| Returns the Distance Objective of the healing.
|
| Parameters:
|
| DistanceObjective
| Length parameter for retrieving the Distance
| Objective.
|
| Example:
| This example retrieves the DistanceObjective of the healing of
| the HybShpHealing hybrid shape healing.
|
| Dim HybShpHealDistObjective As Length
| Set HybShpHealDistObjective = HybShpHealing.DistanceObjective
:return: Length
:rtype: Length
"""
return Length(self.hybrid_shape_healing.DistanceObjective)
@property
def merging_distance(self) -> Length:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property MergingDistance() As Length (Read Only)
|
| Returns the Merging Distance of the healing.
|
| Parameters:
|
| MergingDistance
| Length parameter for retrieving the Merging
| Distance.
|
| Example:
| This example retrieves the MergingDistance of the healing of
| the HybShpHealing hybrid shape healing.
|
| Dim HybShpHealMergeDist As Length
| Set HybShpHealMergeDist = HybShpHealing.MergingDistance
:return: Length
:rtype: Length
"""
return Length(self.hybrid_shape_healing.MergingDistance)
@property
def no_of_bodies_to_heal(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property NoOfBodiesToHeal() As long (Read Only)
|
| Returns the number of bodies to heal of the healing.
|
| Parameters:
|
| NumberOfbodies
| Number of bodies to heal in the healing.
|
| Example:
| This example retrieves the number of bodies to heal of the
| HybShpHealing hybrid shape Healing.
|
| Dim NoOfBodiesToHeal As long
| NoOfBodiesToHeal = HybShpHealing.NoOfBodiesToHeal
:return: int
:rtype: int
"""
return self.hybrid_shape_healing.NoOfBodiesToHeal
@property
def no_of_edges_to_keep_sharp(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property NoOfEdgesToKeepSharp() As long (Read Only)
|
| Returns the number of edges to keep sharp of the healing.
|
| Parameters:
|
| NumberOfEdges
| Number of edges to keep sharp.
|
| Example:
| This example retrieves the number of edges to keep sharp of the
| HybShpHealing hybrid shape Healing.
|
| Dim NoOfEdges As long
| NoOfEdges = HybShpHealing.NoOfEdgesToKeepSharp
:return: int
:rtype: int
"""
return self.hybrid_shape_healing.NoOfEdgesToKeepSharp
@property
def no_of_elements_to_freeze(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property NoOfElementsToFreeze() As long (Read Only)
|
| Returns the number of elements to heal of the healing.
|
| Parameters:
|
| NumberOfElements
| Number of elements to freeze in the healing.
|
| Example:
| This example retrieves the number of elements to freeze of the
| HybShpHealing hybrid shape Healing.
|
| Dim NoOfElementsToFreeze As long
| NoOfElementsToFreeze = HybShpHealing.NoOfElementsToFreeze
:return: int
:rtype: int
"""
return self.hybrid_shape_healing.NoOfElementsToFreeze
@property
def sharpness_angle(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SharpnessAngle() As Angle (Read Only)
|
| Returns the Sharpness Angle of the healing.
|
| Parameters:
|
| SharpnessAngle
| Angle parameter for retrieving the Sharpness
| Angle.
|
| Example:
| This example retrieves the Sharpness Angle of the healing of
| the HybShpHealing hybrid shape healing.
|
| Dim HybShpHealSharpnessAngle As Angle
| Set HybShpHealSharpnessAngle = HybShpHealing.SharpnessAngle
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_healing.SharpnessAngle)
@property
def tangency_angle(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property TangencyAngle() As Angle (Read Only)
|
| Returns the Tangency Angle of the healing.
|
| Parameters:
|
| TangencyAngle
| Angle parameter for retrieving the TangencyAngle.
|
| Example:
| This example retrieves the TangencyAngle of the healing of the
| HybShpHealing hybrid shape healing.
|
| Dim HybShpHealTangencyAngle As Angle
| Set HybShpHealTangencyAngle = HybShpHealing.TangencyAngle
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_healing.TangencyAngle)
@property
def tangency_objective(self) -> Length:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property TangencyObjective() As Length (Read Only)
|
| Returns the Tangency Objective of the healing.
|
| Parameters:
|
| TangencyObjective
| Length parameter for retrieving the Tangency
| Objective.
|
| Example:
| This example retrieves the TangencyObjective of the healing of
| the HybShpHealing hybrid shape healing.
|
| Dim HybShpHealTangencyObjective As Length
| Set HybShpHealTangencyObjective = HybShpHealing.TangencyObjective
:return: Length
:rtype: Length
"""
return Length(self.hybrid_shape_healing.TangencyObjective)
def add_body_to_heal(self, i_body: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub AddBodyToHeal(Reference iBody)
|
| Adds the body to be healed to the list.
|
| Parameters:
|
| Body
| Reference to the body to be added to the list.
|
| Example:
| This example adds the body to the list. of the HybShpHealing
| hybrid shape healing.
|
| HybShpHealing.AddBodyToHeal refBody
:param Reference i_body:
:return: None
:rtype: None
"""
return self.hybrid_shape_healing.AddBodyToHeal(i_body.com_object)
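    # Illustrative use of this wrapper (obtaining `healing` from
    # HybridShapeFactory.AddNewHealing and `ref_body` as a Reference object is
    # assumed; see the class note above):
    #   healing.continuity = 1
    #   healing.add_body_to_heal(ref_body)
    #   print(healing.no_of_bodies_to_heal)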
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'add_body_to_heal'
# # | |
#!/usr/bin/python
from __future__ import print_function
import os
import shlex
import signal
import subprocess
import sys
import time
import random
import string
import stat
from optparse import OptionParser
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
# make python 3.x compatible with python 2.x
if sys.version_info >= (3,):
def unicode(s, errors="strict"):
if isinstance(s, str):
return s
elif isinstance(s, bytes) or isinstance(s, bytearray):
return s.decode("utf-8", errors)
raise SandboxError("Tried to convert unrecognized type to unicode")
try:
from server_info import server_info
_SECURE_DEFAULT = server_info.get('secure_jail', True)
except ImportError:
_SECURE_DEFAULT = False
class SandboxError(Exception):
pass
def _guard_monitor(jail):
guard_out = jail.command_process.stdout
while True:
line = guard_out.readline()
if not line:
end_item = (time.time(), None)
jail.resp_queue.put(end_item)
jail.stdout_queue.put(end_item)
jail.stderr_queue.put(end_item)
break
line = line.rstrip("\r\n")
words = line.split(None, 2)
if len(words) < 3:
msg, ts = words
data = ""
else:
msg, ts, data = words
ts = float(ts)
data = unicode(data, errors="replace")
if msg == "STDOUT":
jail.stdout_queue.put((time, data))
elif msg == "STDERR":
jail.stderr_queue.put((time, data))
elif msg == "SIGNALED":
jail.resp_queue.put((time, data))
class Jail(object):
""" Provide a secure sandbox to run arbitrary commands in.
This will only function on specially prepared Ubuntu systems.
"""
def __init__(self, working_directory):
"""Initialize a new sandbox for the given working directory.
working_directory: the directory in which the shell command should
be launched. Files from this directory are copied
into the secure space before the shell command is
executed.
"""
self.locked = False
jail_base = "/srv/chroot"
all_jails = os.listdir(jail_base)
all_jails = [j for j in all_jails if j.startswith("jailuser")]
for jail in all_jails:
lock_dir = os.path.join(jail_base, jail, "locked")
try:
os.mkdir(lock_dir)
except OSError:
# if the directory could not be created, that should mean the
# jail is already locked and in use
continue
with open(os.path.join(lock_dir, "lock.pid"), "w") as pid_file:
pid_file.write(str(os.getpid()))
self.locked = True
self.name = jail
break
else:
raise SandboxError("Could not find an unlocked jail")
self.jchown = os.path.join(server_info["repo_path"], "worker/jail_own")
self.base_dir = os.path.join(jail_base, jail)
self.number = int(jail[len("jailuser"):])
self.chroot_cmd = "sudo -u {0} schroot -u {0} -c {0} -d {1} -- jailguard.py ".format(
self.name, "/home/jailuser")
self._is_alive = False
self.command_process = None
self.resp_queue = Queue()
self.stdout_queue = Queue()
self.stderr_queue = Queue()
self._prepare_with(working_directory)
def __del__(self):
if self.locked:
raise SandboxError("Jail object for %s freed without being released"
% (self.name))
@property
def is_alive(self):
"""Indicates whether a command is currently running in the sandbox"""
if self._is_alive:
sub_result = self.command_process.poll()
if sub_result is None:
return True
self._is_alive = False
return False
def release(self):
"""Release the sandbox for further use
Unlocks and releases the jail for reuse by others.
Must be called exactly once after Jail.is_alive == False.
"""
if self.is_alive:
raise SandboxError("Sandbox released while still alive")
if not self.locked:
raise SandboxError("Attempt to release jail that is already unlocked")
if os.system("sudo umount %s" % (os.path.join(self.base_dir, "root"),)):
raise SandboxError("Error returned from umount of jail %d"
% (self.number,))
lock_dir = os.path.join(self.base_dir, "locked")
pid_filename = os.path.join(lock_dir, "lock.pid")
with open(pid_filename, 'r') as pid_file:
lock_pid = int(pid_file.read())
if lock_pid != os.getpid():
# if we ever get here something has gone seriously wrong
# most likely the jail locking mechanism has failed
raise SandboxError("Jail released by different pid, name %s, lock_pid %d, release_pid %d"
% (self.name, lock_pid, os.getpid()))
os.unlink(pid_filename)
os.rmdir(lock_dir)
self.locked = False
def _prepare_with(self, command_dir):
if os.system("%s c %d" % (self.jchown, self.number)) != 0:
raise SandboxError("Error returned from jail_own c %d in prepare"
% (self.number,))
scratch_dir = os.path.join(self.base_dir, "scratch")
if os.system("rm -rf %s" % (scratch_dir,)) != 0:
raise SandboxError("Could not remove old scratch area from jail %d"
% (self.number,))
home_dir = os.path.join(scratch_dir, "home/jailuser")
os.makedirs(os.path.join(scratch_dir, "home"))
if os.system("cp -r %s %s" % (command_dir, home_dir)) != 0:
raise SandboxError("Error copying working directory '%s' to jail %d"
% (command_dir, self.number))
if os.system("sudo mount %s" % (os.path.join(self.base_dir, "root"),)):
raise SandboxError("Error returned from mount of %d in prepare"
% (self.number,))
if os.system("%s j %d" % (self.jchown, self.number)) != 0:
raise SandboxError("Error returned from jail_own j %d in prepare"
% (self.number,))
self.home_dir = home_dir
self.command_dir = command_dir
def retrieve(self):
"""Copy the working directory back out of the sandbox."""
if self.is_alive:
raise SandboxError("Tried to retrieve sandbox while still alive")
os.system("rm -rf %s" % (self.command_dir,))
if os.system("%s c %d" % (self.jchown, self.number)) != 0:
raise SandboxError("Error returned from jail_own c %d in prepare"
% (self.number,))
os.system("cp -r %s %s" % (self.home_dir, self.command_dir))
def start(self, shell_command):
"""Start a command running in the sandbox"""
if self.is_alive:
raise SandboxError("Tried to run command with one in progress.")
shell_command = self.chroot_cmd + shell_command
shell_command = shlex.split(shell_command.replace('\\','/'))
try:
self.command_process = subprocess.Popen(shell_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError as e:
raise SandboxError('Failed to start {0} due to {1}'.format(shell_command, str(e)))
self._is_alive = True
monitor = Thread(target=_guard_monitor, args=(self,))
monitor.daemon = True
monitor.start()
def _signal(self, signal):
if not self.locked:
raise SandboxError("Attempt to send %s to unlocked jail" % (signal,))
result = subprocess.call("sudo -u {0} kill -{1} -1".format(
self.name, signal), shell=True)
if result != 0:
raise SandboxError("Error returned from jail %s sending signal %s"
% (self.name, signal))
def kill(self):
"""Stops the sandbox.
Stops down the sandbox, cleaning up any spawned processes, threads, and
other resources. The shell command running inside the sandbox may be
suddenly terminated.
"""
try:
self.command_process.stdin.write("KILL\n")
self.command_process.stdin.flush()
except IOError as exc:
if exc.errno != 32:
raise
try:
item = self.resp_queue.get(timeout=5)
if item[1] != "KILL" and item[1] is not None:
raise SandboxError("Bad response from jailguard after kill, %s"
% (item,))
except Empty:
pass
self._signal("CONT")
for i in range(20):
if self.command_process.poll() != None:
break
if i == 10:
self._signal("KILL")
time.sleep(0.1)
        # final check to make sure the processes have died; raise an error if not
if self.is_alive:
raise SandboxError("Could not kill sandbox children")
def pause(self):
"""Pause the process by sending a SIGSTOP to the child"""
try:
self.command_process.stdin.write("STOP\n")
self.command_process.stdin.flush()
except IOError as exc:
if exc.errno == 32: # Broken pipe, guard exited
return
raise
item = self.resp_queue.get()
if item[1] != "STOP" and item[1] is not None:
raise SandboxError("Bad response from jailguard after pause, %s"
% (item,))
def resume(self):
"""Resume the process by sending a SIGCONT to the child"""
try:
self.command_process.stdin.write("CONT\n")
self.command_process.stdin.flush()
except IOError as exc:
if exc.errno == 32: # Broken pipe, guard exited
return
raise
item = self.resp_queue.get()
if item[1] != "CONT" and item[1] is not None:
raise SandboxError("Bad response from jailguard after resume, %s"
% (item,))
def write(self, data):
"""Write str to stdin of the process being run"""
for line in data.splitlines():
self.write_line(line)
def write_line(self, line):
"""Write line to stdin of the process being run
A newline is appended to line and written to stdin of the child process
"""
if not self.is_alive:
return False
try:
self.command_process.stdin.write("SEND %s\n" % (line,))
self.command_process.stdin.flush()
except (OSError, IOError):
self.kill()
def read_line(self, timeout=0):
"""Read line from child process
Returns a line of the child process' stdout, if one isn't available
within timeout seconds it returns None. Also guaranteed to return None
at least once after each command that is run in the sandbox.
"""
if not self.is_alive:
timeout=0
try:
time, line = self.stdout_queue.get(block=True, timeout=timeout)
return line
except Empty:
return None
def read_error(self, timeout=0):
"""Read line from child process' stderr
Returns a line of the child process' stderr, if one isn't available
within timeout seconds it returns None. Also guaranteed to return None
at least once after each command that is run in the sandbox.
"""
if not self.is_alive:
timeout=0
try:
time, line = self.stderr_queue.get(block=True, timeout=timeout)
return line
except Empty:
return None
def check_path(self, path, errors):
resolved_path = os.path.join(self.home_dir, path)
if not os.path.exists(resolved_path):
errors.append("Output file " + str(path) + " was not created.")
return False
else:
return True
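# Illustrative lifecycle of a Jail (the working directory and command are made
# up; requires the specially prepared chroot/schroot setup described in the
# class docstring):
#   jail = Jail('/tmp/bot_workdir')
#   jail.start('python MyBot.py')
#   jail.write_line('go')
#   line = jail.read_line(timeout=1)
#   jail.kill()
#   jail.retrieve()
#   jail.release()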
def _monitor_file(fd, q):
while True:
line = fd.readline()
if not line:
q.put(None)
break
line = unicode(line, errors="replace")
line = line.rstrip('\r\n')
q.put(line)
class IsolatedHouse:
"""Provide an insecure sandbox to run arbitrary commands in.
The sandbox class is used to invoke arbitrary shell commands.
This class provides the same interface as the secure Sandbox but doesn't
provide any actual security or require any special system setup.
"""
def __init__(self, working_directory, protected_files):
"""Initialize a new sandbox for the given working directory.
working_directory: the directory in which the shell command should
be launched.
"""
random.seed()
self._is_alive = False
self.command_process =
using ZLib level 6 compression flag (default recommended value)
PNG_Z_DEFAULT_COMPRESSION = 0x0006
# save using ZLib level 9 compression flag (default value is 6)
PNG_Z_BEST_COMPRESSION = 0x0009
PNG_Z_NO_COMPRESSION = 0x0100 # save without ZLib compression
# save using Adam7 interlacing (use | to combine with other save flags)
PNG_INTERLACED = 0x0200
PNM_DEFAULT = 0
PNM_SAVE_RAW = 0 # Writer saves in RAW format (i.e. P4, P5 or P6)
PNM_SAVE_ASCII = 1 # Writer saves in ASCII format (i.e. P1, P2 or P3)
PSD_DEFAULT = 0
PSD_CMYK = 1 # reads tags for separated CMYK (default converts to RGB)
PSD_LAB = 2 # reads tags for CIELab (default is conversion to RGB)
RAS_DEFAULT = 0
RAW_DEFAULT = 0 # load the file as linear RGB 48-bit
# try to load embedded JPEG preview from Exif Data or default to RGB 24-bit
RAW_PREVIEW = 1
RAW_DISPLAY = 2 # load the file as RGB 24-bit
SGI_DEFAULT = 0
TARGA_DEFAULT = 0
TARGA_LOAD_RGB888 = 1 # Convert RGB555 and ARGB8888 -> RGB888.
TARGA_SAVE_RLE = 2 # Save with RLE compression
TIFF_DEFAULT = 0
# reads/stores tags for separated CMYK
# (use | to combine with compression flags)
TIFF_CMYK = 0x0001
TIFF_PACKBITS = 0x0100 # save using PACKBITS compression
TIFF_DEFLATE = 0x0200 # save using DEFLATE (a.k.a. ZLIB) compression
TIFF_ADOBE_DEFLATE = 0x0400 # save using ADOBE DEFLATE compression
TIFF_NONE = 0x0800 # save without any compression
TIFF_CCITTFAX3 = 0x1000 # save using CCITT Group 3 fax encoding
TIFF_CCITTFAX4 = 0x2000 # save using CCITT Group 4 fax encoding
TIFF_LZW = 0x4000 # save using LZW compression
TIFF_JPEG = 0x8000 # save using JPEG compression
TIFF_LOGLUV = 0x10000 # save using LogLuv compression
WBMP_DEFAULT = 0
XBM_DEFAULT = 0
XPM_DEFAULT = 0
class MetadataModels(object):
FIMD_COMMENTS = 0
FIMD_EXIF_MAIN = 1
FIMD_EXIF_EXIF = 2
FIMD_EXIF_GPS = 3
FIMD_EXIF_MAKERNOTE = 4
FIMD_EXIF_INTEROP = 5
FIMD_IPTC = 6
FIMD_XMP = 7
FIMD_GEOTIFF = 8
FIMD_ANIMATION = 9
class MetadataDatatype(object):
FIDT_BYTE = 1 # 8-bit unsigned integer
FIDT_ASCII = 2 # 8-bit bytes w/ last byte null
FIDT_SHORT = 3 # 16-bit unsigned integer
FIDT_LONG = 4 # 32-bit unsigned integer
FIDT_RATIONAL = 5 # 64-bit unsigned fraction
FIDT_SBYTE = 6 # 8-bit signed integer
FIDT_UNDEFINED = 7 # 8-bit untyped data
FIDT_SSHORT = 8 # 16-bit signed integer
FIDT_SLONG = 9 # 32-bit signed integer
FIDT_SRATIONAL = 10 # 64-bit signed fraction
FIDT_FLOAT = 11 # 32-bit IEEE floating point
FIDT_DOUBLE = 12 # 64-bit IEEE floating point
FIDT_IFD = 13 # 32-bit unsigned integer (offset)
FIDT_PALETTE = 14 # 32-bit RGBQUAD
FIDT_LONG8 = 16 # 64-bit unsigned integer
FIDT_SLONG8 = 17 # 64-bit signed integer
FIDT_IFD8 = 18 # 64-bit unsigned integer (offset)
dtypes = {FIDT_BYTE: numpy.uint8,
FIDT_SHORT: numpy.uint16,
FIDT_LONG: numpy.uint32,
FIDT_RATIONAL: [('numerator', numpy.uint32),
('denominator', numpy.uint32)],
FIDT_SBYTE: numpy.int8,
FIDT_UNDEFINED: numpy.uint8,
FIDT_SSHORT: numpy.int16,
FIDT_SLONG: numpy.int32,
FIDT_SRATIONAL: [('numerator', numpy.int32),
('denominator', numpy.int32)],
FIDT_FLOAT: numpy.float32,
FIDT_DOUBLE: numpy.float64,
FIDT_IFD: numpy.uint32,
FIDT_PALETTE: [('R', numpy.uint8), ('G', numpy.uint8),
('B', numpy.uint8), ('A', numpy.uint8)],
FIDT_LONG8: numpy.uint64,
FIDT_SLONG8: numpy.int64,
FIDT_IFD8: numpy.uint64,
}
def _process_bitmap(filename, flags, process_func):
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFileType(filename, 0)
handle_errors()
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
bitmap = _FI.FreeImage_Load(ftype, filename, flags)
handle_errors()
bitmap = ctypes.c_void_p(bitmap)
if not bitmap:
raise ValueError('Could not load file %s' % filename)
try:
return process_func(bitmap)
finally:
_FI.FreeImage_Unload(bitmap)
handle_errors()
def read(filename, flags=0):
"""Read an image to a numpy array of shape (height, width) for
greyscale images, or shape (height, width, nchannels) for RGB or
RGBA images.
The `flags` parameter should be one or more values from the IoFlags
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
return _process_bitmap(filename, flags, _array_from_bitmap)
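# Hypothetical usage sketch (not part of the original module): loading an
# image with read() defined above and inspecting the resulting array. The
# file name is a placeholder and the FreeImage library must be available.
def _example_read(path="example.png"):
    """Load an image and report its shape and dtype."""
    img = read(path)  # (height, width) or (height, width, nchannels)
    return img.shape, img.dtype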
def read_metadata(filename):
"""Return a dict containing all image metadata.
Returned dict maps (metadata_model, tag_name) keys to tag values, where
metadata_model is a string name based on the FreeImage "metadata models"
defined in the class MetadataModels.
"""
flags = IoFlags.FIF_LOAD_NOPIXELS
return _process_bitmap(filename, flags, _read_metadata)
def _process_multipage(filename, flags, process_func):
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFileType(filename, 0)
handle_errors()
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
create_new = False
read_only = True
keep_cache_in_memory = True
multibitmap = _FI.FreeImage_OpenMultiBitmap(
ftype, filename, create_new, read_only, keep_cache_in_memory, flags)
handle_errors()
multibitmap = ctypes.c_void_p(multibitmap)
if not multibitmap:
raise ValueError('Could not open %s as multi-page image.' % filename)
try:
pages = _FI.FreeImage_GetPageCount(multibitmap)
handle_errors()
out = []
for i in range(pages):
bitmap = _FI.FreeImage_LockPage(multibitmap, i)
handle_errors()
bitmap = ctypes.c_void_p(bitmap)
if not bitmap:
raise ValueError('Could not open %s as a multi-page image.'
% filename)
try:
out.append(process_func(bitmap))
finally:
_FI.FreeImage_UnlockPage(multibitmap, bitmap, False)
handle_errors()
return out
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, 0)
handle_errors()
def read_multipage(filename, flags=0):
"""Read a multipage image to a list of numpy arrays, where each
array is of shape (height, width) for greyscale images, or shape
(height, width, nchannels) for RGB or RGBA images.
The `flags` parameter should be one or more values from the IoFlags
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
return _process_multipage(filename, flags, _array_from_bitmap)
def read_multipage_metadata(filename):
"""Read a multipage image to a list of metadata dicts, one dict for each
page. The dict format is as in read_metadata().
"""
flags = IoFlags.FIF_LOAD_NOPIXELS
return _process_multipage(filename, flags, _read_metadata)
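# Hypothetical usage sketch (not part of the original module): reading every
# page of a multipage image together with its per-page metadata. The file
# name is a placeholder.
def _example_multipage(path="scan.tif"):
    """Return the page count, the shape of each page and per-page metadata."""
    pages = read_multipage(path)          # one numpy array per page
    meta = read_multipage_metadata(path)  # one metadata dict per page
    return len(pages), [page.shape for page in pages], meta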
def _wrap_bitmap_bits_in_array(bitmap, shape, dtype):
"""Return an ndarray view on the data in a FreeImage bitmap. Only
valid for as long as the bitmap is loaded (if single page) / locked
in memory (if multipage).
"""
pitch = _FI.FreeImage_GetPitch(bitmap)
handle_errors()
height = shape[-1]
byte_size = height * pitch
itemsize = dtype.itemsize
if len(shape) == 3:
strides = (itemsize, shape[0] * itemsize, pitch)
else:
strides = (itemsize, pitch)
bits = _FI.FreeImage_GetBits(bitmap)
handle_errors()
array = numpy.ndarray(
shape, dtype=dtype,
buffer=(ctypes.c_char * byte_size).from_address(bits), strides=strides)
return array
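# Worked example of the stride computation above (illustrative numbers): this
# module handles multi-channel data with shape (channels, width, height), so
# for an 8-bit RGB bitmap with width=640, height=480 and a pitch of 1920
# bytes (640 pixels * 3 bytes, already 4-byte aligned) the strides come out
# as (1, 3, 1920): 1 byte to the next channel, 3 bytes to the next pixel in a
# scanline, and `pitch` bytes to the next scanline.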
def _array_from_bitmap(bitmap):
"""Convert a FreeImage bitmap pointer to a numpy array.
"""
dtype, shape = FiTypes.get_type_and_shape(bitmap)
array = _wrap_bitmap_bits_in_array(bitmap, shape, dtype)
# swizzle the color components and flip the scanlines to go from
# FreeImage's BGR[A] and upside-down internal memory format to something
# more normal
def n(arr):
return arr[..., ::-1].T
if len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and \
dtype.type == numpy.uint8:
b = n(array[0])
g = n(array[1])
r = n(array[2])
if shape[0] == 3:
handle_errors()
return numpy.dstack((r, g, b))
elif shape[0] == 4:
a = n(array[3])
return numpy.dstack((r, g, b, a))
else:
raise ValueError('Cannot handle images of shape %s' % shape)
# We need to copy because array does *not* own its memory
# after bitmap is freed.
return n(array).copy()
def _read_metadata(bitmap):
metadata = {}
models = [(name[5:], number) for name, number in
MetadataModels.__dict__.items() if name.startswith('FIMD_')]
tag = ctypes.c_void_p()
for model_name, number in models:
mdhandle = _FI.FreeImage_FindFirstMetadata(number, bitmap,
ctypes.byref(tag))
handle_errors()
mdhandle = ctypes.c_void_p(mdhandle)
if mdhandle:
more = True
while more:
tag_name = asstr(_FI.FreeImage_GetTagKey(tag))
tag_type = _FI.FreeImage_GetTagType(tag)
byte_size = _FI.FreeImage_GetTagLength(tag)
handle_errors()
char_ptr = ctypes.c_char * byte_size
tag_str = char_ptr.from_address(_FI.FreeImage_GetTagValue(tag))
handle_errors()
if tag_type == MetadataDatatype.FIDT_ASCII:
tag_val = asstr(tag_str.value)
else:
tag_val = numpy.fromstring(
tag_str, dtype=MetadataDatatype.dtypes[tag_type])
if len(tag_val) == 1:
tag_val = tag_val[0]
metadata[(model_name, tag_name)] = tag_val
more = _FI.FreeImage_FindNextMetadata(mdhandle,
ctypes.byref(tag))
handle_errors()
_FI.FreeImage_FindCloseMetadata(mdhandle)
handle_errors()
return metadata
def write(array, filename, flags=0):
"""Write a (height, width) or (height, width, nchannels) array to
a greyscale, RGB, or RGBA image, with file type deduced from the
filename.
The `flags` parameter should be one or more values from the IoFlags
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
array = numpy.asarray(array)
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFIFFromFilename(filename)
handle_errors()
if ftype == -1:
raise ValueError('Cannot determine type for %s' % filename)
bitmap, fi_type = _array_to_bitmap(array)
try:
if fi_type == FiTypes.FIT_BITMAP:
can_write = _FI.FreeImage_FIFSupportsExportBPP(
ftype, _FI.FreeImage_GetBPP(bitmap))
handle_errors()
else:
can_write = _FI.FreeImage_FIFSupportsExportType(ftype, fi_type)
handle_errors()
if not can_write:
raise TypeError('Cannot save image of this format '
'to this file type')
res = _FI.FreeImage_Save(ftype, bitmap, filename, flags)
handle_errors()
if not res:
raise RuntimeError('Could not save image properly.')
finally:
_FI.FreeImage_Unload(bitmap)
handle_errors()
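# Hypothetical usage sketch (not part of the original module): a simple
# read/write round trip using the functions defined above. File names are
# placeholders; the output format is deduced from the destination extension.
def _example_roundtrip(src="input.png", dst="output.tif", flags=0):
    """Read `src` and save the same pixel data to `dst`."""
    img = read(src)
    write(img, dst, flags=flags)
    return img.shape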
def write_multipage(arrays, filename, flags=0):
"""Write a | |
limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
"""
return pulumi.get(self, "size_limit")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentConfigVolumesEmptyDirSizeLimit(dict):
def __init__(__self__):
pass
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentConfigVolumesFc(dict):
"""
FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
"""
def __init__(__self__, *,
fs_type: Optional[str] = None,
lun: Optional[int] = None,
read_only: Optional[bool] = None,
target_wwns: Optional[Sequence[str]] = None,
wwids: Optional[Sequence[str]] = None):
"""
FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
:param str fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine
:param int lun: Optional: FC target lun number
:param bool read_only: Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param Sequence[str] target_wwns: Optional: FC target worldwide names (WWNs)
:param Sequence[str] wwids: Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
"""
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if lun is not None:
pulumi.set(__self__, "lun", lun)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if target_wwns is not None:
pulumi.set(__self__, "target_wwns", target_wwns)
if wwids is not None:
pulumi.set(__self__, "wwids", wwids)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[str]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine
"""
return pulumi.get(self, "fs_type")
@property
@pulumi.getter
def lun(self) -> Optional[int]:
"""
Optional: FC target lun number
"""
return pulumi.get(self, "lun")
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[bool]:
"""
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@property
@pulumi.getter(name="targetWWNs")
def target_wwns(self) -> Optional[Sequence[str]]:
"""
Optional: FC target worldwide names (WWNs)
"""
return pulumi.get(self, "target_wwns")
@property
@pulumi.getter
def wwids(self) -> Optional[Sequence[str]]:
"""
Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
"""
return pulumi.get(self, "wwids")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
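# Illustrative sketch (not part of the generated source): these output types
# are normally populated by the Pulumi engine from the DatadogAgent resource,
# but the constructor above can be exercised directly, e.g. in tests. The
# values below are placeholders.
#
#     fc = DatadogAgentSpecAgentConfigVolumesFc(
#         fs_type="ext4",
#         lun=0,
#         read_only=True,
#         target_wwns=["500a0981891b8dc5"],
#     )
#     fc.fs_type   # -> "ext4"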
@pulumi.output_type
class DatadogAgentSpecAgentConfigVolumesFlexVolume(dict):
"""
FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
"""
def __init__(__self__, *,
driver: str,
fs_type: Optional[str] = None,
options: Optional[Mapping[str, str]] = None,
read_only: Optional[bool] = None,
secret_ref: Optional['outputs.DatadogAgentSpecAgentConfigVolumesFlexVolumeSecretRef'] = None):
"""
FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
:param str driver: Driver is the name of the driver to use for this volume.
:param str fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
:param Mapping[str, str] options: Optional: Extra command options if any.
:param bool read_only: Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param 'DatadogAgentSpecAgentConfigVolumesFlexVolumeSecretRefArgs' secret_ref: Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
"""
pulumi.set(__self__, "driver", driver)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if options is not None:
pulumi.set(__self__, "options", options)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter
def driver(self) -> str:
"""
Driver is the name of the driver to use for this volume.
"""
return pulumi.get(self, "driver")
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[str]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
"""
return pulumi.get(self, "fs_type")
@property
@pulumi.getter
def options(self) -> Optional[Mapping[str, str]]:
"""
Optional: Extra command options if any.
"""
return pulumi.get(self, "options")
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[bool]:
"""
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional['outputs.DatadogAgentSpecAgentConfigVolumesFlexVolumeSecretRef']:
"""
Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
"""
return pulumi.get(self, "secret_ref")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentConfigVolumesFlexVolumeSecretRef(dict):
"""
Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
"""
def __init__(__self__, *,
name: Optional[str] = None):
"""
Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
:param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentConfigVolumesFlocker(dict):
"""
Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
"""
def __init__(__self__, *,
dataset_name: Optional[str] = None,
dataset_uuid: Optional[str] = None):
"""
Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
:param str dataset_name: Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated
:param str dataset_uuid: UUID of the dataset. This is unique identifier of a Flocker dataset
"""
if dataset_name is not None:
pulumi.set(__self__, "dataset_name", dataset_name)
if dataset_uuid is not None:
pulumi.set(__self__, "dataset_uuid", dataset_uuid)
@property
@pulumi.getter(name="datasetName")
def dataset_name(self) -> Optional[str]:
"""
Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated
"""
return pulumi.get(self, "dataset_name")
@property
@pulumi.getter(name="datasetUUID")
def dataset_uuid(self) -> Optional[str]:
"""
UUID of the dataset. This is unique identifier of a Flocker dataset
"""
return pulumi.get(self, "dataset_uuid")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentConfigVolumesGcePersistentDisk(dict):
"""
GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
def __init__(__self__, *,
pd_name: str,
fs_type: Optional[str] = None,
partition: Optional[int] = None,
read_only: Optional[bool] = None):
"""
GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param str pd_name: Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param str fs_type: Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine
:param int partition: The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param bool
sweetish sweetmeat sweetmeats
swellhead swellheaded swellheads swelter sweltered sweltering
swelterings swelters swiftness swimmer swimmers swimsuit swimsuits
swinger swingers swinish swirlier swirliest swirly switchback
switchbacks switchblade switchblades swordplay swordsman swordsmen
sybarite sybarites sybaritic sycamore sycamores sycophant sycophantic
sycophants syllabic syllabication syllabification syllabified
syllabifies syllabify syllabifying syllogism syllogisms syllogistic
sylph sylphs sylvan symbioses symbiosis symbiotic symbolically
symmetrically symmetries symposium symposiums sync synced
synchronously syncing syncopate syncopated syncopates syncopating
syncopation syncs syndication synergism synergistic synergy synod
synods syntactical syntactics synthetically syphilitic syphilitics
syrupy systemic systemics systolic t tableau tableaux tableland
tablelands tableware tabular tabulator tabulators tachometer
tachometers tacitness taciturnity tackiness tackler tacklers
tactically tactician tacticians tactile tactlessness tad tads taffeta
taffies taffy tailcoat tailcoats tailless tailpipe tailpipes tailwind
tailwinds takeaways takeout takeouts takeovers takings talkativeness
tallness tallyho tallyhoed tallyhoing tallyhos tam tamable tamale
tamales tamarind tamarinds tamers tamp tamped tamping tampon tampons
tamps tams tanager tanagers tangelo tangelos tangibility tangibly
tangier tangies tangiest tangy tankful tankfuls tanneries tanners
tannery tannin tansy tapeworm tapeworms tapioca tapir tapirs taproom
taprooms taproot taproots tardily tare tared tares taring tarmac
tarmacked tarmacking tarmacs taro taros tarot tarots tarp tarpon
tarpons tarps tarragon tarragons tartly tartness taskmaster
taskmasters tastelessly tastelessness taster tasters tastiness tat
tats tatted tatter tattered tattering tatters tatting tattler tattlers
tattletale tattletales tattooist tattooists taupe tautly tautness
tautological tautologies tawdriness taxidermist taxidermists taxidermy
taxings taxonomic taxonomies taxonomy teabag teachable teakettle
teakettles teal teals tearfully teargas teargases teargassed
teargassing tearier teariest tearjerker tearjerkers tearoom tearooms
teary teasel teasels teaser teasers teaspoonful teaspoonfuls teatime
technocracy technocrat technocrats technologist technologists techs
tectonics tediousness teenier teeniest teeny telecast telecaster
telecasters telecasting telecasts telecommunication telecommute
telecommuted telecommuter telecommuters telecommutes telecommuting
teleconference teleconferenced teleconferences teleconferencing
telegrapher telegraphers telegraphic telegraphy telekinesis
telemarketing telemeter telemeters telemetries telemetry
telepathically telephonic telephony telephoto telephotos telescopic
telethon telethons teletypes teletypewriter teletypewriters
televangelist televangelists telex telexed telexes telexing tellingly
temblor temblors temerity temp temped tempera temperamentally temperas
tempestuously tempestuousness temping templates temporally temps
tempter tempters temptingly temptings temptress temptresses tempura
tenability tenaciously tendentious tendentiously tendentiousness
tenderfoot tenderfoots tenderhearted tenderloin tenderloins tendinitis
tenfold tenfolds tenon tenons tenpin tenpins tensely tenseness tensile
tensor tenuously tenuousness tequila tequilas tercentenaries
tercentenary termagant termagants terminable terminations
terminological tern terned terning terns terrapin terrapins terrarium
terrariums terrifically terrifyingly terry tertiary testamentary
testate testates testier testiest testily testiness testosterone testy
tetrahedron tetrahedrons textural thalami thalamus thallium
thankfulness thanklessly thanksgiving thanksgivings theatrically theed
theeing thees theism theistic thematic thematically thematics
thenceforth thenceforward thenceforwards theocracies theocracy
theocratic theoretician theoreticians theosophy therapeutically
therapeutics thereabout therefrom thereto therewith thermally
thermionic thermodynamic thermonuclear thermoplastic thermoplastics
thermos thermoses thermostatic thermostatics thespian thespians
thiamine thickener thickeners thickenings thickset thieved thievery
thieving thievish thighbone thighbones thimbleful thimblefuls thine
thingamajig thingamajigs thinners thinness thirdly thirstily
thistledown thither tho thoracic thorax thoraxes thorium thoroughgoing
thoroughness thoughtlessness thrall thralldom thralled thralling
thralls thrasher thrashers thrashings threateningly threatenings
threefold threescore threescores threesome threesomes threnodies
threnody thriftily thriftiness thrivings throatier throatiest
throatily throatiness throaty throe throed throeing throes thromboses
thrombosis throwaways thrower throwers thrum thrummed thrumming thrums
thrush thrushes thruway thruways thumbnail thumbnails thumbscrew
thumbscrews thunderclap thunderclaps thundercloud thunderclouds
thunderhead thunderheads thundershower thundershowers thwack thwacked
thwacking thwacks thymus thymuses thyself ti tibia tibiae tic ticker
tickers tics tiddlywinks tidewater tidewaters tidily tidiness tidings
tiebreaker tiebreakers tightfisted tigress tigresses tildes tillable
tillage tiller tillers timbered timberland timberline timberlines
timbre timbres timelessness timeliness timepiece timepieces timeworn
timorous timorously timpani timpanist timpanists tincture tinctured
tinctures tincturing tinderbox tinderboxes tinfoil tinglier tingliest
tingly tinsmith tinsmiths tintinnabulation tintinnabulations tipper
tippers tipple tippled tippler tipplers tipples tippling tipsily
tipster tipsters tiptop tiptops tiredness tirelessly tirelessness
tiresomely tiresomeness tirings titan titanic titanium titans tithe
tithed tithes tithing titillation titmice titmouse tittle tittled
tittles tittling titular tizzies tizzy toadied toadies toady toadying
toastier toasties toastiest toastmaster toastmasters toasty
tobacconist tobacconists tocsin tocsins toddies toddy toehold toeholds
tofu tog togetherness toggled toggles toggling togs toiler toilers
toiletries toiletry toilette toilsome toke toked tokenism tokes toking
tolerantly toleration tollbooth tollbooths tollgate tollgates tom
tomfooleries tomfoolery toms tonalities tonality toneless toner tonier
toniest tonsillectomies tonsillectomy tonsorial tonsure tonsured
tonsures tonsuring tony toolbar toolbars toolbox toolboxes toothed
toothier toothiest toothless toothsome toothy topcoat topcoats
topically topknot topknots topless topmast topmasts topmost
topographer topographers topographic topographical topological
topologically toppings topsail topsails topside topsides topsoil toque
toques tor torchlight toreador toreadors torpid torpidity torpor
torqued torques torquing tors torsion tort torte tortes tortoiseshell
tortoiseshells torts tortuously torturer torturers torus tossup
tossups totemic touche touchingly touchstone touchstones toughly
tourism tourmaline tourney tourneys towhead towheaded towheads
townhouse townhouses townsfolk township townships townsman townsmen
towpath towpaths toxicity toxicologist toxicologists toxicology
traceable tracer traceries tracers tracery trachea tracheae
tracheotomies tracheotomy tracings tracker trackers tractable
tradesman tradesmen traditionalists traduce traduced traduces
traducing trafficker traffickers tragedian tragedians tragicomedies
tragicomedy trailblazer trailblazers traipse traipsed traipses
traipsing trajectories trajectory tram trammed trammel trammels
tramming trams tranquilly transceiver transceivers transcendence
transcendent transcendental transcendentalism transcendentalist
transcendentalists transcendentally transducer transducers transept
transepts transferal transferals transference transfiguration
transfigure transfigured transfigures transfiguring transfinite
transfix transfixed transfixes transfixing transfuse transfused
transfuses transfusing transgressor transgressors transience
transiency transitively transliterate transliterated transliterates
transliterating transliterations translucence transmigrate
transmigrated transmigrates transmigrating transmigration
transmissible transmittable transmittal transmutation transmutations
transmute transmuted transmutes transmuting transnational
transnationals transoceanic transom transoms transpiration
transplantation transponder transponders transporter transporters
transposition transpositions transsexual transsexuals transship
transshipment transshipped transshipping transships transubstantiation
transversely transvestism transvestite transvestites trapdoors
trapezoidal trappable trapshooting trashcans travail travailed
travailing travails travelogue travelogues treacherously treacled
treacles treacling treadle treadled treadles treadling treasonable
treasonous treatable treeless treetop treetops trefoil trefoils
tremolo tremolos tremulous tremulously trenchant tress tresses triad
triads triage triangulation triathlon triathlons tribalism tribesman
tribesmen tribune tribunes trice triceps tricepses triceratops
trickiness trident tridents triennial triennials trifler triflers
trifocals trig triggest triglyceride triglycerides trigonometric trike
triked trikes triking trilateral trilaterals trillionth trillionths
trimaran trimarans trimly trimmers trimmings trimness trinities
tripartite triplied triplies triply triplying triptych triptychs
trisect trisected trisecting trisects tritely triteness triumphal
triumphantly triumvirate triumvirates trivet trivets trivialities
trochee trochees troika troikas trollop trolloped trolloping trollops
trombonist trombonists tromp tromped tromping tromps troopship
troopships trope tropes tropic tropics tropism tropisms troposphere
tropospheres troth trothed trothing troths trotter trotters troubadour
troubadours troubleshoot troubleshooted troubleshooter troubleshooters
troubleshooting troubleshoots troubleshot trouper troupers trousseau
trousseaux troy troys trucker truckers truckle truckled truckles
truckling truckload truckloads truculence truculent truculently
trumpery trumpeter trumpeters truncheon truncheons trundle trundled
trundles trundling truss trussed trusses trussing trusteeship
trusteeships trustfully trustfulness trustworthiness tryst trysted
trysting trysts ts tsunami tsunamis tubbier tubbiest tubby tubeless
tuber tubercle tubercles tubercular tuberculous tuberous tubers tucker
tuckered tuckering tuckers tugboat tugboats tulle tumbledown
tumbleweed tumbleweeds tumbrel tumbrels tumid tun tunefully tuneless
tunelessly tungsten tunnies tunny tuns turbid turbojet turbojets
turboprop turboprops turbot turbots turbulently turd turds turgidity
turgidly turmeric turmerics turnabout turnabouts turnarounds turncoat
turncoats turners turnkey turnkeys turnoff turnoffs turpitude
turtledove turtledoves tush tushed tushes tushing tusked tussock
tussocks tutelage tutu tutus tux tuxes twaddle twaddled twaddles
twaddling twain tweedier tweediest tweeds tweedy tweeter tweeters
twerp twerps twiggier twiggiest twiggy twill twilled twirler twirlers
twit twits twitted twitting twofer twofers twofold twofolds twosome
twosomes tyke tykes tympanum tympanums typecast typecasting typecasts
typefaces typescripts typesetters typewrite typewrites typewriting
typewritten typewrote typo typographer typographers typographically
typography typos tyrannically tyrannosaur tyrannosaurs tyrannosaurus
tyrannosauruses tyrannous tyro tyros u ubiquitously ubiquity uh
ukulele ukuleles ulcerate ulcerated ulcerates ulcerating ulceration
ulcerous ulna ulnae ultraconservative ultraconservatives ultramarine
ultras ultrasonically ultrasound ultrasounds ululate ululated ululates
ululating um umbel umbels umber umbilical umbilici umbilicus umbrage
umbraged umbrages umbraging umiak umiaks umlaut umlauts ump umped
umping umps umpteenth unabashed unabated unabridged unabridgeds
unaccented unacceptability unaccompanied unaccustomed unacknowledged
unacquainted unadorned unadvised unafraid unaided unalterable
unalterably unannounced unanticipated unappealing unappreciated
unappreciative unapproachable unashamed unashamedly unasked
unassailable unassisted unattributed unauthenticated unavailing
unavoidably unbar unbarred unbarring unbars unbeaten unbeknown
unbelief unbend unbending unbends unbent unbidden unbind unbinding
unbinds unblushing unbolt unbolted unbolting unbolts unbosom unbosomed
unbosoming unbosoms unbound unbounded unbranded unbridled unbuckle
unbuckled unbuckles unbuckling unbutton unbuttoned unbuttoning
unbuttons uncalled uncannily uncaring uncased uncatalogued unceasingly
uncensored unceremonious unceremoniously uncertainly unchanging
uncharacteristic uncharacteristically uncharitably uncharted unchecked
uncivil unclaimed unclasp unclasped unclasping unclasps unclassified
uncleanlier uncleanliest uncleanly uncleanness unclearer unclearest
unclothe unclothed unclothes unclothing uncluttered uncoil uncoiled
uncoiling uncoils uncollected uncommitted uncommonly uncommunicative
uncomplaining uncompleted uncomplicated uncomplimentary
uncomprehending uncompressed uncompromisingly unconcern unconcernedly
unconcerning unconcerns unconquerable unconscionable unconscionably
unconsciousness unconsidered uncontaminated uncontested uncontrollably
unconventionally unconvincingly uncooked uncooperative uncoordinated
uncork uncorked uncorking uncorks uncorrelated uncorroborated
uncounted uncouple uncoupled uncouples uncoupling uncritical unction
unctions unctuous unctuously unctuousness uncultivated undated
undeceive undeceived undeceives undeceiving undecipherable undeclared
undefeated undefended undefinable undelivered undemanding
undemonstrative undependable underachieve underachieved underachiever
underachievers underachieves underachieving underact underacted
underacting underacts underage underarm underarmed underarming
underarms underbellies underbelly underbid underbidding underbids
undercarriage undercarriages undercharge undercharged undercharges
undercharging underclass underclassman underclassmen underclothes
underclothing undercoat undercoated undercoating undercoats
underdeveloped underdone underemployed underexpose underexposed
underexposes underexposing underfed underfeed underfeeding underfeeds
underfunded undergrad undergrads underhand underhandedly underlains
underling undermost underpaid underpay underpaying underpays underpin
underpinned underpinning underpinnings underpins underplay underplayed
underplaying underplays undersea underseas undersecretaries
undersecretary undersell underselling undersells undershoot
undershooting undershoots undershorts undershot undersign undersigned
undersigning undersigns undersized underskirt underskirts undersold
understaffed understandingly underused undervalue undervalued
undervalues undervaluing underwriter underwriters undeservedly
undeserving undesirability undetectable undetermined undeterred undies
undignified undiluted undiminished undisciplined undisclosed
undiscovered undiscriminating undisguised undisputed undistinguished
undivided undulant undulate undulated undulates undulating undulation
undulations unearned unease uneaten unedited
#!/usr/bin/env python
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""This file prompts the user to enter information about the repositories and
dependencies that s/he wants to clone into her/his machine.
This is the interactive mode of warhorn."""
import os
import shutil
import subprocess
import xml.etree.ElementTree
from xml.dom import minidom
from xml.etree.ElementTree import Element
import re
import sys
from utils import get_firstlevel_children, create_dir, get_repository_name, \
get_subfiles, delete_read_only, get_relative_path, get_parent_dir
def confirm_url(question, attrib_value):
""" This function recursively checks whether a given url is a valid
repository or not. If it isn't, it promps the user to enter a new url and
checks that.
:Arguments:
1. question (xml.etree.ElementTree.Element) = The question tag from data.xml
2. attrib_value (str) = the url to be checked
:Returns:
1. attrib_value (str) = valid url
"""
if not check_url_is_a_valid_repo(attrib_value):
attrib_value = raw_input("Please enter a valid URL: ")
attrib_value = confirm_url(question, attrib_value)
return attrib_value
def validate_input(question, attrib_name, answer):
""" This function validates the input values for tags in the data.xml
that will eventually become attributes in the new xml file
:Arguments:
1. question (xml.etree.ElementTree.Element) = The question tag from data.xml
2. attrib_name (str) = name of the tags from data.xml that will
eventually become attribute names
3. answer (str) = user response associated with that particular tag name
:Returns:
1. answer (str) = valid user response
"""
aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
neg_pattern = re.compile("^(n|no)$", re.IGNORECASE)
if attrib_name == "dependency":
if aff_pattern.match(answer):
return "yes"
elif neg_pattern.match(answer):
return "no"
else:
print "The command was not recognized. Please answer 'yes' or 'no'."
answer = raw_input(question.text)
answer = validate_input(question, attrib_name, answer)
if attrib_name == "clone":
if aff_pattern.match(answer):
global STATE
STATE = "yes"
return "yes"
elif neg_pattern.match(answer):
global STATE
STATE = "no"
return "no"
else:
print "The command was not recognized. Please answer 'yes' or 'no'."
answer = raw_input(question.text)
answer = validate_input(question, attrib_name, answer)
elif attrib_name == "destination":
answer = validate_path(answer)
elif attrib_name == "clean_install":
if aff_pattern.match(answer):
return "yes"
elif neg_pattern.match(answer):
return "no"
else:
print "The command was not recognized. Please answer 'yes' or 'no'."
answer = raw_input(question.text)
answer = validate_input(question, attrib_name, answer)
return answer
def show_suggestion_get_answer(node, attrib_value, suggestion):
""" This function 'suggests' a tentative url of the git repository that
the user wants to clone. The user has the ability to accept the suggested
url or reject it and give a url of his own
:Arguments:
1. node (xml.etree.ElementTree.Element) = the parent node of the
suggestion tag from data.xml
2. attrib_value (str) = contains the name of the repository that the user
wants to clone
3. suggestion (xml.etree.ElementTree.Element) = suggestion tag from data.xml
:Returns:
1. suggestion_content (str) = The complete, suggested url
2. answer (str) = valid user response
"""
aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
neg_pattern = re.compile("^(n|no)$", re.IGNORECASE)
suggestion_content = suggestion.text
if node.attrib["name"] == 'repository':
suggestion_content = suggestion_content + attrib_value + ".git"
print "The suggested URL is: " + suggestion_content
else:
print "The suggested URL is: " + suggestion_content
answer = raw_input("Do you want to use this URL? (yes[Enter]/no):")
if not (aff_pattern.match(answer) or neg_pattern.match(answer)):
print "The command was not recognized. Please answer yes or no."
suggestion_content, answer = show_suggestion_get_answer(node,
attrib_value,
suggestion)
return suggestion_content, answer
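# Illustrative example (hypothetical values): if data.xml carries the
# suggestion text "https://github.com/example-org/" and the user asked for a
# repository named "warrior", the suggested URL printed above becomes
# "https://github.com/example-org/warrior.git".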
def check_url_is_a_valid_repo(url):
""" This function checks if the url is a valid git repository or not
:Arguments:
1. url (str) = url of a repository
:Returns:
1. bool (True/False) = True is url is a valid repository, False if not
"""
try:
_ = subprocess.check_output(["git", "ls-remote", url])
except:
print url + " is not a valid git repository"
return False
print url + " is available"
return True
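# Hypothetical usage sketch (not part of the original script): filtering a
# list of candidate URLs down to the ones `git ls-remote` can reach. The URL
# below is a placeholder.
def _example_filter_valid_repos(urls=("https://github.com/example-org/warrior.git",)):
    """Return only the URLs that point at reachable git repositories."""
    return [url for url in urls if check_url_is_a_valid_repo(url)]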
def get_driver_list(pd_file_names):
"""
:Arguments:
1. pd_file_names (list[str]) = All the files names obtained from the
ProductDrivers directory of the repository that is temporarily cloned
into the user's machine
:Returns:
1. driver_list (list[str]) = This list contains serial numbers (as strings)
and the driver names alternately
eg: ['1', driver_name_1, '2', driver_name_2, '3', driver_name_3]
"""
subfiles = []
driver_list = []
for subfile in pd_file_names:
if subfile.endswith('.py'):
subfiles.append(subfile)
for i, subfile in zip(range(0, len(subfiles)), subfiles):
driver_list.append(str(i+1))
driver_list.append(subfile)
return driver_list
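# Worked example (illustrative file names): for pd_file_names of
# ['cli_driver.py', 'rest_driver.py', 'README.md'] the non-.py entry is
# skipped and get_driver_list() returns
# ['1', 'cli_driver.py', '2', 'rest_driver.py']; note that the serial
# numbers are strings, not ints.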
def get_corresponding_numbers():
"""This function validates the string of numbers entered by the user. Any
alphabet, characters that may be found are elimintaed. a single 0 is also
eliminated here. The string is converted into a list of int. space is used
as a separator.
:Returns:
1. number_list (list[int]) = list of numbers
"""
number_set = set()
answer = raw_input("Please enter the corresponding number of the drivers "
"you want to clone. Separate the numbers with a space: ")
pattern = re.compile("^[0-9]*$")
for characters in answer.split():
if pattern.match(characters):
if characters == '0':
print characters + " does not have a corresponding driver."
else:
number_set.add(int(characters))
else:
print characters + " is not a valid number"
return list(number_set)
def add_drivers_to_tags(tag, drivers, driver_numbers):
""" This function appends the driver tags sets the attributes and
attribute names to the corresponding driver tag
:Arguments:
1. tag (xml.etree.ElementTree.Element) = Current tag to which the newly
formed driver tags may be appended
2. drivers (list[str]) = list of driver names available to the user
3. driver_numbers (list[int]) = list of the numbers which correspond to
the driver names that the user wants.
"""
print "Selected drivers:"
for driver_number in driver_numbers:
driver_number = driver_number * 2 - 1
if driver_number > len(drivers):
print "Corresponding driver for " + str((driver_number+1)/2) +\
" not found."
continue
print str((driver_number+1)/2) + ". " + drivers[driver_number]
driver_tag = Element("driver")
driver_tag.set("name", drivers[driver_number])
tag.append(driver_tag)
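# Worked example (illustrative values): with drivers
# ['1', 'cli_driver.py', '2', 'rest_driver.py'], a user selection of 2 maps
# to index 2 * 2 - 1 = 3, i.e. 'rest_driver.py', and a
# <driver name="rest_driver.py"/> child is appended to the tag. A selection
# of 3 falls outside the list and is reported as
# "Corresponding driver for 3 not found."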
def transform_response(answer):
""" This function normalizes a yes/no response to "yes" or "no".
Unrecognized responses are returned unchanged so that the caller can
re-prompt the user.
:Arguments:
1. answer (str) = raw user response
:Returns:
1. answer (str) = "yes", "no", or the original unrecognized response
"""
aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
neg_pattern = re.compile("^(n|no)$", re.IGNORECASE)
if aff_pattern.match(answer):
return "yes"
if neg_pattern.match(answer):
return "no"
return answer
def clone_kw_repo(root, question, attrib_value, tag):
""" This function clones the keyword repository if the user wants
individual drivers of that repository. Otherwise, it just sets the
attribute "all_drivers" of that repository tag as 'yes'
:Arguments:
1. root (xml.etree.ElementTree.Element) = parent node of the current tag
from data.xml
2. question (xml.etree.ElementTree.Element) = question tag nested under
the current tag from data.xml
3. attrib_value (str) = url of the repository in question
4. tag (xml.etree.ElementTree.Element) = current tag to which the
repository tags would be appended.
:Returns:
1. attrib_value (str) = valid url
"""
aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
neg_pattern = re.compile("^(n|no)$", re.IGNORECASE)
if not check_url_is_a_valid_repo(attrib_value):
attrib_value = raw_input("Please enter a valid url: ")
attrib_value = clone_kw_repo(root, question, attrib_value, tag)
else:
if root.tag == "drivers":
name = get_repository_name(attrib_value)
answer = raw_input("Do you want to clone all the drivers? "
"(yes[Enter]/no): ")
answer = transform_response(answer)
tag.set("all_drivers", answer)
if neg_pattern.match(answer):
current_dir = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(current_dir, "temp")
create_dir(path)
os.chdir(path)
subprocess.call(["git", "clone", attrib_value])
os.chdir(current_dir)
temp, _ = get_subfiles(os.path.join(path, name,
'ProductDrivers'))
drivers = get_driver_list(temp)
for i in range(1, len(drivers), 2):
print drivers[i-1] + ". " + drivers[i]
driver_numbers = get_corresponding_numbers()
add_drivers_to_tags(tag, drivers, driver_numbers)
shutil.rmtree(path, onerror=delete_read_only)
elif not aff_pattern.match(answer):
print "The command was not recognized. Please answer yes or no."
attrib_value = clone_kw_repo(root, question, attrib_value, tag)
return attrib_value
def populate_repo_tags(root, current_element, attrib_value, node, tag):
""" This function retrieves information from data.xml and asks user for
input. It also sets the attributes and their corresponding values of the
tag in question.
:Arguments:
1. root (xml.etree.ElementTree.Element) = parent of the current tag
2. current_element (xml.etree.ElementTree.Element) = current tag that is
being evaluated from data.xml
3. attrib_value (str) = user response
4. node (xml.etree.ElementTree.Element) = node used to create the tag
5. tag (xml.etree.ElementTree.Element) = tag created using the node
:Returns:
1. attrib_value (str) = valid user response
"""
aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE)
neg_pattern = re.compile("^(n|no)$", re.IGNORECASE)
answer = "no"
suggestion_content = ""
info = current_element.find("info")
if info is not None:
print info.text
suggestion = current_element.find("suggestion")
if suggestion is not None:
suggestion_content = suggestion.text +
= len(self.config[component].get(ck.PARTIAL_REPAIR, {}))
if num_repair_modes != 1 and num_repair_modes != num_failure_modes:
raise CaseError(
f"Number of concurrent repairs for component '{component}' must be 1 or equal to the number of concurrent failures"
)
for failure, fail_config in self.config[component].get(ck.FAILURE, {}).items():
fails = set(ck.failure_keys)
if component == ck.INVERTER:
# inverters may have cost_per_watt specified instead of cost
fails.discard(ck.COST)
included = fails & set(fail_config.keys())
if included != fails:
missing += list(fails - included)
unknown_keys = set(fail_config.keys()) - fails - {ck.FRAC, ck.COST, ck.COST_PER_WATT, ck.DECAY_FRAC}
if unknown_keys:
logger.warning(f"Unknown keys in failure configuration {failure}: {unknown_keys}")
keys = set(fail_config.keys())
if ck.FRAC in keys and ck.DECAY_FRAC in keys:
raise CaseError(f"Must specify either `fraction` or `decay_fraction`, not both for '{component}'")
# update cost for inverters
if component == ck.INVERTER:
if fail_config.get(ck.COST, None) is None and fail_config.get(ck.COST_PER_WATT, None) is None:
missing.append(ck.COST)
if fail_config.get(ck.COST_PER_WATT, None) is not None:
# calculate costs based on cents/watt
self.config[component][ck.FAILURE][failure][ck.COST] = (
fail_config[ck.COST_PER_WATT] * self.config[ck.INVERTER_SIZE]
)
if fail_config.get(ck.DIST, None) in ck.dists:
check_params(component, failure, fail_config)
# partial failure check
for failure, fail_config in self.config[component].get(ck.PARTIAL_FAIL, {}).items():
fails = set(ck.partial_failure_keys)
if component == ck.INVERTER:
# inverters may have cost_per_watt specified instead of cost
fails.discard(ck.COST)
included = fails & set(fail_config.keys())
if included != fails:
missing += list(fails - included)
unknown_keys = set(fail_config.keys()) - fails - {ck.FRAC, ck.COST, ck.COST_PER_WATT, ck.DECAY_FRAC}
if unknown_keys:
logger.warning(f"Unknown keys in concurrent failure configuration {failure}: {unknown_keys}")
keys = set(fail_config.keys())
if ck.FRAC in keys and ck.DECAY_FRAC in keys:
raise CaseError(f"Must specify either `fraction` or `decay_fraction`, not both for '{component}'")
# update cost for inverters
if component == ck.INVERTER:
if fail_config.get(ck.COST, None) is None and fail_config.get(ck.COST_PER_WATT, None) is None:
missing.append(ck.COST)
if fail_config.get(ck.COST_PER_WATT, None) is not None:
# calculate costs based on cents/watt
self.config[component][ck.PARTIAL_FAIL][failure][ck.COST] = (
fail_config[ck.COST_PER_WATT] * self.config[ck.INVERTER_SIZE]
)
if fail_config.get(ck.DIST, None) in ck.dists:
check_params(component, failure, fail_config)
for monitor, monitor_config in self.config[component].get(ck.MONITORING, {}).items():
monitor_ = set(ck.monitoring_keys)
included = monitor_ & set(monitor_config.keys())
if included != monitor_:
missing += list(monitor_ - included)
unknown_keys = set(monitor_config.keys()) - monitor_
if unknown_keys:
logger.warning(f"Unknown keys in monitoring configuration {monitor}: {unknown_keys}")
if monitor_config.get(ck.DIST, None) in ck.dists:
check_params(component, monitor, monitor_config)
for repair, repair_config in self.config[component].get(ck.REPAIR, {}).items():
repairs_ = set(ck.repair_keys)
included = repairs_ & set(repair_config.keys())
if included != repairs_:
missing += list(repairs_ - included)
unknown_keys = set(repair_config.keys()) - repairs_
if unknown_keys:
logger.warning(f"Unknown keys in repair configuration {repair}: {unknown_keys}")
if repair_config.get(ck.DIST, None) in ck.dists:
check_params(component, repair, repair_config)
# partial repairs
for repair, repair_config in self.config[component].get(ck.PARTIAL_REPAIR, {}).items():
repairs_ = set(ck.partial_repair_keys)
included = repairs_ & set(repair_config.keys())
if included != repairs_:
missing += list(repairs_ - included)
unknown_keys = set(repair_config.keys()) - repairs_
if unknown_keys:
logger.warning(f"Unknown keys in concurrent repair configuration {repair}: {unknown_keys}")
if repair_config.get(ck.DIST, None) in ck.dists:
check_params(component, repair, repair_config)
if missing:
raise CaseError(f"Missing configurations for component '{component}': {missing}")
if self.config[ck.STR_PER_COMBINER] * self.config[ck.NUM_COMBINERS] != self.num_strings:
raise CaseError("There must be an integer number of strings per combiner!")
if self.config[ck.INVERTER_PER_TRANS] * self.config[ck.NUM_TRANSFORMERS] != self.num_inverters:
raise CaseError("There must be an integer number of inverters per transformer!")
# add the number of each component to its configuration information
if component == ck.MODULE:
self.config[component][ck.NUM_COMPONENT] = int(self.num_modules)
elif component == ck.STRING:
self.config[component][ck.NUM_COMPONENT] = int(self.num_strings)
elif component == ck.COMBINER:
self.config[component][ck.NUM_COMPONENT] = self.config[ck.NUM_COMBINERS]
elif component == ck.INVERTER:
self.config[component][ck.NUM_COMPONENT] = int(self.num_inverters)
elif component == ck.DISCONNECT:
self.config[component][ck.NUM_COMPONENT] = int(self.num_disconnects)
elif component == ck.TRANSFORMER:
self.config[component][ck.NUM_COMPONENT] = self.config[ck.NUM_TRANSFORMERS]
elif component == ck.GRID:
self.config[component][ck.NUM_COMPONENT] = 1
elif component == ck.TRACKER:
self.config[component][ck.NUM_COMPONENT] = self.config[ck.NUM_TRACKERS]
# make directory for results if it doesn't exist
os.makedirs(self.config[ck.RESULTS_FOLDER], exist_ok=True)
if self.config[ck.TRACKING] and self.config[ck.TRACKER][ck.CAN_FAIL]:
self.precalculate_tracker_losses()
def __verify_case(self) -> None:
"""
Verifies loaded module configuration from SAM and also sets class variables for some information about the case.
"""
# since the module order for the simulation is now determined automatically, clear any order set in older pvrpm config files
self.config[ck.MODULE_ORDER] = None
# set up the module order for the simulation; we also need to check that
# an LCOE calculator that supports lifetime is used; in this case there is
# only 1 unsupported calculator, plus the case where no calculator is present
my_modules = list(self.modules.keys())
for module_loadout in self.module_orders:
if len(module_loadout) != len(self.modules):
continue
found = True
for module in module_loadout:
if module not in my_modules:
found = False
break
if found:
self.config[ck.MODULE_ORDER] = module_loadout
break
if self.config[ck.MODULE_ORDER] is None:
for module_loadout in self.bad_module_orders:
if len(module_loadout) != len(self.modules):
continue
found = True
for module in module_loadout:
if module not in my_modules:
found = False
break
if found:
raise CaseError(
"You have either selected the `LCOE Calculator (FCR Method)`, `Third Party Owner - Host` or `No Financial Model` for your financial model, which PVRPM does not support. Please select a supported financial model."
)
raise CaseError(
"You have selected an unknown financial model or are not using the `Detailed Photovoltaic Model`. Please update your model to a supported model."
)
if self.value("en_dc_lifetime_losses") or self.value("en_ac_lifetime_losses"):
logger.warning("Lifetime daily DC and AC losses will be overridden for this run.")
if self.value("om_fixed") != [0]:
logger.warning(
"There is a non-zero value in the fixed annual O&M costs input. These will be overwritten with the new values."
)
if self.value("dc_degradation") != [0]:
logger.warning(
"Degradation is set by the PVRPM script, you have entered a non-zero degradation to the degradation input. This script will set the degradation input to zero."
)
self.value("dc_degradation", [0])
if ck.NUM_TRANSFORMERS not in self.config or self.config[ck.NUM_TRANSFORMERS] < 1:
raise CaseError("Number of transformers must be greater than 0!")
self.num_modules = 0
self.num_strings = 0
# assume the number of modules per string is the same for each subarray
self.config[ck.MODULES_PER_STR] = int(self.value("subarray1_modules_per_string"))
self.config[ck.TRACKING] = False
self.config[ck.MULTI_SUBARRAY] = 0
for sub in range(1, 5):
if sub == 1 or self.value(f"subarray{sub}_enable"): # subarry 1 is always enabled
self.num_modules += self.value(f"subarray{sub}_modules_per_string") * self.value(
f"subarray{sub}_nstrings"
)
self.num_strings += self.value(f"subarray{sub}_nstrings")
if self.value(f"subarray{sub}_track_mode"):
self.config[ck.TRACKING] = True
self.config[ck.MULTI_SUBARRAY] += 1
inverter = self.value("inverter_model")
if inverter == 0:
self.config[ck.INVERTER_SIZE] = self.value("inv_snl_paco")
elif inverter == 1:
self.config[ck.INVERTER_SIZE] = self.value("inv_ds_paco")
elif inverter == 2:
self.config[ck.INVERTER_SIZE] = self.value("inv_pd_paco")
else:
raise CaseError("Unknown inverter model! Should be 0, 1, or 2")
if self.config[ck.MULTI_SUBARRAY] > 1 and self.config[ck.TRACKING]:
raise CaseError(
"Tracker failures may only be modeled for a system consisting of a single subarray. Exiting simulation."
)
if self.config[ck.TRACKING]:
if self.value("subarray1_track_mode") == 2 or self.value("subarray1_track_mode") == 3:
raise CaseError(
"This script is not configured to run with 2-axis tracking or azimuth-axis tracking systems."
)
# assume 1 AC disconnect per inverter
self.num_inverters = self.value("inverter_count")
self.num_disconnects = self.num_inverters
self.config[ck.STR_PER_COMBINER] = int(np.floor(self.num_strings / self.config[ck.NUM_COMBINERS]))
self.config[ck.COMBINER_PER_INVERTER] = int(np.floor(self.config[ck.NUM_COMBINERS] / self.num_inverters))
self.config[ck.INVERTER_PER_TRANS] = int(np.floor(self.num_inverters / self.config[ck.NUM_TRANSFORMERS]))
self.config[ck.LIFETIME_YRS] = int(self.value("analysis_period"))
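# Worked example (illustrative numbers): a plant with 1,000 strings, 10
# combiners, 5 inverters and 1 transformer gives 100 strings per combiner,
# 2 combiners per inverter and 5 inverters per transformer; the divisibility
# checks earlier in this class require the strings-per-combiner and
# inverters-per-transformer ratios to come out as whole numbers.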
# for pickling
def __getstate__(self) -> dict:
"""
Converts the case into a dictionary for pickling
"""
state = self.__dict__.copy()
del state["modules"]
del state["ssc"]
return state
def __setstate__(self, state: dict) -> None:
"""
Creates the object from a dictionary
"""
self.__dict__ = state
self.ssc = pssc.PySSC()
self.modules = self.__load_modules()
def get_npv(self):
"""
Returns the NPV for the case after a simulation has been ran, regardless of financial model used.
"""
try:
return self.output("npv")
except AttributeError:
pass
try:
return np.array(self.output("cf_project_return_aftertax_npv")).sum()
except AttributeError:
pass
try:
return self.output("tax_investor_aftertax_npv")
except AttributeError:
return None
def precalculate_tracker_losses(self):
"""
Precalculate_tracker_losses calculates an array of coefficients (one for every day of the year) that account for the "benefit" of trackers on that particular day. This is used to determine how much power is lost if a tracker fails.
"""
if self.value("subarray1_tilt") != 0:
raise CaseError("This script can only model tracker failures for 0 degree tilt trackers.")
user_analysis_period = self.value("analysis_period")
self.value("analysis_period", 1)
self.value("en_ac_lifetime_losses", 0)
self.value("en_dc_lifetime_losses", 0)
self.simulate()
timeseries_with_tracker = self.output("dc_net")
# calculate timeseries performance without trackers for one year
user_tracking_mode = self.value("subarray1_track_mode")
user_azimuth = self.value("subarray1_azimuth")
user_tilt = self.value("subarray1_tilt")
self.value("subarray1_track_mode", 0) # fixed tilt
if user_azimuth > 360 or user_azimuth < 0:
raise CaseError("Azimuth must be between 0 and 360. Please adjust the azimuth and try again.")
if self.config[ck.WORST_TRACKER]:
# assume the worst-case tracker gets stuck facing north; if the axis is north-south, assume it gets stuck facing west.
worst_case_az = user_azimuth
if user_azimuth < 180:
worst_case_az -= 90
else:
worst_case_az += 90
if worst_case_az < 0:
worst_case_az
version of 10557.epub.images"
http.queue_response(**epub10557_updated)
imported_editions, pools, works, failures = importer.import_from_feed(
self.content_server_mini_feed
)
assert {e_10441, e_10557} == set(imported_editions)
assert 4 == len(s3_for_books.uploaded)
assert epub10441_updated["content"] in s3_for_books.content[-2:]
assert svg_bytes == s3_for_covers.content.pop()
assert epub10557_updated["content"] in s3_for_books.content[-2:]
def test_content_resources_not_mirrored_on_import_if_no_collection(
self,
http,
svg,
epub10557_cover_broken,
epub10557_cover_working,
epub10441_cover,
):
# If you don't provide a Collection to the OPDSImporter, no
# LicensePools are created for the book and content resources
# (like EPUB editions of the book) are not mirrored. Only
# metadata resources (like the book cover) are mirrored.
# The request to http://root/broken-cover-image
# will result in a 404 error, and the image will not be mirrored.
http.queue_response(**epub10557_cover_broken)
http.queue_response(**epub10557_cover_working)
http.queue_response(**epub10441_cover)
s3 = MockS3Uploader()
mirrors = dict(covers_mirror=s3)
importer = OPDSImporter(
self._db, collection=None, mirrors=mirrors, http_get=http.do_get
)
imported_editions, pools, works, failures = importer.import_from_feed(
self.content_server_mini_feed, feed_url="http://root"
)
# No LicensePools were created, since no Collection was
# provided.
assert [] == pools
# The import process requested each remote resource in the
# order they appeared in the OPDS feed. The EPUB resources
# were not requested because no Collection was provided to the
# importer. The thumbnail image was not requested, since we
# were going to make our own thumbnail anyway.
assert len(http.requests) == 3
assert set(http.requests) == {
epub10441_cover["url"],
epub10557_cover_broken["url"],
epub10557_cover_working["url"],
}
class TestOPDSImportMonitor(OPDSImporterTest):
def test_constructor(self):
with pytest.raises(ValueError) as excinfo:
OPDSImportMonitor(self._db, None, OPDSImporter)
assert (
"OPDSImportMonitor can only be run in the context of a Collection."
in str(excinfo.value)
)
self._default_collection.external_integration.protocol = (
ExternalIntegration.OVERDRIVE
)
with pytest.raises(ValueError) as excinfo:
OPDSImportMonitor(self._db, self._default_collection, OPDSImporter)
assert (
"Collection Default Collection is configured for protocol Overdrive, not OPDS Import."
in str(excinfo.value)
)
self._default_collection.external_integration.protocol = (
ExternalIntegration.OPDS_IMPORT
)
self._default_collection.external_integration.setting(
"data_source"
).value = None
with pytest.raises(ValueError) as excinfo:
OPDSImportMonitor(self._db, self._default_collection, OPDSImporter)
assert "Collection Default Collection has no associated data source." in str(
excinfo.value
)
def test_external_integration(self):
monitor = OPDSImportMonitor(
self._db,
self._default_collection,
import_class=OPDSImporter,
)
assert (
self._default_collection.external_integration
== monitor.external_integration(self._db)
)
def test__run_self_tests(self):
"""Verify the self-tests of an OPDS collection."""
class MockImporter(OPDSImporter):
def assert_importable_content(self, content, url):
self.assert_importable_content_called_with = (content, url)
return "looks good"
class Mock(OPDSImportMonitor):
follow_one_link_called_with = []
# First we will get the first page of the OPDS feed.
def follow_one_link(self, url):
self.follow_one_link_called_with.append(url)
return ([], "some content")
feed_url = self._url
self._default_collection.external_account_id = feed_url
monitor = Mock(self._db, self._default_collection, import_class=MockImporter)
[first_page, found_content] = monitor._run_self_tests(self._db)
expect = "Retrieve the first page of the OPDS feed (%s)" % feed_url
assert expect == first_page.name
assert True == first_page.success
assert ([], "some content") == first_page.result
# follow_one_link was called once.
[link] = monitor.follow_one_link_called_with
assert monitor.feed_url == link
# Then, assert_importable_content was called on the importer.
assert "Checking for importable content" == found_content.name
assert True == found_content.success
assert (
"some content",
feed_url,
) == monitor.importer.assert_importable_content_called_with
assert "looks good" == found_content.result
def test_hook_methods(self):
"""By default, the OPDS URL and data source used by the importer
come from the collection configuration.
"""
monitor = OPDSImportMonitor(
self._db,
self._default_collection,
import_class=OPDSImporter,
)
assert self._default_collection.external_account_id == monitor.opds_url(
self._default_collection
)
assert self._default_collection.data_source == monitor.data_source(
self._default_collection
)
def test_feed_contains_new_data(self):
feed = self.content_server_mini_feed
class MockOPDSImportMonitor(OPDSImportMonitor):
def _get(self, url, headers):
return 200, {"content-type": AtomFeed.ATOM_TYPE}, feed
monitor = OPDSImportMonitor(
self._db,
self._default_collection,
import_class=OPDSImporter,
)
timestamp = monitor.timestamp()
# Nothing has been imported yet, so all data is new.
assert True == monitor.feed_contains_new_data(feed)
assert None == timestamp.start
# Now import the editions.
monitor = MockOPDSImportMonitor(
self._db,
collection=self._default_collection,
import_class=OPDSImporter,
)
monitor.run()
# Editions have been imported.
assert 2 == self._db.query(Edition).count()
# The timestamp has been updated, although unlike most
# Monitors the timestamp is purely informational.
assert timestamp.finish != None
editions = self._db.query(Edition).all()
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
# If there are CoverageRecords recording work done after the updated
# dates, there's nothing new.
record, ignore = CoverageRecord.add_for(
editions[0], data_source, CoverageRecord.IMPORT_OPERATION
)
record.timestamp = datetime_utc(2016, 1, 1, 1, 1, 1)
record2, ignore = CoverageRecord.add_for(
editions[1], data_source, CoverageRecord.IMPORT_OPERATION
)
record2.timestamp = datetime_utc(2016, 1, 1, 1, 1, 1)
assert False == monitor.feed_contains_new_data(feed)
# If the monitor is set up to force reimport, it doesn't
# matter that there's nothing new--we act as though there is.
monitor.force_reimport = True
assert True == monitor.feed_contains_new_data(feed)
monitor.force_reimport = False
# If an entry was updated after the date given in that entry's
# CoverageRecord, there's new data.
record2.timestamp = datetime_utc(1970, 1, 1, 1, 1, 1)
assert True == monitor.feed_contains_new_data(feed)
# If a CoverageRecord is a transient failure, we try again
# regardless of whether it's been updated.
for r in [record, record2]:
r.timestamp = datetime_utc(2016, 1, 1, 1, 1, 1)
r.exception = "Failure!"
r.status = CoverageRecord.TRANSIENT_FAILURE
assert True == monitor.feed_contains_new_data(feed)
# If a CoverageRecord is a persistent failure, we don't try again...
for r in [record, record2]:
r.status = CoverageRecord.PERSISTENT_FAILURE
assert False == monitor.feed_contains_new_data(feed)
# ...unless the feed updates.
record.timestamp = datetime_utc(1970, 1, 1, 1, 1, 1)
assert True == monitor.feed_contains_new_data(feed)
def http_with_feed(self, feed, content_type=OPDSFeed.ACQUISITION_FEED_TYPE):
"""Helper method to make a DummyHTTPClient with a
successful OPDS feed response queued.
"""
http = DummyHTTPClient()
http.queue_response(200, content_type, content=feed)
return http
def test_follow_one_link(self):
monitor = OPDSImportMonitor(
self._db, collection=self._default_collection, import_class=OPDSImporter
)
feed = self.content_server_mini_feed
http = DummyHTTPClient()
# If there's new data, follow_one_link extracts the next links.
def follow():
return monitor.follow_one_link("http://url", do_get=http.do_get)
http.queue_response(200, OPDSFeed.ACQUISITION_FEED_TYPE, content=feed)
next_links, content = follow()
assert 1 == len(next_links)
assert "http://localhost:5000/?after=327&size=100" == next_links[0]
assert feed.encode("utf-8") == content
# Now import the editions and add coverage records.
monitor.importer.import_from_feed(feed)
assert 2 == self._db.query(Edition).count()
editions = self._db.query(Edition).all()
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
for edition in editions:
record, ignore = CoverageRecord.add_for(
edition, data_source, CoverageRecord.IMPORT_OPERATION
)
record.timestamp = datetime_utc(2016, 1, 1, 1, 1, 1)
# If there's no new data, follow_one_link returns no next
# links and no content.
#
# Note that this works even when the media type is imprecisely
# specified as Atom or bare XML.
for imprecise_media_type in OPDSFeed.ATOM_LIKE_TYPES:
http.queue_response(200, imprecise_media_type, content=feed)
next_links, content = follow()
assert 0 == len(next_links)
assert None == content
http.queue_response(200, AtomFeed.ATOM_TYPE, content=feed)
next_links, content = follow()
assert 0 == len(next_links)
assert None == content
# If the media type is missing or is not an Atom feed,
# an exception is raised.
http.queue_response(200, None, content=feed)
with pytest.raises(BadResponseException) as excinfo:
follow()
assert "Expected Atom feed, got None" in str(excinfo.value)
http.queue_response(200, "not/atom", content=feed)
with pytest.raises(BadResponseException) as excinfo:
follow()
assert "Expected Atom feed, got not/atom" in str(excinfo.value)
def test_import_one_feed(self):
# Check coverage records are created.
monitor = OPDSImportMonitor(
self._db,
collection=self._default_collection,
import_class=DoomedOPDSImporter,
)
self._default_collection.external_account_id = "http://root-url/index.xml"
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
feed = self.content_server_mini_feed
imported, failures = monitor.import_one_feed(feed)
editions = self._db.query(Edition).all()
# One edition has been imported
assert 1 == len(editions)
[edition] = editions
# The return value of import_one_feed includes the imported
# editions.
assert [edition] == imported
# That edition has a CoverageRecord.
record = CoverageRecord.lookup(
editions[0].primary_identifier,
data_source,
operation=CoverageRecord.IMPORT_OPERATION,
)
assert CoverageRecord.SUCCESS == record.status
assert None == record.exception
# The edition's primary identifier has some cover links whose
# relative URL have been resolved relative to the Collection's
# external_account_id.
covers = set(
[
x.resource.url
for x in editions[0].primary_identifier.links
if x.rel == Hyperlink.IMAGE
]
)
assert covers == set(
[
"http://root-url/broken-cover-image",
"http://root-url/working-cover-image",
]
)
# The 202 status message in the feed caused a transient failure.
# The exception caused a persistent failure.
coverage_records = self._db.query(CoverageRecord).filter(
CoverageRecord.operation == CoverageRecord.IMPORT_OPERATION,
CoverageRecord.status != CoverageRecord.SUCCESS,
)
assert sorted(
[CoverageRecord.TRANSIENT_FAILURE, CoverageRecord.PERSISTENT_FAILURE]
) == sorted([x.status for x in coverage_records])
identifier, ignore = Identifier.parse_urn(
self._db, "urn:librarysimplified.org/terms/id/Gutenberg%20ID/10441"
)
failure = CoverageRecord.lookup(
identifier, data_source, operation=CoverageRecord.IMPORT_OPERATION
)
assert "Utter failure!" in failure.exception
# Both failures were reported in the return value from
# import_one_feed
assert 2 == len(failures)
def test_run_once(self):
class MockOPDSImportMonitor(OPDSImportMonitor):
def __init__(self, *args, **kwargs):
super(MockOPDSImportMonitor, self).__init__(*args, **kwargs)
self.responses = []
self.imports = []
def queue_response(self, response):
self.responses.append(response)
def follow_one_link(self, link, cutoff_date=None, do_get=None):
return self.responses.pop()
def import_one_feed(self, feed):
# Simulate two successes and one failure on every page.
self.imports.append(feed)
return [object(), object()], {"identifier": "Failure"}
monitor = MockOPDSImportMonitor(
self._db, collection=self._default_collection, import_class=OPDSImporter
)
monitor.queue_response([[], "last page"])
monitor.queue_response([["second next link"], "second page"])
monitor.queue_response([["next link"], "first page"])
progress = monitor.run_once(object())
# Feeds are imported in reverse order
assert ["last page", "second | |
# Repository: ubragg/endpoints-management-python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""check_request supports aggregation of CheckRequests.
:func:`sign` generates a signature from CheckRequests
:class:`~endpoints_management.gen.servicecontrol_v1_message.Operation` represents
information regarding an operation, and is a key constituent of
:class:`~endpoints_management.gen.servicecontrol_v1_message.CheckRequest` and
:class:`~endpoints_management.gen.servicecontrol_v1_message.ReportRequests`.
The :class:`.Aggregator` implements the strategy for aggregating CheckRequests
and caching their responses.
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
import collections
import hashlib
import http.client
import logging
from datetime import datetime
from apitools.base.py import encoding
from . import (caches, label_descriptor, metric_value, operation, sc_messages,
signing)
from .. import USER_AGENT, SERVICE_AGENT
_logger = logging.getLogger(__name__)
# alias for brevity
_CheckErrors = sc_messages.CheckError.CodeValueValuesEnum
_IS_OK = (http.client.OK, u'', True)
_IS_UNKNOWN = (
http.client.INTERNAL_SERVER_ERROR,
u'Request blocked due to unsupported block reason {detail}',
False)
_CHECK_ERROR_CONVERSION = {
_CheckErrors.NOT_FOUND: (
http.client.BAD_REQUEST,
u'Client project not found. Please pass a valid project',
False,
),
_CheckErrors.API_KEY_NOT_FOUND: (
http.client.BAD_REQUEST,
u'API key not found. Please pass a valid API key',
True,
),
_CheckErrors.API_KEY_EXPIRED: (
http.client.BAD_REQUEST,
u'API key expired. Please renew the API key',
True,
),
_CheckErrors.API_KEY_INVALID: (
http.client.BAD_REQUEST,
u'API not valid. Please pass a valid API key',
True,
),
_CheckErrors.SERVICE_NOT_ACTIVATED: (
http.client.FORBIDDEN,
u'{detail} Please enable the project for {project_id}',
False,
),
_CheckErrors.PERMISSION_DENIED: (
http.client.FORBIDDEN,
u'Permission denied: {detail}',
False,
),
_CheckErrors.IP_ADDRESS_BLOCKED: (
http.client.FORBIDDEN,
u'{detail}',
False,
),
_CheckErrors.REFERER_BLOCKED: (
http.client.FORBIDDEN,
u'{detail}',
False,
),
_CheckErrors.CLIENT_APP_BLOCKED: (
http.client.FORBIDDEN,
u'{detail}',
False,
),
_CheckErrors.PROJECT_DELETED: (
http.client.FORBIDDEN,
u'Project {project_id} has been deleted',
False,
),
_CheckErrors.PROJECT_INVALID: (
http.client.BAD_REQUEST,
u'Client Project is not valid. Please pass a valid project',
False,
),
_CheckErrors.VISIBILITY_DENIED: (
http.client.FORBIDDEN,
u'Project {project_id} has no visibility access to the service',
False,
),
_CheckErrors.BILLING_DISABLED: (
http.client.FORBIDDEN,
u'Project {project_id} has billing disabled. Please enable it',
False,
),
# Fail open for internal server errors
_CheckErrors.NAMESPACE_LOOKUP_UNAVAILABLE: _IS_OK,
_CheckErrors.SERVICE_STATUS_UNAVAILABLE: _IS_OK,
_CheckErrors.BILLING_STATUS_UNAVAILABLE: _IS_OK,
_CheckErrors.QUOTA_CHECK_UNAVAILABLE: _IS_OK,
}
def convert_response(check_response, project_id):
"""Computes a http status code and message `CheckResponse`
The return value a tuple (code, message, api_key_is_bad) where
code: is the http status code
message: is the message to return
api_key_is_bad: indicates that a given api_key is bad
Args:
check_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`):
the response from calling an api
Returns:
tuple(code, message, bool)
"""
if not check_response or not check_response.checkErrors:
return _IS_OK
# only check the first error for now, as per ESP
theError = check_response.checkErrors[0]
error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
if error_tuple[1].find(u'{') == -1: # no replacements needed:
return error_tuple
updated_msg = error_tuple[1].format(project_id=project_id, detail=theError.detail or u'')
return error_tuple[0], updated_msg, error_tuple[2]
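# Hedged usage sketch (the web-framework helper below is illustrative, not
# part of this module): translating a check result into an HTTP reply.
#
#   code, message, api_key_is_bad = convert_response(check_resp, project_id)
#   if code != http.client.OK:
#       return abort(code, message)   # hypothetical framework helper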
def sign(check_request):
"""Obtains a signature for an operation in a `CheckRequest`
Args:
check_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckRequest`):
the request whose operation is used to generate the signature
Returns:
string: a secure hash generated from the operation
"""
if not isinstance(check_request, sc_messages.CheckRequest):
raise ValueError(u'Invalid request')
op = check_request.operation
if op is None or op.operationName is None or op.consumerId is None:
logging.error(u'Bad %s: not initialized => not signed', check_request)
raise ValueError(u'check request must be initialized with an operation')
md5 = hashlib.md5()
md5.update(op.operationName.encode('utf-8'))
md5.update(b'\x00')
md5.update(op.consumerId.encode('utf-8'))
if op.labels:
signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
for value_set in op.metricValueSets:
md5.update(b'\x00')
md5.update(value_set.metricName.encode('utf-8'))
for mv in value_set.metricValues:
metric_value.update_hash(md5, mv)
md5.update(b'\x00')
if op.quotaProperties:
# N.B.: this differs from the C++ implementation, which serializes the
# protobuf. This should be OK as the exact hash used does not need to
# match across implementations.
md5.update(repr(op.quotaProperties).encode('utf-8'))
md5.update(b'\x00')
return md5.digest()
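# Minimal sketch (assumes `req` is an initialized sc_messages.CheckRequest):
# the digest is used purely as a cache key.
#
#   signature = sign(req)
#   cache[signature] = cached_item   # `cache` and `cached_item` are illustrative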
_KNOWN_LABELS = label_descriptor.KnownLabels
class Info(collections.namedtuple(u'Info',
(u'client_ip',) + operation.Info._fields),
operation.Info):
"""Holds the information necessary to fill in CheckRequest.
In addition to the attributes in :class:`operation.Info`, this has:
Attributes:
client_ip: the client IP address
"""
def __new__(cls, client_ip=u'', **kw):
"""Invokes the base constructor with default values."""
op_info = operation.Info(**kw)
return super(Info, cls).__new__(cls, client_ip, **op_info._asdict())
def as_check_request(self, timer=datetime.utcnow):
"""Makes a `ServicecontrolServicesCheckRequest` from this instance
Returns:
a ``ServicecontrolServicesCheckRequest``
Raises:
ValueError: if the fields in this instance are insufficient to
to create a valid ``ServicecontrolServicesCheckRequest``
"""
if not self.service_name:
raise ValueError(u'the service name must be set')
if not self.operation_id:
raise ValueError(u'the operation id must be set')
if not self.operation_name:
raise ValueError(u'the operation name must be set')
op = super(Info, self).as_operation(timer=timer)
labels = {}
if self.android_cert_fingerprint:
labels[_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT.label_name] = self.android_cert_fingerprint
if self.android_package_name:
labels[_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME.label_name] = self.android_package_name
if self.client_ip:
labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip
if self.ios_bundle_id:
labels[_KNOWN_LABELS.SCC_IOS_BUNDLE_ID.label_name] = self.ios_bundle_id
if self.referer:
labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer
# Forcibly add system label reporting here, as the base service
# config does not specify it as a label.
labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT
labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT
op.labels = encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, labels)
check_request = sc_messages.CheckRequest(operation=op)
return sc_messages.ServicecontrolServicesCheckRequest(
serviceName=self.service_name,
checkRequest=check_request)
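# Hedged example (field values are illustrative, and the keyword arguments
# other than client_ip are assumed to be accepted by operation.Info): build
# an Info and turn it into a request suitable for the service controller.
#
#   info = Info(client_ip=u'192.0.2.1',
#               service_name=u'example-service',
#               operation_id=u'op-1',
#               operation_name=u'ExampleMethod')
#   check_req = info.as_check_request()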
class Aggregator(object):
"""Caches and aggregates ``CheckRequests``.
Concurrency: Thread safe.
Usage:
Create a new cache entry and use the cached response
Example:
>>> options = caches.CheckOptions()
>>> agg = Aggregator('my_service', options)
>>> req = ServicecontrolServicesCheckRequest(...)
>>> # check returns None as the request is not cached
>>> if agg.check(req) is not None:
... resp = service.check(req)
... agg = service.add_response(req, resp)
>>> agg.check(req) # response now cached as per the options
<CheckResponse ....>
Refreshing a cached entry after a flush interval
The flush interval is constrained to be shorter than the actual cache
expiration. This allows the response to potentially remain cached and be
aggregated with subsequent check requests for the same operation.
Example:
>>> # continuing from the previous example,
>>> # ... after the flush interval
>>> # - the response is still in the cache, i.e, not expired
>>> # - the first call after the flush interval returns None, subsequent
>>> # calls continue to return the cached response
>>> agg.check(req) # signals the caller to call service.check(req)
None
>>> agg.check(req) # next call returns the cached response
<CheckResponse ....>
Flushing the cache
Once a response has expired, any outstanding cached CheckRequest for it
should be sent and its response added back to the aggregator instance,
as it will contain quota updates that have not yet been sent.
Example:
>>> # continuing the previous example
>>> for req in agg.flush(): # an iterable of cached CheckRequests
... resp = caller.send_req(req) # caller sends them
>>> agg.add_response(req, resp) # and caches their responses
"""
def __init__(self, service_name, options, kinds=None,
timer=datetime.utcnow):
"""Constructor.
Args:
service_name (string): names the service to which all requests aggregated
by this instance will be sent
options (:class:`~endpoints_management.caches.CheckOptions`): configures the
caching and flushing behavior of this instance
kinds (dict[string,[endpoints_management.control.MetricKind]]): specifies the
kind of metric for each metric name.
timer (function([[datetime]]): a function that returns the current
time as a datetime instance
"""
self._service_name = service_name
self._options = options
self._cache = caches.create(options, timer=timer)
self._kinds = {} if kinds is None else dict(kinds)
self._timer = timer
@property
def service_name(self):
"""The service to which all aggregated requests should belong."""
return self._service_name
@property
def flush_interval(self):
"""The interval between calls to flush.
Returns:
timedelta: the period between calls to flush, or ``None`` if no
cache is set
"""
return None if self._cache is None else self._options.expiration
def flush(self):
"""Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list['CheckRequest']: corresponding to CheckRequests that were
pending
"""
if self._cache is None:
return []
with self._cache as c:
flushed_items = list(c.out_deque)
c.out_deque.clear()
cached_reqs = [item.extract_request() for item in flushed_items]
cached_reqs = [req for req in cached_reqs if req is not None]
return cached_reqs
def clear(self):
"""Clears this instance's cache."""
if self._cache is not None:
with self._cache as c:
c.clear()
c.out_deque.clear()
def add_response(self, req, resp):
"""Adds the response from sending to `req` to this instance's cache.
Args:
req (`ServicecontrolServicesCheckRequest`): the request
resp (CheckResponse): the response from sending the request
"""
if self._cache is None:
return
signature = sign(req.checkRequest)
with self._cache as c:
now = self._timer()
quota_scale = 0 # WIP
item = c.get(signature)
if item is None:
c[signature] = CachedItem(
resp, self.service_name, now, quota_scale)
else:
# Update the cached item to reflect that it is updated
item.last_check_time = now
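# Hedged driver sketch (the aggregator never performs network calls itself;
# `transport` is an assumed service-control client and `req` an already-built
# ServicecontrolServicesCheckRequest):
#
#   agg = Aggregator(u'my-service', caches.CheckOptions())
#   resp = agg.check(req)                 # None on a cache miss
#   if resp is None:
#       resp = transport.check(req)       # hypothetical remote call
#       agg.add_response(req, resp)
#   for pending in agg.flush():           # call roughly every agg.flush_interval
#       agg.add_response(pending, transport.check(pending))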
# Repository: vespa-mrs/vespa
#
# Some of the code in this file was derived from the Python package
# pfile-tools project, https://github.com/njvack/pfile-tools
# and as such we have included their BSD statement in this file.
#
# Copyright (c) 2012, Board of Regents of the University of Wisconsin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of the University of Wisconsin nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python modules
import os
import math
import sys
import csv
import ctypes
# Third party packages
import numpy as np
# Our Modules
#import vespa.common.ge_util as utilge
#import vespa.common.ge_pfile_mapper as pfile_mapper
from ctypes import *
from collections import namedtuple
StructInfo = namedtuple("StructInfo", ["label", "depth", "value", "field_type", "size", "offset"])
class UnknownPfile(RuntimeError):
pass
class RevisionNumLittle(LittleEndianStructure):
@property
def major(self):
return int(self.revision)
_pack_ = 1
_fields_ = [ ('revision', c_float) ]
class RevisionNumBig(BigEndianStructure):
@property
def major(self):
return int(self.revision)
_pack_ = 1
_fields_ = [ ('revision', c_float) ]
class Pfile(object):
"""
This class was based on the style of code from the pfile-tools
package written by <NAME> in that we use ctypes to organize
the reading of binary structures into a Python readable class
instance. We have also incorporated code from their struct
utilities modules as part of our class in order to dump out the
header information to the stdout or as a list of strings.
We use a subset of header variables that are sufficient to read the
data from P-files in which we are interested. The structure for these
variables was adapted from similar code found in the UCSF Sivic project.
"""
def __init__(self, fname):
self.file_name = fname
self.version = 0
self.hdr = None
self.map = None
self.endian = 'little' # default for version >= 11
self.read_header()
if not self.is_ge_file:
raise UnknownPfile("Not a known GE Pfile - fname = %s" % fname)
self.map_data()
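# Hedged usage sketch (the file name is illustrative): construct a Pfile and
# inspect the attributes populated above.
#
#   pf = Pfile('P12345.7')
#   print(pf.version, pf.endian, pf.is_svs)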
# Properties --------------------------------------------------
@property
def is_ge_file(self):
if self.version < 12:
if "GE" in self.hdr.rhr_rh_logo:
return True
else:
return False
else:
offset = self.hdr.rhr_rdb_hdr_off_data
if ( offset == 61464 or # bjs from matlap script for ver = 9
offset == 66072 or
offset == 145908 or
offset == 149788 or
offset == 150336 or
offset == 157276 or # v24 empirical
offset == 213684 ): # v26 empirical
return True
else:
return False
@property
def is_svs(self):
if self.map is None:
return False
else:
return self.map.is_svs
@property
def get_mapper(self):
if self.hdr is None:
return None
psd = self.hdr.rhi_psdname.decode('utf-8').lower()
if psd == 'probe-p':
mapper = PfileMapper
elif psd == 'oslaser':
mapper = PfileMapperSlaser
elif psd == 'presscsi':
mapper = PfileMapper
elif psd == 'fidcsi':
# bjs - added for Pom's fidcsi 13C data
mapper = PfileMapper
elif psd == 'ia/stable/fidcsi':
# bjs - added for Kearny's 13C data
mapper = PfileMapper
elif psd == 'presscsi_nfl':
# bjs - added for Govind's SVS data off v25
mapper = PfileMapper
elif psd == 'epsi_3d_24':
# bjs - added for soher check of MIDAS Browndyke data
mapper = PfileMapper
else:
raise UnknownPfile("No Pfile mapper for pulse sequence = %s" % psd)
# if psd == 'probe-p':
# mapper = pfile_mapper.PfileMapper
# elif psd == 'oslaser':
# mapper = pfile_mapper.PfileMapperSlaser
# elif psd == 'presscsi':
# mapper = pfile_mapper.PfileMapper
# elif psd == 'fidcsi':
# # bjs - added for Pom's fidcsi 13C data
# mapper = pfile_mapper.PfileMapper
# elif psd == 'ia/stable/fidcsi':
# # bjs - added for Kearny's 13C data
# mapper = pfile_mapper.PfileMapper
# elif psd == 'presscsi_nfl':
# # bjs - added for Govind's SVS data off v25
# mapper = pfile_mapper.PfileMapper
# elif psd == 'epsi_3d_24':
# # bjs - added for soher check of MIDAS Browndyke data
# mapper = pfile_mapper.PfileMapper
# else:
# raise UnknownPfile("No Pfile mapper for pulse sequence = %s" % psd)
return mapper
def read_header(self):
filelike = open(self.file_name, 'rb')
# determine version number of this header from revision of rdbm.h
version = self._major_version(filelike)
if version == 0:
raise UnknownPfile("Pfile not supported for version %s" % version)
# Here we dynamically configure the ctypes structures into which the
# binary file will be read, based on the revision number
#
# Note. Determined empirically that I cannot declare the XxxHeader
# class at the top level of the module with an attribute ._fields_ = []
# and then append into it. I have to create a list and then assign
# _fields_ attribute to that list in one step. Don't know why.
#
# Note 2. Had to move Class definition into this function so that the
# class can be reconstituted more than once for multiple GE file reads.
# At the top level of the module, the _fields_ attribute could be
# created once dynamically, but afterwards would stick around and
# could not then be changed.
if version < 11: # data taken on SGI - big endian
class PfileHeaderBig(BigEndianStructure):
"""
Contains the ctypes Structure for a GE P-file rdb header.
Dynamically allocate the ctypes _fields_ list later depending on revision
"""
_pack_ = 1
_fields_ = get_pfile_hdr_fields(version)
# _fields_ = utilge.get_pfile_hdr_fields(version)
hdr = PfileHeaderBig()
self.endian = 'big'
else:
class PfileHeaderLittle(LittleEndianStructure):
"""
Contains the ctypes Structure for a GE P-file rdb header.
Dynamically allocate the ctypes _fields_ list later depending on revision
"""
_pack_ = 1
_fields_ = get_pfile_hdr_fields(version)
# _fields_ = utilge.get_pfile_hdr_fields(version)
hdr = PfileHeaderLittle()
self.endian = 'little'
try:
# read header information from start of file
filelike.seek(0)
filelike.readinto(hdr)
filelike.close()
except:
filelike.close()
raise UnknownPfile("Trouble reading file into header structure for version %s" % version)
self.version = version
self.hdr = hdr
def map_data(self):
"""
Select appropriate mapper class using the pulse sequence name string,
instantiate and read the data from the file into the 'map' attribute
"""
mapper = self.get_mapper
self.map = mapper(self.file_name, self.hdr, self.version, self.endian)
self.map.read_data()
def _major_version(self, filelike):
"""
Get the rdbm.h revision number from first 4 bytes. Then map the rdbm
revision to a platform number (e.g. 11.x, 12.x, etc.)
"""
rev_little = RevisionNumLittle()
rev_big = RevisionNumBig()
filelike.seek(0)
filelike.readinto(rev_little)
filelike.seek(0)
filelike.readinto(rev_big)
rev_little = rev_little.major
rev_big = rev_big.major
version = 0
if (rev_little > 6.95 and rev_little < 8.0) or (rev_big > 6.95 and rev_big < 8.0):
version = 9.0
elif ( rev_little == 9.0 or rev_big == 9.0 ):
version = 11.0
elif ( rev_little == 11.0 or rev_big == 11.0 ):
version = 12.0
elif ( rev_little == 14 or rev_big == 14 ):
version = 14.0
elif ( rev_little == 15 or rev_big == 15 ):
version = 15.0
elif ( rev_little == 16 or rev_big == 16 ):
version = 16.0
elif ( rev_little == 20
name_='userParameterStringType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
if self.value is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%svalue>%s</%svalue>%s' % (namespace_, self.gds_format_string(quote_xml(self.value).encode(ExternalEncoding), input_name='value'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='userParameterStringType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
if self.value is not None:
showIndent(outfile, level)
outfile.write('value=%s,\n' % quote_python(self.value).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'value':
value_ = child_.text
value_ = self.gds_validate_string(value_, node, 'value')
self.value = value_
# end class userParameterStringType
class userParameterBase64Type(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, value=None):
self.original_tagname_ = None
self.name = name
self.value = value
def factory(*args_, **kwargs_):
if userParameterBase64Type.subclass:
return userParameterBase64Type.subclass(*args_, **kwargs_)
else:
return userParameterBase64Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
self.name is not None or
self.value is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='userParameterBase64Type', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='userParameterBase64Type')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='userParameterBase64Type', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='userParameterBase64Type'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='userParameterBase64Type', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
if self.value is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%svalue>%s</%svalue>%s' % (namespace_, self.gds_format_base64(self.value, input_name='value'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='userParameterBase64Type'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
if self.value is not None:
showIndent(outfile, level)
outfile.write('value=model_.xs_base64Binary(\n')
self.value.exportLiteral(outfile, level, name_='value')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'value':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'value')
else:
bval_ = None
self.value = bval_
# end class userParameterBase64Type
class referencedImageSequence(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, referencedSOPInstanceUID=None):
self.original_tagname_ = None
if referencedSOPInstanceUID is None:
self.referencedSOPInstanceUID = []
else:
self.referencedSOPInstanceUID = referencedSOPInstanceUID
def factory(*args_, **kwargs_):
if referencedImageSequence.subclass:
return referencedImageSequence.subclass(*args_, **kwargs_)
else:
return referencedImageSequence(*args_, **kwargs_)
factory = staticmethod(factory)
def get_referencedSOPInstanceUID(self): return self.referencedSOPInstanceUID
def set_referencedSOPInstanceUID(self, referencedSOPInstanceUID): self.referencedSOPInstanceUID = referencedSOPInstanceUID
def add_referencedSOPInstanceUID(self, value): self.referencedSOPInstanceUID.append(value)
def insert_referencedSOPInstanceUID_at(self, index, value): self.referencedSOPInstanceUID.insert(index, value)
def replace_referencedSOPInstanceUID_at(self, index, value): self.referencedSOPInstanceUID[index] = value
def hasContent_(self):
if (
self.referencedSOPInstanceUID
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='referencedImageSequence', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='referencedImageSequence')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='referencedImageSequence', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='referencedImageSequence'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='referencedImageSequence', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for referencedSOPInstanceUID_ in self.referencedSOPInstanceUID:
showIndent(outfile, level, pretty_print)
outfile.write('<%sreferencedSOPInstanceUID>%s</%sreferencedSOPInstanceUID>%s' % (namespace_, self.gds_format_string(quote_xml(referencedSOPInstanceUID_).encode(ExternalEncoding), input_name='referencedSOPInstanceUID'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='referencedImageSequence'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('referencedSOPInstanceUID=[\n')
level += 1
for referencedSOPInstanceUID_ in self.referencedSOPInstanceUID:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(referencedSOPInstanceUID_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'referencedSOPInstanceUID':
referencedSOPInstanceUID_ = child_.text
referencedSOPInstanceUID_ = self.gds_validate_string(referencedSOPInstanceUID_, node, 'referencedSOPInstanceUID')
self.referencedSOPInstanceUID.append(referencedSOPInstanceUID_)
# end class referencedImageSequence
class userParameters(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, userParameterLong=None, userParameterDouble=None, userParameterString=None, userParameterBase64=None):
self.original_tagname_ = None
if userParameterLong is None:
self.userParameterLong = []
else:
self.userParameterLong = userParameterLong
if userParameterDouble is None:
self.userParameterDouble = []
else:
self.userParameterDouble = userParameterDouble
if userParameterString is None:
self.userParameterString = []
else:
self.userParameterString = userParameterString
if userParameterBase64 is None:
self.userParameterBase64 = []
else:
self.userParameterBase64 = userParameterBase64
def factory(*args_, **kwargs_):
if userParameters.subclass:
return userParameters.subclass(*args_, **kwargs_)
else:
return userParameters(*args_, **kwargs_)
factory = staticmethod(factory)
def get_userParameterLong(self): return self.userParameterLong
def set_userParameterLong(self, userParameterLong): self.userParameterLong = userParameterLong
def add_userParameterLong(self, value): self.userParameterLong.append(value)
def insert_userParameterLong_at(self, index, value): self.userParameterLong.insert(index, value)
def replace_userParameterLong_at(self, index, value): self.userParameterLong[index] = value
def get_userParameterDouble(self): return self.userParameterDouble
def set_userParameterDouble(self, userParameterDouble): self.userParameterDouble = userParameterDouble
def add_userParameterDouble(self, value): self.userParameterDouble.append(value)
def insert_userParameterDouble_at(self, index, value): self.userParameterDouble.insert(index, value)
def replace_userParameterDouble_at(self, index, value): self.userParameterDouble[index] = value
def get_userParameterString(self): return self.userParameterString
def set_userParameterString(self, userParameterString): self.userParameterString = userParameterString
def add_userParameterString(self, value): self.userParameterString.append(value)
def insert_userParameterString_at(self, index, value): self.userParameterString.insert(index, value)
def replace_userParameterString_at(self, index, value): self.userParameterString[index] = value
def get_userParameterBase64(self): return self.userParameterBase64
def set_userParameterBase64(self, userParameterBase64): self.userParameterBase64 = userParameterBase64
def add_userParameterBase64(self, value): self.userParameterBase64.append(value)
def insert_userParameterBase64_at(self, index, value): self.userParameterBase64.insert(index, value)
def replace_userParameterBase64_at(self, index, value): self.userParameterBase64[index] = value
def hasContent_(self):
if (
self.userParameterLong or
self.userParameterDouble or
self.userParameterString or
self.userParameterBase64
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='userParameters', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='userParameters')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='userParameters', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='userParameters'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='userParameters', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for userParameterLong_ in self.userParameterLong:
userParameterLong_.export(outfile, level, namespace_, name_='userParameterLong', pretty_print=pretty_print)
for userParameterDouble_ in self.userParameterDouble:
userParameterDouble_.export(outfile, level, namespace_, name_='userParameterDouble', pretty_print=pretty_print)
for userParameterString_ in self.userParameterString:
userParameterString_.export(outfile, level, namespace_, name_='userParameterString', pretty_print=pretty_print)
for userParameterBase64_ in self.userParameterBase64:
userParameterBase64_.export(outfile, level, namespace_, name_='userParameterBase64', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='userParameters'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('userParameterLong=[\n')
level += 1
for userParameterLong_ in self.userParameterLong:
showIndent(outfile, level)
outfile.write('model_.userParameterLongType(\n')
userParameterLong_.exportLiteral(outfile, level, name_='userParameterLongType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('userParameterDouble=[\n')
level += 1
for userParameterDouble_ in self.userParameterDouble:
showIndent(outfile, level)
outfile.write('model_.userParameterDoubleType(\n')
userParameterDouble_.exportLiteral(outfile, level, name_='userParameterDoubleType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('userParameterString=[\n')
level += 1
for userParameterString_ in self.userParameterString:
showIndent(outfile, level)
outfile.write('model_.userParameterStringType(\n')
userParameterString_.exportLiteral(outfile, level, name_='userParameterStringType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('userParameterBase64=[\n')
level += 1
for userParameterBase64_ in self.userParameterBase64:
showIndent(outfile, level)
outfile.write('model_.userParameterBase64Type(\n')
userParameterBase64_.exportLiteral(outfile, level, name_='userParameterBase64Type')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node,
"void DrawFlask__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x80097824)
SetType(0x80097824, "unsigned char SpdTrimCol__Fs(short col)")
del_items(0x8009785C)
SetType(0x8009785C, "void DrawSpeedBar__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x80097F88)
SetType(0x80097F88, "void DrawSpell__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x80098124)
SetType(0x80098124, "void DrawMsgWindow__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x80098174)
SetType(0x80098174, "int DrawDurThingy__6GPaneliiP10ItemStructi(struct GPanel *this, int X, int Y, struct ItemStruct *Item, int ItemType)")
del_items(0x80098440)
SetType(0x80098440, "void DrawDurIcon__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8009856C)
SetType(0x8009856C, "void Print__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x80098684)
SetType(0x80098684, "int GetMaxOtPos__7CBlocks_addr_80098684()")
del_items(0x8009868C)
SetType(0x8009868C, "struct PAL *GetPal__7TextDati_addr_8009868C(struct TextDat *this, int PalNum)")
del_items(0x800986A8)
SetType(0x800986A8, "struct FRAME_HDR *GetFr__7TextDati_addr_800986A8(struct TextDat *this, int FrNum)")
del_items(0x800986C4)
SetType(0x800986C4, "void PrintCDWaitTask__FP4TASK(struct TASK *T)")
del_items(0x80098800)
SetType(0x80098800, "void InitCDWaitIcon__Fv()")
del_items(0x80098834)
SetType(0x80098834, "void STR_Debug__FP6SFXHDRPce(struct SFXHDR *sfh, char *e)")
del_items(0x80098848)
SetType(0x80098848, "void STR_SystemTask__FP4TASK(struct TASK *T)")
del_items(0x80098878)
SetType(0x80098878, "void STR_AllocBuffer__Fv()")
del_items(0x800988B0)
SetType(0x800988B0, "void STR_Init__Fv()")
del_items(0x800989DC)
SetType(0x800989DC, "struct SFXHDR *STR_InitStream__Fc(char flag)")
del_items(0x80098B04)
SetType(0x80098B04, "struct SFXHDR *STR_PlaySound__FUscic(unsigned short Name, char flag, int volume, char loop)")
del_items(0x80098D4C)
SetType(0x80098D4C, "void STR_setvolume__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x80098E18)
SetType(0x80098E18, "void STR_setpitch__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x80098E64)
SetType(0x80098E64, "void STR_PlaySFX__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x80098F70)
SetType(0x80098F70, "void STR_pauseall__Fv()")
del_items(0x80098FE4)
SetType(0x80098FE4, "void STR_resumeall__Fv()")
del_items(0x80099058)
SetType(0x80099058, "void STR_CloseStream__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x800990C4)
SetType(0x800990C4, "void STR_SoundCommand__FP6SFXHDRi(struct SFXHDR *sfh, int Command)")
del_items(0x800991B0)
SetType(0x800991B0, "char STR_Command__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x800993A0)
SetType(0x800993A0, "void STR_DMAControl__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x80099468)
SetType(0x80099468, "void STR_PlayStream__FP6SFXHDRPUci(struct SFXHDR *sfh, unsigned char *Src, int size)")
del_items(0x800996E8)
SetType(0x800996E8, "void STR_AsyncWeeTASK__FP4TASK(struct TASK *T)")
del_items(0x800999C0)
SetType(0x800999C0, "void STR_AsyncTASK__FP4TASK(struct TASK *T)")
del_items(0x80099DA8)
SetType(0x80099DA8, "void STR_StreamMainTask__FP6SFXHDRc(struct SFXHDR *sfh, char FileType)")
del_items(0x80099ED4)
SetType(0x80099ED4, "void SND_Monitor__FP4TASK(struct TASK *T)")
del_items(0x80099F60)
SetType(0x80099F60, "void SPU_OnceOnlyInit__Fv()")
del_items(0x80099F98)
SetType(0x80099F98, "void SPU_Init__Fv()")
del_items(0x8009A0A0)
SetType(0x8009A0A0, "int SND_FindChannel__Fv()")
del_items(0x8009A10C)
SetType(0x8009A10C, "void SND_ClearBank__Fv()")
del_items(0x8009A17C)
SetType(0x8009A17C, "bool SndLoadCallBack__FPUciib(unsigned char *Mem, int ReadSoFar, int Size, bool LastChunk)")
del_items(0x8009A1F4)
SetType(0x8009A1F4, "void SND_LoadBank__Fi(int lvlnum)")
del_items(0x8009A318)
SetType(0x8009A318, "int SND_FindSFX__FUs(unsigned short Name)")
del_items(0x8009A3F4)
SetType(0x8009A3F4, "void SND_StopSnd__Fi(int voice)")
del_items(0x8009A428)
SetType(0x8009A428, "bool SND_IsSfxPlaying__Fi(int SFXNo)")
del_items(0x8009A464)
SetType(0x8009A464, "int SND_RemapSnd__Fi(int SFXNo)")
del_items(0x8009A4D8)
SetType(0x8009A4D8, "int SND_PlaySnd__FUsiii(unsigned short Name, int vol, int pan, int pitchadj)")
del_items(0x8009A6F0)
SetType(0x8009A6F0, "void AS_CallBack0__Fi(int ah)")
del_items(0x8009A75C)
SetType(0x8009A75C, "void AS_CallBack1__Fi(int ah)")
del_items(0x8009A7C8)
SetType(0x8009A7C8, "void AS_WasLastBlock__FiP6STRHDRP6SFXHDR(int ah, struct STRHDR *sh, struct SFXHDR *sfh)")
del_items(0x8009A890)
SetType(0x8009A890, "int AS_OpenStream__FP6STRHDRP6SFXHDR(struct STRHDR *sh, struct SFXHDR *sfh)")
del_items(0x8009A930)
SetType(0x8009A930, "char AS_GetBlock__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8009A960)
SetType(0x8009A960, "void AS_CloseStream__FP6STRHDRP6SFXHDR(struct STRHDR *sh, struct SFXHDR *sfh)")
del_items(0x8009A9B4)
SetType(0x8009A9B4, "unsigned short SCR_GetBlackClut__Fv()")
del_items(0x8009A9C0)
SetType(0x8009A9C0, "void SCR_Open__Fv()")
del_items(0x8009A9F8)
SetType(0x8009A9F8, "void SCR_DumpClut__Fv()")
del_items(0x8009AA6C)
SetType(0x8009AA6C, "unsigned short SCR_NeedHighlightPal__FUsUsi(unsigned short Clut, unsigned short PixVal, int NumOfCols)")
del_items(0x8009AAA0)
SetType(0x8009AAA0, "void Init__13PalCollectionPC7InitPos(struct PalCollection *this, struct InitPos *IPos)")
del_items(0x8009AB30)
SetType(0x8009AB30, "struct PalEntry *FindPal__13PalCollectionUsUsi(struct PalCollection *this, unsigned short SourceClut, unsigned short PixVal, int NumOfCols)")
del_items(0x8009AC0C)
SetType(0x8009AC0C, "struct PalEntry *NewPal__13PalCollectionUsUsi(struct PalCollection *this, unsigned short SourceClut, unsigned short PixVal, int NumOfCols)")
del_items(0x8009AC8C)
SetType(0x8009AC8C, "void MakePal__8PalEntryUsUsi(struct PalEntry *this, unsigned short _SourceClut, unsigned short _PixVal, int _NumOfCols)")
del_items(0x8009AD2C)
SetType(0x8009AD2C, "unsigned short GetHighlightPal__13PalCollectionUsUsi(struct PalCollection *this, unsigned short SourceClut, unsigned short PixVal, int NumOfCols)")
del_items(0x8009AD74)
SetType(0x8009AD74, "void UpdatePals__13PalCollection(struct PalCollection *this)")
del_items(0x8009ADE8)
SetType(0x8009ADE8, "void SCR_Handler__Fv()")
del_items(0x8009AE10)
SetType(0x8009AE10, "int GetNumOfObjs__t10Collection2Z8PalEntryi20(struct t10Collection2Z8PalEntryi20 *this)")
del_items(0x8009AE18)
SetType(0x8009AE18, "struct PalEntry *GetObj__t10Collection2Z8PalEntryi20(struct t10Collection2Z8PalEntryi20 *this)")
del_items(0x8009AE54)
SetType(0x8009AE54, "void Init__t10Collection2Z8PalEntryi20(struct t10Collection2Z8PalEntryi20 *this)")
del_items(0x8009AEB8)
SetType(0x8009AEB8, "void MoveFromUsedToUnused__t10Collection2Z8PalEntryi20P8PalEntry(struct t10Collection2Z8PalEntryi20 *this, struct PalEntry *RetObj)")
del_items(0x8009AF10)
SetType(0x8009AF10, "void MoveFromUnusedToUsed__t10Collection2Z8PalEntryi20P8PalEntry(struct t10Collection2Z8PalEntryi20 *this, struct PalEntry *RetObj)")
del_items(0x8009AF68)
SetType(0x8009AF68, "void Set__8PalEntryUsUsi(struct PalEntry *this, unsigned short _SourceClut, unsigned short _PixVal, int _NumOfCols)")
del_items(0x8009AF7C)
SetType(0x8009AF7C, "void Set__8PalEntryRC7InitPos(struct PalEntry *this, struct InitPos *NewPos)")
del_items(0x8009AFA8)
SetType(0x8009AFA8, "bool SetJustUsed__8PalEntryb(struct PalEntry *this, bool NewVal)")
del_items(0x8009AFB0)
SetType(0x8009AFB0, "void Init__8PalEntry(struct PalEntry *this)")
del_items(0x8009AFB8)
SetType(0x8009AFB8, "unsigned short GetClut__C8PalEntry(struct PalEntry *this)")
del_items(0x8009AFC4)
SetType(0x8009AFC4, "bool IsEqual__C8PalEntryUsUsi(struct PalEntry *this, unsigned short _SourceClut, unsigned short _PixVal, int _NumOfCols)")
del_items(0x8009AFFC)
SetType(0x8009AFFC, "struct PalEntry *GetNext__Ct11TLinkedList1Z8PalEntry(struct t11TLinkedList1Z8PalEntry *this)")
del_items(0x8009B008)
SetType(0x8009B008, "void AddToList__t11TLinkedList1Z8PalEntryPP8PalEntry(struct t11TLinkedList1Z8PalEntry *this, struct PalEntry **Head)")
del_items(0x8009B028)
SetType(0x8009B028, "void DetachFromList__t11TLinkedList1Z8PalEntryPP8PalEntry(struct t11TLinkedList1Z8PalEntry *this, struct PalEntry **Head)")
del_items(0x8009B074)
SetType(0x8009B074, "void stub__FPcPv(char *e, void *argptr)")
del_items(0x8009B07C)
SetType(0x8009B07C, "void new_eprint__FPcT0i(char *Text, char *File, int Line)")
del_items(0x8009B0B0)
SetType(0x8009B0B0, "void TonysGameTask__FP4TASK(struct TASK *T)")
del_items(0x8009B138)
SetType(0x8009B138, "void SetAmbientLight__Fv()")
del_items(0x8009B1F8)
SetType(0x8009B1F8, "void SetDemoPlayer__Fv()")
del_items(0x8009B228)
SetType(0x8009B228, "void print_demo_task__FP4TASK(struct TASK *T)")
del_items(0x8009B568)
SetType(0x8009B568, "void TonysDummyPoll__Fv()")
del_items(0x8009B594)
SetType(0x8009B594, "void SetTonyPoll__Fv()")
del_items(0x8009B5A0)
SetType(0x8009B5A0, "void ClearTonyPoll__Fv()")
del_items(0x8009B5AC)
SetType(0x8009B5AC, "void load_demo_pad_data__FUl(unsigned long demo_num)")
del_items(0x8009B60C)
SetType(0x8009B60C, "void save_demo_pad_data__FUl(unsigned long demo_num)")
del_items(0x8009B66C)
SetType(0x8009B66C, "void set_pad_record_play__Fi(int level)")
del_items(0x8009B6E0)
SetType(0x8009B6E0, "void start_demo__Fv()")
del_items(0x8009B6F0)
SetType(0x8009B6F0, "void SetQuest__Fv()")
del_items(0x8009B6F8)
SetType(0x8009B6F8, "void DrawManaShield__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x8009B700)
SetType(0x8009B700, "void ManaTask__FP4TASK(struct TASK *T)")
del_items(0x8009B708)
SetType(0x8009B708, "void tony__Fv()")
del_items(0x8009B748)
SetType(0x8009B748, "void GLUE_SetMonsterList__Fi(int List)")
del_items(0x8009B754)
SetType(0x8009B754, "int GLUE_GetMonsterList__Fv()")
del_items(0x8009B760)
SetType(0x8009B760, "void GLUE_SuspendGame__Fv()")
del_items(0x8009B7B4)
SetType(0x8009B7B4, "void GLUE_ResumeGame__Fv()")
del_items(0x8009B808)
SetType(0x8009B808, "void GLUE_PreTown__Fv()")
del_items(0x8009B838)
SetType(0x8009B838, "void GLUE_PreDun__Fv()")
del_items(0x8009B840)
SetType(0x8009B840, "bool GLUE_Finished__Fv()")
del_items(0x8009B84C)
SetType(0x8009B84C, "void GLUE_SetFinished__Fb(bool NewFinished)")
del_items(0x8009B858)
SetType(0x8009B858, "void GLUE_StartBg__Fibi(int TextId, bool IsTown, int Level)")
del_items(0x8009B8C0)
SetType(0x8009B8C0, "bool GLUE_SetShowGameScreenFlag__Fb(bool NewFlag)")
del_items(0x8009B8D0)
SetType(0x8009B8D0, "bool GLUE_GetShowGameScreenFlag__Fv()")
del_items(0x8009B8DC)
SetType(0x8009B8DC, "bool GLUE_SetHomingScrollFlag__Fb(bool NewFlag)")
del_items(0x8009B8EC)
SetType(0x8009B8EC, "bool GLUE_SetShowPanelFlag__Fb(bool NewFlag)")
del_items(0x8009B8FC)
SetType(0x8009B8FC, "bool GLUE_HasGameStarted__Fv()")
del_items(0x8009B908)
SetType(0x8009B908, "void DoShowPanelGFX__FP6GPanelT0(struct GPanel *P1, struct GPanel *P2)")
del_items(0x8009B9E0)
SetType(0x8009B9E0, "void GLUE_DoQuake__Fii(int Time, int Amount)")
del_items(0x8009B9F0)
SetType(0x8009B9F0, "void BgTask__FP4TASK(struct TASK *T)")
del_items(0x8009BE9C)
SetType(0x8009BE9C, "struct PInf *FindPlayerChar__FPc(char *Id)")
del_items(0x8009BF34)
SetType(0x8009BF34, "struct PInf *FindPlayerChar__Fiii(int Char, int Wep, int Arm)")
del_items(0x8009BF90)
SetType(0x8009BF90, "struct PInf *FindPlayerChar__FP12PlayerStruct(struct PlayerStruct *P)")
del_items(0x8009BFC0)
SetType(0x8009BFC0, "int FindPlayerChar__FP12PlayerStructb(struct PlayerStruct *P, bool InTown)")
del_items(0x8009C08C)
SetType(0x8009C08C, "void MakeSurePlayerDressedProperly__FR7CPlayerR12PlayerStructbT2(struct CPlayer *Player, struct PlayerStruct *Plr, bool InTown, bool Blocking)")
del_items(0x8009C104)
SetType(0x8009C104, "struct MonstList *GLUE_GetCurrentList__Fi(int Level)")
del_items(0x8009C1B0)
SetType(0x8009C1B0, "void GLUE_StartGameExit__Fv()")
del_items(0x8009C21C)
SetType(0x8009C21C, "void GLUE_Init__Fv()")
del_items(0x8009C224)
SetType(0x8009C224, "int GetTexId__7CPlayer(struct CPlayer *this)")
del_items(0x8009C230)
SetType(0x8009C230, "void SetTown__7CBlocksb(struct CBlocks *this, bool Val)")
del_items(0x8009C238)
SetType(0x8009C238, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)")
del_items(0x8009C24C)
SetType(0x8009C24C, "void SetDemoKeys__FPi(int *buffer)")
del_items(0x8009C324)
SetType(0x8009C324, "void RestoreDemoKeys__FPi(int *buffer)")
del_items(0x8009C3B4)
SetType(0x8009C3B4, "char *get_action_str__Fii(int pval, int combo)")
del_items(0x8009C42C)
SetType(0x8009C42C, "int get_key_pad__Fi(int n)")
del_items(0x8009C464)
SetType(0x8009C464, "bool checkvalid__Fv()")
del_items(0x8009C4C8)
SetType(0x8009C4C8, "bool RemoveCtrlScreen__Fv()")
del_items(0x8009C524)
SetType(0x8009C524, "unsigned char Init_ctrl_pos__Fv()")
del_items(0x8009C5DC)
SetType(0x8009C5DC, "int remove_padval__Fi(int p)")
del_items(0x8009C61C)
SetType(0x8009C61C, "int remove_comboval__Fib(int p, bool all)")
del_items(0x8009C664)
SetType(0x8009C664, "unsigned char set_buttons__Fii(int cline, int n)")
del_items(0x8009C7DC)
SetType(0x8009C7DC, "void restore_controller_settings__F8CTRL_SET(enum CTRL_SET s)")
del_items(0x8009C880)
SetType(0x8009C880, "bool only_one_button__Fi(int p)")
del_items(0x8009C8AC)
SetType(0x8009C8AC, "unsigned char main_ctrl_setup__Fv()")
del_items(0x8009CD88)
SetType(0x8009CD88, "void PrintCtrlString__FiiUcic(int x, int y, unsigned char cjustflag, int str_num, int col)")
del_items(0x8009D2DC)
SetType(0x8009D2DC, "void DrawCtrlSetup__Fv()")
del_items(0x8009D7DC)
SetType(0x8009D7DC, "void _GLOBAL__D_ctrlflag()")
del_items(0x8009D804)
SetType(0x8009D804, "void _GLOBAL__I_ctrlflag()")
del_items(0x8009D82C)
SetType(0x8009D82C, "unsigned short GetTick__C4CPad(struct CPad *this)")
del_items(0x8009D854)
SetType(0x8009D854, "unsigned short GetDown__C4CPad_addr_8009D854(struct CPad *this)")
del_items(0x8009D87C)
SetType(0x8009D87C, "unsigned short GetUp__C4CPad(struct CPad *this)")
del_items(0x8009D8A4)
SetType(0x8009D8A4, "unsigned short GetCur__C4CPad_addr_8009D8A4(struct CPad *this)")
del_items(0x8009D8CC)
SetType(0x8009D8CC, "void SetPadTickMask__4CPadUs(struct CPad *this, unsigned short mask)")
del_items(0x8009D8D4)
SetType(0x8009D8D4, "void SetPadTick__4CPadUs(struct CPad *this, unsigned short tick)")
del_items(0x8009D8DC)
SetType(0x8009D8DC, "void SetRGB__6DialogUcUcUc_addr_8009D8DC(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8009D8FC)
SetType(0x8009D8FC, "void SetBorder__6Dialogi_addr_8009D8FC(struct Dialog *this, int Type)")
del_items(0x8009D904)
SetType(0x8009D904, "void ___6Dialog_addr_8009D904(struct Dialog *this, int __in_chrg)")
del_items(0x8009D92C)
SetType(0x8009D92C, "struct Dialog *__6Dialog_addr_8009D92C(struct Dialog *this)")
del_items(0x8009D9AC)
SetType(0x8009D9AC, "int GetOverlayOtBase__7CBlocks_addr_8009D9AC()")
del_items(0x8009D9B4)
SetType(0x8009D9B4, "void color_cycle__FP4TASK(struct TASK *T)")
del_items(0x8009DD74)
SetType(0x8009DD74, "void penta_cycle_task__FP4TASK(struct TASK *T)")
del_items(0x8009DEF4)
SetType(0x8009DEF4, "void DrawFlameLogo__Fv()")
del_items(0x8009E0A4)
SetType(0x8009E0A4, "void TitleScreen__FP7CScreen(struct CScreen *FeScreen)")
del_items(0x8009E0F8)
SetType(0x8009E0F8, "void DaveLDummyPoll__Fv()")
del_items(0x8009E100)
SetType(0x8009E100, "void DaveL__Fv()")
del_items(0x8009E128)
SetType(0x8009E128, "void DoReflection__FP8POLY_FT4iii(struct POLY_FT4 *Ft4, int R, int G, int B)")
del_items(0x8009E468)
SetType(0x8009E468, "void mteleportfx__Fv()")
del_items(0x8009E77C)
SetType(0x8009E77C, "void invistimer__Fv()")
del_items(0x8009E854)
SetType(0x8009E854, "void setUVparams__FP8POLY_FT4P9FRAME_HDR(struct POLY_FT4 *Ft4, struct FRAME_HDR *Fr)")
del_items(0x8009E8E4)
SetType(0x8009E8E4, "void drawparticle__Fiiiiii(int x, int y, int scale, int anim, int colour, int OtPos)")
del_items(0x8009EADC)
SetType(0x8009EADC, "void drawpolyF4__Fiiiiii(int x, int y, int w, int h, int colour, int OtPos)")
del_items(0x8009EC10)
SetType(0x8009EC10, "void drawpolyG4__Fiiiiiiii(int x, int y, int w, int h1, int h2, int colour0, int colour1, int OtPos)")
del_items(0x8009EDE0)
SetType(0x8009EDE0, "void particlejump__Fii(int ScrX, int ScrY)")
del_items(0x8009EFB0)
SetType(0x8009EFB0, "void doparticlejump__Fv()")
del_items(0x8009F144)
SetType(0x8009F144, "void StartPartJump__Fiiiii(int mi, int height, int scale, int colour, int OtPos)")
del_items(0x8009F298)
SetType(0x8009F298, "void MonstPartJump__Fi(int m)")
del_items(0x8009F3B8)
SetType(0x8009F3B8, "void doparticlechain__Fiiiiiiiiiiii(int sx, int sy, int dx, int dy, int count, int scale, int scaledec, int semitrans, int randomize, int colour, int OtPos, int source)")
del_items(0x8009F708)
SetType(0x8009F708, "void ParticleMissile__FP13MissileStructiiii(struct MissileStruct *Ms, int ScrX, int ScrY, int colour, int OtPos)")
del_items(0x8009F7C4)
SetType(0x8009F7C4, "void Teleportfx__Fiiiiiiii(int scrnx, int scrny, int width, int height, int scale, int colmask, int numpart, int OtPos)")
del_items(0x8009FAC4)
SetType(0x8009FAC4, "void ResurrectFX__Fiiii(int x, int height, int scale, int OtPos)")
del_items(0x8009FCEC)
SetType(0x8009FCEC, "void ParticleExp__FP13MissileStructiiii(struct MissileStruct *Ms, int ScrX, int ScrY, int colour, int OtPos)")
del_items(0x8009FD84)
SetType(0x8009FD84, "void GetPlrPos__11SPELLFX_DATP12PlayerStruct(struct SPELLFX_DAT *this, struct PlayerStruct *ptrplr)")
del_items(0x8009FEA8)
SetType(0x8009FEA8, "void healFX__Fv()")
del_items(0x8009FFE4)
SetType(0x8009FFE4, "void HealStart__Fi(int plr)")
del_items(0x800A0018)
SetType(0x800A0018, "void HealotherStart__Fi(int plr)")
del_items(0x800A0050)
SetType(0x800A0050, "void TeleStart__Fi(int plr)")
del_items(0x800A0110)
SetType(0x800A0110, "void TeleStop__Fi(int plr)")
del_items(0x800A013C)
SetType(0x800A013C, "void PhaseStart__Fi(int plr)")
del_items(0x800A0170)
SetType(0x800A0170, "void PhaseEnd__Fi(int plr)")
del_items(0x800A019C)
SetType(0x800A019C, "void ApocInit__11SPELLFX_DATP12PlayerStruct(struct SPELLFX_DAT *this, struct PlayerStruct *ptrplr)")
del_items(0x800A0384)
SetType(0x800A0384, "void ApocaStart__Fi(int plr)")
del_items(0x800A03E8)
SetType(0x800A03E8, "void DaveLTask__FP4TASK(struct TASK *T)")
del_items(0x800A04B8)
SetType(0x800A04B8, "void PRIM_GetPrim__FPP7POLY_G4(struct POLY_G4 **Prim)")
del_items(0x800A0534)
SetType(0x800A0534, "void PRIM_GetPrim__FPP7POLY_F4(struct POLY_F4 **Prim)")
del_items(0x800A05B0)
SetType(0x800A05B0, "void PRIM_GetPrim__FPP8POLY_FT4_addr_800A05B0(struct POLY_FT4 **Prim)")
del_items(0x800A062C)
SetType(0x800A062C, "struct CPlayer *GetPlayer__7CPlayeri(int PNum)")
del_items(0x800A067C)
SetType(0x800A067C, "int GetLastOtPos__C7CPlayer(struct CPlayer *this)")
del_items(0x800A0688)
SetType(0x800A0688, "int GetOtPos__7CBlocksi_addr_800A0688(struct CBlocks *this, int LogicalY)")
del_items(0x800A06C4)
SetType(0x800A06C4, "struct FRAME_HDR *GetFr__7TextDati_addr_800A06C4(struct TextDat *this, int FrNum)")
del_items(0x800A06E0)
SetType(0x800A06E0, "void SetQSpell__Fiii(int pnum, int Spell, int type)")
del_items(0x800A0700)
SetType(0x800A0700, "void release_spell__Fi(int pnum)")
del_items(0x800A0764)
SetType(0x800A0764, "void select_belt_item__Fi(int pnum)")
del_items(0x800A076C)
SetType(0x800A076C, "unsigned char any_belt_items__Fv()")
del_items(0x800A07D4)
SetType(0x800A07D4, "void get_last_inv__Fv()")
del_items(0x800A0900)
SetType(0x800A0900, "void get_next_inv__Fv()")
del_items(0x800A0A34)
SetType(0x800A0A34, "void pad_func_up__Fi(int pnum)")
del_items(0x800A0A60)
SetType(0x800A0A60, "void pad_func_down__Fi(int pnum)")
del_items(0x800A0A8C)
SetType(0x800A0A8C, "void pad_func_left__Fi(int pnum)")
del_items(0x800A0A94)
SetType(0x800A0A94, "void pad_func_right__Fi(int pnum)")
del_items(0x800A0A9C)
SetType(0x800A0A9C, "void pad_func_select__Fi(int pnum)")
del_items(0x800A0B60)
SetType(0x800A0B60, "void SetFindMonsterXY__FP12PlayerStructi(struct PlayerStruct *p, int i)")
del_items(0x800A0BF0)
SetType(0x800A0BF0, "void pad_func_Attack__Fi(int pnum)")
del_items(0x800A10A4)
SetType(0x800A10A4, "void pad_func_Action__Fi(int pnum)")
del_items(0x800A145C)
SetType(0x800A145C, "void InitTargetCursor__Fi(int pnum)")
del_items(0x800A1490)
SetType(0x800A1490, "void RemoveTargetCursor__Fi(int pnum)")
del_items(0x800A14D8)
SetType(0x800A14D8, "bool TargetingSpell__Fi(int sp)")
del_items(0x800A1520)
SetType(0x800A1520, "void pad_func_Cast_Spell__Fi(int pnum)")
del_items(0x800A1920)
SetType(0x800A1920, "void pad_func_Use_Item__Fi(int pnum)")
del_items(0x800A1B54)
SetType(0x800A1B54, "void pad_func_BeltList__Fi(int pnum)")
del_items(0x800A1CBC)
SetType(0x800A1CBC, "void pad_func_Chr__Fi(int pnum)")
del_items(0x800A1DF0)
SetType(0x800A1DF0, "void pad_func_Inv__Fi(int pnum)")
del_items(0x800A1F20)
SetType(0x800A1F20, "void pad_func_SplBook__Fi(int pnum)")
del_items(0x800A2050)
SetType(0x800A2050, "void pad_func_QLog__Fi(int pnum)")
del_items(0x800A2144)
SetType(0x800A2144, "void pad_func_SpellBook__Fi(int pnum)")
del_items(0x800A221C)
SetType(0x800A221C, "void pad_func_AutoMap__Fi(int pnum)")
del_items(0x800A22D8)
SetType(0x800A22D8, "void pad_func_Quick_Spell__Fi(int pnum)")
del_items(0x800A244C)
SetType(0x800A244C, "void check_inv__FiPci(int pnum, char *ilist, int entries)")
del_items(0x800A26CC)
SetType(0x800A26CC, "void pad_func_Quick_Use_Health__Fi(int pnum)")
del_items(0x800A26F4)
SetType(0x800A26F4, "void pad_func_Quick_Use_Mana__Fi(int pnum)")
del_items(0x800A271C)
SetType(0x800A271C, "bool sort_gold__Fi(int pnum)")
del_items(0x800A2824)
SetType(0x800A2824, "void DrawObjSelector__FiP12PlayerStruct(int pnum, struct PlayerStruct *player)")
del_items(0x800A302C)
SetType(0x800A302C, "bool SelectorActive__Fv()")
del_items(0x800A3038)
SetType(0x800A3038, "void DrawObjTask__FP4TASK(struct TASK *T)")
del_items(0x800A3374)
SetType(0x800A3374, "void add_area_find_object__Fiii(int index, int x, int y)")
del_items(0x800A33E4)
SetType(0x800A33E4, "unsigned char CheckRangeObject__Fiii(int x, int y, int distance)")
del_items(0x800A375C)
SetType(0x800A375C, "unsigned char CheckArea__FiiiUci(int xx, int yy, int range, unsigned char allflag, int pnum)")
del_items(0x800A3D44)
SetType(0x800A3D44, "void PlacePlayer__FiiiUc(int pnum, int x, int y, unsigned char do_current)")
del_items(0x800A3EBC)
SetType(0x800A3EBC, "void _GLOBAL__D_gplayer()")
del_items(0x800A3EE4)
SetType(0x800A3EE4, "void _GLOBAL__I_gplayer()")
del_items(0x800A3F0C)
SetType(0x800A3F0C, "void SetRGB__6DialogUcUcUc_addr_800A3F0C(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x800A3F2C)
SetType(0x800A3F2C, "void SetBack__6Dialogi_addr_800A3F2C(struct Dialog *this, int Type)")
del_items(0x800A3F34)
SetType(0x800A3F34, "void SetBorder__6Dialogi_addr_800A3F34(struct Dialog *this, int Type)")
del_items(0x800A3F3C)
SetType(0x800A3F3C, "void ___6Dialog_addr_800A3F3C(struct Dialog *this, int __in_chrg)")
del_items(0x800A3F64)
SetType(0x800A3F64, "struct Dialog *__6Dialog_addr_800A3F64(struct Dialog *this)")
del_items(0x800A3FE4)
SetType(0x800A3FE4, "bool Active__11SpellTarget(struct SpellTarget *this)")
del_items(0x800A3FF0)
SetType(0x800A3FF0, "int GetOverlayOtBase__7CBlocks_addr_800A3FF0()")
del_items(0x800A3FF8)
SetType(0x800A3FF8, "unsigned short GetDown__C4CPad_addr_800A3FF8(struct CPad *this)")
del_items(0x800A4020)
SetType(0x800A4020, "unsigned short GetCur__C4CPad_addr_800A4020(struct CPad *this)")
del_items(0x800A4048)
SetType(0x800A4048, "void DEC_AddAsDecRequestor__FP7TextDat(struct TextDat *Td)")
del_items(0x800A40C4)
SetType(0x800A40C4, "void DEC_RemoveAsDecRequestor__FP7TextDat(struct TextDat *Td)")
del_items(0x800A411C)
SetType(0x800A411C, "void DEC_DoDecompRequests__Fv()")
del_items(0x800A4178)
SetType(0x800A4178, "int FindThisTd__FP7TextDat(struct TextDat *Td)")
del_items(0x800A41B0)
SetType(0x800A41B0, "int FindEmptyIndex__Fv()")
del_items(0x800A41E8)
SetType(0x800A41E8, "void MY_TSK_Sleep__Fi(int time)")
del_items(0x800A4240)
SetType(0x800A4240, "void UPDATEPROGRESS__Fi(int inc)")
del_items(0x800A430C)
SetType(0x800A430C, "bool IsGameLoading__Fv()")
del_items(0x800A4318)
SetType(0x800A4318, "void DrawCutScreen__Fi(int lev)")
del_items(0x800A4754)
SetType(0x800A4754, "void PutUpCutScreenTSK__FP4TASK(struct TASK *T)")
del_items(0x800A481C)
SetType(0x800A481C, "void PutUpCutScreen__Fi(int lev)")
del_items(0x800A496C)
SetType(0x800A496C, "void TakeDownCutScreen__Fv()")
del_items(0x800A4A10)
SetType(0x800A4A10, "void FinishBootProgress__Fv()")
del_items(0x800A4A9C)
SetType(0x800A4A9C, "void FinishProgress__Fv()")
del_items(0x800A4AFC)
SetType(0x800A4AFC, "void PRIM_GetPrim__FPP7POLY_G4_addr_800A4AFC(struct POLY_G4 **Prim)")
del_items(0x800A4B78)
SetType(0x800A4B78, "void _GLOBAL__D_CutScr()")
del_items(0x800A4BA0)
SetType(0x800A4BA0, "void _GLOBAL__I_CutScr()")
del_items(0x800A4BC8)
SetType(0x800A4BC8, "void SetRGB__6DialogUcUcUc_addr_800A4BC8(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x800A4BE8)
SetType(0x800A4BE8, "void SetBack__6Dialogi_addr_800A4BE8(struct Dialog *this, int Type)")
del_items(0x800A4BF0)
SetType(0x800A4BF0, "void SetBorder__6Dialogi_addr_800A4BF0(struct Dialog *this, int Type)")
del_items(0x800A4BF8)
SetType(0x800A4BF8, "void ___6Dialog_addr_800A4BF8(struct Dialog *this, int __in_chrg)")
del_items(0x800A4C20)
SetType(0x800A4C20, "struct Dialog *__6Dialog_addr_800A4C20(struct Dialog *this)")
del_items(0x800A4CA0)
SetType(0x800A4CA0, "int GetOverlayOtBase__7CBlocks_addr_800A4CA0()")
del_items(0x800A4CA8)
SetType(0x800A4CA8, "void | |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
# Standard library
from urllib.parse import urljoin, urlsplit
import argparse
import sys
import time
import traceback
import os
# Third-party
from bs4 import BeautifulSoup
import grequests # WARNING: Always import grequests before requests
from junit_xml import TestCase, TestSuite
import requests
# Set defaults
START_TIME = time.time()
ERR_CODE = 0
VERBOSE = False
OUTPUT_ERR = False
LOCAL = False
HEADER = {
"User-Agent": "Mozilla/5.0 (X11; Linux i686 on x86_64; rv:10.0) Gecko/20100101 Firefox/10.0"
}
MEMOIZED_LINKS = {}
MAP_BROKEN_LINKS = {}
GOOD_RESPONSE = [200, 300, 301, 302]
OUTPUT = None
REQUESTS_TIMEOUT = 5
LICENSE_LOCAL_PATH = "../creativecommons.org/docroot/legalcode"
TEST_ORDER = ["zero", "4.0", "3.0"]
class CheckerError(Exception):
def __init__(self, message, code=None):
self.code = code if code else 1
self.message = "({}) {}".format(self.code, message)
super(CheckerError, self).__init__(self.message)
def __str__(self):
return self.message
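# Hedged usage sketch (not from the source): CheckerError carries a numeric
# code so a caller can turn it into the process exit status, e.g.:
#     try:
#         raise CheckerError("example failure", 1)
#     except CheckerError as e:
#         print(e, file=sys.stderr)  # "(1) example failure"
#         sys.exit(e.code)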
def parse_argument(args):
"""parse arguments from cli
Args:
args (list): list of arguments parsed from command line
"""
global VERBOSE
global OUTPUT_ERR
global OUTPUT
global LOCAL
# Setup argument parser
parser = argparse.ArgumentParser(
description="Script to check broken links in CC licenses"
)
parser.add_argument(
"-v",
"--verbose",
help="Increase verbosity of output",
action="store_true",
)
parser.add_argument(
"--output-error",
help="Outputs all link errors to file (default: errorlog.txt) and creates junit-xml type summary(test-summary/junit-xml-report.xml)",
metavar="output_file",
const="errorlog.txt",
nargs="?",
type=argparse.FileType("w", encoding="utf-8"),
dest="OUTPUT",
)
parser.add_argument(
"--local",
help="Scrapes license files from local file system",
action="store_true",
)
args = parser.parse_args(args)
if args.verbose:
VERBOSE = True
if args.OUTPUT:
OUTPUT = args.OUTPUT
OUTPUT_ERR = True
if args.local:
LOCAL = True
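# Hedged usage sketch (not from the source): the entry point presumably calls
# this with sys.argv[1:], e.g.
#     parse_argument(["-v", "--local"])              # sets VERBOSE and LOCAL
#     parse_argument(["--output-error", "err.txt"])  # sets OUTPUT_ERR and OUTPUT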
def get_local_license():
"""This function get all the licenses stored locally
Returns:
list: list of file names of license file
"""
try:
all_files = os.listdir(LICENSE_LOCAL_PATH)
except FileNotFoundError:
raise CheckerError(
"Local license path({}) does not exist".format(LICENSE_LOCAL_PATH)
)
    # Catching permission denied (OSError) or other errors
except:
raise
links_ordered = list()
# Test newer licenses first (they are the most volatile) and exclude
# non-.html files
for version in TEST_ORDER:
for link in all_files:
if ".html" in link and version in link:
links_ordered.append(link)
for link in all_files:
if ".html" in link and link not in links_ordered:
links_ordered.append(link)
links = links_ordered
print("Number of files to be checked:", len(links))
return links
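# Worked example of the TEST_ORDER reordering above (file names invented):
#     all_files = ["by_3.0.html", "zero_1.0.html", "by_4.0.html", "README.txt"]
# produces links == ["zero_1.0.html", "by_4.0.html", "by_3.0.html"]:
# the newest (most volatile) licenses come first and non-.html entries are dropped.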
def get_global_license():
"""This function scrapes all the license file in the repo:
https://github.com/creativecommons/creativecommons.org/tree/master/docroot/legalcode
Returns:
        str[]: The list of license/deed files found in the repository
"""
URL = (
"https://github.com/creativecommons/creativecommons.org/tree/master"
"/docroot/legalcode"
)
page_text = request_text(URL)
soup = BeautifulSoup(page_text, "lxml")
links = soup.table.tbody.find_all("a", class_="js-navigation-open")
links_ordered = list()
# Test newer licenses first (they are the most volatile) and exclude
# non-.html files
for version in TEST_ORDER:
for link in links:
if ".html" in link.string and version in link.string:
links_ordered.append(link)
for link in links:
if ".html" in link.string and link not in links_ordered:
links_ordered.append(link)
links = links_ordered
print("Number of files to be checked:", len(links))
return links
def request_text(page_url):
"""This function makes a requests get and returns the text result
Args:
page_url (str): URL to perform a GET request for
Returns:
str: request response text
"""
try:
r = requests.get(page_url, headers=HEADER, timeout=REQUESTS_TIMEOUT)
fetched_text = r.content
except requests.exceptions.ConnectionError:
raise CheckerError(
"FAILED to retreive source HTML ({}) due to"
" ConnectionError".format(page_url),
1,
)
except requests.exceptions.Timeout:
raise CheckerError(
"FAILED to retreive source HTML ({}) due to"
" Timeout".format(page_url),
1,
)
except:
raise
return fetched_text
def request_local_text(license_name):
"""This function reads license content from license file stored in local file system
Args:
license_name (str): Name of the license
Returns:
str: Content of license file
"""
filename = license_name
path = os.path.join(LICENSE_LOCAL_PATH, filename)
try:
with open(path) as lic:
return lic.read()
except FileNotFoundError:
raise CheckerError(
"Local license path({}) does not exist".format(path)
)
    # Catching permission denied (OSError) or other errors
except:
raise
def create_base_link(filename):
"""Generates base URL on which the license file will be displayed
Args:
filename (str): Name of the license file
Returns:
str: Base URL of the license file
"""
ROOT_URL = "https://creativecommons.org"
parts = filename.split("_")
if parts[0] == "samplingplus":
extra = "/licenses/sampling+"
elif parts[0].startswith("zero"):
extra = "/publicdomain/" + parts[0]
else:
extra = "/licenses/" + parts[0]
extra = extra + "/" + parts[1]
if parts[0] == "samplingplus" and len(parts) == 3:
extra = extra + "/" + parts[2] + "/legalcode"
return ROOT_URL + extra
if len(parts) == 4:
extra = extra + "/" + parts[2]
extra = extra + "/legalcode"
if len(parts) >= 3:
extra = extra + "." + parts[-1]
return ROOT_URL + extra
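# Worked examples for create_base_link (the input filename forms are assumed,
# not taken from the source; whether the caller strips a trailing ".html" is
# not shown in this excerpt):
#     create_base_link("zero_1.0")         == ROOT_URL + "/publicdomain/zero/1.0/legalcode"
#     create_base_link("by_4.0_ar")        == ROOT_URL + "/licenses/by/4.0/legalcode.ar"
#     create_base_link("samplingplus_1.0_br")
#                                          == ROOT_URL + "/licenses/sampling+/1.0/br/legalcode"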
def verbose_print(*args, **kwargs):
"""Prints only if -v or --verbose flag is set
"""
if VERBOSE:
print(*args, **kwargs)
def get_scrapable_links(base_url, links_in_license):
"""Filters out anchor tags without href attribute, internal links and
mailto scheme links
Args:
base_url (string): URL on which the license page will be displayed
links_in_license (list): List of all the links found in file
Returns:
        tuple: valid_anchors - list of all scrapable anchor tags
valid_links - list of all absolute scrapable links
"""
valid_links = []
valid_anchors = []
for link in links_in_license:
try:
href = link["href"]
except KeyError:
try:
assert link["id"]
except KeyError:
try:
assert link["name"]
verbose_print(
" {:<24}{}".format("Anchor uses name", link)
)
except:
verbose_print(
" {:<24}{}".format("Anchor w/o href or id", link)
)
continue
if href[0] == "#":
# No need to report non-issue (not actionable)
# verbose_print(
# " {:<24}{}".format("Skipping internal link ", link)
# )
continue
if href.startswith("mailto:"):
# No need to report non-issue (not actionable)
# verbose_print(
# " {:<24}{}".format("Skipping mailto link ", link)
# )
continue
analyze = urlsplit(href)
valid_links.append(create_absolute_link(base_url, analyze))
valid_anchors.append(link)
return (valid_anchors, valid_links)
def create_absolute_link(base_url, link_analysis):
"""Creates absolute links from relative links
Args:
base_url (string): URL on which the license page will be displayed
        link_analysis (class 'urllib.parse.SplitResult'): Link split by
            urlsplit that is to be converted
Returns:
str: absolute link
"""
href = link_analysis.geturl()
# Check for relative link
if (
link_analysis.scheme == ""
and link_analysis.netloc == ""
and link_analysis.path != ""
):
href = urljoin(base_url, href)
return href
# Append scheme https where absent
if link_analysis.scheme == "":
link_analysis = link_analysis._replace(scheme="https")
href = link_analysis.geturl()
return href
return href
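# Hedged illustration (inputs invented, not from the source) of the two helpers
# above for base = "https://creativecommons.org/licenses/by/4.0/legalcode":
#     soup = BeautifulSoup(
#         '<a href="#s1">x</a><a href="mailto:a@b.org">x</a>'
#         '<a href="./deed.en">x</a><a href="//wiki.creativecommons.org/FAQ">x</a>',
#         "lxml",
#     )
#     anchors, links = get_scrapable_links(base, soup.find_all("a"))
# keeps only the last two anchors and absolutizes their hrefs:
#     links == ["https://creativecommons.org/licenses/by/4.0/deed.en",
#               "https://wiki.creativecommons.org/FAQ"]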
def get_memoized_result(valid_links, valid_anchors):
"""Get memoized result of previously checked links
Args:
valid_links (list): List of all scrapable links in license
valid_anchors (list): List of all scrapable anchor tags in license
Returns:
        tuple: stored_links - List of links whose responses are memoized
stored_anchors - List of anchor tags corresponding to stored_links
stored_result - List of responses corresponding to stored_links
check_links - List of links which are to be checked
check_anchors - List of anchor tags corresponding to check_links
"""
stored_links = []
stored_anchors = []
stored_result = []
check_links = []
check_anchors = []
for idx, link in enumerate(valid_links):
status = MEMOIZED_LINKS.get(link)
if status:
stored_anchors.append(valid_anchors[idx])
stored_result.append(status)
stored_links.append(link)
else:
check_links.append(link)
check_anchors.append(valid_anchors[idx])
return (
stored_links,
stored_anchors,
stored_result,
check_links,
check_anchors,
)
def exception_handler(request, exception):
"""Handles Invalid Scheme and Timeout Error from grequests.get
Args:
        request (class 'grequests.AsyncRequest'): Request on which the error
            occurred
        exception (class 'requests.exceptions'): Exception that occurred
    Returns:
        str: Name of the exception as a string
"""
if type(exception) == requests.exceptions.ConnectionError:
return "Connection Error"
elif type(exception) == requests.exceptions.ConnectTimeout:
return "Timeout Error"
elif type(exception) == requests.exceptions.InvalidSchema:
return "Invalid Schema"
else:
return type(exception).__name__
def memoize_result(check_links, responses):
"""Memoize the result of links checked
Args:
check_links (list): List of fresh links that are processed
responses (list): List of response status codes corresponding to
check_links
"""
for idx, link in enumerate(check_links):
MEMOIZED_LINKS[link] = responses[idx]
def write_response(all_links, response, base_url, license_name, valid_anchors):
"""Writes broken links to CLI and file
Args:
        all_links (list): List of all scrapable links found in the license page
        response (list): Response status codes/exceptions for all the links in
all_links
base_url (string): URL on which the license page will be displayed
license_name (string): Name of license
valid_anchors (list): List of all the scrapable anchors
Returns:
int: Number of broken links found in license
"""
caught_errors = 0
for idx, link_status in enumerate(response):
try:
status = link_status.status_code
except AttributeError:
status = link_status
if status not in GOOD_RESPONSE:
map_links_file(all_links[idx], base_url)
caught_errors += 1
if caught_errors == 1:
if not VERBOSE:
print("Errors:")
output_write("\n{}\nURL: {}".format(license_name, base_url))
result = " {:<24}{}".format(str(status), valid_anchors[idx])
print(result)
output_write(result)
return caught_errors
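# Hedged sketch (not part of the original script): how the helpers above are
# presumably combined per license page by the main loop, which this excerpt
# does not include. The function name and its exact wiring are assumptions.
def _example_check_license_page(base_url, license_name, links_in_license):
    """Check one page's links and return the number of broken ones found."""
    valid_anchors, valid_links = get_scrapable_links(base_url, links_in_license)
    (
        stored_links,
        stored_anchors,
        stored_result,
        check_links,
        check_anchors,
    ) = get_memoized_result(valid_links, valid_anchors)
    # Fire the uncached requests concurrently; failures are converted to
    # strings by exception_handler and flagged later by write_response.
    rs = (
        grequests.get(link, headers=HEADER, timeout=REQUESTS_TIMEOUT)
        for link in check_links
    )
    responses = grequests.map(rs, exception_handler=exception_handler)
    memoize_result(check_links, responses)
    return write_response(
        stored_links + check_links,
        stored_result + responses,
        base_url,
        license_name,
        stored_anchors + check_anchors,
    )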
def map_links_file(link, file_url):
"""Maps broken link to the files of occurence
Args:
link (str): Broken link encountered
file_url (str): File url in which the broken link was encountered
"""
if MAP_BROKEN_LINKS.get(link):
if file_url not in MAP_BROKEN_LINKS[link]:
MAP_BROKEN_LINKS[link].append(file_url)
else:
MAP_BROKEN_LINKS[link] = [file_url]
def output_write(*args, **kwargs):
    """Prints to the output file when --output-error is set
    """
    if OUTPUT_ERR:
        kwargs["file"] = OUTPUT
        print(*args, **kwargs)