import dash
import dash_cytoscape as cyto
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import numpy as np
import base64  # used by parse_contents() below
import io      # used by parse_contents() below
import plotly.plotly as py
from plotly import graph_objs as go
from plotly.graph_objs import *
# external scripts
external_scripts = [
'https://www.google-analytics.com/analytics.js',
{'src': 'https://cdn.polyfill.io/v2/polyfill.min.js'},
{
'src': 'https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.10/lodash.core.js',
'integrity': '<KEY>',
'crossorigin': 'anonymous'
}
]
# data
# def initialize(): - the data loading below could later be wrapped into an initialize() function
# FreewayFDSlatlon.xlsx FreewayFDSdata.xlsx
vls = pd.ExcelFile('FreewayFDSdata.xlsx')
dff = pd.read_excel(vls, 'Volume', parse_dates=True, index_col="Time")
dff = dff.T
xls = pd.ExcelFile('freewayfdslatlon.xlsx') # this loads the data only once saving memory
df = pd.read_excel(xls, 'Volume', parse_dates=True, index_col="Time")
df = df.T
df2 = pd.read_excel(xls, 'Occupancy', parse_dates=True, index_col="Time")
df2 = df2.T
df3 = pd.read_excel(xls, 'Speed', parse_dates=True, index_col="Time")
df3 = df3.T
Detectors = list(df.columns)
mf = pd.read_excel(xls, 'Coordinates', index_col="Short Name")
# return df, df2, df3, Detectors, mf
mapbox_access_token = '<KEY>'
# The slider value is used to filter the data frame down to the selected time period before mapping
# This function creates an Excel-like HTML table - not necessarily useful, but left here in case someone wants it later
def generate_table(dataframe, max_rows=3):
return html.Table(
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])] +
# Body
[html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))]
# Styling
)
# timeslider arrangement
def heatmap(SVO):
# adds an integer 'Period' column and returns its min/max; these bounds are used for the time slider
SVO['Period'] = np.arange(len(SVO))
mintime = SVO['Period'].min()
maxtime = SVO['Period'].max()
return mintime, maxtime
mintime, maxtime = heatmap(df)
hf = df.reset_index().set_index('Period')
# print(hf)
df2['Period'] = np.arange(len(df2))
hf2 = df2.reset_index().set_index('Period')
df3['Period'] = np.arange(len(df3))
hf3 = df3.reset_index().set_index('Period')  # use the Speed frame (df3), not the Volume frame
# uploading data - this allows you to drag and drop excel or other files
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
xls = pd.read_csv(
io.StringIO(decoded.decode('utf-8')))
return xls
elif 'xls' in filename:
# Assume that the user uploaded an excel file
xls = pd.read_excel(io.BytesIO(decoded))
return xls
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
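# A minimal sketch of how parse_contents could be wired to the Upload component
# above. The callback is commented out and illustrative only: the original app's
# upload callback is not shown in this file, and the 'output-data-upload' div id
# is hypothetical (it does not appear in the layout below).
#
# @app.callback(Output('output-data-upload', 'children'),
#               [Input('upload-data', 'contents')],
#               [State('upload-data', 'filename')])
# def update_upload(list_of_contents, list_of_names):
#     if list_of_contents is not None:
#         frames = [parse_contents(c, n)
#                   for c, n in zip(list_of_contents, list_of_names)]
#         return html.Div('Loaded {} file(s)'.format(len(frames)))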
"""
create a function taking a date as argument that will display the data on your map only for that date.
Or two arguments (startDate, enDate), and your function will display the data between those two dates.
This function has to filter the data, and display it.
"""
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.H2("Managed Motorway Data App", style={'font-family': 'Dosis'}),
html.P("Select different days using the dropdown and the slider\
below or by selecting different time frames on the\
histogram. Select Data type by clicking the Radio option",
className="explanationParagraph twelve columns"),
html.Div([
html.Div([
dcc.RadioItems(
id='tdatam',
options=[{'label': i, 'value': i} for i in ['Volume', 'Speed', 'Occupancy']],
value='Volume',
labelStyle={'display': 'inline-block'}
),
dcc.Upload(
id='upload-data',
children=html.Div([
html.A('Select Files')
]),
style={
'borderWidth': '10px',
'borderStyle': 'dashed',
'textAlign': 'center',
'float': 'right',
},
# Allow multiple files to be uploaded
multiple=True
),
],
style={'width': '100%', 'display': 'flex'}),
html.Div([
dcc.Graph(id='graph'),
html.P("", id="popupAnnotation", className="popupAnnotation"),
dcc.Slider(
id="Slider",
marks={i: 'Hour {}'.format(i) for i in range(0, 24)},
min=mintime / 4,
max=maxtime / 4,
step=.01,
value=19,
)
], style={"padding-bottom": '50px', "padding-right": '50px', "padding-left": '50px', "padding-top": '50px'}),
dcc.Tabs(id="tabs", children=[
dcc.Tab(label='Performance Metrics', children=[
html.Div([
html.P(
"Select different Freeway using the dropdown and click and hold on the graph to zoom in on specific parts of the graph. Higher Number the worse the congestion"),
dcc.Graph(
id='Freeway',
figure={
'data': [
{'x': ["Today", "Yesterday", "Average"], 'y': [4, 1, 2], 'type': 'bar',
'name': 'Monash'},
{'x': ["Today", "Yesterday", "Average"], 'y': [2, 4, 5], 'type': 'bar',
'name': u'Western'},
],
'layout': {
'title': 'Network Performance Indicators'
}
}
)])
]),
dcc.Tab(label='Detector Information', children=[
html.Div([
html.P(
"Select different Detectors using the dropdown and click and hold on the graph to zoom in on specific parts of the graph. Select Data type by clicking the Radio option"),
dcc.Dropdown(
id='xaxis-column',
options=[{'label': i, 'value': i} for i in Detectors],
style={"width": '48%'},
multi=True
),
dcc.RadioItems(
id='xaxis-type',
options=[{'label': i, 'value': i} for i in ['Volume', 'Speed', 'Occupancy']],
value='Volume',
labelStyle={'display': 'inline-block'}
),
dcc.Graph(id='indicator-graphic')])
]),
dcc.Tab(label='NetworkLens', children=[
html.Div([
html.P(
"Nodes are detection points and the linkages describe how effective a link is."),
cyto.Cytoscape(
id='cytoscape',
layout={'name': 'preset'},
style={'width': '100%', 'height': '400px'},
elements=[
{'data': {'id': 'one', 'label': 'Node 1'}, 'position': {'x': 75, 'y': 75}},
{'data': {'id': 'two', 'label': 'Node 2'}, 'position': {'x': 200, 'y': 200}},
{'data': {'source': 'one', 'target': 'two'}}
]
)])]),
dcc.Tab(label='Example Data', children=[
html.Div([
generate_table(dff)
], style={'overflowX': 'scroll', 'overflowY': 'scroll', 'height': '500px'})
])])
]
)
])
# Marker color lookup: returns the values used to color the map markers for the current slider position
def datatime(t, hf):
floortime = np.floor(t)
heat = hf.filter(items=[floortime], axis=0).T.drop("index")
return heat[floortime]
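# Example: datatime(7.25, hf) floors the slider value to 7, selects the row of
# hf whose 'Period' label is 7, drops the helper 'index' row created by
# reset_index(), and returns the remaining values as a Series.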
@app.callback(
Output('graph', 'figure'),
[Input('Slider', 'value'),
Input('tdatam', 'value')],
[State('graph', 'relayoutData')]
)
def update_map(time, tdata, gstate):
# use state?
zoom = 10.0
latInitial = -37.8136
lonInitial = 144.9631
bearing = 0
# print(time)
# print(hf)
# This is awkward to do in Dash and would be cleaner in plain JavaScript.
# This switches the data type shown on the map.
if tdata == "Volume":
# this is dynamic print(period)
# print(time)
# print(datatime(period,hf))
return go.Figure(
data=Data([
Scattermapbox(
lat=mf.Y,
lon=mf.X,
mode='markers',
hoverinfo="text",
text=["Monash Freeway", "Western Link",
"Eastern Link",
"Melbourne CBD", "Swan Street"],
# opacity=0.5,
marker=Marker(size=15,
color=datatime(time, hf),
colorscale='Viridis',
opacity=.8,
showscale=True,
cmax=2500,
cmin=400
),
),
]),
layout=Layout(
autosize=True,
height=750,
margin=Margin(l=0, r=0, t=0, b=0),
showlegend=False,
mapbox=dict(
accesstoken=mapbox_access_token,
center=dict(
lat=latInitial, # -37.8136
lon=lonInitial # 144.9631
),
style='dark',
bearing=bearing,
zoom=zoom
),
updatemenus=[
dict(
buttons=([
dict(
args=[{
'mapbox.zoom': 10,
'mapbox.center.lon': '144.9631',
'mapbox.center.lat': '-37.8136',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Reset Zoom',
method='relayout'
)
]),
direction='left',
pad={'r': 0, 't': 0, 'b': 0, 'l': 0},
showactive=False,
type='buttons',
x=0.45,
xanchor='left',
yanchor='bottom',
bgcolor='#323130',
borderwidth=1,
bordercolor="#6d6d6d",
font=dict(
color="#FFFFFF"
),
y=0.02
),
dict(
buttons=([
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '151.15',
'mapbox.center.lat': '-33.873',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Western Link',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '145.218',
'mapbox.center.lat': '-37.81',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Eastern Link',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 12,
'mapbox.center.lon': '145.061',
'mapbox.center.lat': '-37.865',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Monash Freeway',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '145.005',
'mapbox.center.lat': '-37.826389',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Swan Street',
method='relayout'
)
]),
direction="down",
pad={'r': 0, 't': 0, 'b': 0, 'l': 0},
showactive=False,
bgcolor="rgb(50, 49, 48, 0)",
type='buttons',
yanchor='bottom',
xanchor='left',
font=dict(
color="#FFFFFF"
),
x=0,
y=0.05
)
]
)
)
elif tdata == "Speed":
return go.Figure(
data=Data([
Scattermapbox(
lat=mf.Y,
lon=mf.X,
mode='markers',
hoverinfo="text",
text=["Monash Freeway", "Western Link",
"Eastern Link",
"Melbourne CBD", "Swan Street"],
# opacity=0.5,
marker=Marker(size=15,
color=datatime(time, hf3),
colorscale='Viridis',
opacity=.8,
showscale=True,
cmax=50,
cmin=150
),
),
]),
layout=Layout(
autosize=True,
height=750,
margin=Margin(l=0, r=0, t=0, b=0),
showlegend=False,
mapbox=dict(
accesstoken=mapbox_access_token,
center=dict(
lat=latInitial, # -37.8136
lon=lonInitial # 144.9631
),
style='dark',
bearing=bearing,
zoom=zoom
),
updatemenus=[
dict(
buttons=([
dict(
args=[{
'mapbox.zoom': 10,
'mapbox.center.lon': '144.9631',
'mapbox.center.lat': '-37.8136',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Reset Zoom',
method='relayout'
)
]),
direction='left',
pad={'r': 0, 't': 0, 'b': 0, 'l': 0},
showactive=False,
type='buttons',
x=0.45,
xanchor='left',
yanchor='bottom',
bgcolor='#323130',
borderwidth=1,
bordercolor="#6d6d6d",
font=dict(
color="#FFFFFF"
),
y=0.02
),
dict(
buttons=([
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '151.15',
'mapbox.center.lat': '-33.873',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Western Link',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '145.218',
'mapbox.center.lat': '-37.81',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Eastern Link',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 12,
'mapbox.center.lon': '145.061',
'mapbox.center.lat': '-37.865',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Monash Freeway',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '145.005',
'mapbox.center.lat': '-37.826389',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Swan Street',
method='relayout'
)
]),
direction="down",
pad={'r': 0, 't': 0, 'b': 0, 'l': 0},
showactive=False,
bgcolor="rgb(50, 49, 48, 0)",
type='buttons',
yanchor='bottom',
xanchor='left',
font=dict(
color="#FFFFFF"
),
x=0,
y=0.05
)
]
)
)
elif tdata == "Occupancy":
return go.Figure(
data=Data([
Scattermapbox(
lat=mf.Y,
lon=mf.X,
mode='markers',
hoverinfo="text",
text=["Monash Freeway", "Western Link",
"Eastern Link",
"Melbourne CBD", "Swan Street"],
# opacity=0.5,
marker=Marker(size=15,
color=datatime(time, hf2),
colorscale='Viridis',
opacity=.8,
showscale=True,
cmax=3,
cmin=0
),
),
]),
layout=Layout(
autosize=True,
height=750,
margin=Margin(l=0, r=0, t=0, b=0),
showlegend=False,
mapbox=dict(
accesstoken=mapbox_access_token,
center=dict(
lat=latInitial, # -37.8136
lon=lonInitial # 144.9631
),
style='dark',
bearing=bearing,
zoom=zoom
),
updatemenus=[
dict(
buttons=([
dict(
args=[{
'mapbox.zoom': 10,
'mapbox.center.lon': '144.9631',
'mapbox.center.lat': '-37.8136',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Reset Zoom',
method='relayout'
)
]),
direction='left',
pad={'r': 0, 't': 0, 'b': 0, 'l': 0},
showactive=False,
type='buttons',
x=0.45,
xanchor='left',
yanchor='bottom',
bgcolor='#323130',
borderwidth=1,
bordercolor="#6d6d6d",
font=dict(
color="#FFFFFF"
),
y=0.02
),
dict(
buttons=([
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '151.15',
'mapbox.center.lat': '-33.873',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Western Link',
method='relayout'
),
dict(
args=[{
'mapbox.zoom': 15,
'mapbox.center.lon': '145.218',
'mapbox.center.lat': '-37.81',
'mapbox.bearing': 0,
'mapbox.style': 'dark'
}],
label='Eastern Link',
method='relayout'
),
dict(
args=[{
'mapbox.zoom':
= meta_episode_len
self.num_trajs = num_trajs
self.num_trajs_eval = num_trajs_eval
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
self.expsampler = ExpInPlacePathSamplerSimple(
env=env,
policy=self.exploration_agent,
encoder=self.context_encoder,
max_path_length=self.max_path_length,
)
self.seedsampler = SeedInPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
def make_exploration_policy(self, policy):
return policy
def make_eval_policy(self, policy):
return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
def train(self):
'''
meta-training loop
'''
self.pretrain()
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
gt.reset()
gt.set_def_unique(False)
self._current_path_builder = PathBuilder()
# at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
for it_ in gt.timed_for(
range(self.num_iterations),
save_itrs=True,
):
self._start_epoch(it_)
self.training_mode(True)
if it_ == 0:
print('collecting initial pool of data for train and eval')
# temp for evaluating
for idx in self.train_tasks:
self.task_idx = idx
self.env.reset_task(idx)
for _ in range(self.num_trajs):
self.collect_data_exp(self.meta_episode_len)
if self.seed_sample:
self.collect_data_seed(self.num_initial_steps, 1, np.inf, add_to_enc_buffer=False)
else:
self.collect_data(self.num_initial_steps, 1, np.inf,add_to_enc_buffer=False)
# Sample data from train tasks.
for i in range(self.num_tasks_sample):
idx = np.random.randint(len(self.train_tasks))
self.task_idx = idx
self.env.reset_task(idx)
if (it_+1)%5==0:
self.enc_replay_buffer.task_buffers[idx].clear()
for _ in range(self.num_trajs):
self.collect_data_exp(self.meta_episode_len)
if self.num_steps_prior > 0:
if self.seed_sample:
self.collect_data_seed(self.num_steps_prior, 1, np.inf, add_to_enc_buffer=False)
else:
self.collect_data(self.num_steps_prior, 1, np.inf,add_to_enc_buffer=False)
# collect some trajectories with z ~ posterior
if self.num_steps_posterior > 0:
if self.seed_sample:
self.collect_data_seed(self.num_steps_posterior, 1, self.update_post_train, add_to_enc_buffer=False)
else:
self.collect_data(self.num_steps_posterior, 1, self.update_post_train,add_to_enc_buffer=False)
# even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
if self.num_extra_rl_steps_posterior > 0:
if self.seed_sample:
self.collect_data_seed(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
else:
self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
print('collect over')
# Sample train tasks and compute gradient updates on parameters.
for train_step in range(self.num_train_steps_per_itr):
indices = np.random.choice(self.train_tasks, self.meta_batch)
self._do_training(indices)
self._n_train_steps_total += 1
gt.stamp('train')
self.training_mode(False)
# eval
self._try_to_eval(it_)
gt.stamp('eval')
self._end_epoch()
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
def sample_eval(self,indices, context):
reward = torch.zeros(context.shape[0],1,1).cuda()
rem = 0
for indice in indices:
self.env.reset_task(indice)
context_i = context[rem,...]
context_i = torch.unsqueeze(context_i,0)
self.agent.clear_z()
self.agent.infer_posterior(context_i)
path,_ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length*5,resample=1)
reward[rem] = eval_util.get_average_returns(path)
rem = rem + 1
return reward
def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=False):
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
num_transitions += n_samples
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context, context_unbatched = self.sample_context(self.task_idx,False)
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
def collect_data_exp(self, num_episodes):
'''
get trajectories from the current env in batch mode with the exploration policy
collect complete exploration trajectories and add them to the encoder replay buffer
:param num_episodes: number of exploration episodes to collect
'''
# start from the prior
paths, n_samples = self.expsampler.obtain_samples(max_trajs=num_episodes)
self.enc_replay_buffer.add_paths(self.task_idx, paths)
self._n_env_steps_total += n_samples
gt.stamp('sample')
def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True,accumulate_context=True):
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=1,
accum_context=accumulate_context
)
num_transitions += n_samples
if add_to_policy_buffer:
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
#if update_posterior_rate != np.inf:
# context = self.prepare_context(self.task_idx)
# self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
def _try_to_eval(self, epoch):
logger.save_extra_data(self.get_extra_data_to_save(epoch))
if self._can_evaluate(epoch):
self.evaluate(epoch,self.num_trajs)
params = self.get_epoch_snapshot(epoch)
logger.save_itr_params(epoch, params)
table_keys = logger.get_table_key_set()
if self._old_table_keys is not None:
assert table_keys == self._old_table_keys, (
"Table keys cannot change from iteration to iteration."
)
self._old_table_keys = table_keys
logger.record_tabular(
"Number of train steps total",
self._n_train_steps_total,
)
logger.record_tabular(
"Number of env steps total",
self._n_env_steps_total,
)
logger.record_tabular(
"Number of rollouts total",
self._n_rollouts_total,
)
times_itrs = gt.get_times().stamps.itrs
train_time = times_itrs['train'][-1]
if not self.use_SMM:
sample_time = times_itrs['sample'][-1]
else:
sample_time = times_itrs['policy sample'][-1]
eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
epoch_time = train_time + sample_time + eval_time
total_time = gt.get_times().total
logger.record_tabular('Train Time (s)', train_time)
logger.record_tabular('(Previous) Eval Time (s)', eval_time)
logger.record_tabular('Sample Time (s)', sample_time)
logger.record_tabular('Epoch Time (s)', epoch_time)
logger.record_tabular('Total Train Time (s)', total_time)
logger.record_tabular("Epoch", epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
else:
logger.log("Skipping eval for now.")
def _can_evaluate(self,epoch):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True #if (epoch+1)%5==0 else False
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
def _get_action_and_info(self, agent, observation):
"""
Get an action to take in the environment.
:param observation:
:return:
"""
agent.set_num_steps_total(self._n_env_steps_total)
return agent.get_action(observation,)
def _start_epoch(self, epoch):
self._epoch_start_time = time.time()
self._exploration_paths = []
self._do_train_time = 0
logger.push_prefix('Iteration #%d | ' % epoch)
def _end_epoch(self):
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
##### Snapshotting utils #####
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
def get_extra_data_to_save(self, epoch):
"""
Save things that shouldn't be saved every snapshot but rather
overwritten every time.
:param epoch:
:return:
"""
if self.render:
self.training_env.render(close=True)
data_to_save = dict(
epoch=epoch,
)
if self.save_environment:
data_to_save['env'] = self.training_env
if self.save_replay_buffer:
data_to_save['replay_buffer'] = self.replay_buffer
if self.save_algorithm:
data_to_save['algorithm'] = self
return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
path, num = self.expsampler.obtain_samples(deterministic=self.eval_deterministic,
max_trajs=self.num_exp_traj_eval, accum_context_for_agent=True, context_agent = self.agent,split=True)
num_transitions += num
num_trajs +=self.num_exp_traj_eval
paths+=path
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
def _do_eval(self, indices, epoch):
final_returns = []
online_returns = []
for idx in indices:
all_rets = []
for r in range(self.num_evals):
paths = self.collect_paths(idx, epoch, r)
all_rets.append([eval_util.get_average_returns([p]) for p in paths])
final_returns.append(np.mean([np.mean(a) for a in all_rets]))
# record online returns for the first n trajectories
n = min([len(a) for a in all_rets])
all_rets = [a[:n] for a in all_rets]
all_rets = np.mean(np.stack(all_rets), axis=0) # avg return per nth rollout
online_returns.append(all_rets)
n = min([len(t) for t in online_returns])
online_returns = [t[:n] for t in online_returns]
return final_returns, online_returns
def evaluate(self, epoch,num_trajs):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()
#if not self.use_SMM:
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
#else:
# prior_paths, _ = self.smm_sampler.obtain_samples(
# max_samples=self.max_path_length * 20,
# )
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
# Refine Steps #
##############################################
def __rafine_steps(self):
'''
This function increases the number of timesteps if the star formation
will eventually consume all the gas, which occurs when dt > (t_star/sfe).
'''
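# Worked example (illustrative numbers only): with dt = 2.0e7 yr,
# t_star = 1.5e8 yr and sfe = 10, the critical delay is
# t_raf = t_star / sfe = 1.5e7 yr < dt, so the step is split into
# nb_split = int(2.0e7 / 1.5e7) + 1 = 2 sub-steps of 1.0e7 yr each.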
# Declaration of the new timestep array
if not self.print_off:
print ('..Time refinement..')
new_dt = []
# For every timestep ...
for i_rs in range(0,len(self.history.timesteps)):
# Calculate the critical time delay
t_raf = self.t_SF_t[i_rs] / self.sfe
# If the step needs to be refined ...
if self.history.timesteps[i_rs] > t_raf:
# Calculate the split factor
nb_split = int(self.history.timesteps[i_rs] / t_raf) + 1
# Split the step
for i_sp_st in range(0,nb_split):
new_dt.append(self.history.timesteps[i_rs]/nb_split)
# If ok, don't change anything
else:
new_dt.append(self.history.timesteps[i_rs])
# Update the timestep information
self.nb_timesteps = len(new_dt)
self.history.timesteps = new_dt
# Update self.history.age
self.history.age = [0]
for ii in range(self.nb_timesteps):
self.history.age.append(self.history.age[-1] + new_dt[ii])
self.history.age = np.array(self.history.age)
# If a timestep needs to be added to be synchronized with
# the external program managing merger trees ...
if self.t_merge > 0.0:
# Find the interval where the step needs to be added
i_temp = 0
t_temp = new_dt[0]
while t_temp / self.t_merge < 0.9999999:
i_temp += 1
t_temp += new_dt[i_temp]
# Keep the t_merger index in memory
self.i_t_merger = i_temp
# Update/redeclare all the arrays (stable isotopes)
ymgal = self._get_iniabu()
self.len_ymgal = len(ymgal)
self.mdot, self.ymgal, self.ymgal_massive, self.ymgal_agb, \
self.ymgal_1a, self.ymgal_nsm, self.ymgal_bhnsm, \
self.ymgal_delayed_extra, self.mdot_massive, \
self.mdot_agb, self.mdot_1a, self.mdot_nsm, self.mdot_bhnsm, \
self.mdot_delayed_extra, \
self.sn1a_numbers, self.sn2_numbers, self.nsm_numbers, self.bhnsm_numbers,\
self.delayed_extra_numbers, self.imf_mass_ranges, \
self.imf_mass_ranges_contribution, self.imf_mass_ranges_mtot = \
self._get_storing_arrays(ymgal, len(self.history.isotopes))
# Update/redeclare all the arrays (unstable isotopes)
if self.len_decay_file > 0:
ymgal_radio = np.zeros(self.nb_radio_iso)
# Initialisation of the storing arrays for radioactive isotopes
self.mdot_radio, self.ymgal_radio, self.ymgal_massive_radio, \
self.ymgal_agb_radio, self.ymgal_1a_radio, self.ymgal_nsm_radio, \
self.ymgal_bhnsm_radio, self.ymgal_delayed_extra_radio, \
self.mdot_massive_radio, self.mdot_agb_radio, self.mdot_1a_radio, \
self.mdot_nsm_radio, self.mdot_bhnsm_radio,\
self.mdot_delayed_extra_radio, dummy, dummy, dummy, dummy, dummy, \
dummy, dummy, dummy = \
self._get_storing_arrays(ymgal_radio, self.nb_radio_iso)
# Recalculate the simulation time (used in chem_evol)
self.t_ce = []
self.t_ce.append(self.history.timesteps[0])
for i_init in range(1,self.nb_timesteps):
self.t_ce.append(self.t_ce[i_init-1] + self.history.timesteps[i_init])
##############################################
# Rafine Steps LR #
##############################################
def __rafine_steps_lr(self):
'''
This function increases the number of timesteps if the star formation
will eventually consume all the gas, which occurs when dt > (t_star/sfe).
'''
# Declaration of the new timestep array
if not self.print_off:
print ('..Time refinement (long range)..')
new_dt = []
# For every timestep ...
for i_rs in range(0,len(self.history.timesteps)):
# Calculate the critical time delay
t_raf = self.t_SF_t[i_rs] / self.sfe
# If the step needs to be refined ...
if self.history.timesteps[i_rs] > t_raf:
# Calculate the number of remaining steps
nb_step_rem = len(self.history.timesteps) - i_rs
t_rem = 0.0
for i_rs in range(0,len(self.history.timesteps)):
t_rem += self.history.timesteps[i_rs]
# Calculate the split factor
nb_split = int(t_rem / t_raf) + 1
# Split the step
for i_sp_st in range(0,nb_split):
new_dt.append(t_rem/nb_split)
# Quit the for loop
break
# If ok, don't change anything
else:
new_dt.append(self.history.timesteps[i_rs])
# Update the timestep information
self.nb_timesteps = len(new_dt)
self.history.timesteps = new_dt
# Update self.history.age
self.history.age = [0]
for ii in range(self.nb_timesteps):
self.history.age.append(self.history.age[-1] + new_dt[ii])
self.history.age = np.array(self.history.age)
# If a timestep needs to be added to be synchronized with
# the external program managing merger trees ...
if self.t_merge > 0.0:
# Find the interval where the step needs to be added
i_temp = 0
t_temp = new_dt[0]
while t_temp / self.t_merge < 0.9999999:
i_temp += 1
t_temp += new_dt[i_temp]
# Keep the t_merger index in memory
self.i_t_merger = i_temp
# Update/redeclare all the arrays (stable isotopes)
ymgal = self._get_iniabu()
self.len_ymgal = len(ymgal)
self.mdot, self.ymgal, self.ymgal_massive, self.ymgal_agb, \
self.ymgal_1a, self.ymgal_nsm, self.ymgal_bhnsm, \
self.ymgal_delayed_extra, self.mdot_massive, \
self.mdot_agb, self.mdot_1a, self.mdot_nsm, self.mdot_bhnsm, \
self.mdot_delayed_extra, \
self.sn1a_numbers, self.sn2_numbers, self.nsm_numbers, self.bhnsm_numbers,\
self.delayed_extra_numbers, self.imf_mass_ranges, \
self.imf_mass_ranges_contribution, self.imf_mass_ranges_mtot = \
self._get_storing_arrays(ymgal, len(self.history.isotopes))
# Update/redeclare all the arrays (unstable isotopes)
if self.len_decay_file > 0:
ymgal_radio = np.zeros(self.nb_radio_iso)
# Initialisation of the storing arrays for radioactive isotopes
self.mdot_radio, self.ymgal_radio, self.ymgal_massive_radio, \
self.ymgal_agb_radio, self.ymgal_1a_radio, self.ymgal_nsm_radio, \
self.ymgal_bhnsm_radio, self.ymgal_delayed_extra_radio, \
self.mdot_massive_radio, self.mdot_agb_radio, self.mdot_1a_radio, \
self.mdot_nsm_radio, self.mdot_bhnsm_radio,\
self.mdot_delayed_extra_radio, dummy, dummy, dummy, dummy, dummy, \
dummy, dummy, dummy = \
self._get_storing_arrays(ymgal_radio, self.nb_radio_iso)
# Recalculate the simulation time (used in chem_evol)
self.t_ce = []
self.t_ce.append(self.history.timesteps[0])
for i_init in range(1,self.nb_timesteps):
self.t_ce.append(self.t_ce[i_init-1] + self.history.timesteps[i_init])
##############################################
# Declare Evol Arrays #
##############################################
def __declare_evol_arrays(self):
'''
This function declares the arrays used to follow the evolution of the
galaxy regarding its growth and the exchange of gas with its surroundings.
'''
# Arrays with specific values at every timestep
self.sfr_input = np.zeros(self.nb_timesteps+1) # Star formation rate [Mo yr^-1]
self.m_DM_t = np.zeros(self.nb_timesteps+1) # Mass of the dark matter halo
self.r_vir_DM_t= np.zeros(self.nb_timesteps+1) # Virial radius of the dark matter halo
self.v_vir_DM_t= np.zeros(self.nb_timesteps+1) # Virial velocity of the halo
self.m_tot_ISM_t = np.zeros(self.nb_timesteps+1) # Mass of the ISM in gas
self.m_outflow_t = np.zeros(self.nb_timesteps) # Mass of the outflow at every timestep
self.eta_outflow_t = np.zeros(self.nb_timesteps) # Mass-loading factor == M_outflow / SFR
self.t_SF_t = np.zeros(self.nb_timesteps+1) # Star formation timescale at every timestep
self.m_crit_t = np.zeros(self.nb_timesteps+1) # Critical ISM mass below which there is no SFR
self.redshift_t = np.zeros(self.nb_timesteps+1) # Redshift associated to every timestep
self.m_inflow_t = np.zeros(self.nb_timesteps) # Mass of the inflow at every timestep
##############################################
# Initialize Gal Prop #
##############################################
def __initialize_gal_prop(self):
'''
This function sets the properties of the selected galaxy, such as its
SFH, its total mass, and its stellar mass.
'''
# No specific galaxy - Use input parameters
if self.galaxy == 'none':
#If an array is used for the SFH ..
if len(self.sfh_array) > 0:
self.__copy_sfr_array()
# If an input file is used for the SFH ...
elif not self.sfh_file == 'none':
self.__copy_sfr_input(self.sfh_file)
# If a star formation law is used in a closed box ...
elif self.cl_SF_law and not self.open_box:
self.__calculate_sfe_cl()
# If a random SFH is chosen ...
elif self.rand_sfh > 0.0:
self.__generate_rand_sfh()
# If the SFH is constant ...
else:
for i_cte_sfr in range(0, self.nb_timesteps+1):
self.sfr_input[i_cte_sfr] = self.cte_sfr
# Milky Way galaxy ...
elif self.galaxy == 'milky_way' or self.galaxy == 'milky_way_cte':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 1.0e12
self.stellar_mass_0 = 5.0e10
# Read Chiappini et al. (2001) SFH
if self.galaxy == 'milky_way':
self.__copy_sfr_input('stellab_data/milky_way_data/sfh_mw_cmr01.txt')
# Read constant SFH
else:
self.__copy_sfr_input('stellab_data/milky_way_data/sfh_cte.txt')
# Sculptor dwarf galaxy ...
elif self.galaxy == 'sculptor':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 1.5e9
self.stellar_mass_0 = 7.8e6
self.stellar_mass_0 = self.stellar_mass_0 * (1-self.mass_frac_SSP)
# Read deBoer et al. (2012) SFH
self.__copy_sfr_input('stellab_data/sculptor_data/sfh_deBoer12.txt')
# Fornax dwarf galaxy ...
elif self.galaxy == 'fornax':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 7.08e8
self.stellar_mass_0 = 4.3e7
self.stellar_mass_0 = self.stellar_mass_0 * (1-self.mass_frac_SSP)
# Read deBoer et al. (2012) SFH
self.__copy_sfr_input('stellab_data/fornax_data/sfh_fornax_deboer_et_al_2012.txt')
# Carina dwarf galaxy ...
elif self.galaxy == 'carina':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 3.4e6
self.stellar_mass_0 = 1.07e6
self.stellar_mass_0 = self.stellar_mass_0 * (1-self.mass_frac_SSP)
# Read deBoer et al. (2014) SFH
self.__copy_sfr_input('stellab_data/carina_data/sfh_deBoer14.txt')
# Interpolate the last timestep
if len(self.sfr_input) > 3:
aa = (self.sfr_input[-2] - self.sfr_input[-3])/\
self.history.timesteps[-2]
bb = self.sfr_input[-2]- (self.history.tend-self.history.timesteps[-1])*aa
self.sfr_input[-1] = aa*self.history.tend + bb
# Keep the SFH in memory
self.history.sfr_abs = self.sfr_input
##############################################
## Copy SFR Array ##
##############################################
def __copy_sfr_array(self):
'''
See copy_sfr_input() for more info.
'''
# Variable to keep track of the OMEGA's timestep
i_dt_csa = 0
t_csa = 0.0
nb_dt_csa = self.nb_timesteps + 1
# Variable to keep track of the total stellar mass from the input SFH
m_stel_sfr_in = 0.0
# For every timestep given in the array (starting at the second step)
for i_csa in range(1,len(self.sfh_array)):
# Calculate the SFR interpolation coefficient
a_sfr = (self.sfh_array[i_csa][1] - self.sfh_array[i_csa-1][1]) / \
(self.sfh_array[i_csa][0] - self.sfh_array[i_csa-1][0])
b_sfr = self.sfh_array[i_csa][1] - a_sfr * self.sfh_array[i_csa][0]
# While we stay in the same time bin ...
while t_csa <= self.sfh_array[i_csa][0]:
# Interpolate the SFR
self.sfr_input[i_dt_csa] = a_sfr * t_csa + b_sfr
# Cumulate the stellar mass formed
m_stel_sfr_in += self.sfr_input[i_dt_csa] * \
self.history.timesteps[i_dt_csa]
# Exit the loop if the
#cvector =
args += ",%s" % clevels
else:
args += ",%s" % list(cvector)
location = item.getp('clocation')
if location == 'surface':
# place the contours at the corresponding z level (contour3)
func = "contour3"
elif location == 'base':
# standard contour plot
if filled:
func = "contourf"
else:
func = "contour"
# get line specifications:
marker, color, style, width = self._get_linespecs(item)
extra_args = ""
if style:
extra_args += ",'LineStyle', '%s'" % style
if width:
extra_args += ",'LineWidth', %s" % float(width)
if item.getp('function') == 'contour3':
# contour3 does not allow property-value pairs
cmd += "[cs,h] = %s(X,Y,Z%s);\n" % (func,args)
if color:
extra_args += ",'EdgeColor', '%s'" % color
# FIXME: What if color is a three-tuple [r,g,b]?
if marker:
extra_args += ",'Marker', '%s'" % marker
if extra_args:
cmd += "set(h%s),...\n" % extra_args
else:
if color:
extra_args += ",'Color', '%s'" % color
# FIXME: What if color is a three-tuple [r,g,b]?
args += extra_args
cmd += "[cs,h] = %s(X,Y,Z%s);\n" % (func,args)
if item.getp('clabels'):
# add labels on the contour curves
cmd += "clabel(cs, h),...\n"
self._script += cmd
def _add_vectors(self, item):
if DEBUG:
print "Adding vectors"
# uncomment the following command if there is no support for
# automatic scaling of vectors in the current plotting package:
#item.scale_vectors()
# grid components:
x = squeeze(item.getp('xdata'))
y = squeeze(item.getp('ydata'))
z = item.getp('zdata')
# vector components:
u = asarray(item.getp('udata'))
v = asarray(item.getp('vdata'))
w = item.getp('wdata')
# get line specifications (marker='.' means no marker):
marker, color, style, width = self._get_linespecs(item)
# scale the vectors according to this variable (scale=0 should
# turn off automatic scaling):
scale = item.getp('arrowscale')
filled = item.getp('filledarrows') # draw filled arrows if True
cmd = ""
if z is not None and w is not None:
z = squeeze(z)
w = asarray(w)
# draw velocity vectors as arrows with components (u,v,w) at
# points (x,y,z):
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(u) and shape(y) != shape(u) and \
shape(z) != shape(u)):
x,y,z = ndgrid(x,y,z,sparse=False)
if shape(x) != shape(u) and shape(y) != shape(u) and \
shape(z) != shape(u):
cmd += "x = %s;\n" % list(x)
cmd += "y = %s;\n" % list(y)
cmd += "z = %s;\n" % list(z)
if item.getp('indexing') == 'ij':
cmd += "[X,Y,Z] = ndgrid(x,y,z);\n"
else:
cmd += "[X,Y,Z] = meshgrid(x,y,z);\n"
else:
cmd += "X = %s;\n" % str(x.tolist()).replace('],', '];')
cmd += "Y = %s;\n" % str(y.tolist()).replace('],', '];')
cmd += "Z = %s;\n" % str(z.tolist()).replace('],', '];')
cmd += "U = %s;\n" % str(u.tolist()).replace('],', '];')
cmd += "V = %s;\n" % str(v.tolist()).replace('],', '];')
cmd += "W = %s;\n" % str(w.tolist()).replace('],', '];')
args = "X,Y,Z,U,V,W"
func = "quiver3"
else:
# draw velocity vectors as arrows with components (u,v) at
# points (x,y):
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(u) and shape(y) != shape(u)):
x,y = ndgrid(x,y,sparse=False)
if shape(x) != shape(u) and shape(y) != shape(u):
cmd += "x = %s;\n" % list(x)
cmd += "y = %s;\n" % list(y)
if item.getp('indexing') == 'ij':
cmd += "[X,Y] = ndgrid(x,y);\n"
else:
cmd += "[X,Y] = meshgrid(x,y);\n"
else:
cmd += "X = %s;\n" % str(x.tolist()).replace('],', '];')
cmd += "Y = %s;\n" % str(y.tolist()).replace('],', '];')
cmd += "U = %s;\n" % str(u.tolist()).replace('],', '];')
cmd += "V = %s;\n" % str(v.tolist()).replace('],', '];')
args = "X,Y,U,V"
func = "quiver"
args += ",%s" % float(scale)
if filled:
args += ",'filled'"
if color:
args += ",'Color','%s'" % color
# FIXME: What if color is a three-tuple [r,g,b]?
if style:
args += ",'LineStyle','%s'" % style
if marker:
args += ",'Marker','%s','ShowArrowHead','off'" % marker
if width:
args += ",'LineWidth', %s" % float(width)
cmd += "%s(%s),...\n" % (func,args)
self._script += cmd
def _add_streams(self, item):
if DEBUG:
print "Adding streams"
# grid components:
x, y, z = item.getp('xdata'), item.getp('ydata'), item.getp('zdata')
# vector components:
u, v, w = item.getp('udata'), item.getp('vdata'), item.getp('wdata')
# starting positions for streams:
sx = item.getp('startx')
sy = item.getp('starty')
sz = item.getp('startz')
# get line specifications:
marker, color, style, width = self._get_linespecs(item)
# TODO: implement linepecs
args = [x,y,z,u,v,w,sx,sy,sz]
if item.getp('tubes'):
# draw stream tubes from vector data (u,v,w) at points (x,y,z)
n = item.getp('n') # number of points along the circumference of the tube
scale = item.getp('tubescale')
args.append([scale, n])
#func = self._g.streamtube
elif item.getp('ribbons'):
# draw stream ribbons from vector data (u,v,w) at points (x,y,z)
width = item.getp('ribbonwidth')
args.append(width)
#func = self._g.streamribbon
else:
if z is not None and w is not None:
# draw stream lines from vector data (u,v,w) at points (x,y,z)
pass
else:
# draw stream lines from vector data (u,v) at points (x,y)
args = [x,y,u,v,sx,sy]
#func = self._g.streamline
kwargs = {'nout': 0}
#func(*args, **kwargs)
def _add_isosurface(self, item):
if DEBUG:
print "Adding a isosurface"
# grid components:
x = squeeze(item.getp('xdata'))
y = squeeze(item.getp('ydata'))
z = squeeze(item.getp('zdata'))
v = asarray(item.getp('vdata')) # volume
c = item.getp('cdata') # pseudocolor data
isovalue = item.getp('isovalue')
cmd = ""
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(v) and shape(y) != shape(v) and \
shape(z) != shape(v)):
x,y,z = ndgrid(x,y,z,sparse=False)
if shape(x) != shape(v) and shape(y) != shape(v) and \
shape(z) != shape(v):
cmd += "x = %s;\n" % list(x)
cmd += "y = %s;\n" % list(y)
cmd += "z = %s;\n" % list(z)
if item.getp('indexing') == 'ij':
cmd += "[X,Y,Z] = ndgrid(x,y,z);\n"
else:
cmd += "[X,Y,Z] = meshgrid(x,y,z);\n"
else:
cmd += "X = %s;\n" % str(x.tolist()).replace('],', '];')
cmd += "X = reshape(X,%d,%d,%d);\n" % shape(v)
cmd += "Y = %s;\n" % str(y.tolist()).replace('],', '];')
cmd += "Y = reshape(Y,%d,%d,%d);\n" % shape(v)
cmd += "Z = %s;\n" % str(z.tolist()).replace('],', '];')
cmd += "Z = reshape(Z,%d,%d,%d);\n" % shape(v)
cmd += "V = %s;\n" % str(v.tolist()).replace('],', '];')
cmd += "V = reshape(V,%d,%d,%d);\n" % shape(v)
args = "X,Y,Z,V"
if c is not None:
c = asarray(c)
cmd += "C = %s;\n" % str(c.tolist()).replace('],', '];')
cmd += "C = reshape(C,%d,%d,%d);\n" % shape(v)
args += ",C"
args += ",%s" % float(isovalue)
cmd += "isosurface(%s),...\n" % args
self._script += cmd
def _add_slices(self, item):
if DEBUG:
print "Adding slices in a volume"
# grid components:
x, y, z = item.getp('xdata'), item.getp('ydata'), item.getp('zdata')
v = item.getp('vdata') # volume
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(v) and shape(y) != shape(v) and \
shape(z) != shape(v)):
x,y,z = ndgrid(x,y,z,sparse=False)
sx, sy, sz = item.getp('slices')
if rank(sz) == 2:
# sx, sy, and sz defines a surface
pass
else:
# sx, sy, and sz is either numbers or vectors with numbers
pass
#self._g.slice_(x,y,z,v,sx,sy,sz,nout=0)
def _add_contourslices(self, item):
if DEBUG:
print "Adding contours in slice planes"
# grid components:
x, y, z = item.getp('xdata'), item.getp('ydata'), item.getp('zdata')
v = item.getp('vdata') # volume
sx, sy, sz = item.getp('slices')
if rank(sz) == 2:
# sx, sy, and sz defines a surface
pass
else:
# sx, sy, and sz is either numbers or vectors with numbers
pass
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(v) and shape(y) != shape(v) and \
shape(z) != shape(v)):
x,y,z = ndgrid(x,y,z,sparse=False)
args = [x,y,z,v,sx,sy,sz]
cvector = item.getp('cvector')
clevels = item.getp('clevels') # number of contour levels per plane
if cvector is None:
# the contour levels are chosen automatically
args.append(clevels)
else:
args.append(cvector)
#self._g.contourslice(*args, **kwargs)
def _set_figure_size(self, fig):
if DEBUG:
print "Setting figure size"
width, height = fig.getp('size')
if width and height:
# set figure width and height
pass
else:
# use the default width and height in plotting package
pass
def figure(self, *args, **kwargs):
# Extension of BaseClass.figure:
# add a plotting package figure instance as fig._g and create a
# link to it as self._g
fig = BaseClass.figure(self, *args, **kwargs)
try:
fig._g
except:
# create plotting package figure and save figure instance
# as fig._g
if DEBUG:
name = 'Fig ' + str(fig.getp('number'))
print "creating figure %s in backend" % name
fig._g = ""
self._g = fig._g # link for faster access
return fig
def _replot(self):
"""Replot all axes and all plotitems in the backend."""
# NOTE: only the current figure (gcf) is redrawn.
if
#!/usr/bin/env pnpython3
#
# Interface PH5 to PH5Viewer
#
# <NAME>, <NAME> August 2015
#
# Updated April 2018
import os
import numpy as np
from ph5.core import ph5api, timedoy
VER = 2018116
class PH5ReaderError(Exception):
''' Exception gets raised in PH5Reader '''
def __init__(self, message):
super(PH5ReaderError, self).__init__(message)
self.message = message
class PH5Reader():
'''
Read PH5 data and meta-data.
For example: See __main__ below.
'''
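# Typical usage (a sketch only; the __main__ block referenced above is not
# shown in this excerpt, and the file path below is illustrative):
#   reader = PH5Reader()
#   reader.initialize_ph5('/path/to/master.ph5')
#   reader.set(channel=[1], array=['Array_t_001'])
#   reader.createGraphExperiment()
#   reader.createGraphArraysNStations()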
def __init__(self):
# This is the ph5api object.
self.fio = None
self.clear()
self.set()
def clear(self):
self.graphExperiment = None
self.graphArrays = None
self.graphEvents = None
self.data = np.array([])
self.metadata = None
def set(self, channel=[1], array=['Array_t_001']):
'''
Set channels and arrays
Example:
set (channel=[1,2,3], array = ['Array_t_001', 'Array_t_002'])
'''
# Channels to extract, a list.
self.CHANNEL = channel
# Arrays to extract, a list.
self.ARRAY = array
def initialize_ph5(self, path2file):
'''
Initialize ph5api and read meta-data...
path2file => Absolute path to the master ph5 file.
'''
pathname = os.path.dirname(str(path2file))
master = os.path.basename(str(path2file))
self.fio = ph5api.PH5(path=pathname, nickname=master)
self.fio.read_event_t_names()
for n in self.fio.Event_t_names:
self.fio.read_event_t(n)
self.fio.read_array_t_names()
for n in self.fio.Array_t_names:
self.fio.read_array_t(n)
# this table is required to identify the events' end time.
# If it is missing, an error will be raised in createGraphEvents()
self.fio.read_sort_t()
# this table defines the orientation of the data, required when reading traces
self.fio.read_receiver_t()
if len(self.fio.Receiver_t['rows']) == 0:
msg = "There is no Receiver_t table in the dataset." + \
"which means it is not possible to read data correctly."
raise PH5ReaderError(msg)
# this table gives some non-displayed data
# self.fio.read_response_t ()
self.fio.read_das_g_names()
if len(self.fio.Das_g_names) == 0:
msg = "There are no Das_t tables in the dataset," + \
"which means there are no data to be viewed."
raise PH5ReaderError(msg)
def ph5close(self):
self.fio.close()
def _event_stop(self, event_epoch):
''' Find end of recording window that contains the event time. '''
try:
for n in self.fio.Array_t_names:
for s in self.fio.Sort_t[n]['rows']:
if event_epoch >= s['start_time/epoch_l'] \
and event_epoch <= s['end_time/epoch_l']:
tdoy = timedoy.TimeDOY(
epoch=s['end_time/epoch_l'],
microsecond=s['end_time/micro_seconds_i'])
return tdoy.epoch(fepoch=True)
except KeyError:
return None
return None
##################################################
# def createGraphExperiment
# Author: <NAME>
# Updated: 201802
# read Experiment_t table
def createGraphExperiment(self):
'''
Information about experiment
Sets: self.GraphExperiment
'''
self.fio.read_experiment_t()
rows = self.fio.Experiment_t['rows']
if rows == []:
raise PH5ReaderError(
"The PH5 dataset does not have Experiment_t table." +
"\nCannot identify the experiment's name")
self.graphExperiment = rows[-1]
pass
###################################################
# def createGraphEvents
# Author: <NAME>
# Updated: 201802
def createGraphEvents(self):
''' Information about events info for ES_Gui,
Sets: self.graphEvents
'''
self.graphEvents = {'shotLines': []}
events = []
for n in self.fio.Event_t_names:
# strip the 'Event_t_' prefix to get the shot line id; use '0' when
# the table is plainly named 'Event_t' (needed when creating SEGY)
if n == 'Event_t':
shot = '0'
else:
shot = n.replace('Event_t_', '')
self.graphEvents['shotLines'].append(shot)
rows = self.fio.Event_t[n]['byid']
for o in self.fio.Event_t[n]['order']:
r = rows[o]
e = {}
e['shotlineId'] = shot
e['eventName'] = n
e['eventId'] = r['id_s']
e['lat.'] = r['location/Y/value_d']
e['long.'] = r['location/X/value_d']
e['elev.'] = r['location/Z/value_d']
e['mag.'] = r['size/value_d']
e['depth'] = r['depth/value_d']
tdoy = timedoy.TimeDOY(
epoch=r['time/epoch_l'],
microsecond=r['time/micro_seconds_i'])
e['eStart'] = tdoy.epoch(fepoch=True)
e['eStop'] = self._event_stop(e['eStart'])
events.append(e)
self.graphEvents['events'] = \
sorted(events, key=lambda k: k['eventId'])
self.graphEvents['shotLines'] = \
sorted(self.graphEvents['shotLines'], key=lambda k: k)
if self.fio.Event_t_names == []:
raise PH5ReaderError(
"The PH5 dataset does not have any Event_t table.")
if self.fio.Sort_t == {}:
msg = "The PH5 dataset does not have any Sort_t table " + \
"which means\nthere aren't enough information to identtify" + \
" the events' end time."
raise PH5ReaderError(msg)
######################################################
# def createGraphArraysNStations
# Author: <NAME>
# Updated: 201802
def createGraphArraysNStations(self):
'''
Information about arrays and station info for ES_Gui,
Sets: self.graphArrays
'''
self.graphArrays = []
for n in self.fio.Array_t_names:
# create array: {'arrayId': aId,
# {'stations':{stationId:
# [list of data for each channel of that station]}
a = {'arrayId': n.split('_')[-1], 'stations': {}, 'channels': []}
rows = self.fio.Array_t[n]['byid']
# byid: {statId:{chanId:[ {info of that channel-station} ]}}
sta0 = rows.keys()[0]
chan0 = rows[sta0].keys()[0]
r0 = rows[sta0][chan0][0]
a['deployT'] = r0['deploy_time/epoch_l']
a['pickupT'] = r0['pickup_time/epoch_l']
try:
a['sampleRate'] = \
r0['sample_rate_i'] / float(r0['sample_rate_multiplier_i'])
except KeyError:
das = r0['das/serial_number_s']
self.fio.read_das_t(
das, start_epoch=a['deployT'], stop_epoch=a['pickupT'])
dasrow = self.fio.Das_t[das]['rows'][0]
a['sampleRate'] = dasrow['sample_rate_i'] / \
float(dasrow['sample_rate_multiplier_i'])
# self.fio.Array_t[n]['order']:
# list of station names in order of position, time
chNo = 0
for o in self.fio.Array_t[n]['order']:
for ch in rows[o].keys():
if ch not in a['channels']:
a['channels'].append(ch)
for stat in rows[o][ch]:
if stat['id_s'] in a['stations'].keys():
continue
s = {}
s['stationId'] = stat['id_s']
s['dasSer'] = stat['das/serial_number_s']
s['lat.'] = stat['location/Y/value_d']
s['long.'] = stat['location/X/value_d']
s['elev.'] = stat['location/Z/value_d']
# s['selected'] = False
a['stations'][stat['id_s']] = s
if stat['deploy_time/epoch_l'] < a['deployT']:
a['deployT'] = stat['deploy_time/epoch_l']
if stat['pickup_time/epoch_l'] > a['pickupT']:
a['pickupT'] = stat['pickup_time/epoch_l']
keys = set(a['stations'].keys())
a['orderedStationIds'] = sorted(
keys, key=lambda item: (int(item), item))
if len(a['channels']) > chNo:
chNo = len(a['channels'])
self.graphArrays.append(a)
if self.fio.Array_t_names == []:
raise PH5ReaderError(
"The PH5 dataset does not have any Array_t table.")
if chNo > 3:
errMsg = "Limitation for number of channels is 3" + \
"\nwhile this experiment has up to %s channels " + \
"for one array."
raise PH5ReaderError(errMsg % chNo)
###############################################
# def readData_loiEvent
# Author: <NAME>
# Updated: 201802
# populates data when event information is missing
# (based on readData_shotGather)
def readData_loiEvent(
self, orgStartT, offset, timeLen, staSpc,
appClockDriftCorr, redVel, # corrections
PH5View, statusBar=None, beginMsg=None):
'''
Read trace data based on given start and stop epoch,
arrays, and channels.
Sets: self.metadata
Returns: info
'''
sampleRate = PH5View.selectedArray['sampleRate']
statusMsg = beginMsg + ": preparing event table"
statusBar.showMessage(statusMsg)
# For each event, loop through each station,
# each channel in the requested array and extract trace data.
self.data = {}
info = {}
# info['maxP2P'] = -1 * (2**31 - 1)
info['zeroDOffsetIndex'] = None
info['LEN'] = {}
info['quickRemoved'] = {}
info['deepRemoved'] = {}
info['numOfSamples'] = 0
# secs = timeLen
# ss = ""
Offset_t = {}
self.minOffset = None
self.maxOffset = None
a = self.ARRAY[0] # currently allow to select one array at a time
rows = self.fio.Array_t[a]['byid']
order = self.fio.Array_t[a]['order']
listOfStations = sorted(PH5View.selectedArray['seclectedStations'])
self.metadata = [None] * len(listOfStations)
info['distanceOffset'] = [None] * len(listOfStations)
if orgStartT is not None:
startTime = orgStartT + offset
stopTime = startTime + timeLen
info['noDataList'] = []
listOfDataStations = []
lenlist = {'less': {}, 'maybeless': {}}
"""
# If there is an associated event calculate offset distances
for ev in PH5View.selectedEvents:
#print "ev['eventId']:",ev['eventId']
Offset_t[a] = self.fio.calc_offsets(
a, ev['eventId'], ev['eventName'])
if orgStartT is None:
startTime = ev['eStart'] + offset
stopTime = startTime + timeLen
"""
ev = None
sr = None
# slen = None
count = 0
for o in order:
for ch in self.CHANNEL:
if ch not in self.data.keys():
self.data[ch] = [[]] * len(listOfStations)
info['LEN'][ch] = [0] * len(listOfStations)
lenlist['less'][ch] = []
lenlist['maybeless'][ch] = []
info['quickRemoved'][ch] = {}
info['deepRemoved'][ch] = []
for r in rows[o][ch]:
try:
if r['id_s'] not in \
PH5View.selectedArray['seclectedStations']:
raise PH5ReaderError("Continue")
ii = listOfStations.index(r['id_s'])
if not ph5api.is_in(
r['deploy_time/epoch_l'],
r['pickup_time/epoch_l'],
startTime, stopTime):
raise PH5ReaderError("Continue")
das = r['das/serial_number_s']
corr = self.calcCorrection(
ii, das, ch, Offset_t, a, r, startTime,
sampleRate, staSpc, appClockDriftCorr, redVel)
# + 1.1/sampleRate: add a little bit than
# the time of one sample
traces = self.fio.cut(
das, startTime-corr[0]/1000.,
stopTime-corr[0]/1000. + 1.1/sampleRate,
ch, sampleRate, apply_time_correction=False)
trace = ph5api.pad_traces(traces)
if trace.nsamples == 0:
v = (PH5View.selectedArray['arrayId'],
das, r['id_s'], ch)
noDataItem = \
"Array:%s Das: %s Station: %s Chan: %s"
noDataItem %= v
if noDataItem not in info['noDataList']:
info['noDataList'].append(noDataItem)
raise PH5ReaderError("Continue")
if sr is None:
sr = trace.sample_rate
# slen = int ((secs * sr) + 0.5)
self.getMetadata(
info, lenlist, ii, trace, a, ev, r, ch, das,
Offset_t, corr, staSpc, orgStartT, startTime)
trace.data = np.array(trace.data, dtype=np.float32)
if len(self.data[ch][ii]) < trace.nsamples:
self.data[ch][ii] = (trace.data)
info['LEN'][ch][ii] = trace.nsamples
if r['id_s'] not in listOfDataStations:
listOfDataStations.append(r['id_s'])
if 'minmax' not in self.metadata[ii].keys():
self.metadata[ii]['minmax'] = \
(np.amin(trace.data), np.amax(trace.data))
else:
minval = min(
self.metadata[ii]['minmax'][0],
np.amin(trace.data))
maxval = max(
self.metadata[ii]['minmax'][1],
np.amax(trace.data))
self.metadata[ii]['minmax'] = (minval, maxval)
count += 1
if statusBar is not None and count % 10 == 0:
statusMsg = beginMsg + ": reading data and" + \
" metadata: %s station-channels"
statusBar.showMessage(statusMsg % count)
except PH5ReaderError, e:
if e.message == "Continue":
if r['id_s'] in listOfStations:
lenlist['less'][ch].append(ii)
else:
#!/usr/bin/python
#
# Filename: cloakifyFactory.py
#
# Version: 1.0.1
#
# Author: <NAME> (TryCatchHCF)
#
# Summary: Cloakify Factory is part of the Cloakify Exfiltration toolset. It transforms
# any filetype into lists of words / phrases / Unicode to ease exfiltration of data across
# monitored networks, defeat data-whitelisting restrictions, and hide the data in plain
# sight, and it facilitates social engineering attacks against human analysts and their
# workflows. Bonus Feature: Defeats signature-based malware detection tools (cloak your
# other tools). Leverages other scripts of the Cloakify Exfiltration Toolset, including
# cloakify.py, decloakify.py, and the noise generator scripts.
#
# Description: Base64-encodes the given payload and translates the output using a list
# of words/phrases/Unicode provided in the cipher. This is NOT a secure encryption tool;
# the output is vulnerable to frequency analysis attacks. Use the Noise Generator scripts
# to add entropy to your cloaked file. You should encrypt the file before cloaking if
# secrecy is needed.
#
# Prepackaged ciphers include: lists of desserts in English, Arabic, Thai, Russian,
# Hindi, Chinese, Persian, and Muppet (Swedish Chef); PokemonGo creatures; Top 100 IP
# Addresses; Top Websites; GeoCoords of World Capitals; MD5 Password Hashes; An Emoji
# cipher; Star Trek characters; Geocaching Locations; Amphibians (Scientific Names);
# evadeAV cipher (simple cipher that minimizes size of the resulting obfuscated data).
#
# To create your own cipher:
#
# - Generate a list of at least 66 unique words (Unicode-16 accepted)
# - Remove all duplicate entries and blank lines
# - Randomize the list
# - Place in the 'ciphers/' subdirectory
# - Relaunch cloakifyFactory and it will automatically detect the new cipher
#
# Example:
#
# $ ./cloakifyFactory.py
#
import os, sys, getopt, random, base64, cloakify, decloakify
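#
# For orientation only: a minimal, hedged sketch of the cloak/decloak idea described in
# the header above (Base64-encode the payload, then map each Base64 character to one line
# of the chosen cipher). This is NOT the real cloakify.py / decloakify.py implementation;
# the function names below are illustrative, they rely on the base64 module imported
# above, and they assume a cipher file with at least 66 non-blank lines (the sketch only
# uses the first 65: the 64 Base64 characters plus the '=' padding character).
_SKETCH_B64CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
def sketchCloak( payloadFile, cipherFile, outFile ):
    # Read cipher entries (one per line), Base64-encode the payload, then write one
    # cipher entry per Base64 character
    cipher = [ line.strip() for line in open( cipherFile ) if line.strip() ]
    encoded = base64.b64encode( open( payloadFile, "rb" ).read() ).decode( "ascii" )
    with open( outFile, "w" ) as f:
        for c in encoded:
            f.write( cipher[ _SKETCH_B64CHARS.index( c ) ] + "\n" )
def sketchDecloak( cloakedFile, cipherFile, outFile ):
    # Invert the mapping: cipher entry -> Base64 character, then Base64-decode
    cipher = [ line.strip() for line in open( cipherFile ) if line.strip() ]
    lookup = dict( (word, _SKETCH_B64CHARS[ i ]) for i, word in enumerate( cipher[ :65 ] ) )
    encoded = "".join( lookup[ line.strip() ] for line in open( cloakedFile ) if line.strip() )
    with open( outFile, "wb" ) as f:
        f.write( base64.b64decode( encoded ) )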
# Load list of ciphers
gCipherFiles = next(os.walk("./ciphers/"))[2]
# Load list of noise generators
gNoiseScripts = []
for root, dirs, files in os.walk( "./noiseTools" ):
for file in files:
if file.endswith('.py'):
gNoiseScripts.append( file )
def CloakifyFile():
print ""
print "==== Cloakify a File ===="
print ""
sourceFile = raw_input("Enter filename to cloak (e.g. ImADolphin.exe or /foo/bar.zip): ")
print ""
cloakedFile = raw_input("Save cloaked data to filename (default: 'tempList.txt'): ")
if cloakedFile == "":
cloakedFile = "tempList.txt"
cipherNum = SelectCipher()
noiseNum = -1
choice = raw_input("Add noise to cloaked file? (y/n): ")
if choice == "y":
noiseNum = SelectNoise()
print ""
print "Creating cloaked file using cipher:", gCipherFiles[ cipherNum ]
try:
cloakify.Cloakify( sourceFile, "ciphers/" + gCipherFiles[ cipherNum ], cloakedFile )
except:
print ""
print "!!! Well that didn't go well. Verify that your cipher is in the 'ciphers/' subdirectory."
print ""
if noiseNum >=0:
print "Adding noise to cloaked file using noise generator:", gNoiseScripts[ noiseNum ]
try:
os.system( "noiseTools/%s %s" % ( gNoiseScripts[ noiseNum ], cloakedFile ))
except:
print ""
print "!!! Well that didn't go well. Verify that '", cloakedFile, "'"
print "!!! is in the current working directory or try again giving full filepath."
print ""
print ""
print "Cloaked file saved to:", cloakedFile
print ""
choice = raw_input( "Preview cloaked file? (y/n): " )
if choice == "y":
print ""
with open( cloakedFile ) as file:
cloakedPreview = file.readlines()
i = 0;
while ( i<20 ):
print cloakedPreview[ i ],
i = i+1
print ""
choice = raw_input( "Press return to continue... " )
def DecloakifyFile():
decloakTempFile = "decloakTempFile.txt"
print ""
print "==== Decloakify a Cloaked File ===="
print ""
sourceFile = raw_input( "Enter filename to decloakify (e.g. /foo/bar/MyBoringList.txt): " )
print ""
decloakedFile = raw_input( "Save decloaked data to filename (default: 'decloaked.file'): " )
print ""
if decloakedFile == "":
decloakedFile = "decloaked.file"
# Reviewing the cloaked file within cloakifyFactory will save a little time for those who
# forgot the format of the cloaked file and don't want to hop into a new window just to look
choice = raw_input( "Preview cloaked file? (y/n default=n): " )
if choice == "y":
print ""
try:
with open( sourceFile ) as file:
cloakedPreview = file.readlines()
i = 0;
while ( i<20 ):
print cloakedPreview[ i ],
i = i+1
print ""
except:
print ""
print "!!! Well that didn't go well. Verify that '", sourceFile, "'"
print "!!! is in the current working directory or the filepath you gave."
print ""
choice = raw_input("Was noise added to the cloaked file? (y/n default=n): ")
if choice == "y":
noiseNum = SelectNoise()
stripColumns = 2
# No upper bound checking, relies on SelectNoise() returning valid value, fix in next release
if noiseNum >= 0:
try:
# Remove Noise, overwrite the source file with the stripped contents
print "Removing noise from noise generator:", gNoiseScripts[ noiseNum ]
os.system( "./removeNoise.py %s %s %s" % ( stripColumns, sourceFile, decloakTempFile ))
# Copy decloak temp filename to sourceFile so that Decloakify() gets the right filename
sourceFile = decloakTempFile
except:
print "!!! Error while removing noise from file. Was calling 'removeNoise.py'.\n"
cipherNum = SelectCipher()
print "Decloaking file using cipher: ", gCipherFiles[ cipherNum ]
# Call Decloakify()
try:
decloakify.Decloakify( sourceFile, "ciphers/" + gCipherFiles[ cipherNum ], decloakedFile )
print ""
print "Decloaked file", sourceFile, ", saved to", decloakedFile
except:
print ""
print "!!! Oh noes! Error decloaking file (did you select the same cipher it was cloaked with?)"
print ""
try:
os.system( "rm -f %s" % ( decloakTempFile ))
except:
print ""
print "!!! Oh noes! Error while deleting temporary file:", decloakTempFile
print ""
choice = raw_input("Press return to continue... ")
def SelectCipher():
print ""
print "Ciphers:"
print ""
cipherCount = 1
for cipherName in gCipherFiles:
print cipherCount, "-", cipherName
cipherCount = cipherCount + 1
print ""
selection = -1
while ( selection < 0 or selection > (cipherCount - 2)):
try:
cipherNum = raw_input( "Enter cipher #: " )
selection = int ( cipherNum ) - 1
            if ( cipherNum == "" or selection < 0 or selection > (cipherCount - 2)):
print "Invalid cipher number, try again..."
selection = -1
except ValueError:
print "Invalid cipher number, try again..."
print ""
return selection
def BrowseCiphers():
print ""
print "======== Preview Ciphers ========"
cipherNum = SelectCipher()
print "===== Cipher:", gCipherFiles[ cipherNum ], " ====="
print ""
try:
with open( "ciphers/"+gCipherFiles[ cipherNum ] ) as cipherList:
arrayCipher = cipherList.read()
print( arrayCipher )
except:
print "!!! Error opening cipher file.\n"
choice = raw_input( "Press return to continue... " )
def SelectNoise():
print ""
print "Noise Generators:"
print ""
noiseCount = 1
for noiseName in gNoiseScripts:
print noiseCount, "-", noiseName
noiseCount = noiseCount + 1
print ""
selection = -1
noiseTotal = noiseCount - 2
while ( selection < 0 or selection > noiseTotal ):
try:
noiseNum = raw_input( "Enter noise generator #: " )
selection = int ( noiseNum ) - 1
            if ( noiseNum == "" or selection < 0 or selection > noiseTotal ):
print "Invalid generator number, try again..."
selection = -1
except ValueError:
print "Invalid generator number, try again..."
return selection
def BrowseNoise():
print ""
print "======== Preview Noise Generators ========"
noiseNum = SelectNoise()
print ""
# No upper bounds checking, relies on SelectNoise() to return a valid value, fix in next update
if noiseNum >= 0:
try:
print "Sample output of prepended strings, using noise generator:", gNoiseScripts[ noiseNum ], "\n"
os.system( "noiseTools/%s" % ( gNoiseScripts[ noiseNum ] ))
except:
print "!!! Error while generating noise preview.\n"
print ""
choice = raw_input( "Press return to continue... " )
def Help():
print ""
print "===================== Using Cloakify Factory ====================="
print ""
print "For background and full tutorial, see the presentation slides at"
print "https://github.com/TryCatchHCF/Cloakify"
print ""
print "WHAT IT DOES:"
print ""
print "Cloakify Factory transforms any filetype (e.g. .zip, .exe, .xls, etc.) into"
print "a list of harmless-looking strings. This lets you hide the file in plain sight,"
print "and transfer the file without triggering alerts. The fancy term for this is"
print "'text-based steganography', hiding data by making it look like other data."
print ""
print "For example, you can transform a .zip file into a list made of Pokemon creatures"
print "or Top 100 Websites. You then transfer the cloaked file however you choose,"
print "and then decloak the exfiltrated file back into its original form. The ciphers"
print "are designed to appear like harmless / ignorable lists, though some (like MD5"
print "password hashes) are specifically meant as distracting bait."
print ""
print "BASIC USE:"
print ""
print "Cloakify Factory will guide you through each step. Follow the prompts and"
print "it will show you the way."
print ""
print "Cloakify a Payload:"
print "- Select 'Cloakify a File' (any filetype will work - zip, binaries, etc.)"
print "- Enter filename that you want to Cloakify (can be filename or filepath)"
print "- Enter filename that you want to save the cloaked file as"
print "- Select the cipher you want to use"
print "- Select a Noise Generator if desired"
print "- Preview cloaked file if you want to check the results"
print "- Transfer cloaked file via whatever method you prefer"
print ""
print "Decloakify a Payload:"
print "- Receive cloaked file via whatever method you prefer"
print "- Select 'Decloakify a File'"
print "- Enter filename of cloaked file (can be filename or filepath)"
print "- Enter filename to save decloaked file to"
print "- | |
== 1)
rel18_inds = self.spatial_neighbours(objects, que18_cond, ind_rubber_object)
que19_cond = (ind_cube_object.size(0) == 1)
rel19_inds = self.spatial_neighbours(objects, que19_cond, ind_cube_object)
que20_cond = (ind_small_sphere_object.size(0) == 1)
rel20_inds = self.spatial_neighbours(objects, que20_cond, ind_small_sphere_object)
que21_cond = (ind_big_object.size(0) == 1)
rel21_inds = self.spatial_neighbours(objects, que21_cond, ind_big_object)
que22_cond = (ind_grey_cylinder_object.size(0) == 1)
rel22_inds = self.spatial_neighbours(objects, que22_cond, ind_grey_cylinder_object)
que23_cond = (ind_green_object.size(0) == 1)
rel23_inds = self.spatial_neighbours(objects, que23_cond, ind_green_object)
que24_cond = (ind_big_brown_object.size(0) == 1)
rel24_inds = self.spatial_neighbours(objects, que24_cond, ind_big_brown_object)
que25_cond = (ind_brown_cube_object.size(0) == 1)
rel25_inds = self.spatial_neighbours(objects, que25_cond, ind_brown_cube_object)
que26_cond = (ind_big_sphere_object.size(0) == 1)
rel26_inds = self.spatial_neighbours(objects, que26_cond, ind_big_sphere_object)
que27_cond = (ind_purple_cylinder_object.size(0) == 1)
rel27_inds = self.spatial_neighbours(objects, que27_cond, ind_purple_cylinder_object)
que28_cond = (ind_small_cube_object.size(0) == 1)
rel28_inds = self.spatial_neighbours(objects, que28_cond, ind_small_cube_object)
que29_cond = (ind_red_object.size(0) == 1)
rel29_inds = self.spatial_neighbours(objects, que29_cond, ind_red_object)
que30_cond = (ind_rubber_object.size(0) == 1)
rel30_inds = self.spatial_neighbours(objects, que30_cond, ind_rubber_object)
que31_cond = (ind_small_rubber_object.size(0) == 1)
rel31_inds = self.spatial_neighbours(objects, que31_cond, ind_small_rubber_object)
que32_cond = (ind_metal_cylinder_object.size(0) == 1)
rel32_inds = self.spatial_neighbours(objects, que32_cond, ind_metal_cylinder_object)
que33_cond = (ind_metal_cube_object.size(0) == 1)
rel33_inds = self.spatial_neighbours(objects, que33_cond, ind_metal_cube_object)
que34_cond = (ind_small_metal_object.size(0) == 1)
rel34_inds = self.spatial_neighbours(objects, que34_cond, ind_small_metal_object)
que35_cond = (ind_big_sphere_object.size(0) == 1)
rel35_inds = self.spatial_neighbours(objects, que35_cond, ind_big_sphere_object)
que36_cond = (ind_rubber_cube_object.size(0) == 1)
rel36_inds = self.spatial_neighbours(objects, que36_cond, ind_rubber_cube_object)
que37_cond = (ind_metal_sphere_object.size(0) == 1)
rel37_inds = self.spatial_neighbours(objects, que37_cond, ind_metal_sphere_object)
que38_cond = (ind_small_cube_object.size(0) == 1)
rel38_inds = self.spatial_neighbours(objects, que38_cond, ind_small_cube_object)
que39_cond = (ind_small_sphere_object.size(0) == 1)
rel39_inds = self.spatial_neighbours(objects, que39_cond, ind_small_sphere_object)
if que0_cond:
que0_ans = (objects[ind_purple_object][5]).int()-1
rel0_ans, rel0_loc = self.create_spatial_answers(objects, rel0_inds, 5)
que0_loc = (objects[ind_purple_object][:2]).int()
else:
que0_ans = torch.ones([]).int()*-1
que0_loc = torch.ones([2]).int() * -1
rel0_ans = torch.ones([4]).int() * -1
rel0_loc = torch.ones([8]).int() * -1
if que1_cond:
que1_ans = (objects[ind_red_metal_object][5]).int()-1
que1_loc = (objects[ind_red_metal_object][:2]).int()
rel1_ans, rel1_loc = self.create_spatial_answers(objects, rel1_inds, 5)
else:
que1_ans = torch.ones([]).int()*-1
que1_loc = torch.ones([2]).int() * -1
rel1_ans = torch.ones([4]).int() * -1
rel1_loc = torch.ones([8]).int() * -1
if que2_cond:
que2_ans = (objects[ind_big_rubber_object][5]).int()-1
que2_loc = (objects[ind_big_rubber_object][:2]).int()
rel2_ans, rel2_loc = self.create_spatial_answers(objects, rel2_inds, 5)
else:
que2_ans = torch.ones([]).int()*-1
que2_loc = torch.ones([2]).int() * -1
rel2_ans = torch.ones([4]).int() * -1
rel2_loc = torch.ones([8]).int() * -1
if que3_cond:
que3_ans = (objects[ind_small_brown_object][5]).int()-1
que3_loc = (objects[ind_small_brown_object][:2]).int()
rel3_ans, rel3_loc = self.create_spatial_answers(objects, rel3_inds, 5)
else:
que3_ans = torch.ones([]).int()*-1
que3_loc = torch.ones([2]).int() * -1
rel3_ans = torch.ones([4]).int() * -1
rel3_loc = torch.ones([8]).int() * -1
if que4_cond:
que4_ans = (objects[ind_red_object][5]).int()-1
que4_loc = (objects[ind_red_object][:2]).int()
rel4_ans, rel4_loc = self.create_spatial_answers(objects, rel4_inds, 5)
else:
que4_ans = torch.ones([]).int()*-1
que4_loc = torch.ones([2]).int() * -1
rel4_ans = torch.ones([4]).int() * -1
rel4_loc = torch.ones([8]).int() * -1
if que5_cond:
que5_ans = (objects[ind_big_blue_object][5]).int()-1
que5_loc = (objects[ind_big_blue_object][:2]).int()
rel5_ans, rel5_loc = self.create_spatial_answers(objects, rel5_inds, 5)
else:
que5_ans = torch.ones([]).int()*-1
que5_loc = torch.ones([2]).int() * -1
rel5_ans = torch.ones([4]).int() * -1
rel5_loc = torch.ones([8]).int() * -1
if que6_cond:
que6_ans = (objects[ind_big_grey_object][5]).int()-1
que6_loc = (objects[ind_big_grey_object][:2]).int()
rel6_ans, rel6_loc = self.create_spatial_answers(objects, rel6_inds, 5)
else:
que6_ans = torch.ones([]).int()*-1
que6_loc = torch.ones([2]).int() * -1
rel6_ans = torch.ones([4]).int() * -1
rel6_loc = torch.ones([8]).int() * -1
if que7_cond:
que7_ans = (objects[ind_small_object][5]).int()-1
que7_loc = (objects[ind_small_object][:2]).int()
rel7_ans, rel7_loc = self.create_spatial_answers(objects, rel7_inds, 5)
else:
que7_ans = torch.ones([]).int()*-1
que7_loc = torch.ones([2]).int() * -1
rel7_ans = torch.ones([4]).int() * -1
rel7_loc = torch.ones([8]).int() * -1
if que8_cond:
que8_ans = (objects[ind_rubber_object][5]).int()-1
que8_loc = (objects[ind_rubber_object][:2]).int()
rel8_ans, rel8_loc = self.create_spatial_answers(objects, rel8_inds, 5)
else:
que8_ans = torch.ones([]).int()*-1
que8_loc = torch.ones([2]).int() * -1
rel8_ans = torch.ones([4]).int() * -1
rel8_loc = torch.ones([8]).int() * -1
if que9_cond:
que9_ans = (objects[ind_big_cyan_object][5]).int()-1
que9_loc = (objects[ind_big_cyan_object][:2]).int()
rel9_ans, rel9_loc = self.create_spatial_answers(objects, rel9_inds, 5)
else:
que9_ans = torch.ones([]).int()*-1
que9_loc = torch.ones([2]).int() * -1
rel9_ans = torch.ones([4]).int() * -1
rel9_loc = torch.ones([8]).int() * -1
if que10_cond:
que10_ans = (objects[ind_rubber_cylinder_object][6]).int()-1
que10_loc = (objects[ind_rubber_cylinder_object][:2]).int()
rel10_ans, rel10_loc = self.create_spatial_answers(objects, rel10_inds, 6)
else:
que10_ans = torch.ones([]).int()*-1
que10_loc = torch.ones([2]).int() * -1
rel10_ans = torch.ones([4]).int() * -1
rel10_loc = torch.ones([8]).int() * -1
if que11_cond:
que11_ans = (objects[ind_metal_cube_object][6]).int()-1
que11_loc = (objects[ind_metal_cube_object][:2]).int()
rel11_ans, rel11_loc = self.create_spatial_answers(objects, rel11_inds, 6)
else:
que11_ans = torch.ones([]).int()*-1
que11_loc = torch.ones([2]).int() * -1
rel11_ans = torch.ones([4]).int() * -1
rel11_loc = torch.ones([8]).int() * -1
if que12_cond:
que12_ans = (objects[ind_grey_rubber_object][6]).int()-1
que12_loc = (objects[ind_grey_rubber_object][:2]).int()
rel12_ans, rel12_loc = self.create_spatial_answers(objects, rel12_inds, 6)
else:
que12_ans = torch.ones([]).int()*-1
que12_loc = torch.ones([2]).int() * -1
rel12_ans = torch.ones([4]).int() * -1
rel12_loc = torch.ones([8]).int() * -1
if que13_cond:
que13_ans = (objects[ind_metal_sphere_object][6]).int()-1
que13_loc = (objects[ind_metal_sphere_object][:2]).int()
rel13_ans, rel13_loc = self.create_spatial_answers(objects, rel13_inds, 6)
else:
que13_ans = torch.ones([]).int()*-1
que13_loc = torch.ones([2]).int() * -1
rel13_ans = torch.ones([4]).int() * -1
rel13_loc = torch.ones([8]).int() * -1
if que14_cond:
que14_ans = (objects[ind_cyan_object][6]).int()-1
que14_loc = (objects[ind_cyan_object][:2]).int()
rel14_ans, rel14_loc = self.create_spatial_answers(objects, rel14_inds, 6)
else:
que14_ans = torch.ones([]).int()*-1
que14_loc = torch.ones([2]).int() * -1
rel14_ans = torch.ones([4]).int() * -1
rel14_loc = torch.ones([8]).int() * -1
if que15_cond:
que15_ans = (objects[ind_cylinder_object][6]).int()-1
que15_loc = (objects[ind_cylinder_object][:2]).int()
rel15_ans, rel15_loc = self.create_spatial_answers(objects, rel15_inds, 6)
else:
que15_ans = torch.ones([]).int()*-1
que15_loc = torch.ones([2]).int() * -1
rel15_ans = torch.ones([4]).int() * -1
rel15_loc = torch.ones([8]).int() * -1
if que16_cond:
que16_ans = (objects[ind_blue_metal_object][6]).int()-1
que16_loc = (objects[ind_blue_metal_object][:2]).int()
rel16_ans, rel16_loc = self.create_spatial_answers(objects, rel16_inds, 6)
else:
que16_ans = torch.ones([]).int()*-1
que16_loc = torch.ones([2]).int() * -1
rel16_ans = torch.ones([4]).int() * -1
rel16_loc = torch.ones([8]).int() * -1
if que17_cond:
que17_ans = (objects[ind_red_object][6]).int()-1
que17_loc = (objects[ind_red_object][:2]).int()
rel17_ans, rel17_loc = self.create_spatial_answers(objects, rel17_inds, 6)
else:
que17_ans = torch.ones([]).int()*-1
que17_loc = torch.ones([2]).int() * -1
rel17_ans = torch.ones([4]).int() * -1
rel17_loc = torch.ones([8]).int() * -1
if que18_cond:
que18_ans = (objects[ind_rubber_object][6]).int()-1
que18_loc = (objects[ind_rubber_object][:2]).int()
rel18_ans, rel18_loc = self.create_spatial_answers(objects, rel18_inds, 6)
else:
que18_ans = torch.ones([]).int()*-1
que18_loc = torch.ones([2]).int() * -1
rel18_ans = torch.ones([4]).int() * -1
rel18_loc = torch.ones([8]).int() * -1
if que19_cond:
que19_ans = (objects[ind_cube_object][6]).int()-1
que19_loc = (objects[ind_cube_object][:2]).int()
rel19_ans, rel19_loc = self.create_spatial_answers(objects, rel19_inds, 6)
else:
que19_ans = torch.ones([]).int()*-1
que19_loc = torch.ones([2]).int() * -1
rel19_ans = torch.ones([4]).int() * -1
rel19_loc = torch.ones([8]).int() * -1
if que20_cond:
que20_ans = (objects[ind_small_sphere_object][4]).int()-1
que20_loc = (objects[ind_small_sphere_object][:2]).int()
rel20_ans, rel20_loc = self.create_spatial_answers(objects, rel20_inds, 4)
else:
que20_ans = torch.ones([]).int()*-1
que20_loc = torch.ones([2]).int() * -1
rel20_ans = torch.ones([4]).int() * -1
rel20_loc = torch.ones([8]).int() * -1
if que21_cond:
que21_ans = (objects[ind_big_object][4]).int()-1
que21_loc = (objects[ind_big_object][:2]).int()
rel21_ans, rel21_loc = self.create_spatial_answers(objects, rel21_inds, 4)
else:
que21_ans = torch.ones([]).int()*-1
que21_loc = torch.ones([2]).int() * -1
rel21_ans = torch.ones([4]).int() * -1
rel21_loc = torch.ones([8]).int() * -1
if que22_cond:
que22_ans = (objects[ind_grey_cylinder_object][4]).int()-1
que22_loc = (objects[ind_grey_cylinder_object][:2]).int()
rel22_ans, rel22_loc = self.create_spatial_answers(objects, rel22_inds, 4)
else:
que22_ans = torch.ones([]).int()*-1
que22_loc = torch.ones([2]).int() * -1
rel22_ans = torch.ones([4]).int() * -1
rel22_loc = torch.ones([8]).int() * -1
if que23_cond:
que23_ans = (objects[ind_green_object][4]).int()-1
que23_loc = (objects[ind_green_object][:2]).int()
rel23_ans, rel23_loc = self.create_spatial_answers(objects, rel23_inds, 4)
else:
que23_ans = torch.ones([]).int()*-1
que23_loc = torch.ones([2]).int() * -1
rel23_ans = torch.ones([4]).int() * -1
rel23_loc = torch.ones([8]).int() * -1
if que24_cond:
que24_ans = (objects[ind_big_brown_object][4]).int()-1
que24_loc = (objects[ind_big_brown_object][:2]).int()
rel24_ans, rel24_loc = self.create_spatial_answers(objects, rel24_inds, 4)
else:
que24_ans = torch.ones([]).int()*-1
que24_loc = torch.ones([2]).int() * -1
rel24_ans = torch.ones([4]).int() * -1
rel24_loc = torch.ones([8]).int() * -1
if que25_cond:
que25_ans = (objects[ind_brown_cube_object][4]).int()-1
que25_loc = (objects[ind_brown_cube_object][:2]).int()
rel25_ans, rel25_loc = self.create_spatial_answers(objects, rel25_inds, 4)
else:
que25_ans = torch.ones([]).int()*-1
que25_loc = torch.ones([2]).int() * -1
rel25_ans = torch.ones([4]).int() * -1
rel25_loc = torch.ones([8]).int() * -1
if que26_cond:
que26_ans = (objects[ind_big_sphere_object][4]).int()-1
que26_loc = (objects[ind_big_sphere_object][:2]).int()
rel26_ans, rel26_loc = self.create_spatial_answers(objects, rel26_inds, 4)
else:
que26_ans = torch.ones([]).int()*-1
que26_loc = torch.ones([2]).int() * -1
rel26_ans = torch.ones([4]).int() * -1
rel26_loc = torch.ones([8]).int() * -1
if que27_cond:
que27_ans = (objects[ind_purple_cylinder_object][4]).int()-1
que27_loc = (objects[ind_purple_cylinder_object][:2]).int()
rel27_ans, rel27_loc = self.create_spatial_answers(objects, rel27_inds, 4)
else:
que27_ans = torch.ones([]).int()*-1
que27_loc = torch.ones([2]).int() * -1
rel27_ans = torch.ones([4]).int() * -1
rel27_loc = torch.ones([8]).int() * -1
if que28_cond:
que28_ans = (objects[ind_small_cube_object][4]).int()-1
que28_loc = (objects[ind_small_cube_object][:2]).int()
rel28_ans, rel28_loc = self.create_spatial_answers(objects, rel28_inds, 4)
else:
que28_ans = torch.ones([]).int()*-1
que28_loc = torch.ones([2]).int() * -1
rel28_ans = torch.ones([4]).int() * -1
rel28_loc = torch.ones([8]).int() * -1
if que29_cond:
que29_ans = (objects[ind_red_object][4]).int()-1
que29_loc = (objects[ind_red_object][:2]).int()
rel29_ans, rel29_loc = self.create_spatial_answers(objects, rel29_inds, 4)
else:
que29_ans = torch.ones([]).int()*-1
que29_loc = torch.ones([2]).int() * -1
rel29_ans = torch.ones([4]).int() * -1
rel29_loc = torch.ones([8]).int() * -1
if que30_cond:
que30_ans = (objects[ind_rubber_object][3]).int()-1
que30_loc = (objects[ind_rubber_object][:2]).int()
rel30_ans, rel30_loc = self.create_spatial_answers(objects, rel30_inds, 3)
else:
que30_ans = torch.ones([]).int()*-1
que30_loc = torch.ones([2]).int() * -1
rel30_ans = torch.ones([4]).int() * -1
rel30_loc = torch.ones([8]).int() * -1
if que31_cond:
que31_ans = (objects[ind_small_rubber_object][3]).int()-1
que31_loc = (objects[ind_small_rubber_object][:2]).int()
rel31_ans, rel31_loc = self.create_spatial_answers(objects, rel31_inds, 3)
else:
que31_ans = torch.ones([]).int()*-1
que31_loc = torch.ones([2]).int() * -1
rel31_ans = torch.ones([4]).int() * -1
rel31_loc = torch.ones([8]).int() * -1
if que32_cond:
que32_ans = (objects[ind_metal_cylinder_object][3]).int()-1
que32_loc = | |
    # | 1615209300 | 2021-03-08 14:15:00 | 43.7143 | 11.6095 | 1019.89 | 614.716605714286 | 0 |
# | 1615209600 | 2021-03-08 14:20:00 | 41.85 | 11.98 | 1019.88 | 551.6908515 | 0 |
# | 1615209900 | 2021-03-08 14:25:00 | 40.8571 | 11.981 | 1019.76 | 400.116792380952 | 0 |
# ...
# | 1617792000 | 2021-04-07 12:40:00 | NULL | NULL | 1014.02 | 295.11714 | 0 |
# | 1617792300 | 2021-04-07 12:45:00 | NULL | NULL | 1014.06 | 295.11714 | 0 |
# | 1617792600 | 2021-04-07 12:50:00 | NULL | NULL | 1014.19 | NULL | 0 |
# | 1617792900 | 2021-04-07 12:55:00 | NULL | NULL | 1014.28 | NULL | 0 |
#
query = "SELECT FROM_UNIXTIME(dateTime), outHumidity, outTemp, pressure, radiation, rain from archive " + \
"WHERE dateTime >= UNIX_TIMESTAMP(NOW() - INTERVAL " + str(days) + " DAY)"
logger.debug("Query: %s", query)
cursor.execute(query)
humidityDay = []
tempDay = []
pressureDay = []
radiationDay = []
rainDay = []
for row in cursor:
logger.debug("Time = %s", row[0])
try:
humidityDay.append(float(row[1]))
tempDay.append(float(row[2]))
            # Database is in hPa, need Pa
            pressureDay.append(float(row[3]) * 100)
            # Database stores radiation as the average W/m2 over the archive interval, and we need Joules/m2:
            # multiply by the interval length of 5 (minutes per datapoint) x 60 (seconds per minute)
radiationDay.append(float(row[4]) * 5 * 60)
rainDay.append(float(row[5]))
except TypeError:
            # There was a NULL in the data, so skip this row and continue with the next one (note:
            # values already appended from this row are kept, so the lists can drift out of step)
logger.debug("Row skipped due to incorrect data")
continue
logger.debug("Point %d: Humidity: %.0f %%, Temp: %.1f deg C, Pressure: %.0f Pa, Radiation: %.0f J/m2, Rain: %.1f mm", len(tempDay), humidityDay[-1], tempDay[-1], pressureDay[-1], radiationDay[-1], rainDay[-1])
# Close weewx database
    if (db.is_connected()):
        cursor.close()
        db.close()
logger.info("MySQL connection is closed")
# return the collected values turned into numpy arrays
return numpy.array(tempDay), numpy.array(humidityDay), numpy.array(pressureDay), numpy.array(radiationDay), numpy.array(rainDay)
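# Worked example of the unit handling above (values illustrative, not taken from the
# database): the code assumes weewx archives pressure in hPa and radiation as the average
# power density in W/m2 over each 5-minute interval, so one archive row converts as
#   pressure:  1019.89 hPa * 100               = 101989 Pa
#   radiation: 614.7 W/m2 * 5 min * 60 s/min   = 184410 J/m2 for that sample
# i.e. the "* 5 * 60" factor is simply the interval length in seconds (300 s).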
def load_irrigated( logger, \
zone, \
days, \
mysql_host, \
mysql_user, \
mysql_passwd ):
# Open irrigation database
logger.info("Opening MySQL Database irrigation on %s for loading data", mysql_host)
    db = mysql.connector.connect(user=mysql_user, password=mysql_passwd, host=mysql_host, database='irrigation')
cursor = db.cursor()
# NEED TO ADD EXAMPLE WITH zone FIELD
#
# Get the irrigation from the past X days, watered in liters per m2 = mm
# mysql> select dateTime, watered, UNIX_TIMESTAMP(NOW()), UNIX_TIMESTAMP(NOW() - INTERVAL 2 DAY) from irrigated where dateTime >= UNIX_TIMESTAMP(NOW() - INTERVAL 2 DAY);
# +------------+---------+-----------------------+----------------------------------------+
# | dateTime | watered | UNIX_TIMESTAMP(NOW()) | UNIX_TIMESTAMP(NOW() - INTERVAL 2 DAY) |
# +------------+---------+-----------------------+----------------------------------------+
# | 1614553200 | 0 | 1614673885 | 1614501085 |
# | 1614636558 | 1.05394 | 1614673885 | 1614501085 |
# +------------+---------+-----------------------+----------------------------------------+
#
query = "SELECT FROM_UNIXTIME(dateTime), watered from irrigated " + \
"WHERE dateTime >= UNIX_TIMESTAMP(NOW() - INTERVAL " + str(days) + " DAY) AND " + \
"zone LIKE '%%" + zone + "%%'"
logger.debug("Query: %s", query)
cursor.execute(query)
records = cursor.fetchall()
amount = cursor.rowcount
waterDay = numpy.zeros(amount)
waterSum = 0
i = 0
for row in records:
waterDay[i] = float(row[1])
logger.debug("Point %d: Time: %s Irrigation: %.1f liters per m2", i, row[0], waterDay[i])
i = i + 1
# Close irrigation database
    if (db.is_connected()):
        cursor.close()
        db.close()
logger.info("MySQL connection is closed")
# Return the collected values
return waterDay
def save_irrigated( logger, \
zone, \
watering_mm, \
mysql_host, \
mysql_user, \
mysql_passwd ):
# First make sure there is some irrigation to write
if (watering_mm > 0.0):
# Open irrigation database
logger.info("Opening MySQL Database irrigation on %s for writing data", mysql_host)
        db = mysql.connector.connect(user=mysql_user, password=mysql_passwd, host=mysql_host, database='irrigation')
cursor = db.cursor()
# Add irrigation amount (mm) to database
query = "INSERT INTO irrigated (dateTime, zone, watered) VALUES (%s, %s, %s)"
insert_time = time.time()
insert_zone = zone
insert_water = round(watering_mm, 1)
values = (insert_time, insert_zone, insert_water)
logger.debug("Query: %s", query)
logger.debug("Values: %d, %s, %f", insert_time, insert_zone, insert_water)
cursor.execute(query, values)
db.commit()
logger.info("Added irrigation of %0.1f mm on %s to database", watering_mm, zone)
# Close irrigation database
        if (db.is_connected()):
            cursor.close()
            db.close()
logger.info("MySQL connection is closed")
# return
# Generic repeating timer class for emulating callbacks
class RepeatedTimer():
def __init__(self, logger, interval, function, *args, **kwargs):
self.logger = logger
self.logger.debug("RepeatedTimer init")
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
def _run(self):
# self.logger.debug("RT_run %d:\tSetting running to False, call cont again, and run callback" % threading.get_ident())
self.is_running = False
self.cont()
self.function(*self.args, **self.kwargs)
def start(self):
# self.logger.debug("RT start")
self.next_call = time.time()
self.cont()
def cont(self):
# self.logger.debug("RT cont")
if not self.is_running:
self.next_call += self.interval
delta = self.next_call - time.time()
# self.logger.debug("RT cont - starting next thead in %.3f s" % delta)
self._timer = threading.Timer(delta, self._run)
self._timer.start()
self.is_running = True
# if running do nothing
def stop(self):
self._timer.cancel()
self.is_running = False
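# Hedged usage sketch for RepeatedTimer (names are illustrative, this is not part of the
# program flow): the timer re-arms itself from self.next_call rather than from "now", so
# the callback fires on a fixed schedule without accumulating drift. IrrigationZone below
# uses it to emulate flow-meter pulse callbacks at 50 Hz.
#
#   rt = RepeatedTimer(logger, 0.02, some_callback)   # call some_callback() every 20 ms
#   rt.start()
#   ...                                               # do other work
#   rt.stop()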
class WaterSource():
def __init__(self, logger, name, relay_pin):
self.logger = logger
self.logger.debug("WaterSource init for %s", name)
self.name = name
self.relay_pin = relay_pin
def get_name(self):
return self.name
def open_valve(self):
self.logger.info("Setting %s water ON", self.name)
# Note: Takes 10-15 seconds to fully open
GPIO.output(self.relay_pin, GPIO.HIGH)
def close_valve(self):
self.logger.info("Setting %s water OFF", self.name)
# Note: Takes 10-15 seconds to fully close
GPIO.output(self.relay_pin, GPIO.LOW)
class IrrigationZone():
def __init__(self, logger, name, relay_pin, area, shadow, flow_pin, flow_required = -1):
self.logger = logger
self.logger.debug("IrrigationZone init for %s", name)
self.name = name
self.area = area
self.shadow = shadow
self.irrigated_liters = 0
self.relay_pin = relay_pin
self.flow_pin = flow_pin
self.flow_required = flow_required
# Start a flowmeter associated with this zone
self.flow_meter = FlowMeter(self.logger, self.name)
        # Prepare for the emulated callback: called 50 times per second (every 0.02 s)
self.timer = RepeatedTimer(self.logger, 0.02, self.flow_meter.pulseCallback)
def get_name(self):
return self.name
def get_area(self):
return self.area
def get_shadow(self):
return self.shadow
def open_valve(self):
self.logger.info("Setting %s zone ON", self.name)
GPIO.output(self.relay_pin, GPIO.LOW)
def close_valve(self):
self.logger.info("Setting %s zone OFF", self.name)
GPIO.output(self.relay_pin, GPIO.HIGH)
def get_flow_pin(self):
return self.flow_pin
def set_pulse_callback(self):
self.logger.debug("%s: set_pulse_callback:", self.name)
# EZ lowered bouncetime from 20 to 1 ms, as pulse callbacks coming in faster (0.006 s!)
GPIO.add_event_detect(self.flow_pin, GPIO.RISING, callback=self.flow_meter.pulseCallback, bouncetime=1)
def set_emulated_pulse_callback(self):
self.logger.debug("%s: set_emulated_pulse_callback:", self.name)
self.timer.start()
def clear_pulse_callback(self):
self.logger.debug("%s: clear_pulse_callback:", self.name)
GPIO.remove_event_detect(self.flow_pin)
def clear_emulated_pulse_callback(self):
self.logger.debug("%s: clear_emulated_pulse_callback:", self.name)
self.timer.stop()
def get_flow_rate(self):
self.logger.debug("%s: get_flow_rate:", self.name)
return self.flow_meter.getFlowRate()
def get_flow_required(self):
self.logger.debug("%s: get_flow_required:", self.name)
return self.flow_required
def get_irrigated_liters(self):
return self.irrigated_liters
def set_irrigated_liters(self, actual):
self.irrigated_liters = actual
class FlowMeter():
''' Class representing the flow meter sensor which handles input pulses
and calculates current flow rate (L/min) measurement
'''
def __init__(self, logger, name):
self.logger = logger
self.logger.debug("Flow init for %s, setting last_time to now, and rate to 0", name)
self.name = name
self.average_flow_rate = 0.0
self.last_flow_rates = numpy.array([])
self.last_flow_rate = 0.0
self.last_time = datetime.now()
def pulseCallback(self, pin=0):
''' Callback that is executed with each pulse
received from the sensor
'''
self.logger.debug("%s: pulseCallback: Flowing! (pin %d)", self.name, pin)
# Calculate the time difference since last pulse received
current_time = datetime.now()
diff = (current_time - self.last_time).total_seconds()
if(diff < 2):
# Calculate current flow rate
hertz = 1.0 / diff
self.last_flow_rate = hertz / 7.5
self.last_flow_rates = numpy.append(self.last_flow_rates, self.last_flow_rate)
self.logger.debug("%s: pulseCallback: Rate %.1f (diff %.3f s from last_time %s)" % (self.name, self.last_flow_rate, diff, self.last_time))
else:
# Took too long, setting rates to 0
            self.last_flow_rate = 0.0
self.logger.debug("%s: pulseCallback: Took too long (%.0f s from last_time %s), setting flow rate to 0, resetting array" % (self.name, diff, self.last_time))
# Empty the array, as took too long
self.last_flow_rates = numpy.array([])
# Reset time of last pulse
self.last_time = current_time
self.logger.debug("%s: pulseCallback: Array size %d" % (self.name, numpy.size(self.last_flow_rates)))
def getFlowRate(self):
        ''' Return the average flow rate (L/min) measured since the last call.
        If no pulses have been received since then (or pulses arrived more
        than 2 s apart, which clears the sample buffer), 0.0 is returned.
        '''
self.logger.debug("%s: getFlowRate:", self.name)
self.logger.debug("%s: getFlowRate: Last flow rate %.1f" % (self.name, self.last_flow_rate))
# Calculate average since last call
stored_values = numpy.size(self.last_flow_rates)
if (stored_values > 0):
self.average_flow_rate = numpy.average(self.last_flow_rates)
else:
self.average_flow_rate = 0.0
self.logger.debug("%s: getFlowRate: Average flow rate %.1f (from %d values)" % (self.name, self.average_flow_rate, stored_values))
# Re-initialize the array
self.last_flow_rates = numpy.array([])
return self.average_flow_rate
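# Worked example of the flow-rate formula used in pulseCallback above: pulses arriving
# 0.1 s apart give hertz = 1 / 0.1 = 10 Hz and a rate of 10 / 7.5 ~= 1.33 L/min. The 7.5
# pulses-per-second per L/min factor is the calibration constant assumed here (typical of
# YF-S201-style hall-effect flow sensors); it would need adjusting for a different sensor.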
# Main
def main():
################################################################################################################################################
#Main program
################################################################################################################################################
print("%s %s (version %s)" % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), progname, version))
#print("Python version %s.%s.%s" | |
all_is_allowed.append( is_allowed )
else:
#print >> sys.stderr, "primer 5p position combination not allowed" # DEBUG
all_is_allowed.append( False )
break
#is_allowed = all(all_is_allowed)
#assert (is_allowed or not any(all_is_allowed)), "The multiple fwd and rev primer genomic start positions should all yield the same product"
return all(all_is_allowed)
def getPrimerStartsForIsoformGroupsTuple(self):
return self.starts_for_igs[self.isoform_groups_dual_primed]
def getDecoupledPrimerSpecsForIsoformGroupsTuple(self, fwdrev):
assert (fwdrev == "Fwd" or fwdrev == "Rev")
primer_specs = []
primer_starts_data = self.getPrimerStartsForIsoformGroupsTuple()
if (self.regions_merge_or_abutt):
primer_5p_pos_and_len = primer_starts_data[0] if (fwdrev == "Fwd") else primer_starts_data[1]
for tup in primer_5p_pos_and_len:
primer_specs.append( (tuple(map(itemgetter(0), tup[0])), tup[1]) )
else:
index = 0 if (fwdrev == "Fwd") else 1
accum_primer_specs = defaultdict(set)
for start_pos, primer_lens in primer_starts_data.items():
accum_primer_specs[primer_lens[index]].add(start_pos[index])
for primer_lens_tuple, primer_5p_positions in accum_primer_specs.items():
primer_specs.append( (primer_5p_positions, primer_lens_tuple) )
return primer_specs
def getDecoupledPrimerStartsForIsoformGroupsTuple(self, fwdrev):
assert (fwdrev == "Fwd" or fwdrev == "Rev")
primer_starts_data = self.getPrimerStartsForIsoformGroupsTuple()
if (self.regions_merge_or_abutt):
if (fwdrev == "Fwd"):
primer_starts = set(chain.from_iterable(map(itemgetter(0), map(itemgetter(0), primer_starts_data[0]))))
else:
primer_starts = set(chain.from_iterable(map(itemgetter(0), map(itemgetter(0), primer_starts_data[1]))))
else:
index = 0 if (fwdrev == "Fwd") else 1
primer_starts = set(map(itemgetter(index), primer_starts_data.keys()))
return primer_starts
def setIsoformGroupingsPrimerPositions(self, genome_ref, target_isoform_groups):
#possible_primer_lengths = range(designParams.min_primer_len, designParams.max_primer_len+1) # TODO: delete
read_len = designParams.read_len
if (self.regions_merge_or_abutt and len(self.nuc_seq) >= designParams.amplicon_min_len):
fwd_primer_region_genomic_positions = self.equiv_region_fwd.getGenomicPositions(as_set=True)
rev_primer_region_genomic_positions = self.equiv_region_rev.getGenomicPositions(as_set=True)
# Trim the Fwd and Rev regions by most_pos_needed
if (self.regions_merge_or_abutt == "abutt"):
most_pos_needed = designParams.amplicon_max_len - designParams.min_primer_len
else:
most_pos_needed = designParams.amplicon_max_len + \
len(fwd_primer_region_genomic_positions & rev_primer_region_genomic_positions) - \
designParams.min_primer_len
# Get the primer specs
ig = tuple(sorted(self.isoforms_dual_primed, key=methodcaller("getCGDBName")))
if (ig in target_isoform_groups):
ig_by_ID = tuple(map(methodcaller("getCGDBName"), ig))
self.starts_for_igs = {}
self.starts_for_igs[(ig,)] = self.getDescriptorsForLegalPrimers(most_pos_needed)
self.ranked_igs_tuples = [((ig,), (ig_by_ID,), ())]
elif (not self.regions_merge_or_abutt):
self.starts_for_igs = defaultdict(dict)
target_isoforms = set(chain.from_iterable(target_isoform_groups))
# Constrain the primer genomic positions to only what is maximally potentially needed, based on read
# length, so that the whole fwd & rev regions aren't searched for primer start positions
intervene_seq_lens = []
common_isoforms_intervene_seq = []
for isoform in self.isoforms_dual_primed:
assert (self.strand == isoform.getStrand())
fwd_start, fwd_stop, rev_start, rev_stop = self.getStartsStopsForIsoform(isoform)
#isoform_genomic_coords = isoform.getAllGenomicCoords()
if (self.strand == '+'):
#fwd_primer_region_termini = self.equiv_region_fwd.getStops(as_set=True) & isoform_genomic_coords
#rev_primer_region_termini = self.equiv_region_rev.getStarts(as_set=True) & isoform_genomic_coords
#most_3p_fwd_genomic_position, most_5p_rev_genomic_position = min(fwd_primer_region_termini), max(rev_primer_region_termini)
most_3p_fwd_genomic_position, most_5p_rev_genomic_position = fwd_stop, rev_start
else:
#fwd_primer_region_termini = self.equiv_region_fwd.getStarts(as_set=True) & isoform_genomic_coords
#rev_primer_region_termini = self.equiv_region_rev.getStops(as_set=True) & isoform_genomic_coords
#most_3p_fwd_genomic_position, most_5p_rev_genomic_position = max(fwd_primer_region_termini), min(rev_primer_region_termini)
most_3p_fwd_genomic_position, most_5p_rev_genomic_position = fwd_start, rev_stop
# Cache the dual primed isoforms and each of their associated intervening sequences.
# There are special cases in which the fwd/rev equiv regions overlap on some isoforms.
# The overlap amount is the negative of intervene_seq_len, and intervene_seq will be an empty string
try:
intervene_seq_plus = isoform.getSequence(genome_ref, most_3p_fwd_genomic_position, most_5p_rev_genomic_position)
# Trim the 1bp of equiv_regions on each side
intervene_seq_len = len(intervene_seq_plus) - 2
intervene_seq = intervene_seq_plus[1:-1] if (len(intervene_seq_plus) > 2) else ""
except AssertionError as ae:
if (str(ae) == "mRNA 5' position > 3' position"):
intervene_seq_len = isoform.getNegativeSequenceLength(most_3p_fwd_genomic_position, most_5p_rev_genomic_position)
intervene_seq = ''
else:
pdb.set_trace()
raise ae
common_isoforms_intervene_seq.append( (isoform, intervene_seq_len, intervene_seq) )
intervene_seq_lens.append(intervene_seq_len)
shortest_intervene_seq_len = min(intervene_seq_lens) if (min(intervene_seq_lens) >= 0) else 0
# IMPORTANT: The intervening sequence length constraint used here must be the same as is used in couldPotentiallyProductivelyPairWith()
most_pos_needed = designParams.amplicon_max_len - shortest_intervene_seq_len - designParams.min_primer_len
if (most_pos_needed > 0):
fwd_nuc_seq = self.equiv_region_fwd.getNucSeq()
rev_nuc_seq = self.equiv_region_rev.getNucSeq()
fwd_primer_5p_positions_and_lens, rev_primer_5p_positions_and_lens = self.getDescriptorsForLegalPrimers(most_pos_needed)
assert (fwd_primer_5p_positions_and_lens != None and rev_primer_5p_positions_and_lens != None)
# Group the fwd primer/rev primer start positions that yield the same isoform groups (as revealed by paired-end sequencing)
for (fwd_genomic_5ps, fwd_lens), (rev_genomic_5ps, rev_lens) in product(fwd_primer_5p_positions_and_lens, rev_primer_5p_positions_and_lens):
for ((fwd_genomic_5p, fwd_region_amplicon_len), (rev_genomic_5p, rev_region_amplicon_len)) in product(fwd_genomic_5ps, rev_genomic_5ps):
isoforms_grouped_by_paired_reads_short = defaultdict(list)
isoforms_grouped_by_paired_reads_long = defaultdict(list)
amplicon_lens = {}
for isoform, intervene_seq_len, intervene_seq in common_isoforms_intervene_seq:
if (intervene_seq_len < 0 and abs(intervene_seq_len) > fwd_region_amplicon_len):
continue
amplicon_len = fwd_region_amplicon_len + intervene_seq_len + rev_region_amplicon_len
#confirm_amplicon_len = isoform.getSequenceLength(fwd_genomic_5p, rev_genomic_5p)
#try:
# assert (amplicon_len == confirm_amplicon_len), "Inconsistent amplicon lengths, computed vs actual"
#except AssertionError:
# pdb.set_trace()
if (amplicon_len < designParams.amplicon_min_len and isoform in target_isoforms):
# Disallow candidate primer positions to avoid possible confusion by actual sequencing of amplicons near min length limit.
break
#if (amplicon_len > designParams.amplicon_max_len and isoform in target_isoforms):
# pdb.set_trace() # Indicates a problem # Now don't think so. Such cases will be handled below with igs_short/igs_long
if (amplicon_len <= designParams.confusion_amplicon_max_len):
if (intervene_seq_len < 0):
assert (intervene_seq == '')
amplicon = fwd_nuc_seq[-fwd_region_amplicon_len:intervene_seq_len] + rev_nuc_seq[0:rev_region_amplicon_len]
else:
amplicon = fwd_nuc_seq[-fwd_region_amplicon_len:] + intervene_seq + rev_nuc_seq[0:rev_region_amplicon_len]
paired_reads = (amplicon[0:read_len], amplicon[-read_len:])
isoforms_grouped_by_paired_reads_long[paired_reads].append( isoform )
if (amplicon_len <= designParams.amplicon_max_len):
isoforms_grouped_by_paired_reads_short[paired_reads].append( isoform )
amplicon_lens[isoform] = amplicon_len
igs_short = set()
for isoforms_w_same_read_pair in isoforms_grouped_by_paired_reads_short.values():
ig = sorted(isoforms_w_same_read_pair, key=methodcaller("getCGDBName"))
igs_short.add(tuple(ig))
igs_long = set()
for isoforms_w_same_read_pair in isoforms_grouped_by_paired_reads_long.values():
ig = sorted(isoforms_w_same_read_pair, key=methodcaller("getCGDBName"))
igs_long.add(tuple(ig))
# Checks to see if there is a target isoform group composed of (short) sequenceably-distinguishable amplicons that is
# robust to the presence of longer amplicons that could avoid size selection and get clustered and sequenced.
distinct_tigs = list(filter(lambda tig: tig in igs_short and tig in igs_long, target_isoform_groups))
if (len(distinct_tigs)>0):
mean_target_amplicon_len_deviation = np.mean(list(map(lambda i: abs(amplicon_lens[i]-designParams.amplicon_opt_len), chain.from_iterable(distinct_tigs))))
igs_long = sorted(igs_long)
self.starts_for_igs[tuple(igs_long)][(fwd_genomic_5p, rev_genomic_5p)] = (fwd_lens, rev_lens, int(mean_target_amplicon_len_deviation))
numtigs_to_tigs = defaultdict(set)
igs_tuples_w_rank_criteria = []
for igs_tuple, primers_search_space in self.starts_for_igs.items():
ntigs_by_ID, tigs_by_ID = [], []
for ig in igs_tuple:
ig_by_ID = tuple(map(methodcaller("getCGDBName"), ig))
if (ig in target_isoform_groups):
tigs_by_ID.append(ig_by_ID)
else:
ntigs_by_ID.append(ig_by_ID)
if (len(tigs_by_ID) > 0):
igs_tuples_w_rank_criteria.append( (igs_tuple, tuple(tigs_by_ID), tuple(ntigs_by_ID), len(tigs_by_ID), len(ntigs_by_ID), len(primers_search_space)) )
# The order in which to consider the igs_tuple to use for this PPR. Want to prioritize those that have the most TIGs.
# igs_tuples with fewer TIGs will contain a subset of the TIGs of those of larger igs_tuples.
#
# REMOVED: Second ranking criterion: For igs_tuples with the same number of TIGs, want to use those with more non-TIGs because that implies
# primers closer to edges of the equiv primer regions, and so increase the chances of picking up novel splicing.
#
# Third ranking criterion: Larger search space implies close to edges of equiv primer regions.
igs_tuples_w_rank_criteria.sort(key=itemgetter(3,5), reverse=True) # 2,
self.ranked_igs_tuples = list(map(itemgetter(0,1,2), igs_tuples_w_rank_criteria))
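        # Illustration of the ranking above (hypothetical numbers): the criteria tuples have the
        # form (igs_tuple, tigs, ntigs, num_tigs, num_ntigs, search_space_size), so sorting on
        # itemgetter(3, 5) with reverse=True orders
        #   (..., 3, 1, 40)  before  (..., 2, 5, 90)  before  (..., 2, 0, 10)
        # i.e. more target isoform groups first and, for ties, a larger primer search space first.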
def getDescriptorsForLegalPrimers(self, most_pos_needed):
        '''Returns a pair of descriptor lists, one for the fwd and one for the rev equivalent primer region,
        each giving the primer 5' genomic positions and associated primer lengths that are contained within
        the allowed position ranges.'''
fwd_primer_5p_positions_and_lens = self.equiv_region_fwd.getDescriptorsForLegalPrimers(most_pos_needed)
rev_primer_5p_positions_and_lens = self.equiv_region_rev.getDescriptorsForLegalPrimers(most_pos_needed)
return (fwd_primer_5p_positions_and_lens, rev_primer_5p_positions_and_lens)
def getID(self):
return self.ID
def getFwdID(self):
return self.equiv_region_fwd.getID()
def getRevID(self):
return self.equiv_region_rev.getID()
def getFwdRevIDs(self):
return (self.equiv_region_fwd.getID(), self.equiv_region_rev.getID())
def getFwdEquivRegion(self):
return self.equiv_region_fwd
def getRevEquivRegion(self):
return self.equiv_region_rev
def getStartsStops(self):
return (self.starts, self.stops)
def getStrand(self):
return self.strand
def getPrimedIsoformGroupsTuple(self):
return self.isoform_groups_dual_primed
def myFwdOtherRevSharePos(self, other_ppr):
my_fwd_genomic_positions = self.equiv_region_fwd.getGenomicPositions(as_set=True)
other_rev_region = other_ppr.getRevEquivRegion()
other_rev_genomic_positions = other_rev_region.getGenomicPositions(as_set=True)
return not my_fwd_genomic_positions.isdisjoint(other_rev_genomic_positions)
def myFwdOtherRevHaveCompatiblePrimers(self, other_ppr):
# Incorporate code idea from constrainPrimersPerEquivRegion(). Can make evaluation on just the fwd/rev genomic 5' positions.
        raise NotImplementedError("myFwdOtherRevHaveCompatiblePrimers() is not implemented yet")
def getStartsStopsForIsoform(self, isoform):
fwd_start, fwd_stop = self.equiv_region_fwd.getIsoformStartStop(isoform)
rev_start, rev_stop = self.equiv_region_rev.getIsoformStartStop(isoform)
return fwd_start, fwd_stop, rev_start, rev_stop
def getStartsStopsForIsoform_DEPRACATED(self, isoform):
fwd_start, fwd_stop, rev_start, rev_stop = None, None, None, None
isoform_genomic_positions = isoform.getAllGenomicCoords()
fwd_genomic_positions_graph = self.equiv_region_fwd.getGenomicPositions()
rev_genomic_positions_graph = self.equiv_region_rev.getGenomicPositions()
# Get the start/stop that defines the contiguous genomic positions of the fwd/rev regions in this isoform
fwd_starts, fwd_stops = self.equiv_region_fwd.getStartsStops()
rev_starts, rev_stops = self.equiv_region_rev.getStartsStops()
for start,stop in product(fwd_starts,fwd_stops):
if (start in isoform_genomic_positions and stop in isoform_genomic_positions):
path = nx.shortest_path(fwd_genomic_positions_graph,start,stop)
if (isoform.areAllSequential(path)):
assert (fwd_start == None and fwd_stop == None), "There should only be one contiguous path"
fwd_start, fwd_stop = start, stop
for start,stop in product(rev_starts,rev_stops):
if (start in isoform_genomic_positions and stop in isoform_genomic_positions):
path = nx.shortest_path(rev_genomic_positions_graph,start,stop)
if (isoform.areAllSequential(path)):
assert (rev_start == None and rev_stop == None), "There should only be one contiguous path"
rev_start, rev_stop = start, stop
termini_tup = (fwd_start, fwd_stop, rev_start, rev_stop)
assert (all(map(lambda x: x != None, termini_tup))), "Start/stops for fwd/rev equiv primer regions were not all set"
return termini_tup
def doesConflictOnAnIsoformWith(self, other_ppr, isoforms_of_interest):
'''Unless the Fwd or Rev primer regions are the same between self and other_ppr, there is a conflict if
amplicons primed on a common isoform overlap.'''
do_conflict = False
isoform_w_conflict = None
# If the Fwd this & Rev other OR Fwd other & Rev this have a common isoforms that | |
').replace('\r', ' ')
r23c14 = request.POST.get('r23c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r23c15 = request.POST.get('r23c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r23c16 = request.POST.get('r23c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r23c17 = request.POST.get('r23c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c1 = request.POST.get('r24c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c2 = request.POST.get('r24c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c3 = request.POST.get('r24c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c4 = request.POST.get('r24c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c5 = request.POST.get('r24c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c6 = request.POST.get('r24c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c7 = request.POST.get('r24c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c8 = request.POST.get('r24c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c9 = request.POST.get('r24c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c10 = request.POST.get('r24c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c11 = request.POST.get('r24c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c12 = request.POST.get('r24c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c13 = request.POST.get('r24c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c14 = request.POST.get('r24c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c15 = request.POST.get('r24c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c16 = request.POST.get('r24c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c17 = request.POST.get('r24c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
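    # Hedged alternative sketch (not what this view currently does): the r<row>c<col> values
    # above could equivalently be collected with a nested loop, keeping the cleaning logic in
    # one place. The dict name 'cells' is illustrative only.
    #
    #   cells = {}
    #   for row_i in range(1, 25):
    #       for col_i in range(1, 18):
    #           key = 'r%dc%d' % (row_i, col_i)
    #           cells[key] = request.POST.get(key, '').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')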
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Annual expense budget</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Annual expense budget</div>' + \
'<div class="card-body">'
    body += '<h6>Company name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">M1</th>' + \
'<th scope="col">M2</th>' + \
'<th scope="col">M3</th>' + \
'<th scope="col">Q1</th>' + \
'<th scope="col">M4</th>' + \
'<th scope="col">M5</th>' + \
'<th scope="col">M6</th>' + \
'<th scope="col">Q2</th>' + \
'<th scope="col">M7</th>' + \
'<th scope="col">M8</th>' + \
'<th scope="col">M9</th>' + \
'<th scope="col">Q3</th>' + \
'<th scope="col">M10</th>' + \
'<th scope="col">M11</th>' + \
'<th scope="col">M12</th>' + \
'<th scope="col">Q4</th>' + \
'<th scope="col">Total</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>Marketing budget expense</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'<td>' + r1c5 + '</td>' + \
'<td>' + r1c6 + '</td>' + \
'<td>' + r1c7 + '</td>' + \
'<td>' + r1c8 + '</td>' + \
'<td>' + r1c9 + '</td>' + \
'<td>' + r1c10 + '</td>' + \
'<td>' + r1c11 + '</td>' + \
'<td>' + r1c12 + '</td>' + \
'<td>' + r1c13 + '</td>' + \
'<td>' + r1c14 + '</td>' + \
'<td>' + r1c15 + '</td>' + \
'<td>' + r1c16 + '</td>' + \
'<td>' + r1c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Marketing actual expense</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'<td>' + r2c5 + '</td>' + \
'<td>' + r2c6 + '</td>' + \
'<td>' + r2c7 + '</td>' + \
'<td>' + r2c8 + '</td>' + \
'<td>' + r2c9 + '</td>' + \
'<td>' + r2c10 + '</td>' + \
'<td>' + r2c11 + '</td>' + \
'<td>' + r2c12 + '</td>' + \
'<td>' + r2c13 + '</td>' + \
'<td>' + r2c14 + '</td>' + \
'<td>' + r2c15 + '</td>' + \
'<td>' + r2c16 + '</td>' + \
'<td>' + r2c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Marketing variance expense</td>' + \
'<td>' + r3c1 + '</td>' + \
'<td>' + r3c2 + '</td>' + \
'<td>' + r3c3 + '</td>' + \
'<td>' + r3c4 + '</td>' + \
'<td>' + r3c5 + '</td>' + \
'<td>' + r3c6 + '</td>' + \
'<td>' + r3c7 + '</td>' + \
'<td>' + r3c8 + '</td>' + \
'<td>' + r3c9 + '</td>' + \
'<td>' + r3c10 + '</td>' + \
'<td>' + r3c11 + '</td>' + \
'<td>' + r3c12 + '</td>' + \
'<td>' + r3c13 + '</td>' + \
'<td>' + r3c14 + '</td>' + \
'<td>' + r3c15 + '</td>' + \
'<td>' + r3c16 + '</td>' + \
'<td>' + r3c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Legal budget expense</td>' + \
'<td>' + r4c1 + '</td>' + \
'<td>' + r4c2 + '</td>' + \
'<td>' + r4c3 + '</td>' + \
'<td>' + r4c4 + '</td>' + \
'<td>' + r4c5 + '</td>' + \
'<td>' + r4c6 + '</td>' + \
'<td>' + r4c7 + '</td>' + \
'<td>' + r4c8 + '</td>' + \
'<td>' + r4c9 + '</td>' + \
'<td>' + r4c10 + '</td>' + \
'<td>' + r4c11 + '</td>' + \
'<td>' + r4c12 + '</td>' + \
'<td>' + r4c13 + '</td>' + \
'<td>' + r4c14 + '</td>' + \
'<td>' + r4c15 + '</td>' + \
'<td>' + r4c16 + '</td>' + \
'<td>' + r4c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Legal actual expense</td>' + \
'<td>' + r5c1 + '</td>' + \
'<td>' + r5c2 + '</td>' + \
'<td>' + r5c3 + '</td>' + \
'<td>' + r5c4 + '</td>' + \
'<td>' + r5c5 + '</td>' + \
'<td>' + r5c6 + '</td>' + \
'<td>' + r5c7 + '</td>' + \
'<td>' + r5c8 + '</td>' + \
'<td>' + r5c9 + '</td>' + \
'<td>' + r5c10 + '</td>' + \
'<td>' + r5c11 + '</td>' + \
'<td>' + r5c12 + '</td>' + \
'<td>' + r5c13 + '</td>' + \
'<td>' + r5c14 + '</td>' + \
'<td>' + r5c15 + '</td>' + \
'<td>' + r5c16 + '</td>' + \
'<td>' + r5c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Legal variance expense</td>' + \
'<td>' + r6c1 + '</td>' + \
'<td>' + r6c2 + '</td>' + \
'<td>' + r6c3 + '</td>' + \
'<td>' + r6c4 + '</td>' + \
'<td>' + r6c5 + '</td>' + \
'<td>' + r6c6 + '</td>' + \
'<td>' + r6c7 + '</td>' + \
'<td>' + r6c8 + '</td>' + \
'<td>' + r6c9 + '</td>' + \
'<td>' + r6c10 + '</td>' + \
'<td>' + r6c11 + '</td>' + \
'<td>' + r6c12 + '</td>' + \
'<td>' + r6c13 + '</td>' + \
'<td>' + r6c14 + '</td>' + \
'<td>' + r6c15 + '</td>' + \
'<td>' + r6c16 + '</td>' + \
'<td>' + r6c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent budget expense</td>' + \
'<td>' + r7c1 + '</td>' + \
'<td>' + r7c2 + '</td>' + \
'<td>' + r7c3 + '</td>' + \
'<td>' + r7c4 + '</td>' + \
'<td>' + r7c5 + '</td>' + \
'<td>' + r7c6 + '</td>' + \
'<td>' + r7c7 + '</td>' + \
'<td>' + r7c8 + '</td>' + \
'<td>' + r7c9 + '</td>' + \
'<td>' + r7c10 + '</td>' + \
'<td>' + r7c11 + '</td>' + \
'<td>' + r7c12 + '</td>' + \
'<td>' + r7c13 + '</td>' + \
'<td>' + r7c14 + '</td>' + \
'<td>' + r7c15 + '</td>' + \
'<td>' + r7c16 + '</td>' + \
'<td>' + r7c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent actual expense</td>' + \
'<td>' + r8c1 + '</td>' + \
'<td>' + r8c2 + '</td>' + \
'<td>' + r8c3 + '</td>' + \
'<td>' + r8c4 + '</td>' + \
'<td>' + r8c5 + '</td>' + \
'<td>' + r8c6 + '</td>' + \
'<td>' + r8c7 + '</td>' + \
'<td>' + r8c8 + '</td>' + \
'<td>' + r8c9 + '</td>' + \
'<td>' + r8c10 + '</td>' + \
'<td>' + r8c11 + '</td>' + \
'<td>' + r8c12 + '</td>' + \
'<td>' + r8c13 + '</td>' + \
'<td>' + r8c14 + '</td>' + \
'<td>' + r8c15 + '</td>' + \
'<td>' + r8c16 + '</td>' + \
'<td>' + r8c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent variance expense</td>' + \
'<td>' + r9c1 + '</td>' | |
# Repository: rigdenlab/SWAMP
import os
import gemmi
import conkit.io
import itertools
import pandas as pd
import swamp.utils as utils
from swamp.wrappers import Gesamt
from swamp.parsers import PdbtmXmlParser
from swamp.logger import SwampLogger
class SwampLibrary(object):
"""Class that implements certain methods to create a SWAMP fragment library and data structures to manage the data
of interest in the library.
:param str workdir: the working directory for this instance. Only used if a library will be created.
:param `~swamp.logger.swamplogger.SwampLogger` logger: logging instance
:ivar `pandas.DataFrame` rmsd_matrix: square dataframe with the rmsd distance across fragments in the library
:ivar `pandas.DataFrame` qscore_matrix: square dataframe with the similarity across fragments in the library
:ivar `pandas.DataFrame` nalign_matrix: square dataframe with the no. of aligned residues between fragments in the \
library
:ivar str pdb_library: location of the directory with the pdb files contained in the SWAMP library
:ivar str pdbtm_svn: location of the pdbtm svn repository
:ivar str outdir: an output directory for any operation of the :py:obj:`~swamp.utils.swamplibrary.SwampLibrary` \
instance
:example:
>>> from swamp.utils.swamplibrary import SwampLibrary
>>> my_library = SwampLibrary('<workdir>')
>>> pdb_code_list = my_library.parse_nr_listfile("/path/to/nr_list")
>>> my_library.pdbtm_svn = "/path/to/pdbtm_svn"
>>> my_library.pdb_library = "/path/to/pdb_library"
>>> my_library.make_library(pdb_codes=pdb_code_list)
>>> my_library.all_vs_all_gesamt(outdir="/path/to/outdir", inputdir="/path/to/library", nthreads=1)
>>> my_library.create_distance_mtx(gesamt_dir="/path/to/gesamt_dir")
"""
def __init__(self, workdir, logger=None):
self.workdir = workdir
self._make_workdir()
if logger is None:
self.logger = SwampLogger(__name__)
self.logger.init(logfile=None, use_console=True, console_level='info')
else:
self.logger = logger
self.qscore_matrix = None
self.nalign_matrix = None
self.rmsd_matrix = None
self.pdb_library = None
self.pdbtm_svn = None
self.outdir = None
# ------------------ Properties ------------------
@property
def pdbfiles_list(self):
"""A list of file names in :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.pdb_library`"""
return [os.path.join(self.pdb_library, fname) for fname in os.listdir(self.pdb_library)]
@property
def _pdbfname_template(self):
"""A template file name for pdb files"""
return os.path.join(self.pdb_library, "{}", "pdb{}.ent.gz")
@property
def _xmlfname_template(self):
"""A template file name for xml files"""
return os.path.join(self.pdbtm_svn, "{}", "{}.xml")
@property
def _fragfile_template(self):
"""A template file name for fragment pdb files"""
return os.path.join(self.pdb_library, '{}.pdb')
@property
def _library_out_template(self):
"""A template file name for pdb files with helical pairs"""
return os.path.join(self.workdir, '{}_{}{}_{}{}.{}')
@property
def _ensemble_pdbout_template(self):
"""A template pdb file name for ensemble output files"""
if self.outdir is None:
return None
else:
return os.path.join(self.outdir, 'ensemble_{}.pdb')
@property
def _centroid_template(self):
"""A centroid pdb file name template"""
if self.outdir is None:
return None
else:
return os.path.join(self.outdir, 'centroid_{}.pdb')
# ------------------ Hidden methods ------------------
def _is_valid_entry(self, pdbcode):
"""For a given pdb code, check if there is a PDB and a XML file in the \
:py:attr:`~swamp.utils.swamplibrary.pdb_library` and :py:attr:`~swamp.utils.swamplibrary.pdbtm_svn` respectively
:param str pdbcode: the pdb code of interest
:returns: True if all the files are present (bool)
"""
if not os.path.isfile(self._pdbfname_template.format(pdbcode[1:3], pdbcode)):
self.logger.warning("Entry %s not found in input dir %s" % (pdbcode, self.pdb_library))
return False
if not os.path.isfile(self._xmlfname_template.format(pdbcode[1:3], pdbcode)):
self.logger.warning("Entry %s not found in input dir %s" % (pdbcode, self.pdbtm_svn))
return False
return True
def _make_workdir(self):
"""Create the :py:attr:`~swamp.utils.swamplibrary.workdir`
:raises ValueError: if :py:attr:`~swamp.utils.swamplibrary.workdir` is None
"""
if self.workdir is None:
raise ValueError("Impossible to create workdir, please set workdir value first!")
if not os.path.isdir(self.workdir):
os.mkdir(self.workdir)
def _determine_orientation(self, frag_ids):
"""For a given set of fragment ids, determine the optimal orientation to ensemble them and return the tuple of \
file names
:param frag_ids: a list with the fragment ids of interest
:type frag_ids: list, tuple
:returns: a tuple with the file names of the alignment that scored the highest qscore (tuple)
:raises ValueError: if there are less than 2 fragments in the input list
"""
if len(frag_ids) < 2:
raise ValueError("Impossible to determine the orientation of less than two fragments!")
qscores = []
frag_list = [(frag, SwampLibrary._get_reciprocal_id(frag)) for frag in frag_ids]
all_combinations = list(itertools.product(*frag_list))
for combination in all_combinations:
gesamt = Gesamt(pdbin=[self._fragfile_template.format(x) for x in combination], workdir=None,
mode="alignment")
gesamt.run()
qscores.append(float(gesamt.summary_results["qscore"]))
return all_combinations[qscores.index(max(qscores))]
# ------------------ Public methods ------------------
def remove_homologs(self, pdb_ids_to_remove):
"""Remove fragments originating from a set of pdb structures out of \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.qscore_matrix`, \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.rmsd_matrix`, \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.nalign_matrix`
:argument tuple pdb_ids_to_remove: tuple with the pdb codes of the structures to be removed
"""
# Detect the fragments coming from homolog structures (convert everything to lower case)
pdb_ids_to_remove = [pdb.lower() for pdb in pdb_ids_to_remove]
frag_ids_to_remove = []
for frag_id in self.qscore_matrix.columns:
if frag_id.split('_')[0].lower() in pdb_ids_to_remove:
frag_ids_to_remove.append(frag_id)
# Remove the fragments
self.qscore_matrix.drop(frag_ids_to_remove, 0, inplace=True)
self.qscore_matrix.drop(frag_ids_to_remove, 1, inplace=True)
self.rmsd_matrix.drop(frag_ids_to_remove, 0, inplace=True)
self.rmsd_matrix.drop(frag_ids_to_remove, 1, inplace=True)
self.nalign_matrix.drop(frag_ids_to_remove, 0, inplace=True)
self.nalign_matrix.drop(frag_ids_to_remove, 1, inplace=True)
def create_distance_mtx(self, gesamt_dir):
"""Create the square distance matrices for the library: \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.qscore_matrix`, \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.rmsd_matrix` and \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.nalign_matrix`
Requires the :py:func:`~swamp.utils.swamplibrary.SwampLibrary.all_vs_all_gesamt` results. The distance \
matrices contain the optimal structural alignment between every set of fragments present in the library.
:param str gesamt_dir: directory containing the .hit files resulting from the all vs all gesamt search results
"""
frag_dict = self._get_frag_id_dict(gesamt_dir)
self.qscore_matrix = pd.DataFrame()
self.qscore_matrix["frag_id"] = list(frag_dict.keys())
self.rmsd_matrix = pd.DataFrame()
self.rmsd_matrix["frag_id"] = list(frag_dict.keys())
self.nalign_matrix = pd.DataFrame()
self.nalign_matrix["frag_id"] = list(frag_dict.keys())
self.logger.info("Creating distance matrices now...")
n_frags = len(frag_dict.keys())
for idx, unique_id in enumerate(frag_dict.keys()):
self.logger.info("Working on entry %s (%s/%s)" % (unique_id, idx + 1, n_frags))
fragment_distances = None
for hitfile in frag_dict[unique_id]:
# Get the current distances
current_hits = Gesamt.parse_hitfile(hitfile)
current_hits.drop("n_res", 1, inplace=True)
current_hits.drop("seq_id", 1, inplace=True)
current_hits.drop("rmsd", 1, inplace=True)
current_hits.fname = current_hits.fname.str.replace('.pdb', '')
current_hits.rename(columns={"fname": "frag_id"}, inplace=True)
current_hits.frag_id = current_hits.frag_id.apply(lambda x: self._get_unique_frag_id(x))
current_hits["max_qscore"] = current_hits.groupby(["frag_id"], sort=False)["qscore"].transform(max)
current_hits = current_hits[current_hits.qscore == current_hits.max_qscore]
current_hits.drop("qscore", 1, inplace=True)
current_hits.drop_duplicates(inplace=True)
# Append results to the current fragment distances
fragment_distances = pd.concat([fragment_distances, current_hits]).reset_index(drop=True)
# Get the final distances for this fragment
fragment_distances.rename(columns={'max_qscore': 'qscore'}, inplace=True)
fragment_distances['max_qscore'] = fragment_distances.groupby(["frag_id"], sort=False)["qscore"].transform(
max)
fragment_distances = fragment_distances[fragment_distances.qscore == fragment_distances.max_qscore]
fragment_distances.drop("max_qscore", 1, inplace=True)
fragment_distances.drop_duplicates(subset='frag_id', inplace=True)
# Store it in the final matrix
self.qscore_matrix = self.qscore_matrix.merge(fragment_distances.loc[:, ['frag_id', 'qscore']], how="left",
on=["frag_id"])
self.qscore_matrix.rename(columns={'qscore': unique_id}, inplace=True)
self.nalign_matrix = self.nalign_matrix.merge(fragment_distances.loc[:, ['frag_id', 'n_align']], how="left",
on=["frag_id"])
self.nalign_matrix.rename(columns={'n_align': unique_id}, inplace=True)
self.rmsd_matrix = self.rmsd_matrix.merge(fragment_distances.loc[:, ['frag_id', 'rmsd']], how="left",
on=["frag_id"])
self.rmsd_matrix.rename(columns={'rmsd': unique_id}, inplace=True)
self.rmsd_matrix = self.rename_axis(self.rmsd_matrix)
self.nalign_matrix = self.rename_axis(self.nalign_matrix)
self.qscore_matrix = self.rename_axis(self.qscore_matrix)
def make_library(self, pdb_codes):
"""Create the pdb files for each contacting TM helical pair in detected with the information at \
:py:attr:`~swamp.utils.swamplibrary.pdb_library` and :py:attr:`~swamp.utils.swamplibrary.pdbtm_svn`. Files \
will be created at :py:attr:`~swamp.utils.swamplibrary.workdir`
:param list pdb_codes: a list with the pdb codes that will be included in the library
"""
for idx, entry in enumerate(pdb_codes):
pdbcode = entry[0]
chain = entry[1]
if not self._is_valid_entry(pdbcode):
self.logger.warning("Skipping invalid entry %s" % pdbcode)
continue
self.logger.info(
"Processing %s:%s entry to the library (%s / %s)" % (pdbcode, chain, idx + 1, len(pdb_codes)))
# TM helices
pdbtm_parser = PdbtmXmlParser(self._xmlfname_template.format(pdbcode[1:3], pdbcode))
pdbtm_parser.parse()
tmhelices = [ss_annot for ss_annot in pdbtm_parser.ss2_annotation if
ss_annot.type == "H" and ss_annot.chain == chain]
# Extract pdb hierarchy
full_hierarchy = gemmi.read_structure(self._pdbfname_template.format(pdbcode[1:3], pdbcode))
if full_hierarchy.info.__getitem__('_exptl.method') != 'X-RAY DIFFRACTION':
self.logger.info('Not an X-ray structure, skipping...')
continue
full_hierarchy.remove_waters()
# Check helical pairs individually
for idx, helix_a in enumerate(tmhelices):
for helix_b in tmhelices[idx + 1:]:
helix_a_hierarchy = utils.extract_hierarchy(to_extract=helix_a.pdb_region,
chainID=helix_a.chain,
full_hierarchy=full_hierarchy)
helix_b_hierarchy = utils.extract_hierarchy(to_extract=helix_b.pdb_region,
chainID=helix_b.chain,
full_hierarchy=full_hierarchy)
fragment_hierarchy = utils.merge_hierarchies((helix_a_hierarchy, helix_b_hierarchy),
renumber=False)
fragment_cmap = utils.extract_fragment_cmap(fragment_hierarchy,
(helix_a.pdb_region, helix_b.pdb_region))
if fragment_cmap is None:
self.logger.warning(
"No contacts loaded from %s:%s %s - %s" % (pdbcode, chain, helix_a.index, helix_b.index))
continue
if len(fragment_cmap) >= 2:
self.logger.info(
"Found contacting helical pair! %s %s %s" % (pdbcode, helix_a.index, helix_b.index))
# Write pdb files
fragment_hierarchy.cell = full_hierarchy.cell
utils.renumber_hierarchy(fragment_hierarchy)
inverted_fragment = utils.invert_hiearchy(fragment_hierarchy)
inverted_fragment.cell = full_hierarchy.cell
pdbout = self._library_out_template.format(pdbcode, helix_a.index, helix_a.chain, helix_b.index,
helix_b.chain, "pdb")
fragment_hierarchy.write_pdb(pdbout)
pdbout = self._library_out_template.format(pdbcode, helix_b.index, helix_b.chain, helix_a.index,
helix_a.chain, "pdb")
inverted_fragment.write_pdb(pdbout)
# Write contact maps
conkit.io.write(
self._library_out_template.format(pdbcode, helix_a.index, helix_a.chain, helix_b.index,
helix_b.chain, "mapalign"), "mapalign", fragment_cmap)
conkit.io.write(
self._library_out_template.format(pdbcode, helix_a.index, helix_a.chain, helix_b.index,
helix_b.chain, "aleigen"), "aleigen", fragment_cmap)
inverted_cmap = utils.invert_contactmap(fragment_cmap)
conkit.io.write(
self._library_out_template.format(pdbcode, helix_b.index, helix_b.chain, helix_a.index,
helix_a.chain, "mapalign"), "mapalign", inverted_cmap)
conkit.io.write(
self._library_out_template.format(pdbcode, helix_b.index, helix_b.chain, helix_a.index,
helix_a.chain, "aleigen"), "aleigen", inverted_cmap)
def all_vs_all_gesamt(self, inputdir, outdir, nthreads=1):
"""For each the members of the library, obtain the distance with all the others. This step is required to \
obtain the distance matrices: :py:attr:`~swamp.utils.swamplibrary.SwampLibrary.qscore_matrix`, \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.rmsd_matrix` and \
:py:attr:`~swamp.utils.swamplibrary.SwampLibrary.nalign_matrix`
:param str inputdir: the input directory with the pdb files created by \
:py:func:`~swamp.utils.swamplibrary.SwampLibrary.make_library`
:param str outdir: the output directory where the .hit files will be created
:param int nthreads: number of threads to be used in the gesamt archive scan (default 1)
"""
# Make the archive
self.logger.info("Creating gesamt archive at %s" % os.path.join(outdir, "gesamt_archive"))
gesamt_makearchive = Gesamt(workdir=None, mode="make-archive", pdb_archive=inputdir, pdbin=None,
gesamt_archive=os.path.join(outdir, "gesamt_archive"))
gesamt_makearchive.run()
# Scan the archive with all the members of the library
"""
module for describing data process.
All data structure is describing as nested combination of `dict` or `list` for `ndarray`.
Data process is a translation from data structure to another data structure or typical `ndarray`.
Data cache can be implemented based on the dynamic features of `list` and `dict`.
The full data structure is
.. code::
{
"particle":{
"A":{"p":...,"m":...}
...
},
"decay":[
{
"A->R1+B": {
"R1": {
"ang": {
"alpha":[...],
"beta": [...],
"gamma": [...]
},
"z": [[x1,y1,z1],...],
"x": [[x2,y2,z2],...]
},
"B" : {...}
},
"R->C+D": {
"C": {
...,
"aligned_angle":{
"alpha":[...],
"beta":[...],
"gamma":[...]
}
},
"D": {...}
},
},
{
"A->R2+C": {...},
"R2->B+D": {...}
},
...
],
"weight": [...]
}
"""
import random
from pprint import pprint
import numpy as np
from .config import get_config
from .tensorflow_wrapper import tf
# import tensorflow as tf
# from pysnooper import snoop
try:
from collections.abc import Iterable
except ImportError: # python version < 3.7
from collections import Iterable
def set_random_seed(seed):
"""
set random seed for random, numpy and tensorflow
"""
np.random.seed(seed)
tf.random.set_seed(seed)
random.seed(seed)
def load_dat_file(
fnames, particles, dtype=None, split=None, order=None, _force_list=False
):
"""
Load ``*.dat`` file(s) of 4-momenta of the final particles.
:param fnames: String or list of strings. File names.
:param particles: List of Particle. Final particles.
:param dtype: Data type.
:param split: sizes of each splited dat files
:param order: transpose order
:return: Dictionary of data indexed by Particle.
"""
n = len(particles)
if dtype is None:
dtype = get_config("dtype")
if isinstance(fnames, str):
fnames = [fnames]
elif isinstance(fnames, Iterable):
fnames = list(fnames)
else:
raise TypeError("fnames must be string or list of strings")
datas = []
sizes = []
for fname in fnames:
if fname.endswith(".npz"):
data = np.load(fname)["arr_0"]
elif fname.endswith(".npy"):
data = np.load(fname)
else:
data = np.loadtxt(fname, dtype=dtype)
data = np.reshape(data, (-1, 4))
sizes.append(data.shape[0])
datas.append(data)
if split is None:
n_total = sum(sizes)
if n_total % n != 0:
raise ValueError("number of data find {}/{}".format(n_total, n))
n_data = n_total // n
split = [size // n_data for size in sizes]
if order is None:
order = (1, 0, 2)
ret = {}
idx = 0
for size, data in zip(split, datas):
data_1 = data.reshape((-1, size, 4))
data_2 = data_1.transpose(order)
for i in data_2:
part = particles[idx]
ret[part] = i
idx += 1
return ret
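# A hypothetical usage sketch (the file name and particle labels below are placeholders,
# not part of the package):
# >>> p4 = load_dat_file("data/final_states.dat", ["B", "C", "D"])
# >>> p4["B"]  # four-momenta of particle "B", one row per event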
def save_data(file_name, obj, **kwargs):
"""Save structured data to files. The arguments will be passed to ``numpy.save()``."""
return np.save(file_name, obj, **kwargs)
def save_dataz(file_name, obj, **kwargs):
"""Save compressed structured data to files. The arguments will be passed to ``numpy.save()``."""
return np.savez(file_name, obj, **kwargs)
def load_data(file_name, **kwargs):
"""Load data file from save_data. The arguments will be passed to ``numpy.load()``."""
if "allow_pickle" not in kwargs:
kwargs["allow_pickle"] = True
data = np.load(file_name, **kwargs)
try:
return data["arr_0"].item()
except IndexError:
try:
return data.item()
except ValueError:
return data
def _data_split(dat, batch_size, axis=0):
data_size = dat.shape[axis]
if axis == 0:
for i in range(0, data_size, batch_size):
yield dat[i : min(i + batch_size, data_size)]
elif axis == -1:
for i in range(0, data_size, batch_size):
yield dat[..., i : min(i + batch_size, data_size)]
else:
raise Exception("unsupported axis: {}".format(axis))
def data_generator(data, fun=_data_split, args=(), kwargs=None, MAX_ITER=1000):
"""Data generator: call ``fun`` to each ``data`` as a generator. The extra arguments will be passed to ``fun``."""
kwargs = kwargs if kwargs is not None else {}
def _gen(dat):
if isinstance(dat, dict):
if not dat:
for i in range(MAX_ITER):
yield {}
ks, vs = [], []
for k, v in dat.items():
ks.append(k)
vs.append(_gen(v))
for s_data in zip(*vs):
yield type(dat)(zip(ks, s_data))
elif isinstance(dat, list):
if not dat:
for i in range(MAX_ITER):
yield []
vs = []
for v in dat:
vs.append(_gen(v))
for s_data in zip(*vs):
yield list(s_data)
elif isinstance(dat, tuple):
vs = []
for v in dat:
vs.append(_gen(v))
for s_data in zip(*vs):
yield s_data
else:
for i in fun(dat, *args, **kwargs):
yield i
return _gen(data)
def data_split(data, batch_size, axis=0):
"""
Split ``data`` into chunks of ``batch_size`` along ``axis``.
:param data: structured data
:param batch_size: Integer, data size for each split data
:param axis: Integer, axis for split, [option]
:return: a generator for split data
>>> data = {"a": [np.array([1.0, 2.0]), np.array([3.0, 4.0])], "b": {"c": np.array([5.0, 6.0])}, "d": [], "e": {}}
>>> for i, data_i in enumerate(data_split(data, 1)):
... print(i, data_to_numpy(data_i))
...
0 {'a': [array([1.]), array([3.])], 'b': {'c': array([5.])}, 'd': [], 'e': {}}
1 {'a': [array([2.]), array([4.])], 'b': {'c': array([6.])}, 'd': [], 'e': {}}
"""
return data_generator(
data, fun=_data_split, args=(batch_size,), kwargs={"axis": axis}
)
split_generator = data_split
def data_map(data, fun, args=(), kwargs=None):
"""Apply fun for each data. It returns the same structure."""
kwargs = kwargs if kwargs is not None else {}
if isinstance(data, dict):
return type(data)(
{k: data_map(v, fun, args, kwargs) for k, v in data.items()}
)
if isinstance(data, list):
return [data_map(data_i, fun, args, kwargs) for data_i in data]
if isinstance(data, tuple):
return tuple([data_map(data_i, fun, args, kwargs) for data_i in data])
return fun(data, *args, **kwargs)
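# A minimal sketch of data_map on toy data (illustrative only): the nested structure
# is preserved while the function is applied to every leaf array.
# >>> data_map({"a": [np.array([1.0, 2.0])]}, np.square)
# {'a': [array([1., 4.])]}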
def data_struct(data):
"""get the structure of data, keys and shape"""
if isinstance(data, dict):
return type(data)({k: data_struct(v) for k, v in data.items()})
if isinstance(data, list):
return [data_struct(data_i) for data_i in data]
if isinstance(data, tuple):
return tuple([data_struct(data_i) for data_i in data])
if hasattr(data, "shape"):
return tuple(data.shape)
return data
def data_mask(data, select):
"""
This function uses a boolean mask to select data.
:param data: data to select
:param select: 1-d boolean array for selection
:return: data after selection
"""
ret = data_map(data, tf.boolean_mask, args=(select,))
return ret
def data_cut(data, expr, var_map=None):
"""cut data with boolean expression
:param data: data need to cut
:param expr: cut expression
:param var_map: variable map between parameters in expr and data, [option]
:return: data after being cut,
"""
var_map = var_map if isinstance(var_map, dict) else {}
import sympy as sym
expr_s = sym.sympify(expr)
params = tuple(expr_s.free_symbols)
args = [data_index(data, var_map.get(i.name, i.name)) for i in params]
expr_f = sym.lambdify(params, expr, "tensorflow")
mask = expr_f(*args)
return data_mask(data, mask)
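# A small sketch of a cut, assuming the nested layout from the module docstring
# (the variable name "m" and the toy values are illustrative only):
# >>> toy = {"particle": {"A": {"m": np.array([0.5, 1.5, 2.5])}}}
# >>> selected = data_cut(toy, "m > 1.0", var_map={"m": ("particle", "A", "m")})
# which keeps, in every leaf, only the entries where the mass of "A" exceeds 1.0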
def data_merge(*data, axis=0):
"""This function merges data with the same structure."""
assert len(data) > 0
if isinstance(data[0], dict):
assert all([isinstance(i, dict) for i in data]), "not all type same"
all_idx = [set(list(i)) for i in data]
idx = set.intersection(*all_idx)
return type(data[0])(
{i: data_merge(*[data_i[i] for data_i in data]) for i in idx}
)
if isinstance(data[0], list):
assert all([isinstance(i, list) for i in data]), "not all type same"
return [data_merge(*data_i) for data_i in zip(*data)]
if isinstance(data[0], tuple):
assert all([isinstance(i, tuple) for i in data]), "not all type same"
return tuple([data_merge(*data_i) for data_i in zip(*data)])
m_data = tf.concat(data, axis=axis)
return m_data
def data_shape(data, axis=0, all_list=False):
"""
Get data size.
:param data: Data array
:param axis: Integer. Index of the shape dimension to return.
:param all_list: Boolean. If True, return the shapes of all leaf arrays instead of a single size.
:return: the size of the first leaf array along ``axis``, or a list of shapes if ``all_list`` is True
"""
def flatten(dat):
ret = []
def data_list(dat1):
if hasattr(dat1, "shape"):
ret.append(dat1.shape)
else:
ret.append(())
data_map(dat, data_list)
return ret
shapes = flatten(data)
if all_list:
return shapes
return shapes[0][axis]
def data_to_numpy(dat):
"""Convert Tensor data to ``numpy.ndarray``."""
def to_numpy(data):
if hasattr(data, "numpy"):
return data.numpy()
return data
dat = data_map(dat, to_numpy)
return dat
def data_to_tensor(dat):
"""convert data to ``tensorflow.Tensor``."""
def to_tensor(data):
return tf.convert_to_tensor(data)
dat = data_map(dat, to_tensor)
return dat
def flatten_dict_data(data, fun="{}/{}".format):
"""Flatten data as dict with structure named as ``fun``."""
def dict_gen(dat):
return dat.items()
def list_gen(dat):
return enumerate(dat)
if isinstance(data, (dict, list, tuple)):
ret = {}
gen_1 = dict_gen if isinstance(data, dict) else list_gen
for i, data_i in gen_1(data):
tmp = flatten_dict_data(data_i)
if isinstance(tmp, (dict, list, tuple)):
gen_2 = dict_gen if isinstance(tmp, dict) else list_gen
for j, tmp_j in gen_2(tmp):
ret[fun(i, j)] = tmp_j
else:
ret[i] = tmp
return ret
return data
def data_index(data, key):
"""Indexing data for key or a list of keys."""
def idx(data, i):
if isinstance(i, int):
return data[i]
assert isinstance(data, dict)
if i in data:
return data[i]
for k, v in data.items():
if str(k) == str(i):
return v
raise ValueError("{} is not found".format(i))
if isinstance(key, (list, tuple)):
keys = list(key)
if len(keys) > 1:
return data_index(idx(data, keys[0]), keys[1:])
return idx(data, keys[0])
return idx(data, key)
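# Nested indexing sketch, matching the structure described in the module docstring:
# >>> data = {"particle": {"A": {"m": np.array([1.0, 2.0])}}}
# >>> data_index(data, ("particle", "A", "m"))
# array([1., 2.])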
def data_strip(data, keys):
if isinstance(keys, str):
keys = [keys]
if isinstance(data, dict):
ret = {}
for k, v in data.items():
if k not in keys:
ret[k] = data_strip(v, keys)
return ret
if isinstance(data, list):
return [data_strip(data_i, keys) for data_i in data]
if isinstance(data, tuple):
return tuple([data_strip(data_i, keys) for data_i in data])
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import os
import re
import sys
import time
from enum import Enum
from inspect import FrameInfo
from json import JSONDecodeError
from typing import Callable, Dict, Iterable, List, Optional, TypeVar, Union
from openstack_cli.modules.apputils.curl import CURLResponse, CurlRequestType, curl
from openstack_cli.modules.apputils.progressbar import CharacterStyles, ProgressBar, ProgressBarFormat, \
ProgressBarOptions
from openstack_cli.modules.apputils.terminal.colors import Colors
from openstack_cli.modules.openstack.api_objects import APIProjects, ComputeFlavorItem, ComputeFlavors, ComputeLimits, \
ComputeServerActionRebootType, ComputeServerActions, ComputeServerInfo, ComputeServers, DiskImageInfo, DiskImages, \
LoginResponse, NetworkItem, NetworkLimits, Networks, Region, RegionItem, Subnets, Token, VMCreateResponse, \
VMKeypairItem, \
VMKeypairItemValue, VMKeypairs, VolumeV3Limits
from openstack_cli.modules.openstack.objects import AuthRequestBuilder, AuthRequestType, EndpointTypes, ImageStatus, \
OSFlavor, OSImageInfo, OSNetwork, OpenStackEndpoints, OpenStackQuotaType, OpenStackQuotas, OpenStackUsers, \
OpenStackVM, OpenStackVMInfo, ServerPowerState, ServerState, VMCreateBuilder
T = TypeVar('T')
class JSONValueError(ValueError):
KEY: str = "{@json@}:"
def __init__(self, data: str):
self.__data = None
try:
json.loads(data)
self.__data = data
except (TypeError, JSONDecodeError):
super(JSONValueError, self).__init__(data)
def __str__(self):
return f"{self.KEY}{self.__data}" if self.__data else super(JSONValueError, self).__str__()
class LocalCacheType(Enum):
SERVERS = 0
KEYPAIR = 1
class OpenStack(object):
def __init__(self, conf, debug: bool = False):
"""
:type conf openstack_cli.core.config.Configuration
"""
self.__last_errors: List[str] = []
self.__login_api = f"{conf.os_address}/v3"
self._conf = conf
self.__endpoints__: Optional[OpenStackEndpoints] = None
self.__cache_images: Dict[str, DiskImageInfo] = {}
# initializes in 2 steps: scanning images and server list when it is requested
self.__users_cache: Optional[OpenStackUsers] = None
self.__flavors_cache: Optional[Dict[str, OSFlavor]] = {}
self.__networks_cache: Optional[OSNetwork] = None
self.__debug = debug or os.getenv("API_DEBUG", False) == "True"
self.__local_cache: Dict[LocalCacheType, object] = {}
pattern_str = f"[\\W\\s]*(?P<name>{'|'.join(conf.supported_os_names)})(\\s|\\-|\\_)(?P<ver>[\\d\\.]+\\s*[\\w]*).*$"
self.__os_image_pattern = re.compile(pattern_str, re.IGNORECASE)
self.__is_auth: bool = False
def __invalidate_local_cache(self, cache_type: LocalCacheType):
self.__local_cache[cache_type.value] = None
def __set_local_cache(self, cache_type: LocalCacheType, value: T) -> T:
self.__local_cache[cache_type.value] = value
return value
def __get_local_cache(self, cache_type: LocalCacheType) -> T:
if cache_type.value not in self.__local_cache:
return None
return self.__local_cache[cache_type.value]
def __init_after_auth__(self):
def __cache_ssh_keys():
conf_keys_hashes = [hash(k) for k in self._conf.get_keys()]
server_keys = self.get_keypairs()
for server_key in server_keys:
if hash(server_key) not in conf_keys_hashes:
try:
self._conf.add_key(server_key)
except ValueError:
print(f"Key {server_key.name} is present locally but have wrong hash, replacing with server key")
self._conf.delete_key(server_key.name)
self._conf.add_key(server_key)
self._conf.cache.set(VMKeypairItemValue, "this super cache")
def __cached_network():
self.__networks_cache = OSNetwork(serialized_obj=self._conf.cache.get(OSNetwork))
def __cached_images():
self.__cache_images = {k: DiskImageInfo(serialized_obj=v) for k, v in json.loads(self._conf.cache.get(DiskImageInfo)).items()}
def __cached_flavors():
self.__flavors_cache = {k: OSFlavor(serialized_obj=v) for k, v in json.loads(self._conf.cache.get(OSFlavor)).items()}
def __cached_ssh_keys():
return True
_cached_objects = {
DiskImageInfo: {
False: lambda: self.images,
True: lambda: __cached_images()
},
OSFlavor: {
False: lambda: self.flavors,
True: lambda: __cached_flavors()
},
OSNetwork: {
False: lambda: self.networks,
True: lambda: __cached_network()
},
VMKeypairItemValue: {
False: lambda: __cache_ssh_keys(),
True: lambda: __cached_ssh_keys()
}
}
need_recache: bool = False in [self._conf.cache.exists(obj) for obj in _cached_objects]
if need_recache and not self.__debug:
p = ProgressBar("Syncing to the server data",20,
ProgressBarOptions(CharacterStyles.simple, ProgressBarFormat.PROGRESS_FORMAT_STATUS)
)
p.start(len(_cached_objects))
for cache_item, funcs in _cached_objects.items():
p.progress_inc(1, cache_item.__name__)
funcs[self._conf.cache.exists(cache_item)]()
p.stop(hide_progress=True)
else:
for cache_item, funcs in _cached_objects.items():
funcs[self._conf.cache.exists(cache_item)]()
if not self.__users_cache:
self.users
def __check_token(self) -> bool:
headers = {
"X-Auth-Token": self._conf.auth_token,
"X-Subject-Token": self._conf.auth_token
}
r = self._request_simple(
EndpointTypes.identity,
"/auth/tokens",
req_type=CurlRequestType.GET,
headers=headers
)
if not r:
from openstack_cli.core.output import Console
Console.print_error("Authentication server is not accessible")
return False
if r.code not in [200, 201]:
return False
l_resp = LoginResponse(serialized_obj=r.content)
self.__endpoints__ = OpenStackEndpoints(self._conf, l_resp)
self._conf.user_id = l_resp.token.user.id
return True
def __auth(self, _type: AuthRequestType = AuthRequestType.SCOPED) -> bool:
if self._conf.auth_token and self.__check_token():
return True
if _type == AuthRequestType.UNSCOPED:
data = AuthRequestBuilder.unscoped_login(self._conf.os_login, self._conf.os_password)
elif _type == AuthRequestType.SCOPED and self._conf.project.id:
data = AuthRequestBuilder.scoped_login(self._conf.os_login, self._conf.os_password, self._conf.project)
else:
data = AuthRequestBuilder.normal_login(self._conf.os_login, self._conf.os_password)
r = self._request_simple(
EndpointTypes.identity,
"/auth/tokens",
req_type=CurlRequestType.POST,
data=data
)
if not r:
from openstack_cli.core.output import Console
data = r.raw if r else "none"
Console.print_error(f"Authentication server is not accessible: {data}")
return False
if r.code not in [200, 201]:
return False
auth_token = r.headers["X-Subject-Token"] if "X-Subject-Token" in r.headers else None
self._conf.auth_token = auth_token
l_resp = LoginResponse(serialized_obj=r.from_json())
if _type == AuthRequestType.UNSCOPED:
l_resp.token = Token(catalog=[])
self.__endpoints__ = None
else:
self._conf.user_id = l_resp.token.user.id
self.__endpoints__ = OpenStackEndpoints(self._conf, l_resp)
return True
def last_errors(self) -> List[str]:
"""
Return the list of last errors; use :py:func:`clear_errors` to reset it.
"""
return self.__last_errors
def clear_errors(self):
self.__last_errors = []
@property
def has_errors(self) -> bool:
return len(self.__last_errors) > 0
@property
def __endpoints(self) -> OpenStackEndpoints:
return self.__endpoints__
@property
def endpoints(self):
return self.__endpoints
def logout(self):
self._conf.auth_token = ""
def login(self, _type: AuthRequestType = AuthRequestType.SCOPED) -> bool:
if self.__auth(_type):
self.__is_auth = True
if _type != AuthRequestType.UNSCOPED and self._conf.region:
self.__init_after_auth__()
return True
else:
self.__last_errors.append("Login failed, some exception happen")
return False
def __get_origin_frame(self, base_f_name: str) -> List[FrameInfo]:
import inspect
_frames = inspect.stack()
for i in range(0, len(_frames)):
if _frames[i].function == base_f_name:
return [_frames[i+3], _frames[i+2], _frames[i+1]]
def _request_simple(self,
endpoint: EndpointTypes,
relative_uri: str,
params: Dict[str, str] = None,
headers: Dict[str, str] = None,
req_type: CurlRequestType = CurlRequestType.GET,
data: str or dict = None
) -> CURLResponse or None:
if endpoint == EndpointTypes.identity:
_endpoint: str = f"{self.__login_api}"
else:
_endpoint: str = self.__endpoints.get_endpoint(endpoint)
url = f"{_endpoint}{relative_uri}"
_t_start = 0
if self.__debug:
_t_start = time.time_ns()
r = None
try:
return curl(url, req_type=req_type, params=params, headers=headers, data=data)
except TimeoutError:
self.__last_errors.append("Timeout exception on API request")
return None
finally:
if self.__debug:
from openstack_cli.core.output import Console
_t_delta = time.time_ns() - _t_start
_t_sec = _t_delta / 1000000000
_params = ",".join([f"{k}={v}" for k, v in params.items()]) if params else "None"
_f_caller = self.__get_origin_frame(self._request_simple.__name__)
_chunks = [
f"[{_t_sec:.2f}s]",
f"[{req_type.value}]",
f"[{endpoint.value}]",
f" {relative_uri}; ",
str(Colors.RESET),
f"{Colors.BRIGHT_BLACK}{os.path.basename(_f_caller[0].filename)}{Colors.RESET}: ",
f"{Colors.BRIGHT_BLACK}->{Colors.RESET}".join([f"{f.function}:{f.lineno}" for f in _f_caller])
]
Console.print_debug("".join(_chunks))
def _request(self,
endpoint: EndpointTypes,
relative_uri: str,
params: Dict[str, str] = None,
req_type: CurlRequestType = CurlRequestType.GET,
is_json: bool = False,
page_collection_name: str = None,
data: str or dict = None
) -> str or dict or None:
if not self.__is_auth and not self.login():
raise RuntimeError("Not Authorised")
_endpoint = self.__login_api if endpoint == EndpointTypes.identity else self.__endpoints.get_endpoint(endpoint)
_t_start = 0
if self.__debug:
_t_start = time.time_ns()
url = f"{_endpoint}{relative_uri}"
headers = {
"X-Auth-Token": self._conf.auth_token
}
r = None
try:
r = curl(url, req_type=req_type, params=params, headers=headers, data=data)
except TimeoutError:
self.__last_errors.append("Timeout exception on API request")
return
finally:
if self.__debug:
from openstack_cli.core.output import Console
_t_delta = time.time_ns() - _t_start
_t_sec = _t_delta / 1000000000
_params = ",".join([f"{k}={v}" for k, v in params.items()]) if params else "None"
_f_caller = self.__get_origin_frame(self._request.__name__)
_chunks = [
f"[{_t_sec:.2f}s]",
f"[{req_type.value}]",
f"[{endpoint.value}]",
f" {relative_uri}; ",
str(Colors.RESET),
f"{Colors.BRIGHT_BLACK}{os.path.basename(_f_caller[0].filename)}{Colors.RESET}: ",
f"{Colors.BRIGHT_BLACK}->{Colors.RESET}".join([f"{f.function}:{f.lineno}" for f in _f_caller])
]
Console.print_debug("".join(_chunks))
if r.code not in [200, 201, 202, 204]:
# if not data:
# return None
raise JSONValueError(r.content)
if r.code in [204]:
return ""
content = r.from_json() if is_json else r.content
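# Follow server-side pagination: when the JSON payload exposes a "next" link
# (either top-level or under "links"), the block below recursively fetches the
# next page and extends the named collection before returning.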
if is_json and page_collection_name and isinstance(content, dict):
if "next" in content and content["next"]:
uri, _, args = content["next"].partition("?")
elif "links" in content and "next" in content["links"] and content["links"]["next"]:
uri, _, args = content["links"]["next"].partition("?")
else:
return content
params = dict([i.split("=") for i in args.split("&")])
next_page = self._request(
endpoint,
uri,
params=params,
req_type=req_type,
is_json=is_json,
page_collection_name=page_collection_name
)
content[page_collection_name].extend(next_page[page_collection_name])
return content
@property
def regions(self) -> List[RegionItem]:
r = self._request(EndpointTypes.identity, "/regions", is_json=True, page_collection_name="regions")
return Region(serialized_obj=r).regions
@property
def projects(self):
d = self._request(
EndpointTypes.identity,
"/auth/projects",
is_json=True,
req_type=CurlRequestType.GET,
)
return APIProjects(serialized_obj=d).projects
@property
def users(self) -> OpenStackUsers:
if self.__users_cache:
return self.__users_cache
self.__users_cache = OpenStackUsers(self.images)
self.__users_cache.add_user(self._conf.user_id, self._conf.os_login)
return self.__users_cache
@property
def images(self) -> List[DiskImageInfo]:
if self.__cache_images:
return list(self.__cache_images.values())
params = {
"limit": "1000"
}
images = DiskImages(
serialized_obj=self._request(
EndpointTypes.image,
"/images",
is_json=True,
page_collection_name="images",
params=params
)
).images
_cached_images = {}
_cached = {}
for img in images:
_cached_images[img.id] = img
_cached[img.id] = img.serialize()
self._conf.cache.set(DiskImageInfo, _cached)
self.__cache_images = _cached_images
return list(self.__cache_images.values())
def get_os_image(self, image: DiskImageInfo) -> Optional[OSImageInfo]:
if image.image_type: # process only base images
return None
match = re.match(self.__os_image_pattern, image.name)
if not match:
return None
os_img = OSImageInfo(
match.group("name"),
match.group("ver"),
image
)
# === here is some lame way to filter out image forks or non-base images by analyzing the image name
# nsopy/methods/universal.py
from __future__ import division
import numpy as np
import copy
from nsopy.methods.base import SolutionMethod
from nsopy.observer_pattern import Observable
from nsopy.utils import invert_oracle_sense
UGM_DEFAULT_EPSILON = 1.0
UGM_DEFAULT_L_0 = 1.1
def _bregman_map(M, lambda_k, diff_d_k):
""" Bregman map, according to [1], Eq. 2.9, with f(x) := -d(lambda), and M*psi(x,y) := M/2*||lambda-lambda_k||_2
Old:
lambda_bregman = lambda_k + float(1.0)/M*diff_d_k
return self._project_on_dual_feasible_set(lambda_bregman)
"""
return lambda_k + float(1.0)/M*diff_d_k
class UniversalPGM(SolutionMethod, Observable):
"""
Implementation of Algorithm (2.16) in [1], the Universal Primal Gradient Method.
Note that the algorithm in [1] is written for the minimization of a convex function, while in the duality
framework we maximize a concave function. Hence, f(x) := -d(lambda).
[1] Universal Gradient Methods for Convex Optimization Problems, Yu. Nesterov, CORE Discussion Paper, 2013.
Note: zeta(x,y) = ||y-x||^2_2 is used as the prox function, throughout.
"""
def __init__(self, oracle, projection_function, dimension=0, epsilon=UGM_DEFAULT_EPSILON, averaging=False, sense='min'):
"""
Averaging: Nesterov's methods give guarantees on variables marked with a tilde. Those are supposed to be the
actual outputs of the method, but they require extra computations (evaluation of d(lambda_tilde)), and these can
be carried out after the method has gone through all the required iterations. We therefore introduce the
following additional attributes: lambda_hat_k and d_hat_k. These replace lambda_k and d_k.
- averaging=True establishes that lambda_k and d_k (the method's outputs) are the lambda_tilde_k and d_tilde_k
(i.e., those extra computations are carried out during execution, even though it would not in principle be
necessary)
- averaging=False: the method's outputs are lambda_k and d_k
TL;DR: both options deliver valid iterates, but only the iterates produced with averaging=True are endowed with
the theoretical properties; on the other hand, they require extra computations (one extra oracle call per
iteration).
"""
super(UniversalPGM, self).__init__()
self.desc = r'UPGM, $\epsilon = {}$'.format(epsilon)
if sense == 'min':
self.oracle = oracle
elif sense == 'max':
self.oracle = invert_oracle_sense(oracle)
else:
raise ValueError('Sense should be either "min" or "max"')
self.projection_function = projection_function
self.iteration_number = 1
self.oracle_calls = 0
self.d_hat_k = np.zeros(1, dtype=float)
if dimension == 0:
self.lambda_hat_k = self.projection_function(0)
self.dimension = len(self.lambda_hat_k)
else:
self.dimension = dimension
self.lambda_hat_k = self.projection_function(np.zeros(self.dimension, dtype=float))
self.x_hat_k = 0
# specific to U-PGM
self.diff_d_hat_k = 0
self.L_k = float(UGM_DEFAULT_L_0) # if you use something else, make sure it's a float!
self.epsilon = float(epsilon)
self.i_k = 0
# -- Averaging -- Synthesize averaged outputs
# Variables to synthesize solution from algorithm's process
# records of d_tilda_k and lambda_tilda_k ("averages") according to Eqns. below 2.17
self.S_k = float(1)/float(self.L_k)
self.lambda_tilde_k = copy.deepcopy(self.lambda_hat_k)
self.sum_lambda_tilde_k = copy.deepcopy(self.lambda_hat_k) # \sum_i=0^k lambda_tilda_k
self.d_tilde_k = 0
self.sum_d_tilde_k = 0
self.averaging = averaging
if self.averaging:
self.lambda_k = self.lambda_tilde_k
self.d_k = self.d_tilde_k
self.diff_d_k = self.diff_d_hat_k
self.x_k = self.x_hat_k
else:
self.lambda_k = self.lambda_hat_k
self.d_k = self.d_hat_k
self.diff_d_k = self.diff_d_hat_k
self.x_k = self.x_hat_k
# -- Averaging --
# for record keeping
self.method_name = 'UPGM'
self.parameter = epsilon
def _bregman_map(self, M, lambda_k, subgrad_lambda_k):
return self.projection_function(_bregman_map(M, lambda_k, subgrad_lambda_k))
def dual_step(self):
###############
# Preparation #
###############
# if it's the first iteration, we have to make an oracle call to fill the subgradient and the d_k
# for lambda_0; the algorithm assumes that these quantities are known for each iterate (including 0-th)
# if not self.diff_d_k:
if self.iteration_number == 1:
self.x_hat_k, self.d_hat_k, self.diff_d_hat_k = self.oracle(self.lambda_hat_k)
self.oracle_calls += 1
self.lambda_k = self.lambda_hat_k
self.d_k = self.d_hat_k
self.diff_d_k = self.diff_d_hat_k
self.notify_observers()
##############################
# Step 1 (see (2.16) in [1]) #
##############################
i_k = 0
smallest_i_k_found = 0
while not smallest_i_k_found:
# find next test point
lambda_k_plus = self._bregman_map(2 ** i_k * self.L_k, self.lambda_hat_k, self.diff_d_hat_k)
# query oracle at test point
x_k_plus, d_k_plus, diff_d_k_plus, = self.oracle(lambda_k_plus)
self.oracle_calls += 1
# check condition given in the inequality of Step 1.
if (-d_k_plus <= -self.d_hat_k
+ np.dot(-self.diff_d_hat_k, lambda_k_plus - self.lambda_hat_k)
+ 2**(i_k-1)*self.L_k*(np.linalg.norm(lambda_k_plus-self.lambda_hat_k, 2)**2)
+ 0.5*self.epsilon):
smallest_i_k_found = 1
else:
i_k += 1
##########
# Step 2 #
##########
self.iteration_number += 1
self.L_k = 2**(i_k-1)*self.L_k
# -- Averaging -- Synthesize outputs
self.S_k += float(1)/float(self.L_k)
self.sum_lambda_tilde_k += float(1) / float(self.L_k) * self.lambda_hat_k
self.lambda_tilde_k = float(1) / float(self.S_k) * self.sum_lambda_tilde_k
self.sum_d_tilde_k += float(1) / float(self.L_k) * self.d_hat_k
self.d_tilde_k = float(1) / float(self.S_k) * self.sum_d_tilde_k
# self.S_k += self.L_k
# self.sum_lambda_tilde_k += self.L_k * self.lambda_hat_k
# self.lambda_tilde_k = float(1) / float(self.S_k) * self.sum_lambda_tilde_k
# self.sum_d_tilde_k += self.L_k * self.d_hat_k
# self.d_tilde_k = float(1) / float(self.S_k) * self.sum_d_tilde_k
# -- Averaging --
# Update
self.lambda_hat_k = lambda_k_plus
# and for record keeping...
self.d_hat_k = d_k_plus
self.diff_d_hat_k = diff_d_k_plus
self.x_hat_k = x_k_plus
self.i_k = i_k
# Calculate ouputs depending on whether averaging is active or not:
if self.averaging:
# we have an additional oracle call
# projection here would not be required technically, but because of numerics when constructing the convex
# combination, we call it
self.lambda_k = self.projection_function(self.lambda_tilde_k)
self.x_k, self.d_k, self.diff_d_k = self.oracle(self.lambda_k)
self.oracle_calls += 1
else:
self.x_k = self.x_hat_k
self.d_k = self.d_hat_k
self.lambda_k = self.lambda_hat_k
self.diff_d_k = self.diff_d_hat_k
# log signal to any observers connected
self.notify_observers()
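# A hypothetical usage sketch (the toy oracle and names below are illustrative, not part
# of the package): the oracle must return a triple (x_k, d_k, diff_d_k) for a given
# lambda, and the projection function maps an iterate back onto the dual-feasible set.
# >>> def toy_oracle(lam):
# ...     d = -np.sum(lam ** 2)     # toy concave dual function
# ...     return 0, d, -2.0 * lam   # (x_k, d(lambda), gradient of d)
# >>> method = UniversalPGM(toy_oracle, projection_function=lambda lam: lam, dimension=2)
# >>> for _ in range(10):
# ...     method.dual_step()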
class UniversalDGM(SolutionMethod, Observable):
"""
Implementation of Algorithm (3.2) in [1], the Universal Dual Gradient Method.
Note that the algorithm in [1] is written for the minimization of a convex function, while in the duality
framework we maximize a concave function. Hence, f(x) := -d(lambda).
[1] Universal Gradient Methods for Convex Optimization Problems, Yu. Nesterov, CORE Discussion Paper, 2013.
Note: zeta(x,y) = ||y-x||^2_2 is used as the prox function, throughout.
"""
def __init__(self, oracle, projection_function, dimension=0, epsilon=UGM_DEFAULT_EPSILON, averaging=False, sense='min'):
super(UniversalDGM, self).__init__()
self.desc = r'UDGM, $\epsilon = {}$'.format(epsilon)
self.oracle = oracle
if sense == 'min':
self.oracle = oracle
elif sense == 'max':
self.oracle = invert_oracle_sense(oracle)
else:
raise ValueError('Sense should be either "min" or "max"')
self.projection_function = projection_function
self.iteration_number = 1
self.oracle_calls = 0
# self.d_k = np.zeros(1, dtype=float)
if dimension == 0:
self.lambda_hat_k = self.projection_function(0)
self.dimension = len(self.lambda_hat_k)
else:
self.dimension = dimension
self.lambda_hat_k = self.projection_function(np.zeros(self.dimension, dtype=float))
# self.dimension = dimension
# self.lambda_k = self.projection_function(np.zeros(self.dimension, dtype=float))
# # Init of the method
# # if it's the first iteration, we have to make an oracle call to fill the subgradient and the d_k
# # for lambda_0; the algorithm assumes that these quantities are known for each iterate (including 0-th)
# # if self.iteration_number == 1:
# self.x_k, self.d_k, self.diff_d_k = self.oracle(self.lambda_k)
# self.oracle_calls += 1
self.x_hat_k = 0
self.d_hat_k = 0
self.diff_d_hat_k = 0
# specific to U-DGM
self.L_k = float(UGM_DEFAULT_L_0) # if you use something else, make sure it's a float!
self.epsilon = float(epsilon)
self.i_k = 0
self.phi_k = copy.deepcopy(self.lambda_hat_k)
# -- Averaging -- Synthesize outputs
# Variables to synthesize solution from algorithm's process
# records of d_tilda_k and lambda_tilda_k ("averages") according to Eqns. below 2.17
self.S_k = float(1)/float(self.L_k)
self.lambda_tilde_k = copy.deepcopy(self.lambda_hat_k)
self.sum_lambda_tilde_k = copy.deepcopy(self.lambda_hat_k) # \sum_i=0^k lambda_tilda_k
self.d_tilde_k = 0
self.sum_d_tilde_k = 0
self.averaging = averaging
if self.averaging:
self.lambda_k = self.lambda_tilde_k
self.d_k = self.d_tilde_k
self.diff_d_k = self.diff_d_hat_k
self.x_k = self.x_hat_k
else:
self.lambda_k = self.lambda_hat_k
self.d_k = self.d_hat_k
self.diff_d_k = self.diff_d_hat_k
self.x_k = self.x_hat_k
# -- Averaging --
# for record keeping
self.method_name = 'UDGM'
self.parameter = epsilon
def _bregman_map(self, M, lambda_k, subgrad_lambda_k):
return self.projection_function(_bregman_map(M, lambda_k, subgrad_lambda_k))
def dual_step(self):
# Implementation of Algorithm (3.2) in [1], the Universal Dual Gradient Method.
if self.iteration_number == 1:
# Init
# if it's the first iteration, we have to make an oracle call to fill the subgradient and the d_k
# for lambda_0; the algorithm assumes that these quantities are known for each iterate (including 0-th)
# if self.iteration_number == 1:
self.x_hat_k, self.d_hat_k, self.diff_d_hat_k = self.oracle(self.lambda_hat_k)
self.oracle_calls += 1
self.lambda_k = self.lambda_hat_k
self.d_k = self.d_hat_k
self.diff_d_k = self.diff_d_hat_k
self.notify_observers()
##########
# Step 0 #
##########
i_k = 0
smallest_i_k_found = 0
while not smallest_i_k_found:
# first, calculate lambda_k_ik (test point)
lambda_k_ik = self.phi_k + float(1.0)/(2**i_k*self.L_k)*self.diff_d_hat_k
lambda_k_ik = self.projection_function(lambda_k_ik)
# then, call oracle at lambda_k_ik (test point)
x_k_ik, d_k_ik, diff_d_k_ik, = self.oracle(lambda_k_ik)
self.oracle_calls += 1
# before I can test the condition I have to calculate the Bregman point, and invoke once again the oracle
# to evaluate d(bregman(lambda_k_ik))
bregman_lambda_k_ik = self._bregman_map(2**i_k*self.L_k, lambda_k_ik, diff_d_k_ik)
bregman_x_k_ik, bregman_d_k_ik, bregman_subgrad_lambda_k_ik, = self.oracle(bregman_lambda_k_ik)
self.oracle_calls += 1
from cached_property import cached_property
from sqlalchemy import text
import requests
import json
import urllib.parse
import datetime
from app import db
from app import USER_AGENT
from app import MAX_MAG_ID
from app import get_apiurl_from_openalex_url
from util import jsonify_fast_no_sort_raw
# truncate mid.concept
# insert into mid.concept (select * from legacy.mag_advanced_fields_of_study)
def as_concept_openalex_id(id):
from app import API_HOST
return f"{API_HOST}/C{id}"
class Concept(db.Model):
__table_args__ = {'schema': 'mid'}
__tablename__ = "concept_for_api_mv"
field_of_study_id = db.Column(db.BigInteger, primary_key=True)
normalized_name = db.Column(db.Text)
display_name = db.Column(db.Text)
main_type = db.Column(db.Text)
level = db.Column(db.Numeric)
paper_count = db.Column(db.Numeric)
citation_count = db.Column(db.Numeric)
wikipedia_id = db.Column(db.Text)
wikidata_id = db.Column(db.Text)
wikipedia_json = db.Column(db.Text)
wikidata_json = db.Column(db.Text)
created_date = db.Column(db.DateTime)
updated_date = db.Column(db.DateTime)
full_updated_date = db.Column(db.DateTime)
@cached_property
def id(self):
return self.field_of_study_id
@property
def openalex_id(self):
return as_concept_openalex_id(self.field_of_study_id)
@property
def openalex_id_short(self):
from models import short_openalex_id
return short_openalex_id(self.openalex_id)
@property
def openalex_api_url(self):
return get_apiurl_from_openalex_url(self.openalex_id)
@cached_property
def wikidata_id_short(self):
if not self.wikidata_id:
return None
return self.wikidata_id.replace("https://www.wikidata.org/wiki/", "")
@cached_property
def ancestors_raw(self):
q = """
WITH RECURSIVE leaf (child_field_of_study_id, child_field, child_level, field_of_study_id, parent_field, parent_level) AS (
SELECT linking.child_field_of_study_id,
child_fields.display_name as child_field,
child_fields.level as child_level,
linking.field_of_study_id,
parent_fields.display_name as parent_field,
parent_fields.level as parent_level
FROM legacy.mag_advanced_field_of_study_children linking
JOIN legacy.mag_advanced_fields_of_study child_fields on child_fields.field_of_study_id=linking.child_field_of_study_id
JOIN legacy.mag_advanced_fields_of_study parent_fields on parent_fields.field_of_study_id=linking.field_of_study_id
WHERE child_field_of_study_id = :concept_id
UNION ALL
SELECT linking2.child_field_of_study_id,
child_fields2.display_name as child_field,
child_fields2.level as child_level,
linking2.field_of_study_id,
parent_fields2.display_name as parent_field,
parent_fields2.level as parent_level
FROM legacy.mag_advanced_field_of_study_children linking2
JOIN legacy.mag_advanced_fields_of_study child_fields2 on child_fields2.field_of_study_id=linking2.child_field_of_study_id
JOIN legacy.mag_advanced_fields_of_study parent_fields2 on parent_fields2.field_of_study_id=linking2.field_of_study_id
INNER JOIN leaf l
On l.field_of_study_id = linking2.child_field_of_study_id
)
SELECT distinct child_field_of_study_id as id, child_field as name, child_level as level, field_of_study_id as ancestor_id, parent_field as ancestor_name, parent_level as ancestor_level FROM leaf
"""
rows = db.session.execute(text(q), {"concept_id": self.field_of_study_id}).fetchall()
return rows
@cached_property
def ancestors(self):
rows = self.ancestors_raw
row_dict = {row["ancestor_id"]: row for row in rows}
ancestors = [{"id": as_concept_openalex_id(row["ancestor_id"]),
"display_name": row["ancestor_name"],
"level": row["ancestor_level"]} for row in row_dict.values()]
ancestors = sorted(ancestors, key=lambda x: (x["level"], x["display_name"]), reverse=True)
return ancestors
@cached_property
def extended_attributes(self):
q = """
select attribute_type, attribute_value
from legacy.mag_advanced_field_of_study_extended_attributes
WHERE field_of_study_id = :concept_id
"""
rows = db.session.execute(text(q), {"concept_id": self.field_of_study_id}).fetchall()
extended_attributes = [{"attribute_type": row["attribute_type"],
"attribute_value": row["attribute_value"]} for row in rows]
return extended_attributes
@cached_property
def umls_aui_urls(self):
return [attr["attribute_value"] for attr in self.extended_attributes if attr["attribute_type"]==1]
@cached_property
def raw_wikipedia_url(self):
# for attr in self.extended_attributes:
# if attr["attribute_type"]==2:
# return attr["attribute_value"]
# temporary
# page_title = urllib.parse.quote(self.display_name)
page_title = urllib.parse.quote(self.display_name.lower().replace(" ", "_"))
return f"https://en.wikipedia.org/wiki/{page_title}"
@cached_property
def umls_cui_urls(self):
return [attr["attribute_value"] for attr in self.extended_attributes if attr["attribute_type"]==3]
@cached_property
def wikipedia_data_url(self):
# for attr_dict in self.extended_attributes:
# if attr_dict["attribute_type"] == 2:
# wiki_url = attr_dict["attribute_value"]
# page_title = wiki_url.rsplit("/", 1)[-1]
# url = f"https://en.wikipedia.org/w/api.php?action=query&format=json&formatversion=2&prop=pageimages|pageterms&piprop=original|thumbnail&titles={page_title}&pithumbsize=100"
# return url
# temporary
# page_title = urllib.parse.quote(self.display_name)
page_title = urllib.parse.quote(self.display_name.lower().replace(" ", "_"))
url = f"https://en.wikipedia.org/w/api.php?action=query&format=json&formatversion=2&prop=pageimages|pageterms&piprop=original|thumbnail&titles={page_title}&pithumbsize=100"
return url
@cached_property
def related_concepts(self):
q = """
select type2, field_of_study_id2,
concept2.display_name as field_of_study_id2_display_name,
concept2.level as field_of_study_id2_level,
related.rank
from legacy.mag_advanced_related_field_of_study related
join mid.concept_for_api_mv concept2 on concept2.field_of_study_id = field_of_study_id2
WHERE field_of_study_id1 = :concept_id
"""
rows = db.session.execute(text(q), {"concept_id": self.field_of_study_id}).fetchall()
# not including type on purpose
related_concepts1 = [{"id": as_concept_openalex_id(row["field_of_study_id2"]),
"wikidata": None,
"display_name": row["field_of_study_id2_display_name"],
"level": row["field_of_study_id2_level"],
"score": row["rank"]
} for row in rows]
q = """
select type1, field_of_study_id1,
concept1.display_name as field_of_study_id1_display_name,
concept1.level as field_of_study_id1_level,
related.rank
from legacy.mag_advanced_related_field_of_study related
join mid.concept_for_api_mv concept1 on concept1.field_of_study_id = field_of_study_id1
WHERE field_of_study_id2 = :concept_id
"""
rows = db.session.execute(text(q), {"concept_id": self.field_of_study_id}).fetchall()
# not including type on purpose
related_concepts2 = [{"id": as_concept_openalex_id(row["field_of_study_id1"]),
"wikidata": None,
"display_name": row["field_of_study_id1_display_name"],
"level": row["field_of_study_id1_level"],
"score": row["rank"]
} for row in rows]
related_concepts_all = related_concepts1 + related_concepts2
related_concepts_dict = {}
for row in related_concepts_all:
related_concepts_dict[row["id"]] = row
#do it this way to dedup
related_concepts_all = sorted(related_concepts_dict.values(), key=lambda x: (x["score"]), reverse=True)
# the ones with poor rank aren't good enough to include
related_concepts_all = [field for field in related_concepts_all if field["score"] >= 0.75 and field["level"] <= self.level + 1]
# keep a max of 100 related concepts
related_concepts_all = related_concepts_all[:100]
return related_concepts_all
@cached_property
def image_url(self):
if not self.wikipedia_data:
return None
data = self.wikipedia_data
try:
page_id = data["query"]["pages"][0]["original"]["source"]
except KeyError:
return None
return page_id
@cached_property
def image_thumbnail_url(self):
if not self.wikipedia_data:
return None
data = self.wikipedia_data
try:
page_id = data["query"]["pages"][0]["thumbnail"]["source"]
except KeyError:
return None
return page_id
@cached_property
def description(self):
if not self.wikipedia_data:
return None
data = self.wikipedia_data
try:
page_id = data["query"]["pages"][0]["terms"]["description"][0]
except KeyError:
return None
@cached_property
def wikipedia_title(self):
if not self.wikipedia_data:
return None
data = self.wikipedia_data
# print(data)
try:
return data["query"]["pages"][0]["title"]
except KeyError:
return None
@cached_property
def raw_wikidata_id(self):
if not self.wikipedia_data:
return None
data = self.wikipedia_data
try:
page_id = data["query"]["pages"][0]["pageprops"]["wikibase_item"]
except KeyError:
return None
return page_id
@cached_property
def wikipedia_url(self):
return self.wikipedia_id
@cached_property
def wikidata_data(self):
if not self.wikidata_id:
return None
try:
data = json.loads(self.wikidata_json)
except:
data = None
if not data:
url = f"https://www.wikidata.org/wiki/Special:EntityData/{self.wikidata_id_short}.json"
print(f"calling wikidata live with {url} for {self.openalex_id}")
r = requests.get(url, headers={"User-Agent": USER_AGENT})
data = r.json()
# are claims too big?
try:
del data["entities"][self.wikidata_id_short]["claims"]
except:
pass
# print(response)
return data
@cached_property
def wikipedia_data(self):
try:
return json.loads(self.wikipedia_json)
except:
print(f"Error doing json_loads for {self.openalex_id} in wikipedia_data")
return None
@cached_property
def raw_wikipedia_data(self):
if not self.wikipedia_url:
return None
wikipedia_page_name = self.wikipedia_url.rsplit("/", 1)[-1]
# print(f"\noriginal: {self.wikipedia_url} for name {self.display_name}")
url = f"https://en.wikipedia.org/w/api.php?action=query&format=json&formatversion=2&prop=pageprops%7Cpageimages%7Cpageterms&piprop=original%7Cthumbnail&titles={wikipedia_page_name}&pithumbsize=100&redirects="
# print(f"calling {url}")
r = requests.get(url, headers={"User-Agent": USER_AGENT})
# print(r.json())
return r.json()
@cached_property
def display_name_international(self):
if not self.wikidata_data:
return None
data = self.wikidata_data
try:
response = data["entities"][self.wikidata_id_short]["labels"]
response = {d["language"]: d["value"] for d in response.values()}
return dict(sorted(response.items()))
except KeyError:
return None
@cached_property
def description(self):
if not self.description_international:
return None
try:
return self.description_international["en"]
except KeyError:
return None
@cached_property
def description_international(self):
if not self.wikidata_data:
return None
data = self.wikidata_data
try:
response = data["entities"][self.wikidata_id_short]["descriptions"]
response = {d["language"]: d["value"] for d in response.values()}
return dict(sorted(response.items()))
except KeyError:
return None
@cached_property
def raw_wikidata_data(self):
if not self.wikidata_id_short:
return None
url = f"https://www.wikidata.org/wiki/Special:EntityData/{self.wikidata_id_short}.json"
print(f"calling {url}")
r = requests.get(url, headers={"User-Agent": USER_AGENT})
response = r.json()
# claims are too big
try:
del response["entities"][self.wikidata_id_short]["claims"]
except KeyError:
# not here for some reason, doesn't matter
pass
response_json = json.dumps(response, ensure_ascii=False)
# work around redshift bug with nested quotes in json
response = response_json.replace('\\"', '*')
return response
def store(self):
from util import jsonify_fast_no_sort_raw
VERSION_STRING = "postgres fast queue"
json_save = None
json_save = jsonify_fast_no_sort_raw(self.to_dict())
if json_save and len(json_save) > 65000:
print("Error: json_save too long for field_of_study_id {}, skipping".format(self.openalex_id))
json_save = None
updated = datetime.datetime.utcnow().isoformat()
self.insert_dicts = [{"JsonConcepts": {"id": self.field_of_study_id,
"updated": updated,
"json_save": json_save,
"version": VERSION_STRING,
"merge_into_id": None
}}]
def clean_metadata(self):
if not self.metadata:
return
self.metadata.updated = datetime.datetime.utcnow()
self.wikipedia_id = self.metadata.wikipedia_id
self.wikidata_id_short = self.metadata.wikidata_id_short
        return
        # NOTE: everything below this early return is unreachable; it appears to be intentionally disabled.
        # work around redshift bug with nested quotes in json
if self.metadata.wikipedia_json:
response = json.loads(self.metadata.wikipedia_json.replace('\\\\"', '*'))
# response = self.metadata.wikipedia_json.replace('\\\\"', '*')
self.wikipedia_super = response
# try:
# # work around redshift bug with nested quotes in json
# response = json.loads(self.metadata.wikipedia_json.replace('\\\\"', '*'))
# self.wikipedia_super = json.loads(response)
# except:
# print(f"Error: oops on loading wikipedia_super {self.field_of_study_id}")
# pass
if self.metadata.wikidata_json:
# self.wikidata_super = json.loads(self.metadata.wikidata_json.replace('\\\\"', '*'))
self.wikidata_super = json.loads(self.metadata.wikidata_json.replace('\\\\"', '*'))
elif self.metadata.wikidata_id_short:
print("getting wikidata")
self.wikidata_super = self.raw_wikidata_data
# try:
# if self.metadata.wikidata_json:
# self.wikidata_super = json.loads(self.metadata.wikidata_json)
# elif self.metadata.wikidata_id_short:
# print("getting wikidata")
# self.wikidata_super = self.raw_wikidata_data
# except:
# print(f"Error: oops on loading wikidata_super {self.field_of_study_id}")
# pass
def calculate_ancestors(self):
ancestors = self.ancestors_raw
if not hasattr(self, "insert_dicts"):
self.insert_dicts = []
for ancestor in ancestors:
id = self.field_of_study_id
ancestor_id = ancestor["ancestor_id"]
self.insert_dicts += [{"ConceptAncestor": [id, ancestor_id]}]
print(self.insert_dicts)
@cached_property
def ancestors_sorted(self):
if not self.ancestors:
return []
non_null_ancestors = [ancestor for ancestor in self.ancestors if ancestor and ancestor.my_ancestor]
return sorted(non_null_ancestors, key=lambda x: (-1 * x.my_ancestor.level, x.my_ancestor.display_name), reverse=False)
@cached_property
def display_counts_by_year(self):
response_dict = {}
# all_rows = self.counts_by_year_papers + self.counts_by_year_citations
all_rows = self.counts_by_year
for count_row in all_rows:
response_dict[count_row.year] = {"year": count_row.year, "works_count": 0, "cited_by_count": 0}
for count_row in all_rows:
if count_row.type == "citation_count":
response_dict[count_row.year]["cited_by_count"] = int(count_row.n)
else:
response_dict[count_row.year]["works_count"] = int(count_row.n)
my_dicts = [counts for counts in response_dict.values() if counts["year"] and counts["year"] >= 2012]
response = sorted(my_dicts, key=lambda x: x["year"], reverse=True)
return response
def to_dict(self, return_level="full"):
response = {
"id": self.openalex_id,
"wikidata": self.wikidata_id,
"display_name": self.display_name,
"level": self.level,
}
if return_level == "full":
response.update({
"description": self.description,
"works_count": self.paper_count if self.paper_count else 0,
"cited_by_count": self.citation_count if self.citation_count else 0,
"ids": {
"openalex": self.openalex_id,
"wikidata": self.wikidata_id,
"wikipedia": self.wikipedia_url,
"umls_aui": self.umls_aui_urls if self.umls_aui_urls else None,
"umls_cui": self.umls_cui_urls | |
'type': {'readonly': True},
'location': {'readonly': True},
'kind': {'readonly': True},
'application_principals': {'readonly': True},
'masking_level': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'data_masking_state': {'key': 'properties.dataMaskingState', 'type': 'str'},
'exempt_principals': {'key': 'properties.exemptPrincipals', 'type': 'str'},
'application_principals': {'key': 'properties.applicationPrincipals', 'type': 'str'},
'masking_level': {'key': 'properties.maskingLevel', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataMaskingPolicy, self).__init__(**kwargs)
self.location = None
self.kind = None
self.data_masking_state = kwargs.get('data_masking_state', None)
self.exempt_principals = kwargs.get('exempt_principals', None)
self.application_principals = None
self.masking_level = None
class DataMaskingRule(ProxyResource):
"""Represents a database data masking rule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: The location of the data masking rule.
:vartype location: str
:ivar kind: The kind of Data Masking Rule. Metadata, used for Azure portal.
:vartype kind: str
:ivar id_properties_id: The rule Id.
:vartype id_properties_id: str
:param alias_name: The alias name. This is a legacy parameter and is no longer used.
:type alias_name: str
:param rule_state: The rule state. Used to delete a rule. To delete an existing rule, specify
the schemaName, tableName, columnName, maskingFunction, and specify ruleState as disabled.
However, if the rule doesn't already exist, the rule will be created with ruleState set to
enabled, regardless of the provided value of ruleState. Possible values include: "Disabled",
"Enabled".
:type rule_state: str or ~azure.mgmt.sql.models.DataMaskingRuleState
:param schema_name: The schema name on which the data masking rule is applied.
:type schema_name: str
:param table_name: The table name on which the data masking rule is applied.
:type table_name: str
:param column_name: The column name on which the data masking rule is applied.
:type column_name: str
:param masking_function: The masking function that is used for the data masking rule. Possible
values include: "Default", "CCN", "Email", "Number", "SSN", "Text".
:type masking_function: str or ~azure.mgmt.sql.models.DataMaskingFunction
:param number_from: The numberFrom property of the masking rule. Required if maskingFunction is
set to Number, otherwise this parameter will be ignored.
:type number_from: str
:param number_to: The numberTo property of the data masking rule. Required if maskingFunction
is set to Number, otherwise this parameter will be ignored.
:type number_to: str
:param prefix_size: If maskingFunction is set to Text, the number of characters to show
unmasked in the beginning of the string. Otherwise, this parameter will be ignored.
:type prefix_size: str
:param suffix_size: If maskingFunction is set to Text, the number of characters to show
unmasked at the end of the string. Otherwise, this parameter will be ignored.
:type suffix_size: str
:param replacement_string: If maskingFunction is set to Text, the character to use for masking
the unexposed part of the string. Otherwise, this parameter will be ignored.
:type replacement_string: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'readonly': True},
'kind': {'readonly': True},
'id_properties_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'id_properties_id': {'key': 'properties.id', 'type': 'str'},
'alias_name': {'key': 'properties.aliasName', 'type': 'str'},
'rule_state': {'key': 'properties.ruleState', 'type': 'str'},
'schema_name': {'key': 'properties.schemaName', 'type': 'str'},
'table_name': {'key': 'properties.tableName', 'type': 'str'},
'column_name': {'key': 'properties.columnName', 'type': 'str'},
'masking_function': {'key': 'properties.maskingFunction', 'type': 'str'},
'number_from': {'key': 'properties.numberFrom', 'type': 'str'},
'number_to': {'key': 'properties.numberTo', 'type': 'str'},
'prefix_size': {'key': 'properties.prefixSize', 'type': 'str'},
'suffix_size': {'key': 'properties.suffixSize', 'type': 'str'},
'replacement_string': {'key': 'properties.replacementString', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataMaskingRule, self).__init__(**kwargs)
self.location = None
self.kind = None
self.id_properties_id = None
self.alias_name = kwargs.get('alias_name', None)
self.rule_state = kwargs.get('rule_state', None)
self.schema_name = kwargs.get('schema_name', None)
self.table_name = kwargs.get('table_name', None)
self.column_name = kwargs.get('column_name', None)
self.masking_function = kwargs.get('masking_function', None)
self.number_from = kwargs.get('number_from', None)
self.number_to = kwargs.get('number_to', None)
self.prefix_size = kwargs.get('prefix_size', None)
self.suffix_size = kwargs.get('suffix_size', None)
self.replacement_string = kwargs.get('replacement_string', None)
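# Illustrative sketch, not part of the generated SDK module: the DataMaskingRule docstring
# above notes that an existing rule is deleted by resubmitting it with ruleState set to
# "Disabled". A minimal payload for that case, using only the model defined above, might
# look like the following. The schema/table/column names are hypothetical, and actually
# sending the payload (e.g. through a SQL management client's data-masking operations)
# is assumed to happen elsewhere, outside this file.
def _example_data_masking_rule_delete_payload():
    # "Disabled" on an existing rule requests its deletion; on a missing rule the service
    # would instead create it as enabled, per the docstring above.
    return DataMaskingRule(
        schema_name="dbo",          # hypothetical schema
        table_name="Customers",     # hypothetical table
        column_name="Email",        # hypothetical column
        masking_function="Email",   # one of the documented possible values
        rule_state="Disabled",
    )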
class DataMaskingRuleListResult(msrest.serialization.Model):
"""The response to a list data masking rules request.
:param value: The list of database data masking rules.
:type value: list[~azure.mgmt.sql.models.DataMaskingRule]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DataMaskingRule]'},
}
def __init__(
self,
**kwargs
):
super(DataMaskingRuleListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class DataWarehouseUserActivities(ProxyResource):
"""User activities of a data warehouse.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar active_queries_count: Count of running and suspended queries.
:vartype active_queries_count: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'active_queries_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'active_queries_count': {'key': 'properties.activeQueriesCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DataWarehouseUserActivities, self).__init__(**kwargs)
self.active_queries_count = None
class DataWarehouseUserActivitiesListResult(msrest.serialization.Model):
"""User activities of a data warehouse.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.DataWarehouseUserActivities]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataWarehouseUserActivities]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataWarehouseUserActivitiesListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DeletedServer(ProxyResource):
"""A deleted server.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar version: The version of the deleted server.
:vartype version: str
:ivar deletion_time: The deletion time of the deleted server.
:vartype deletion_time: ~datetime.datetime
:ivar original_id: The original ID of the server before deletion.
:vartype original_id: str
:ivar fully_qualified_domain_name: The fully qualified domain name of the server.
:vartype fully_qualified_domain_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'version': {'readonly': True},
'deletion_time': {'readonly': True},
'original_id': {'readonly': True},
'fully_qualified_domain_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'deletion_time': {'key': 'properties.deletionTime', 'type': 'iso-8601'},
'original_id': {'key': 'properties.originalId', 'type': 'str'},
'fully_qualified_domain_name': {'key': 'properties.fullyQualifiedDomainName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeletedServer, self).__init__(**kwargs)
self.version = None
self.deletion_time = None
self.original_id = None
self.fully_qualified_domain_name = None
class DeletedServerListResult(msrest.serialization.Model):
"""A list of deleted servers.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.DeletedServer]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeletedServer]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeletedServerListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class EditionCapability(msrest.serialization.Model):
"""The edition capability.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The database edition name.
:vartype name: str
:ivar supported_service_level_objectives: The list of supported service objectives for the
edition.
:vartype supported_service_level_objectives:
list[~azure.mgmt.sql.models.ServiceObjectiveCapability]
:ivar zone_redundant: Whether or not zone redundancy is supported for the edition.
:vartype zone_redundant: bool
:ivar read_scale: The read scale capability for the edition.
:vartype read_scale: ~azure.mgmt.sql.models.ReadScaleCapability
:ivar supported_storage_capabilities: The list of supported storage capabilities for this
edition.
:vartype supported_storage_capabilities: list[~azure.mgmt.sql.models.StorageCapability]
:ivar status: The status of the capability. Possible values include: "Visible", "Available",
"Default", "Disabled".
:vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
:param reason: The reason for the capability not being available.
:type reason: str
"""
_validation = {
'name': {'readonly': True},
'supported_service_level_objectives': {'readonly': True},
'zone_redundant': {'readonly': True},
'read_scale': {'readonly': True},
'supported_storage_capabilities': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'supported_service_level_objectives': {'key': 'supportedServiceLevelObjectives', 'type': '[ServiceObjectiveCapability]'},
'zone_redundant': {'key': 'zoneRedundant', 'type': 'bool'},
'read_scale': {'key': 'readScale', 'type': 'ReadScaleCapability'},
'supported_storage_capabilities': {'key': 'supportedStorageCapabilities', | |
# filename: docs/quiz/Contents/scrapebylanguage.py
# Program that scrapes Wikipedia for language information
# 0 arguments scrape the 700+ webpages
# 1 argument opens previous dictionaries and modifies them
# 2 arguments uses a debug list to quickly check if the code works
import requests
from bs4 import BeautifulSoup
import json
import math
import sys
import time
# NOTE: this script targets Python 2 (it relies on reload(sys), sys.setdefaultencoding,
# and the `unicode` builtin used further down).
reload(sys)
sys.setdefaultencoding('utf8')
def reducebyfunc(predicate, array):
if not isinstance(array, list):
return
if len(array) > 1:
pos = len(array) - 1
tmp = predicate(array[pos], array[pos-1])
tmparray = array[0:pos-1]
tmparray.append(tmp)
return reducebyfunc(predicate, tmparray)
else:
return array[0]
def speakerstage(speakers):
try:
speakers2 = speakers.replace(",", "")
s = "".join([ c if (c.isalnum() or c==".") else "*" for c in speakers2 ])
newspeakers = s.split("*")
return(newspeakers)
except:
return("NA")
print("err-string")
def parsesimplejson(json, key):
array = []
for entry in json:
for k in entry.keys():
if key in k:
usethiskey = k
array.append(entry[usethiskey])
return(array)
# help from: https://www.geeksforgeeks.org/python-program-for-quicksort/
def quicksort_by_dict(array, dict, euro, value):
high = len(array) - 1
if high > 0 and isinstance(array, list):
med = high
pivot = array[med]
if dict[pivot]["continent"] == "Europe" and euro=="T":
pthing = dict[pivot][value] + 2000000000 #population
else:
pthing = dict[pivot][value]
below = []
above = []
for j in range(0, high):
if dict[array[j]]["continent"] == "Europe" and euro=="T":
thing = dict[array[j]][value] + 2000000000 #population
else:
thing = dict[array[j]][value]
if thing >= pthing:
below.append(array[j])
else:
above.append(array[j])
below = quicksort_by_dict(below, dict, euro, value)
above = quicksort_by_dict(above, dict, euro, value)
below.append(pivot)
array = below + above
return(array)
else:
return(array)
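# Illustrative sketch (for clarity only; the data below is made up): quicksort_by_dict()
# sorts a list of names in descending order of dict[name][value], and when euro=="T" it
# boosts European entries by 2 billion so they sort ahead of everything else.
def _example_quicksort_by_dict():
    demo = {
        "France": {"continent": "Europe", "population": 67000000},
        "India": {"continent": "Asia", "population": 1380000000},
        "Peru": {"continent": "South America", "population": 33000000},
    }
    # With euro="T" this returns ["France", "India", "Peru"]; with euro="F" it would
    # return ["India", "France", "Peru"] (plain descending population).
    return quicksort_by_dict(["Peru", "India", "France"], demo, "T", "population")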
def vspeak(speakers):
def trya(arr):
if checkf(arr[0]) == "no":
if checkf(arr[1]) == "no":
return("NA")
else:
return(arr[1])
else:
return(arr[0])
def checkf(n):
try:
float(n)
return("yes")
except:
return("no")
try:
sp = speakerstage(speakers)
filtered = filter(lambda x: x!="", sp)
arr = [];
v = "NA";
if "million" in "".join([s.lower() for s in filtered]):
indices = [i for i, j in enumerate(filtered) if j.lower() == "million"]
r = ["low", "high"]
m = min(2, len(indices))
if m > 1:
l1 = filtered[indices[0]-1]
l2 = filtered[indices[1]-1]
if checkf(l1)=="yes" and checkf(l2)=="yes":
if(float(l1)>float(l2)):
tmp = l1
l1 = l2
l2 = tmp
langs = [l1, l2]
for l in range(0, m):
if checkf(langs[l])=="yes":
arr.append(r[l] + ": " + langs[l].encode("utf8") + " million")
v = arr
else:
l = filtered[indices[0]-1]
if checkf(l)=="yes":
v = l + " million"
else:
v = trya(filtered)
if checkf(v)=="no":
v = "NA"
if v!="NA" and float(v) > 1000000:
                v = str(round(float(v)/1000000, 2)) + " million"
return(v)
except:
return("NA")
print("err-string")
def finddiff(speakers, v):
try:
newspeakers = speakerstage(speakers)
num = newspeakers[0]
if newspeakers[0] == "":
num = newspeakers[1]
number = float(num)
except:
return("NA")
print("err-string")
if v=="euro":
mid = 3
high = 6
else:
mid = 50
high = 100
if "billion" in speakers.lower():
return("easy")
if "million" in speakers.lower() or number >= 1000000:
if "million" in speakers.lower() and number > 10000 and number < 1000000:
number = 1 # handles known exception of Mon Language
if number >= 1000000:
number = number/1000000
if number > 0 and number < 999:
if number >= 0 and number < mid:
difficulty = "hard"
if number >= mid and number < high:
difficulty = "medium"
if number >= high and number < 999:
difficulty = "easy"
return(difficulty)
else:
if number > 0 and number < 1000000:
if number >= 100000 and number < 1000000:
if v=="euro":
difficulty = "hard"
else:
difficulty = "very hard"
if number >= 10000 and number < 100000:
difficulty = "super hard"
if number >= 0 and number < 10000:
difficulty = "whizkid"
return(difficulty)
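# Illustrative sketch (for clarity only; not called by the scraper): how finddiff() maps a
# speaker-count string to a difficulty bucket, using the lower "euro" thresholds versus the
# default thresholds defined above.
def _example_finddiff():
    # "70 million" speakers is "easy" under the euro thresholds (>= 6 million) but
    # "medium" otherwise (between 50 and 100 million).
    return finddiff("70 million", "euro"), finddiff("70 million", "noneuro")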
#find non-latin and rearrange
def findnl(array, stop):
if isinstance(array, list):
for el in range(0, len(array)):
if isinstance(array[el], str) or isinstance(array[el], unicode):
end = 0
for char in array[el]:
end += 1
if end > stop:
break
if ord(char) > 400:
if el==0:
return array
else:
array.insert(0, array.pop(el))
return array
return array
#find non-latin and rearrange
def checknl(line):
if "Pronunciation" in line:
line = line.split("Pronunciation")[0]
elif "pronunciation" in line:
line = line.split("pronunciation")[0]
for char in line:
if ord(char) > 400:
return True
return False
def scrape(wikipg, countries, dict):
# initiliaze to ensure it doesn't carry over
speakers = "NA"
vspeakers = "NA"
places = "NA"
vplaces = "NA"
languagefam = "NA"
difficulty = "NA"
link = "NA"
off = "NA"
region = "NA"
endonyms = []
endo = []
scripts = []
countrydict = countries["3"]
cia = countries["1"]
sam = countries["2"]
repl = [" ", "\n", "or", ","]
arabic = ["persian", "arabic", "perso-arabic", "perso", "jawi"]
stop = "no"
rowno = 0
try:
wpage = requests.get(url=wikipg)
wiki = BeautifulSoup(wpage.content, "html.parser")
title = wiki.find(id='firstHeading').text
table = wiki.find("table", {"class":"infobox vevent"})
wiki = None # to save memory
except:
print("Could not request page: " + wikipg)
return
if not(table is None):
for row in table.find_all("tr"):
rowno += 1
if rowno > 5:
stop = "yes"
rtext = row.text
# endonyms
if stop != "yes" and row.find_all("th")==[] and row.find_all("img")==[]:
spans = row.find_all("span")
if spans:
for s in spans:
if s.has_attr("lang"):
endo.append(s.text)
italics = row.find_all("i")
for it in range(0, len(italics)):
tmp = italics[it].text
endo.append(tmp)
if checknl(rtext):
if not(rtext in endo):
endo.append(rtext)
if "Native" in rtext:
if "Native speakers" in rtext:
speakers = rtext[15:len(rtext)]
if speakers is None:
continue
else:
vspeakers = vspeak(speakers)
else:
places = rtext[9:len(rtext)]
if "Official" in rtext and "language" in rtext:
if places == "NA":
places = rtext[9:len(rtext)]
offtxt = rtext[9:len(rtext)]
offarr = []
offgeo = offtxt.lower()
for pays in sam:
if pays.lower() in offgeo or pays in offgeo:
offarr.append(pays)
if offarr:
off = offarr
if "region" in rtext:
if places == "NA":
places = rtext[9:len(rtext)]
if places != "NA":
countryarray = []
geo = places.lower()
for pays in sam:
if pays.lower() in geo or pays in geo:
countryarray.append(pays)
if countryarray:
vplaces = countryarray
if "Language family" in rtext:
languagefam = []
row2 = row.find("td")
macrofam = row2.find("div").find("a", href=True)
if macrofam is None:
languagefam.append(row2.text)
else:
languagefam.append(macrofam.text)
tree = row2.find("ul")
if not(tree is None):
trynewthings = tree.find_all(text=True)
famarray = []
if not(trynewthings is None):
for thing in trynewthings:
newthing = thing
if newthing != "\n":
if "\n" in newthing:
newthing = newthing[1:len(newthing)]
famarray.append(newthing)
languagefam = languagefam + famarray
else:
languagefam="NA"
if "script" in rtext.lower() or "writing system" in rtext.lower() or "alphabet" in rtext.lower():
alphabetlinks = row.find_all("a", href=True)
for alinks in alphabetlinks:
linktxt = alinks.text.lower()
if not "braille" in linktxt.lower() and not "writing system" in linktxt.lower() and linktxt.lower()!="":
if reducebyfunc(lambda x, y : x or y, [a in linktxt.lower() for a in arabic]):
scripts.append("Arabic")
scripts.append("("+linktxt+")")
else:
scripts.append(linktxt)
colonial = ["French", "German", "English", "Spanish"]
col = reducebyfunc(lambda x,y : x or y, [c in title for c in colonial])
if endo:
endonyms1 = endo
if not reducebyfunc(lambda x,y : x or y, [t in title for t in ["Serbo", "Serbian", "Vietnamese"]]) and languagefam not in ["Austronesian"]:
endonyms = findnl(endonyms1, 5)
else:
endonyms = "NA"
if not vplaces == "NA":
if col:
euro="T"
else:
euro="F"
vplaces = quicksort_by_dict(vplaces, countrydict, euro, 'population')
tmpregion = []
if countrydict and sam:
for pl in vplaces:
if pl in sam:
if countrydict[pl]["continent"]:
tmpregion.append(countrydict[pl]["continent"])
if tmpregion:
region = tmpregion
e1 = {"endonym": endonyms, "scripts": scripts, "speakers": speakers, "vspeakers": vspeakers, "places":places, "vplaces":vplaces, "family":languagefam, "link":wikipg, "official":off, "region":region}
validatefam(e1)
e1["difficulty"] = finddiff(e1["speakers"], checkdiff(e1, col, countrydict))
e2 = {title:e1}
dict.update(e2)
def changedictvalue(dicti, key, value):
if key in dicti:
new_dict = {}
for keys in dicti:
if keys == key:
new_dict[keys] = value
else:
new_dict[keys] = dicti[keys]
return(new_dict)
else:
print("ERROR")
def validatefam(e):
# modify main family according to this list
fams = ["Semitic", "Bantu", "Slavic", "Baltic", "Romance", "Germanic", "Indo-Aryan", "Iranian", "Algonquian", "Celtic"]
if "family" in e:
for lang in e["family"]:
if lang in fams:
e["mainfam"] = lang
if not "mainfam" in e:
e["mainfam"] = e["family"][0]
else:
e["mainfam"] = "NA"
def checkdiff(thing, col, d):
if not "mainfam" in thing:
thing["mainfam"] = "NA"
if col:
return("euro")
easylang = ["Romance", "Germanic", "Slavic", "Uralic", "Slavic", "Baltic", "Celtic"]
if d:
if "vplaces" in thing:
if thing["vplaces"]!= "NA":
for pl in thing["vplaces"]:
if pl in d:
if d[pl]["continent"] == "Europe" and thing["mainfam"] in easylang:
return("euro")
else:
return("noneuro")
def retryfail(failarr, retries, countries, dict):
failures2 = []
for fail in failarr:
try:
scrape(fail, | |
cmd == '!-t':
if item.transcoded_to and os.path.isfile(item.transcoded_to):
filter = True
elif cmd == '!r':
if TAG_RANK not in item.tags:
filter = True
elif cmd == '!-r':
if TAG_RANK in item.tags:
filter = True
elif cmd == '!d':
if item.has_duplicates:
filter = True
elif cmd == '!s':
if not item.source or not os.path.isfile(item.source):
filter = True
if filter:
break
if filter:
del item_list[i]
else:
i += 1
TAG_ALIASES = {
'a': tag.ARTIST_NAME,
'u': tag.ALBUM_NAME,
't': tag.SONG_NAME,
'y': tag.YEAR,
'k': tag.TRACK,
'kt': tag.TRACK_TOTAL,
}
class TagAction(Action):
def __init__(self):
self.cmds = ['tag']
self.desc = "Update a tag on the current item"
alias_list = ''
for k,v in TAG_ALIASES.items():
alias_list += " {0} -> {1}\n".format(k, v)
self.help = """
Usage:
tag [(tag)=(value) ...] [-(tag)]
Where:
tag=value assign tag (tag) to value (value). Use quotes where necessary.
-tag remove tag (tag)
Some tag aliases:
{0}""".format(alias_list)
def run(self, history, item_list, current_index, args):
item = item_list[current_index]
tags = item.tags
for arg in args:
if arg[0] == '-':
tn = arg[1:]
if tn in TAG_ALIASES:
tn = TAG_ALIASES[tn]
if tn in tags:
del tags[tn]
elif '=' in arg:
tn = arg[0:arg.find('=')].strip()
v = arg[arg.find('=') + 1:].strip()
if tn in TAG_ALIASES:
tn = TAG_ALIASES[tn]
tags[tn] = v
item.set_tags(tags)
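# Illustrative sketch (assumption, not part of the tool): how TagAction.run() interprets its
# arguments. _StubItem is a hypothetical stand-in for a real item; it only provides the
# `tags` attribute and `set_tags()` method that run() actually touches.
class _StubItem(object):
    def __init__(self):
        self.tags = {tag.YEAR: '1959'}
    def set_tags(self, tags):
        self.tags = tags
def _example_tag_action_usage():
    item = _StubItem()
    # 'a=Miles Davis' sets the artist tag through the 'a' alias; '-y' removes the year tag.
    TagAction().run(history=None, item_list=[item], current_index=0,
                    args=['a=Miles Davis', '-y'])
    # After the call, item.tags == {tag.ARTIST_NAME: 'Miles Davis'}.
    return item.tags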
class RevertAction(Action):
def __init__(self):
self.cmds = ['revert']
self.desc = "Revert all actions since the last commit"
self.help = ''
def run(self, history, item_list, current_index, args):
global CACHE
CACHE.revert()
class CommitAction(Action):
def __init__(self):
self.cmds = ['commit']
self.desc = "Commit all current pending requests. Will happen automatically on exit."
self.help = ''
def run(self, history, item_list, current_index, args):
commit()
class DeleteTranscodeAction(Action):
def __init__(self):
self.cmds = ['del', 'delete']
self.desc = "Delete the current item's transcoded file."
self.help = ''
def run(self, history, item_list, current_index, args):
item = item_list[current_index]
target = item.transcoded_to
if target is not None and os.path.isfile(target):
try:
os.unlink(target)
print("Deleted {0}".format(target))
except Exception as e:
print("Could not remove file `{0}`".format(target))
class RankAction(Action):
def __init__(self):
self.cmds = ['rank']
self.desc = "Rank the current file with a number, higher being better"
self.help = """
Usage:
rank (number)
Where:
number ranking to assign to the file.
"""
def run(self, history, item_list, current_index, args):
item = item_list[current_index]
if len(args) == 1 and NUMBER_PATTERN.match(args[0]):
item.add_tags({ TAG_RANK: args[0] })
else:
print("Must provide the rank number.")
class TranscodeAction(Action):
def __init__(self):
self.cmds = ['tx', 'transcode', 'tcode']
self.desc = "Transcode the file again."
self.help = """
Usage:
transcode [-np] [-v] [-norm]
Where:
-np don't play the transcoded file after transcoding.
-v show the transcode command
-norm attempt to normalize with 0.1 headroom.
Re-attempts to transcode the file. If the transcoded file doesn't exist,
it will be created.
"""
def run(self, history, item_list, current_index, args):
# FIXME HAAAAAACK
# This should be fetched from elsewhere.
base_destdir = sys.argv[1]
current = item_list[current_index]
if not os.path.isfile(current.probe.filename):
print("Original file does not exist anymore: {0}".format(current.probe.filename))
return current_index
original = current.transcoded_to
if original is not None and os.path.exists(original):
os.unlink(original)
verbose = '-v' in args
destfile = transcode_correct_format(history, current.probe, get_destdir(base_destdir), verbose=verbose)
if original != destfile:
print("[debug] replacing old transcode dest ({0}) with ({1})".format(original, destfile))
current.set_transcoded_to(destfile)
print("New transcoded file recorded at {0}".format(destfile))
if '-norm' in args:
output_fd, output_file = tempfile.mkstemp(
suffix=os.path.splitext(destfile)[1])
try:
headroom = 0.1
print("Normalizing file by {1:#.1f} into {0}".format(output_file, headroom))
os.close(output_fd)
increase = normalize_audio(destfile, output_file, headroom)
if increase is None:
print("Can't normalize.")
else:
print("Increased volume by {0}dB".format(increase))
shutil.copyfile(output_file, destfile)
finally:
os.unlink(output_file)
if "-np" not in args:
get_media_player().play_file(destfile)
return current_index
class NormalizeAction(Action):
def __init__(self):
self.cmds = ['normalize', 'nz', 'norm']
self.desc = "Normalize the audio level, so that its volume is not too quiet"
self.help = """
Usage:
normalize [headroom]
Where:
headroom the decimal amount of "headroom" to leave above the peak level,
in decibels. Defaults to 0.1
"""
def run(self, history, item_list, current_index, args):
current = item_list[current_index]
output_fd,output_file = tempfile.mkstemp(
suffix=os.path.splitext(current.transcoded_to)[1])
headroom = 0.1
if len(args) > 0:
try:
headroom = float(args[0])
if headroom < 0:
print('headroom value must be a positive number.')
return current_index
except:
print('headroom value must be a floating point value, read {0}'.format(args[0]))
return current_index
print("Normalizing file by {1:#.1f} into {0}".format(output_file, headroom))
os.close(output_fd)
try:
increase = normalize_audio(current.transcoded_to, output_file, headroom)
if increase is None:
print("Can't normalize.")
return current_index
print("Increased volume by {0}dB".format(increase))
while True:
print("p) play normalized, K) keep normalized, s) skip normalization")
v = prompt_value('pKs')
if v == 'p':
get_media_player().play_file(output_file)
if v == 'K':
shutil.copyfile(output_file, current.transcoded_to)
break
if v == 's':
break
#except Exception as e:
# print(str(e))
# print(e)
# return current_index
finally:
os.unlink(output_file)
return current_index
class DuplicateManipulationAction(Action):
def __init__(self):
self.cmds = ['duplicate', 'dup']
self.desc = "manipulates the duplicate file handling"
self.help = """
Usage:
duplicate
Runs the interactive duplicate sub-menu. This allows for:
* Swapping which file is considered the "source".
* Removing files as being marked as duplicate of the file.
* Mark the current file as duplicate of some other file. (TODO)
* Play one of the duplicate files.
* Remove the original file of a duplicate.
"""
def run(self, history, item_list, current_index, args):
current = item_list[current_index]
while True:
dup_data = self._get_dups(current, history)
if len(dup_data) <= 0:
print("No duplicates left.")
break
print("s) skip, l) list, u) unmark duplicate, p) play a duplicate's source file,")
print("m) mark another file as duplicate of this file")
print("n) mark this file as a duplicate of another file")
print("N) set new source, X) delete duplicate source file,")
act = prompt_value("sldNX")
if act == 's':
break
elif act == 'l':
self._list_dups(current, dup_data)
elif act == 'u':
v = self._query_dup(current, dup_data)
if v >= 0:
history.delete_duplicate_id(dup_data[v]['duplicate_id'])
print('{0} no longer a duplicate of {1}'.format(
dup_data[v]['filename'], current.source
))
elif act == 'p':
v = self._query_dup(current, dup_data)
if v >= 0:
source = dup_data[v]['filename']
if not os.path.isfile(source):
print('Cannot find file `{0}`'.format(source))
else:
get_media_player().play_file(source)
elif act == 'm':
v = prompt_value("Source file which is a duplicate of this")
if v >= 0:
print("NOT IMPLEMENTED YET")
elif act == 'N':
v = self._query_dup(current, dup_data)
if v >= 0:
selected = dup_data[v]
new_source_filename = dup_data[v]['filename']
history.mark_duplicate(current.probe, new_source_filename)
for i in range(0, len(dup_data)):
if i != v:
dup = dup_data[i]
dup_probe = CACHE.get(selected['filename'])
history.delete_duplicate_id(dup['duplicate_id'])
history.mark_duplicate(dup_probe.probe, new_source_filename)
print('New source of duplicates is {0}'.format(new_source_filename))
elif act == 'X':
v = self._query_dup(current, dup_data)
if v >= 0:
dup = dup_data[v]
cfn = prompt_value("Y) Permanently Delete {0}".format(dup['filename']))
if cfn == 'Y':
history.delete_duplicate_id(dup['duplicate_id'])
history.delete_source_record(dup['filename'])
if os.path.isfile(dup['filename']):
os.unlink(dup['filename'])
print('Permanently deleted {0}'.format(dup['filename']))
else:
print('skipping permanent delete')
return current_index
def _get_dups(self, current, history):
ret = []
for entry in history.get_duplicate_data(current.probe):
if entry['source_location'] != current.probe.filename:
ret.append(entry)
return ret
def _list_dups(self, current, dup_data):
for pos in range(0, len(dup_data)):
is_source = dup_data[pos]['source_location'] == current.source
print("{0}) {1}{2}".format(
pos + 1,
dup_data[pos]['filename'],
is_source and " (source)" or ""
))
def _query_dup(self, current, dup_data):
while True:
v = prompt_value("s) skip, l) list, 1-{0}) select index".format(len(dup_data)))
if v == 's':
return -1
if v == 'l':
self._list_dups(current, dup_data)
continue
try:
pos = int(v) - 1
if pos >= 0 and pos < len(dup_data):
return pos
except:
pass
class TrimAudioAction(Action):
def __init__(self):
self.cmds = ['trim']
self.desc = "Trim the start and stop audio off the trancoded output"
self.help = """
Usage:
trim [(mm):(ss) | -] [(mm):(ss) | -]
Where:
The first argument is the time to trim from the start. If "-" is given, then
nothing is trimmed from the start.
The second argument is the end time to trim off all audio after the time.
If "-" is given, then nothing is trimmed off the end.
So, if you run "trim 00:10 02:00", then the final file will be 1:50 in length.
"""
def run(self, history, item_list, current_index, args):
current = item_list[current_index]
if len(args) != 2:
print(self.help)
return current_index
start_time = args[0]
end_time = args[1]
output_fd,output_file = tempfile.mkstemp(
suffix=os.path.splitext(current.transcoded_to)[1])
os.close(output_fd)
try:
worked = trim_audio(current.transcoded_to, output_file, start_time, end_time)
if not worked:
return current_index
while True:
print("p) play trimmed, K) keep trimmed, s) skip trimming")
v = prompt_value('pKs')
if v == 'p':
get_media_player().play_file(output_file)
if v == 'K':
shutil.copyfile(output_file, current.transcoded_to)
break
if v == 's':
break
#except Exception as e:
# print(str(e))
# print(e)
# return current_index
finally:
os.unlink(output_file)
return current_index
ACTIONS_WITH_CURRENT = [
PlayCurrentAction(),
ShowCurrentAction(),
TagAction(),
DeleteTranscodeAction(),
RankAction(),
TranscodeAction(),
NormalizeAction(),
DuplicateManipulationAction(),
TrimAudioAction(),
]
ACTIONS_WITH_LIST = [
NextItemAction(),
PrevItemAction(),
GotoPositionAction(),
ListAction(),
FilterAction(),
]
ACTIONS_ANY = | |
213, 29),
woosh.Token(woosh.NEWLINE, '\r\n', 213, 29, 214, 0),
woosh.Token(woosh.INDENT, ' ', 214, 0, 214, 8),
woosh.Token(woosh.NAME, 'return', 214, 8, 214, 14),
woosh.Token(woosh.NAME, 'default', 214, 15, 214, 22),
woosh.Token(woosh.NEWLINE, '\r\n', 214, 22, 215, 0),
woosh.Token(woosh.DEDENT, ' ', 215, 0, 215, 4),
woosh.Token(woosh.NAME, 'if', 215, 4, 215, 6),
woosh.Token(woosh.NAME, 'not', 215, 7, 215, 10),
woosh.Token(woosh.NAME, 'isinstance', 215, 11, 215, 21),
woosh.Token(woosh.OP, '(', 215, 21, 215, 22),
woosh.Token(woosh.NAME, 'val', 215, 22, 215, 25),
woosh.Token(woosh.OP, ',', 215, 25, 215, 26),
woosh.Token(woosh.NAME, 'int', 215, 27, 215, 30),
woosh.Token(woosh.OP, ')', 215, 30, 215, 31),
woosh.Token(woosh.OP, ':', 215, 31, 215, 32),
woosh.Token(woosh.NEWLINE, '\r\n', 215, 32, 216, 0),
woosh.Token(woosh.INDENT, ' ', 216, 0, 216, 8),
woosh.Token(woosh.NAME, 'msg', 216, 8, 216, 11),
woosh.Token(woosh.OP, '=', 216, 12, 216, 13),
woosh.Token(woosh.OP, '(', 216, 14, 216, 15),
woosh.Token(woosh.STRING, "'__length_hint__ must be integer, not %s'", 216, 15, 216, 56),
woosh.Token(woosh.OP, '%', 216, 57, 216, 58),
woosh.Token(woosh.NAME, 'type', 217, 15, 217, 19),
woosh.Token(woosh.OP, '(', 217, 19, 217, 20),
woosh.Token(woosh.NAME, 'val', 217, 20, 217, 23),
woosh.Token(woosh.OP, ')', 217, 23, 217, 24),
woosh.Token(woosh.OP, '.', 217, 24, 217, 25),
woosh.Token(woosh.NAME, '__name__', 217, 25, 217, 33),
woosh.Token(woosh.OP, ')', 217, 33, 217, 34),
woosh.Token(woosh.NEWLINE, '\r\n', 217, 34, 218, 0),
woosh.Token(woosh.NAME, 'raise', 218, 8, 218, 13),
woosh.Token(woosh.NAME, 'TypeError', 218, 14, 218, 23),
woosh.Token(woosh.OP, '(', 218, 23, 218, 24),
woosh.Token(woosh.NAME, 'msg', 218, 24, 218, 27),
woosh.Token(woosh.OP, ')', 218, 27, 218, 28),
woosh.Token(woosh.NEWLINE, '\r\n', 218, 28, 219, 0),
woosh.Token(woosh.DEDENT, ' ', 219, 0, 219, 4),
woosh.Token(woosh.NAME, 'if', 219, 4, 219, 6),
woosh.Token(woosh.NAME, 'val', 219, 7, 219, 10),
woosh.Token(woosh.OP, '<', 219, 11, 219, 12),
woosh.Token(woosh.NUMBER, '0', 219, 13, 219, 14),
woosh.Token(woosh.OP, ':', 219, 14, 219, 15),
woosh.Token(woosh.NEWLINE, '\r\n', 219, 15, 220, 0),
woosh.Token(woosh.INDENT, ' ', 220, 0, 220, 8),
woosh.Token(woosh.NAME, 'msg', 220, 8, 220, 11),
woosh.Token(woosh.OP, '=', 220, 12, 220, 13),
woosh.Token(woosh.STRING, "'__length_hint__() should return >= 0'", 220, 14, 220, 52),
woosh.Token(woosh.NEWLINE, '\r\n', 220, 52, 221, 0),
woosh.Token(woosh.NAME, 'raise', 221, 8, 221, 13),
woosh.Token(woosh.NAME, 'ValueError', 221, 14, 221, 24),
woosh.Token(woosh.OP, '(', 221, 24, 221, 25),
woosh.Token(woosh.NAME, 'msg', 221, 25, 221, 28),
woosh.Token(woosh.OP, ')', 221, 28, 221, 29),
woosh.Token(woosh.NEWLINE, '\r\n', 221, 29, 222, 0),
woosh.Token(woosh.DEDENT, ' ', 222, 0, 222, 4),
woosh.Token(woosh.NAME, 'return', 222, 4, 222, 10),
woosh.Token(woosh.NAME, 'val', 222, 11, 222, 14),
woosh.Token(woosh.NEWLINE, '\r\n', 222, 14, 223, 0),
woosh.Token(woosh.COMMENT, '# Generalized Lookup Objects **************************************************#', 224, 0, 224, 80),
woosh.Token(woosh.DEDENT, '', 226, 0, 226, 0),
woosh.Token(woosh.NAME, 'class', 226, 0, 226, 5),
woosh.Token(woosh.NAME, 'attrgetter', 226, 6, 226, 16),
woosh.Token(woosh.OP, ':', 226, 16, 226, 17),
woosh.Token(woosh.NEWLINE, '\r\n', 226, 17, 227, 0),
woosh.Token(woosh.INDENT, ' ', 227, 0, 227, 4),
woosh.Token(woosh.STRING, '"""\r\n Return a callable object that fetches the given attribute(s) from its operand.\r\n After f = attrgetter(\'name\'), the call f(r) returns r.name.\r\n After g = attrgetter(\'name\', \'date\'), the call g(r) returns (r.name, r.date).\r\n After h = attrgetter(\'name.first\', \'name.last\'), the call h(r) returns\r\n (r.name.first, r.name.last).\r\n """', 227, 4, 233, 7),
woosh.Token(woosh.NEWLINE, '\r\n', 233, 7, 234, 0),
woosh.Token(woosh.NAME, '__slots__', 234, 4, 234, 13),
woosh.Token(woosh.OP, '=', 234, 14, 234, 15),
woosh.Token(woosh.OP, '(', 234, 16, 234, 17),
woosh.Token(woosh.STRING, "'_attrs'", 234, 17, 234, 25),
woosh.Token(woosh.OP, ',', 234, 25, 234, 26),
woosh.Token(woosh.STRING, "'_call'", 234, 27, 234, 34),
woosh.Token(woosh.OP, ')', 234, 34, 234, 35),
woosh.Token(woosh.NEWLINE, '\r\n', 234, 35, 235, 0),
woosh.Token(woosh.NAME, 'def', 236, 4, 236, 7),
woosh.Token(woosh.NAME, '__init__', 236, 8, 236, 16),
woosh.Token(woosh.OP, '(', 236, 16, 236, 17),
woosh.Token(woosh.NAME, 'self', 236, 17, 236, 21),
woosh.Token(woosh.OP, ',', 236, 21, 236, 22),
woosh.Token(woosh.NAME, 'attr', 236, 23, 236, 27),
woosh.Token(woosh.OP, ',', 236, 27, 236, 28),
woosh.Token(woosh.OP, '*', 236, 29, 236, 30),
woosh.Token(woosh.NAME, 'attrs', 236, 30, 236, 35),
woosh.Token(woosh.OP, ')', 236, 35, 236, 36),
woosh.Token(woosh.OP, ':', 236, 36, 236, 37),
woosh.Token(woosh.NEWLINE, '\r\n', 236, 37, 237, 0),
woosh.Token(woosh.INDENT, ' ', 237, 0, 237, 8),
woosh.Token(woosh.NAME, 'if', 237, 8, 237, 10),
woosh.Token(woosh.NAME, 'not', 237, 11, 237, 14),
woosh.Token(woosh.NAME, 'attrs', 237, 15, 237, 20),
woosh.Token(woosh.OP, ':', 237, 20, 237, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 237, 21, 238, 0),
woosh.Token(woosh.INDENT, ' ', 238, 0, 238, 12),
woosh.Token(woosh.NAME, 'if', 238, 12, 238, 14),
woosh.Token(woosh.NAME, 'not', 238, 15, 238, 18),
woosh.Token(woosh.NAME, 'isinstance', 238, 19, 238, 29),
woosh.Token(woosh.OP, '(', 238, 29, 238, 30),
woosh.Token(woosh.NAME, 'attr', 238, 30, 238, 34),
woosh.Token(woosh.OP, ',', 238, 34, 238, 35),
woosh.Token(woosh.NAME, 'str', 238, 36, 238, 39),
woosh.Token(woosh.OP, ')', 238, 39, 238, 40),
woosh.Token(woosh.OP, ':', 238, 40, 238, 41),
woosh.Token(woosh.NEWLINE, '\r\n', 238, 41, 239, 0),
woosh.Token(woosh.INDENT, ' ', 239, 0, 239, 16),
woosh.Token(woosh.NAME, 'raise', 239, 16, 239, 21),
woosh.Token(woosh.NAME, 'TypeError', 239, 22, 239, 31),
woosh.Token(woosh.OP, '(', 239, 31, 239, 32),
woosh.Token(woosh.STRING, "'attribute name must be a string'", 239, 32, 239, 65),
woosh.Token(woosh.OP, ')', 239, 65, 239, 66),
woosh.Token(woosh.NEWLINE, '\r\n', 239, 66, 240, 0),
woosh.Token(woosh.DEDENT, ' ', 240, 0, 240, 12),
woosh.Token(woosh.NAME, 'self', 240, 12, 240, 16),
woosh.Token(woosh.OP, '.', 240, 16, 240, 17),
woosh.Token(woosh.NAME, '_attrs', 240, 17, 240, 23),
woosh.Token(woosh.OP, '=', 240, 24, 240, 25),
woosh.Token(woosh.OP, '(', 240, 26, 240, 27),
woosh.Token(woosh.NAME, 'attr', 240, 27, 240, 31),
woosh.Token(woosh.OP, ',', 240, 31, 240, 32),
woosh.Token(woosh.OP, ')', 240, 32, 240, 33),
woosh.Token(woosh.NEWLINE, '\r\n', 240, 33, 241, 0),
woosh.Token(woosh.NAME, 'names', 241, 12, 241, 17),
woosh.Token(woosh.OP, '=', 241, 18, 241, 19),
woosh.Token(woosh.NAME, 'attr', 241, 20, 241, 24),
woosh.Token(woosh.OP, '.', 241, 24, 241, 25),
woosh.Token(woosh.NAME, 'split', 241, 25, 241, 30),
woosh.Token(woosh.OP, '(', 241, 30, 241, 31),
woosh.Token(woosh.STRING, "'.'", 241, 31, 241, 34),
woosh.Token(woosh.OP, ')', 241, 34, 241, 35),
woosh.Token(woosh.NEWLINE, '\r\n', 241, 35, 242, 0),
woosh.Token(woosh.NAME, 'def', 242, 12, 242, 15),
woosh.Token(woosh.NAME, 'func', 242, 16, 242, 20),
woosh.Token(woosh.OP, '(', 242, 20, 242, 21),
woosh.Token(woosh.NAME, 'obj', 242, 21, 242, 24),
woosh.Token(woosh.OP, ')', 242, 24, 242, 25),
woosh.Token(woosh.OP, ':', 242, 25, 242, 26),
woosh.Token(woosh.NEWLINE, '\r\n', 242, 26, 243, 0),
woosh.Token(woosh.INDENT, ' ', 243, 0, 243, 16),
woosh.Token(woosh.NAME, 'for', 243, 16, 243, 19),
woosh.Token(woosh.NAME, 'name', 243, 20, 243, 24),
woosh.Token(woosh.NAME, 'in', 243, 25, 243, 27),
woosh.Token(woosh.NAME, 'names', 243, 28, 243, 33),
woosh.Token(woosh.OP, ':', 243, 33, 243, 34),
woosh.Token(woosh.NEWLINE, '\r\n', 243, 34, 244, 0),
woosh.Token(woosh.INDENT, ' ', 244, 0, 244, 20),
woosh.Token(woosh.NAME, 'obj', 244, 20, 244, 23),
woosh.Token(woosh.OP, '=', 244, 24, 244, 25),
woosh.Token(woosh.NAME, 'getattr', 244, 26, 244, 33),
woosh.Token(woosh.OP, '(', 244, 33, 244, 34),
woosh.Token(woosh.NAME, 'obj', 244, 34, 244, 37),
woosh.Token(woosh.OP, ',', 244, 37, 244, 38),
woosh.Token(woosh.NAME, 'name', 244, 39, 244, 43),
woosh.Token(woosh.OP, ')', 244, 43, 244, 44),
woosh.Token(woosh.NEWLINE, '\r\n', 244, 44, 245, 0),
woosh.Token(woosh.DEDENT, ' ', 245, 0, 245, 16),
woosh.Token(woosh.NAME, 'return', 245, 16, 245, 22),
woosh.Token(woosh.NAME, 'obj', 245, 23, 245, 26),
woosh.Token(woosh.NEWLINE, '\r\n', 245, 26, 246, 0),
woosh.Token(woosh.DEDENT, ' ', 246, 0, 246, 12),
woosh.Token(woosh.NAME, 'self', 246, 12, 246, 16),
woosh.Token(woosh.OP, '.', 246, 16, 246, 17),
woosh.Token(woosh.NAME, '_call', 246, 17, 246, 22),
woosh.Token(woosh.OP, '=', 246, 23, 246, 24),
woosh.Token(woosh.NAME, 'func', 246, 25, 246, 29),
woosh.Token(woosh.NEWLINE, '\r\n', 246, 29, 247, 0),
woosh.Token(woosh.DEDENT, ' ', 247, 0, 247, 8),
woosh.Token(woosh.NAME, 'else', 247, 8, 247, 12),
woosh.Token(woosh.OP, ':', 247, 12, 247, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 247, 13, 248, 0),
woosh.Token(woosh.INDENT, ' ', 248, 0, 248, 12),
woosh.Token(woosh.NAME, 'self', 248, 12, 248, 16),
woosh.Token(woosh.OP, '.', 248, 16, 248, 17),
woosh.Token(woosh.NAME, '_attrs', 248, 17, 248, 23),
woosh.Token(woosh.OP, '=', 248, 24, 248, 25),
woosh.Token(woosh.OP, '(', 248, 26, 248, 27),
woosh.Token(woosh.NAME, 'attr', 248, 27, 248, 31),
woosh.Token(woosh.OP, ',', 248, 31, 248, 32),
woosh.Token(woosh.OP, ')', 248, 32, 248, 33),
woosh.Token(woosh.OP, '+', 248, 34, 248, 35),
woosh.Token(woosh.NAME, 'attrs', 248, 36, 248, 41),
woosh.Token(woosh.NEWLINE, '\r\n', 248, 41, 249, 0),
woosh.Token(woosh.NAME, 'getters', 249, 12, 249, 19),
woosh.Token(woosh.OP, '=', 249, 20, 249, 21),
woosh.Token(woosh.NAME, 'tuple', 249, 22, 249, 27),
woosh.Token(woosh.OP, '(', 249, 27, 249, 28),
woosh.Token(woosh.NAME, 'map', 249, 28, 249, 31),
woosh.Token(woosh.OP, '(', 249, 31, 249, 32),
woosh.Token(woosh.NAME, 'attrgetter', 249, 32, 249, 42),
woosh.Token(woosh.OP, ',', 249, 42, 249, 43),
woosh.Token(woosh.NAME, 'self', 249, 44, 249, 48),
woosh.Token(woosh.OP, '.', 249, 48, 249, 49),
woosh.Token(woosh.NAME, '_attrs', 249, 49, 249, 55),
woosh.Token(woosh.OP, ')', 249, 55, 249, 56),
woosh.Token(woosh.OP, ')', 249, 56, 249, 57),
woosh.Token(woosh.NEWLINE, '\r\n', 249, 57, 250, 0),
woosh.Token(woosh.NAME, 'def', 250, 12, 250, 15),
woosh.Token(woosh.NAME, 'func', 250, 16, 250, 20),
woosh.Token(woosh.OP, '(', 250, 20, 250, 21),
woosh.Token(woosh.NAME, 'obj', 250, 21, 250, 24),
woosh.Token(woosh.OP, ')', 250, 24, 250, 25),
woosh.Token(woosh.OP, ':', 250, 25, 250, 26),
woosh.Token(woosh.NEWLINE, '\r\n', 250, 26, 251, 0),
woosh.Token(woosh.INDENT, ' ', 251, 0, 251, 16),
woosh.Token(woosh.NAME, 'return', 251, 16, 251, 22),
woosh.Token(woosh.NAME, 'tuple', 251, 23, 251, 28),
woosh.Token(woosh.OP, '(', 251, 28, 251, 29),
woosh.Token(woosh.NAME, 'getter', 251, 29, 251, 35),
woosh.Token(woosh.OP, '(', 251, 35, 251, 36),
woosh.Token(woosh.NAME, 'obj', 251, 36, 251, 39),
woosh.Token(woosh.OP, ')', 251, 39, 251, 40),
woosh.Token(woosh.NAME, 'for', 251, 41, 251, 44),
woosh.Token(woosh.NAME, 'getter', 251, 45, 251, 51),
woosh.Token(woosh.NAME, 'in', 251, 52, 251, 54),
woosh.Token(woosh.NAME, 'getters', 251, 55, 251, 62),
woosh.Token(woosh.OP, ')', 251, 62, 251, 63),
woosh.Token(woosh.NEWLINE, '\r\n', 251, 63, 252, 0),
woosh.Token(woosh.DEDENT, ' ', 252, 0, 252, 12),
woosh.Token(woosh.NAME, 'self', 252, 12, 252, 16),
woosh.Token(woosh.OP, '.', 252, 16, 252, 17),
woosh.Token(woosh.NAME, '_call', 252, 17, 252, 22),
woosh.Token(woosh.OP, '=', 252, 23, 252, 24),
woosh.Token(woosh.NAME, 'func', 252, 25, 252, 29),
woosh.Token(woosh.NEWLINE, '\r\n', 252, 29, 253, 0),
woosh.Token(woosh.DEDENT, ' ', 254, 0, 254, 4),
woosh.Token(woosh.DEDENT, '', 254, 4, 254, 4),
woosh.Token(woosh.NAME, 'def', 254, 4, 254, 7),
woosh.Token(woosh.NAME, '__call__', 254, 8, 254, 16),
woosh.Token(woosh.OP, '(', 254, 16, 254, 17),
woosh.Token(woosh.NAME, 'self', 254, 17, 254, 21),
woosh.Token(woosh.OP, ',', 254, 21, 254, 22),
woosh.Token(woosh.NAME, 'obj', 254, 23, 254, 26),
woosh.Token(woosh.OP, ')', 254, 26, 254, 27),
woosh.Token(woosh.OP, ':', 254, 27, 254, 28),
woosh.Token(woosh.NEWLINE, '\r\n', 254, 28, 255, 0),
woosh.Token(woosh.INDENT, ' ', 255, 0, 255, 8),
woosh.Token(woosh.NAME, 'return', 255, 8, 255, 14),
woosh.Token(woosh.NAME, 'self', 255, 15, 255, 19),
woosh.Token(woosh.OP, '.', 255, 19, 255, 20),
woosh.Token(woosh.NAME, '_call', 255, 20, 255, 25),
woosh.Token(woosh.OP, | |
"""Tests for op_handler_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
from morph_net.framework import op_handler_util
from morph_net.framework import op_regularizer_manager as orm
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
layers = tf.contrib.layers
class OpHandlerUtilTest(tf.test.TestCase):
def _batch_norm_scope(self):
params = {
'trainable': True,
'normalizer_fn': layers.batch_norm,
'normalizer_params': {
'scale': True,
},
}
with arg_scope([layers.conv2d], **params) as sc:
return sc
def setUp(self):
tf.reset_default_graph()
# This tests a Conv2D -> BatchNorm -> ReLU chain of ops.
with tf.contrib.framework.arg_scope(self._batch_norm_scope()):
inputs = tf.zeros([2, 4, 4, 3])
layers.conv2d(inputs, num_outputs=5, kernel_size=3, scope='conv1')
# This tests 3 Conv2D ops being concatenated before a batch normalization.
c2 = layers.conv2d(inputs, num_outputs=5, kernel_size=3, scope='conv2')
c3 = layers.conv2d(inputs, num_outputs=6, kernel_size=3, scope='conv3')
c4 = layers.conv2d(inputs, num_outputs=7, kernel_size=3, scope='conv4')
net = tf.concat([c2, c3, c4], axis=3)
layers.batch_norm(net)
g = tf.get_default_graph()
# Declare OpSlice and OpGroup for ops in the first test network.
self.batch_norm_op = g.get_operation_by_name(
'conv1/BatchNorm/FusedBatchNormV3')
self.batch_norm_op_slice = orm.OpSlice(self.batch_norm_op, None)
self.batch_norm_op_group = orm.OpGroup(self.batch_norm_op_slice)
self.conv_op = g.get_operation_by_name('conv1/Conv2D')
self.conv_op_slice = orm.OpSlice(self.conv_op, None)
self.conv_op_group = orm.OpGroup(
self.conv_op_slice, omit_source_op_slices=[self.conv_op_slice])
self.gamma_op = g.get_operation_by_name('conv1/BatchNorm/gamma/read')
self.beta_op = g.get_operation_by_name('conv1/BatchNorm/beta/read')
self.decay_op = g.get_operation_by_name('conv1/BatchNorm/Const')
self.epsilon_op = g.get_operation_by_name('conv1/BatchNorm/Const_1')
self.mean_op = g.get_operation_by_name(
'conv1/BatchNorm/AssignMovingAvg/sub_1')
self.std_op = g.get_operation_by_name(
'conv1/BatchNorm/AssignMovingAvg_1/sub_1')
self.relu_op = g.get_operation_by_name('conv1/Relu')
self.relu_op_slice = orm.OpSlice(self.relu_op, None)
self.relu_op_group = orm.OpGroup(
self.relu_op_slice, omit_source_op_slices=[self.relu_op_slice])
# Declare OpSlice and OpGroup for ops in the second test network.
self.relu2_op = g.get_operation_by_name('conv2/Relu')
self.relu2_op_slice = orm.OpSlice(self.relu2_op, orm.Slice(0, 5))
self.relu2_op_group = orm.OpGroup(
self.relu2_op_slice, omit_source_op_slices=[self.relu2_op_slice])
self.relu3_op = g.get_operation_by_name('conv3/Relu')
self.relu3_op_slice = orm.OpSlice(self.relu3_op, orm.Slice(0, 6))
self.relu3_op_group = orm.OpGroup(
self.relu3_op_slice, omit_source_op_slices=[self.relu3_op_slice])
self.relu4_op = g.get_operation_by_name('conv4/Relu')
self.relu4_op_slice = orm.OpSlice(self.relu4_op, orm.Slice(0, 7))
self.relu4_op_group = orm.OpGroup(
self.relu4_op_slice, omit_source_op_slices=[self.relu4_op_slice])
self.unfused_batch_norm_op = g.get_operation_by_name(
'BatchNorm/FusedBatchNormV3')
self.unfused_batch_norm_op_slice = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(0, 18))
self.concat_op = g.get_operation_by_name('concat')
self.concat_op_slice = orm.OpSlice(self.concat_op, orm.Slice(0, 18))
self.concat_op_group = orm.OpGroup(
self.concat_op_slice, omit_source_op_slices=[self.concat_op_slice])
# Create mock OpRegularizerManager with custom mapping of OpSlice and
# OpGroup.
self.mock_op_reg_manager = mock.create_autospec(orm.OpRegularizerManager)
def get_op_slices(op):
return self.op_slice_dict.get(op, [])
def get_op_group(op_slice):
return self.op_group_dict.get(op_slice)
def is_passthrough(op):
return op in self._passthrough_ops
self.mock_op_reg_manager.get_op_slices.side_effect = get_op_slices
self.mock_op_reg_manager.get_op_group.side_effect = get_op_group
self.mock_op_reg_manager.is_passthrough.side_effect = is_passthrough
self.mock_op_reg_manager.ops = [
self.batch_norm_op, self.gamma_op, self.beta_op, self.decay_op,
self.epsilon_op, self.mean_op, self.std_op, self.conv_op, self.relu_op,
self.relu2_op, self.relu3_op, self.relu4_op, self.unfused_batch_norm_op,
self.concat_op]
def testGetInputOps(self):
# For batch norm, the expected inputs are Conv2D, gamma, and beta. The
# decay and epsilon are excluded because they are scalars.
expected_inputs = [self.conv_op, self.gamma_op, self.beta_op]
# Check for expected input ops.
input_ops = op_handler_util.get_input_ops(self.batch_norm_op,
self.mock_op_reg_manager)
self.assertEqual(expected_inputs, input_ops)
self.assertNotIn(self.decay_op, input_ops)
self.assertNotIn(self.epsilon_op, input_ops)
def testGetOutputOps(self):
# For batch norm, the expected outputs are mean, std, and ReLU.
expected_outputs = [self.relu_op, self.mean_op, self.std_op]
# Check for expected output ops.
self.assertEqual(
expected_outputs,
op_handler_util.get_output_ops(self.batch_norm_op,
self.mock_op_reg_manager))
def testGetOpsWithoutGroups(self):
# For a list of ops, verify that ops without groups are returned.
self.op_slice_dict = {
self.batch_norm_op: [self.batch_norm_op_slice],
self.conv_op: [self.conv_op_slice],
self.gamma_op: [orm.OpSlice(self.gamma_op, None)],
self.beta_op: [orm.OpSlice(self.beta_op, None)],
self.decay_op: [orm.OpSlice(self.decay_op, None)],
self.epsilon_op: [orm.OpSlice(self.epsilon_op, None)],
}
# Only batch norm and conv ops have groups.
self.op_group_dict = {
self.batch_norm_op_slice: self.batch_norm_op_group,
self.conv_op_slice: self.conv_op_group
}
all_ops = [self.batch_norm_op, self.conv_op, self.gamma_op, self.beta_op,
self.decay_op, self.epsilon_op]
# Batch norm and conv ops have groups. The other ops do not have groups.
expected_ops = [self.gamma_op, self.beta_op, self.decay_op, self.epsilon_op]
self.assertEqual(
expected_ops,
op_handler_util.get_ops_without_groups(
all_ops, self.mock_op_reg_manager))
def testRemoveNonPassthroughOps(self):
self._passthrough_ops = (self.gamma_op, self.decay_op, self.std_op)
all_ops = [self.batch_norm_op, self.conv_op, self.gamma_op, self.beta_op,
self.decay_op, self.epsilon_op, self.mean_op]
expected_ops = [self.gamma_op, self.decay_op]
self.assertListEqual(
expected_ops,
op_handler_util.remove_non_passthrough_ops(all_ops,
self.mock_op_reg_manager))
def testGroupOpWithInputsAndOutputs_SingleSlice(self):
# For the single slice case, verify that batch norm is grouped with its
# output (ReLU) and its input (Conv2D).
aligned_op_slice_sizes = [5]
self.op_slice_dict = {
self.batch_norm_op: [self.batch_norm_op_slice],
self.conv_op: [self.conv_op_slice],
self.relu_op: [self.relu_op_slice]
}
# All ops have groups.
self.op_group_dict = {
self.batch_norm_op_slice: self.batch_norm_op_group,
self.conv_op_slice: self.conv_op_group,
self.relu_op_slice: self.relu_op_group
}
ops_grouped = op_handler_util.group_op_with_inputs_and_outputs(
self.batch_norm_op, [[self.conv_op_slice]], [[self.relu_op_slice]],
aligned_op_slice_sizes, self.mock_op_reg_manager)
# Verify manager looks up op slice for ops of interest.
self.mock_op_reg_manager.get_op_slices.assert_any_call(self.batch_norm_op)
# Verify manager groups batch norm with Conv2D and ReLU ops.
self.assertTrue(ops_grouped)
self.mock_op_reg_manager.group_op_slices.assert_has_calls(
[mock.call([self.batch_norm_op_slice, self.relu_op_slice]),
mock.call([self.batch_norm_op_slice, self.conv_op_slice])])
def testGroupOpWithInputsAndOutputs_MultipleSlices(self):
# For the multiple slice case, verify that batch norm slices are grouped
# with output slices (ReLU) and input slices (Conv2D).
    batch_norm_op_slice_0_2 = orm.OpSlice(
        self.batch_norm_op, orm.Slice(0, 2))
    batch_norm_op_slice_2_5 = orm.OpSlice(
        self.batch_norm_op, orm.Slice(2, 3))
batch_norm_op_group1 = orm.OpGroup(
batch_norm_op_slice_0_2)
batch_norm_op_group2 = orm.OpGroup(
batch_norm_op_slice_2_5)
    conv_op_slice_0_2 = orm.OpSlice(
        self.conv_op, orm.Slice(0, 2))
    conv_op_slice_2_5 = orm.OpSlice(
        self.conv_op, orm.Slice(2, 3))
conv_op_group1 = orm.OpGroup(
conv_op_slice_0_2, omit_source_op_slices=[conv_op_slice_0_2])
conv_op_group2 = orm.OpGroup(
conv_op_slice_2_5, omit_source_op_slices=[conv_op_slice_2_5])
    relu_op_slice_0_2 = orm.OpSlice(
        self.relu_op, orm.Slice(0, 2))
    relu_op_slice_2_5 = orm.OpSlice(
        self.relu_op, orm.Slice(2, 3))
relu_op_group1 = orm.OpGroup(relu_op_slice_0_2)
relu_op_group2 = orm.OpGroup(relu_op_slice_2_5)
aligned_op_slice_sizes = [2, 3]
self.op_slice_dict = {
self.batch_norm_op: [batch_norm_op_slice_0_2, batch_norm_op_slice_2_5],
self.conv_op: [conv_op_slice_0_2, conv_op_slice_2_5],
self.relu_op: [relu_op_slice_0_2, relu_op_slice_2_5],
}
# All ops have groups.
self.op_group_dict = {
batch_norm_op_slice_0_2: batch_norm_op_group1,
batch_norm_op_slice_2_5: batch_norm_op_group2,
conv_op_slice_0_2: conv_op_group1,
conv_op_slice_2_5: conv_op_group2,
relu_op_slice_0_2: relu_op_group1,
relu_op_slice_2_5: relu_op_group2,
}
ops_grouped = op_handler_util.group_op_with_inputs_and_outputs(
self.batch_norm_op, [[conv_op_slice_0_2, conv_op_slice_2_5]],
[[relu_op_slice_0_2, relu_op_slice_2_5]], aligned_op_slice_sizes,
self.mock_op_reg_manager)
# Verify manager looks up op slice for ops of interest.
self.mock_op_reg_manager.get_op_slices.assert_any_call(self.batch_norm_op)
# Verify manager groups batch norm with Conv2D and ReLU ops.
self.assertTrue(ops_grouped)
self.mock_op_reg_manager.group_op_slices.assert_has_calls(
[mock.call([batch_norm_op_slice_0_2, relu_op_slice_0_2]),
mock.call([batch_norm_op_slice_0_2, conv_op_slice_0_2]),
mock.call([batch_norm_op_slice_2_5, relu_op_slice_2_5]),
mock.call([batch_norm_op_slice_2_5, conv_op_slice_2_5])])
def testGetConcatInputOpSlices(self):
# For concat, the input op slices are the concatenation of op slices of each
# input op.
# Map ops to slices.
self.op_slice_dict = {
self.relu2_op: [self.relu2_op_slice],
self.relu3_op: [self.relu3_op_slice],
self.relu4_op: [self.relu4_op_slice],
}
# The concat input is relu2, relu3, and relu4.
expected_input_op_slices = [
[self.relu2_op_slice, self.relu3_op_slice, self.relu4_op_slice]]
input_ops = op_handler_util.get_input_ops(
self.concat_op, self.mock_op_reg_manager)
self.assertEqual(
expected_input_op_slices,
op_handler_util.get_concat_input_op_slices(
input_ops, self.mock_op_reg_manager))
def testGetOpSlices(self):
# Generic ops are treated as a concatenation of their constituent OpSlice.
batch_norm_op_slice_0_5 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(0, 5))
batch_norm_op_slice_5_11 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(5, 6))
batch_norm_op_slice_11_18 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(11, 7))
# Map ops to slices.
self.op_slice_dict = {
self.unfused_batch_norm_op: [
batch_norm_op_slice_0_5, batch_norm_op_slice_5_11,
batch_norm_op_slice_11_18],
}
# A nested list composed of a list of OpSlice for each output op. In this
# case, there is just one output op (i.e. batch norm).
expected_output_op_slices = [[
batch_norm_op_slice_0_5,
batch_norm_op_slice_5_11,
batch_norm_op_slice_11_18]]
output_ops = op_handler_util.get_output_ops(
self.concat_op, self.mock_op_reg_manager)
self.assertEqual(
expected_output_op_slices,
op_handler_util.get_op_slices(output_ops, self.mock_op_reg_manager))
def testGetOpSlices_FilterEmptySlices(self):
# No slices are mapped to ops.
self.op_slice_dict = {}
# Verify that empty slices are removed.
input_ops = op_handler_util.get_input_ops(
self.batch_norm_op, self.mock_op_reg_manager)
self.assertListEqual([], op_handler_util.get_op_slices(
input_ops, self.mock_op_reg_manager))
def testGetOpSliceSizes(self):
    relu3_op_slice_0_3 = orm.OpSlice(
        self.relu3_op, orm.Slice(0, 3))
    relu3_op_slice_3_6 = orm.OpSlice(
        self.relu3_op, orm.Slice(3, 3))
batch_norm_op_slice_0_5 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(0, 5))
batch_norm_op_slice_5_8 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(5, 3))
batch_norm_op_slice_8_11 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(8, 3))
batch_norm_op_slice_11_18 = orm.OpSlice(
self.unfused_batch_norm_op, orm.Slice(11, 7))
# Map ops to slices.
self.op_slice_dict = {
self.relu2_op: [self.relu2_op_slice],
self.relu3_op: [relu3_op_slice_0_3, relu3_op_slice_3_6],
self.relu4_op: [self.relu4_op_slice],
self.unfused_batch_norm_op: [
batch_norm_op_slice_0_5, batch_norm_op_slice_5_8,
batch_norm_op_slice_8_11, batch_norm_op_slice_11_18],
}
expected_op_slice_sizes = [
[5], # c2 has size 5.
[3, 3], # c3 has size 6, but in 2 slices of size 3.
[7], # c4 has size 7.
[5, 3, 3, 7]] # batch norm has size 18, but slice sizes of c1, c2, c3.
self.assertEqual(
expected_op_slice_sizes,
op_handler_util.get_op_slice_sizes([
[self.relu2_op_slice],
[relu3_op_slice_0_3, relu3_op_slice_3_6],
[self.relu4_op_slice],
[batch_norm_op_slice_0_5, batch_norm_op_slice_5_8,
batch_norm_op_slice_8_11, batch_norm_op_slice_11_18]]))
def testGetAlignedOpSliceSizes(self):
expected_op_slice_sizes = [5, 4, 2, 2, 5]
self.assertEqual(
expected_op_slice_sizes,
op_handler_util.get_aligned_sizes([
[5, 4, 2, 7],
[9, 4, 5],
[18]]))
expected_op_slice_sizes = [1, 2, 2, 1, 3, 1, 2, 2, 1]
self.assertEqual(
expected_op_slice_sizes,
op_handler_util.get_aligned_sizes([
[1, 2, 3, 4, 5],
[5, 4, 3, 2, 1]]))
expected_op_slice_sizes = [1, 1, 1, 1, 1]
self.assertEqual(
expected_op_slice_sizes,
op_handler_util.get_aligned_sizes([
[5],
[1, 1, 1, 1, 1]]))
expected_op_slice_sizes = [10]
self.assertEqual(
expected_op_slice_sizes,
op_handler_util.get_aligned_sizes([[10]]))
# Raise exception for empty input.
with self.assertRaises(ValueError):
op_handler_util.get_aligned_sizes([])
# Raise exception if total sizes do not match.
with self.assertRaises(ValueError):
op_handler_util.get_aligned_sizes([[1, 2], [4]])
def testGetNumSlices(self):
self.assertEqual(
5, op_handler_util._get_num_slices([[1, 2, 3, 4, 5], [6, 7], [8]]))
self.assertEqual(
2, op_handler_util._get_num_slices([[6, 7], [8]]))
self.assertEqual(
1, op_handler_util._get_num_slices([[8]]))
def testResliceConcatOps_Aligned(self):
# Map ops to slices.
self.op_slice_dict = {
self.relu2_op: [self.relu2_op_slice],
self.relu3_op: [self.relu3_op_slice],
self.relu4_op: [self.relu4_op_slice],
}
op_handler_util.reslice_concat_ops(
[self.relu2_op, self.relu3_op, self.relu4_op],
[5, 6, 7], self.mock_op_reg_manager)
# Verify manager does not slice any ops.
self.mock_op_reg_manager.slice_op.assert_not_called()
def testResliceConcatOps_NotAligned(self):
relu3_op_slice_0_3 = orm.OpSlice(
self.relu3_op, orm.Slice(0, 3))
relu3_op_slice_3_6 = orm.OpSlice(
self.relu3_op, orm.Slice(3, 3))
# Map ops to slices. The op c3 is composed of multiple slices.
self.op_slice_dict = {
self.relu2_op: [self.relu2_op_slice],
self.relu3_op: [relu3_op_slice_0_3, relu3_op_slice_3_6],
self.relu4_op: [self.relu4_op_slice],
}
op_handler_util.reslice_concat_ops(
[self.relu2_op, self.relu3_op, self.relu4_op],
[5, 4, 2, 2, 5], self.mock_op_reg_manager)
# Verify manager slices input ops.
self.mock_op_reg_manager.slice_op.assert_has_calls(
[mock.call(self.relu3_op, [4, 2]),
mock.call(self.relu4_op, [2, 5])])
def testGetTotalSliceSize(self):
op_slice_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertEqual(
15, op_handler_util.get_total_slice_size(op_slice_sizes, 0, 5))
self.assertEqual(
15, op_handler_util.get_total_slice_size(op_slice_sizes, 3, 3))
self.assertEqual(
30, op_handler_util.get_total_slice_size(op_slice_sizes, 5, 4))
self.assertEqual(
3, | |
"""keymaster Integration."""
from datetime import timedelta
import logging
from typing import Any, Dict
from openzwavemqtt.const import CommandClass
from openzwavemqtt.exceptions import NotFoundError, NotSupportedError
from openzwavemqtt.util.node import get_node_from_manager
import voluptuous as vol
from homeassistant.components.ozw import DOMAIN as OZW_DOMAIN
from homeassistant.components.persistent_notification import async_create, async_dismiss
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import (
Config,
CoreState,
Event,
HomeAssistant,
ServiceCall,
State,
)
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_NAME,
ATTR_NODE_ID,
ATTR_USER_CODE,
CHILD_LOCKS,
CONF_ALARM_LEVEL,
CONF_ALARM_LEVEL_OR_USER_CODE_ENTITY_ID,
CONF_ALARM_TYPE,
CONF_ALARM_TYPE_OR_ACCESS_CONTROL_ENTITY_ID,
CONF_CHILD_LOCKS_FILE,
CONF_ENTITY_ID,
CONF_GENERATE,
CONF_HIDE_PINS,
CONF_LOCK_ENTITY_ID,
CONF_LOCK_NAME,
CONF_PATH,
COORDINATOR,
DEFAULT_HIDE_PINS,
DOMAIN,
ISSUE_URL,
MANAGER,
PLATFORM,
PRIMARY_LOCK,
UNSUB_LISTENERS,
VERSION,
ZWAVE_NETWORK,
)
from .exceptions import NoNodeSpecifiedError, ZWaveIntegrationNotConfiguredError
from .helpers import (
async_reload_package_platforms,
delete_folder,
delete_lock_and_base_folder,
generate_keymaster_locks,
get_node_id,
handle_state_change,
handle_zwave_js_event,
using_ozw,
using_zwave,
using_zwave_js,
)
from .lock import KeymasterLock
from .services import add_code, clear_code, generate_package_files, refresh_codes
# TODO: At some point we should assume that users have upgraded to the latest
# Home Assistant instance and that we can safely import these, so we can move
# these back to standard imports at that point.
try:
from zwave_js_server.const import ATTR_CODE_SLOT, ATTR_IN_USE, ATTR_USERCODE
from zwave_js_server.util.lock import get_usercodes
    from homeassistant.components.zwave_js import ZWAVE_JS_EVENT
except (ModuleNotFoundError, ImportError):
from openzwavemqtt.const import ATTR_CODE_SLOT
ATTR_IN_USE = "in_use"
ATTR_USERCODE = "usercode"
ZWAVE_JS_EVENT = "zwave_js_event"
_LOGGER = logging.getLogger(__name__)
SERVICE_GENERATE_PACKAGE = "generate_package"
SERVICE_ADD_CODE = "add_code"
SERVICE_CLEAR_CODE = "clear_code"
SERVICE_REFRESH_CODES = "refresh_codes"
SET_USERCODE = "set_usercode"
CLEAR_USERCODE = "clear_usercode"
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Disallow configuration via YAML."""
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up is called when Home Assistant is loading our component."""
hass.data.setdefault(DOMAIN, {})
_LOGGER.info(
"Version %s is starting, if you have any issues please report" " them here: %s",
VERSION,
ISSUE_URL,
)
should_generate_package = config_entry.data.get(CONF_GENERATE)
updated_config = config_entry.data.copy()
# pop CONF_GENERATE if it is in data
updated_config.pop(CONF_GENERATE, None)
# If CONF_PATH is absolute, make it relative. This can be removed in the future,
# it is only needed for entries that are being migrated from using the old absolute
# path
config_path = hass.config.path()
if config_entry.data[CONF_PATH].startswith(config_path):
updated_config[CONF_PATH] = updated_config[CONF_PATH][len(config_path) :]
# Remove leading slashes
updated_config[CONF_PATH] = updated_config[CONF_PATH].lstrip("/").lstrip("\\")
if updated_config != config_entry.data:
hass.config_entries.async_update_entry(config_entry, data=updated_config)
config_entry.add_update_listener(update_listener)
primary_lock, child_locks = await generate_keymaster_locks(hass, config_entry)
hass.data[DOMAIN][config_entry.entry_id] = {
PRIMARY_LOCK: primary_lock,
CHILD_LOCKS: child_locks,
UNSUB_LISTENERS: [],
}
coordinator = LockUsercodeUpdateCoordinator(hass, config_entry)
hass.data[DOMAIN][config_entry.entry_id][COORDINATOR] = coordinator
# Button Press
async def _refresh_codes(service: ServiceCall) -> None:
"""Refresh lock codes."""
_LOGGER.debug("Refresh Codes service: %s", service)
entity_id = service.data[ATTR_ENTITY_ID]
instance_id = 1
await refresh_codes(hass, entity_id, instance_id)
hass.services.async_register(
DOMAIN,
SERVICE_REFRESH_CODES,
_refresh_codes,
schema=vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): vol.Coerce(str),
}
),
)
# Add code
async def _add_code(service: ServiceCall) -> None:
"""Set a user code."""
_LOGGER.debug("Add Code service: %s", service)
entity_id = service.data[ATTR_ENTITY_ID]
code_slot = service.data[ATTR_CODE_SLOT]
usercode = service.data[ATTR_USER_CODE]
await add_code(hass, entity_id, code_slot, usercode)
hass.services.async_register(
DOMAIN,
SERVICE_ADD_CODE,
_add_code,
schema=vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): vol.Coerce(str),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
vol.Required(ATTR_USER_CODE): vol.Coerce(str),
}
),
)
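    # Example (sketch, not part of the original integration): the service
    # registered above can be invoked with data matching its schema; the
    # entity id, slot and code values here are placeholders.
    #
    #     await hass.services.async_call(
    #         DOMAIN,
    #         SERVICE_ADD_CODE,
    #         {ATTR_ENTITY_ID: "lock.front_door", ATTR_CODE_SLOT: 1,
    #          ATTR_USER_CODE: "1234"},
    #     )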
# Clear code
async def _clear_code(service: ServiceCall) -> None:
"""Clear a user code."""
_LOGGER.debug("Clear Code service: %s", service)
entity_id = service.data[ATTR_ENTITY_ID]
code_slot = service.data[ATTR_CODE_SLOT]
await clear_code(hass, entity_id, code_slot)
hass.services.async_register(
DOMAIN,
SERVICE_CLEAR_CODE,
_clear_code,
schema=vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): vol.Coerce(str),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
}
),
)
# Generate package files
def _generate_package(service: ServiceCall) -> None:
"""Generate the package files."""
_LOGGER.debug("DEBUG: %s", service)
name = service.data[ATTR_NAME]
generate_package_files(hass, name)
hass.services.async_register(
DOMAIN,
SERVICE_GENERATE_PACKAGE,
_generate_package,
schema=vol.Schema({vol.Optional(ATTR_NAME): vol.Coerce(str)}),
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, PLATFORM)
)
    # If the user turned on the boolean, generate the package files
if should_generate_package:
servicedata = {"lockname": primary_lock.lock_name}
await hass.services.async_call(DOMAIN, SERVICE_GENERATE_PACKAGE, servicedata)
def entity_state_listener(
changed_entity: str, old_state: State, new_state: State
) -> None:
"""Listener to handle state changes to lock entities."""
handle_state_change(hass, config_entry, changed_entity, old_state, new_state)
def zwave_js_event_listener(evt: Event):
"""Listener to handle Z-Wave JS events."""
handle_zwave_js_event(hass, config_entry, evt)
def homeassistant_started_listener(evt: Event = None):
"""Start tracking state changes after HomeAssistant has started."""
# Listen to lock state changes so we can fire an event
hass.data[DOMAIN][config_entry.entry_id][UNSUB_LISTENERS].append(
async_track_state_change(
hass, primary_lock.lock_entity_id, entity_state_listener
)
)
if using_zwave_js(hass):
            # Listen to Z-Wave JS events so we can fire our own events
hass.data[DOMAIN][config_entry.entry_id][UNSUB_LISTENERS].append(
hass.bus.async_listen(ZWAVE_JS_EVENT, zwave_js_event_listener)
)
    if hass.state == CoreState.running:
        homeassistant_started_listener()
else:
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, homeassistant_started_listener
)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Handle removal of an entry."""
lockname = config_entry.data[CONF_LOCK_NAME]
notification_id = f"{DOMAIN}_{lockname}_unload"
async_create(
hass,
(
f"Removing `{lockname}` and all of the files that were generated for it. "
"This may take some time so don't panic. This message will automatically "
"clear when removal is complete."
),
title=f"{DOMAIN.title()} - Removing `{lockname}`",
notification_id=notification_id,
)
unload_ok = await hass.config_entries.async_forward_entry_unload(
config_entry, PLATFORM
)
if unload_ok:
# Remove all package files and the base folder if needed
await hass.async_add_executor_job(
delete_lock_and_base_folder, hass, config_entry
)
await async_reload_package_platforms(hass)
        # Unsubscribe from any listeners
for unsub_listener in hass.data[DOMAIN][config_entry.entry_id].get(
UNSUB_LISTENERS, []
):
unsub_listener()
hass.data[DOMAIN][config_entry.entry_id].get(UNSUB_LISTENERS, []).clear()
hass.data[DOMAIN].pop(config_entry.entry_id)
async_dismiss(hass, notification_id)
return unload_ok
async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Migrate an old config entry."""
version = config_entry.version
# 1 -> 2: Migrate to new keys
if version == 1:
_LOGGER.debug("Migrating from version %s", version)
data = config_entry.data.copy()
data[CONF_ALARM_LEVEL_OR_USER_CODE_ENTITY_ID] = data.pop(CONF_ALARM_LEVEL, None)
data[CONF_ALARM_TYPE_OR_ACCESS_CONTROL_ENTITY_ID] = data.pop(
CONF_ALARM_TYPE, None
)
data[CONF_LOCK_ENTITY_ID] = data.pop(CONF_ENTITY_ID)
if CONF_HIDE_PINS not in data:
data[CONF_HIDE_PINS] = DEFAULT_HIDE_PINS
data[CONF_CHILD_LOCKS_FILE] = data.get(CONF_CHILD_LOCKS_FILE, "")
hass.config_entries.async_update_entry(entry=config_entry, data=data)
config_entry.version = 2
_LOGGER.debug("Migration to version %s complete", config_entry.version)
return True
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Update listener."""
# If the path has changed delete the old base folder, otherwise if the lock name
# has changed only delete the old lock folder
if config_entry.options[CONF_PATH] != config_entry.data[CONF_PATH]:
await hass.async_add_executor_job(
delete_folder, hass.config.path(), config_entry.data[CONF_PATH]
)
elif config_entry.options[CONF_LOCK_NAME] != config_entry.data[CONF_LOCK_NAME]:
await hass.async_add_executor_job(
delete_folder,
hass.config.path(),
config_entry.data[CONF_PATH],
config_entry.data[CONF_LOCK_NAME],
)
new_data = config_entry.options.copy()
new_data.pop(CONF_GENERATE, None)
hass.config_entries.async_update_entry(
entry=config_entry,
unique_id=config_entry.options[CONF_LOCK_NAME],
data=new_data,
)
primary_lock, child_locks = await generate_keymaster_locks(hass, config_entry)
hass.data[DOMAIN][config_entry.entry_id].update(
{
PRIMARY_LOCK: primary_lock,
CHILD_LOCKS: child_locks,
}
)
servicedata = {"lockname": primary_lock.lock_name}
await hass.services.async_call(DOMAIN, SERVICE_GENERATE_PACKAGE, servicedata)
def entity_state_listener(
changed_entity: str, old_state: State, new_state: State
) -> None:
"""Listener to track state changes to lock entities."""
handle_state_change(hass, config_entry, changed_entity, old_state, new_state)
    # Unsubscribe from any listeners so we can create new ones
for unsub_listener in hass.data[DOMAIN][config_entry.entry_id].get(
UNSUB_LISTENERS, []
):
unsub_listener()
hass.data[DOMAIN][config_entry.entry_id].get(UNSUB_LISTENERS, []).clear()
def zwave_js_event_listener(evt: Event):
"""Listener to handle Z-Wave JS events."""
handle_zwave_js_event(hass, config_entry, evt)
# Create new listeners for lock state changes
hass.data[DOMAIN][config_entry.entry_id][UNSUB_LISTENERS].append(
async_track_state_change(
hass, primary_lock.lock_entity_id, entity_state_listener
)
)
if using_zwave_js(hass):
hass.data[DOMAIN][config_entry.entry_id][UNSUB_LISTENERS].append(
hass.bus.async_listen(ZWAVE_JS_EVENT, zwave_js_event_listener)
)
class LockUsercodeUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage usercode updates."""
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
self._lock: KeymasterLock = hass.data[DOMAIN][config_entry.entry_id][
PRIMARY_LOCK
]
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=timedelta(seconds=5),
update_method=self.async_update_usercodes,
)
self.data = {}
def _invalid_code(self, code_slot):
"""Return the PIN slot value as we are unable to read the slot value
from the lock."""
_LOGGER.debug("Work around code in use.")
        # This is a failsafe and should not normally need to return ""
data = ""
# Build data from entities
enabled_bool = f"input_boolean.enabled_{self._lock.lock_name}_{code_slot}"
enabled = self.hass.states.get(enabled_bool)
pin_data = f"input_text.{self._lock.lock_name}_pin_{code_slot}"
pin = self.hass.states.get(pin_data)
# If slot is enabled return the PIN
if enabled is not None and pin is not None:
if enabled.state == "on" and pin.state.isnumeric():
_LOGGER.debug("Utilizing BE469 work around code.")
data = pin.state
else:
_LOGGER.debug("Utilizing FE599 work around code.")
data = ""
return data
async def async_update_usercodes(self) -> Dict[str, Any]:
"""Async wrapper to update usercodes."""
try:
return await self.hass.async_add_executor_job(self.update_usercodes)
except (
NotFoundError,
NotSupportedError,
NoNodeSpecifiedError,
ZWaveIntegrationNotConfiguredError,
) as err:
raise UpdateFailed from err
def update_usercodes(self) -> Dict[str, Any]:
"""Update usercodes."""
# loop to get user code data from entity_id node
instance_id = 1 # default
data = {CONF_LOCK_ENTITY_ID: self._lock.lock_entity_id}
# # make button call
# servicedata = {"entity_id": self._entity_id}
# await self.hass.services.async_call(DOMAIN, SERVICE_REFRESH_CODES, servicedata)
if using_zwave_js(self.hass):
node = self._lock.zwave_js_lock_node
code_slot = 1
for slot in get_usercodes(node):
code_slot = int(slot[ATTR_CODE_SLOT])
usercode = slot[ATTR_USERCODE]
if not slot[ATTR_IN_USE]:
_LOGGER.debug("DEBUG: Code slot %s not enabled", code_slot)
data[code_slot] = ""
elif usercode and "*" in str(usercode):
_LOGGER.debug(
"DEBUG: Ignoring code slot with * in value for code slot %s.",
code_slot,
)
data[code_slot] = self._invalid_code(code_slot)
else:
_LOGGER.debug("DEBUG: Code slot %s value: %s", code_slot, usercode)
data[code_slot] = usercode
return data
# pull the codes for ozw
elif using_ozw(self.hass):
node_id = get_node_id(self.hass, self._lock.lock_entity_id)
if node_id is None:
return data
data[ATTR_NODE_ID] = node_id
if data[ATTR_NODE_ID] is None:
raise NoNodeSpecifiedError
# Raises exception when node not found
try:
node = get_node_from_manager(
self.hass.data[OZW_DOMAIN][MANAGER],
instance_id,
data[ATTR_NODE_ID],
)
except NotFoundError:
return data
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import shutil
import tempfile
import time
import traceback
from collections import OrderedDict
# We don't have an explicit pathlib dependency because this code only works with
# the interactive target installed which has an indirect dependency on pathlib
# through ipython>=5.9.0.
from pathlib import Path
from google.protobuf.message import DecodeError
import apache_beam as beam
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileHeader
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive.cache_manager import CacheManager
from apache_beam.runners.interactive.cache_manager import SafeFastPrimitivesCoder
from apache_beam.testing.test_stream import OutputFormat
from apache_beam.testing.test_stream import ReverseTestStream
from apache_beam.utils import timestamp
_LOGGER = logging.getLogger(__name__)
class StreamingCacheSink(beam.PTransform):
"""A PTransform that writes TestStreamFile(Header|Records)s to file.
This transform takes in an arbitrary element stream and writes the list of
TestStream events (as TestStreamFileRecords) to file. When replayed, this
will produce the best-effort replay of the original job (e.g. some elements
may be produced slightly out of order from the original stream).
Note that this PTransform is assumed to be only run on a single machine where
the following assumptions are correct: elements come in ordered, no two
transforms are writing to the same file. This PTransform is assumed to only
run correctly with the DirectRunner.
TODO(BEAM-9447): Generalize this to more source/sink types aside from file
based. Also, generalize to cases where there might be multiple workers
writing to the same sink.
"""
def __init__(
self,
cache_dir,
filename,
sample_resolution_sec,
coder=SafeFastPrimitivesCoder()):
self._cache_dir = cache_dir
self._filename = filename
self._sample_resolution_sec = sample_resolution_sec
self._coder = coder
self._path = os.path.join(self._cache_dir, self._filename)
@property
def path(self):
"""Returns the path the sink leads to."""
return self._path
@property
def size_in_bytes(self):
"""Returns the space usage in bytes of the sink."""
try:
return os.stat(self._path).st_size
except OSError:
_LOGGER.debug(
'Failed to calculate cache size for file %s, the file might have not '
'been created yet. Return 0. %s',
self._path,
traceback.format_exc())
return 0
def expand(self, pcoll):
class StreamingWriteToText(beam.DoFn):
"""DoFn that performs the writing.
Note that the other file writing methods cannot be used in streaming
contexts.
"""
def __init__(self, full_path, coder=SafeFastPrimitivesCoder()):
self._full_path = full_path
self._coder = coder
# Try and make the given path.
Path(os.path.dirname(full_path)).mkdir(parents=True, exist_ok=True)
def start_bundle(self):
# Open the file for 'append-mode' and writing 'bytes'.
self._fh = open(self._full_path, 'ab')
def finish_bundle(self):
self._fh.close()
def process(self, e):
"""Appends the given element to the file.
"""
self._fh.write(self._coder.encode(e) + b'\n')
return (
pcoll
| ReverseTestStream(
output_tag=self._filename,
sample_resolution_sec=self._sample_resolution_sec,
output_format=OutputFormat.SERIALIZED_TEST_STREAM_FILE_RECORDS,
coder=self._coder)
| beam.ParDo(
StreamingWriteToText(full_path=self._path, coder=self._coder)))
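# A minimal usage sketch (not from the original module): applying the sink to
# an element stream inside a pipeline. The cache directory and label below are
# placeholder values.
#
#   _ = (elements
#        | 'CacheElements' >> StreamingCacheSink(
#            cache_dir='/tmp/interactive-cache',
#            filename='my_label',
#            sample_resolution_sec=0.1))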
class StreamingCacheSource:
"""A class that reads and parses TestStreamFile(Header|Reader)s.
This source operates in the following way:
1. Wait for up to `timeout_secs` for the file to be available.
2. Read, parse, and emit the entire contents of the file
3. Wait for more events to come or until `is_cache_complete` returns True
4. If there are more events, then go to 2
5. Otherwise, stop emitting.
  This class is used to read from file and send its events to the TestStream
  via the StreamingCache.Reader.
"""
def __init__(self, cache_dir, labels, is_cache_complete=None, coder=None):
if not coder:
coder = SafeFastPrimitivesCoder()
if not is_cache_complete:
is_cache_complete = lambda _: True
self._cache_dir = cache_dir
self._coder = coder
self._labels = labels
self._path = os.path.join(self._cache_dir, *self._labels)
self._is_cache_complete = is_cache_complete
from apache_beam.runners.interactive.pipeline_instrument import CacheKey
self._pipeline_id = CacheKey.from_str(labels[-1]).pipeline_id
def _wait_until_file_exists(self, timeout_secs=30):
"""Blocks until the file exists for a maximum of timeout_secs.
"""
# Wait for up to `timeout_secs` for the file to be available.
start = time.time()
while not os.path.exists(self._path):
time.sleep(1)
if time.time() - start > timeout_secs:
from apache_beam.runners.interactive.pipeline_instrument import CacheKey
pcollection_var = CacheKey.from_str(self._labels[-1]).var
raise RuntimeError(
'Timed out waiting for cache file for PCollection `{}` to be '
'available with path {}.'.format(pcollection_var, self._path))
return open(self._path, mode='rb')
def _emit_from_file(self, fh, tail):
"""Emits the TestStreamFile(Header|Record)s from file.
This returns a generator to be able to read all lines from the given file.
If `tail` is True, then it will wait until the cache is complete to exit.
Otherwise, it will read the file only once.
"""
# Always read at least once to read the whole file.
while True:
pos = fh.tell()
line = fh.readline()
# Check if we are at EOF or if we have an incomplete line.
if not line or (line and line[-1] != b'\n'[0]):
# Read at least the first line to get the header.
if not tail and pos != 0:
break
# Complete reading only when the cache is complete.
if self._is_cache_complete(self._pipeline_id):
break
# Otherwise wait for new data in the file to be written.
time.sleep(0.5)
fh.seek(pos)
else:
# The first line at pos = 0 is always the header. Read the line without
# the new line.
to_decode = line[:-1]
proto_cls = TestStreamFileHeader if pos == 0 else TestStreamFileRecord
msg = self._try_parse_as(proto_cls, to_decode)
if msg:
yield msg
else:
break
def _try_parse_as(self, proto_cls, to_decode):
try:
msg = proto_cls()
msg.ParseFromString(self._coder.decode(to_decode))
except DecodeError:
_LOGGER.error(
'Could not parse as %s. This can indicate that the cache is '
          'corrupted. Please restart the kernel. '
'\nfile: %s \nmessage: %s',
proto_cls,
self._path,
to_decode)
msg = None
return msg
def read(self, tail):
"""Reads all TestStreamFile(Header|TestStreamFileRecord)s from file.
This returns a generator to be able to read all lines from the given file.
If `tail` is True, then it will wait until the cache is complete to exit.
Otherwise, it will read the file only once.
"""
with self._wait_until_file_exists() as f:
for e in self._emit_from_file(f, tail):
yield e
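# A minimal usage sketch (not from the original module): reading cached events
# back. The labels below are placeholders; the last label is expected to be a
# CacheKey string, since its pipeline_id is parsed in __init__.
#
#   source = StreamingCacheSource(
#       cache_dir='/tmp/interactive-cache',
#       labels=['full', '<cache-key-string>'])
#   for msg in source.read(tail=False):
#     pass  # first a TestStreamFileHeader, then TestStreamFileRecords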
class StreamingCache(CacheManager):
"""Abstraction that holds the logic for reading and writing to cache.
"""
def __init__(
self, cache_dir, is_cache_complete=None, sample_resolution_sec=0.1):
self._sample_resolution_sec = sample_resolution_sec
self._is_cache_complete = is_cache_complete
if cache_dir:
self._cache_dir = cache_dir
else:
self._cache_dir = tempfile.mkdtemp(
prefix='interactive-temp-', dir=os.environ.get('TEST_TMPDIR', None))
# List of saved pcoders keyed by PCollection path. It is OK to keep this
# list in memory because once FileBasedCacheManager object is
# destroyed/re-created it loses the access to previously written cache
# objects anyways even if cache_dir already exists. In other words,
# it is not possible to resume execution of Beam pipeline from the
# saved cache if FileBasedCacheManager has been reset.
#
# However, if we are to implement better cache persistence, one needs
# to take care of keeping consistency between the cached PCollection
# and its PCoder type.
self._saved_pcoders = {}
self._default_pcoder = SafeFastPrimitivesCoder()
# The sinks to capture data from capturable sources.
# Dict([str, StreamingCacheSink])
self._capture_sinks = {}
self._capture_keys = set()
def size(self, *labels):
if self.exists(*labels):
return os.path.getsize(os.path.join(self._cache_dir, *labels))
return 0
@property
def capture_size(self):
return sum([sink.size_in_bytes for _, sink in self._capture_sinks.items()])
@property
def capture_paths(self):
return list(self._capture_sinks.keys())
@property
def capture_keys(self):
return self._capture_keys
def exists(self, *labels):
path = os.path.join(self._cache_dir, *labels)
return os.path.exists(path)
# TODO(srohde): Modify this to return the correct version.
def read(self, *labels, **args):
"""Returns a generator to read all records from file."""
tail = args.pop('tail', False)
# Only immediately return when the file doesn't exist when the user wants a
# snapshot of the cache (when tail is false).
if not self.exists(*labels) and not tail:
return iter([]), -1
reader = StreamingCacheSource(
self._cache_dir, labels, self._is_cache_complete).read(tail=tail)
# Return an empty iterator if there is nothing in the file yet. This can
# only happen when tail is False.
try:
header = next(reader)
except StopIteration:
return iter([]), -1
return StreamingCache.Reader([header], [reader]).read(), 1
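  # A minimal usage sketch (not from the original module): reading a snapshot
  # of a cached PCollection. The labels are placeholders; passing None as
  # cache_dir makes the cache create its own temporary directory.
  #
  #   cache = StreamingCache(None)
  #   events, version = cache.read('full', '<cache-key-string>', tail=False)
  #   for event in events:
  #     pass  # events replayed from the cached TestStream file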
def read_multiple(self, labels, tail=True):
"""Returns a generator to read all records from file.
Does tail until the cache is complete. This is because it is used in the
TestStreamServiceController to read from file which is only used | |
#!/usr/bin/env python
"""
Attempt to create a "manor" akin to::
###############################################
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#########+####+######+###########+#####.......#
#.......+......+......................+.......#
#.......######+######+#.......#######+#########
#.......#......#......#<<#....#.......#.......#
#.......#......#......#<<#....#.......#.......#
#.......#......#......####....+.......#.......#
#.......#......#......#..+....#.......#.......#
##########################....#################
##++##
"""
import random, builder, room
from interface.features import *
from library.coord import *
from library.random_util import *
from library.feature import *
from library import pathfind
class ManorCollection (builder.BuilderCollection):
def __init__ (self, c=[]):
builder.BuilderCollection.__init__(self, c)
def print_corridors (self):
"""
Debugging method. Iterates over all corridors and prints the location
and size of each corridor within the manor.
"""
for idx in self.corridors:
print "Corridor %s: %s" % (idx, self.corridor(idx))
def get_corridor_index (self, pos, single = True):
"""
Returns the index of the corridor a coordinate belongs to, or None
if it doesn't lie in any corridor.
If it's part of the overlap region, the first index is returned.
:``pos``: A coord. *Required*
:``single``: If true, returns the first index encountered.
Otherwise, a list containing all matching indices. *Default true*.
"""
list = []
for idx in self.corridors:
corr = self.corridor(idx)
c = corr.pos()
r = corr.size() - 1
if (pos.x >= c.x and pos.x <= c.x + r.x
and pos.y >= c.y and pos.y <= c.y + r.y):
if single:
return idx
list.append(idx)
if single:
return None
return list
def get_corridor_indices (self, pos):
"""
Returns a list of indices of all corridors a coordinate belongs to,
or None if it's outside the manor.
:``pos``: A coord. *Required*.
"""
return self.get_corridor_index(pos, False)
def print_rooms (self):
"""
Debugging method. Iterates over all rooms and prints the location
and size of each room within the manor.
"""
for idx in self.rooms:
print "Room %s: %s" % (idx, self.get_room(idx))
def get_room_index (self, pos, single = True):
"""
Returns the index of the room a coordinate belongs to, or None if
it's outside the manor.
If it's part of the overlap region, the first index is returned.
:``pos``: A coord. *Required*.
:``single``: If true, returns the first index encountered.
Otherwise, a list containing all matching indices. *Default true*.
"""
list = []
for idx in self.rooms:
curr = self.get_room(idx)
start = curr.pos()
end = start + curr.size() - 1
if (pos.x >= start.x and pos.x <= end.x
and pos.y >= start.y and pos.y <= end.y):
if single:
return idx
list.append(idx)
if single:
return None
return list
def get_room_indices (self, pos):
"""
Returns a list of indices of all rooms a coordinate belongs to,
or None if it's outside the manor.
:``pos``: A coord. *Required*.
"""
return self.get_room_index(pos, False)
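    # Example usage (sketch, assuming an already built ManorCollection `m`):
    #
    #   room_idx  = m.get_room_index(coord.Coord(12, 5))    # single index or None
    #   all_rooms = m.get_room_indices(coord.Coord(12, 5))  # list of indices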
def get_room_corridor_indices (self, pos):
"""
Returns a list of indices of all rooms and corridors a coordinate belongs to,
or None if it's outside the manor.
:``pos``: A coord. *Required*.
"""
rooms = self.get_room_index(pos, False)
corrs = self.get_corridor_index(pos, False)
for c in corrs:
rooms.append(c)
return rooms
def get_room_corridors (self):
"""
Get a combined list including both room and corridor indices.
"""
# I might be overly cautious here, but it's so easy to overwrite
# existing lists by setting references without meaning to. (jpeg)
room_corridors = []
for r in self.rooms:
room_corridors.append(r)
for c in self.corridors:
room_corridors.append(c)
room_corridors.sort()
return room_corridors
def get_corridor_name (self, idx):
assert(idx in self.corridors)
if idx == self.main_corridor:
return "main corridor"
corr = self.corridor(idx)
start = corr.pos()
stop = start + coord.Coord(corr.width(), corr.height())
m_end = self.size()
print "corridor %s" % idx
print "start=(%s), stop=(%s)" % (start, stop)
print "manor size=(%s), 1/4 -> (%s), 3/4 -> (%s)" % (m_end, coord.Coord(m_end.x/4, m_end.y/4), coord.Coord(3*m_end.x/4, 3*m_end.y/4))
dir_horizontal = ""
if start.y < max(5, m_end.y/4):
dir_horizontal = "north"
elif stop.y > min(3*m_end.y/4, m_end.y - 5):
dir_horizontal = "south"
else:
dir_horizontal = ""
if start.x < max(5, m_end.x/4):
dir_vertical = "west"
elif stop.x > min(3*m_end.x/4, m_end.x - 5):
dir_vertical = "east"
else:
dir_vertical = ""
# only one other corridor
if len(self.corridors) == 2:
if dir_horizontal != "" and dir_vertical != "":
if coinflip():
dir_horizontal = ""
else:
dir_vertical = ""
# two other corridors
elif len(self.corridors) == 3:
if corr.width() == 1: # vertical
dir_horizontal = ""
else:
dir_vertical = ""
# else just combine both values
if dir_horizontal != "" or dir_vertical != "":
return "%s%s corridor" % (dir_horizontal, dir_vertical)
# If none of these match, just return the number.
return "corridor %s" % idx
def init_room_properties (self):
"""
Initialises a list of RoomProp objects for each room and corridor
in the manor.
"""
self.room_props = []
for r in self.get_room_corridors():
if r in self.rooms:
curr = self.get_room(r)
start = curr.pos()
size = curr.size()
width = size.x
height = size.y
room_prop = room.RoomProps("room %s" % r, start, width, height)
else:
corr = self.corridor(r)
start = corr.pos()
width = corr.width()
height = corr.height()
name = self.get_corridor_name(r)
room_prop = room.RoomProps(name, start, width, height)
room_prop.mark_as_corridor()
self.room_props.append(room_prop)
def get_roomprop (self, idx):
"""
Returns a RoomProp object for a given room index.
:``idx``: A room or corridor index. *Required*.
"""
if not self.room_props:
return None
assert(idx < len(self.room_props))
return self.room_props[idx]
def add_features (self):
# Translate rooms and corridors into wall and floor features.
self.init_features()
# Add doors along corridors.
self.add_doors()
self.maybe_remove_bonus_doors()
# Add windows.
self.add_windows()
# Add doors to rooms still missing them.
self.add_missing_doors()
def init_features (self):
"""
Initialise the manor's feature grid, placing floor and walls as
defined by the rooms/corridor layout.
"""
self.init_room_properties()
self.features = FeatureGrid(self.size().x, self.size().y)
print "Manor size: %s" % self.size()
print "Feature size: %s" % self.features.size()
# Iterate over all rooms and corridors, and mark positions within
# them as floor, and their boundaries as walls.
for r in self.get_room_corridors():
is_corridor = False # The "room" is actually a corridor.
if r in self.rooms:
curr = self.get_room(r)
else:
is_corridor = True
curr = self.corridor(r)
start = curr.pos()
stop = curr.pos() + curr.size()
# Note: Currently, only the main corridor is ever horizontal
# but that might change in the future.
horizontal = False # If a corridor, it's a horizontal one.
# Debugging output, and setting horizontal.
if is_corridor:
if curr.height() == 1:
horizontal = True
direction = "horizontal"
else:
direction = "vertical"
# print "Corridor %s: start=%s, stop=%s (%s)" % (r, start, stop, direction)
# Iterate over all coordinates within the room.
for pos in coord.RectangleIterator(start, stop):
# If we've reached the manor boundary, this is a wall.
if (pos.x == 0 or pos.x == self.size().x -1
or pos.y == 0 or pos.y == self.size().y - 1):
self.set_feature(pos, WALL)
# Corridors overwrite walls previously set by rooms.
elif is_corridor:
self.set_feature(pos, FLOOR)
# print pos
# Depending on the corridor orientation, mark the
# adjacent non-corridor squares as walls.
adjacent = []
if horizontal:
adjacent = (DIR_NORTH, DIR_SOUTH)
else:
adjacent = (DIR_WEST, DIR_EAST)
for dir in adjacent:
pos2 = pos + dir
# self.set_feature(pos2, WALL)
if pos2 <= 0 or pos2 >= self.size():
continue
corridx = self.get_corridor_indices(pos2)
# print "pos2: %s -> corridors=%s" % (pos2, corridx),
if r in corridx:
corridx.remove(r)
# print corridx
# else:
# print
if len(corridx) == 0:
self.set_feature(pos2, WALL)
# The room boundary is always a wall.
elif (pos.x == start.x or pos.x == stop.x - 1
or pos.y == start.y or pos.y == stop.y - 1):
self.set_feature(pos, WALL)
# Otherwise, we are inside the room.
# Mark as floor but don't overwrite previously placed walls.
elif self.get_feature(pos) != WALL:
self.set_feature(pos, FLOOR)
def get_feature (self, pos):
"""
Returns the feature for the given position.
:``pos``: A coordinate within the manor. *Required*.
"""
if pos < DIR_NOWHERE or pos >= self.size():
print "Invalid coord %s in manor of size %s" % (pos, self.size())
return NOTHING
return self.features.__getitem__(pos)
def set_feature (self, pos, feat):
"""
Sets the feature at a given position of the feature grid.
:``pos``: A coordinate within the manor. *Required*.
:``feat``: The feature to set. *Required*.
"""
if pos < DIR_NOWHERE or pos >= self.size():
print "Invalid coord %s in manor of size %s" % (pos, self.size())
return NOTHING
return self.features.__setitem__(pos, feat)
def add_doors_along_corridor (self, start, stop, offset = DIR_NOWHERE):
| |
import pandas as pd
import tushare as ts
from StockAnalysisSystem.core.config import TS_TOKEN
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.CollectorUtility import *
# ----------------------------------------------------------------------------------------------------------------------
FIELDS = {
'Stockholder.PledgeStatus': {
'ts_code': 'TS代码',
'end_date': '截至日期',
'pledge_count': '质押次数',
'unrest_pledge': '无限售股质押数量',
'rest_pledge': '限售股份质押数量',
'total_share': '总股本',
'pledge_ratio': '质押比例',
},
'Stockholder.PledgeHistory': {
'ts_code': 'TS股票代码',
'ann_date': '公告日期',
'holder_name': '股东名称',
'pledge_amount': '质押数量',
'start_date': '质押开始日期',
'end_date': '质押结束日期',
'is_release': '是否已解押',
'release_date': '解押日期',
'pledgor': '质押方',
'holding_amount': '持股总数',
'pledged_amount': '质押总数',
'p_total_ratio': '本次质押占总股本比例',
'h_total_ratio': '持股总数占总股本比例',
'is_buyback': '是否回购',
},
'Stockholder.Count': {
'holder_num': '股东户数',
},
'Stockholder.Statistics': {
'holder_name': '股东名称',
'hold_amount': '持有数量(股)',
'hold_ratio': '持有比例',
},
'Stockholder.ReductionIncrease': {
'ts_code': 'TS代码',
'ann_date': '公告日期',
'holder_name': '股东名称',
        'holder_type': '股东类型',  # G=executive, P=individual, C=company
        'in_de': '增减持类型',  # IN=increase, DE=decrease
'change_vol': '变动数量',
'change_ratio': '占流通比例', # (%)
'after_share': '变动后持股',
'after_ratio': '变动后占流通比例', # (%)
'avg_price': '平均价格',
'total_share': '持股总数',
'begin_date': '增减持开始日期',
'close_date': '增减持结束日期',
}
}
# -------------------------------------------------------- Prob --------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_name': 'stockholder_data_tushare_pro',
'plugin_version': '0.0.0.1',
'tags': ['tusharepro']
}
def plugin_adapt(uri: str) -> bool:
return uri in FIELDS.keys()
def plugin_capacities() -> list:
return list(FIELDS.keys())
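# Example (sketch, not defined by this plugin): the collector framework is
# assumed to probe plugins roughly like this; 'Some.Other.Uri' is a made-up
# non-matching key.
#
#   plugin_capacities()                        # -> the five uris listed in FIELDS
#   plugin_adapt('Stockholder.PledgeStatus')   # -> True
#   plugin_adapt('Some.Other.Uri')             # -> False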
# ----------------------------------------------------------------------------------------------------------------------
# pledge_stat: https://tushare.pro/document/2?doc_id=110
# pledge_detail: https://tushare.pro/document/2?doc_id=111
def __fetch_stock_holder_data(**kwargs) -> pd.DataFrame:
uri = kwargs.get('uri')
result = check_execute_test_flag(**kwargs)
if result is None:
# period = kwargs.get('due_date')
ts_code = pickup_ts_code(kwargs)
# since, until = normalize_time_serial(period, default_since(), today())
pro = ts.pro_api(TS_TOKEN)
# time_iter = DateTimeIterator(since, until)
#
# result = None
# while not time_iter.end():
# # The max items count retrieved per 1 fetching: 1000
# # The max items per 1 year: 52 (one new item per 7days for PledgeStatus)
# # So the iter years should not be larger than 20 years
#
# sub_since, sub_until = time_iter.iter_years(15)
# ts_since = sub_since.strftime('%Y%m%d')
# ts_until = sub_until.strftime('%Y%m%d')
#
# clock = Clock()
# delayer.delay()
# if uri == 'Stockholder.PledgeStatus':
# sub_result = pro.pledge_stat(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
# elif uri == 'Stockholder.PledgeHistory':
# sub_result = pro.pledge_detail(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
# else:
# sub_result = None
# print(uri + ' Network finished, time spending: ' + str(clock.elapsed_ms()) + 'ms')
#
# if sub_result is not None:
# if result is None:
# result = sub_result
# else:
# result.append(result)
if not str_available(ts_code):
result = None
else:
clock = Clock()
if uri == 'Stockholder.PledgeStatus':
ts_delay('pledge_stat')
result = pro.pledge_stat(ts_code=ts_code)
elif uri == 'Stockholder.PledgeHistory':
ts_delay('pledge_detail')
result = pro.pledge_detail(ts_code=ts_code)
else:
result = None
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
check_execute_dump_flag(result, **kwargs)
if result is not None:
        result = result.fillna(0.0)
if uri == 'Stockholder.PledgeStatus':
result['due_date'] = result['end_date']
result['total_share'] = result['total_share'] * 10000
result['rest_pledge'] = result['rest_pledge'] * 10000
result['unrest_pledge'] = result['unrest_pledge'] * 10000
result['pledge_count'] = result['pledge_count'].astype(np.int64)
result['pledge_ratio'] = result['pledge_ratio'].astype(float)
elif uri == 'Stockholder.PledgeHistory':
result['due_date'] = result['ann_date']
result['pledge_amount'] = result['pledge_amount'] * 10000
result['holding_amount'] = result['holding_amount'] * 10000
result['pledged_amount'] = result['pledged_amount'] * 10000
convert_ts_code_field(result)
convert_ts_date_field(result, 'due_date')
# result['due_date'] = pd.to_datetime(result['due_date'])
# result['stock_identity'] = result['ts_code']
# result['stock_identity'] = result['stock_identity'].str.replace('.SH', '.SSE')
# result['stock_identity'] = result['stock_identity'].str.replace('.SZ', '.SZSE')
return result
# stk_holdernumber: https://tushare.pro/document/2?doc_id=166
def __fetch_stock_holder_count(**kwargs) -> pd.DataFrame or None:
uri = kwargs.get('uri')
result = check_execute_test_flag(**kwargs)
if result is None:
period = kwargs.get('period')
ts_code = pickup_ts_code(kwargs)
since, until = normalize_time_serial(period, default_since(), today())
pro = ts.pro_api(TS_TOKEN)
ts_since = since.strftime('%Y%m%d')
ts_until = until.strftime('%Y%m%d')
if is_slice_update(ts_code, since, until):
result = None
else:
ts_delay('stk_holdernumber')
clock = Clock()
result = pro.stk_holdernumber(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
convert_ts_code_field(result)
convert_ts_date_field(result, 'ann_date')
convert_ts_date_field(result, 'end_date', 'period')
check_execute_dump_flag(result, **kwargs)
return result
# top10_holders: https://tushare.pro/document/2?doc_id=61
# top10_floatholders: https://tushare.pro/document/2?doc_id=62
def __fetch_stock_holder_statistics(**kwargs) -> pd.DataFrame or None:
uri = kwargs.get('uri')
result = check_execute_test_flag(**kwargs)
if result is None:
period = kwargs.get('period')
ts_code = pickup_ts_code(kwargs)
since, until = normalize_time_serial(period, default_since(), today())
# See TushareApi.xlsx
# since_limit = years_ago_of(until, 3)
# since = max([since, since_limit])
pro = ts.pro_api(TS_TOKEN)
ts_since = since.strftime('%Y%m%d')
ts_until = until.strftime('%Y%m%d')
if is_slice_update(ts_code, since, until):
result = None
else:
clock = Clock()
ts_delay('top10_holders')
result_top10 = pro.top10_holders(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
ts_delay('top10_floatholders')
result_top10_float = pro.top10_floatholders(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
# Process top10_holders data
del result_top10['ts_code']
convert_ts_date_field(result_top10, 'ann_date')
convert_ts_date_field(result_top10, 'end_date')
result_top10 = result_top10.fillna('')
grouped_stockholder_top_10 = result_top10.groupby('end_date')
data_dict = {'period': [], 'stockholder_top10': []}
for g, df in grouped_stockholder_top_10:
data_dict['period'].append(g)
del df['end_date']
data_dict['stockholder_top10'].append(df.to_dict('records'))
grouped_stockholder_top_10_df = pd.DataFrame(data_dict)
grouped_stockholder_top_10_df['stock_identity'] = ts_code_to_stock_identity(ts_code)
# Process top10_floatholders data
del result_top10_float['ts_code']
convert_ts_date_field(result_top10_float, 'ann_date')
convert_ts_date_field(result_top10_float, 'end_date')
result_top10_float = result_top10_float.fillna('')
grouped_stockholder_top_10_float = result_top10_float.groupby('end_date')
data_dict = {'period': [], 'stockholder_top10_float': []}
for g, df in grouped_stockholder_top_10_float:
data_dict['period'].append(g)
del df['end_date']
data_dict['stockholder_top10_float'].append(df.to_dict('records'))
grouped_stockholder_top_10_float_df = pd.DataFrame(data_dict)
grouped_stockholder_top_10_float_df['stock_identity'] = ts_code_to_stock_identity(ts_code)
# Merge together
result = pd.merge(grouped_stockholder_top_10_df, grouped_stockholder_top_10_float_df,
on=['stock_identity', 'period'], how='outer')
result = result.sort_values('period')
# 002978.SZ
# 20070518 - 20200517
# top10_floatholders() may get empty DataFrame
# if isinstance(result_top10, pd.DataFrame) and not result_top10.empty:
# del result_top10['ts_code']
# del result_top10['ann_date']
#
# result_top10.fillna(0.0)
# result_top10['hold_ratio'] = result_top10['hold_ratio'] / 100
#
# result_top10_grouped = pd.DataFrame({'stockholder_top10': result_top10.groupby('end_date').apply(
# lambda x: x.drop('end_date', axis=1).to_dict('records'))}).reset_index()
# else:
# result_top10_grouped = None
#
# if result_top10_float is not None and len(result_top10_float) > 0:
# del result_top10_float['ts_code']
# del result_top10_float['ann_date']
#
# result_top10_float_grouped = pd.DataFrame({'stockholder_top10_nt': result_top10_float.groupby('end_date').apply(
# lambda x: x.drop('end_date', axis=1).to_dict('records'))}).reset_index()
# else:
# result_top10_float_grouped = None
#
# if result_count is None or result_top10 is None or result_top10_float is None:
# print('Fetch stockholder statistics data fail.')
# return None
#
# result = result_top10_grouped \
# if result_top10_grouped is not None and len(result_top10_grouped) > 0 else None
# result = pd.merge(result, result_top10_float_grouped, how='outer', on='end_date', sort=False) \
# if result is not None else result_top10_float_grouped
# result = pd.merge(result, result_count, how='left', on='end_date', sort=False) \
# if result is not None else result_count
# result['ts_code'] = ts_code
# del result_top10['ts_code']
# del result_top10['ann_date']
# del result_top10_float['ts_code']
# del result_top10_float['ann_date']
#
# result_top10.fillna(0.0)
# result_top10['hold_ratio'] = result_top10['hold_ratio'] / 100
#
# try:
# result_top10_grouped = pd.DataFrame({'stockholder_top10': result_top10.groupby('end_date').apply(
# lambda x: x.drop('end_date', axis=1).to_dict('records'))}).reset_index()
# result_top10_float_grouped = pd.DataFrame({'stockholder_top10_nt': result_top10_float.groupby('end_date').apply(
# lambda x: x.drop('end_date', axis=1).to_dict('records'))}).reset_index()
#
# result = pd.merge(result_top10_grouped, result_top10_float_grouped, how='outer', on='end_date', sort=False)
# result = pd.merge(result, result_count, how='left', on='end_date', sort=False)
# result['ts_code'] = ts_code
# except Exception as e:
# print('Fetching stockholder data error:')
# print(e)
# print(traceback.format_exc())
# finally:
# pass
# # Ts data may have issues, just detect it.
# for index, row in result.iterrows():
# end_date = row['end_date']
# stockholder_top10 = row['stockholder_top10']
# stockholder_top10_nt = row['stockholder_top10_nt']
#
# if isinstance(stockholder_top10, list):
# if len(stockholder_top10) != 10:
# print('%s: stockholder_top10 length is %s' % (end_date, len(stockholder_top10)))
# else:
# print('%s: stockholder_top10 type error %s' % (end_date, str(stockholder_top10)))
#
# if isinstance(stockholder_top10_nt, list):
# if len(stockholder_top10_nt) != 10:
# print('%s: stockholder_top10_nt length is %s' % (end_date, len(stockholder_top10_nt)))
# else:
# print('%s: stockholder_top10 type error %s' % (end_date, str(stockholder_top10_nt)))
check_execute_dump_flag(result, **kwargs)
# if result is not None:
# result.fillna('')
# result['period'] = pd.to_datetime(result['end_date'])
# result['stock_identity'] = result['ts_code']
# result['stock_identity'] = result['stock_identity'].str.replace('.SH', '.SSE')
# result['stock_identity'] = result['stock_identity'].str.replace('.SZ', '.SZSE')
return result
# # This method can fetch the whole data from 1990 to now, but it takes too much of time (50s for 000001)
# def __fetch_stock_holder_statistics_full(**kwargs) -> pd.DataFrame or None:
# uri = kwargs.get('uri')
# result = check_execute_test_flag(**kwargs)
#
# if result is None:
# period = kwargs.get('period')
# ts_code = pickup_ts_code(kwargs)
# since, until = normalize_time_serial(period, default_since(), today())
#
# clock = Clock()
# pro = ts.pro_api(TS_TOKEN)
# time_iter = DateTimeIterator(since, until)
#
# ts_since = since.strftime('%Y%m%d')
# ts_until = until.strftime('%Y%m%d')
# result_count = pro.stk_holdernumber(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
#
# result_top10 = None
# result_top10_float = None
# while not time_iter.end():
# # Top10 api can only fetch 100 items per one time (100 / 10 / 4 = 2.5Years)
# sub_since, sub_until = time_iter.iter_years(2.4)
# ts_since = sub_since.strftime('%Y%m%d')
# ts_until = sub_until.strftime('%Y%m%d')
#
# ts_delay('top10_holders')
# result_top10_part = pro.top10_holders(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
#
# ts_delay('top10_floatholders')
# result_top10_float_part = pro.top10_floatholders(ts_code=ts_code, start_date=ts_since, end_date=ts_until)
#
# result_top10 = pd.concat([result_top10, result_top10_part])
# result_top10_float = pd.concat([result_top10_float, result_top10_float_part])
#
# print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
#
# if result_count is None or result_top10 is None or result_top10_float is None:
# print('Fetch stockholder statistics data fail.')
# return None
#
# del result_top10['ann_date']
# del result_top10_float['ann_date']
#
# key_columns = ['ts_code', 'end_date']
# result_top10_grouped = pd.DataFrame({'stockholder_top10': result_top10.groupby(key_columns).apply(
# lambda x: x.drop(key_columns, axis=1).to_dict('records'))}).reset_index()
# result_top10_float_grouped = pd.DataFrame({'stockholder_top10_nt': result_top10_float.groupby(key_columns).apply(
# lambda x: x.drop(key_columns, axis=1).to_dict('records'))}).reset_index()
#
# result = pd.merge(result_top10_grouped, result_top10_float_grouped, how='outer', on=key_columns, sort=False)
# result = pd.merge(result, result_count, how='outer', on=key_columns, sort=False)
#
# print(result)
#
# check_execute_dump_flag(result, **kwargs)
#
# if result is not None:
# | |
End scores = (19, 64)
>>> print(turns[1])
Start scores = (19, 64).
Player 1 rolls 10 dice and gets outcomes [3, 2, 3, 4, 3, 1, 2, 1, 3, 1].
End scores = (65, 19)
>>> print(turns[2])
Start scores = (65, 19).
Player 0 rolls 9 dice and gets outcomes [6, 6, 4, 4, 2, 3, 4, 2, 5].
End scores = (19, 101)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=85071, score0=86, score1=5, goal=89, feral_hogs=False)
>>> print(turns[0])
Start scores = (86, 5).
Player 0 rolls 7 dice and gets outcomes [1, 3, 2, 6, 4, 5, 6].
End scores = (87, 5)
>>> print(turns[1])
Start scores = (87, 5).
Player 1 rolls 0 dice and gets outcomes [].
End scores = (87, 7)
>>> print(turns[2])
Start scores = (87, 7).
Player 0 rolls 5 dice and gets outcomes [6, 6, 1, 3, 5].
End scores = (88, 7)
>>> print(turns[3])
Start scores = (88, 7).
Player 1 rolls 6 dice and gets outcomes [6, 6, 5, 5, 3, 4].
End scores = (36, 88)
>>> print(turns[4])
Start scores = (36, 88).
Player 0 rolls 2 dice and gets outcomes [4, 2].
End scores = (42, 88)
>>> print(turns[5])
Start scores = (42, 88).
Player 1 rolls 3 dice and gets outcomes [5, 5, 4].
End scores = (42, 102)
>>> print(turns[6])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=23577, score0=32, score1=23, goal=45, feral_hogs=False)
>>> print(turns[0])
Start scores = (32, 23).
Player 0 rolls 7 dice and gets outcomes [1, 4, 6, 5, 3, 6, 4].
End scores = (33, 23)
>>> print(turns[1])
Start scores = (33, 23).
Player 1 rolls 8 dice and gets outcomes [2, 1, 3, 5, 3, 6, 6, 5].
End scores = (33, 24)
>>> print(turns[2])
Start scores = (33, 24).
Player 0 rolls 1 dice and gets outcomes [1].
End scores = (34, 24)
>>> print(turns[3])
Start scores = (34, 24).
Player 1 rolls 7 dice and gets outcomes [4, 4, 5, 2, 6, 4, 2].
End scores = (51, 34)
>>> print(turns[4])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=663, score0=44, score1=13, goal=59, feral_hogs=True)
>>> print(turns[0])
Start scores = (44, 13).
Player 0 rolls 0 dice and gets outcomes [].
End scores = (48, 13)
>>> print(turns[1])
Start scores = (48, 13).
Player 1 rolls 1 dice and gets outcomes [5].
End scores = (48, 18)
>>> print(turns[2])
Start scores = (48, 18).
Player 0 rolls 7 dice and gets outcomes [6, 6, 4, 5, 3, 5, 5].
End scores = (82, 18)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=6543, score0=65, score1=70, goal=87, feral_hogs=True)
>>> print(turns[0])
Start scores = (65, 70).
Player 0 rolls 0 dice and gets outcomes [].
End scores = (68, 70)
>>> print(turns[1])
Start scores = (68, 70).
Player 1 rolls 9 dice and gets outcomes [5, 3, 3, 3, 2, 3, 6, 6, 2].
End scores = (68, 103)
>>> print(turns[2])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=31919, score0=2, score1=16, goal=28, feral_hogs=False)
>>> print(turns[0])
Start scores = (2, 16).
Player 0 rolls 0 dice and gets outcomes [].
End scores = (10, 16)
>>> print(turns[1])
Start scores = (10, 16).
Player 1 rolls 1 dice and gets outcomes [5].
End scores = (10, 21)
>>> print(turns[2])
Start scores = (10, 21).
Player 0 rolls 2 dice and gets outcomes [6, 2].
End scores = (18, 21)
>>> print(turns[3])
Start scores = (18, 21).
Player 1 rolls 7 dice and gets outcomes [2, 3, 2, 5, 2, 4, 1].
End scores = (22, 18)
>>> print(turns[4])
Start scores = (22, 18).
Player 0 rolls 10 dice and gets outcomes [3, 2, 2, 5, 4, 1, 2, 2, 3, 5].
End scores = (18, 23)
>>> print(turns[5])
Start scores = (18, 23).
Player 1 rolls 3 dice and gets outcomes [3, 5, 4].
End scores = (18, 35)
>>> print(turns[6])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=67699, score0=24, score1=17, goal=28, feral_hogs=True)
>>> print(turns[0])
Start scores = (24, 17).
Player 0 rolls 5 dice and gets outcomes [2, 1, 3, 6, 6].
End scores = (25, 17)
>>> print(turns[1])
Start scores = (25, 17).
Player 1 rolls 2 dice and gets outcomes [4, 3].
End scores = (25, 27)
>>> print(turns[2])
Start scores = (25, 27).
Player 0 rolls 6 dice and gets outcomes [5, 4, 3, 4, 4, 5].
End scores = (50, 27)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=25815, score0=52, score1=11, goal=54, feral_hogs=False)
>>> print(turns[0])
Start scores = (52, 11).
Player 0 rolls 9 dice and gets outcomes [5, 5, 5, 4, 2, 4, 3, 1, 5].
End scores = (53, 11)
>>> print(turns[1])
Start scores = (53, 11).
Player 1 rolls 5 dice and gets outcomes [1, 2, 1, 2, 6].
End scores = (53, 12)
>>> print(turns[2])
Start scores = (53, 12).
Player 0 rolls 6 dice and gets outcomes [4, 5, 6, 3, 1, 4].
End scores = (54, 12)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=41969, score0=38, score1=54, goal=78, feral_hogs=False)
>>> print(turns[0])
Start scores = (38, 54).
Player 0 rolls 1 dice and gets outcomes [1].
End scores = (39, 54)
>>> print(turns[1])
Start scores = (39, 54).
Player 1 rolls 4 dice and gets outcomes [1, 2, 5, 6].
End scores = (39, 55)
>>> print(turns[2])
Start scores = (39, 55).
Player 0 rolls 3 dice and gets outcomes [2, 4, 1].
End scores = (40, 55)
>>> print(turns[3])
Start scores = (40, 55).
Player 1 rolls 8 dice and gets outcomes [4, 5, 6, 4, 2, 2, 5, 2].
End scores = (40, 85)
>>> print(turns[4])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=68309, score0=53, score1=40, goal=56, feral_hogs=True)
>>> print(turns[0])
Start scores = (53, 40).
Player 0 rolls 7 dice and gets outcomes [2, 4, 3, 5, 6, 2, 2].
End scores = (77, 40)
>>> print(turns[1])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=8243, score0=28, score1=23, goal=30, feral_hogs=False)
>>> print(turns[0])
Start scores = (28, 23).
Player 0 rolls 6 dice and gets outcomes [4, 2, 5, 2, 6, 5].
End scores = (52, 23)
>>> print(turns[1])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=43015, score0=53, score1=74, goal=77, feral_hogs=True)
>>> print(turns[0])
Start scores = (53, 74).
Player 0 rolls 10 dice and gets outcomes [5, 6, 6, 6, 1, 4, 3, 5, 3, 2].
End scores = (74, 54)
>>> print(turns[1])
Start scores = (74, 54).
Player 1 rolls 2 dice and gets outcomes [1, 6].
End scores = (74, 58)
>>> print(turns[2])
Start scores = (74, 58).
Player 0 rolls 0 dice and gets outcomes [].
End scores = (80, 58)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=76012, score0=39, score1=36, goal=73, feral_hogs=False)
>>> print(turns[0])
Start scores = (39, 36).
Player 0 rolls 2 dice and gets outcomes [1, 4].
End scores = (36, 40)
>>> print(turns[1])
Start scores = (36, 40).
Player 1 rolls 3 dice and gets outcomes [6, 6, 6].
End scores = (36, 58)
>>> print(turns[2])
Start scores = (36, 58).
Player 0 rolls
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import with_statement
import pkg_resources
from pkg_resources import resource_filename
import os
import sys
import re
from pathlib import Path, PurePosixPath
import argparse
import errno
import shutil
import subprocess
import jinja2
import json
import pyaml
verbose = False
def vlog(*args, **kwargs):
if verbose:
print(*args, **kwargs)
def set_verbose(value):
global verbose
verbose = value
def get_template(name):
root = resource_filename(__name__, 'templates')
try:
template = jinja2.Environment(
loader=jinja2.FileSystemLoader(root)
).get_template(name)
except:
print(name)
raise
return template
def save(text, path):
path = Path(path)
if path.suffix == '.bat':
text = text.replace('\n', '\r\n')
with path.open('wb') as f:
f.write(text.encode('utf8'))
if path.suffix == '.sh' and os.name != 'nt':
subprocess.check_call('chmod +x "{}" '.format(path), shell=True)
def render(args, dest_dir, templates):
for t in templates:
t = str(t)
ret = get_template(t).render(args)
save(ret, dest_dir.joinpath(t))
def mkdir_p(path):
path = str(path)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def export_files(dest, filelist):
    dest = Path(dest)
    for f in filelist:
d = dest.joinpath(f)
vlog('copy => {}'.format(d))
mkdir_p(d.parent)
shutil.copyfile(str(f), str(d))
def make_all_library_info(base_dir):
result = []
for x in Path(base_dir).rglob('mbed_lib.json'):
test_dir = str(Path(base_dir, 'tools', 'test'))
if os.path.commonprefix([str(x), test_dir]) == test_dir:
vlog('without test-library:{}'.format(x))
continue
vlog('found library: {}'.format(x))
with x.open() as f:
try:
info = json.load(f)
except json.decoder.JSONDecodeError:
continue
info['dir'] = x.parent
result.append(info)
return result
class UnusedLibraryFilter():
def filterd(self, build_info, unused_libraries):
filterd_build_info = BuildInfo()
filterd_build_info.definitions = build_info.definitions
filterd_build_info.arch_opts = build_info.arch_opts
filterd_build_info.linker_flags = build_info.linker_flags
filterd_build_info.link_libraries = build_info.link_libraries
filterd_build_info.c_extra_opts = build_info.c_extra_opts
filterd_build_info.cxx_extra_opts = build_info.cxx_extra_opts
filterd_build_info.warning_opts = build_info.warning_opts
##############################
# filter config_definitions
regex = re.compile(r'library:([-_a-zA-Z0-9]+)')
unused_library_names = [x['name'] for x in unused_libraries]
for x in build_info.config_definitions:
m = regex.search(x)
if not m:
filterd_build_info.config_definitions.append(x)
continue
x_library_name = m.group(1)
if x_library_name in unused_library_names:
filterd_build_info.removed_config_definitions.append(
{'removed_by': x_library_name, 'value': x})
else:
filterd_build_info.config_definitions.append(x)
##############################
# filter include_dirs
result = []
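# Note on the loops below: they rely on Python's for/else construct. The else
# branch runs only when the loop finishes without hitting `break`, i.e. when no
# unused library claims the entry, so the entry is kept rather than recorded as removed.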
for x in build_info.include_dirs:
for unused_info in unused_libraries:
unused_dir = str(unused_info['dir'])
if os.path.commonprefix([str(x), unused_dir]) == unused_dir:
filterd_build_info.removed_include_dirs.append(
{'removed_by': unused_info['name'], 'value': x})
break
else:
filterd_build_info.include_dirs.append(x)
##############################
# filter sources
for x in build_info.sources:
x_parent = str(x.parent)
for unused_info in unused_libraries:
unused_dir = str(unused_info['dir'])
if os.path.commonprefix([x_parent, unused_dir]) == unused_dir:
filterd_build_info.removed_sources.append(
{'removed_by': unused_info['name'], 'value': x})
break
else:
filterd_build_info.sources.append(x)
##############################
# filter headers
for x in build_info.headers:
x_parent = str(x.parent)
for unused_info in unused_libraries:
unused_dir = str(unused_info['dir'])
if os.path.commonprefix([x_parent, unused_dir]) == unused_dir:
filterd_build_info.removed_headers.append(
{'removed_by': unused_info['name'], 'value': x})
break
else:
filterd_build_info.headers.append(x)
return filterd_build_info
class BuildInfo():
def __init__(self):
self.sources = []
self.include_dirs = []
self.headers = []
self.definitions = []
self.arch_opts = []
self.linker_flags = []
self.link_libraries = []
self.c_extra_opts = []
self.cxx_extra_opts = []
self.warning_opts = []
self.config_definitions = []
self.removed_sources = []
self.removed_headers = []
self.removed_config_definitions = []
self.removed_include_dirs = []
class Parser():
def parse_makefile(self, makefile, build_info):
if build_info is None:
build_info = BuildInfo()
with open(makefile) as f:
lines = f.read().splitlines()
objects = self._get_paths(lines, 'OBJECTS')
build_info.sources = self._find_matching_suffix(objects, ['.c', '.cpp', '.S'])
build_info.include_dirs = self._get_paths(lines, 'INCLUDE_PATHS')
build_info.headers = self._find_headers(build_info.include_dirs)
build_info.definitions = self._get_definitions(lines, 'CXX_FLAGS')
build_info.arch_opts = self._get_arch_opts(lines)
build_info.linker_flags = self._get_linker_flags(lines)
build_info.link_libraries = self._get_link_libraries(lines)
build_info.cxx_extra_opts = self._get_cxx_extra_opts(lines)
build_info.c_extra_opts = self._get_c_extra_opts(lines)
build_info.warning_opts = self._get_warning_opts(lines)
return build_info
def parse_mbed_config(self, mbed_config, build_info):
if build_info is None:
build_info = BuildInfo()
with open(mbed_config) as f:
lines = f.read().splitlines()
regex = re.compile(
r'^#define\s+(?P<name>[^\s]+)\s+(?:(?P<value>.*))//(?P<comment>.*)')  # noqa
config_definitions = []
for l in lines:
m = regex.match(l)
is_braces = False
if m:
name = m.group('name')
value = m.group('value').strip()
comment = m.group('comment')
x = '-D' + name
if value:
x += '=' + value
if value.startswith('{'):
is_braces = True
if is_braces:
x = '"{}"'.format(x)
# Padding for alignment
if len(x) < 55:
x += ' ' * (55 - len(x))
x += ' #' + comment
config_definitions.append(x)
build_info.config_definitions = sorted(config_definitions)
return build_info
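# Each resulting entry is a -D flag padded to column 55, followed by the
# comment captured after '//' in mbed_config.h, roughly like this
# (the value below is purely illustrative):
#   -DMBED_CONF_PLATFORM_STDIO_BAUD_RATE=9600               # set by library:platform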
def _find_headers(self, include_dirs):
result = []
header_suffixes = ['', '.h', '.hpp']
for d in include_dirs:
if not d.exists():
continue
for f in [x for x in d.iterdir() if x.is_file()]:
if f.suffix in header_suffixes:
result.append(f)
result = [Path(x) for x in result]
return result
def _find_matching_suffix(self, filelist, suffixes):
result = []
for f in filelist:
for s in suffixes:
x = Path(f).with_suffix(s)
if x.exists():
result.append(x)
return result
def _get_definitions(self, lines, var_name):
values = self._get_values(lines, var_name)
regex = re.compile(r'^(-D.*)')
result = []
for v in values:
m = regex.match(v)
if m:
result.append(m.group(1))
result = sorted(result)
return result
def _get_values(self, lines, var_name):
regex = re.compile('^' + var_name + r'\s*[+:]*\=\s*(.*)')
result = []
for line in lines:
m = regex.match(line)
if not m:
continue
value = m.group(1)
if value in result:
continue
result.append(value)
return result
def _get_paths(self, lines, var_name):
tmp = self._get_values(lines, var_name)
result = []
for x in tmp:
pos = x.find('mbed-os')
if pos >= 0:
x = x[pos:]
ignores = ['frameworks', 'TESTS_COMMON']
skip = False
for ignore in ignores:
if x.find(ignore) != -1:
skip = True
break
if skip:
continue
result.append(x)
result = [Path(x) for x in result]
return result
def _get_link_libraries(self, lines):
result = []
values = self._get_values(lines, 'LD_SYS_LIBS')[0].split(' ')
regex = re.compile('^-l(.*)')
for v in values:
m = regex.match(v)
if m:
result.append(m.group(1))
return result
def _get_linker_flags(self, lines):
result = []
values = self._get_values(lines, 'LD_FLAGS')[0]
values = values.split(' ')
regex = re.compile('^(-Wl,.*)')
for v in values:
m = regex.match(v)
if m:
result.append(m.group(1))
return result
def _strip_quote(self, str_):
is_quote_single = str_.startswith("'") and str_.endswith("'")
is_quote_double = str_.startswith('"') and str_.endswith('"')
if is_quote_single or is_quote_double:
str_ = str_[1:-1]
return str_
def _get_warning_opts(self, lines):
result = []
values = self._get_values(lines, 'CXX_FLAGS')
for v in values:
v = self._strip_quote(v)
regex = re.compile('^(-W.*)')
m = regex.match(v)
if m:
result.append(m.group(1))
return result
def _get_cxx_extra_opts(self, lines):
result = []
values = self._get_values(lines, 'CXX_FLAGS')
for v in values:
v = self._strip_quote(v)
regex = re.compile('^(-f.*)')
m = regex.match(v)
if m:
result.append(m.group(1))
return result
def _get_c_extra_opts(self, lines):
result = []
values = self._get_values(lines, 'C_FLAGS')
for v in values:
v = self._strip_quote(v)
regex = re.compile('^(-f.*)')
m = regex.match(v)
if m:
result.append(m.group(1))
return result
def _get_arch_opts(self, lines):
result = []
values = self._get_values(lines, 'CXX_FLAGS')
for v in values:
v = self._strip_quote(v)
arch_opts = [
r'^(-mthumb)',
r'^(-mcpu\=.*)',
r'^(-mfpu\=.*)',
r'^(-mfloat-abi\=.*)'
]
for opt in arch_opts:
regex = re.compile(opt)
m = regex.match(v)
if m:
result.append(m.group(1))
break
return result
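# Rough usage sketch, assuming the installed entry point is called `xmbedinit`
# (the target and tag below are illustrative only, not taken from the project
# documentation):
#   xmbedinit -m NUCLEO_F446RE -T mbed-os-5.15.0 -d ./my-app -v
# This clones and builds mbed-os once under ~/.cache/xmbedinit/<tag>, parses the
# exported Makefile and mbed_config.h, copies the required sources, headers and
# linker script into ./my-app, and renders the CMake files and build scripts there.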
def main():
argparser = argparse.ArgumentParser(
description='Helps with local development of mbed',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
argparser.add_argument(
'-m', '--target',
required=True,
help='Compile target MCU. @see https://github.com/ARMmbed/mbed-cli')
argparser.add_argument(
'-T', '--tag',
required=True,
help='Tag of mbed @see https://github.com/ARMmbed/mbed-os/releases')
argparser.add_argument(
'-d', '--dest',
required=False,
default='.',
help='Directory of export destination')
argparser.add_argument(
'-v', '--verbose',
required=False,
default=False,
action='store_true',
help='Verbose output')
argparser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(
pkg_resources.require('xmbedinit')[0].version))
args = argparser.parse_args()
set_verbose(args.verbose)
dest = Path(args.dest).resolve()
if not dest.exists():
sys.exit('"{}" is not exists'.format(dest))
if not dest.is_dir():
sys.exit('"{}" is not directory'.format(dest))
work_dir = Path(os.path.expanduser('~/.cache/xmbedinit'))
mkdir_p(work_dir)
mbed_tag = args.tag
mbed_dir = work_dir.joinpath(mbed_tag)
mbed_target = args.target
mkdir_p(mbed_dir)
os.chdir(str(mbed_dir))
if not Path('mbed-os').exists():
url = 'https://github.com/ARMmbed/mbed-os.git'
cmd = 'git clone --depth=1 -b {} {}'.format(mbed_tag, url)
subprocess.check_call(cmd, shell=True)
with Path('main.cpp').open('w') as f:
f.write(u'int main(void) { return 0; }')
cmd = 'mbed compile -t GCC_ARM -m {}'.format(mbed_target)
subprocess.check_call(cmd, shell=True)
cmd = 'mbed export -i GCC_ARM -m {}'.format(mbed_target)
subprocess.check_call(cmd, shell=True)
vlog('Parse Files ...')
parser = Parser()
build_info = parser.parse_makefile('Makefile', None)
parser.parse_mbed_config('mbed_config.h', build_info)
vlog('Copy files ...')
export_files(dest, build_info.headers)
export_files(dest, build_info.sources)
shutil.copyfile(
'BUILD/{}/GCC_ARM/.link_script.ld'.format(mbed_target),
str(dest.joinpath('linker_script.ld')))
all_library_info = make_all_library_info('mbed-os')
use_libraries = [
'targets',
'platform',
'rtos',
'rtos-api',
'cmsis',
'drivers',
'events'
]
unused_libraries = [
x for x in all_library_info
if x['name'] not in use_libraries]
build_info = UnusedLibraryFilter().filterd(
build_info, unused_libraries)
# ----------------------------------------
cmake_dir = dest.joinpath('CMake')
mkdir_p(str(cmake_dir))
b = build_info
b.include_dirs = [Path(x).as_posix() for x in b.include_dirs]
b.sources = [Path(x).as_posix() for x in b.sources]
b.headers = [Path(x).as_posix() for x in b.headers]
for x in b.removed_sources:
x['value'] = Path(x['value']).as_posix()
for x in b.removed_headers:
x['value'] = Path(x['value']).as_posix()
for x in b.removed_include_dirs:
x['value'] = Path(x['value']).as_posix()
args = {}
args['project_name'] = dest.stem
args['url'] = 'https://github.com/ARMmbed/mbed-os.git'
args['tag'] = mbed_tag
args['target'] = mbed_target
args['warning_opts'] = build_info.warning_opts
args['arch_opts'] = build_info.arch_opts
args['c_extra_opts'] = build_info.c_extra_opts
args['cxx_extra_opts'] = build_info.cxx_extra_opts
args['include_dirs'] = build_info.include_dirs
args['definitions'] = build_info.definitions
args['link_libraries'] = build_info.link_libraries
args['linker_flags'] = build_info.linker_flags
args['sources'] = build_info.sources
args['headers'] = build_info.headers
args['config_definitions'] = build_info.config_definitions
args['removed_sources'] = build_info.removed_sources
args['removed_headers'] = build_info.removed_headers
args['removed_config_definitions'] = build_info.removed_config_definitions
args['removed_include_dirs'] = build_info.removed_include_dirs
if verbose:
vlog(pyaml.dump(args))
templates = [
'mbed.cmake',
'toolchain-arm-none-eabi-gcc.cmake',
]
render(args, cmake_dir, templates)
templates = [
'build.sh',
'build-debug.bat',
'build-release.bat',
'build-debug.sh',
'build-release.sh',
'build-compile_commands.sh',
'xmbedinit.log',
'CMakeLists.txt',
'main.cpp',
]
render(args, dest, templates)
if __name__ == '__main__':
    main()
cnxn, item_ids):
return {
item_id: self.LookupItemStarrers(cnxn, item_id) for item_id in item_ids}
def LookupStarredItemIDs(self, _cnxn, starrer_user_id):
return self.stars_by_starrer_id.get(starrer_user_id, [])
def IsItemStarredBy(self, cnxn, item_id, starrer_user_id):
return item_id in self.LookupStarredItemIDs(cnxn, starrer_user_id)
def CountItemStars(self, cnxn, item_id):
return len(self.LookupItemStarrers(cnxn, item_id))
def CountItemsStars(self, cnxn, item_ids):
return {item_id: self.CountItemStars(cnxn, item_id)
for item_id in item_ids}
def _SetStar(self, cnxn, item_id, starrer_user_id, starred):
if starred and not self.IsItemStarredBy(cnxn, item_id, starrer_user_id):
self.stars_by_item_id.setdefault(item_id, []).append(starrer_user_id)
self.stars_by_starrer_id.setdefault(starrer_user_id, []).append(item_id)
elif not starred and self.IsItemStarredBy(cnxn, item_id, starrer_user_id):
self.stars_by_item_id[item_id].remove(starrer_user_id)
self.stars_by_starrer_id[starrer_user_id].remove(item_id)
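# stars_by_item_id maps item_id -> [starrer user ids] and stars_by_starrer_id
# is the inverse index (user id -> [item ids]); _SetStar keeps both in sync.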
def SetStar(self, cnxn, item_id, starrer_user_id, starred):
self._SetStar(cnxn, item_id, starrer_user_id, starred)
def SetStarsBatch(
self, cnxn, item_id, starrer_user_ids, starred, commit=True):
for starrer_user_id in starrer_user_ids:
self._SetStar(cnxn, item_id, starrer_user_id, starred)
class UserStarService(AbstractStarService):
pass
class ProjectStarService(AbstractStarService):
pass
class HotlistStarService(AbstractStarService):
pass
class IssueStarService(AbstractStarService):
# pylint: disable=arguments-differ
def SetStar(
self, cnxn, services, _config, issue_id, starrer_user_id,
starred):
super(IssueStarService, self).SetStar(
cnxn, issue_id, starrer_user_id, starred)
try:
issue = services.issue.GetIssue(cnxn, issue_id)
issue.star_count += (1 if starred else -1)
except exceptions.NoSuchIssueException:
pass
# pylint: disable=arguments-differ
def SetStarsBatch(
self, cnxn, _service, _config, issue_id, starrer_user_ids,
starred):
super(IssueStarService, self).SetStarsBatch(
cnxn, issue_id, starrer_user_ids, starred)
def SetStarsBatch_SkipIssueUpdate(
self, cnxn, issue_id, starrer_user_ids, starred, commit=True):
super(IssueStarService, self).SetStarsBatch(
cnxn, issue_id, starrer_user_ids, starred)
class ProjectService(object):
"""Fake ProjectService object.
Provides methods for creating users and projects, which are accessible
through parts of the real ProjectService interface.
"""
def __init__(self):
self.test_projects = {} # project_name -> project_pb
self.projects_by_id = {} # project_id -> project_pb
self.test_star_manager = None
self.indexed_projects = {}
self.unindexed_projects = set()
self.index_counter = 0
self.project_commitments = {}
self.ac_exclusion_ids = {}
self.no_expand_ids = {}
def TestAddProject(
self, name, summary='', state=project_pb2.ProjectState.LIVE,
owner_ids=None, committer_ids=None, contrib_ids=None,
issue_notify_address=None, state_reason='', description=None,
project_id=None, process_inbound_email=None, access=None,
extra_perms=None):
"""Add a project to the fake ProjectService object.
Args:
name: The name of the project. Will replace any existing project under
the same name.
summary: The summary string of the project.
state: Initial state for the project from project_pb2.ProjectState.
owner_ids: List of user ids for project owners
committer_ids: List of user ids for project committers
contrib_ids: List of user ids for project contributors
issue_notify_address: email address to send issue change notifications
state_reason: string describing the reason the project is in its current
state.
description: The description string for this project
project_id: A unique integer identifier for the created project.
process_inbound_email: True to make this project accept inbound email.
access: One of the values of enum project_pb2.ProjectAccess.
extra_perms: List of ExtraPerms PBs for project members.
Returns:
A populated project PB.
"""
proj_pb = project_pb2.Project()
proj_pb.project_id = project_id or hash(name) % 100000
proj_pb.project_name = name
proj_pb.summary = summary
proj_pb.state = state
proj_pb.state_reason = state_reason
proj_pb.extra_perms = extra_perms or []
if description is not None:
proj_pb.description = description
self.TestAddProjectMembers(owner_ids, proj_pb, OWNER_ROLE)
self.TestAddProjectMembers(committer_ids, proj_pb, COMMITTER_ROLE)
self.TestAddProjectMembers(contrib_ids, proj_pb, CONTRIBUTOR_ROLE)
if issue_notify_address is not None:
proj_pb.issue_notify_address = issue_notify_address
if process_inbound_email is not None:
proj_pb.process_inbound_email = process_inbound_email
if access is not None:
proj_pb.access = access
self.test_projects[name] = proj_pb
self.projects_by_id[proj_pb.project_id] = proj_pb
return proj_pb
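# Hedged usage sketch (project name and ids below are made up for illustration):
#   svc = ProjectService()
#   proj = svc.TestAddProject(
#       'my-proj', owner_ids=[111], committer_ids=[222], project_id=789)
#   assert svc.GetProjectByName('fake cnxn', 'my-proj') is proj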
def TestAddProjectMembers(self, user_id_list, proj_pb, role):
if user_id_list is not None:
for user_id in user_id_list:
if role == OWNER_ROLE:
proj_pb.owner_ids.append(user_id)
elif role == COMMITTER_ROLE:
proj_pb.committer_ids.append(user_id)
elif role == CONTRIBUTOR_ROLE:
proj_pb.contributor_ids.append(user_id)
def LookupProjectIDs(self, cnxn, project_names):
return {
project_name: self.test_projects[project_name].project_id
for project_name in project_names
if project_name in self.test_projects}
def LookupProjectNames(self, cnxn, project_ids):
projects_dict = self.GetProjects(cnxn, project_ids)
return {p.project_id: p.project_name
for p in projects_dict.values()}
def CreateProject(
self, _cnxn, project_name, owner_ids, committer_ids,
contributor_ids, summary, description,
state=project_pb2.ProjectState.LIVE, access=None,
read_only_reason=None,
home_page=None, docs_url=None, source_url=None,
logo_gcs_id=None, logo_file_name=None):
"""Create and store a Project with the given attributes."""
if project_name in self.test_projects:
raise exceptions.ProjectAlreadyExists()
project = self.TestAddProject(
project_name, summary=summary, state=state,
owner_ids=owner_ids, committer_ids=committer_ids,
contrib_ids=contributor_ids, description=description,
access=access)
return project.project_id
def ExpungeProject(self, _cnxn, project_id):
project = self.projects_by_id.get(project_id)
if project:
self.test_projects.pop(project.project_name, None)
def GetProjectsByName(self, _cnxn, project_name_list, use_cache=True):
return {
pn: self.test_projects[pn] for pn in project_name_list
if pn in self.test_projects}
def GetProjectByName(self, _cnxn, name, use_cache=True):
return self.test_projects.get(name)
def GetProjectList(self, cnxn, project_id_list, use_cache=True):
project_dict = self.GetProjects(cnxn, project_id_list, use_cache=use_cache)
return [project_dict[pid] for pid in project_id_list
if pid in project_dict]
def GetVisibleLiveProjects(
self, _cnxn, logged_in_user, effective_ids, domain=None, use_cache=True):
project_ids = list(self.projects_by_id.keys())
visible_project_ids = []
for pid in project_ids:
can_view = permissions.UserCanViewProject(
logged_in_user, effective_ids, self.projects_by_id[pid])
different_domain = framework_helpers.GetNeededDomain(
self.projects_by_id[pid].project_name, domain)
if can_view and not different_domain:
visible_project_ids.append(pid)
return visible_project_ids
def GetProjects(self, _cnxn, project_ids, use_cache=True):
result = {}
for project_id in project_ids:
project = self.projects_by_id.get(project_id)
if project:
result[project_id] = project
else:
raise exceptions.NoSuchProjectException(project_id)
return result
def GetAllProjects(self, _cnxn, use_cache=True):
result = {}
for project_id in self.projects_by_id:
project = self.projects_by_id.get(project_id)
result[project_id] = project
return result
def GetProject(self, cnxn, project_id, use_cache=True):
"""Load the specified project from the database."""
project_id_dict = self.GetProjects(cnxn, [project_id], use_cache=use_cache)
if project_id not in project_id_dict:
raise exceptions.NoSuchProjectException()
return project_id_dict[project_id]
def GetProjectCommitments(self, _cnxn, project_id):
if project_id in self.project_commitments:
return self.project_commitments[project_id]
project_commitments = project_pb2.ProjectCommitments()
project_commitments.project_id = project_id
return project_commitments
def TestStoreProjectCommitments(self, project_commitments):
key = project_commitments.project_id
self.project_commitments[key] = project_commitments
def GetProjectAutocompleteExclusion(self, cnxn, project_id):
return (self.ac_exclusion_ids.get(project_id, []),
self.no_expand_ids.get(project_id, []))
def UpdateProject(
self,
_cnxn,
project_id,
summary=None,
description=None,
state=None,
state_reason=None,
access=None,
issue_notify_address=None,
attachment_bytes_used=None,
attachment_quota=None,
moved_to=None,
process_inbound_email=None,
only_owners_remove_restrictions=None,
read_only_reason=None,
cached_content_timestamp=None,
only_owners_see_contributors=None,
delete_time=None,
recent_activity=None,
revision_url_format=None,
home_page=None,
docs_url=None,
source_url=None,
logo_gcs_id=None,
logo_file_name=None,
issue_notify_always_detailed=None,
commit=True):
project = self.projects_by_id.get(project_id)
if not project:
raise exceptions.NoSuchProjectException(
'Project "%s" not found!' % project_id)
# TODO(jrobbins): implement all passed arguments - probably as a utility
# method shared with the real persistence implementation.
if read_only_reason is not None:
project.read_only_reason = read_only_reason
if attachment_bytes_used is not None:
project.attachment_bytes_used = attachment_bytes_used
def UpdateProjectRoles(
self, _cnxn, project_id, owner_ids, committer_ids,
contributor_ids, now=None):
project = self.projects_by_id.get(project_id)
if not project:
raise exceptions.NoSuchProjectException(
'Project "%s" not found!' % project_id)
project.owner_ids = owner_ids
project.committer_ids = committer_ids
project.contributor_ids = contributor_ids
def MarkProjectDeletable(
self, _cnxn, project_id, _config_service):
project = self.projects_by_id[project_id]
project.project_name = 'DELETABLE_%d' % project_id
project.state = project_pb2.ProjectState.DELETABLE
def UpdateRecentActivity(self, _cnxn, _project_id, now=None):
pass
def GetUserRolesInAllProjects(self, _cnxn, effective_ids):
owned_project_ids = set()
membered_project_ids = set()
contrib_project_ids = set()
for project in self.projects_by_id.values():
if not effective_ids.isdisjoint(project.owner_ids):
owned_project_ids.add(project.project_id)
elif not effective_ids.isdisjoint(project.committer_ids):
membered_project_ids.add(project.project_id)
elif not effective_ids.isdisjoint(project.contributor_ids):
contrib_project_ids.add(project.project_id)
return owned_project_ids, membered_project_ids, contrib_project_ids
def GetProjectMemberships(self, _cnxn, effective_ids, use_cache=True):
# type: MonorailConnection, Collection[int], bool ->
# Mapping[int, Collection[int]]
projects_by_user_id = collections.defaultdict(set)
for project in self.projects_by_id.values():
member_ids = set(
itertools.chain(
project.owner_ids, project.committer_ids,
project.contributor_ids))
for user_id in effective_ids:
if user_id in member_ids:
projects_by_user_id[user_id].add(project.project_id)
return projects_by_user_id
def ExpungeUsersInProjects(self, cnxn, user_ids, limit=None):
for project in self.projects_by_id.values():
project.owner_ids = [owner_id for owner_id in project.owner_ids
if owner_id not in user_ids]
project.committer_ids = [com_id for com_id in project.committer_ids
if com_id not in user_ids]
project.contributor_ids = [con_id for con_id in project.contributor_ids
if con_id not in user_ids]
class ConfigService(object):
"""Fake version of ConfigService that just works in-RAM."""
def __init__(self, user_id=None):
self.project_configs = {}
self.next_field_id = 123
self.next_component_id = 345
self.next_template_id = 23
self.expunged_configs = []
self.expunged_users_in_configs = []
self.component_ids_to_templates = {}
self.label_to_id = {}
self.id_to_label = {}
self.strict = False # Set true to raise more exceptions like real class.
def TestAddLabelsDict(self, label_to_id):
self.label_to_id = label_to_id
self.id_to_label = {
label_id: label
for label, label_id in list(self.label_to_id.items())}
def TestAddFieldDef(self, fd):
self.project_configs[fd.project_id].field_defs.append(fd)
def TestAddApprovalDef(self, ad, project_id):
self.project_configs[project_id].approval_defs.append(ad)
def ExpungeConfig(self, _cnxn, project_id):
self.expunged_configs.append(project_id)
def ExpungeUsersInConfigs(self, _cnxn, user_ids, limit=None):
self.expunged_users_in_configs.extend(user_ids)
def GetLabelDefRows(self, cnxn, project_id, use_cache=True):
"""This always returns empty results. Mock it to test other cases."""
return []
def GetLabelDefRowsAnyProject(self, cnxn, where=None):
"""This always returns empty results. Mock it to test other cases."""
return []
def LookupLabel(self, cnxn, project_id, label_id):
if label_id in self.id_to_label:
return self.id_to_label[label_id]
if label_id == 999:
return None
return 'label_%d_%d' % (project_id, label_id)
def LookupLabelID(self, cnxn, project_id, label, autocreate=True):
if label in self.label_to_id:
return self.label_to_id[label]
return 1
def LookupLabelIDs(self, cnxn, project_id, labels, autocreate=False):
ids = []
next_label_id = 0
if self.id_to_label:
    # dict views cannot be sorted in place in Python 3; take the max key instead
    next_label_id = max(self.id_to_label.keys()) + 1
for label in labels:
if self.label_to_id.get(label) is not None:
ids.append(self.label_to_id[label])
elif autocreate:
self.label_to_id[label] = next_label_id
self.id_to_label[next_label_id] = label
ids.append(next_label_id)
next_label_id += 1
return ids
def LookupIDsOfLabelsMatching(self, cnxn, project_id, regex):
return [1, 2, 3]
def LookupStatus(self, cnxn, project_id, status_id):
return 'status_%d_%d' % (project_id, status_id)
def LookupStatusID(self, cnxn, project_id, status, autocreate=True):
if status:
return 1
else:
return 0
def LookupStatusIDs(self, cnxn, project_id, statuses):
return [idx for idx, _status in enumerate(statuses)]
def LookupClosedStatusIDs(self, cnxn, project_id):
return [7, 8, 9]
def StoreConfig(self, _cnxn, config):
self.project_configs[config.project_id] = config
def GetProjectConfig(self, _cnxn, project_id, use_cache=True):
if project_id in self.project_configs:
    return self.project_configs[project_id]
# dockercloudcli/commands.py
from __future__ import print_function
import errno
import getpass
import json
import logging
import sys
import urllib
import dockercloud
import websocket
import yaml
from dockercloudcli import utils
AUTH_ERROR_EXIT_CODE = 2
EXCEPTION_EXIT_CODE = 3
cli_log = logging.getLogger("cli")
API_VERSION = "v1"
def login():
print('''
Please use "docker login" to log into Docker Cloud with you Docker ID"
Alternatively, you can set the following environment variables:
export DOCKERCLOUD_USER=<docker username>
export DOCKERCLOUD_PASS=<docker password>
''')
def event():
def on_error(e):
print(e, file=sys.stderr)
if isinstance(e, KeyboardInterrupt):
exit(0)
try:
events = dockercloud.Events()
events.on_error(on_error)
events.on_message(lambda m: print(m))
events.run_forever()
except KeyboardInterrupt:
pass
except dockercloud.AuthError as e:
print(e, file=sys.stderr)
sys.exit(AUTH_ERROR_EXIT_CODE)
def service_inspect(identifiers):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
print(json.dumps(service.get_all_attributes(), indent=2))
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_logs(identifiers, tail, follow):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
service.logs(tail, follow, utils.container_service_log_handler)
except KeyboardInterrupt:
pass
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_ps(quiet, status, stack):
try:
headers = ["NAME", "UUID", "STATUS", "#CONTAINERS", "IMAGE", "DEPLOYED", "PUBLIC DNS", "STACK"]
stack_resource_uri = None
if stack:
s = dockercloud.Utils.fetch_remote_stack(stack, raise_exceptions=False)
if isinstance(s, dockercloud.NonUniqueIdentifier):
raise dockercloud.NonUniqueIdentifier(
"Identifier %s matches more than one stack, please use UUID instead" % stack)
if isinstance(s, dockercloud.ObjectNotFound):
raise dockercloud.ObjectNotFound("Identifier '%s' does not match any stack" % stack)
stack_resource_uri = s.resource_uri
service_list = dockercloud.Service.list(state=status, stack=stack_resource_uri)
data_list = []
long_uuid_list = []
has_unsynchronized_service = False
stacks = {}
for stack in dockercloud.Stack.list():
stacks[stack.resource_uri] = stack.name
for service in service_list:
service_state = utils.add_unicode_symbol_to_state(service.state)
if not service.synchronized and service.state != "Redeploying":
service_state += "(*)"
has_unsynchronized_service = True
data_list.append([service.name, service.uuid[:8],
service_state,
service.current_num_containers,
service.image_name,
utils.get_humanize_local_datetime_from_utc_datetime_string(service.deployed_datetime),
service.public_dns,
stacks.get(service.stack)])
long_uuid_list.append(service.uuid)
if len(data_list) == 0:
data_list.append(["", "", "", "", "", ""])
if quiet:
for uuid in long_uuid_list:
print(uuid)
else:
utils.tabulate_result(data_list, headers)
if has_unsynchronized_service:
print(
"\n(*) Please note that this service needs to be redeployed to "
"have its configuration changes applied")
except Exception as e:
print(e, file=sys.stderr)
sys.exit(EXCEPTION_EXIT_CODE)
def service_redeploy(identifiers, not_reuse_volume, sync):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
result = service.redeploy(not not_reuse_volume)
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_create(image, name, cpu_shares, memory, privileged, target_num_containers, run_command, entrypoint,
expose, publish, envvars, envfiles, tag, linked_to_service, autorestart, autodestroy, autoredeploy,
roles, sequential, volume, volumes_from, deployment_strategy, sync, net, pid):
has_exception = False
try:
ports = utils.parse_published_ports(publish)
# Add exposed ports to `ports`, skipping any whose inner_port is already present in the published ports
exposed_ports = utils.parse_exposed_ports(expose)
for exposed_port in exposed_ports:
existed = False
for port in ports:
if exposed_port.get('inner_port', '') == port.get('inner_port', ''):
existed = True
break
if not existed:
ports.append(exposed_port)
envvars = utils.parse_envvars(envvars, envfiles)
links_service = utils.parse_links(linked_to_service, 'to_service')
tags = []
if tag:
if isinstance(tag, list):
for t in tag:
tags.append({"name": t})
else:
tags.append({"name": tag})
bindings = utils.parse_volume(volume)
bindings.extend(utils.parse_volumes_from(volumes_from))
service = dockercloud.Service.create(image=image, name=name, cpu_shares=cpu_shares,
memory=memory, privileged=privileged,
target_num_containers=target_num_containers, run_command=run_command,
entrypoint=entrypoint, container_ports=ports, container_envvars=envvars,
linked_to_service=links_service,
autorestart=autorestart, autodestroy=autodestroy,
autoredeploy=autoredeploy,
roles=roles, sequential_deployment=sequential, tags=tags,
bindings=bindings,
deployment_strategy=deployment_strategy, net=net, pid=pid)
result = service.save()
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_run(image, name, cpu_shares, memory, privileged, target_num_containers, run_command, entrypoint,
expose, publish, envvars, envfiles, tag, linked_to_service, autorestart, autodestroy, autoredeploy,
roles, sequential, volume, volumes_from, deployment_strategy, sync, net, pid):
has_exception = False
try:
ports = utils.parse_published_ports(publish)
# Add exposed ports to `ports`, skipping any whose inner_port is already present in the published ports
exposed_ports = utils.parse_exposed_ports(expose)
for exposed_port in exposed_ports:
existed = False
for port in ports:
if exposed_port.get('inner_port', '') == port.get('inner_port', ''):
existed = True
break
if not existed:
ports.append(exposed_port)
envvars = utils.parse_envvars(envvars, envfiles)
links_service = utils.parse_links(linked_to_service, 'to_service')
tags = []
if tag:
if isinstance(tag, list):
for t in tag:
tags.append({"name": t})
else:
tags.append({"name": tag})
bindings = utils.parse_volume(volume)
bindings.extend(utils.parse_volumes_from(volumes_from))
service = dockercloud.Service.create(image=image, name=name, cpu_shares=cpu_shares,
memory=memory, privileged=privileged,
target_num_containers=target_num_containers, run_command=run_command,
entrypoint=entrypoint, container_ports=ports, container_envvars=envvars,
linked_to_service=links_service,
autorestart=autorestart, autodestroy=autodestroy,
autoredeploy=autoredeploy,
roles=roles, sequential_deployment=sequential, tags=tags,
bindings=bindings,
deployment_strategy=deployment_strategy, net=net, pid=pid)
service.save()
result = service.start()
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_scale(identifiers, target_num_containers, sync):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
service.target_num_containers = target_num_containers
service.save()
result = service.scale()
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_set(identifiers, image, cpu_shares, memory, privileged, target_num_containers, run_command, entrypoint,
expose, publish, envvars, envfiles, tag, linked_to_service, autorestart, autodestroy, autoredeploy,
roles, sequential, redeploy, volume, volumes_from, deployment_strategy, sync, net, pid):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier, raise_exceptions=True)
if service is not None:
if image:
service.image = image
if cpu_shares:
service.cpu_shares = cpu_shares
if memory:
service.memory = memory
if privileged is not None:
service.privileged = privileged
if target_num_containers:
service.target_num_containers = target_num_containers
if run_command:
service.run_command = run_command
if entrypoint:
service.entrypoint = entrypoint
ports = utils.parse_published_ports(publish)
# Add exposed ports to `ports`, skipping any whose inner_port is already present in the published ports
exposed_ports = utils.parse_exposed_ports(expose)
for exposed_port in exposed_ports:
existed = False
for port in ports:
if exposed_port.get('inner_port', '') == port.get('inner_port', ''):
existed = True
break
if not existed:
ports.append(exposed_port)
if ports:
service.container_ports = ports
envvars = utils.parse_envvars(envvars, envfiles)
if envvars:
service.container_envvars = envvars
if tag:
service.tags = []
for t in tag:
new_tag = {"name": t}
if new_tag not in service.tags:
service.tags.append(new_tag)
service.__addchanges__("tags")
links_service = utils.parse_links(linked_to_service, 'to_service')
if linked_to_service:
service.linked_to_service = links_service
if autorestart:
service.autorestart = autorestart
if autodestroy:
service.autodestroy = autodestroy
if autoredeploy is not None:
service.autoredeploy = autoredeploy
if roles:
service.roles = roles
if sequential is not None:
service.sequential_deployment = sequential
bindings = utils.parse_volume(volume)
bindings.extend(utils.parse_volumes_from(volumes_from))
if bindings:
service.bindings = bindings
if deployment_strategy:
service.deployment_strategy = deployment_strategy
if net:
service.net = net
if pid:
service.pid = pid
result = service.save()
if not utils.sync_action(service, sync):
has_exception = True
if result:
if redeploy:
print("Redeploying Service ...")
result2 = service.redeploy()
if not utils.sync_action(service, sync):
has_exception = True
if result2:
print(service.uuid)
else:
print(service.uuid)
print("Service must be redeployed to have its configuration changes applied.")
print("To redeploy execute: $ docker-cloud service redeploy", identifier)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_start(identifiers, sync):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
result = service.start()
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_stop(identifiers, sync):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
result = service.stop()
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def service_terminate(identifiers, sync):
has_exception = False
for identifier in identifiers:
try:
service = dockercloud.Utils.fetch_remote_service(identifier)
result = service.delete()
if not utils.sync_action(service, sync):
has_exception = True
if result:
print(service.uuid)
except Exception as e:
print(e, file=sys.stderr)
has_exception = True
if has_exception:
sys.exit(EXCEPTION_EXIT_CODE)
def container_exec(identifier, command):
try:
import termios
import tty
import select
import signal
except ImportError:
print("docker-cloud exec is not supported on this operating system", file=sys.stderr)
sys.exit(EXCEPTION_EXIT_CODE)
def invoke_shell(url):
header = {'User-Agent': dockercloud.user_agent}
header.update(dockercloud.auth.get_auth_header())
h = [": ".join([key, value]) for key, value in header.items()]
cli_log.info("websocket: %s %s" % (url, h))
shell = websocket.create_connection(url, timeout=10, header=h)
oldtty = None
try:
oldtty = termios.tcgetattr(sys.stdin)
except:
pass
old_handler = signal.getsignal(signal.SIGWINCH)
errorcode = 0
try:
if oldtty:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
while True:
try:
if oldtty:
r, w, e = select.select([shell.sock, sys.stdin], [], [shell.sock], 5)
if sys.stdin in r:
x = sys.stdin.read(1)
# read arrows
if x == '\x1b':
x += sys.stdin.read(1)
if x[1] == '[':
x += sys.stdin.read(1)
if len(x) == 0:
shell.send('\n')
shell.send(x)
else:
x = str(sys.stdin.read())
r, w, e = select.select([shell.sock], [], [shell.sock], 1)
shell.send(x)
shell.send(u"\u0004")
if shell.sock in r:
data = shell.recv()
if not data:
continue
try:
message = json.loads(data)
if message.get("type") == "error":
if message.get("data", {}).get("errorMessage") == "UNAUTHORIZED":
raise dockercloud.AuthError
else:
raise dockercloud.ApiError(message)
streamType = message.get("streamType")
if streamType == "stdout":
sys.stdout.write(message.get("output"))
sys.stdout.flush()
elif streamType == "stderr":
sys.stderr.write(message.get("output"))
sys.stderr.flush()
except dockercloud.AuthError:
raise
except:
sys.stdout.write(data)
sys.stdout.flush()
except (select.error, IOError) as e:
if e.args and e.args[0] == errno.EINTR:
pass
else:
raise
except dockercloud.AuthError:
sys.stderr.write("Not | |
self.delta_ipre_to_isym[infector] +
(self.delta_isym_to_dead[infector] if self.bernoulli_is_fatal[infector] else self.delta_isym_to_resi[infector]))
# sample exposure at later point
if t < tmax:
self.__push_contact_exposure_infector_to_j(
t=t, infector=infector, j=i, base_rate=base_rate_infector, tmax=tmax)
elif event == 'ipre':
self.__process_presymptomatic_event(t, i)
elif event == 'iasy':
self.__process_asymptomatic_event(t, i)
elif event == 'isym':
self.__process_symptomatic_event(t, i)
elif event == 'resi':
self.__process_resistant_event(t, i)
elif event == 'test':
self.__process_testing_event(t, i, metadata)
elif event == 'dead':
self.__process_fatal_event(t, i)
elif event == 'hosp':
# cannot get hospitalization if not ill anymore
valid_hospitalization = \
((not self.state['resi'][i]) and
(not self.state['dead'][i]))
if valid_hospitalization:
self.__process_hosp_event(t, i)
else:
# this should only happen for invalid exposure events
assert(event == 'expo')
# print
self.__print(t, force=True)
print('End main loop')
print('Total number of infections:', self.tot_inf_num)
print('Infections from contacts', self.inf_num)
print('Infections from indirect contacts', self.indir_inf_num)
print('Infections from pure indirect contacts', self.full_indir_inf_num)
# print('% exposed in risk buckets: ', 100.0 * self.risk_got_exposed / (self.risk_got_exposed + self.risk_got_not_exposed))
'''Compute ROC statistics'''
# tracing_stats [threshold][policy][action][stat]
self.tracing_stats = {}
if len(self.thresholds_roc) > 0:
for threshold in self.thresholds_roc:
self.tracing_stats[threshold] = self.compute_roc_stats(
threshold_isolate=threshold, threshold_test=threshold)
# stats = self.tracing_stats[self.thresholds_roc[0]]['sites']['isolate']
# print(" P {:5.2f} N {:5.2f}".format(
# (stats['fn'] + stats['tp']), (stats['fp'] + stats['tn'])
# ))
# free memory
self.valid_contacts_for_tracing = None
self.queue = None
def compute_roc_stats(self, *, threshold_isolate, threshold_test):
'''
Recovers contacts for which trace/no-trace decision was made.
Then re-computes TP/FP/TN/FN for different decision thresholds, using as
label whether the contacted person actually got exposed.
Assumes `advanced-threshold` policy for both isolation and testing.
'''
stats = {
'sites' : {
'isolate' : {'tp' : 0, 'fp' : 0, 'tn' : 0, 'fn' : 0},
'test' : {'tp' : 0, 'fp' : 0, 'tn' : 0, 'fn' : 0},
},
'no_sites' : {
'isolate' : {'tp' : 0, 'fp' : 0, 'tn' : 0, 'fn' : 0},
'test' : {'tp' : 0, 'fp' : 0, 'tn' : 0, 'fn' : 0},
},
}
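# stats is indexed as stats[policy][action][count]: policy is 'sites' or
# 'no_sites' (whether per-site information is used for tracing), action is
# 'isolate' or 'test', and the counts are the confusion-matrix entries.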
# c[sites/no_sites][isolate/test][False/True][j]
# i-j contacts due to which j was traced/not traced
c = {
'sites' : {
'isolate': {
False: [[] for _ in range(self.n_people)],
True: [[] for _ in range(self.n_people)],
},
'test': {
False: [[] for _ in range(self.n_people)],
True: [[] for _ in range(self.n_people)],
},
},
'no_sites' : {
'isolate': {
False: [[] for _ in range(self.n_people)],
True: [[] for _ in range(self.n_people)],
},
'test': {
False: [[] for _ in range(self.n_people)],
True: [[] for _ in range(self.n_people)],
},
},
}
individuals_traced = set()
# for each tracing call due to an `infector`, re-compute the classification decision (trace or not)
# under the given thresholds, and record the decision per individual in the `c` dict above
for t, infector, valid_contacts_with_j in self.valid_contacts_for_tracing:
# inspect whether the infector was symptomatic or asymptomatic
if self.state_started_at['iasy'][infector] < np.inf:
base_rate_inf = self.mu
else:
base_rate_inf = 1.0
# compute empirical survival probability
emp_survival_prob = {
'sites' : dict(),
'no_sites' : dict()
}
for j, contacts_j in valid_contacts_with_j.items():
individuals_traced.add(j)
emp_survival_prob['sites'][j] = self.__compute_empirical_survival_probability(
t=t, i=infector, j=j, contacts_i_j=contacts_j, base_rate=base_rate_inf, ignore_sites=False)
emp_survival_prob['no_sites'][j] = self.__compute_empirical_survival_probability(
t=t, i=infector, j=j, contacts_i_j=contacts_j, base_rate=base_rate_inf, ignore_sites=True)
# compute tracing decision
for policy in ['sites', 'no_sites']:
for action in ['isolate', 'test']:
contacts_action, contacts_no_action = self.__tracing_policy_advanced_threshold(
t=t, contacts_with_j=valid_contacts_with_j,
threshold=threshold_isolate if action == 'isolate' else threshold_test,
emp_survival_prob=emp_survival_prob[policy])
for j, contacts_j in contacts_action:
c[policy][action][True][j].append((t, set(contacts_j)))
for j, contacts_j in contacts_no_action:
c[policy][action][False][j].append((t, set(contacts_j)))
# for each individual considered in tracing, compare label (contact exposure?) with classification (traced due to this contact?)
for j in individuals_traced:
j_was_exposed = self.state_started_at['expo'][j] < np.inf
c_expo = self.contact_caused_expo[j]
# skip if `j` got exposed by another source, even though traced (household or background)
if (c_expo is None) and j_was_exposed:
continue
for policy in ['sites', 'no_sites']:
for action in ['isolate', 'test']:
# each time `j` is traced after a contact
for timing, c_traced in c[policy][action][True][j]:
# ignore FP if there is no way of knowing
if self.state_started_at['expo'][j] < timing - self.smart_tracing_contact_delta:
continue
# and this contact ultimately caused the exposure of `j`
# TP
# if (c_expo is not None) and (c_expo in c_traced):
if self.state_started_at['expo'][j] <= timing \
and timing - self.smart_tracing_contact_delta <= self.state_started_at['expo'][j] \
and (c_expo is not None):
stats[policy][action]['tp'] += 1
# otherwise: `j` either wasn't exposed or exposed but by another contact
# FP
else:
stats[policy][action]['fp'] += 1
# each time `j` is not traced after a contact
for timing, c_not_traced in c[policy][action][False][j]:
# ignore TN if there is no way of knowing
if self.state_started_at['expo'][j] < timing - self.smart_tracing_contact_delta:
continue
# and this contact ultimately caused the exposure of `j`
# FN
# if (c_expo is not None) and (c_expo in c_not_traced):
if self.state_started_at['expo'][j] <= timing \
and timing - self.smart_tracing_contact_delta <= self.state_started_at['expo'][j] \
and (c_expo is not None):
stats[policy][action]['fn'] += 1
# otherwise: `j` either wasn't exposed or not exposed but by another contact
# TN
else:
stats[policy][action]['tn'] += 1
return stats
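# Hedged sketch of possible downstream use (assumed, not taken from this file):
# one ROC point per threshold can be derived from these counts, for example:
#   s = self.tracing_stats[threshold]['sites']['isolate']
#   tpr = s['tp'] / max(1, s['tp'] + s['fn'])
#   fpr = s['fp'] / max(1, s['fp'] + s['tn'])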
def __process_exposure_event(self, *, t, i, parent, contact):
"""
Mark person `i` as exposed at time `t`
Push asymptomatic or presymptomatic queue event
"""
self.tot_inf_num += 1
# track flags
assert(self.state['susc'][i])
self.state['susc'][i] = False
self.state['expo'][i] = True
self.state_ended_at['susc'][i] = t
self.state_started_at['expo'][i] = t
if parent is not None:
self.parent[i] = parent
if self.state['iasy'][parent]:
self.children_count_iasy[parent] += 1
elif self.state['ipre'][parent]:
self.children_count_ipre[parent] += 1
elif self.state['isym'][parent]:
self.children_count_isym[parent] += 1
else:
assert False, 'only infectous parents can expose person i'
# decide whether asymptomatic or (pre-)symptomatic
if self.bernoulli_is_iasy[i]:
if t + self.delta_expo_to_iasy[i] < self.max_time:
self.queue.push(
(t + self.delta_expo_to_iasy[i], 'iasy', i, None, None, None),
priority=t + self.delta_expo_to_iasy[i])
else:
if t + self.delta_expo_to_ipre[i] < self.max_time:
self.queue.push(
(t + self.delta_expo_to_ipre[i], 'ipre', i, None, None, None),
priority=t + self.delta_expo_to_ipre[i])
# record which contact caused this exposure event (to check if it was traced for TP/FP/TN/FN computation)
if contact is not None:
self.inf_num += 1
if contact.t_to_direct < t:
assert contact.t_to >= t
self.indir_inf_num += 1
if contact.t_from > contact.t_to_direct:
assert t >= contact.t_from
self.full_indir_inf_num += 1
assert(self.contact_caused_expo[i] is None)
self.contact_caused_expo[i] = contact
def __process_presymptomatic_event(self, t, i, add_exposures=True):
"""
Mark person `i` as presymptomatic at time `t`
Push symptomatic queue event
"""
# track flags
assert(self.state['expo'][i])
self.state['ipre'][i] = True
self.state['expo'][i] = False
self.state_ended_at['expo'][i] = t
self.state_started_at['ipre'][i] = t
# symptomatic event
if t + self.delta_ipre_to_isym[i] < self.max_time:
self.queue.push(
(t + self.delta_ipre_to_isym[i], 'isym', i, None, None, None),
priority=t + self.delta_ipre_to_isym[i])
if add_exposures:
# find tmax for efficiency reasons (based on when individual i will not be infectious anymore)
tmax = (t + self.delta_ipre_to_isym[i] +
        (self.delta_isym_to_dead[i] if self.bernoulli_is_fatal[i]
         else self.delta_isym_to_resi[i]))
# contact exposure of others
self.__push_contact_exposure_events(t=t, infector=i, base_rate=1.0, tmax=tmax)
# household exposures
if self.households is not None and self.beta_household > 0:
self.__push_household_exposure_events(t=t, infector=i, base_rate=1.0, tmax=tmax)
def __process_symptomatic_event(self, t, i, apply_for_test=True):
"""
Mark person `i` as symptomatic at time `t`
Push resistant queue event
"""
# track flags
assert(self.state['ipre'][i])
self.state['isym'][i] = True
self.state['ipre'][i] = False
self.state_ended_at['ipre'][i] = t
self.state_started_at['isym'][i] = t
# testing
if self.test_targets == 'isym' and apply_for_test:
self.__apply_for_testing(t=t, i=i, priority= -self.max_time + t, trigger_tracing_if_positive=True)
# hospitalized?
if self.bernoulli_is_hospi[i]:
if t + self.delta_isym_to_hosp[i] < self.max_time:
self.queue.push(
(t + self.delta_isym_to_hosp[i], 'hosp', i, None, None, None),
priority=t + self.delta_isym_to_hosp[i])
# resistant event vs fatality event
if self.bernoulli_is_fatal[i]:
if t + self.delta_isym_to_dead[i] < self.max_time:
self.queue.push(
(t + self.delta_isym_to_dead[i], 'dead', i, None, None, None),
priority=t + self.delta_isym_to_dead[i])
else:
if t + self.delta_isym_to_resi[i] < self.max_time:
self.queue.push(
(t + self.delta_isym_to_resi[i], 'resi', i, None, None, None),
priority=t + self.delta_isym_to_resi[i])
def __process_asymptomatic_event(self, t, i, add_exposures=True):
"""
Mark person `i` as asymptomatic at time `t`
Push resistant queue event
"""
# track flags
assert(self.state['expo'][i])
self.state['iasy'][i] = True
self.state['expo'][i] = False
self.state_ended_at['expo'][i] = t
self.state_started_at['iasy'][i] = t
# resistant event
if t + self.delta_iasy_to_resi[i] < self.max_time:
self.queue.push(
(t + self.delta_iasy_to_resi[i], 'resi', i, None, None, None),
priority=t + self.delta_iasy_to_resi[i])
if add_exposures:
# contact exposure of others
self.__push_contact_exposure_events(t=t, infector=i, base_rate=self.mu, tmax=t + self.delta_iasy_to_resi[i])
# household exposures
if self.households is not None and self.beta_household > 0:
self.__push_household_exposure_events(t=t, infector=i, base_rate=self.mu, tmax=t + self.delta_iasy_to_resi[i])
# Table.py
import numpy as np
import pandas as pd
from hdfs import InsecureClient
import threading
class Table:
def __init__(self, schema, n_rows, name, storage="row", filename=None):
self.name = name
self.n_cols = len(schema)
self.storage = storage
self.schema = schema
self.col_names = [p[0] for p in schema.items()]
self.dtypes = [p[1] for p in schema.items()]
self.n_rows = n_rows
self.filename = filename
self.view_number = 0
self.col_index = {}
for i, col in enumerate(self.col_names):
self.col_index[col] = i
if self.storage == "row":
self.data = np.empty(self.n_rows, dtype=object)
elif self.storage == "col":
self.data = [np.empty(self.n_rows, dtype=column[1]) for column in self.schema.items()]
else:
self.data = None
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < self.n_rows:
res = self.data[self.n]
self.n += 1
return res
else:
raise StopIteration
def load_csv(self, filename):
df = pd.read_csv(filename, delimiter='|', header=None).iloc[:, :-1]
self.n_rows = len(df)
self.filename = filename
if self.storage == "row":
self.data = df.values
else:
self.data = [np.empty(self.n_rows, dtype=column[1]) for column in self.schema.items()]
for i in range(self.n_cols):
self.data[i][:] = df.iloc[:, i].values[:].astype(self.dtypes[i])
def fill_data(self, listname):
if self.storage == 'row':
    # self.data = np.empty(shape = (self.n_rows, self.n_cols))
    # for row storage the whole 2-D block can be assigned in one step
    self.data = listname[:, :]
else:
for i in range(self.n_cols):
for j in range (self.n_rows):
self.data[i][j] = listname[j][i]
def read_from_hdfs(self, file_name, host):
client_hdfs = InsecureClient(host)
with client_hdfs.read('/user/root/tables'+file_name) as reader:
df = pd.read_csv(reader,index_col=0)
self.n_rows = len(df)
self.filename = file_name
if self.storage == "row":
self.data = df.values
else:
self.data = [np.empty(self.n_rows, dtype=column[1]) for column in self.schema.items()]
for i in range(self.n_cols):
self.data[i][:] = df.iloc[:, i].values[:].astype(self.dtypes[i])
def projection_multithread(self, columns, num_threads):
"""
projection
Projects the data of the table keeping only the columns given as arguments
and returns a new table without duplicate row (multithreading)
:param columns: name of the columns selected to perform the projection.
:type columns: List of string
:return: The view of the table projected on the selected columns.
:rtype: Table
"""
# Construction of the name of the projected view
self.view_number += 1
view_name = "{}_View_{}".format(self.name, self.view_number)
# Construction of the schema of the projected view
projected_schema = {}
for col in columns:
projected_schema[col] = self.schema[col]
# Extraction of the data corresponding to the selected columns
if self.storage == "row":
selected_data = np.empty(self.n_rows, dtype=object)
sel_col = []
for col in columns:
sel_col.append(self.col_index[col])
for i in range(self.n_rows):
selected_data[i] = self.data[i][sel_col]
else:
selected_data = []
for col in columns:
selected_data.append(self.data[self.col_index[col]][:])
# Deletion of the duplicate rows
if self.storage == "row":
# We transpose the data to find the duplicate rows ('np.unique' or 'set' doesn't work with data of type object)
List_index_to_delete = get_index_to_delete(from_rows_to_columns(selected_data, projected_schema))
else:
List_index_to_delete = get_index_to_delete(selected_data)
Nb_rows = self.n_rows - len(List_index_to_delete)
# View construction
projected_view = Table(projected_schema, Nb_rows, view_name, storage=self.storage)
# Updating the data of the projected view
threads_list = np.empty(num_threads, dtype=object)
threads_row = np.array_split(np.array(range(self.n_rows)), num_threads)
def single_thread(row, a):
if self.storage == "row":
if len(List_index_to_delete) != 0:
k = -1
for i in row:
if i not in List_index_to_delete:
k += 1
projected_view.data[k] = selected_data[i]
else:
projected_view.data = selected_data
else:
if len(List_index_to_delete) != 0:
k = -1
for i in row:
if i not in List_index_to_delete:
k += 1
for j, col in enumerate(columns):
projected_view.data[j][k] = self.data[self.col_index[col]][i]
else:
for j, col in enumerate(columns):
projected_view.data[j][:] = self.data[self.col_index[col]][:]
for i in range(num_threads):
threads_list[i] = threading.Thread(target=single_thread, args=(threads_row[i], 1))
# Starting Threads
for t in threads_list:
t.start()
# Waiting for all threads to finish
for t in threads_list:
t.join()
return projected_view
def projection(self, columns):
"""
projection
Projects the data of the table keeping only the columns given as arguments
and returns a new table without duplicate row
:param columns: name of the columns selected to perform the projection.
:type columns: List of string
:return: The view of the table projected on the selected columns.
:rtype: Table
"""
# Construction of the name of the projected view
self.view_number += 1
view_name = "{}_View_{}".format(self.name, self.view_number)
# Construction of the schema of the projected view
projected_schema = {}
for col in columns:
projected_schema[col] = self.schema[col]
# Extraction of the data corresponding to the selected columns
if self.storage == "row":
selected_data = np.empty(self.n_rows, dtype=object)
sel_col = []
for col in columns:
sel_col.append(self.col_index[col])
for i in range(self.n_rows):
selected_data[i] = self.data[i][sel_col]
else:
selected_data = []
for col in columns:
selected_data.append(self.data[self.col_index[col]][:])
# Deletion of the duplicate rows
if self.storage == "row":
# We transpose the data to find the duplicate rows ('np.unique' or 'set' doesn't work with data of type object)
List_index_to_delete = get_index_to_delete(from_rows_to_columns(selected_data, projected_schema))
else:
List_index_to_delete = get_index_to_delete(selected_data)
Nb_rows = self.n_rows - len(List_index_to_delete)
# View construction
projected_view = Table(projected_schema, Nb_rows, view_name, storage=self.storage)
# Updating the data of the projected view
if self.storage == "row":
if len(List_index_to_delete) != 0:
k = -1
for i in range(self.n_rows):
if i not in List_index_to_delete:
k += 1
projected_view.data[k] = selected_data[i]
else:
projected_view.data = selected_data
else:
if len(List_index_to_delete) != 0:
k = -1
projected_view.data = [np.empty(projected_view.n_rows, dtype=column[1]) for column in projected_view.schema.items()]
for i in range(self.n_rows):
if i not in List_index_to_delete:
k += 1
for j, col in enumerate(columns):
projected_view.data[j][k] = self.data[self.col_index[col]][i]
else:
projected_view.data = [np.empty(projected_view.n_rows, dtype=column[1]) for column in projected_view.schema.items()]
for j, col in enumerate(columns):
projected_view.data[j][:] = self.data[self.col_index[col]][:]
return projected_view
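# Hedged usage sketch (not part of the original file): exercising the column-store path of
# fill_data() and projection() on a tiny hand-written relation. The schema, table name and
# row values below are made up for illustration.
def _projection_demo():
    schema = {"id": int, "city": object}
    t = Table(schema, 3, "demo", storage="col")
    t.fill_data([[1, "Paris"], [2, "Lyon"], [2, "Lyon"]])
    # the duplicate (2, "Lyon") row collapses to a single row in the view
    view = t.projection(["id", "city"])
    return view.data   # [array([1, 2]), array(['Paris', 'Lyon'], dtype=object)]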
def create_slice(table, idx):
result = Table(table.schema, len(idx), table.name, table.storage)
if result.storage == "col":
result.data = [col[idx] for col in table.data]
else:
result.data = table.data[idx]
return result
def index_duplicate(a, index_to_explore=None, prefix_key=None):
"""
index_duplicate
Find the indexes of the duplicate values in a Numpy ndarray
:param a: the Numpy array
:type a: numpy.ndarray
:param index_to_explore: list of the indexes from the Numpy array that we want to explore.
Working on a subset of the array improves performance.
(default value=None: the whole array is explored)
:type index_to_explore: list of int
:param prefix_key: prefix added to the dictionary key to avoid duplicate keys.
This prefix is constructed from the keys constructed during the processing
of the previous column (default value=None)
:type prefix_key: string
:return: A dictionary (dict_duplicate) which associates with each duplicated value
its index in the Numpy array
:rtype: dict
example:
>>> index_duplicate(np.array(['B', 'C', 'B', 'D', 'D', 'D', 'D', 'A']))
{'B': (array([0, 2], dtype=int64),), 'D': (array([3, 4, 5, 6], dtype=int64),)}
>>> index_duplicate(np.array(['B', 'C', 'B', 'D', 'D', 'D', 'D', 'A']), index_to_explore=[1, 2, 3, 4])
{'D': array([3, 4], dtype=int64)}
"""
if index_to_explore is None:
s = np.sort(a, axis=None)
else:
s = np.sort(a[index_to_explore], axis=None)
list_duplicate_values = s[:-1][s[1:] == s[:-1]]
dict_duplicate = {}
if index_to_explore is None:
for i in range(len(list_duplicate_values)):
if prefix_key is None:
key = list_duplicate_values[i]
else:
key = "{}{}".format(prefix_key, list_duplicate_values[i])
dict_duplicate[key] = np.where(a == list_duplicate_values[i])
else:
for i in range(len(list_duplicate_values)):
if prefix_key is None:
key = list_duplicate_values[i]
else:
key = "{}{}".format(prefix_key, list_duplicate_values[i])
dict_duplicate[key] = np.intersect1d(np.where(a == list_duplicate_values[i]),
index_to_explore)
return dict_duplicate
# ===================================================
def get_index_to_delete(b):
"""
get_index_to_delete
In a projection operation, only one instance of the duplicated rows is kept.
This function gives the indices of the rows to be deleted
:param b: List of Numpy arrays. Each array contains the values of a column of the table.
:type b: List of numpy.ndarray
:return: the list of the index of the duplicate values to be deleted. This list may be empty.
:rtype: list of int
example:
>>> get_index_to_delete([np.array([1, 2, 3]), np.array(['A', 'B', 'A'])])
[]
>>> get_index_to_delete([np.array([1, 2, 1, 1]), np.array(['A', 'B', 'A', 'A'])])
[0, 2]
"""
index_to_delete = []
# First column
dict_duplicate_2 = index_duplicate(b[0])
if dict_duplicate_2 == {}: return index_to_delete
# Next columns
for i in range(1, len(b)):
dict_duplicate_1 = dict_duplicate_2
dict_duplicate_2 = {}
for key in dict_duplicate_1:
prefix_key = "__{}__".format(key)
dict_duplicate_2.update(index_duplicate(b[i], dict_duplicate_1[key], prefix_key=prefix_key))
if dict_duplicate_2 == {}: return index_to_delete
for key in dict_duplicate_2:
# We keep the last element of the duplicate list associated with each key
list_duplicate = dict_duplicate_2[key][0:len(dict_duplicate_2[key]) - 1]
for index in list_duplicate:
index_to_delete.append(index)
index_to_delete.sort()
return index_to_delete
# ===================================================
def from_rows_to_columns(r, schema):
"""
from_rows_to_columns
Changes from a representation of data by rows to a representation of data by columns
:param r: data stored per rows.
:type r: numpy.ndarray
:param schema: name and dtype of each column of the Table.
"""
# If there is a bad contact, the score is set to 1000.
if flag: # if any distance < cutoff : no scoring
#self.score = min(9999999.9, 9999.9/mini)
self.score = 1000.
estat = hbond = vdw = ds = 1000.
else:
self.score = min(self.scorer.get_score(),100.)
estat = min(round(self.estat.get_score() * self.ESTAT_WEIGHT_AUTODOCK,2),1000.)
hbond = min(round(self.hbond.get_score() * self.HBOND_WEIGHT_AUTODOCK,2),1000.)
vdw = min(round(self.vdw.get_score() * self.VDW_WEIGHT_AUTODOCK,2),1000.)
ds = min(round(self.ds.get_score() * self.DESOLV_WEIGHT_AUTODOCK,2),1000.)
#print "--",estat,hbond,vdw,ds
return (self.score,estat,hbond,vdw,ds)
def pyMolToCAtomVect( self,mol):
"""convert Protein or AtomSet to AtomVector
"""
try :
from cAutoDock.scorer import AtomVector, Atom, Coords
except :
pass
className = mol.__class__.__name__
if className == 'Protein':
pyAtoms = mol.getAtoms()
elif className == 'AtomSet':
pyAtoms = mol
else:
return None
pyAtomVect = AtomVector()
for atm in pyAtoms:
a=Atom()
a.set_name(atm.name)
a.set_element(atm.autodock_element)# aromatic type 'A', vs 'C'
coords=atm.coords
a.set_coords( Coords(coords[0],coords[1],coords[2]))
a.set_charge( atm.charge)
try:
a.set_atvol( atm.AtVol)
except:
pass
try:
a.set_atsolpar( atm.AtSolPar)
except:
pass
a.set_bond_ord_rad( atm.bondOrderRadius)
a.set_charge( atm.charge)
pyAtomVect.append(a)
return pyAtomVect
def free_memory(self):
# free the shared memory
memobject.free_shared_mem("SharedMemory")
from AutoDockTools.pyAutoDockCommands import pep_aromList
class PyADCalcAD3Energies(EnergyScorer):
"""For each atom in one AtomSet, determine the autodock3 energy vs all the atoms in a second
AtomSet
"""
def __init__(self,atomset1,atomset2):
""" """
EnergyScorer.__init__(self,atomset1,atomset2)
self.weight = None
self.weightLabel = None
self.scorer = AutoDock305Scorer()
self.prop = self.scorer.prop
bothAts = atomset1 + atomset2
for a in bothAts:
if a.parent.type + '_' + a.name in pep_aromList:
a.autodock_element = 'A'
a.AtSolPar = .1027
elif a.autodock_element=='A':
a.AtSolPar = .1027
elif a.autodock_element=='C':
a.AtSolPar = .6844
else:
a.AtSolPar = 0.0
self.r = self.ms.add_entities(atomset1)
self.l = self.ms.add_entities(atomset2)
self.scorer.set_molecular_system(self.ms)
def update_coords(self):
""" update the coords """
if hasattr(self.mol1,'cconformationIndex'):
self.atomset1.setConformation(self.mol1.cconformationIndex)
if hasattr(self.mol2,'cconformationIndex'):
self.atomset2.setConformation(self.mol2.cconformationIndex)
for ind in (self.r,self.l):
# clear distance matrix
self.ms.clear_dist_mat(ind)
def get_score(self):
score = self.scorer.get_score()
terms_score = []
for t,w in self.scorer.terms:
terms_score.append(w*t.get_score())
estat = min(round(terms_score[0]),1000.)
hbond = min(round(terms_score[1]),1000.)
vdw = min(round(terms_score[2]),1000.)
ds = min(round(terms_score[3]),1000.)
# labels atoms
score_array = self.scorer.get_score_array()
self.scorer.labels_atoms_w_nrg(score_array)
return (score,estat,hbond,vdw,ds)
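# Hedged usage sketch (not part of the original file): the scorers defined in this module are
# typically driven as below, where `receptor_atoms` and `ligand_atoms` are assumed to be
# MolKit AtomSet instances prepared elsewhere (with charges and autodock types assigned):
#     scorer = PyADCalcAD3Energies(receptor_atoms, ligand_atoms)
#     scorer.update_coords()
#     total, estat, hbond, vdw, ds = scorer.get_score()
# Per-atom energies are attached to the atoms by labels_atoms_w_nrg(), as shown above.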
class PyADCalcAD4Energies(EnergyScorer):
"""For each atom in one AtomSet, determine the autodock4 energy vs all the atoms
in a second AtomSet
"""
def __init__(self,atomset1,atomset2):
""" """
EnergyScorer.__init__(self,atomset1,atomset2)
self.weight = None
self.weightLabel = None
self.scorer = AutoDock4Scorer()
self.prop = self.scorer.prop
self.r = self.ms.add_entities(atomset1)
self.l = self.ms.add_entities(atomset2)
self.scorer.set_molecular_system(self.ms)
def update_coords(self):
""" update the coords """
if hasattr(self.mol1,'cconformationIndex'):
self.atomset1.setConformation(self.mol1.cconformationIndex)
if hasattr(self.mol2,'cconformationIndex'):
self.atomset2.setConformation(self.mol2.cconformationIndex)
for ind in (self.r,self.l):
# clear distance matrix
self.ms.clear_dist_mat(ind)
def get_score(self):
score = self.scorer.get_score()
terms_score = []
for t,w in self.scorer.terms:
terms_score.append(w*t.get_score())
estat = min(round(terms_score[0]),1000.)
hbond = min(round(terms_score[1]),1000.)
vdw = min(round(terms_score[2]),1000.)
ds = min(round(terms_score[3]),1000.)
self.scores = [estat,hbond,vdw,ds]
# labels atoms
score_array = self.scorer.get_score_array()
self.scorer.labels_atoms_w_nrg(score_array)
return (score,estat,hbond,vdw,ds)
if cAD:
from cAutoDock.AutoDockScorer import AutoDock305Scorer as c_AutoDock305Scorer
from cAutoDock.scorer import MolecularSystem as c_MolecularSystem
from cAutoDock.scorer import updateCoords as c_updateCoords
class cADCalcAD3Energies(EnergyScorer):
"""For each atom in one AtomSet, determine the electrostatics eneregy vs all the atoms in a second
AtomSet using the C implementation of the autodock scorer.
When using the autodock3 scorer, the receptor need to be loaded as a pdbqs file, the ligand as pdbqt.
"""
def __init__(self,atomset1,atomset2):
EnergyScorer.__init__(self,atomset1,atomset2)
self.weight = None
self.weightLabel = None
bothAts = atomset1 + atomset2
## for a in bothAts:
## if a.parent.type + '_' + a.name in pep_aromList:
## a.autodock_element=='A'
## a.AtSolPar = .1027
## elif a.autodock_element=='A':
## a.AtSolPar = .1027
## elif a.autodock_element=='C':
## a.AtSolPar = .6844
## else:
## a.AtSolPar = 0.0
self.ms = c_MolecularSystem()
self.receptor= self.pyMolToCAtomVect(atomset1)
self.ligand = self.pyMolToCAtomVect(atomset2)
self.r = self.ms.add_entities(self.receptor)
self.l = self.ms.add_entities(self.ligand)
self.ms.build_bonds( self.r )
self.ms.build_bonds( self.l )
self.scorer = c_AutoDock305Scorer(ms=self.ms,
pyatomset1=atomset1,
pyatomset2=atomset2)
self.prop = self.scorer.prop
# shared memory, used by C++ functions
self.proteinLen = len(atomset1)
self.ligLen = len(atomset2)
self.msLen = self.proteinLen + self.ligLen
self.sharedMem = memobject.allocate_shared_mem([self.msLen, 3],
'SharedMemory', memobject.FLOAT)
self.sharedMemPtr = memobject.return_share_mem_ptr('SharedMemory')[0]
#print "Shared memory allocated.."
def update_coords(self):
""" update the coordinate of atomset """
# use conformation set by dectected patterns
if hasattr(self.mol1,'cconformationIndex'):
self.atomset1.setConformation(self.mol1.cconformationIndex)
if hasattr(self.mol2,'cconformationIndex'):
self.atomset2.setConformation(self.mol2.cconformationIndex)
#confNum = self.mol2.cconformationIndex
# get the coords
#if hasattr(self.mol1,'mat_transfo_inv'):
# M = self.mol1.mat_transfo_inv
# vt = []
# for pt in self.mol2.allAtoms.coords:
# ptx = (M[0][0]*pt[0]+M[0][1]*pt[1]+M[0][2]*pt[2]+M[0][3])#+self.mol1.getCenter()[0]
# pty = (M[1][0]*pt[0]+M[1][1]*pt[1]+M[1][2]*pt[2]+M[1][3])#+self.mol1.getCenter()[1]
# ptz = (M[2][0]*pt[0]+M[2][1]*pt[1]+M[2][2]*pt[2]+M[2][3])#+self.mol1.getCenter()[2]
# vt.append( (ptx, pty, ptz) )
# self.mol2.allAtoms.updateCoords(vt,ind=confNum)#confNum
#
R_coords = self.atomset1.coords
L_coords = self.atomset2.coords
self.sharedMem[:] = Numeric.array(R_coords+L_coords, 'f')[:]
c_updateCoords(self.proteinLen, self.msLen, self.ms,self.sharedMemPtr)
def get_score(self):
""" return the score """
mini = self.ms.check_distance_cutoff(0, 1, self.cutoff)
# when a number is returned, get_score should not be run (proteins are too close)
# flag = (mini==1.0 and mini==2.0)
# note: `mini == mini` is False only when `mini` is NaN, so this acts as a not-NaN check
flag = (mini == mini)
# for each of the terms and the score, we cap their max value to 100
# so if anything is greater than 100 we assign 100
# If there is a bad contact, the score is set to 1000.
if flag: # if any distance < cutoff : no scoring
#self.score = min(9999999.9, 9999.9/mini)
self.score = 1000.
estat = hbond = vdw = ds = 1000.
self.scores = [1000.,1000.,1000.,1000.]
else:
self.score = min(self.scorer.get_score(),100.)
terms_score = self.scorer.get_score_per_term()
estat = min(round(terms_score[0],2),1000.)
hbond = min(round(terms_score[1],2),1000.)
vdw = min(round(terms_score[2],2),1000.)
ds = 0.#min(round(terms_score[3],2),1000.) #problem with ds
#print "--",estat,hbond,vdw,ds
self.scores = [estat,hbond,vdw,ds]
# labels atoms
score_array = self.scorer.get_score_array()
self.scorer.labels_atoms_w_nrg(score_array)
#ds=ds-ds
#self.score = self.score -ds
return (self.score,estat,hbond,vdw,ds)
def pyMolToCAtomVect( self,mol):
"""convert Protein or AtomSet to AtomVector
"""
from cAutoDock.scorer import AtomVector, Atom, Coords
className = mol.__class__.__name__
if className == 'Protein':
pyAtoms = mol.getAtoms()
elif className == 'AtomSet':
pyAtoms = mol
else:
return None
pyAtomVect = AtomVector()
for atm in pyAtoms:
a=Atom()
a.set_name(atm.name)
a.set_element(atm.autodock_element)# aromatic type 'A', vs 'C'
coords=atm.coords
a.set_coords( Coords(coords[0],coords[1],coords[2]))
a.set_charge( atm.charge)
try:
a.set_atvol( atm.AtVol)
except:
pass
try:
a.set_atsolpar( atm.AtSolPar)
except:
pass
a.set_bond_ord_rad( atm.bondOrderRadius)
a.set_charge( atm.charge)
pyAtomVect.append(a)
return pyAtomVect
def free_memory(self):
# free the shared memory
memobject.free_shared_mem("SharedMemory")
#############################################################################################################
class PyPairWiseEnergyScorer(EnergyScorer):
"""For each atom in one AtomSet, determine the electrostatics energy vs all the atoms in a second
AtomSet
"""
def __init__(self,atomset1,atomset2,scorer_ad_type='305'):
EnergyScorer.__init__(self,atomset1,atomset2)
self.r = self.ms.add_entities(self.atomset1)
self.l = self.ms.add_entities(self.atomset2)
self.scorer = WeightedMultiTerm()
self.scorer.set_molecular_system(self.ms)
self.scorer_ad_type = scorer_ad_type
if self.scorer_ad_type == '305':
self.ESTAT_WEIGHT_AUTODOCK = 0.1146 # electrostatics
self.HBOND_WEIGHT_AUTODOCK = 0.0656 # hydrogen bonding
self.VDW_WEIGHT_AUTODOCK = 0.1485 # van der waals
self.DESOLV_WEIGHT_AUTODOCK= 0.1711 # desolvation
# different terms to be use for score
self.estat= Electrostatics(self.ms)
self.scorer.add_term(self.estat, self.ESTAT_WEIGHT_AUTODOCK)
self.hbond=HydrogenBonding(self.ms)
self.scorer.add_term(self.hbond, self.HBOND_WEIGHT_AUTODOCK)
self.vdw = VanDerWaals(self.ms)
self.scorer.add_term(self.vdw, self.VDW_WEIGHT_AUTODOCK)
self.ds= Desolvation(self.ms)
self.scorer.add_term(self.ds,self.DESOLV_WEIGHT_AUTODOCK)
def update_coords(self):
""" update the coords """
if hasattr(self.mol1,'cconformationIndex'):
self.atomset1.setConformation(self.mol1.cconformationIndex)
if hasattr(self.mol2,'cconformationIndex'):
self.atomset2.setConformation(self.mol2.cconformationIndex)
for ind in (self.r,self.l):
# clear distance matrix
self.ms.clear_dist_mat(ind)
def get_score(self):
score = self.scorer.get_score()
estat = min(round(self.estat.get_score() * self.ESTAT_WEIGHT_AUTODOCK,2),1000.)
hbond = min(round(self.hbond.get_score() * self.HBOND_WEIGHT_AUTODOCK,2),1000.)
vdw = min(round(self.vdw.get_score() * self.VDW_WEIGHT_AUTODOCK,2),1000.)
ds = min(round(self.ds.get_score() * self.DESOLV_WEIGHT_AUTODOCK,2),1000.)
return (score,estat,hbond,vdw,ds)
#"""
#class PatternDistanceMatrix:
# # Object to store information about distance between a
# # set of patterns
#
#
# def __init__(self,patternList):
# self.patterns = patternList # list of patterns object
# self.vertices = [] # list of vertice of center of pattern
# self.centers = [] # list of center between two marker
# self.dist = [] # distance between 2 markers
# self.dist_str = [] # distance between 2 markers as string, so we
# # can pass it to display
#
# def clear_matrix(self):
# # delete all values store
# self.vertices = []
# self.centers = []
# self.dist = []
# self.dist_str = []
#
# def compute(self):
# # compute the distance between patterns
# We only compute half of the distance matrix as the rest is not needed
#
# self.clear_matrix()
# index = 0
# for pat in self.patterns[index:]:
# if not pat.isdetected: continue
# x = pat.gl_para[12]
# y = pat.gl_para[13]
# z = pat.gl_para[14]
# v = (x,y,z)
# for pat1 in self.patterns[index+1:]:
# if not pat1.isdetected: continue
# x1= pat1.gl_para[12]
# y1= pat1.gl_para[13]
# z1= pat1.gl_para[14]
# v1= (x1,y1,z1)
# # calculate distance
# d = util.measure_distance(Numeric.array(v),Numeric.array(v1))
# # calculate center between 2 patterns
# c = util.get_center(Numeric.array(v),Numeric.array(v1))
#
# self.dist.append(d)
# self.dist_str.append('%4.1f'%(d/10.)) # we want the values in centimeters
# # 1 unit = 10 mm
#
# self.vertices.append(v)
# self.vertices.append(v1)
# self.centers.append(c.tolist())
# index +=1
#
# def getall(self):
# # return vertices,centers,dist,dist_str
# return self.vertices,self.centers,self.dist,self.dist_str
#
#### Functions used to create Event
#
#def test_display_dist(arviewer):
# # test function to determine if we need to run the event function
# if arviewer.display_pat_distance: return True
#
#def display_distance(arviewer):
# # event function to display the distance between patterns
# vertices,centers,dist,dist_str = arviewer.patternMgr.distance_matrix.getall()
# arviewer.set_dist_geoms(vertices,centers,dist_str)
#
#
#
#def measure_atoms_dist(arviewer):
# # calculate and set to display the distance between atoms selected
# #Computes the distance between atom1, atom2, atom3.All coordinates are Cartesian; result is in mm
#
# #print "-- measure_atoms_dist --"
# detected_mols = [] # name list
# if len(arviewer.atoms_selected) < 2: return
format((LINE - charlen(splitbox[n-1],mode = 1))*16 + halflen(splitbox[n-1],1),"02x")
if w != "00" and not "-" in w:
shift = r"+T\x{}".format(w)
elif w == "00" or "-" in w:
shift = ""
if "T+" in splitbox[n-1]:
splitbox[n-1] = shift + splitbox[n-1][3:] + "^^<<"
elif "T+" not in splitbox[n-1]:
splitbox[n-1] = shift + splitbox[n-1] + "^^<<"
elif "~" in text:
splitbox[n-1] = splitbox[n-1] + "^^<<"
elif charlen(splitbox[n-1],mode = 1)-2 > LINE:
j = charlen(splitbox[n-1],LINE,mode = 1)
p = 0
k = 0
while LINE * (p - 1) <= charlen(splitbox[n-1],mode = 1):
if (n + p) % 3 == 0:
if "~" not in text:
if alone == "left":
if "T+" in splitbox[n-1][k:j]:
box[p] = splitbox[n-1][k+3:j] + "^^<<"
elif "T+" not in splitbox[n-1][k:j]:
box[p] = splitbox[n-1][k:j] + "^^<<"
elif alone == "center":
w = format((LINE - charlen(splitbox[n-1][k:j],mode = 1))*8 + int(halflen(splitbox[n-1][k:j],1) / 2),"02x")
if w != "00" and not "-" in w:
shift = r"+T\x{}".format(w)
elif w == "00" or "-" in w:
shift = ""
if "+T" in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k+3:j] + "^^<<"
elif "+T" not in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k:j] + "^^<<"
elif alone == "right":
w = format((LINE - charlen(splitbox[n-1][k:j],mode = 1))*16 + halflen(splitbox[n-1][k:j],1),"02x")
if w != "00" and not "-" in w:
shift = r"+T\x{}".format(w)
elif w == "00" or "-" in w:
shift = ""
if "+T" in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k+3:j] + "^^<<"
elif "+T" not in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k:j] + "^^<<"
elif "~" in text:
box[p] = splitbox[n-1][k:j] + "^^<<"
elif (n + p) % 3 != 0:
if "~" not in text:
if alone == "left":
if "T+" in splitbox[n-1][k:j]:
box[p] = splitbox[n-1][k+3:j] + "&&"
elif "T+" not in splitbox[n-1][k:j]:
box[p] = splitbox[n-1][k:j] + "&&"
elif alone == "center":
w = format((LINE - charlen(splitbox[n-1][k:j],mode = 1))*8 + int(halflen(splitbox[n-1][k:j],1) / 2),"02x")
if w != "00" and not "-" in w:
shift = r"+T\x{}".format(w)
elif w == "00" or "-" in w:
shift = ""
if "+T" in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k+3:j] + "&&"
elif "+T" not in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k:j] + "&&"
elif alone == "right":
w = format((LINE - charlen(splitbox[n-1][k:j],mode = 1))*16 + halflen(splitbox[n-1][k:j],1),"02x")
if w != "00" and not "-" in w:
shift = r"+T\x{}".format(w)
elif w == "00" or "-" in w:
shift = ""
if "+T" in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k+3:j] + "&&"
elif "+T" not in splitbox[n-1][k:j]:
box[p] = shift + splitbox[n-1][k:j] + "&&"
elif "~" in text:
box[p] = splitbox[n-1][k:j] + "&&"
k = j
j += charlen(splitbox[n-1][k:], LINE,mode = 1)
p += 1
splitbox[n-1] = "".join(box.values())
if splitbox[n-1].endswith("&&"):
splitbox[n-1] = splitbox[n-1][:-2] + "^^<<"
n += 1
text = "".join(splitbox)
text = text.replace("<<<","<")
text = text.replace("^^^","^")
if text.endswith("&&"):
text = text[:-2]
elif text.endswith("^^<<"):
text = text[:-4]
text = text.replace("^^,<^^<<","^^<<")
text = text.replace("^^<<&&","^^<<")
text = text.replace("&&^^<<","^^<<")
text = text.replace("&&&","&")
if text.endswith("&&"):
text = text[:-2]
elif text.endswith("^^<<"):
text = text[:-4]
if instant == 1:
text = "<<" + text
return text
def calculate_width(words):
words_width = 0
for word in words:
index = 0
while index < len(word):
character = word[index]
index += 1
if character.code in Messages.CONTROL_CODES:
if character.code == 0x06:
words_width += character.data
words_width += get_character_width(chr(character.code))
spaces_width = get_character_width(' ') * (len(words) - 1)
return words_width + spaces_width
def get_character_width(character):
try:
return character_table[character]
except KeyError:
if ord(character) < 0x20:
if character in control_code_width:
return sum([character_table[c] for c in control_code_width[character]])
else:
return 0
else:
# A sane default with the most common character width
return character_table[' ']
control_code_width = {
'\x0F': '00000000',
'\x16': '00\'00"',
'\x17': '00\'00"',
'\x18': '00000',
'\x19': '100',
'\x1D': '00',
'\x1E': '00000',
'\x1F': '00\'00"',
}
# Tediously measured by filling a full line of a gossip stone's text box with one character until it is reasonably full
# (with a right margin) and counting how many characters fit. OoT does not appear to use any kerning, but, if it does,
# it will only make the characters more space-efficient, so this is an underestimate of the number of letters per line,
# at worst. This ensures that we will never bleed text out of the text box while line wrapping.
# Larger numbers in the denominator mean more of that character fits on a line; conversely, larger values in this table
# mean the character is wider and can't fit as many on one line.
character_table = {
'\x0F': 655200,
'\x16': 292215,
'\x17': 292215,
'\x18': 300300,
'\x19': 145860,
'\x1D': 85800,
'\x1E': 300300,
'\x1F': 265980,
'a': 51480, # LINE_WIDTH / 35
'b': 51480, # LINE_WIDTH / 35
'c': 51480, # LINE_WIDTH / 35
'd': 51480, # LINE_WIDTH / 35
'e': 51480, # LINE_WIDTH / 35
'f': 34650, # LINE_WIDTH / 52
'g': 51480, # LINE_WIDTH / 35
'h': 51480, # LINE_WIDTH / 35
'i': 25740, # LINE_WIDTH / 70
'j': 34650, # LINE_WIDTH / 52
'k': 51480, # LINE_WIDTH / 35
'l': 25740, # LINE_WIDTH / 70
'm': 81900, # LINE_WIDTH / 22
'n': 51480, # LINE_WIDTH / 35
'o': 51480, # LINE_WIDTH / 35
'p': 51480, # LINE_WIDTH / 35
'q': 51480, # LINE_WIDTH / 35
'r': 42900, # LINE_WIDTH / 42
's': 51480, # LINE_WIDTH / 35
't': 42900, # LINE_WIDTH / 42
'u': 51480, # LINE_WIDTH / 35
'v': 51480, # LINE_WIDTH / 35
'w': 81900, # LINE_WIDTH / 22
'x': 51480, # LINE_WIDTH / 35
'y': 51480, # LINE_WIDTH / 35
'z': 51480, # LINE_WIDTH / 35
'A': 81900, # LINE_WIDTH / 22
'B': 51480, # LINE_WIDTH / 35
'C': 72072, # LINE_WIDTH / 25
'D': 72072, # LINE_WIDTH / 25
'E': 51480, # LINE_WIDTH / 35
'F': 51480, # LINE_WIDTH / 35
'G': 81900, # LINE_WIDTH / 22
'H': 60060, # LINE_WIDTH / 30
'I': 25740, # LINE_WIDTH / 70
'J': 51480, # LINE_WIDTH / 35
'K': 60060, # LINE_WIDTH / 30
'L': 51480, # LINE_WIDTH / 35
'M': 81900, # LINE_WIDTH / 22
'N': 72072, # LINE_WIDTH / 25
'O': 81900, # LINE_WIDTH / 22
'P': 51480, # LINE_WIDTH / 35
'Q': 81900, # LINE_WIDTH / 22
'R': 60060, # LINE_WIDTH / 30
'S': 60060, # LINE_WIDTH / 30
'T': 51480, # LINE_WIDTH / 35
'U': 60060, # LINE_WIDTH / 30
'V': 72072, # LINE_WIDTH / 25
'W': 100100, # LINE_WIDTH / 18
'X': 72072, # LINE_WIDTH / 25
'Y': 60060, # LINE_WIDTH / 30
'Z': 60060, # LINE_WIDTH / 30
' ': 51480, # LINE_WIDTH / 35
'1': 25740, # LINE_WIDTH / 70
'2': 51480, # LINE_WIDTH / 35
'3': 51480, # LINE_WIDTH / 35
'4': 60060, # LINE_WIDTH / 30
'5': 51480, # LINE_WIDTH / 35
'6': 51480, # LINE_WIDTH / 35
'7': 51480, # LINE_WIDTH / 35
'8': 51480, # LINE_WIDTH / 35
'9': 51480, # LINE_WIDTH / 35
'0': 60060, # LINE_WIDTH / 30
'!': 51480, # LINE_WIDTH / 35
'?': 72072, # LINE_WIDTH / 25
'\'': 17325, # LINE_WIDTH / 104
'"': 34650, # LINE_WIDTH / 52
'.': 25740, # LINE_WIDTH / 70
',': 25740, # LINE_WIDTH / 70
'/': 51480, # LINE_WIDTH / 35
'-': 34650, # LINE_WIDTH / 52
'_': 51480, # LINE_WIDTH / 35
'(': 42900, # LINE_WIDTH / 42
')': 42900, # LINE_WIDTH / 42
'$': 51480 # LINE_WIDTH / 35
}
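# Hedged illustration (not part of the original file): the "LINE_WIDTH / n" comments above
# imply one fixed total line width, which can be recovered from any entry, e.g. 'a'.
def _implied_line_width():
    # 'a' is annotated as LINE_WIDTH / 35, so multiplying back out gives the full width
    return 35 * character_table['a']   # 1801800 units per text box line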
# To run tests, enter the following into a python3 REPL:
# >>> import Messages
# >>> from TextBox import line_wrap_tests
# >>> line_wrap_tests()
def line_wrap_tests():
test_wrap_simple_line()
test_honor_forced_line_wraps()
test_honor_box_breaks()
test_honor_control_characters()
test_honor_player_name()
test_maintain_multiple_forced_breaks()
test_trim_whitespace()
test_support_long_words()
def test_wrap_simple_line():
words = 'Hello World! Hello World! Hello World!'
expected = 'Hello World! Hello World! Hello\x01World!'
result = line_wrap(words)
if result != expected:
print('"Wrap Simple Line" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Wrap Simple Line" test passed!')
def test_honor_forced_line_wraps():
words = 'Hello World! Hello World!&Hello World! Hello World! Hello World!'
expected = 'Hello World! Hello World!\x01Hello World! Hello World! Hello\x01World!'
result = line_wrap(words)
if result != expected:
    print('"Honor Forced Line Wraps" test failed: Got ' + result + ', wanted ' + expected)
else:
    print('"Honor Forced Line Wraps" test passed!')
@property
def rangehint(self):
    return (self.xs - 3, self.xs + 3)
class TwoSlopeATanModel(FunctionModel1DAuto):
"""
This model transitions between two asymptotic slopes with an additional
parameter that allows for a variable transition region size. The functional
form is
.. math::
y = (x-x_0) \\frac{a+b}{2} +
\\frac{ s - (x-x_0) (a-b)}{\\pi}
\\arctan \\left (\\frac{x-x_0}{w} \\right) + c
`a` is the slope for small x, `b` for large x, `c` is the value at x=x0,
`x0` is the location of the transition, `w` is the width of the transition,
and `s` is the amount of y-axis offset that occurs at the transition
(positive for left-to-right).
"""
#no-S form from old docs
#.. math::
# y = (x-x_0) \\left[ \\frac{a+b}{2} -
# \\frac{a-b}{\\pi} \\arctan(\\frac{x-x_0}{w})\\right] + c
#alternative representation of no-S form for docs:
#.. math::
# y = \\frac{a (x-x_0)}{\\pi} \\left(\\frac{\\pi}{2}-\\arctan(\\frac{x-x_0}{w}) \\right) +
# \\frac{b (x-x_0)}{\\pi} \\left(\\frac{\\pi}{2}+\\arctan(\\frac{x-x_0}{w}) \\right) + c
#no-s form
#def f(self,x,a=1,b=2,c=0,x0=0,w=1):
# xoff = x-x0
# tana = np.arctan(-xoff/w)/pi+0.5
# tanb = np.arctan(xoff/w)/pi+0.5
# return a*xoff*tana+b*xoff*tanb+c
def f(self,x,a=1,b=2,c=0,x0=0,w=1,s=0):
xo = x - x0
# if w==0:
# tanfactor = .5*np.sign(x-x0)
# else:
# tanfactor = np.arctan(xo/w)/pi
#above is unneccessary b/c numpy division properly does infinities
tanfactor = np.arctan(xo/w)/pi
return xo*(a+b)/2 + (s - xo*(a-b))*tanfactor + c
@property
def rangehint(self):
return self.x0-3*self.w,self.x0+3*self.w
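# Hedged numeric check (not part of the original module): the arctan form above approaches
# slope `a` far to the left of x0 and slope `b` far to the right of it. This standalone
# helper evaluates the closed form directly to illustrate that behaviour.
def _two_slope_atan_demo(a=1.0, b=2.0, c=0.0, x0=0.0, w=1.0, s=0.0):
    import numpy as _np
    def f(x):
        xo = x - x0
        return xo * (a + b) / 2 + (s - xo * (a - b)) * _np.arctan(xo / w) / _np.pi + c
    # finite-difference slopes far from the transition region
    left = f(-1e6 + 1.0) - f(-1e6)    # ~= a
    right = f(1e6 + 1.0) - f(1e6)     # ~= b
    return left, right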
class _InterpolatedModel(DatacentricModel1DAuto):
_fittypes=['interp']
fittype = 'interp'
def __init__(self,**kwargs):
"""
Generate a new interpolated model.
"""
super(_InterpolatedModel,self).__init__()
self.i1d = lambda x:x #default should never be externally seen
for k,v in kwargs.items():
setattr(self,k,v)
def f(self,x):
if self.data is not None:
res = self.i1d(x)
xd,yd = self.data[0],self.data[1]
mi,mx = np.min(xd),np.max(xd)
res[x<mi] = yd[mi==xd][0]
res[x>mx] = yd[mx==xd][0]
return res
else:
return x
def fitData(self,x,y,**kwargs):
kwargs['savedata'] = True
return super(_InterpolatedModel,self).fitData(x,y,**kwargs)
def fitInterp(self,x,y,fixedpars=(),**kwargs):
from scipy.interpolate import interp1d
xi = np.argsort(x)
self.i1d = interp1d(x[xi],y[xi],kind=self.kind,bounds_error=False)
return []
class LinearInterpolatedModel(_InterpolatedModel):
"""
A model that is the linear interpolation of the data, or if out of bounds,
fixed to the edge value.
"""
kind = 'linear'
class NearestInterpolatedModel(_InterpolatedModel):
"""
A model that is the interpolation of the data by taking the value of the
nearest point
"""
kind = 'nearest'
class SmoothSplineModel(DatacentricModel1DAuto):
"""
This model uses a B-spline as a model for the function. Note that by default
the parameters are not tuned - the input smoothing and degree are left alone
when fitting.
The :class:`scipy.interpolate.UnivariateSpline` class is used to do the
calculation (in the :attr:`spline` attribute).
"""
def __init__(self,**kwargs):
super(SmoothSplineModel,self).__init__()
self._oldd = self._olds = self._ws = self._inits = None
self.data = (np.arange(self.degree+1),np.arange(self.degree+1),self._ws)
self.fitData(self.data[0],self.data[1])
self._inits = self.data[:2]
for k,v in kwargs.items():
setattr(self,k,v)
_fittypes=['spline']
fittype = 'spline'
def fitSpline(self,x,y,fixedpars=(),**kwargs):
"""
Fits the spline with the current s-value - if :attr:`s` is not changed,
it will execute very quickly after, as the spline is saved.
"""
from scipy.interpolate import UnivariateSpline
#if the spline has never been fit to non-init data, set s appropriately
if self._inits is not None and not (np.all(self._inits[0] == x) and np.all(self._inits[1] == y)):
self.s = len(x)
self._inits = None
self.spline = UnivariateSpline(x,y,s=self.s,k=self.degree,w=kwargs['weights'] if 'weights' in kwargs else None)
self._olds = self.s
self._oldd = self.degree
return np.array([self.s,self.degree])
def fitData(self,x,y,**kwargs):
"""
Custom spline data-fitting method. Kwargs are ignored except
`weights` and `savedata` (see :meth:`FunctionModel.fitData` for meaning)
"""
self._oldd = self._olds = None
if 'savedata' in kwargs and not kwargs['savedata']:
raise ValueError('data must be saved for spline models')
else:
kwargs['savedata']=True
if 'weights' in kwargs:
self._ws = kwargs['weights']
else:
self._ws = None
sorti = np.argsort(x)
return super(SmoothSplineModel,self).fitData(x[sorti],y[sorti],**kwargs)
def f(self,x,s=2,degree=3):
if self._olds != s or self._oldd != degree:
xd,yd,weights = self.data
self.fitSpline(xd,yd,weights=weights)
return self.spline(x)
def derivative(self, x, dx=None, nderivs=1):
"""
Compute the derivative of this spline at the requested points.
`nderivs` specifies the number of derivatives to take - e.g. ``1`` gives
the first derivative, ``2`` is the second, etc. This can go up to the
number of degrees in the spline+1 (e.g. a cubic spline can go up to 4)
"""
return np.array([self.spline.derivatives(xi)[nderivs] for xi in x])
@property
def rangehint(self):
xd = self.data[0]
return np.min(xd),np.max(xd)
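# Hedged illustration (not part of the original module): SmoothSplineModel delegates to
# scipy's UnivariateSpline; the underlying call made in fitSpline looks like this.
def _univariate_spline_demo():
    import numpy as _np
    from scipy.interpolate import UnivariateSpline
    x = _np.linspace(0, 10, 50)
    y = _np.sin(x) + 0.1 * _np.random.randn(50)
    # `s` and `k` mirror the model's `s` and `degree` parameters
    spl = UnivariateSpline(x, y, s=len(x), k=3)
    return spl(_np.linspace(0, 10, 200))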
class InterpolatedSplineModel(DatacentricModel1DAuto):
"""
This uses a B-spline as a model for the function. Note that by default the
degree is left alone when fitting, as this model always fits the points
exactly.
the :class:`scipy.interpolate.InterpolatedUnivariateSpline` class is used to
do the calculation (in the :attr:`spline` attribute).
"""
def __init__(self):
super(InterpolatedSplineModel,self).__init__()
self._oldd=self._olds=self._ws=None
self.data = (np.arange(self.degree+1),np.arange(self.degree+1),self._ws)
self.fitData(self.data[0],self.data[1])
_fittypes = ['spline']
fittype = 'spline'
def fitSpline(self,x,y,fixedpars=(),**kwargs):
"""
Fits the spline with the current s-value - if :attr:`s` is not changed,
it will execute very quickly after, as the spline is saved.
"""
from scipy.interpolate import InterpolatedUnivariateSpline
self.spline = InterpolatedUnivariateSpline(x,y,w=kwargs['weights'] if 'weights' in kwargs else None,k=self.degree)
self._oldd = self.degree
return np.array([self.degree])
def fitData(self,x,y,**kwargs):
"""
Custom spline data-fitting method. Kwargs are ignored except
`weights` and `savedata` (see :meth:`FunctionModel.fitData` for meaning)
"""
self._oldd=None
if 'savedata' in kwargs and not kwargs['savedata']:
raise ValueError('data must be saved for spline models')
else:
kwargs['savedata']=True
if 'weights' in kwargs:
self._ws = kwargs['weights']
else:
self._ws = None
sorti = np.argsort(x)
return super(InterpolatedSplineModel,self).fitData(x[sorti],y[sorti],**kwargs)
def f(self,x,degree=3):
if self._oldd != degree:
xd,yd,weights = self.data
self.fitSpline(xd,yd,weights=weights)
return self.spline(x)
def derivative(self, x, dx=None, nderivs=1):
"""
Compute the derivative of this spline at the requested points.
`nderivs` specifies the number of derivatives to take - e.g. ``1`` gives
the first derivative, ``2`` is the second, etc. This can go up to the
number of degrees in the spline+1 (e.g. a cubic spline can go up to 4)
"""
return np.array([self.spline.derivatives(xi)[nderivs] for xi in x])
@property
def rangehint(self):
xd = self.data[0]
return np.min(xd),np.max(xd)
class _KnotSplineModel(DatacentricModel1DAuto):
"""
This uses a B-spline as a model for the function. The `nknots` parameter
specifies the number of INTERIOR knots to use for the fit.
The :attr:`locmethod` determines how to locate the knots and can be:
* 'cdf'
The locations of the knots will be determined by evenly sampling the cdf
of the x-points.
* 'even'
The knots are evenly spaced in x.
The :class:`scipy.interpolate.UnivariateSpline` class is used to do the
calculation (in the "spline" attribute).
"""
def __init__(self):
super(_KnotSplineModel,self).__init__()
self._ws = None
self.data = (np.arange(self.degree+self.nknots+1),np.arange(self.degree+self.nknots+1),self._ws)
@abstractmethod
def f(self,x):
raise NotImplementedError
_fittypes = ['spline']
fittype = 'spline'
@abstractmethod
def fitSpline(self,x,y,fixedpars=(),**kwargs):
"""
Fits the spline with the current s-value - if :attr:`s` is not changed,
it will execute very quickly after, as the spline is saved.
"""
from scipy.interpolate import LSQUnivariateSpline
self.spline = LSQUnivariateSpline(x,y,t=self.iknots,k=int(self.degree),w=kwargs['weights'] if 'weights' in kwargs else None)
def fitData(self,x,y,**kwargs):
"""
Custom spline data-fitting method. Kwargs are ignored except
`weights` and `savedata` (see :meth:`FunctionModel.fitData` for meaning)
"""
self._oldd=self._olds=None
if 'savedata' in kwargs and not kwargs['savedata']:
raise ValueError('data must be saved for spline models')
else:
kwargs['savedata']=True
if 'weights' in kwargs:
self._ws = kwargs['weights']
else:
self._ws = None
sorti = np.argsort(x)
return super(_KnotSplineModel,self).fitData(x[sorti],y[sorti],**kwargs)
def derivative(self, x, dx=None, nderivs=1):
"""
Compute the derivative of this spline at the requested points.
`nderivs` specifies the number of derivatives to take - e.g. ``1`` gives
the first derivative, ``2`` is the second, etc. This can go up to the
number of degrees in the spline+1 (e.g. a cubic spline can go up to 4)
"""
return np.array([self.spline.derivatives(xi)[nderivs] for xi in x])
@property
def rangehint(self):
xd = self.data[0]
return np.min(xd),np.max(xd)
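# Hedged illustration (not part of the original module): the 'cdf' knot-placement strategy
# described in the docstring above amounts to taking evenly spaced quantiles of the x data.
def _cdf_knots(x, nknots):
    import numpy as _np
    qs = _np.linspace(0, 100, nknots + 2)[1:-1]   # interior knot positions only
    return _np.percentile(x, qs)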
class UniformKnotSplineModel(_KnotSplineModel):
"""
A spline model with a uniform seperation between the internal knots, with
their number set by the :attr:`nknots` parameter.
"""
def __init__(self):
self._oldk = self._oldd = None
super(UniformKnotSplineModel,self).__init__()
self.fitData(self.data[0],self.data[1])
def fitSpline(self,x,y,fixedpars=(),**kwargs):
"""
Fits the spline with the current s-value - if :attr:`s` is not changed,
it will execute very quickly after, as the spline is saved.
"""
self.iknots = np.linspace(x[0],x[-1],self.nknots+2)[1:-1]
| |
## Python packages
from datetime import datetime
import json
import re
from binascii import a2b_base64
import os
## Django Packages
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.shortcuts import redirect
from django.utils import timezone
from django.http import (
Http404, HttpResponse, JsonResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib import messages
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template.loader import render_to_string
from django.contrib.gis.geos import Point, LineString
## Custom Libs ##
from lib.functions import *
## Project packages
from accounts.models import CustomUser, MapillaryUser
from tour.models import Tour, TourSequence
from sequence.models import TransType
## App packages
# That includes from .models import *
from .forms import *
from sequence.forms import SequenceSearchForTourForm
MAIN_PAGE_DESCRIPTION = "Tours are collections of sequences that have been curated by their owner. Browse others' tours or create one using your own sequences."
# Tour
def index(request):
return redirect('tour.tour_list')
@my_login_required
def tour_create(request, unique_id=None):
mapillary_user = MapillaryUser.objects.get(user=request.user)
if not mapillary_user:
messages.error(request, "You don't have any sequences. Please upload sequence or import from Mapillary.")
return redirect('sequence.index')
if request.method == "POST":
form = TourForm(request.POST)
if form.is_valid():
if unique_id is None:
tour = form.save(commit=False)
tour.user = request.user
tour.username = mapillary_user.username
tour.is_published = False
tour.save()
if form.cleaned_data['tour_tag'].count() > 0:
for tour_tag in form.cleaned_data['tour_tag']:
tour.tour_tag.add(tour_tag)
for tour_tag in tour.tour_tag.all():
if not tour_tag in form.cleaned_data['tour_tag']:
tour.tour_tag.remove(tour_tag)
messages.success(request, 'A tour was created successfully.')
else:
tour = get_object_or_404(Tour, unique_id=unique_id)
tour.name = form.cleaned_data['name']
tour.description = form.cleaned_data['description']
if form.cleaned_data['tour_tag'].count() > 0:
for tour_tag in form.cleaned_data['tour_tag']:
tour.tour_tag.add(tour_tag)
for tour_tag in tour.tour_tag.all():
if not tour_tag in form.cleaned_data['tour_tag']:
tour.tour_tag.remove(tour_tag)
tour.save()
messages.success(request, 'A tour was updated successfully.')
return redirect('tour.tour_add_sequence', unique_id=tour.unique_id)
else:
if unique_id:
tour = get_object_or_404(Tour, unique_id=unique_id)
form = TourForm(instance=tour)
else:
form = TourForm()
content = {
'form': form,
'pageName': 'Create Tour',
'pageTitle': 'Create Tour',
'pageDescription': MAIN_PAGE_DESCRIPTION,
}
return render(request, 'tour/create.html', content)
@my_login_required
def tour_add_sequence(request, unique_id):
tour = get_object_or_404(Tour, unique_id=unique_id)
sequences = None
page = 1
action = 'list'
if request.method == "GET":
page = request.GET.get('page')
if page is None:
page = 1
action = request.GET.get('action')
if action is None:
action = 'list'
form = SequenceSearchForTourForm(request.GET)
if form.is_valid():
name = form.cleaned_data['name']
camera_make = form.cleaned_data['camera_make']
tags = form.cleaned_data['tag']
transport_type = form.cleaned_data['transport_type']
# start_time = form.cleaned_data['start_time']
# end_time = form.cleaned_data['end_time']
sequences = Sequence.objects.all().filter(
user=request.user
)
if name and name != '':
sequences = sequences.filter(name__contains=name)
if camera_make and camera_make != '':
sequences = sequences.filter(camera_make__contains=camera_make)
if transport_type and transport_type != 0 and transport_type != '':
children_trans_type = TransType.objects.filter(parent_id=transport_type)
if children_trans_type.count() > 0:
types = []
types.append(transport_type)
for t in children_trans_type:
types.append(t.pk)
sequences = sequences.filter(transport_type_id__in=types)
else:
sequences = sequences.filter(transport_type_id=transport_type)
if len(tags) > 0:
for tag in tags:
sequences = sequences.filter(tag=tag)
if sequences is None:
sequences = Sequence.objects.all().filter(is_published=True)
form = SequenceSearchForTourForm()
sequences = sequences.order_by('-created_at')
sequence_ary = []
tour_sequences = TourSequence.objects.filter(tour=tour)
t_sequence_ary = []
if tour_sequences.count() > 0:
for t_s in tour_sequences:
t_sequence_ary.append(t_s.sequence.unique_id)
for sequence in sequences:
if not sequence.unique_id in t_sequence_ary:
sequence_ary.append(sequence)
t_form = TourForm(instance=tour)
if action == 'add':
# if sequences.count() > 0:
# for sequence in sequences:
# if not sequence in t_sequence_ary:
# sequence_ary.append(sequence)
#
# paginator = Paginator(sequence_ary, 5)
paginator = Paginator(sequence_ary, 5)
try:
pSequences = paginator.page(page)
except PageNotAnInteger:
pSequences = paginator.page(1)
except EmptyPage:
pSequences = paginator.page(paginator.num_pages)
first_num = 1
last_num = paginator.num_pages
if paginator.num_pages > 7:
if pSequences.number < 4:
first_num = 1
last_num = 7
elif pSequences.number > paginator.num_pages - 3:
first_num = paginator.num_pages - 6
last_num = paginator.num_pages
else:
first_num = pSequences.number - 3
last_num = pSequences.number + 3
pSequences.paginator.pages = range(first_num, last_num + 1)
pSequences.count = len(pSequences)
content = {
'sequences': pSequences,
'sequence_count': len(pSequences),
'form': form,
'pageName': 'Edit Tour',
'pageTitle': tour.name + ' - Edit Tour',
'pageDescription': MAIN_PAGE_DESCRIPTION,
'page': page,
'tour': tour,
'action': action,
't_form': t_form,
't_sequence_ary': t_sequence_ary
}
return render(request, 'tour/add_seq.html', content)
else:
sequences = sequences.filter(unique_id__in=t_sequence_ary)
content = {
'sequences': sequences,
'sequence_count': len(sequences),
'form': form,
'pageName': 'Edit Tour',
'pageTitle': tour.name + ' - Edit Tour',
'pageDescription': MAIN_PAGE_DESCRIPTION,
'page': page,
'tour': tour,
'action': action,
't_form': t_form,
't_sequence_ary': t_sequence_ary
}
return render(request, 'tour/add_seq.html', content)
def tour_list(request):
tours = None
page = 1
if request.method == "GET":
page = request.GET.get('page')
if page is None:
page = 1
form = TourSearchForm(request.GET)
if form.is_valid():
name = form.cleaned_data['name']
tags = form.cleaned_data['tour_tag']
username = form.cleaned_data['username']
tours = Tour.objects.all().filter(
is_published=True
)
print(tours.count())
if name and name != '':
tours = tours.filter(name__contains=name)
if username and username != '':
users = CustomUser.objects.filter(username__contains=username)
tours = tours.filter(user__in=users)
if len(tags) > 0:
for tour_tag in tags:
tours = tours.filter(tour_tag=tour_tag)
if tours is None:
tours = Tour.objects.all().filter(is_published=True)
form = TourSearchForm()
paginator = Paginator(tours.order_by('-created_at'), 5)
try:
pTours = paginator.page(page)
except PageNotAnInteger:
pTours = paginator.page(1)
except EmptyPage:
pTours = paginator.page(paginator.num_pages)
first_num = 1
last_num = paginator.num_pages
if paginator.num_pages > 7:
if pTours.number < 4:
first_num = 1
last_num = 7
elif pTours.number > paginator.num_pages - 3:
first_num = paginator.num_pages - 6
last_num = paginator.num_pages
else:
first_num = pTours.number - 3
last_num = pTours.number + 3
pTours.paginator.pages = range(first_num, last_num + 1)
pTours.count = len(pTours)
content = {
'tours': pTours,
'form': form,
'pageName': 'Tours',
'pageDescription': MAIN_PAGE_DESCRIPTION,
'pageTitle': 'Tours',
'page': page
}
return render(request, 'tour/list.html', content)
@my_login_required
def my_tour_list(request):
tours = None
page = 1
if request.method == "GET":
page = request.GET.get('page')
if page is None:
page = 1
form = TourSearchForm(request.GET)
if form.is_valid():
name = form.cleaned_data['name']
tags = form.cleaned_data['tour_tag']
tours = Tour.objects.all().filter(
user=request.user
)
if name and name != '':
tours = tours.filter(name__contains=name)
if len(tags) > 0:
for tour_tag in tags:
tours = tours.filter(tour_tag=tour_tag)
if tours is None:
tours = Tour.objects.all().filter(is_published=True)
form = TourSearchForm()
paginator = Paginator(tours.order_by('-created_at'), 5)
try:
pTours = paginator.page(page)
except PageNotAnInteger:
pTours = paginator.page(1)
except EmptyPage:
pTours = paginator.page(paginator.num_pages)
first_num = 1
last_num = paginator.num_pages
if paginator.num_pages > 7:
if pTours.number < 4:
first_num = 1
last_num = 7
elif pTours.number > paginator.num_pages - 3:
first_num = paginator.num_pages - 6
last_num = paginator.num_pages
else:
first_num = pTours.number - 3
last_num = pTours.number + 3
pTours.paginator.pages = range(first_num, last_num + 1)
pTours.count = len(pTours)
form._my(request.user.username)
content = {
'tours': pTours,
'form': form,
'pageName': 'My Tours',
'pageTitle': 'My Tours',
'pageDescription': MAIN_PAGE_DESCRIPTION,
'page': page
}
return render(request, 'tour/list.html', content)
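# Hedged refactoring sketch (not part of the original app): the three list views above compute
# the same 7-page pagination window; the arithmetic could be factored out like this, where
# `number` is the current page and `num_pages` the paginator total.
def _pagination_window(number, num_pages, width=7):
    if num_pages <= width:
        return range(1, num_pages + 1)
    half = width // 2
    if number < half + 1:
        first_num, last_num = 1, width
    elif number > num_pages - half:
        first_num, last_num = num_pages - width + 1, num_pages
    else:
        first_num, last_num = number - half, number + half
    return range(first_num, last_num + 1)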
def tour_detail(request, unique_id):
tour = get_object_or_404(Tour, unique_id=unique_id)
sequence_ary = []
tour_sequences = TourSequence.objects.filter(tour=tour).order_by('sort')
t_count_ary = []
if tour_sequences.count() > 0:
for t_s in tour_sequences:
t_sequences = TourSequence.objects.filter(sequence=t_s.sequence)
if t_sequences is None or not t_sequences:
t_s.sequence.tour_count = 0
else:
t_s.sequence.tour_count = t_sequences.count()
sequence_ary.append(t_s.sequence)
first_image_key = ''
if len(sequence_ary) > 0:
first_image_key = sequence_ary[0].coordinates_image[0]
content = {
'sequences': sequence_ary,
'sequence_count': len(sequence_ary),
'pageName': 'Tour Detail',
'pageTitle': tour.name + ' - Tour Detail',
'pageDescription': tour.description,
'tour': tour,
'first_image_key': first_image_key,
't_count_ary': t_count_ary
}
return render(request, 'tour/detail.html', content)
@my_login_required
def ajax_tour_update(request, unique_id=None):
if request.method == "POST":
form = TourForm(request.POST)
if form.is_valid():
tour = Tour.objects.get(unique_id=unique_id)
if not tour:
return JsonResponse({
'status': 'failed',
'message': 'The tour does not exist or has no access.'
})
tour.name = form.cleaned_data['name']
tour.description = form.cleaned_data['description']
if form.cleaned_data['tour_tag'].count() > 0:
for tour_tag in form.cleaned_data['tour_tag']:
tour.tour_tag.add(tour_tag)
for tour_tag in tour.tour_tag.all():
if not tour_tag in form.cleaned_data['tour_tag']:
tour.tour_tag.remove(tour_tag)
tour.save()
return JsonResponse({
'status': 'success',
'message': 'Tour was uploaded successfully.',
'tour': {
'name': tour.name,
'description': tour.description,
'tag': tour.getTags()
}
})
else:
errors = []
for field in form:
for error in field.errors:
errors.append(field.name + ': ' + error)
return JsonResponse({
'status': 'failed',
'message': '<br>'.join(errors)
})
return JsonResponse({
'status': 'failed',
'message': 'The tour does not exist or has no access.'
})
@my_login_required
def tour_delete(request, unique_id):
tour = get_object_or_404(Tour, unique_id=unique_id)
if request.method == "POST":
if tour.user != request.user:
messages.error(request, 'The tour does not exist or has no access.')
return redirect('tour.tour_list')
name = tour.name
sequences = TourSequence.objects.filter(tour=tour)
if sequences.count() > 0:
for seq in sequences:
seq.delete()
tour_likes = TourLike.objects.filter(tour=tour)
if tour_likes.count() > 0:
for t_like in tour_likes:
t_like.delete()
tour.delete()
messages.success(request, 'Tour "{}" is deleted successfully.'.format(name))
return redirect('tour.tour_list')
messages.error(request, 'The tour does not exist or has no access.')
return redirect('tour.tour_list')
def ajax_change_tour_seq(request, unique_id):
if not request.user.is_authenticated:
return JsonResponse({
'status': 'failed',
'message': 'The tour does not exist or has no access.'
})
tour = Tour.objects.get(unique_id=unique_id)
if not tour or tour is None or tour.user != request.user:
return JsonResponse({
'status': 'failed',
'message': 'The tour does not exist or has no access.'
})
if request.method == "POST":
service_plugin = None
""" The service plugin """
service_connection_handler = None
""" The service connection handler """
connection_socket = None
""" The connection socket """
connection_address = None
""" The connection address """
connection_port = None
""" The connection port """
connection_request_timeout = None
""" The connection request timeout """
connection_response_timeout = None
""" The connection response timeout """
connection_chunk_size = None
""" The connection chunk size """
connection_opened_handlers = []
""" The connection opened handlers """
connection_closed_handlers = []
""" The connection closed handlers """
connection_properties = {}
""" The connection properties map """
connection_status = False
""" The connection status flag """
cancel_time = None
""" The cancel time """
service_execution_thread = None
""" The service execution thread reference """
request_data = {}
""" The data map to be used to persist the processing request data """
pending_data_buffer = []
""" The buffer that holds the pending data """
_connection_socket = None
""" The original connection socket """
_read_buffer = []
""" The read buffer """
_read_lock = None
""" The read lock """
_write_lock = None
""" The write lock """
def __init__(self, service_plugin, service_connection_handler, connection_socket, connection_address, connection_port, connection_request_timeout, connection_response_timeout, connection_chunk_size):
"""
Constructor of the class.
:type service_plugin: Plugin
:param service_plugin: The service plugin.
:type service_connection_handler: AbstractServiceConnectionHandler
:param service_connection_handler: The service connection handler.
:type connection_socket: Socket
:param connection_socket: The connection socket.
:type connection_address: Tuple
:param connection_address: The connection address.
:type connection_port: int
:param connection_port: The connection port.
:type connection_request_timeout: float
:param connection_request_timeout: The connection request timeout.
:type connection_response_timeout: float
:param connection_response_timeout: The connection response timeout.
:type connection_chunk_size: int
:param connection_chunk_size: The connection chunk size.
"""
self.service_plugin = service_plugin
self.service_connection_handler = service_connection_handler
self.connection_socket = connection_socket
self.connection_address = connection_address
self.connection_port = connection_port
self.connection_request_timeout = connection_request_timeout
self.connection_response_timeout = connection_response_timeout
self.connection_chunk_size = connection_chunk_size
self._connection_socket = connection_socket
self.connection_opened_handlers = []
self.connection_closed_handlers = []
self.connection_properties = {}
self.request_data = {}
self.pending_data_buffer = []
self._read_buffer = []
self._read_lock = threading.RLock()
self._write_lock = threading.RLock()
def __repr__(self):
return "(%s, %s)" % (self.connection_address, self.connection_port)
def open(self):
"""
Opens the connection.
"""
# prints debug message about connection
self.service_plugin.debug("Connected to: %s" % str(self.connection_address))
# calls the connection opened handlers
self._call_connection_opened_handlers()
# sets the connection status flag
self.connection_status = True
def close(self):
"""
Closes the connection.
"""
# unsets the connection status flag
self.connection_status = False
# closes the connection socket
self.connection_socket.close()
# calls the connection closed handlers
self._call_connection_closed_handlers()
# prints debug message about connection
self.service_plugin.debug("Disconnected from: %s" % str(self.connection_address))
def cancel(self, delta_time):
"""
Cancels (closes) the given connection in
the given amount of seconds.
:type delta_time: float
:param delta_time: The amount of seconds until canceling.
"""
# sets the cancel time
self.cancel_time = time.clock() + delta_time
def upgrade(self, socket_upgrader, parameters):
"""
Upgrades the current connection socket, using
the upgrader with the given name and the given parameters.
:type socket_upgrader: String
:param socket_upgrader: The name of the socket upgrader.
:type parameters: Dictionary
:param parameters: The parameters to the upgrade process.
"""
# retrieves the service utils
service_utils = self.service_connection_handler.service.service_utils
# retrieves the socket upgrader plugins map
socket_upgrader_plugins_map = service_utils.socket_upgrader_plugins_map
# in case the upgrader handler is not found in the handler plugins map
if not socket_upgrader in socket_upgrader_plugins_map:
# raises the socket upgrader not found exception
raise exceptions.SocketUpgraderNotFound("socket upgrader %s not found" % socket_upgrader)
# retrieves the socket upgrader plugin
socket_upgrader_plugin = service_utils.socket_upgrader_plugins_map[socket_upgrader]
# upgrades the current connection socket using the socket upgrader plugin
self.connection_socket = socket_upgrader_plugin.upgrade_socket_parameters(self.connection_socket, parameters)
# sets the socket to non blocking mode
self.connection_socket.setblocking(0)
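# For illustration (the upgrader name and parameters are hypothetical): a caller
# holding a service connection could switch the underlying socket to an encrypted
# one with
#
#     connection.upgrade("ssl", {"server_side" : True})
#
# provided an upgrader registered under that name exists in the socket upgrader
# plugins map; the upgraded socket is then put into non-blocking mode as above.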
def execute_background(self, callable, retries = 0, timeout = 0.0, timestamp = None):
"""
Executes the given callable object in a background
thread.
This method is useful to avoid blocking the request
handling method in non-critical tasks.
:type callable: Callable
:param callable: The callable to be called in background.
:type retries: int
:param retries: The number of times to retry executing the
callable in case an exception is raised.
:type timeout: float
:param timeout: The time to be set in between calls of the
callable, used together with the retry value.
:type timestamp: float
:param timestamp: The Unix timestamp (in seconds) for the
first execution of the callable.
"""
# adds the callable to the service execution thread
self.service_execution_thread.add_callable(
callable,
retries = retries,
timeout = timeout,
timestamp = timestamp
)
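# For illustration (the callable shown is hypothetical): a non-critical task can
# be deferred to the service execution thread with
#
#     connection.execute_background(flush_access_log, retries = 3, timeout = 1.5)
#
# which retries the callable up to three times, waiting 1.5 seconds between
# attempts, without blocking the request handling code path.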
def receive(self, request_timeout = None, chunk_size = None, retries = RECEIVE_RETRIES):
"""
Receives the data from the current connection socket, with the
given timeout and with a maximum size given by the chunk size.
:type request_timeout: float
:param request_timeout: The timeout to be used in data receiving.
:type chunk_size: int
:param chunk_size: The maximum size of the chunk to be received.
:type retries: int
:param retries: The number of retries to be used.
:rtype: String
:return: The received data.
"""
# acquires the read lock
self._read_lock.acquire()
try:
# receives the data and returns the value
return_value = self._receive(request_timeout, chunk_size, retries)
finally:
# releases the read lock
self._read_lock.release()
# returns the return value
return return_value
def send(self, message, response_timeout = None, retries = SEND_RETRIES, write_front = False):
"""
Sends the given message to the socket.
Raises an exception in case there is a problem sending
the message.
:type message: String
:param message: The message to be sent.
:type response_timeout: float
:param response_timeout: The timeout to be used in data sending.
:type retries: int
:param retries: The number of retries to be used.
:type write_front: bool
:param write_front: If the write of the message should be
made to the front of the buffer.
"""
# acquires the write lock
self._write_lock.acquire()
try:
# sends the message
self._send(message, response_timeout, retries)
finally:
# releases the write lock
self._write_lock.release()
def is_async(self):
"""
Retrieves if the current connection is
of type asynchronous.
:rtype: bool
:return: If the current connection is of type
asynchronous.
"""
return False
def is_open(self):
"""
Retrieves if the current connection is open.
:rtype: bool
:return: If the current connection is open.
"""
return self.connection_status
def is_secure(self):
"""
Verifies if the current connection is of type secure;
this analysis uses a specific heuristic.
If the connection is secure the underlying level should
be using an encrypted channel for communication.
:rtype: bool
:return: If the current connection is being transmitted
using a secure and encrypted channel.
"""
return hasattr(self.connection_socket, "_secure")
def get_connection_property(self, property_name):
"""
Retrieves the connection property for the given name.
:type property_name: String
:param property_name: The name of the property to
be retrieved.
:rtype: Object
:return: The value of the retrieved property.
"""
return self.connection_properties.get(property_name, None)
def set_connection_property(self, property_name, property_value):
"""
Sets a connection property, associating the given name
with the given value.
:type property_name: String
:param property_name: The name of the property to set.
:type property_value: Object
:param property_value: The value of the property to set.
"""
self.connection_properties[property_name] = property_value
def unset_connection_property(self, property_name):
"""
Unsets a connection property, removing it from the internal
structures.
:type property_name: String
:param property_name: The name of the property to unset.
"""
del self.connection_properties[property_name]
def add_pending_data(self, pending_data):
"""
Adds a chunk of pending data to the pending
data buffer.
:type pending_data: String
:param pending_data: The pending data to be
added to the pending data buffer.
"""
# in case the pending data is not valid
if not pending_data:
# returns immediately
return
# adds the pending data to the pending data
# buffer (list)
self.pending_data_buffer.append(pending_data)
def pop_pending_data(self):
"""
"Pops" the current pending data from the
service connection.
:rtype: String
:return: The current pending data from the
service connection (in case there is one).
"""
# in case the pending data buffer is
# not valid
if not self.pending_data_buffer:
# returns none (invalid)
return None
# returns the result of a "pop" in the
# pending data buffer
return self.pending_data_buffer.pop(0)
def pending_data(self):
"""
Checks if there is pending data to be "read"
or interpreted by the client service.
:rtype: bool
:return: If | |
from geoalchemy2 import functions as geo_func
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy.orm import Query
from tests import factories
from threedi_modelchecker.checks.base import _sqlalchemy_to_sqlite_types
from threedi_modelchecker.checks.base import EnumCheck
from threedi_modelchecker.checks.base import FileExistsCheck
from threedi_modelchecker.checks.base import ForeignKeyCheck
from threedi_modelchecker.checks.base import GeneralCheck
from threedi_modelchecker.checks.base import GeometryCheck
from threedi_modelchecker.checks.base import GeometryTypeCheck
from threedi_modelchecker.checks.base import NotNullCheck
from threedi_modelchecker.checks.base import QueryCheck
from threedi_modelchecker.checks.base import RangeCheck
from threedi_modelchecker.checks.base import TypeCheck
from threedi_modelchecker.checks.base import UniqueCheck
from threedi_modelchecker.threedi_model import constants
from threedi_modelchecker.threedi_model import custom_types
from threedi_modelchecker.threedi_model import models
import factory
import pytest
def test_base_extra_filters_ok(session):
factories.ConnectionNodeFactory(id=1, storage_area=3.0)
factories.ConnectionNodeFactory(id=2, storage_area=None)
null_check = NotNullCheck(
column=models.ConnectionNode.storage_area, filters=models.ConnectionNode.id != 2
)
invalid_rows = null_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_base_extra_filters_err(session):
factories.ConnectionNodeFactory(id=1, storage_area=3.0)
factories.ConnectionNodeFactory(id=2, storage_area=None)
null_check = NotNullCheck(
column=models.ConnectionNode.storage_area, filters=models.ConnectionNode.id == 2
)
invalid_rows = null_check.get_invalid(session)
assert len(invalid_rows) == 1
def test_fk_check(session):
factories.ManholeFactory.create_batch(5)
fk_check = ForeignKeyCheck(
models.ConnectionNode.id, models.Manhole.connection_node_id
)
invalid_rows = fk_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_fk_check_no_entries(session):
fk_check = ForeignKeyCheck(
models.ConnectionNode.id, models.Manhole.connection_node_id
)
invalid_rows = fk_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_fk_check_null_fk(session):
conn_node = factories.ConnectionNodeFactory()
factories.ManholeFactory.create_batch(5, manhole_indicator=conn_node.id)
factories.ManholeFactory(manhole_indicator=None)
fk_check = ForeignKeyCheck(
models.ConnectionNode.id, models.Manhole.manhole_indicator
)
invalid_rows = fk_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_fk_check_both_null(session):
factories.GlobalSettingsFactory(control_group_id=None)
assert session.query(models.GlobalSetting).first().id is not None
assert session.query(models.GlobalSetting.control_group_id).scalar() is None
assert session.query(models.ControlGroup.id).scalar() is None
fk_check = ForeignKeyCheck(
models.ControlGroup.id, models.GlobalSetting.control_group_id
)
invalid_rows = fk_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_fk_check_missing_fk(session):
conn_node = factories.ConnectionNodeFactory()
factories.ManholeFactory.create_batch(5, manhole_indicator=conn_node.id)
missing_fk = factories.ManholeFactory(manhole_indicator=-1)
fk_check = ForeignKeyCheck(
models.ConnectionNode.id, models.Manhole.manhole_indicator
)
invalid_rows = fk_check.get_invalid(session)
assert len(invalid_rows) == 1
assert invalid_rows[0].id == missing_fk.id
def test_unique_check(session):
factories.ManholeFactory.create_batch(5)
unique_check = UniqueCheck(models.Manhole.code)
invalid_rows = unique_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_unique_check_duplicate_value(session):
manholes = factories.ManholeFactory.create_batch(
5, zoom_category=factory.Sequence(lambda n: n)
)
duplicate_manhole = factories.ManholeFactory(
zoom_category=manholes[0].zoom_category
)
unique_check = UniqueCheck(models.Manhole.zoom_category)
invalid_rows = unique_check.get_invalid(session)
assert len(invalid_rows) == 2
invalid_ids = [invalid.id for invalid in invalid_rows]
assert manholes[0].id in invalid_ids
assert duplicate_manhole.id in invalid_ids
def test_unique_check_null_values(session):
factories.ManholeFactory.create_batch(
5, zoom_category=factory.Sequence(lambda n: n)
)
factories.ManholeFactory.create_batch(3, zoom_category=None)
unique_check = UniqueCheck(models.ConnectionNode.id)
invalid_rows = unique_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_null_check(session):
factories.ConnectionNodeFactory.create_batch(5, storage_area=3.0)
null_check = NotNullCheck(models.ConnectionNode.storage_area)
invalid_rows = null_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_null_check_with_null_value(session):
factories.ConnectionNodeFactory.create_batch(5, storage_area=3.0)
null_node = factories.ConnectionNodeFactory(storage_area=None)
null_check = NotNullCheck(models.ConnectionNode.storage_area)
invalid_rows = null_check.get_invalid(session)
assert len(invalid_rows) == 1
assert invalid_rows[0].id == null_node.id
def test_threedi_db_and_factories(threedi_db):
"""Test to ensure that the threedi_db and factories use the same
session object."""
session = threedi_db.get_session()
factories.ManholeFactory()
q = session.query(models.Manhole)
assert q.count() == 1
def test_run_spatial_function(session):
"""Example how to use spatial functions.
Works on postgis and spatialite"""
factories.ConnectionNodeFactory()
from geoalchemy2 import func
q = session.query(func.ST_AsGeoJSON(models.ConnectionNode.the_geom))
q.first()
def test_type_check(session):
if session.bind.name == "postgresql":
pytest.skip("type checks not working on postgres")
factories.ManholeFactory(zoom_category=123)
factories.ManholeFactory(zoom_category=456)
type_check = TypeCheck(models.Manhole.zoom_category)
invalid_rows = type_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_type_check_integer(session):
if session.bind.name == "postgresql":
pytest.skip("type checks not working on postgres")
factories.ManholeFactory(zoom_category=123)
factories.ManholeFactory(zoom_category=None)
m1 = factories.ManholeFactory(zoom_category="abc")
m2 = factories.ManholeFactory(zoom_category=1.23)
type_check = TypeCheck(models.Manhole.zoom_category)
invalid_rows = type_check.get_invalid(session)
assert len(invalid_rows) == 2
invalid_ids = [invalid.id for invalid in invalid_rows]
assert m1.id in invalid_ids
assert m2.id in invalid_ids
def test_type_check_float_can_store_integer(session):
if session.bind.name == "postgresql":
pytest.skip("type checks not working on postgres")
factories.ManholeFactory(surface_level=1.3)
factories.ManholeFactory(surface_level=None)
factories.ManholeFactory(surface_level=1)
m1 = factories.ManholeFactory(zoom_category="abc")
type_check = TypeCheck(models.Manhole.zoom_category)
invalid_rows = type_check.get_invalid(session)
valid_rows = type_check.get_valid(session)
assert len(valid_rows) == 3
assert len(invalid_rows) == 1
invalid_ids = [invalid.id for invalid in invalid_rows]
assert m1.id in invalid_ids
def test_type_check_varchar(session):
if session.bind.name == "postgresql":
pytest.skip("type checks not working on postgres")
factories.ManholeFactory(code="abc")
factories.ManholeFactory(code=123)
type_check = TypeCheck(models.Manhole.code)
invalid_rows = type_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_type_check_boolean(session):
if session.bind.name == "postgresql":
pytest.skip("type checks not working on postgres")
factories.GlobalSettingsFactory(use_1d_flow=True)
factories.GlobalSettingsFactory(use_1d_flow=1)
# factories.GlobalSettingsFactory(use_1d_flow='true')
# factories.GlobalSettingsFactory(use_1d_flow='1')
# factories.GlobalSettingsFactory(use_1d_flow=1.0)
type_check = TypeCheck(models.GlobalSetting.use_1d_flow)
invalid_rows = type_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_geometry_check(session):
factories.ConnectionNodeFactory(the_geom="SRID=4326;POINT(-371.064544 42.28787)")
geometry_check = GeometryCheck(models.ConnectionNode.the_geom)
invalid_rows = geometry_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_geometry_check_with_invalid_geoms(session):
if session.bind.name == "postgresql":
pytest.skip("Not sure how to insert invalid types in postgresql")
insert_invalid_geom_q = """
INSERT INTO v2_connection_nodes (id, code, the_geom)
VALUES (2, 'the_code', 'invalid_geom')
"""
session.execute(insert_invalid_geom_q)
factories.ConnectionNodeFactory(the_geom="SRID=4326;POINT(-71.064544 42.28787)")
geometry_check = GeometryCheck(models.ConnectionNode.the_geom)
invalid_rows = geometry_check.get_invalid(session)
assert len(invalid_rows) == 1
def test_geometry_type_check(session):
factories.ConnectionNodeFactory.create_batch(
2, the_geom="SRID=4326;POINT(-71.064544 42.28787)"
)
geometry_type_check = GeometryTypeCheck(models.ConnectionNode.the_geom)
invalid_rows = geometry_type_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_geometry_type_check_invalid_geom_type(session):
if session.bind.name == "postgresql":
pytest.skip("Not sure how to insert invalid geometry types in postgresql")
factories.ConnectionNodeFactory(the_geom="SRID=4326;POINT(-71.064544 42.28787)")
invalid_geom_type = factories.ConnectionNodeFactory(
the_geom="SRID=4326;LINESTRING(71.0 42.2, 71.3 42.3)"
)
geometry_type_check = GeometryTypeCheck(models.ConnectionNode.the_geom)
invalid_rows = geometry_type_check.get_invalid(session)
assert len(invalid_rows) == 1
assert invalid_rows[0].id == invalid_geom_type.id
def test_enum_check(session):
factories.BoundaryConditions2DFactory()
enum_check = EnumCheck(models.BoundaryConditions2D.boundary_type)
invalid_rows = enum_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_enum_check_with_null_values(session):
factories.BoundaryConditions2DFactory(boundary_type=None)
enum_check = EnumCheck(models.BoundaryConditions2D.boundary_type)
invalid_rows = enum_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_enum_check_with_invalid_value(session):
factories.BoundaryConditions2DFactory()
faulty_boundary = factories.BoundaryConditions2DFactory(boundary_type=-1)
enum_check = EnumCheck(models.BoundaryConditions2D.boundary_type)
invalid_rows = enum_check.get_invalid(session)
assert len(invalid_rows) == 1
assert invalid_rows[0].id == faulty_boundary.id
def test_enum_check_string_enum(session):
factories.AggregationSettingsFactory()
enum_check = EnumCheck(models.AggregationSettings.aggregation_method)
invalid_rows = enum_check.get_invalid(session)
assert len(invalid_rows) == 0
def test_enum_check_string_with_invalid_value(session):
if session.bind.name == "postgresql":
pytest.skip(
"Not able to add invalid aggregation method due to " "CHECKED CONSTRAINT"
)
a = factories.AggregationSettingsFactory(aggregation_method="invalid")
enum_check = EnumCheck(models.AggregationSettings.aggregation_method)
invalid_rows = enum_check.get_invalid(session)
assert len(invalid_rows) == 1
assert invalid_rows[0].id == a.id
def test_sqlalchemy_to_sqlite_type_with_custom_type():
customIntegerEnum = custom_types.IntegerEnum(constants.BoundaryType)
assert _sqlalchemy_to_sqlite_types(customIntegerEnum) == ["integer"]
def test_conditional_checks(session):
global_settings1 = factories.GlobalSettingsFactory(
dem_obstacle_detection=True, dem_obstacle_height=-5
)
factories.GlobalSettingsFactory(
dem_obstacle_detection=False, dem_obstacle_height=-5
)
query = Query(models.GlobalSetting).filter(
models.GlobalSetting.dem_obstacle_height <= 0,
models.GlobalSetting.dem_obstacle_detection == True,
)
conditional_range_check_to_query_check = QueryCheck(
column=models.GlobalSetting.dem_obstacle_height,
invalid=query,
message="GlobalSetting.dem_obstacle_height should be larger than 0 "
"when GlobalSetting.dem_obstacle_height is True.",
)
invalids_querycheck = conditional_range_check_to_query_check.get_invalid(session)
assert len(invalids_querycheck) == 1
assert invalids_querycheck[0].id == global_settings1.id
def test_conditional_check_storage_area(session):
# if connection node is a manhole, then the storage area of the
# connection_node must be > 0
factories.ConnectionNodeFactory(storage_area=5)
factories.ConnectionNodeFactory(storage_area=-3)
conn_node_manhole_valid = factories.ConnectionNodeFactory(storage_area=4)
conn_node_manhole_invalid = factories.ConnectionNodeFactory(storage_area=-5)
factories.ManholeFactory(connection_node=conn_node_manhole_valid)
factories.ManholeFactory(connection_node=conn_node_manhole_invalid)
query = (
Query(models.ConnectionNode)
.join(models.Manhole)
.filter(models.ConnectionNode.storage_area <= 0)
)
query_check = QueryCheck(
column=models.ConnectionNode.storage_area, invalid=query, message=""
)
invalids = query_check.get_invalid(session)
assert len(invalids) == 1
assert invalids[0].id == conn_node_manhole_invalid.id
def test_conditional_check_joining_criterion_valid(session):
# Joining on criterion valid fails because it takes the complement (negation)
# of the joins (instead of only the where statement (joins are in the where
# statement)).
connection_node1 = factories.ConnectionNodeFactory()
connection_node2 = factories.ConnectionNodeFactory()
manhole1 = factories.ManholeFactory(
connection_node=connection_node1, bottom_level=1.0
)
factories.ManholeFactory(connection_node=connection_node2, bottom_level=-1.0)
factories.PumpstationFactory(
connection_node_start=connection_node1, lower_stop_level=0.0
)
factories.PumpstationFactory(
connection_node_start=connection_node2, lower_stop_level=2.0
)
check_lower_stop_level_gt_bottom_level_complement = GeneralCheck(
column=models.Manhole.bottom_level,
criterion_valid=and_(
models.Pumpstation.connection_node_start_id == models.ConnectionNode.id,
models.Manhole.connection_node_id == models.ConnectionNode.id,
models.Pumpstation.lower_stop_level > models.Manhole.bottom_level,
),
)
invalids = check_lower_stop_level_gt_bottom_level_complement.get_invalid(session)
assert len(invalids) != 1 # Note that 1 is what we actually want!
assert invalids[0].id == manhole1.id
def test_query_check_with_joins(session):
connection_node1 = factories.ConnectionNodeFactory()
connection_node2 = factories.ConnectionNodeFactory()
factories.ManholeFactory(connection_node=connection_node1, bottom_level=1.0)
factories.ManholeFactory(connection_node=connection_node2, bottom_level=-1.0)
pump1 = factories.PumpstationFactory(
connection_node_start=connection_node1, lower_stop_level=0.0
)
factories.PumpstationFactory(
connection_node_start=connection_node2, lower_stop_level=2.0
)
query = (
Query(models.Pumpstation)
.join(
models.ConnectionNode,
models.Pumpstation.connection_node_start_id == models.ConnectionNode.id,
)
.join(models.Manhole)
.filter(
models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
)
)
check = QueryCheck(
column=models.Pumpstation.lower_stop_level,
invalid=query,
message="Pumpstation.lower_stop_level should be higher than "
"Manhole.bottom_level",
)
invalids = check.get_invalid(session)
assert len(invalids) == 1
assert invalids[0].id == pump1.id
def test_query_check_on_pumpstation(session):
connection_node1 = factories.ConnectionNodeFactory()
connection_node2 = factories.ConnectionNodeFactory()
factories.ManholeFactory(connection_node=connection_node1, bottom_level=1.0)
factories.ManholeFactory(connection_node=connection_node2, bottom_level=-1.0)
pumpstation_wrong = factories.PumpstationFactory(
connection_node_start=connection_node1, lower_stop_level=0.0
)
factories.PumpstationFactory(
connection_node_start=connection_node2, lower_stop_level=2.0
)
query = (
Query(models.Pumpstation)
.join(
models.ConnectionNode,
models.Pumpstation.connection_node_start_id
== models.ConnectionNode.id, # noqa: E501
)
.join(
models.Manhole,
models.Manhole.connection_node_id == models.ConnectionNode.id,
)
.filter(
models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
)
)
check = QueryCheck(
column=models.Pumpstation.lower_stop_level,
invalid=query,
message="Pumpstation lower_stop_level should be higher than Manhole "
"bottom_level",
)
invalids = check.get_invalid(session)
assert len(invalids) == 1
assert invalids[0].id == pumpstation_wrong.id
def test_get_valid(session):
factories.ConnectionNodeFactory(storage_area=1)
factories.ConnectionNodeFactory(storage_area=2)
factories.ConnectionNodeFactory(storage_area=3)
range_check = GeneralCheck(
column=models.ConnectionNode.storage_area,
criterion_valid=models.ConnectionNode.storage_area > 2,
)
to_check = range_check.to_check(session).all()
assert len(to_check) == 3
invalids = range_check.get_invalid(session)
valids = range_check.get_valid(session)
assert len(valids) + len(invalids) == 3
def test_general_check_range(session):
w1 = factories.WeirFactory(friction_value=2)
factories.WeirFactory(friction_value=-1)
invalid_criterion = models.Weir.friction_value > 0
general_range_check = GeneralCheck(
column=models.Weir.friction_value, criterion_invalid=invalid_criterion
)
invalid = general_range_check.get_invalid(session)
assert len(invalid) == 1
assert invalid[0].id == w1.id
def test_general_check_valid_criterion_range(session):
factories.WeirFactory(friction_value=2)
w2 = factories.WeirFactory(friction_value=-1)
valid_criterion = models.Weir.friction_value >= 0
general_range_check = GeneralCheck(
column=models.Weir.friction_value, criterion_valid=valid_criterion
)
invalid = general_range_check.get_invalid(session)
assert len(invalid) == 1
assert invalid[0].id == w2.id
@pytest.mark.skip("Aggregate function not working for general checks")
def test_general_check_aggregation_function(session):
# Aggregation functions need something different!
w1 = factories.WeirFactory(friction_value=2)
w2 = factories.WeirFactory(friction_value=-1)
invalid_criterion = func.count(models.Weir.friction_value) < 3
general_range_check = GeneralCheck(
column=models.Weir.friction_value, criterion_invalid=invalid_criterion
)
invalid = general_range_check.get_invalid(session)
assert len(invalid) == 2
invalid_ids = [row.id for row in invalid]
assert w1.id in invalid_ids
assert w2.id in invalid_ids
def test_general_check_modulo_operator(session):
factories.GlobalSettingsFactory(nr_timesteps=120, output_time_step=20)
global_settings_remainder = factories.GlobalSettingsFactory(
nr_timesteps=125, output_time_step=20 # This is a FLOAT
)
# We cast to Integer because postgis modulo operator expects two of
# same type.
modulo_check = GeneralCheck(
column=models.GlobalSetting.nr_timesteps,
criterion_valid=models.GlobalSetting.nr_timesteps
% cast(models.GlobalSetting.output_time_step, Integer)
== 0,
)
invalid = modulo_check.get_invalid(session)
assert len(invalid) == 1
assert invalid[0].id == global_settings_remainder.id
def test_query_check_manhole_drain_level_calc_type_2(session):
# manhole.drain_level can be null, but if manhole.calculation_type == 2 (Connected)
# then manhole.drain_level >= manhole.bottom_level
factories.ManholeFactory(drain_level=None)
factories.ManholeFactory(drain_level=1)
m3_error = factories.ManholeFactory(
drain_level=None, calculation_type=constants.CalculationTypeNode.CONNECTED
) # drain_level cannot be | |
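# (Sketch only) The truncated test above would plausibly continue in the style of
# the other QueryCheck tests in this file, e.g. flagging manholes with
# calculation_type CONNECTED whose drain_level is below bottom_level:
#
#     query = Query(models.Manhole).filter(
#         models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED,
#         models.Manhole.drain_level < models.Manhole.bottom_level,
#     )
#     query_check = QueryCheck(
#         column=models.Manhole.drain_level,
#         invalid=query,
#         message="Manhole.drain_level should be >= Manhole.bottom_level when "
#                 "calculation_type is CONNECTED",
#     )
#
# A complete check would also need to treat NULL drain_level as invalid for
# connected manholes, which the comparison above does not cover.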
# test.py
""" This file houses the suite of tests for main.py classes and functions
THIS FILE IS A MESS AND TOTALLY BROKEN AT THIS POINT
IT WILL NOT RUN
IT IS HERE THAT IT MAY BE CANNIBALISED FOR FUTURE ITERATIONS OF THE PROJECT
"""
def test_variables():
""" variables needed for various tests """
global game_1, game_2, game_3, game_4, game_5, game_6, game_7, game_8, game_9
global game_10, game_11, game_12, game_13, game_14, game_15, game_16, game_17, game_18
global game_19, game_20, game_21, game_22, game_23, game_24, game_25, game_26, game_27
global game_28, game_29, game_30, game_31, game_32, game_33, game_34, game_35, game_36
global game_37, game_38, game_39, game_40, game_41, game_42, game_43, game_44, game_45
global game_46, game_47, game_48, game_49, legal_checkmates
global fens, fen_1, fen_2, fen_3, fen_4
fen_1 = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
fen_2 = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
fen_3 = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/8/PPPP1PPP/RNBQKBNR w KQkq c6 0 2'
fen_4 = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2'
fens = [fen_1, fen_2, fen_3, fen_4]
game_1 = '1. e4 d5 '\
'2. exd5 Qxd5 3. Nc3 Qd8 4. Bc4 Nf6 5. Nf3 Bg4 6. h3 Bxf3 '\
'7. Qxf3 e6 8. Qxb7 Nbd7 9. Nb5 Rc8 10. Nxa7 Nb6 11. Nxc8 Nxc8 '\
'12. d4 Nd6 13. Bb5+ Nxb5 14. Qxb5+ Nd7 15. d5 exd5 16. Be3 Bd6 '\
'17. Rd1 Qf6 18. Rxd5 Qg6 19. Bf4 Bxf4 20. Qxd7+ Kf8 21. Qd8#'
game_2 = '1.e4 b6 2.d4 Bb7 3.Bd3 f5 4.exf5 Bxg2 5.Qh5+ g6 6.fxg6 Nf6 ' \
'7.gxh7 Nxh5 8.Bg6#'
game_3 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Nf6 4.Ng5 d5 5.exd5 Nxd5 6.Nxf7 Kxf7 '\
'7.Qf3+ Ke6 8.Nc3 Nce7 9.O-O c6 10.Re1 Bd7 11.d4 Kd6 12.Rxe5 Ng6 '\
'13.Nxd5 Nxe5 14.dxe5+ Kc5 15.Qa3+ Kxc4 16.Qd3+ Kc5 17.b4#'
game_4 = '1. e4 e5 2. Nf3 d6 3. Bc4 Bg4 4. Nc3 g6 5. Nxe5 Bxd1 6. Bxf7+ '\
'Ke7 7. Nd5#'
game_5 = '1. e4 e5 2. Bc4 Bc5 3. d3 c6 4. Qe2 d6 5. f4 exf4 6. Bxf4 Qb6 '\
'7. Qf3 Qxb2 8. Bxf7+ Kd7 9. Ne2 Qxa1 10. Kd2 Bb4+ 11. Nbc3 '\
'Bxc3+ 12. Nxc3 Qxh1 13. Qg4+ Kc7 14. Qxg7 Nd7 15. Qg3 b6 '\
'16. Nb5+ cxb5 17. Bxd6+ Kb7 18. Bd5+ Ka6 19. d4 b4 20. Bxb4 '\
'Kb5 21. c4+ Kxb4 22. Qb3+ Ka5 23. Qb5#'
game_6 = '1.e4 e5 2.f4 exf4 3.Bc4 Qh4+ 4.Kf1 b5 5.Bxb5 Nf6 6.Nf3 Qh6 '\
'7.d3 Nh5 8.Nh4 Qg5 9.Nf5 c6 10.g4 Nf6 11.Rg1 cxb5 12.h4 Qg6 '\
'13.h5 Qg5 14.Qf3 Ng8 15.Bxf4 Qf6 16.Nc3 Bc5 17.Nd5 Qxb2 18.Bd6 '\
"Bxg1 {It is from this move that Black's defeat stems. Wilhelm "\
'Steinitz suggested in 1879 that a better move would be '\
'18... Qxa1+; likely moves to follow are 19. Ke2 Qb2 20. Kd2 '\
'Bxg1.} 19. e5 Qxa1+ 20. Ke2 Na6 21.Nxg7+ Kd8 22.Qf6+ Nxf6 '\
'23.Be7#'
game_7 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Bc5 4.c3 Qe7 5.O-O d6 6.d4 Bb6 7.Bg5 '\
'f6 8.Bh4 g5 9.Nxg5 fxg5 10.Qh5+ Kf8 11.Bxg5 Qe8 12.Qf3+ Kg7 '\
'13.Bxg8 Rxg8 14.Qf6#'
game_8 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Bc5 4.b4 Bxb4 5.c3 Ba5 6.d4 exd4 7.O-O '\
'd3 8.Qb3 Qf6 9.e5 Qg6 10.Re1 Nge7 11.Ba3 b5 12.Qxb5 Rb8 13.Qa4 '\
'Bb6 14.Nbd2 Bb7 15.Ne4 Qf5 16.Bxd3 Qh5 17.Nf6+ gxf6 18.exf6 '\
'Rg8 19.Rad1 Qxf3 20.Rxe7+ Nxe7 21.Qxd7+ Kxd7 22.Bf5+ Ke8 '\
'23.Bd7+ Kf8 24.Bxe7#'
game_9 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Nf6 4.d4 exd4 5.Ng5 d5 6.exd5 Nxd5 '\
'7.O-O Be7 8.Nxf7 Kxf7 9.Qf3+ Ke6 10.Nc3 dxc3 11.Re1+ Ne5 '\
'12.Bf4 Bf6 13.Bxe5 Bxe5 14.Rxe5+ Kxe5 15.Re1+ Kd4 16.Bxd5 Re8 '\
'17.Qd3+ Kc5 18.b4+ Kxb4 19.Qd4+ Ka5 20.Qxc3+ Ka4 21.Qb3+ Ka5 '\
'22.Qa3+ Kb6 23.Rb1#'
game_10 = '1. e4 e5 2. d4 exd4 3. Bc4 Nf6 4. e5 d5 5. Bb3 Ne4 6. Ne2 Bc5 '\
'7. f3 Qh4+ 8. g3 d3 9. gxh4 Bf2+ 10. Kf1 Bh3#'
game_11 = '1. e4 e5 2. Nf3 d6 3. Bc4 f5 4. d4 Nf6 5. Nc3 exd4 6. Qxd4 Bd7 '\
'7. Ng5 Nc6 8. Bf7+ Ke7 9. Qxf6+ Kxf6 10. Nd5+ Ke5 11. Nf3+ '\
'Kxe4 12. Nc3#'
game_12 = '1. e4 e5 2. d4 exd4 3. c3 dxc3 4. Bc4 d6 5. Nxc3 Nf6 6. Nf3 '\
'Bg4 7. O-O Nc6 8. Bg5 Ne5 9. Nxe5 Bxd1 10. Bxf7+ Ke7 11. Nd5#'
game_13 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Nh6 4.O-O Ng4 5.d4 exd4 6.Bxf7+ Kxf7 '\
'7.Ng5+ Kg6 8.Qxg4 d5 9.Ne6+ Kf6 10.Qf5+ Ke7 11.Bg5+ Kd6 '\
'12.Qxd5#'
game_14 = '1. e4 e6 2. d4 d5 3. Nc3 Bb4 4. Bd3 Bxc3+ 5. bxc3 h6 6. Ba3 '\
'Nd7 7. Qe2 dxe4 8. Bxe4 Ngf6 9. Bd3 b6 10. Qxe6+ fxe6 11. Bg6#'
game_15 = '1.e4 e5 2.d4 exd4 3.Nf3 Nc6 4.Bc4 Be7 5.c3 dxc3 6.Qd5 d6 '\
'7.Qxf7+ Kd7 8.Be6#'
game_16 = '1. Nf3 Nf6 2. c4 c5 3. d4 Nc6 4. d5 Nb8 5. Nc3 d6 6. g3 g6 '\
'7. Bg2 Bg7 8. O-O O-O 9. Bf4 h6 10. Qd2 Kh7 11. e4 Nh5 12. Be3 '\
'Nd7 13. Rae1 Rb8 14. Nh4 Ndf6 15. h3 Ng8 16. g4 Nhf6 17. f4 e6 '\
'18. Nf3 exd5 19. cxd5 b5 20. e5 b4 21. Nd1 Ne4 22. Qd3 f5 '\
'23. e6 Qa5 24. gxf5 gxf5 25. Nh4 Ba6 26. Qxe4 fxe4 27. Bxe4+ '\
'Kh8 28. Ng6+ Kh7 29. Nxf8+ Kh8 30. Ng6+ Kh7 31. Ne5+ Kh8 '\
'32. Nf7#'
game_17 = '1. e4 e5 2. f4 exf4 3. Nf3 Nf6 4. e5 Ng4 5. d4 g5 6. Nc3 Ne3 '\
'7. Qe2 Nxf1 8. Ne4 Ne3 9. Nf6+ Ke7 10. Bd2 Nxc2+ 11. Kf2 Nxa1 '\
'12. Nd5+ Ke6 13. Qc4 b5 14. Nxg5+ Qxg5 15. Nxc7+ Ke7 16. Nd5+ '\
'Ke6 17. Nxf4+ Ke7 18. Nd5+ Ke8 19. Qxc8+ Qd8 20. Nc7+ Ke7 '\
'21. Bb4+ d6 22. Bxd6+ Qxd6 23. Qe8#'
game_18 = '1. d4 { Notes by <NAME>. Here is a brilliant win by '\
'Tarrasch. } d5 2. Nf3 c5 3. c4 e6 4. e3 Nf6 5. Bd3 Nc6 6. O-O '\
'Bd6 7. b3 O-O 8. Bb2 b6 9. Nbd2 Bb7 10. Rc1 Qe7 11. cxd5 {11 '\
'Qe2!? } 11...exd5 12. Nh4 g6 13. Nhf3 Rad8 14. dxc5 bxc5 '\
'15. Bb5 Ne4 16. Bxc6 Bxc6 17. Qc2 Nxd2 18. Nxd2 {"The guardian '\
"of the king's field leaves his post for a moment, assuming "\
'wrongly that 19 Qc3 is a major threat" -- Tartakower. If 18 '\
'Qxd2 d4 19 exd4 Bxf3 20 gxf3 Qh4 } 18...d4 {!} 19. exd4 {19 '\
'Rfe1! } Bxh2+ 20. Kxh2 Qh4+ 21. Kg1 Bxg2 {!} 22. f3 {22 Kxg2 '\
'Qg4+ 23 Kh2 Rd5-+ } 22...Rfe8 23. Ne4 Qh1+ 24. Kf2 Bxf1 25. d5 '\
'{25 Rxf1 Qh2+ or 25 Nf6+ Kf8 26 Nxe8 Qg2+ } 25...f5 26. Qc3 '\
'Qg2+ 27. Ke3 Rxe4+ 28. fxe4 f4+ {28...Qg3+! } 29. Kxf4 Rf8+ '\
'30. Ke5 Qh2+ 31. Ke6 Re8+ 32. Kd7 Bb5#'
game_19 = '1. e4 e5 2. Nc3 Nc6 3. Nf3 d6 4. Bb5 Bg4 5. Nd5 Nge7 6. c3 a6 '\
'7. Ba4 b5 8. Bb3 Na5 9. Nxe5 Bxd1 10. Nf6+ gxf6 11. Bxf7#'
game_20 = '1.e4 {Notes by <NAME>} e5 2.Nf3 Nc6 3.Bc4 Nf6 4.Ng5 Bc5 '\
'{An original combination that is better than it looks. A small '\
'mistake by white can give black a decisive attack. It is not '\
'easy to find the best defense against it in a practical game '\
'and it is probably theoretically correct. ... It somewhat '\
'resembles the Blackmar-Jerome gambit: 1.e4 e5 2.Nf3 Nc6 3.Bc4 '\
'Bc5 4.Bxf7+?! Kxf7 5.Nxe5+?!} 5.Nxf7 Bxf2+ 6.Ke2 {The best '\
'defense is 6.Kf1! although after 6...Qe7 7.Nxh8 d5 8.exd5 Nd4 '\
'Black gets a strong attack.} Nd4+ 7.Kd3 b5 | |
fields=(
self._gen_expr(name),
"strip")))]))
elif atype.startswith('store'):
elements.append(gen_store(
name=name.strip(),
value=self._gen_expr(locator_attr)))
return elements
def _gen_keys_mngr(self, atype, param, selectors):
elements = []
args = []
action = None
elements.append(self._gen_get_locator_call("var_loc_keys", selectors))
if atype == "click":
action = "click"
elif atype == "submit":
action = "submit"
elif atype in ["keys", "type"]:
if atype == "type":
elements.append(ast_call(
func=ast_attr(
fields=(
self._gen_dynamic_locator("var_loc_keys", selectors),
"clear"))))
action = "send_keys"
if isinstance(param, (string_types, text_type)) and param.startswith("KEY_"):
args = [ast_attr("Keys.%s" % param.split("KEY_")[1])]
else:
args = [self._gen_expr(str(param))]
if action:
elements.append(ast_call(
func=ast_attr(
fields=(
self._gen_dynamic_locator("var_loc_keys", selectors),
action)),
args=args))
return elements
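# For illustration: a "keys" action whose param starts with "KEY_" (e.g.
# "KEY_ENTER") is emitted as the Selenium constant Keys.ENTER, while any other
# param is sent as a literal string; a "type" action additionally clears the
# located element before calling send_keys.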
def _gen_edit_mngr(self, param, locators):
if not param:
raise TaurusConfigError("Missing param for editContent action.")
var_name = "var_edit_content"
elements = [self._gen_get_locator_call(var_name, locators)]
locator = self._gen_dynamic_locator(var_name, locators)
tag = gen_subscript(var_name, 0)
selector = gen_subscript(var_name, 1)
if self._is_foreach_element(locators):
el = locators[0].get("byelement")
exc_msg = "The element '%s' (tag name: '%s', text: '%s') is not a contenteditable element"
exc_args = [ast.Str(el), ast_attr(el + ".tag_name"), ast_attr(el + ".text")]
else:
exc_msg = "The element (%s: %r) is not a contenteditable element"
exc_args = [tag, selector]
exc_type = ast_call(
func="NoSuchElementException",
args=[
ast.BinOp(
left=ast.Str(exc_msg),
op=ast.Mod(),
right=ast.Tuple(elts=exc_args))
]
)
if PY2:
raise_kwargs = {
"type": exc_type,
"inst": None,
"tback": None
}
else:
raise_kwargs = {
"exc": exc_type,
"cause": None}
body = ast.Expr(ast_call(func=ast_attr("self.driver.execute_script"),
args=[
ast.BinOp(
left=ast.Str("arguments[0].innerHTML = '%s';"),
op=ast.Mod(),
right=self._gen_expr(param.strip())),
locator]))
element = ast.If(
test=ast_call(
func=ast_attr(
fields=(locator, "get_attribute")),
args=[ast.Str("contenteditable")]),
body=[body],
orelse=[ast.Raise(**raise_kwargs)])
elements.append(element)
return elements
def _gen_screenshot_mngr(self, param):
elements = []
if param:
elements.append(ast_call(
func=ast_attr("self.driver.save_screenshot"),
args=[self._gen_expr(param)]))
else:
elements.append(ast.Assign(
targets=[ast.Name(id="filename")],
value=ast_call(
func=ast_attr("os.path.join"),
args=[
ast_call(
func=ast_attr("os.getenv"),
args=[ast.Str('TAURUS_ARTIFACTS_DIR')]),
ast.BinOp(
left=ast.Str('screenshot-%d.png'),
op=ast.Mod(),
right=ast.BinOp(
left=ast_call(func="time"),
op=ast.Mult(),
right=ast.Num(1000)))])))
elements.append(ast_call(
func=ast_attr("self.driver.save_screenshot"),
args=[ast.Name(id="filename")]))
return elements
def _gen_alert(self, param):
elements = []
switch, args = "self.driver.switch_to.alert.", []
if param == "OK":
elements.append(ast_call(
func=ast_attr(switch + "accept"),
args=args))
elif param == "Dismiss":
elements.append(ast_call(
func=ast_attr(switch + "dismiss"),
args=args))
return elements
def _gen_sleep_mngr(self, param):
elements = [ast_call(
func="sleep",
args=[ast.Num(dehumanize_time(param))])]
return elements
def _gen_select_mngr(self, param, selectors):
elements = [self._gen_get_locator_call("var_loc_select", selectors), ast_call(
func=ast_attr(
fields=(
ast_call(func="Select", args=[self._gen_dynamic_locator("var_loc_select", selectors)]),
"select_by_visible_text")),
args=[self._gen_expr(param)])]
return elements
def _gen_action(self, action_config):
action = self._parse_action(action_config)
if action:
atype, tag, param, value, selectors = action
else:
atype = tag = param = value = selectors = None
action_elements = []
if tag == "window":
action_elements.extend(self._gen_window_mngr(atype, param))
elif atype == "switchframe":
action_elements.extend(self._gen_frame_mngr(tag, param))
elif atype in self.ACTION_CHAINS or atype == "drag":
action_elements.extend(self._gen_chain_mngr(atype, selectors))
elif atype == "select":
action_elements.extend(self._gen_select_mngr(param, selectors))
elif atype == 'assertdialog':
action_elements.extend(self._gen_assert_dialog(param, value))
elif atype == 'answerdialog':
action_elements.extend(self._gen_answer_dialog(param, value))
elif atype is not None and (atype.startswith("assert") or atype.startswith("store")):
action_elements.extend(self._gen_assert_store_mngr(atype, tag, param, value, selectors))
elif atype in ("click", "type", "keys", "submit"):
action_elements.extend(self._gen_keys_mngr(atype, param, selectors))
elif atype == 'echo' and tag == 'string':
if len(param) > 0 and not selectors:
action_elements.append(ast_call(
func="print",
args=[self._gen_expr(param.strip())]))
elif atype == "script" and tag == "eval":
action_elements.append(ast_call(func=ast_attr("self.driver.execute_script"),
args=[self._gen_expr(param)]))
elif atype == "rawcode":
action_elements.append(ast.parse(param))
elif atype == 'go':
if param:
action_elements.append(ast_call(func=ast_attr("self.driver.get"),
args=[self._gen_expr(param.strip())]))
action_elements.append(self._gen_replace_dialogs())
elif atype == "editcontent":
action_elements.extend(self._gen_edit_mngr(param, selectors))
elif atype.startswith('wait'):
action_elements.extend(self._gen_wait_for(atype, param, value, selectors))
elif atype == 'pausefor':
action_elements.extend(self._gen_sleep_mngr(param))
elif atype == 'clear' and tag == 'cookies':
action_elements.append(ast_call(
func=ast_attr("self.driver.delete_all_cookies")))
elif atype == 'screenshot':
action_elements.extend(self._gen_screenshot_mngr(param))
elif atype == 'alert':
action_elements.extend(self._gen_alert(param))
elif atype == 'if':
action_elements.append(self._gen_condition_mngr(param, action_config))
elif atype == 'loop':
action_elements.append(self._gen_loop_mngr(action_config))
elif atype == 'foreach':
action_elements.append(self._gen_foreach_mngr(action_config))
if not action_elements and not self.ignore_unknown_actions:
raise TaurusInternalException("Could not build code for action: %s" % action_config)
return [ast.Expr(element) for element in action_elements]
def _gen_foreach_mngr(self, action_config):
self.selenium_extras.add("get_elements")
exc = TaurusConfigError("Foreach loop must contain locators and do")
elements = []
locators = action_config.get('locators', exc)
body = []
for action in action_config.get('do', exc):
body = body + self._gen_action(action)
body_list = []
# filter out empty AST expressions that cause empty lines in the generated code
for item in body:
if isinstance(item.value, list):
if len(item.value) > 0:
body_list.append(item)
else:
body_list.append(item)
elements.append(self._gen_get_elements_call("elements", locators))
elements.append(
ast.For(target=ast.Name(id=action_config.get('foreach'), ctx=ast.Store()), iter=ast.Name(id="elements"),
body=body_list,
orelse=[]))
return elements
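# For illustration (the locator value is hypothetical): a foreach action with
# foreach="el" roughly generates
#
#     elements = get_elements([{'css': '.result-row'}])
#     for el in elements:
#         ...nested actions referencing el...
#
# where get_elements is provided by the selenium extras added above.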
def _gen_wait_for(self, atype, param, value, selectors):
self.selenium_extras.add("wait_for")
supported_conds = ["present", "visible", "clickable", "notpresent", "notvisible", "notclickable"]
if not atype.endswith("for"):
self.log.warning("Wait command is deprecated and will be removed soon. Use waitFor instead.")
exc = TaurusConfigError("wait action requires timeout in scenario: \n%s" % self.scenario)
timeout = dehumanize_time(self.scenario.get("timeout", exc))
if not param:
param = "present"
else:
if not value:
value = 10 # if timeout value is not present set it by default to 10s
timeout = dehumanize_time(value)
if param.lower() not in supported_conds:
raise TaurusConfigError("Invalid condition in %s: '%s'. Supported conditions are: %s." %
(atype, param, ", ".join(supported_conds)))
return [ast_call(func="wait_for",
args=[ast.Str(param),
ast.List(elts=self._gen_ast_locators_dict(selectors)),
ast.Num(timeout)])]
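# For illustration (the selector is hypothetical): with param="visible", a single
# CSS locator and value="5s", the AST built above corresponds to generated code
# along the lines of
#
#     wait_for('visible', [{'css': '#search'}], 5.0)
#
# where wait_for is supplied by the selenium extras.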
def _gen_answer_dialog(self, type, value):
if type not in ['alert', 'prompt', 'confirm']:
raise TaurusConfigError("answerDialog type must be one of the following: 'alert', 'prompt' or 'confirm'")
if type == 'confirm' and str(value).lower() not in ['#ok', '#cancel']:
raise TaurusConfigError("answerDialog of type confirm must have value either '#Ok' or '#Cancel'")
if type == 'alert' and str(value).lower() != '#ok':
raise TaurusConfigError("answerDialog of type alert must have value '#Ok'")
dlg_method = "dialogs_answer_on_next_%s" % type
self.selenium_extras.add(dlg_method)
return [ast_call(func=ast_attr(dlg_method), args=[ast.Str(value)])]
def _gen_assert_dialog(self, type, value):
if type not in ['alert', 'prompt', 'confirm']:
raise TaurusConfigError("assertDialog type must be one of the following: 'alert', 'prompt' or 'confirm'")
elements = []
dlg_method = "dialogs_get_next_%s" % type
self.selenium_extras.add(dlg_method)
elements.append(ast.Assign(targets=[ast.Name(id='dialog', ctx=ast.Store())],
value=ast_call(
func=ast_attr(dlg_method))))
elements.append(ast_call(
func=ast_attr("self.assertIsNotNone"),
args=[ast.Name(id='dialog'), ast.Str("No dialog of type %s appeared" % type)]))
elements.append(ast_call(
func=ast_attr("self.assertEqual"),
args=[ast.Name(id='dialog'), ast.Str(value), ast.Str("Dialog message didn't match")]))
return elements
def _gen_replace_dialogs(self):
"""
Generates the call to DialogsManager to replace dialogs
"""
method = "dialogs_replace"
self.selenium_extras.add(method)
return [
gen_empty_line_stmt(),
ast_call(
func=ast_attr(method))
]
def _gen_loop_mngr(self, action_config):
extra_method = "get_loop_range"
self.selenium_extras.add(extra_method)
exc = TaurusConfigError("Loop must contain start, end and do")
start = action_config.get('start', exc)
end = action_config.get('end', exc)
step = action_config.get('step') or 1
elements = []
body = [
ast.Assign(
targets=[self._gen_expr("${%s}" % action_config['loop'])],
value=ast_call(func=ast_attr("str"), args=[ast.Name(id=action_config['loop'])]))
]
for action in action_config.get('do', exc):
body.append(self._gen_action(action))
range_args = [self.expr_compiler.gen_expr(start),
self.expr_compiler.gen_expr(end),
self.expr_compiler.gen_expr(step)]
elements.append(
ast.For(target=ast.Name(id=action_config.get('loop'),
ctx=ast.Store()),
iter=ast_call(func=ast_attr(extra_method),
args=range_args),
body=body,
orelse=[]))
return elements
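# For illustration: a loop action with loop="i", start=1, end=5 and step=1 roughly
# generates
#
#     for i in get_loop_range(1, 5, 1):
#         self.vars['i'] = str(i)
#         ...nested actions...
#
# assuming the expression compiler renders "${i}" as self.vars['i'], which is how
# generated apiritif scripts usually store variables.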
def _gen_eval_js_expression(self, js_expr):
return ast_call(func=ast_attr("self.driver.execute_script"), args=[self._gen_expr("return %s;" % js_expr)])
def _gen_condition_mngr(self, param, action_config):
if not action_config.get('then'):
raise TaurusConfigError("Missing then branch in if statement")
test = ast.Assign(targets=[ast.Name(id='test', ctx=ast.Store())],
value=self._gen_eval_js_expression(param))
body = []
for action in action_config.get('then'):
body.append(self._gen_action(action))
orelse = []
if action_config.get('else'):
for action in action_config.get('else'):
orelse.append(self._gen_action(action))
return [test,
[ast.If(
test=[ast.Name(id='test')],
body=body,
orelse=orelse)]]
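# For illustration (the JS expression is hypothetical): an "if" action roughly
# generates
#
#     test = self.driver.execute_script('return document.readyState == "complete";')
#     if test:
#         ...then-actions...
#     else:
#         ...else-actions...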
def _check_platform(self):
mobile_browsers = ["chrome", "safari"]
mobile_platforms = ["android", "ios"]
browser = self.capabilities.get("browserName", "")
browser = self.scenario.get("browser", browser)
browser = browser.lower()  # TODO: should we take the browser name as-is (without lowercasing)?
browser_platform = None
if browser:
browser_split = browser.split("-")
browser = browser_split[0]
browsers = ["firefox", "chrome", "ie", "opera"] + mobile_browsers
if browser not in browsers:
raise TaurusConfigError("Unsupported browser name: %s" % browser)
if len(browser_split) > 1:
browser_platform = browser_split[1]
if self.remote_address:
if browser and browser != "remote":
msg = "Forcing browser to Remote, because of remote WebDriver address, use '%s' as browserName"
self.log.warning(msg % browser)
self.capabilities["browserName"] = browser
browser = "remote"
if self.generate_markers is None: # if not set by user - set to true
self.generate_markers = True
elif browser in mobile_browsers and browser_platform in mobile_platforms:
self.appium = True
self.remote_address = "http://localhost:4723/wd/hub"
self.capabilities["platformName"] = browser_platform
self.capabilities["browserName"] = browser
browser = "remote" # Force to use remote web driver
elif not browser:
browser = "firefox"
return browser
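# For illustration: a scenario browser of "chrome-android" splits into
# browser="chrome" and browser_platform="android"; with no remote address
# configured this switches on Appium mode, points the remote address at
# http://localhost:4723/wd/hub, sets the platformName/browserName capabilities
# and forces the "remote" web driver.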
def _get_scenario_timeout(self):
return dehumanize_time(self.scenario.get("timeout", "30s"))
def _gen_webdriver(self):
self.log.debug("Generating setUp test method")
browser = self._check_platform()
headless = self.scenario.get("headless", False)
headless_setup = []
if headless:
self.log.info("Headless mode works only with Selenium 3.8.0+, be sure to have it installed")
headless_setup = [ast.Expr(
ast_call(func=ast_attr("options.set_headless")))]
body = [ast.Assign(targets=[ast_attr("self.driver")], value=ast_attr("None"))]
if browser == 'firefox':
body.append(ast.Assign(
targets=[ast.Name(id="options")],
value=ast_call(
func=ast_attr("webdriver.FirefoxOptions"))))
body.extend(headless_setup)
body.append(ast.Assign(
targets=[ast.Name(id="profile")],
value=ast_call(func=ast_attr("webdriver.FirefoxProfile"))))
body.append(ast.Expr(
ast_call(
func=ast_attr("profile.set_preference"),
args=[ast.Str("webdriver.log.file"), ast.Str(self.wdlog)])))
body.append(ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.Firefox"),
args=[ast.Name(id="profile")],
keywords=[ast.keyword(
arg="firefox_options",
value=ast.Name(id="options"))])))
elif browser == 'chrome':
body.append(ast.Assign(
targets=[ast.Name(id="options")],
value=ast_call(
func=ast_attr("webdriver.ChromeOptions"))))
body.append(ast.Expr(
ast_call(
func=ast_attr("options.add_argument"),
args=[ast.Str("%s" % "--no-sandbox")]
)))
body.append(ast.Expr(
ast_call(
func=ast_attr("options.add_argument"),
args=[ast.Str("%s" % "--disable-dev-shm-usage")]
)))
body.extend(headless_setup)
body.append(ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.Chrome"),
keywords=[
ast.keyword(
arg="service_log_path",
value=ast.Str(self.wdlog)),
ast.keyword(
arg="chrome_options",
value=ast.Name(id="options"))])))
elif browser == 'remote':
keys = sorted(self.capabilities.keys())
values = [str(self.capabilities[key]) for key in keys]
body.append(ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.Remote"),
keywords=[
ast.keyword(
arg="command_executor",
value=ast.Str(self.remote_address)),
ast.keyword(
arg="desired_capabilities",
value=ast.Dict(
keys=[ast.Str(key) for key in keys],
values=[ast.Str(value) for value in values]))])))
else:
if headless:
self.log.warning("Browser %r doesn't support headless mode" % browser)
body.append(ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.%s" % browser)))) # todo bring 'browser' to correct case
body.append(ast.Expr(
ast_call(
func=ast_attr("self.driver.implicitly_wait"),
| |
60 * peak.FittedIntensity * GAUSSIAN_FWHM * peak.FittedWidth / GAUSSIAN_AREA
elif peak.FittedFunction == MODEL_GAMMA:
peak.Area = 60 * 100 * peak.FittedIntensity
# init profile
raster = make_raster(peak.LeftRT, peak.RightRT, peak.FWHM)
ai = [peak.GetFullIntensityAtRT(rt) for rt in raster]
peak.Profile = tuple(zip(raster, ai))
return peak
def _fix_fitted_params(self, peak_data):
"""Corrects incorrectly stored fitted params."""
# fix params
peak_data['fitted_rt'] = peak_data['apex_rt']
peak_data['fitted_int'] = peak_data['apex_int']
# fix Gamma apex
if peak_data['fitted_function'] == MODEL_GAMMA:
peak_data['apex_rt'] = peak_data['fitted_rt'] + (peak_data['fitted_asymmetry'] - 1) / peak_data['fitted_width']
peak_data['apex_int'] = calc_gamma_ai(
x = peak_data['apex_rt'],
amplitude = peak_data['fitted_int'],
start = peak_data['fitted_rt'],
flow = peak_data['fitted_width'],
mixing = peak_data['fitted_asymmetry'])
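# Note: the Gamma apex correction above is consistent with the mode of a
# gamma-shaped peak, mode = start + (shape - 1) / rate, assuming 'mixing'
# (fitted_asymmetry) plays the role of the shape parameter and 'flow'
# (fitted_width) the rate; calc_gamma_ai is then evaluated at that point to
# recover the apex intensity.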
class PycoPeakModelParser(object):
"""
The pyeds.PycoPeakModelParser is used to parse Pyco chromatogram peak
model data into a pyeds.PycoPeakModel item.
"""
def parse(self, xml, version):
"""
Parses the given peak model XML data.
Args:
xml: str
Peak data XML.
version: int
Peak model version.
Returns:
pyeds.PycoPeakModel
Parsed peak.
"""
# check XML
if not xml:
return None
# parse peak XML
peak_elm = eTree.fromstring(xml)
peak_data = self._parse_peak_model(peak_elm)
peak_elm.clear()
# create peak
return self._make_peak(peak_data, version)
def _parse_peak_model(self, peak_elm):
"""Parser peak model element."""
# init peak data container
peak_data = {
'apex_rt': None,
'left_rt': None,
'left_base': None,
'right_rt': None,
'right_base': None,
'width': None,
'method': None,
'rt_curve': [],
'int_curve': []}
# get main values
elm = peak_elm.find('ApexRT')
if elm is not None:
peak_data['apex_rt'] = float(elm.text)
elm = peak_elm.find('LeftRT')
if elm is not None:
peak_data['left_rt'] = float(elm.text)
elm = peak_elm.find('LeftBaseline')
if elm is not None:
peak_data['left_base'] = float(elm.text)
elm = peak_elm.find('RightRT')
if elm is not None:
peak_data['right_rt'] = float(elm.text)
elm = peak_elm.find('RightBaseline')
if elm is not None:
peak_data['right_base'] = float(elm.text)
elm = peak_elm.find('Width')
if elm is not None:
peak_data['width'] = float(elm.text)
elm = peak_elm.find('Method')
if elm is not None:
peak_data['method'] = elm.text
# get RT curve
elm = peak_elm.find('CurvePointsRT')
if elm is not None:
for point in elm.iter('float'):
peak_data['rt_curve'].append(float(point.text))
# get ai curve
elm = peak_elm.find('CurvePointsIntensity')
if elm is not None:
for point in elm.iter('float'):
peak_data['int_curve'].append(float(point.text))
return peak_data
def _make_peak(self, peak_data, version):
"""Creates pyeds.PycoPeakModel object from raw data."""
# init peak
peak = PycoPeakModel()
peak.ApexRT = peak_data['apex_rt']
peak.LeftRT = peak_data['left_rt']
peak.LeftBaseline = peak_data['left_base']
peak.RightRT = peak_data['right_rt']
peak.RightBaseline = peak_data['right_base']
peak.FWHM = peak_data['width']
peak.Method = peak_data['method']
peak.Trace = tuple(zip(peak_data['rt_curve'], peak_data['int_curve']))
# init profile
ai = [peak.GetFullIntensityAtRT(rt) for rt in peak_data['rt_curve']]
peak.Profile = tuple(zip(peak_data['rt_curve'], ai))
return peak
class PeakModel(object):
"""
The pyeds.PeakModel serves as a base class for various types of peak models.
Attrs:
ApexRT: float
Apex retention time in minutes.
LeftRT: float
Left retention time in minutes.
RightRT: float
Right retention time in minutes.
LeftBaseline: float
Intensity of left baseline point.
RightBaseline: float
Intensity of right baseline point.
Profile: ((float, float),)
Peak full intensity profile as (rt, ai) points.
"""
MODEL = "Unknown"
def __init__(self):
"""Initializes a new instance of PeakModel."""
self.ApexRT = None
self.LeftRT = None
self.RightRT = None
self.LeftBaseline = 0
self.RightBaseline = 0
self.Profile = None
self.Cumulative = True
def __str__(self):
"""Gets standard string representation."""
data = "[%s] RT:%.3f (%.3f-%.3f)" % (self.MODEL, self.ApexRT, self.LeftRT, self.RightRT)
if self.LeftBaseline:
data += " LB:%.0f" % self.LeftBaseline
if self.RightBaseline:
data += " RB:%.0f" % self.RightBaseline
return data
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
def SetTrace(self, trace):
"""
Sets given trace into the peak. This is mainly used for visualization of
peaks having no model nor the own trace. It might have no effect for
other types of nodes.
Args:
trace: pyeds.Trace
Trace to set.
"""
pass
def GetFullIntensityAtRT(self, rt):
"""
Calculates total intensity (including baseline) at given retention time.
Args:
rt: float
Retention time in minutes.
Returns:
float
Peak ai at given retention time.
"""
return self.GetIntensityAtRT(rt) + self.GetBaselineAtRT(rt)
def GetIntensityAtRT(self, rt):
"""
Calculates intensity above baseline at given retention time.
Args:
rt: float
Retention time in minutes.
Returns:
float
Peak intensity above baseline at given retention time.
"""
raise NotImplementedError()
def GetBaselineAtRT(self, rt):
"""
Calculates baseline intensity at given retention time as linear
interpolation between left and right baseline points.
Args:
rt: float
Retention time in minutes.
Returns:
float
Peak baseline intensity at given retention time.
"""
# check peak range
if equals(rt, self.LeftRT):
return self.LeftBaseline
if equals(rt, self.RightRT):
return self.RightBaseline
if rt < self.LeftRT or rt > self.RightRT:
return 0
# interpolate in-between
return self.LeftBaseline + (self.RightBaseline - self.LeftBaseline) * (rt - self.LeftRT) / (self.RightRT - self.LeftRT)
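# Worked example: for a peak with LeftRT=1.0, RightRT=2.0, LeftBaseline=100 and
# RightBaseline=200, GetBaselineAtRT(1.25) returns
# 100 + (200 - 100) * (1.25 - 1.0) / (2.0 - 1.0) = 125, while any RT outside the
# [LeftRT, RightRT] range contributes a baseline of 0.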
class InterpolatedPeakModel(PeakModel):
"""
The pyeds.InterpolatedPeakModel represents a chromatographic peak model
created by trace interpolation.
Attrs:
Method: str
Interpolation method.
Trace: ((float, float),)
Peak original trace profile as (rt, ai) points. This should be the
original trace without baseline correction etc.
"""
MODEL = "Interpolated"
def __init__(self):
"""Initializes a new instance of InterpolatedPeakModel."""
super().__init__()
self.Method = None
self.Trace = None
def GetIntensityAtRT(self, rt):
"""
Calculates intensity above baseline at given retention time.
Args:
rt: float
Retention time in minutes.
Returns:
float
Peak intensity above baseline at given retention time.
"""
# interpolate linear
if self.Method == MODEL_LINEAR:
return max(0, calc_linear_ai(rt, self.Trace) - self.GetBaselineAtRT(rt))
raise ValueError("Unknown interpolation method!")
class GaussianPeakModel(PeakModel):
"""
The pyeds.GaussianPeakModel represents a chromatographic peak model created
by Gaussian shape simulation.
Attrs:
FWHM: float
Peak width at half maximum in minutes.
Area: float
Peak area in seconds*counts.
ApexIntensity: float
Apex intensity above baseline.
"""
MODEL = "Gaussian"
def __init__(self):
"""Initializes a new instance of GaussianPeakModel."""
super().__init__()
self.ApexIntensity = None
self.FWHM = None
self.Area = None
def __str__(self):
"""Gets standard string representation."""
data = super().__str__()
if self.FWHM:
data += " FWHM:%.3f" % self.FWHM
if self.Area:
data += " Area:%.0f" % self.Area
if self.ApexIntensity:
data += " Int:%.0f" % self.ApexIntensity
return data
def GetIntensityAtRT(self, rt):
"""
Calculates intensity above baseline at given retention time.
Args:
rt: float
Retention time in minutes.
Returns:
float
Peak intensity above baseline at given retention time.
"""
width = self.FWHM / GAUSSIAN_FWHM
return calc_gaussian_ai(rt, self.ApexIntensity, self.ApexRT, width)
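# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of building a Gaussian peak and sampling its full profile.
# It assumes the module-level names GAUSSIAN_FWHM, calc_gaussian_ai and equals
# used by the methods above are defined elsewhere in this module; the numbers
# are hypothetical.
def _demo_gaussian_peak_profile():
    """Return a (rt, ai) profile for a synthetic Gaussian peak."""
    peak = GaussianPeakModel()
    peak.ApexRT = 5.0
    peak.LeftRT = 4.8
    peak.RightRT = 5.2
    peak.LeftBaseline = 100.0
    peak.RightBaseline = 120.0
    peak.ApexIntensity = 1e6
    peak.FWHM = 0.05
    rts = [4.8 + i * 0.01 for i in range(41)]
    return [(rt, peak.GetFullIntensityAtRT(rt)) for rt in rts]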
class PPDPeakModel(PeakModel):
"""
The pyeds.PPDPeakModel represents a chromatographic peak model used by PPD
peak detection algorithm.
Attrs:
FWHM: float
Peak width at half maximum in minutes.
Area: float
Peak area in seconds*counts.
ApexIntensity: float
Apex intensity above baseline.
"""
MODEL = "PPD"
def __init__(self):
"""Initializes a new instance of PPDPeakModel."""
super().__init__()
self.ApexIntensity = None
self.FWHM = None
self.Area = None
self.FittedFunction = None
self.FittedRT = None
self.FittedIntensity = None
self.FittedWidth = None
self.FittedAsymmetry = None
self.MergedPeaks = ()
def __str__(self):
"""Gets standard string representation."""
data = super().__str__()
if self.FWHM:
data += " FWHM:%.3f" % self.FWHM
if self.Area:
data += " Area:%.0f" % self.Area
if self.ApexIntensity:
data += " Int:%.0f" % self.ApexIntensity
if self.MergedPeaks:
for peak in self.MergedPeaks:
data += "\n\t%s" % peak
elif self.FittedFunction == MODEL_GAUSS:
data += " (%s" % self.FittedFunction.upper()
data += " Center:%.3f" % self.FittedRT
data += " Amp:%.0f" % self.FittedIntensity
data += " Width:%.3f" % self.FittedWidth
data += ")"
elif self.FittedFunction == MODEL_GAMMA:
data += " (%s" % self.FittedFunction.upper()
data += " Start:%.3f" % self.FittedRT
data += " Amp:%.3f" % self.FittedIntensity
data += " Flow:%.3f" % self.FittedWidth
data += " Mix:%.3f" % self.FittedAsymmetry
data += ")"
return data
def GetIntensityAtRT(self, rt):
"""
Calculates intensity above baseline at given retention time.
Args:
rt: float
Retention time in minutes.
Returns:
float
Peak intensity above baseline at given retention time.
# Copyright (c) 2008-2011, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from numpy import *
from numpy.random import rand, random_sample
from scipy.maxentropy.maxentutils import logsumexp
import logging
import time
from pylab import * # DEBUG
import sys
from utils import *
from model import *
### RESAMPLING SCHEMES
def multinomial_resampling(weights):
"""Multinomial resampling of the given weights. The counts for each class
are simply drawn from a multinomial distribution with the given weights.
"""
return counts_to_index(rmultinomial(weights,len(weights)))
def residual_resampling(weights):
"""Residual resampling. The counts in each bin are floor(w*N) + N' where
N' is sampled from a multinomial with the residual weights."""
N = weights.shape[0]
counts = floor(weights*N)
R = int(sum(counts))
new_weights = (weights*N - counts)/(N-R)
counts += rmultinomial(new_weights,N-R)
return counts_to_index(array(counts,dtype=int32))
def stratified_resampling(weights):
N = weights.shape[0]
# obtain u_i drawn from U(i/N,(i+1)/N)
us = 1./N*arange(N) + 1./N*rand(N)
return inverseCDF(cumsum(weights),us)
def systematic_resampling(weights):
N = weights.shape[0]
u = 1./N*rand(N)
us = arange(N,dtype=double)/N+u
return inverseCDF(cumsum(weights),us)
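# --- Illustrative example (not part of the original module) ---
# A self-contained sketch of the idea behind systematic resampling, using only
# numpy (cumsum + searchsorted) instead of the inverseCDF helper imported from
# utils. It is an illustration of the scheme, not a drop-in replacement for the
# functions above.
def _demo_systematic_resampling(weights):
    """Return parent indices chosen with evenly spaced points and one random offset."""
    N = weights.shape[0]
    c = cumsum(weights)
    c = c / c[-1]                        # normalize in case weights do not sum to one
    positions = (arange(N) + rand()) / N  # evenly spaced points, single random shift
    return searchsorted(c, positions)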
class InferenceParams(object):
def __init__(self,rho,alpha,p_uniform_deletion,r_abs):
self.rho = rho
self.alpha = alpha
self.p_uniform_deletion = p_uniform_deletion
self.r_abs = r_abs
def __str__(self):
out = []
out.append('Inference parameters:')
out.append('rho: ' + str(self.rho))
out.append('alpha: ' + str(self.alpha))
out.append('p_uniform_deletion: ' + str(self.p_uniform_deletion))
out.append('r_abs: ' + str(self.r_abs))
return '\n'.join(out)
class Inference:
pass
class ParticleFilter(Inference):
def __init__(self,model,data,data_time,params,num_particles,
storage_class=FixedSizeStoreRing,
max_clusters = 100,
resample_fun=multinomial_resampling,
before_resampling_callback=noop):
self.model = model
self.data = data
self.data_time = data_time
self.num_particles = num_particles
self.resample_fun = resample_fun
self.params = params
self.before_resampling_callback = before_resampling_callback
self.T = data.shape[1]
self.particles = empty(num_particles,dtype=object)
for i in range(num_particles):
self.particles[i] = Particle(self.T,None,storage_class,max_clusters)
self.weights = ones(num_particles)/float(num_particles)
self.effective_sample_size = zeros(self.T)
self.filtering_entropy = zeros(self.T)
self.current_entropy = zeros(num_particles)
self.unique_particles = zeros(self.T,dtype=uint32)
self.__check()
def __check(self):
"""Check whether dimensions of parameters and data are consistent."""
if not len(self.data.shape) == 2:
raise ValueError, \
"Data should be a 2D array with data points as columns!"
if not self.model.dims == self.data.shape[0]:
raise ValueError, "Model dimension does not match data dimension: "+\
str(self.model.dims) + " != " + str(self.data.shape[0])
def run(self):
for t in range(self.T):
start_t = time.time()
logging.info('t = ' + str(t) + '/' + str(self.T))
x = self.data[:,t]
tau = self.data_time[t]
# the probability under the prior is the same for all particles
p_prior = self.model.p_prior(x)
self.model.set_data(x);
# move particles forward
for n in range(self.num_particles):
p = self.particles[n]
# perform deletion step / compute new cluster sizes {{{
if t > 0:
p.mstore.copy(t-1,t)
p.lastspike.copy(t-1,t)
m = p.mstore.get_array(t)
old_zero = m == 0;
if rand() < self.params.p_uniform_deletion: # uniform deletion
# TODO: Speed this up by sampling only from surviving allocations
U = random_sample(p.c.shape);
# delete from alive allocations with prob. 1-p.rho
# We assume that for non-assigned x we have c<0
idx = logical_and(logical_and(U<1-self.params.rho,p.d>=t), p.c>=0)
else: # size-biased deletion
i = rdiscrete(m/float(sum(m)),1)
idx = logical_and(logical_and(p.c == i, p.d>=t), p.c >= 0)
p.d[idx] = t
# compute current alive cluster sizes p.m; TODO: vectorize this?
for k in range(p.K):
nm = sum(logical_and(p.c[0:t] == k,p.d[0:t]>t))
m[k] = nm
p.mstore.set(t,k,nm)
new_zero = m == 0;
died = logical_and(new_zero,logical_not(old_zero)).nonzero()[0]
for d in died:
p.deathtime[d] = t
else:
m = array([],dtype=int32)
### sample new labels for all data points in data {{{
# We use q(c_t|m,U,z) = p(z_k|U) x p(c|m) as proposal distribution,
# i.e. the product of the CRP and the probability of the data point under
# that class assignment with the current class parameters
# (or the prior if we generate a new class).
active_idx = m>0
active = where(active_idx)[0]
# number of clusters before we see new data
Kt = len(active)
# Generalized Polya Urn / CRP
p_crp = hstack((m[active_idx],self.params.alpha))
p_crp = p_crp/sum(p_crp)
# Vector for storing the likelihood values
p_lik = zeros(Kt+1);
# compute probability of data point under all old clusters
for i in range(Kt):
isi = self.data_time[t] - p.lastspike.get(t,active[i])
if isi < self.params.r_abs:
p_crp[i] = 0
p_lik[i] = self.model.p_likelihood(x,p.U.get(t-1,active[i]))
# likelihood for new cluster
p_lik[Kt] = p_prior
# proposal distribution: CRP x likelihood
q = p_crp * p_lik
# normalize to get a proper distribution
q = q / sum(q)
self.current_entropy[n] = entropy(q)
# sample a new label from the discrete distribution q
c = rdiscrete(q,1)[0]
Z_qc = p_crp[c]/q[c]
# update data structures if we propose a new cluster
if c == Kt:
# set birthtime of cluster K to the current time
p.birthtime[p.K] = t
active = hstack((active,p.K))
p.mstore.append(t,0)
p.lastspike.append(t,0)
# update number-of-clusters counts
p.K += 1
active_c = active[c]
p.mstore.set(t,active_c,p.mstore.get(t,active_c)+1)
# assign data point to cluster
p.c[t] = active_c
p.lastspike.set(t,active_c,self.data_time[t])
### sample parameters U for all alive clusters {{{
#
# This samples from q(U|...), for each of the three conditions:
# - new cluster created at this time step
# - sample from prior updated with the data from this time step
# - old cluster, and data assigned to it in this time step
# - sample from distribution given old value and new data
# - old cluster, but no new data assigned to it
# - sample from transition kernel
#
pU_U = ones(Kt)
qU_Uz = ones(Kt)
p.U.copy(t-1,t)
qU_z = 1
G0 = 1
p_ratio = 1
for i in range(len(active)): # for all active clusters
cabs = active[i]
if i >= Kt: # cluster newly created at this time step
new_params = self.model.sample_posterior()
p.U.append(t,new_params)
# compute probability of this sample for use in weight
qU_z = self.model.p_posterior(new_params)
# compute probability of this sample under G_0
G0 = self.model.p_prior_params(new_params)
else: # old cluster
if cabs == c: # new data associated with cluster at this time step
(new_params,p_ratio) = self.model.walk_with_data(
p.U.get(t,cabs),x)
else: # no new data
new_params = self.model.walk(p.U.get(t,cabs))
p.U.set(t,cabs,new_params)
# %%% compute incremental weight for this update step {{{
# %
# % The weight is computed from the following components:
# % - prod(Z_qc) -- the normalizer of q(c|m,...); first line of (9)
# % - prod(G0) -- G0(U); num. of third line of (9)
# % - prod(qU_z) -- q(U|{z|c=k}); denom. of third line of (9)
# % - prod(pU_U) -- p(U|U_old); num. of second line of (9)
# % - prod(qU_Uz)-- q(U|U_old,z); denom. of second line of (9)
#
# w_inc = prod(Z_qc).*prod(pU_U)./prod(qU_Uz).*prod(G0)./prod(qU_z);
# compute probability of current data point under new parameters
pz_U = self.model.p_likelihood(x,p.U.get(t,active_c))
w_inc = pz_U*Z_qc*G0*p_ratio/qU_z
# print pz_U,Z_qc,G0,p_ratio,qU_z
# print pz_U*G0/qU_z
# print w_inc
self.weights[n] *= w_inc
#
# if isnan(w_inc) % bad weight -- can happen if underflow occurs
# w_inc = 0; % discard the particle by giving it weight 0
# end
### resample
# normalize weights
self.weights = self.weights / sum(self.weights)
Neff = 1/sum(self.weights**2)
self.effective_sample_size[t] = Neff
self.filtering_entropy[t] = mean(self.current_entropy)
self.before_resampling_callback(self,t)
self.unique_particles[t] = self.num_particles
# resample if Neff too small or last time step
if (Neff < (self.num_particles / 2.)) or (t == self.T-1):
resampled_indices = self.resample_fun(self.weights)
self.unique_particles[t] = unique(resampled_indices).shape[0]
# assume weights are uniform after resampling
# File: pyiron_atomistics/dft/waves/electronic.py (repository: dgehringer/pyiron_atomistics)
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import print_function
import numpy as np
from pyiron_atomistics.atomistics.structure.atoms import Atoms
from pyiron_atomistics.dft.waves.dos import Dos
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH "
"- Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Sep 1, 2017"
class ElectronicStructure(object):
"""
This is a generic module to store electronic structure data in a clean way. Kpoint and Band classes are used to
store information related to kpoints and bands respectively. Every spin configuration has a set of k-points and
every k-point has a set of bands associated with it. This is loosely adapted from the `pymatgen electronic_structure
modules`_. Many of the functions have been substantially modified for pyiron.
.. _pymatgen electronic_structure modules: http://pymatgen.org/pymatgen.electronic_structure.bandstructure.html
"""
def __init__(self):
self.kpoints = list()
self._eigenvalues = list()
self._occupancies = list()
self._dos_energies = list()
self._dos_densities = list()
self._dos_idensities = list()
self._eg = None
self._vbm = None
self._cbm = None
self._efermi = None
self._eigenvalue_matrix = None
self._occupancy_matrix = None
self._grand_dos_matrix = None
self._resolved_densities = None
self._kpoint_list = list()
self._kpoint_weights = list()
self.n_spins = 1
self._structure = None
self._orbital_dict = None
self._output_dict = {}
def add_kpoint(self, value, weight):
"""
Appends a Kpoint() instance to the kpoints attribute
Args:
value (list/numpy.ndarray): Value of the k-point in cartesian reciprocal coordinates
weight (float): The weight of the particular k-point
"""
kpt_obj = Kpoint()
kpt_obj.value = value
kpt_obj.weight = weight
self.kpoints.append(kpt_obj)
def get_dos(self, n_bins=100):
"""
Gives a pyiron_atomistics.objects.waves.dos.Dos instance
Args:
n_bins (int): Number of histogram bins for the dos
Returns:
pyiron_atomistics.objects.waves.dos.Dos: Dos instance
"""
dos_obj = Dos(n_bins=n_bins, es_obj=self)
return dos_obj
@property
def dos_energies(self):
"""
numpy.ndarray: A (1xN) vector containing the energies with N grid points
"""
return self._dos_energies
@dos_energies.setter
def dos_energies(self, val):
self._dos_energies = val
@property
def dos_densities(self):
"""
numpy.ndarray: A (SxN) vector containing the density of states for every spin configuration with S spin
configurations and N grid points
"""
return self._dos_densities
@dos_densities.setter
def dos_densities(self, val):
self._dos_densities = val
@property
def dos_idensities(self):
"""
numpy.ndarray: A (SxN) vector containing the integrated density of states for every spin
configuration with S spin configurations and N grid points
"""
return self._dos_idensities
@dos_idensities.setter
def dos_idensities(self, val):
self._dos_idensities = val
@property
def resolved_densities(self):
"""
numpy.ndarray: A (SxAxOxN) vector containing the density of states for every spin configuration with S spin
configurations, A atoms, O orbitals and N grid points. The labels of the orbitals are found on
the orbital_dict
"""
return self._resolved_densities
@resolved_densities.setter
def resolved_densities(self, val):
self._resolved_densities = val
@property
def orbital_dict(self):
"""
dict: A dictionary of the ordering of the orbitals
Examples:
>>> self.orbital_dict[0]
's'
"""
return self._orbital_dict
@orbital_dict.setter
def orbital_dict(self, val):
self._orbital_dict = val
@property
def eigenvalues(self):
"""
Returns the eigenvalues for each spin value
numpy.ndarray: Eigenvalues of the bands
"""
return np.array([val.reshape(-1) for val in self._eigenvalue_matrix])
@property
def occupancies(self):
"""
Returns the occupancies for each spin value
numpy.ndarray: Occupancies of the bands
"""
return np.array([val.reshape(-1) for val in self._occupancy_matrix])
@property
def eigenvalue_matrix(self):
"""
numpy.ndarray: A getter function to return the eigenvalue_matrix. The eigenvalue for a given kpoint index i and
band index j is given by eigenvalue_matrix[i][j]
"""
if self._eigenvalue_matrix is None and len(self.kpoints) > 0:
self._eigenvalue_matrix = np.zeros(
(len(self.kpoints), len(self.kpoints[0].bands))
)
for i, k in enumerate(self.kpoints):
self._eigenvalue_matrix[i, :] = k.eig_occ_matrix[:, 0]
return self._eigenvalue_matrix
@eigenvalue_matrix.setter
def eigenvalue_matrix(self, val):
self._eigenvalue_matrix = val
@property
def occupancy_matrix(self):
"""
numpy.ndarray: A getter function to return the occupancy_matrix. The occupancy for a given kpoint index i and
band index j is given by occupancy_matrix[i][j]
"""
if self._occupancy_matrix is None and len(self.kpoints) > 0:
self._occupancy_matrix = np.zeros(
(len(self.kpoints), len(self.kpoints[0].bands))
)
for i, k in enumerate(self.kpoints):
self._occupancy_matrix[i, :] = k.eig_occ_matrix[:, 1]
return self._occupancy_matrix
@occupancy_matrix.setter
def occupancy_matrix(self, val):
self._occupancy_matrix = val
@property
def kpoint_list(self):
"""
list: The list of kpoints in cartesian coordinates
"""
if len(self._kpoint_list) == 0:
kpt_lst = list()
for k in self.kpoints:
kpt_lst.append(k.value)
self._kpoint_list = kpt_lst
return self._kpoint_list
@kpoint_list.setter
def kpoint_list(self, val):
self._kpoint_list = val
@property
def kpoint_weights(self):
"""
list: The weights of the kpoints of the electronic structure in cartesian coordinates
"""
if len(self._kpoint_weights) == 0:
kpt_lst = list()
for k in self.kpoints:
kpt_lst.append(k.weight)
self._kpoint_weights = kpt_lst
return self._kpoint_weights
@kpoint_weights.setter
def kpoint_weights(self, val):
self._kpoint_weights = val
@property
def structure(self):
"""
atomistics.structure.atoms.Atoms: The structure associated with the electronic structure object
"""
return self._structure
@structure.setter
def structure(self, val):
self._structure = val
def get_vbm(self, resolution=1e-6):
"""
Gets the valence band maximum (VBM) of the system for each spin value
Args:
resolution (float): An occupancy below this value is considered unoccupied
Returns:
dict:
"value" (float): Absolute energy value of the VBM (eV)
"kpoint": The Kpoint instance associated with the VBM
"band": The Band instance associated with the VBM
"""
vbm_spin_dict = dict()
n_spins = len(self._eigenvalue_matrix)
for spin in range(n_spins):
vbm = None
vbm_spin_dict[spin] = dict()
vbm_dict = dict()
for kpt in self.kpoints:
for band in kpt.bands[spin]:
if band.occupancy > resolution:
if vbm is None:
vbm = band.eigenvalue
vbm_dict["value"] = vbm
vbm_dict["kpoint"] = kpt
vbm_dict["band"] = band
else:
if band.eigenvalue > vbm:
vbm = band.eigenvalue
vbm_dict["value"] = vbm
vbm_dict["kpoint"] = kpt
vbm_dict["band"] = band
vbm_spin_dict[spin] = vbm_dict
return vbm_spin_dict
def get_cbm(self, resolution=1e-6):
"""
Gets the conduction band minimum (CBM) of the system for each spin value
Args:
resolution (float): An occupancy above this value is considered occupied
Returns:
dict:
"value" (float): Absolute energy value of the CBM (eV)
"kpoint": The Kpoint instance associated with the CBM
"band": The Band instance associated with the CBM
"""
cbm_spin_dict = dict()
n_spins = len(self._eigenvalue_matrix)
for spin in range(n_spins):
cbm = None
cbm_spin_dict[spin] = dict()
cbm_dict = dict()
for kpt in self.kpoints:
for band in kpt.bands[spin]:
if band.occupancy <= resolution:
if cbm is None:
cbm = band.eigenvalue
cbm_dict["value"] = cbm
cbm_dict["kpoint"] = kpt
cbm_dict["band"] = band
else:
if band.eigenvalue < cbm:
cbm = band.eigenvalue
cbm_dict["value"] = cbm
cbm_dict["kpoint"] = kpt
cbm_dict["band"] = band
cbm_spin_dict[spin] = cbm_dict
return cbm_spin_dict
def get_band_gap(self, resolution=1e-6):
"""
Gets the band gap of the system for each spin value
Args:
resolution (float): An occupancy above this value is considered occupied
Returns:
dict:
"band gap" (float): The band gap (eV)
"vbm": The dictionary associated with the VBM
"cbm": The dictionary associated with the CBM
"""
gap_dict = {}
vbm_spin_dict = self.get_vbm(resolution)
cbm_spin_dict = self.get_cbm(resolution)
for spin, vbm_dict in vbm_spin_dict.items():
gap_dict[spin] = dict()
vbm = vbm_dict["value"]
cbm = cbm_spin_dict[spin]["value"]
gap_dict[spin]["band_gap"] = max(0.0, cbm - vbm)
gap_dict[spin]["vbm"] = vbm_dict
gap_dict[spin]["cbm"] = cbm_spin_dict[spin]
return gap_dict
@property
def eg(self):
"""
The band gap for each spin channel
Returns:
list: list of band gap values for each spin channel
"""
self._eg = [val["band_gap"] for val in self.get_band_gap().values()]
return self._eg
@eg.setter
def eg(self, val):
self._eg = val
@property
def vbm(self):
"""
The Kohn-Sham valence band maximum for each spin channel
Returns:
list: list of valence band maximum values for each spin channel
"""
self._vbm = [val["value"] for val in self.get_vbm().values()]
return self._vbm
@vbm.setter
def vbm(self, val):
self._vbm = val
@property
def cbm(self):
"""
The Kohn-Sham conduction band minimum for each spin channel
Returns:
list: list of conduction band minimum values for each spin channel
"""
self._cbm = [val["value"] for val in self.get_cbm().values()]
return self._cbm
@cbm.setter
def cbm(self, val):
self._cbm = val
@property
def efermi(self):
"""
float: The Fermi-level of the system (eV). Please note that in the case of DFT this level is the Kohn-Sham Fermi
level computed by the DFT code.
"""
return self._efermi
@efermi.setter
def efermi(self, val):
self._efermi = val
@property
def is_metal(self):
"""
Tells whether the given system is metallic or not in each spin channel (up and down respectively).
The Fermi level crosses bands in the case of metals but lies within the band gap in the
case of semiconductors.
Returns:
list: List of boolean values for each spin channel
"""
if not (self._efermi is not None):
False,
provider = None,
test = False,
_processed = None
):
''' Get installed components using "provider" to find (and possibly
install) components.
This function is called with different provider functions in order
to retrieve a list of all of the dependencies, or install all
dependencies.
Returns
=======
(components, errors)
components: dictionary of name:Component
errors: sequence of errors
Parameters
==========
available_components:
None (default) or a dictionary of name:component. This is
searched before searching directories or fetching remote
components
search_dirs:
None (default), or sequence of directories to search for
already installed, (but not yet loaded) components. Used so
that manually installed or linked components higher up the
dependency tree are found by their users lower down.
These directories are searched in order, and finally the
current directory is checked.
target:
None (default), or a Target object. If specified, the target
name and its similarTo list will be used in resolving
dependencies. If None, then only target-independent
dependencies will be installed
traverse_links:
False (default) or True: whether to recurse into linked
dependencies. You normally want to set this to "True" when
getting a list of dependencies, and False when installing
them (unless the user has explicitly asked dependencies to
be installed in linked components).
provider: None (default) or function:
provider(
dependency_spec,
available_components,
search_dirs,
working_directory,
update_if_installed
)
test:
True, False, 'toplevel': should test-only dependencies be
included (yes, no, or only at this level, not recursively)
'''
def recursionFilter(c):
if not c:
logger.debug('do not recurse into failed component')
# don't recurse into failed components
return False
if c.getName() in _processed:
logger.debug('do not recurse into already processed component: %s' % c)
return False
if c.installedLinked() and not traverse_links:
return False
return True
available_components = self.ensureOrderedDict(available_components)
if search_dirs is None:
search_dirs = []
if _processed is None:
_processed = set()
assert(test in [True, False, 'toplevel'])
search_dirs.append(self.modulesPath())
logger.debug('process %s\nsearch dirs:%s' % (self.getName(), search_dirs))
if self.isTestDependency():
logger.debug("won't provide test dependencies recursively for test dependency %s", self.getName())
test = False
components, errors = self.__getDependenciesWithProvider(
available_components = available_components,
search_dirs = search_dirs,
update_installed = update_installed,
target = target,
provider = provider,
test = test
)
_processed.add(self.getName())
if errors:
errors = ['Failed to satisfy dependencies of %s:' % self.path] + errors
need_recursion = [x for x in filter(recursionFilter, components.values())]
available_components.update(components)
logger.debug('processed %s\nneed recursion: %s\navailable:%s\nsearch dirs:%s' % (self.getName(), need_recursion, available_components, search_dirs))
if test == 'toplevel':
test = False
# NB: can't perform this step in parallel, since the available
# components list must be updated in order
for c in need_recursion:
dep_components, dep_errors = c.__getDependenciesRecursiveWithProvider(
available_components = available_components,
search_dirs = search_dirs,
target = target,
traverse_links = traverse_links,
update_installed = update_installed,
provider = provider,
test = test,
_processed = _processed
)
available_components.update(dep_components)
components.update(dep_components)
errors += dep_errors
return (components, errors)
def provideInstalled(self,
dspec,
available_components,
search_dirs,
working_directory,
update_installed,
dep_of
):
#logger.info('%s provideInstalled: %s', dep_of.getName(), dspec.name)
r = access.satisfyFromAvailable(dspec.name, available_components)
if r:
if r.isTestDependency() and not dspec.is_test_dependency:
logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
r.setTestDependency(False)
return r
update_if_installed = False
if update_installed is True:
update_if_installed = True
elif update_installed:
update_if_installed = dspec.name in update_installed
r = access.satisfyVersionFromSearchPaths(
dspec.name,
dspec.versionReq(),
search_dirs,
update_if_installed,
inherit_shrinkwrap = dep_of.getShrinkwrap()
)
if r:
r.setTestDependency(dspec.is_test_dependency)
return r
# return a module initialised to the path where we would have
# installed this module, so that it's possible to use
# getDependenciesRecursive to find a list of failed dependencies,
# as well as just available ones
# note that this Component object may still be valid (usable to
# attempt a build), if a different version was previously installed
# on disk at this location (which means we need to check if the
# existing version is linked)
default_path = os.path.join(self.modulesPath(), dspec.name)
r = Component(
default_path,
test_dependency = dspec.is_test_dependency,
installed_linked = fsutils.isLink(default_path),
inherit_shrinkwrap = dep_of.getShrinkwrap()
)
return r
def getDependenciesRecursive(self,
available_components = None,
processed = None,
search_dirs = None,
target = None,
available_only = False,
test = False
):
''' Get available and already installed components, don't check for
remotely available components. See also
satisfyDependenciesRecursive()
Returns {component_name:component}
'''
components, errors = self.__getDependenciesRecursiveWithProvider(
available_components = available_components,
search_dirs = search_dirs,
target = target,
traverse_links = True,
update_installed = False,
provider = self.provideInstalled,
test = test
)
for error in errors:
logger.error(error)
if available_only:
components = OrderedDict((k, v) for k, v in components.items() if v)
return components
def modulesPath(self):
return os.path.join(self.path, Modules_Folder)
def targetsPath(self):
return os.path.join(self.path, Targets_Folder)
def satisfyDependenciesRecursive(
self,
available_components = None,
search_dirs = None,
update_installed = False,
traverse_links = False,
target = None,
test = False
):
''' Retrieve and install all the dependencies of this component and its
dependencies, recursively, or satisfy them from a collection of
available_components or from disk.
Returns
=======
(components, errors)
components: dictionary of name:Component
errors: sequence of errors
Parameters
==========
available_components:
None (default) or a dictionary of name:component. This is
searched before searching directories or fetching remote
components
search_dirs:
None (default), or sequence of directories to search for
already installed, (but not yet loaded) components. Used so
that manually installed or linked components higher up the
dependency tree are found by their users lower down.
These directories are searched in order, and finally the
current directory is checked.
update_installed:
False (default), True, or set(): whether to check the
available versions of installed components, and update if a
newer version is available. If this is a set(), only update
things in the specified set.
traverse_links:
False (default) or True: whether to recurse into linked
dependencies when updating/installing.
target:
None (default), or a Target object. If specified, the target
name and its similarTo list will be used in resolving
dependencies. If None, then only target-independent
dependencies will be installed
test:
True, False, or 'toplevel': should test-only dependencies be
installed? (yes, no, or only for this module, not its
dependencies).
'''
def provider(
dspec,
available_components,
search_dirs,
working_directory,
update_installed,
dep_of=None
):
r = access.satisfyFromAvailable(dspec.name, available_components)
if r:
if r.isTestDependency() and not dspec.is_test_dependency:
logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
r.setTestDependency(False)
return r
update_if_installed = False
if update_installed is True:
update_if_installed = True
elif update_installed:
update_if_installed = dspec.name in update_installed
r = access.satisfyVersionFromSearchPaths(
dspec.name,
dspec.versionReq(),
search_dirs,
update_if_installed,
inherit_shrinkwrap = dep_of.getShrinkwrap()
)
if r:
r.setTestDependency(dspec.is_test_dependency)
return r
# before resorting to install this module, check if we have an
# existing linked module (which wasn't picked up because it didn't
# match the version specification) - if we do, then we shouldn't
# try to install, but should return that anyway:
default_path = os.path.join(self.modulesPath(), dspec.name)
if fsutils.isLink(default_path):
r = Component(
default_path,
test_dependency = dspec.is_test_dependency,
installed_linked = fsutils.isLink(default_path),
inherit_shrinkwrap = dep_of.getShrinkwrap()
)
if r:
assert(r.installedLinked())
return r
else:
logger.error('linked module %s is invalid: %s', dspec.name, r.getError())
return r
r = access.satisfyVersionByInstalling(
dspec.name,
dspec.versionReq(),
self.modulesPath(),
inherit_shrinkwrap = dep_of.getShrinkwrap()
)
if not r:
logger.error('could not install %s' % dspec.name)
if r is not None:
r.setTestDependency(dspec.is_test_dependency)
return r
return self.__getDependenciesRecursiveWithProvider(
available_components = available_components,
search_dirs = search_dirs,
target = target,
traverse_links = traverse_links,
update_installed = update_installed,
provider = provider,
test = test
)
def satisfyTarget(self, target_name_and_version, update_installed=False, additional_config=None, install_missing=True):
''' Ensure that the specified target name (and optionally version,
github ref or URL) is installed in the targets directory of the
current component
returns (derived_target, errors)
'''
# Target, , represent an installed target, internal
from yotta.lib import target
application_dir = None
if self.isApplication():
application_dir = self.path
return target.getDerivedTarget(
target_name_and_version,
self.targetsPath(),
install_missing = install_missing,
application_dir = application_dir,
update_installed = update_installed,
additional_config = additional_config,
shrinkwrap = self.getShrinkwrap()
)
def getTarget(self, target_name_and_version, additional_config=None):
''' Return a derived target object representing the selected target: if
the target is not installed, or is invalid then the returned object
will test false in a boolean context.
Returns derived_target
Errors are not displayed.
'''
derived_target, errors = self.satisfyTarget(
target_name_and_version,
additional_config = additional_config,
install_missing = False
)
if len(errors):
return None
else:
return derived_target
def installedDependencies(self):
''' Return true if satisfyDependencies has been called.
| |
QVariant.Int):
#xval, test = attributes[zindex].toInt()
#yval, test = features[y].attributes()[zindex].toInt()
#attributes[zindex] = QVariant(xval + yval)
try:
xval = int(attributes[zindex])
yval = int(zvalue)
attributes[zindex] = xval + yval
except:
attributes[zindex] = 0
elif (zfield.type() == QVariant.Double):
# xval, test = attributes[zindex].toDouble()
# yval, test = features[y].attributes()[zindex].toDouble()
# attributes[zindex] = QVariant(xval + yval)
try:
xval = float(attributes[zindex])
yval = float(zvalue)
attributes[zindex] = xval + yval
except:
attributes[zindex] = 0
# print " Sum " + unicode(zindex) + ": " + \
# unicode(attributes[zindex].typeName())
features[y] = None
# print unicode(key) + ": " + unicode(type(newgeometry)) + ": " + unicode(len(newgeometry))
newfeature = QgsFeature()
newfeature.setAttributes(attributes)
if newtype == QGis.WKBMultiPoint:
newfeature.setGeometry(QgsGeometry.fromMultiPoint(newgeometry))
elif newtype == QGis.WKBMultiLineString:
newfeature.setGeometry(QgsGeometry.fromMultiPolyline(newgeometry))
else: # WKBMultiPolygon:
newfeature.setGeometry(QgsGeometry.fromMultiPolygon(newgeometry))
outfile.addFeature(newfeature)
merge_count = merge_count + 1
del outfile
if addlayer:
qgis.addVectorLayer(savename, os.path.basename(savename), "ogr")
mmqgis_completion_message(qgis, unicode(feature_count) +
" features merged to " + unicode(merge_count) + " features")
return None
# --------------------------------------------------------
# mmqgis_geometry_export_to_csv - Shape node dump to CSV
# --------------------------------------------------------
def mmqgis_geometry_export_to_csv(qgis, layername, node_filename, attribute_filename, field_delimiter, line_terminator):
layer = mmqgis_find_layer(layername)
if (layer == None) or (layer.type() != QgsMapLayer.VectorLayer):
return "Invalid Vector Layer " + layername
node_header = ["shapeid", "x", "y"]
attribute_header = ["shapeid"]
for index, field in enumerate(layer.fields()):
if (layer.geometryType() == QGis.Point):
node_header.append(field.name().encode("utf-8"))
else:
attribute_header.append(field.name().encode("utf-8"))
try:
nodefile = open(node_filename, 'w')
except:
return "Failure opening " + node_filename
node_writer = csv.writer(nodefile, delimiter = field_delimiter,
lineterminator = line_terminator, quoting=csv.QUOTE_NONNUMERIC)
# Encoding is forced to UTF-8 because CSV writer doesn't support Unicode
# node_writer.writerow([field.encode("utf-8") for field in node_header])
node_writer.writerow(node_header)
if (layer.geometryType() != QGis.Point):
try:
attributefile = open(attribute_filename, 'w')
except:
return "Failure opening " + attribute_filename
attribute_writer = csv.writer(attributefile, delimiter = field_delimiter,
lineterminator = line_terminator, quoting=csv.QUOTE_NONNUMERIC)
# Encoding is forced to UTF-8 because CSV writer doesn't support Unicode
# attribute_writer.writerow([field.encode("utf-8") for field in attribute_header])
attribute_writer.writerow(attribute_header)
# Iterate through each feature in the source layer
feature_type = ""
feature_count = layer.featureCount()
for feature_index, feature in enumerate(layer.getFeatures()):
feature_type = unicode(mmqgis_wkbtype_to_text(feature.geometry().wkbType()))
# shapeid = unicode(feature.id()).strip()
# print "Feature " + str(feature_index) + " = " + feature_type
if (feature_index % 10) == 0:
mmqgis_status_message(qgis, "Exporting feature " + unicode(feature_index) \
+ " of " + unicode(feature_count))
if (feature.geometry() == None):
return "Cannot export layer with no shape data"
attributes = []
for attindex, attribute in enumerate(feature.attributes()):
if type(attribute) == QDate:
attributes.append(unicode(attribute.toString("yyyy-MM-dd")).encode("utf-8"))
elif type(attribute) == QDateTime:
attributes.append(unicode(attribute.toString("yyyy-MM-dd hh:mm:ss")).encode("utf-8"))
else:
attributes.append(unicode(attribute).encode("utf-8"))
if (feature.geometry().wkbType() == QGis.WKBPoint) or \
(feature.geometry().wkbType() == QGis.WKBPoint25D):
point = feature.geometry().asPoint()
row = [ unicode(feature_index), unicode(point.x()), unicode(point.y()) ] + attributes
node_writer.writerow(row)
elif (feature.geometry().wkbType() == QGis.WKBMultiPoint) or \
(feature.geometry().wkbType() == QGis.WKBMultiPoint25D):
points = feature.geometry().asMultiPoint()
for point_index, point in enumerate(points):
shape_id = unicode(feature_index) + "." + unicode(point_index)
row = [ shape_id, unicode(point.x()), unicode(point.y()) ] + attributes
node_writer.writerow(row)
elif (feature.geometry().wkbType() == QGis.WKBLineString) or \
(feature.geometry().wkbType() == QGis.WKBLineString25D):
polyline = feature.geometry().asPolyline()
for point in polyline:
# print " Point " + str(point.x()) + ", " + str(point.y())
row = [ unicode(feature_index), unicode(point.x()), unicode(point.y()) ]
node_writer.writerow(row)
attribute_writer.writerow([feature_index] + attributes)
elif (feature.geometry().wkbType() == QGis.WKBMultiLineString) or \
(feature.geometry().wkbType() == QGis.WKBMultiLineString25D):
polylines = feature.geometry().asMultiPolyline()
for polyline_index, polyline in enumerate(polylines):
shape_id = unicode(feature_index) + "." + unicode(polyline_index)
for point in polyline:
# print " Point " + str(point.x()) + ", " + str(point.y())
row = [ shape_id, unicode(point.x()), unicode(point.y()) ]
node_writer.writerow(row)
attribute_writer.writerow([ shape_id ] + attributes)
elif (feature.geometry().wkbType() == QGis.WKBPolygon) or \
(feature.geometry().wkbType() == QGis.WKBPolygon25D):
# The first polyline in the polygon is the outer ring
# Subsequent polylines (if any) are inner rings (holes)
ring_number = 0
polygon = feature.geometry().asPolygon()
for polyline in polygon:
shape_id = unicode(feature_index)
if ring_number > 0:
shape_id = shape_id + ".ring" + unicode(ring_number)
ring_number = ring_number + 1
for point in polyline:
row = [ shape_id, unicode(point.x()), unicode(point.y()) ]
node_writer.writerow(row)
attribute_writer.writerow([ shape_id ] + attributes)
elif (feature.geometry().wkbType() == QGis.WKBMultiPolygon) or \
(feature.geometry().wkbType() == QGis.WKBMultiPolygon25D):
multipolygon = feature.geometry().asMultiPolygon()
for polygon_index, polygon in enumerate(multipolygon):
ring_number = 0
for polyline in polygon:
shape_id = unicode(feature_index) + "." + unicode(polygon_index)
if ring_number > 0:
shape_id = shape_id + ".ring" + unicode(ring_number)
ring_number = ring_number + 1
for point in polyline:
row = [ shape_id, unicode(point.x()), unicode(point.y()) ]
node_writer.writerow(row)
attribute_writer.writerow([ shape_id ] + attributes)
else:
return "Unsupported geometry: " + unicode(mmqgis_wkbtype_to_text(feature.geometry().wkbType()))
del nodefile
if (layer.geometryType() != QGis.Point):
del attributefile
mmqgis_completion_message(qgis, unicode(feature_count) + " records exported (" + feature_type + ")")
return None
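# --- Illustrative usage (not part of the original plugin code) ---
# A hedged sketch of calling the exporter from the QGIS Python console, where
# the global `iface` object plays the role of the `qgis` argument. The layer
# name and output paths below are hypothetical.
def _demo_export_to_csv(qgis_iface):
    """Hedged sketch: export a (hypothetical) layer's nodes and attributes to CSV."""
    error = mmqgis_geometry_export_to_csv(qgis_iface, "parcels",
        "/tmp/parcel_nodes.csv", "/tmp/parcel_attributes.csv", ",", "\n")
    return error  # None on success, otherwise an error message string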
# ----------------------------------------------------------------
# mmqgis_geometry_import_from_csv - Shape node import from CSV
# ----------------------------------------------------------------
def mmqgis_geometry_import_from_csv(qgis, node_filename, long_colname, lat_colname,
shapeid_colname, geometry_type, shapefile_name, addlayer):
try:
infile = open(node_filename, 'r')
dialect = csv.Sniffer().sniff(infile.read(4096))
infile.seek(0)
reader = csv.reader(infile, dialect)
header = reader.next()
except Exception as e:
return unicode(node_filename) + ": " + unicode(e)
# Decode from UTF-8 characters because csv.reader can only handle 8-bit characters
try:
header = [unicode(field, "utf-8") for field in header]
except:
return "CSV file must be in UTF-8 encoding"
lat_col = -1
long_col = -1
shapeid_col = -1
for x in range(len(header)):
# print header[x]
if (header[x] == lat_colname):
lat_col = x
elif (header[x] == long_colname):
long_col = x
elif (header[x] == shapeid_colname):
shapeid_col = x
if (lat_col < 0):
return "Invalid latitude column name: " + lat_colname
if (long_col < 0):
return "Invalid longitude column name: " + long_colname
if (shapeid_col < 0):
return "Invalid shape ID column name: " + shapeid_colname
if (geometry_type == "Point"):
wkb_type = QGis.WKBPoint
elif (geometry_type == "Polyline"):
wkb_type = QGis.WKBLineString
elif (geometry_type == "Polygon"):
wkb_type = QGis.WKBPolygon
else:
return "Invalid geometry type: " + geometry_type
# Create the output shapefile
if QFile(shapefile_name).exists():
if not QgsVectorFileWriter.deleteShapeFile(shapefile_name):
return "Failure deleting existing shapefile: " + shapefile_name
if qgis.activeLayer():
crs = qgis.activeLayer().crs()
else:
crs = QgsCoordinateReferenceSystem()
crs.createFromSrid(4326) # WGS 84
fields = QgsFields()
fields.append(QgsField(shapeid_colname, QVariant.String))
if (geometry_type == "Point"):
for x in range(len(header)):
if ((x != lat_col) and (x != long_col) and (x != shapeid_col)):
fields.append(QgsField(header[x], QVariant.String))
outfile = QgsVectorFileWriter(shapefile_name, "utf-8", fields, wkb_type, crs)
if (outfile.hasError() != QgsVectorFileWriter.NoError):
return "Failure creating output shapefile: " + unicode(outfile.errorMessage())
polyline = []
node_count = 0
shape_count = 0
current_shape_id = False
reading = True
while reading:
try:
row = reader.next()
except:
reading = False
if reading and (len(row) > long_col) and (len(row) > lat_col) and (len(row) > shapeid_col) \
and mmqgis_is_float(row[long_col]) and mmqgis_is_float(row[lat_col]):
node_count += 1
if (node_count % 10) == 0:
mmqgis_status_message(qgis, "Importing node " + unicode(node_count))
point = QgsPoint(float(row[long_col]), float(row[lat_col]))
else:
point = False
if reading and (wkb_type != QGis.WKBPoint) and (row[shapeid_col] == current_shape_id):
polyline.append(point)
else:
#print str(wkb_type) + ": " + str(current_shape_id)
#print polyline
bad_feature = False
if wkb_type == QGis.WKBPoint:
if point:
geometry = QgsGeometry.fromPoint(point)
current_shape_id = row[shapeid_col]
else:
bad_feature = True
elif wkb_type == QGis.WKBLineString:
if len(polyline) < 2:
bad_feature = True
else:
geometry = QgsGeometry.fromPolyline(polyline)
elif wkb_type == QGis.WKBPolygon:
if len(polyline) < 3:
bad_feature = True
else:
# polyline[len(polyline) - 1] = polyline[0] # must close polygons
polygon = [ polyline ]
geometry = QgsGeometry.fromPolygon(polygon)
if not bad_feature:
# attributes = QgsAttributes()
# attributes = [ QVariant(str(current_shape_id)) ]
attributes = [ unicode(current_shape_id) ]
if (geometry_type == "Point"):
for x in range(len(header)):
if x >= len(row):
attributes.append("")
elif ((x != lat_col) and (x != long_col) and (x != shapeid_col)):
attributes.append(unicode(row[x], 'utf-8'))
#print attributes
newfeature = QgsFeature()
newfeature.setAttributes(attributes)
newfeature.setGeometry(geometry)
outfile.addFeature(newfeature)
shape_count += 1
polyline = []
if reading and point:
current_shape_id = row[shapeid_col]
polyline.append(point)
del infile
del outfile
if addlayer:
qgis.addVectorLayer(shapefile_name, os.path.basename(shapefile_name), "ogr")
mmqgis_completion_message(qgis, "Loaded " + unicode(shape_count) + " shapes (" + unicode(node_count) + " nodes)")
return None
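# --- Illustrative usage (not part of the original plugin code) ---
# A hedged sketch of rebuilding polygon shapes from a node CSV in the QGIS
# Python console; the column names match what the exporter above writes, while
# the paths and geometry type are hypothetical.
def _demo_import_from_csv(qgis_iface):
    """Hedged sketch: rebuild polygons from an exported node CSV."""
    return mmqgis_geometry_import_from_csv(qgis_iface,
        "/tmp/parcel_nodes.csv", "x", "y", "shapeid",
        "Polygon", "/tmp/parcels_rebuilt.shp", True)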
# --------------------------------------------------------
# mmqgis_grid - Grid shapefile creation
# --------------------------------------------------------
def mmqgis_grid(qgis, shapetype, crs, xspacing, yspacing, xleft, ybottom, xright, ytop, layer_name, savename, addlayer):
# Error Checks
if len(savename) <= 0:
return "No output filename given"
if (xspacing <= 0) or (yspacing <= 0):
return "Grid spacing must be positive: " + unicode(xspacing) + " x " + unicode(yspacing)
if (xleft >= xright):
return "Invalid extent width: " + unicode(xleft) + " - " + unicode(xright)
if (ybottom >= ytop):
return "Invalid extent height: " + unicode(ybottom) + " - " + unicode(ytop)
if (xspacing >= (xright - xleft)):
return "X spacing too wide for extent: " + unicode(xspacing)
if (yspacing >= (ytop - ybottom)):
return "Y spacing too tall for extent: " + unicode(yspacing)
# Fields containing coordinates
fields = QgsFields()
fields.append(QgsField("left", QVariant.Double, "real", 24, 16, "left"))
fields.append(QgsField("bottom", QVariant.Double, "real", 24, 16, "bottom"))
fields.append(QgsField("right", QVariant.Double, "real", 24, 16, "right"))
fields.append(QgsField("top", QVariant.Double, "real", 24, 16, "top"))
# Determine shapefile type
if (shapetype == "Points") or (shapetype == "Random Points"):
geometry_type = QGis.WKBPoint
elif shapetype == "Lines":
geometry_type = QGis.WKBLineString
elif (shapetype == "Rectangles") or (shapetype == "Diamonds") or (shapetype == "Hexagons"):
geometry_type = QGis.WKBPolygon
else:
return "Invalid output shape type: " + unicode(shapetype)
# Create output file
if QFile(savename).exists():
if not QgsVectorFileWriter.deleteShapeFile(savename):
return "Failure deleting existing shapefile: " + savename
outfile = QgsVectorFileWriter(savename, "utf-8", fields, geometry_type, crs)
if (outfile.hasError() != QgsVectorFileWriter.NoError):
return "Failure creating output shapefile: " + unicode(outfile.errorMessage())
# (column + 1) and (row + 1) calculation is used to maintain
# topology between adjacent shapes and avoid overlaps/holes
# due to rounding errors
rows = int(ceil((ytop - ybottom) / yspacing))
columns = int(ceil((xright - xleft) / xspacing))
feature_count = 0
if shapetype == "Lines":
for column in range(0, columns + 1):
for row in range(0, rows + 1):
x1 = xleft + (column * xspacing)
x2 = xleft + ((column + 1) * xspacing)
y1 = ybottom + (row * yspacing)
y2 = ybottom + ((row + 1) * yspacing)
# Horizontal line
if (column < columns):
line = QgsGeometry.fromPolyline([QgsPoint(x1, y1), QgsPoint(x2, y1)])
feature = QgsFeature()
feature.setGeometry(line)
feature.setAttributes([x1, y1, x2, y1])
outfile.addFeature(feature)
feature_count = feature_count + 1
# Vertical line
if (row < rows):
line = QgsGeometry.fromPolyline([QgsPoint(x1, y1), QgsPoint(x1, y2)])
feature = QgsFeature()
feature.setGeometry(line)
feature.setAttributes([x1, y1, x1, y2])
outfile.addFeature(feature)
feature_count = feature_count + 1
elif shapetype == "Rectangles":
for column in range(0, columns):
for row in range(0, rows):
x1 = xleft + (column * xspacing)
x2 = xleft + ((column + 1) * xspacing)
y1 = ybottom + (row * yspacing)
y2 = ybottom + ((row + 1) * yspacing)
polygon = QgsGeometry.fromPolygon([[QgsPoint(x1, y1), QgsPoint(x2, y1), QgsPoint(x2, y2), QgsPoint(x1, y2), QgsPoint(x1, y1)]])
# Repository: mmngreco/PyIneq
"""Low level desciptive statistics.
References
----------
1. http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
2. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
#Weighted_sample_variance
3. https://en.wikipedia.org/wiki/Algorithms%5Ffor%5Fcalculating%5Fvariance
#Weighted_incremental_algorithm
"""
import numpy as np
from numba import guvectorize
from . import utils
def c_moment(variable=None, weights=None, order=2, param=None, ddof=0):
"""Calculate central momment.
Calculate the central moment of `x` with respect to `param` of order `n`,
given the weights `w`.
Parameters
----------
variable : 1d-array
Variable
weights : 1d-array
Weights
order : int, optional
Moment order, 2 by default (variance)
param : int or array, optional
Parameter for which the moment is calculated, the default is None,
implies use the mean.
ddof : int, optional
Degree of freedom, zero by default.
Returns
-------
central_moment : float
Notes
-----
- The cmoment of order 1 is 0
- The cmoment of order 2 is the variance.
Source : https://en.wikipedia.org/wiki/Moment_(mathematics)
Todo
----
Implement : https://en.wikipedia.org/wiki/L-moment#cite_note-wang:96-6
"""
# return np.sum((x-c)^n*counts) / np.sum(counts)
variable = variable.copy()
weights = utils.not_empty_weights(weights, like=variable)
if param is None:
param = mean(variable=variable, weights=weights)
elif not isinstance(param, (np.ndarray, int, float)):
raise NotImplementedError
return np.sum((variable - param) ** order * weights) / (
np.sum(weights) - ddof
)
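# --- Illustrative example (not part of the original module) ---
# A minimal sketch, assuming the `utils` helpers referenced above behave as
# usual when explicit weights are passed: the order-1 central moment is ~0 by
# construction and the order-2 moment is the weighted population variance.
# The numbers are arbitrary.
def _demo_c_moment():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    w = np.array([1.0, 1.0, 1.0, 3.0])
    return (c_moment(variable=x, weights=w, order=1),
            c_moment(variable=x, weights=w, order=2))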
def percentile(
variable, weights, percentile=50, interpolation="lower"
) -> float:
"""Calculate the percentile.
Parameters
----------
variable : str or array
weights : str or array
percentile : int or list
Percentile level, if pass 50 we get the median.
interpolation : {'lower', 'higher', 'midpoint'}, optional
Select interpolation method.
Returns
-------
percentile : float
"""
sorted_idx = np.argsort(variable)
cum_weights = np.cumsum(weights[sorted_idx])
lower_percentile_idx = np.searchsorted(
cum_weights, (percentile / 100.0) * cum_weights[-1]
)
if interpolation == "midpoint":
res = np.interp(
lower_percentile_idx + 0.5,
np.arange(len(variable)),
variable[sorted_idx],
)
elif interpolation == "lower":
res = variable[sorted_idx[lower_percentile_idx]]
elif interpolation == "higher":
res = variable[sorted_idx[lower_percentile_idx + 1]]
else:
raise NotImplementedError
return float(res)
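# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the weighted median (percentile=50). With the weights
# below, half of the total weight (3.0) is reached at the value 2.0, so the
# "lower" interpolation returns 2.0.
def _demo_weighted_median():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    w = np.array([1.0, 3.0, 1.0, 1.0])
    return percentile(x, w, percentile=50, interpolation="lower")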
def std_moment(variable=None, weights=None, param=None, order=3, ddof=0):
"""Calculate the standarized moment.
Calculate the standarized moment of order `c` for the variable` x` with
respect to `c`.
Parameters
----------
variable : 1d-array
Random Variable
weights : 1d-array, optional
Weights or probability
order : int, optional
Order of Moment, three by default
param : int or float or array, optional
Central trend, default is the mean.
ddof : int, optional
Degree of freedom.
Returns
-------
std_moment : float
Returns the standardized `n` order moment.
References
----------
- https://en.wikipedia.org/wiki/Moment_(mathematics)
#Significance_of_the_moments
- https://en.wikipedia.org/wiki/Standardized_moment
Todo
----
It is the general case of the raw and central moments. Review
implementation.
"""
if param is None:
param = mean(variable=variable, weights=weights)
res = c_moment(
variable=variable, weights=weights, order=order, param=param, ddof=ddof
)
res /= var(variable=variable, weights=weights, ddof=ddof) ** (order / 2)
return res
def mean(variable=None, weights=None):
"""Calculate the mean of `variable` given `weights`.
Parameters
----------
variable : array-like or str
Variable on which the mean is estimated.
weights : array-like or str
Weights of the `x` variable.
Returns
-------
mean : array-like or float
"""
# if pass a DataFrame separate variables.
variable = variable.copy()
weights = utils.not_empty_weights(weights, like=variable)
variable, weights = utils._clean_nans_values(variable, weights)
return np.average(a=variable, weights=weights, axis=0)
def var(variable=None, weights=None, ddof=0):
"""Calculate the population variance of ``variable`` given `weights`.
Parameters
----------
variable : 1d-array or pd.Series or pd.DataFrame
Variable on which the quasivariation is estimated
weights : 1d-array or pd.Series or pd.DataFrame
Weights of the `variable`.
Returns
-------
variance : 1d-array or pd.Series or float
Estimation of quasivariance of `variable`
References
----------
Moment (mathematics). (2017, May 6). In Wikipedia, The Free Encyclopedia.
Retrieved 14:40, May 15, 2017, from
https://en.wikipedia.org/w/index.php?title=Moment_(mathematics)
Notes
-----
If stratificated sample must pass with groupby each strata.
"""
return c_moment(variable=variable, weights=weights, order=2, ddof=ddof)
def coef_variation(variable=None, weights=None):
"""Calculate the coefficient of variation.
Calculate the coefficient of variation of a `variable` given weights. The
coefficient of variation is the square root of the variance of the incomes
divided by the mean income. It has the advantages of being mathematically
tractable and is subgroup decomposable, but is not bounded from above.
Parameters
----------
variable : array-like or str
weights : array-like or str
Returns
-------
coefficient_variation : float
References
----------
Coefficient of variation. (2017, May 5). In Wikipedia, The Free
Encyclopedia. Retrieved 15:03, May 15, 2017, from
https://en.wikipedia.org/w/index.php?title=Coefficient_of_variation
"""
# todo complete docstring
return var(variable=variable, weights=weights) ** 0.5 / abs(
mean(variable=variable, weights=weights)
)
def kurt(variable=None, weights=None):
"""Calculate the asymmetry coefficient.
Parameters
----------
variable : 1d-array
weights : 1d-array
Returns
-------
kurt : float
Kurtosis coefficient.
References
----------
Moment (mathematics). (2017, May 6). In Wikipedia, The Free Encyclopedia.
Retrieved 14:40, May 15, 2017, from
https://en.wikipedia.org/w/index.php?title=Moment_(mathematics)
Notes
-----
It is an alias of the standardized fourth-order moment.
"""
return std_moment(variable=variable, weights=weights, order=4)
def skew(variable=None, weights=None):
"""Return the asymmetry coefficient of a sample.
Parameters
----------
variable : array-like, str
weights : array-like, str
Returns
-------
skew : float
References
----------
Moment (mathematics). (2017, May 6). In Wikipedia, The Free Encyclopedia.
Retrieved 14:40, May 15, 2017, from
https://en.wikipedia.org/w/index.php?title=Moment_(mathematics)
Notes
-----
It is an alias of the standardized third-order moment.
"""
return std_moment(variable=variable, weights=weights, order=3)
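# --- Illustrative example (not part of the original module) ---
# A minimal sketch: for a symmetric sample with uniform weights the skewness is
# (numerically) zero, while kurt returns the standardized fourth moment of the
# empirical distribution. The numbers are arbitrary.
def _demo_shape_statistics():
    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    w = np.ones_like(x)
    return skew(variable=x, weights=w), kurt(variable=x, weights=w)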
@guvectorize(
"float64[:], float64[:], int64, float64[:]",
"(n),(n),()->()",
nopython=True,
cache=True,
)
def wvar(x, w, kind, out):
"""Calculate weighted variance of X.
Calculates the weighted variance of x according to a kind of weights.
Parameters
----------
x : np.ndarray
Main variable.
w : np.ndarray
Weigths.
kind : int
Has three modes to calculate de variance, you can control that with
this argument, the values and the output are the next:
* 1. population variance
* 2. sample frequency variance
* 3. sample reliability variance.
out : np.ndarray
Returns
-------
weighted_variance : float
References
----------
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#Weighted_incremental_algorithm
"""
wSum = wSum2 = mean = S = 0
for i in range(len(x)): # Alternatively "for x, w in zip(data, weights):"
wSum = wSum + w[i]
wSum2 = wSum2 + w[i] * w[i]
meanOld = mean
mean = meanOld + (w[i] / wSum) * (x[i] - meanOld)
S = S + w[i] * (x[i] - meanOld) * (x[i] - mean)
if kind == 1:
# population_variance
out[0] = S / wSum
elif kind == 2:
# Bessel's correction for weighted samples
# Frequency weights
# sample_frequency_variance
out[0] = S / (wSum - 1)
elif kind == 3:
# Reliability weights
# sample_reliability_variance
out[0] = S / (wSum - wSum2 / wSum)
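# --- Illustrative example (not part of the original module) ---
# A minimal sketch of calling the guvectorized kernel above. The inputs must be
# float64 arrays as declared in the signature; with unit weights kind 1 gives
# the population variance and kinds 2 and 3 both reduce to the sample variance.
def _demo_wvar():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    w = np.ones_like(x)
    return float(wvar(x, w, 1)), float(wvar(x, w, 2)), float(wvar(x, w, 3))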
@guvectorize(
"float64[:], float64[:], float64[:], int64, float64[:]",
"(n),(n),(n),()->()",
nopython=True,
cache=True,
)
def wcov(x, y, w, kind, out):
"""Compute weighted covariance between x and y.
Compute the weighted covariance between two variables; the `kind` argument
selects which kind of covariance is returned.
Parameters
----------
x : np.array
Main variable.
y : np.array
Second variable.
w : np.array
Weights.
kind : int
Kind of weighted covariance to return:
1 : population covariance
2 : sample frequency covariance
3 : sample reliability covariance.
Note that only kind 1 is currently handled in the body below.
out : np.array
Returns
-------
weighted_covariance = float
References
----------
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
"""
meanx = meany = 0
wsum = wsum2 = 0
C = 0
for i in range(len(x)):
wsum += w[i]
wsum2 += w[i] * w[i]
dx = x[i] - meanx
meanx += (w[i] / wsum) * dx
meany += (w[i] / wsum) * (y[i] - meany)
C += w[i] * dx * (y[i] - meany)
if kind == 1:
# population_covar
out[0] = C / wsum
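# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the weighted population covariance (kind=1, the only
# branch implemented in the kernel above). With unit weights it matches the
# usual population covariance of x and y. The numbers are arbitrary.
def _demo_wcov():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([2.0, 4.0, 6.0, 8.0])
    w = np.ones_like(x)
    return float(wcov(x, y, w, 1))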
@guvectorize(
"float64[:], float64[:], float64[:]",
"(n),(n)->()",
nopython=True,
cache=True,
)
def online_kurtosis(x, w, out):
"""Online kurtosis."""
n = 0
mean = 0
M2 = 0
M3 = 0
M4 = 0
for i in range(len(x)):
n1 = w[i]
n = n + w[i]
delta = x[i] - mean
delta_n = delta / n
delta_n2 = delta_n * delta_n
term1 = delta * delta_n * n1
mean = mean + w[i] * delta_n / n
M4 = (
M4
+ term1 * delta_n2 * (n * n - 3 * n + 3)
+ 6 * delta_n2 * M2
- 4 * delta_n * M3
)
M3 = M3 + term1 * delta_n * (n - 2) - 3 * delta_n * M2
M2 = M2 + term1
out[0] = (n * M4) / (M2 * M2) - 3
@guvectorize(
"float64[:], float64[:], int64, float64[:]",
"(n),(n),()->()",
nopython=True,
cache=True,
)
def Mk(x, w, k, out):
"""Calculate Mk."""
w_sum = wx_sum = 0
    # NOTE: the original loop body was truncated at this point; the completion
    # below assumes Mk is the weighted raw moment of order k.
    for i in range(len(x)):
        w_sum += w[i]
        wx_sum += w[i] * x[i] ** k
    out[0] = wx_sum / w_sum
# chef/tests/test_parser.py
from __future__ import with_statement
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from chef.datastructures import Ingredient, IngredientProperties, Ingredients,\
unknown, undefined
from chef.errors.syntax import ChefSyntaxError, MissingEmptyLineError,\
InvalidOvenTemperature, OrdinalIdentifierError,\
InvalidCookingTimeError, InvalidCommandError,\
InvalidTimeDeclarationError
import chef.parser as chef_parser
class TestMeasurePattern(object):
params = {
'test_dry_measure': [
{'dry_measure': 'g'},
{'dry_measure': 'kg'},
{'dry_measure': 'pinch'},
{'dry_measure': 'pinches'}],
'test_liquid_measure': [
{'liquid_measure': 'l'},
{'liquid_measure': 'ml'},
{'liquid_measure': 'dash'},
{'liquid_measure': 'dashes'}],
'test_dry_or_liquid_measure': [
{'dry_or_liquid_measure': 'cup'},
{'dry_or_liquid_measure': 'cups'},
{'dry_or_liquid_measure': 'teaspoon'},
{'dry_or_liquid_measure': 'teaspoons'},
{'dry_or_liquid_measure': 'tablespoon'},
{'dry_or_liquid_measure': 'tablespoons'}],
'test_measure_type': [
{'measure_type': 'heaped'},
{'measure_type': 'level'}],
}
def test_dry_measure(self, dry_measure):
m = re.match(chef_parser.DRY_MEASURE_PATTERN, dry_measure)
assert m is not None
assert m.group() == dry_measure
def test_liquid_measure(self, liquid_measure):
m = re.match(chef_parser.LIQUID_MEASURE_PATTERN, liquid_measure)
assert m is not None
assert m.group() == liquid_measure
def test_dry_or_liquid_measure(self, dry_or_liquid_measure):
m = re.match(
chef_parser.DRY_OR_LIQUID_MEASURE_PATTERN, dry_or_liquid_measure)
assert m is not None
assert m.group() == dry_or_liquid_measure
def test_measure_type(self, measure_type):
m = re.match(chef_parser.MEASURE_TYPE_PATTERN, measure_type)
assert m is not None
assert m.group() == measure_type
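# The ``params`` class attributes used by these test classes are assumed to be
# consumed by a pytest_generate_tests hook defined in conftest.py (not shown
# here); a minimal sketch of such a hook is:
#
#     def pytest_generate_tests(metafunc):
#         cases = getattr(metafunc.cls, 'params', {}).get(
#             metafunc.function.__name__, [])
#         if cases:
#             argnames = sorted(cases[0])
#             metafunc.parametrize(
#                 argnames, [[case[name] for name in argnames] for case in cases])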
class TestIngredientListItemPattern(object):
params = {
'test_measure_less': [
{'item': '32 zucchinis'},
{'item': '1 heaped water'},
{'item': '7 levels lava'}]}
def test_ingredient_name_only(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, 'orange juice')
assert m is not None
assert m.groupdict() == {
'initial_value': None,
'measure_type': None,
'measure': None,
'name': 'orange juice'}
def test_gramm(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '108 g lard')
assert m is not None
assert m.groupdict() == {
'initial_value': '108',
'measure_type': None,
'measure': 'g',
'name': 'lard'}
def test_kilogramm(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1 kg flour')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': None,
'measure': 'kg',
'name': 'flour'}
def test_pinch(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1 pinch salt')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': None,
'measure': 'pinch',
'name': 'salt'}
def test_pinches(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '2 pinches sugar')
assert m is not None
assert m.groupdict() == {
'initial_value': '2',
'measure_type': None,
'measure': 'pinches',
'name': 'sugar'}
def test_liter(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '5 l oil')
assert m is not None
assert m.groupdict() == {
'initial_value': '5',
'measure_type': None,
'measure': 'l',
'name': 'oil'}
def test_milliliter(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '119 ml water')
assert m is not None
assert m.groupdict() == {
'initial_value': '119',
'measure_type': None,
'measure': 'ml',
'name': 'water'}
def test_dash(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1 dash tabasco')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': None,
'measure': 'dash',
'name': 'tabasco'}
def test_dashes(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '3 dashes red wine')
assert m is not None
assert m.groupdict() == {
'initial_value': '3',
'measure_type': None,
'measure': 'dashes',
'name': 'red wine'}
def test_cup(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1 cup rice')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': None,
'measure': 'cup',
'name': 'rice'}
def test_cups(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '111 cups oil')
assert m is not None
assert m.groupdict() == {
'initial_value': '111',
'measure_type': None,
'measure': 'cups',
'name': 'oil'}
def test_teaspoon(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1 teaspoon sugar')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': None,
'measure': 'teaspoon',
'name': 'sugar'}
def test_teaspoons(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1337 teaspoons beer')
assert m is not None
assert m.groupdict() == {
'initial_value': '1337',
'measure_type': None,
'measure': 'teaspoons',
'name': 'beer'}
def test_tablespoon(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '1 tablespoon cocoa')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': None,
'measure': 'tablespoon',
'name': 'cocoa'}
def test_tablespoons(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN, '4 tablespoons milk')
assert m is not None
assert m.groupdict() == {
'initial_value': '4',
'measure_type': None,
'measure': 'tablespoons',
'name': 'milk'}
def test_measure_less(self, item):
initial_value, name = item.split(' ', 1)
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, item)
assert m is not None
assert m.groupdict() == {
'initial_value': initial_value,
'measure_type': None,
'measure': None,
'name': name}
def test_with_measure_type_heaped(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN,
'6 heaped tablespoons cough syrup')
assert m is not None
assert m.groupdict() == {
'initial_value': '6',
'measure_type': 'heaped',
'measure': 'tablespoons',
'name': 'cough syrup'}
def test_with_measure_type_level(self):
m = re.match(
chef_parser.INGREDIENT_LIST_ITEM_PATTERN,
'1 level teaspoon coffee powder')
assert m is not None
assert m.groupdict() == {
'initial_value': '1',
'measure_type': 'level',
'measure': 'teaspoon',
'name': 'coffee powder'}
def test_name_only(self):
m = re.match(chef_parser.INGREDIENT_LIST_ITEM_PATTERN, 'apple')
assert m is not None
assert m.groupdict() == {
'initial_value': None,
'measure_type': None,
'measure': None,
'name': 'apple'}
class TestCookingTimePattern(object):
s = 'Cooking time: '
def test_hour(self):
m = re.match(chef_parser.COOKING_TIME_PATTERN, self.s + '1 hour.')
assert m is not None
assert m.groupdict() == {
'cooking_time': '1',
'unit': 'hour'}
def test_hours(self):
m = re.match(chef_parser.COOKING_TIME_PATTERN, self.s + '3 hours.')
assert m is not None
assert m.groupdict() == {
'cooking_time': '3',
'unit': 'hours'}
def test_minute(self):
m = re.match(chef_parser.COOKING_TIME_PATTERN, self.s + '1 minute.')
assert m is not None
assert m.groupdict() == {
'cooking_time': '1',
'unit': 'minute'}
def test_minutes(self):
m = re.match(chef_parser.COOKING_TIME_PATTERN, self.s + '45 minutes.')
assert m is not None
assert m.groupdict() == {
'cooking_time': '45',
'unit': 'minutes'}
class TestOvenTemperaturePattern(object):
s = 'Pre-heat oven to '
def test_temperature_only(self):
m = re.match(
chef_parser.OVEN_TEMPERATURE_PATTERN,
self.s + '110 degrees Celsius.')
assert m is not None
assert m.groupdict() == {'temperature': '110', 'gas_mark': None}
def test_with_gas_mark(self):
m = re.match(
chef_parser.OVEN_TEMPERATURE_PATTERN,
self.s + '110 degrees Celsius (gas mark 4).')
assert m is not None
assert m.groupdict() == {'temperature': '110', 'gas_mark': '4'}
class TestParseOrdinalIdentifier(object):
params = {'test_invalid': [{'id': '42'}, {'id': 'first'}]}
def test_invalid(self, id):
with pytest.raises(OrdinalIdentifierError):
chef_parser.parse_ordinal_identifier(id)
def test_first(self):
assert chef_parser.parse_ordinal_identifier('1st') == 1
assert chef_parser.parse_ordinal_identifier('21st') == 21
def test_second(self):
assert chef_parser.parse_ordinal_identifier('2nd') == 2
assert chef_parser.parse_ordinal_identifier('32nd') == 32
def test_third(self):
assert chef_parser.parse_ordinal_identifier('3rd') == 3
assert chef_parser.parse_ordinal_identifier('73rd') == 73
def test_nth(self):
assert chef_parser.parse_ordinal_identifier('4th') == 4
assert chef_parser.parse_ordinal_identifier('11th') == 11
assert chef_parser.parse_ordinal_identifier('12th') == 12
assert chef_parser.parse_ordinal_identifier('13th') == 13
assert chef_parser.parse_ordinal_identifier('58th') == 58
class TestDetectIngredientState(object):
params = {
'test_dry': [
{'measure': 'g'},
{'measure': 'kg'},
{'measure': 'pinch'},
{'measure': 'pinches'}],
'test_liquid': [
{'measure': 'ml'},
{'measure': 'l'},
{'measure': 'dash'},
{'measure': 'dashes'}],
'test_unknown': [
{'measure': 'cup'},
{'measure': 'cups'},
{'measure': 'teaspoon'},
{'measure': 'teaspoons'},
{'measure': 'tablespoon'},
{'measure': 'tablespoons'}],
'test_with_measure_type': [
{'measure': 'pinch', 'measure_type': 'heaped'},
{'measure': 'pinches', 'measure_type': 'heaped'},
{'measure': 'cup', 'measure_type': 'heaped'},
{'measure': 'cups', 'measure_type': 'heaped'},
{'measure': 'teaspoon', 'measure_type': 'heaped'},
{'measure': 'teaspoons', 'measure_type': 'heaped'},
{'measure': 'tablespoon', 'measure_type': 'heaped'},
{'measure': 'tablespoons', 'measure_type': 'heaped'},
{'measure': 'pinch', 'measure_type': 'level'},
{'measure': 'pinches', 'measure_type': 'level'},
{'measure': 'cup', 'measure_type': 'level'},
{'measure': 'cups', 'measure_type': 'level'},
{'measure': 'teaspoon', 'measure_type': 'level'},
{'measure': 'teaspoons', 'measure_type': 'level'},
{'measure': 'tablespoon', 'measure_type': 'level'},
{'measure': 'tablespoons', 'measure_type': 'level'}]}
def test_measure_less(self):
is_dry, is_liquid = chef_parser.detect_ingredient_state(None)
assert not is_dry
assert not is_liquid
def test_dry(self, measure):
is_dry, is_liquid = chef_parser.detect_ingredient_state(measure)
assert is_dry
assert not is_liquid
def test_liquid(self, measure):
is_dry, is_liquid = chef_parser.detect_ingredient_state(measure)
assert not is_dry
assert is_liquid
def test_unknown(self, measure):
is_dry, is_liquid = chef_parser.detect_ingredient_state(measure)
assert is_dry is unknown
assert is_liquid is unknown
def test_with_measure_type(self, measure, measure_type):
is_dry, is_liquid = chef_parser.detect_ingredient_state(
measure, measure_type)
assert is_dry
assert not is_liquid
def test_invalid(self):
with pytest.raises(ValueError) as e:
chef_parser.detect_ingredient_state('blah')
assert e.value.message == "invalid measure: 'blah'"
class TestParseIngredientList(object):
params = {'test_nonmatching_measure': [
{'line': '5 tablespoon brown sugar\n'},
{'line': '1 cups milk'},
# ....
]}
def test_no_items(self):
ingredients, lineno = chef_parser.parse_ingredient_list('', 4)
assert ingredients == Ingredients()
assert lineno == 4
def test_one_item(self):
ingredients, lineno = chef_parser.parse_ingredient_list(
'111 cups oil\n', 4)
assert ingredients == Ingredients([
Ingredient('oil', IngredientProperties(111, unknown, unknown))])
assert lineno == 5
def test_multiple_items(self):
ingredients, lineno = chef_parser.parse_ingredient_list(
'111 cups oil\n75 heaped tablespoons sugar\neggs\n', 4)
assert ingredients == Ingredients([
Ingredient('oil', IngredientProperties(111, unknown, unknown)),
Ingredient('sugar', IngredientProperties(75, unknown, unknown)),
Ingredient('eggs', IngredientProperties(None, False, False))])
assert lineno == 7
def test_misc(self):
ingredient_list = '''72 kg tuna
2300 g lettuce
37 cups olive oil
18 peppers
'''
ingredients, lineno = chef_parser.parse_ingredient_list(
ingredient_list, 4)
assert ingredients == Ingredients([
Ingredient('tuna', IngredientProperties(72, True, False)),
Ingredient('lettuce', IngredientProperties(2300, True, False)),
Ingredient(
'olive oil', IngredientProperties(37, unknown, unknown)),
Ingredient('peppers', IngredientProperties(18, False, False))])
assert lineno == 8
def test_multiple_definition_of_same_name(self):
# the website says: "If an ingredient is repeated, the new value is
# used and previous values for that ingredient are ignored."
ingredients, lineno = chef_parser.parse_ingredient_list(
'111 cups oil\n75 cups oil\n', 4)
assert len(ingredients) == 1
assert ingredients == [
Ingredient('oil', IngredientProperties(75, unknown, unknown))]
assert lineno == 6
def test_nonmatching_measure(self, line):
ingredients, lineno = chef_parser.parse_ingredient_list(line, 4)
class TestParseCookingTime(object):
s = 'Cooking time: '
def test_hour(self):
parsed_cooking_time = chef_parser.parse_cooking_time(
self.s + '1 hour.')
assert chef_parser.is_cooking_time(self.s + '1 hour.')
assert parsed_cooking_time == (1, 'hour')
def test_hours(self):
        parsed_cooking_time = chef_parser.parse_cooking_time(
            self.s + '3 hours.')
        assert chef_parser.is_cooking_time(self.s + '3 hours.')
        assert parsed_cooking_time == (3, 'hours')
| |
NW', 'TR Chain Chomps SW'),
'TR Chain Chomps Down Stairs',
('TR Pokey 2 ES', 'TR Lava Island WS'),
'TR Crystaroller Down Stairs',
('TR Dash Bridge WS', 'TR Crystal Maze ES')
],
'Ganons Tower': [
('GT Torch EN', 'GT Hope Room WN'),
('GT Tile Room EN', 'GT Speed Torch WN'),
('GT Hookshot ES', 'GT Map Room WS'),
('GT Double Switch EN', 'GT Spike Crystals WN'),
('GT Firesnake Room SW', 'GT Warp Maze (Rails) NW'),
('GT Conveyor Star Pits EN', 'GT Falling Bridge WN'),
('GT Mini Helmasaur Room WN', 'GT Bomb Conveyor EN'),
('GT Crystal Circles SW', 'GT Left Moldorm Ledge NW')
]
}
default_door_connections = [
('Hyrule Castle Lobby W', 'Hyrule Castle West Lobby E'),
('Hyrule Castle Lobby E', 'Hyrule Castle East Lobby W'),
('Hyrule Castle Lobby WN', 'Hyrule Castle West Lobby EN'),
('Hyrule Castle West Lobby N', 'Hyrule Castle West Hall S'),
('Hyrule Castle East Lobby N', 'Hyrule Castle East Hall S'),
('Hyrule Castle East Lobby NW', 'Hyrule Castle East Hall SW'),
('Hyrule Castle East Hall W', 'Hyrule Castle Back Hall E'),
('Hyrule Castle West Hall E', 'Hyrule Castle Back Hall W'),
('Hyrule Castle Throne Room N', 'Sewers Behind Tapestry S'),
('Hyrule Dungeon Guardroom N', 'Hyrule Dungeon Armory S'),
('Sewers Dark Cross Key Door N', 'Sewers Water S'),
('Sewers Water W', 'Sewers Key Rat E'),
('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
('Eastern Lobby Bridge N', 'Eastern Cannonball S'),
('Eastern Cannonball N', 'Eastern Courtyard Ledge S'),
('Eastern Cannonball Ledge WN', 'Eastern Big Key EN'),
('Eastern Cannonball Ledge Key Door EN', 'Eastern Dark Square Key Door WN'),
('Eastern Courtyard Ledge W', 'Eastern West Wing E'),
('Eastern Courtyard Ledge E', 'Eastern East Wing W'),
('Eastern Hint Tile EN', 'Eastern Courtyard WN'),
('Eastern Big Key NE', 'Eastern Hint Tile Blocked Path SE'),
('Eastern Courtyard EN', 'Eastern Map Valley WN'),
('Eastern Courtyard N', 'Eastern Darkness S'),
('Eastern Map Valley SW', 'Eastern Dark Square NW'),
('Eastern Attic Start WS', 'Eastern False Switches ES'),
('Eastern Cannonball Hell WS', 'Eastern Single Eyegore ES'),
('Desert Compass NW', 'Desert Cannonball S'),
('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
('PoD Middle Cage N', 'PoD Pit Room S'),
('PoD Pit Room NW', 'PoD Arena Main SW'),
('PoD Pit Room NE', 'PoD Arena Bridge SE'),
('PoD Arena Main NW', 'PoD Falling Bridge SW'),
('PoD Arena Crystals E', 'PoD Sexy Statue W'),
('PoD Mimics 1 NW', 'PoD Conveyor SW'),
('PoD Map Balcony WS', 'PoD Arena Ledge ES'),
('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
('PoD Dark Maze E', 'PoD Big Chest Balcony W'),
('PoD Sexy Statue NW', 'PoD Mimics 2 SW'),
('Swamp Pot Row WN', 'Swamp Map Ledge EN'),
('Swamp Pot Row WS', 'Swamp Trench 1 Approach ES'),
('Swamp Trench 1 Departure WS', 'Swamp Hub ES'),
('Swamp Hammer Switch WN', 'Swamp Hub Dead Ledge EN'),
('Swamp Hub S', 'Swamp Donut Top N'),
('Swamp Hub WS', 'Swamp Trench 2 Pots ES'),
('Swamp Hub WN', 'Swamp Crystal Switch EN'),
('Swamp Hub North Ledge N', 'Swamp Push Statue S'),
('Swamp Trench 2 Departure WS', 'Swamp West Shallows ES'),
('Swamp Big Key Ledge WN', 'Swamp Barrier EN'),
('Swamp Basement Shallows NW', 'Swamp Waterfall Room SW'),
('Skull 1 Lobby WS', 'Skull Pot Prison ES'),
('Skull Map Room SE', 'Skull Pinball NE'),
('Skull Pinball WS', 'Skull Compass Room ES'),
('Skull Compass Room NE', 'Skull Pot Prison SE'),
('Skull 2 East Lobby WS', 'Skull Small Hall ES'),
('Skull 3 Lobby NW', 'Skull Star Pits SW'),
('Skull Vines NW', 'Skull Spike Corner SW'),
('Thieves Lobby E', 'Thieves Compass Room W'),
('Thieves Ambush E', 'Thieves Rail Ledge W'),
('Thieves Rail Ledge NW', 'Thieves Pot Alcove Bottom SW'),
('Thieves BK Corner NE', 'Thieves Hallway SE'),
('Thieves Pot Alcove Mid WS', 'Thieves Spike Track ES'),
('Thieves Hellway NW', 'Thieves Spike Switch SW'),
('Thieves Triple Bypass EN', 'Thieves Conveyor Maze WN'),
('Thieves Basement Block WN', 'Thieves Conveyor Bridge EN'),
('Thieves Lonely Zazak WS', 'Thieves Conveyor Bridge ES'),
('Ice Cross Bottom SE', 'Ice Compass Room NE'),
('Ice Cross Right ES', 'Ice Pengator Switch WS'),
('Ice Conveyor SW', 'Ice Bomb Jump NW'),
('Ice Pengator Trap NE', 'Ice Spike Cross SE'),
('Ice Spike Cross ES', 'Ice Spike Room WS'),
('Ice Tall Hint SE', 'Ice Lonely Freezor NE'),
('Ice Tall Hint EN', 'Ice Hookshot Ledge WN'),
('Iced T EN', 'Ice Catwalk WN'),
('Ice Catwalk NW', 'Ice Many Pots SW'),
('Ice Many Pots WS', 'Ice Crystal Right ES'),
('Ice Switch Room ES', 'Ice Refill WS'),
('Ice Switch Room SE', 'Ice Antechamber NE'),
('Mire 2 NE', 'Mire Hub SE'),
('Mire Hub ES', 'Mire Lone Shooter WS'),
('Mire Hub E', 'Mire Failure Bridge W'),
('Mire Hub NE', 'Mire Hidden Shooters SE'),
('Mire Hub WN', 'Mire Wizzrobe Bypass EN'),
('Mire Hub WS', 'Mire Conveyor Crystal ES'),
('Mire Hub Right EN', 'Mire Map Spot WN'),
('Mire Hub Top NW', 'Mire Cross SW'),
('Mire Hidden Shooters ES', 'Mire Spikes WS'),
('Mire Minibridge NE', 'Mire Right Bridge SE'),
('Mire BK Door Room EN', 'Mire Ledgehop WN'),
('Mire BK Door Room N', 'Mire Left Bridge S'),
('Mire Spikes SW', 'Mire Crystal Dead End NW'),
('Mire Ledgehop NW', 'Mire Bent Bridge SW'),
('Mire Bent Bridge W', 'Mire Over Bridge E'),
('Mire Over Bridge W', 'Mire Fishbone E'),
('Mire Fishbone SE', 'Mire Spike Barrier NE'),
('Mire Spike Barrier SE', 'Mire Wizzrobe Bypass NE'),
('Mire Conveyor Crystal SE', 'Mire Neglected Room NE'),
('Mire Tile Room SW', 'Mire Conveyor Barrier NW'),
('Mire Block X WS', 'Mire Tall Dark and Roomy ES'),
('Mire Crystal Left WS', 'Mire Falling Foes ES'),
('TR Lobby Ledge NE', 'TR Hub SE'),
('TR Compass Room NW', 'TR Hub SW'),
('TR Hub ES', 'TR Torches Ledge WS'),
('TR Hub EN', 'TR Torches WN'),
('TR Hub NW', 'TR Pokey 1 SW'),
('TR Hub NE', 'TR Tile Room SE'),
('TR Torches NW', 'TR Roller Room SW'),
('TR Pipe Pit WN', 'TR Lava Dual Pipes EN'),
('TR Lava Island ES', 'TR Pipe Ledge WS'),
('TR Lava Dual Pipes WN', 'TR Pokey 2 EN'),
('TR Lava Dual Pipes SW', 'TR Twin Pokeys NW'),
('TR Pokey 2 ES', 'TR Lava Island WS'),
('TR Dodgers NE', 'TR Lava Escape SE'),
('TR Lava Escape NW', 'TR Dash Room SW'),
('TR Hallway WS', 'TR Lazy Eyes ES'),
('TR Dark Ride SW', 'TR Dash Bridge NW'),
('TR Dash Bridge SW', 'TR Eye Bridge NW'),
('TR Dash Bridge WS', 'TR Crystal Maze ES'),
('GT Torch WN', 'GT Conveyor Cross EN'),
('GT Hope Room EN', 'GT Tile Room WN'),
('GT Big Chest SW', 'GT Invisible Catwalk NW'),
('GT Bob\'s Room SE', 'GT Invisible Catwalk NE'),
('GT Speed Torch NE', 'GT Petting Zoo SE'),
('GT Speed Torch SE', 'GT Crystal Conveyor NE'),
('GT Warp Maze (Pits) ES', 'GT Invisible Catwalk WS'),
('GT Hookshot NW', 'GT DMs Room SW'),
('GT Hookshot SW', 'GT Double Switch NW'),
('GT Warp Maze (Rails) WS', 'GT Randomizer Room ES'),
('GT Conveyor Star Pits EN', 'GT Falling Bridge WN'),
('GT Falling Bridge WS', 'GT Hidden Star ES'),
('GT Dash Hall NE', 'GT Hidden Spikes SE'),
('GT Hidden Spikes EN', 'GT Cannonball Bridge WN'),
('GT Gauntlet 3 SW', 'GT Gauntlet 4 NW'),
('GT Gauntlet 5 WS', 'GT Beam Dash ES'),
('GT Wizzrobes 2 NE', 'GT Conveyor Bridge SE'),
('GT Conveyor Bridge EN', 'GT Torch Cross WN'),
('GT Crystal Circles SW', 'GT Left Moldorm Ledge NW')
]
default_one_way_connections = [
('Sewers Pull Switch S', 'Sanctuary N'),
('Eastern Duo Eyegores NE', 'Eastern Boss SE'),
('Desert Wall Slide NW', 'Desert Boss SW'),
('Tower Altar NW', 'Tower Agahnim 1 SW'),
('PoD Harmless Hellway SE', 'PoD Arena Main NE'),
('PoD Dark Alley NE', 'PoD Boss SE'),
('Swamp T NW', 'Swamp Boss SW'),
('Thieves Hallway NE', 'Thieves Boss SE'),
('Mire Antechamber NW', 'Mire Boss SW'),
('TR Final Abyss NW', 'TR Boss SW'),
('GT Invisible Bridges WS', 'GT Invisible Catwalk ES'),
('GT Validation WS', 'GT Frozen Over ES'),
('GT Brightly Lit Hall NW', 'GT Agahnim 2 SW')
]
# For crossed
# offset from | |
var.KILLER = "" # nickname of who chose the victim
var.STARTED_DAY_PLAYERS = len(get_players())
var.LAST_GOAT.clear()
msg = messages["villagers_lynch"].format(botconfig.CMD_CHAR, len(list_players()) // 2 + 1)
channels.Main.send(msg)
var.DAY_ID = time.time()
if var.DAY_TIME_WARN > 0:
if var.STARTED_DAY_PLAYERS <= var.SHORT_DAY_PLAYERS:
t1 = threading.Timer(var.SHORT_DAY_WARN, hurry_up, [var.DAY_ID, False])
l = var.SHORT_DAY_WARN
else:
t1 = threading.Timer(var.DAY_TIME_WARN, hurry_up, [var.DAY_ID, False])
l = var.DAY_TIME_WARN
var.TIMERS["day_warn"] = (t1, var.DAY_ID, l)
t1.daemon = True
t1.start()
if var.DAY_TIME_LIMIT > 0: # Time limit enabled
if var.STARTED_DAY_PLAYERS <= var.SHORT_DAY_PLAYERS:
t2 = threading.Timer(var.SHORT_DAY_LIMIT, hurry_up, [var.DAY_ID, True])
l = var.SHORT_DAY_LIMIT
else:
t2 = threading.Timer(var.DAY_TIME_LIMIT, hurry_up, [var.DAY_ID, True])
l = var.DAY_TIME_LIMIT
var.TIMERS["day"] = (t2, var.DAY_ID, l)
t2.daemon = True
t2.start()
if var.DEVOICE_DURING_NIGHT:
modes = []
for player in get_players():
if not player.is_fake:
modes.append(("+v", player.nick))
channels.Main.mode(*modes)
event = Event("begin_day", {})
event.dispatch(var)
# induce a lynch if we need to (due to lots of pacifism/impatience totems or whatever)
chk_decision(var)
@handle_error
def night_warn(gameid):
if gameid != var.NIGHT_ID:
return
if var.PHASE != "night":
return
channels.Main.send(messages["twilight_warning"])
@handle_error
def transition_day(gameid=0):
if gameid:
if gameid != var.NIGHT_ID:
return
var.NIGHT_ID = 0
if var.PHASE not in ("night", "join"):
return
var.PHASE = "day"
var.DAY_COUNT += 1
var.FIRST_DAY = (var.DAY_COUNT == 1)
var.DAY_START_TIME = datetime.now()
event_begin = Event("transition_day_begin", {})
event_begin.dispatch(var)
if var.START_WITH_DAY and var.FIRST_DAY:
# TODO: need to message everyone their roles and give a short thing saying "it's daytime"
# but this is good enough for now to prevent it from crashing
begin_day()
return
td = var.DAY_START_TIME - var.NIGHT_START_TIME
var.NIGHT_START_TIME = None
var.NIGHT_TIMEDELTA += td
minimum, sec = td.seconds // 60, td.seconds % 60
# built-in logic runs at the following priorities:
# 1 = wolf kills
# 2 = non-wolf kills
# 3 = fixing killers dict to have correct priority (wolf-side VG kills -> non-wolf kills -> wolf kills)
# 4 = protections/fallen angel
# 4.1 = shaman, 4.2 = bodyguard/GA, 4.3 = blessed villager
# 5 = alpha wolf bite, other custom events that trigger after all protection stuff is resolved
    # 6 = rearranging victim list (ensures bodyguard/harlot messages play),
# fixing killers dict priority again (in case step 4 or 5 added to it)
# 7 = read-only operations
# Actually killing off the victims happens in transition_day_resolve
# We set the variables here first; listeners should mutate, not replace
# We don't need to use User containers here, as these don't persist long enough
# This removes the burden of having to clear them at the end or should an error happen
victims = []
killers = defaultdict(list)
evt = Event("transition_day", {
"victims": victims,
"killers": killers,
})
evt.dispatch(var)
# remove duplicates
victims_set = set(victims)
vappend = []
victims.clear()
# Ensures that special events play for bodyguard and harlot-visiting-victim so that kill can
# be correctly attributed to wolves (for vengeful ghost lover), and that any gunner events
# can play. Harlot visiting wolf doesn't play special events if they die via other means since
# that assumes they die en route to the wolves (and thus don't shoot/give out gun/etc.)
# TODO: this needs to be split off into bodyguard.py and harlot.py
from src.roles import bodyguard, harlot
for v in victims_set:
if is_dying(var, v):
victims.append(v)
elif v in var.ROLES["bodyguard"] and v in bodyguard.GUARDED and bodyguard.GUARDED[v] in victims_set:
vappend.append(v)
elif harlot.VISITED.get(v) in victims_set:
vappend.append(v)
else:
victims.append(v)
prevlen = var.MAX_PLAYERS + 10
while len(vappend) > 0:
if len(vappend) == prevlen:
# have a circular dependency, try to break it by appending the next value
v = vappend[0]
vappend.remove(v)
victims.append(v)
continue
prevlen = len(vappend)
for v in vappend[:]:
if v in var.ROLES["bodyguard"] and bodyguard.GUARDED.get(v) not in vappend:
vappend.remove(v)
victims.append(v)
elif harlot.VISITED.get(v) not in vappend:
vappend.remove(v)
victims.append(v)
message = defaultdict(list)
message["*"].append(messages["sunrise"].format(minimum, sec))
dead = []
vlist = victims[:]
revt = Event("transition_day_resolve", {
"message": message,
"novictmsg": True,
"dead": dead,
"killers": killers,
})
# transition_day_resolve priorities:
# 1: target not home
# 2: protection
# 6: riders on default logic
# In general, an event listener < 6 should both stop propagation and prevent default
# Priority 6 listeners add additional stuff to the default action and should not prevent default
for victim in vlist:
if not revt.dispatch(var, victim):
continue
if victim not in revt.data["dead"]: # not already dead via some other means
for killer in list(killers[victim]):
if killer == "@wolves":
attacker = None
role = "wolf"
else:
attacker = killer
role = get_main_role(killer)
protected = try_protection(var, victim, attacker, role, reason="night_death")
if protected is not None:
revt.data["message"][victim].extend(protected)
killers[victim].remove(killer)
revt.data["novictmsg"] = False
if not killers[victim]:
continue
if var.ROLE_REVEAL in ("on", "team"):
role = get_reveal_role(victim)
an = "n" if role.startswith(("a", "e", "i", "o", "u")) else ""
revt.data["message"][victim].append(messages["death"].format(victim, an, role))
else:
revt.data["message"][victim].append(messages["death_no_reveal"].format(victim))
revt.data["dead"].append(victim)
# Priorities:
# 1 = harlot/succubus visiting victim (things that kill the role itself)
# 2 = howl/novictmsg processing, alpha wolf bite/lycan turning (roleswaps)
# 3 = harlot visiting wolf, bodyguard/GA guarding wolf (things that kill the role itself -- should move to pri 1)
# 4 = gunner shooting wolf, retribution totem (things that kill the victim's killers)
# 5 = wolves killing diseased, wolves stealing gun (all deaths must be finalized before pri 5)
# Note that changing the "novictmsg" data item only makes sense for priority 2 events,
# as after that point the message was already added (at priority 2.9).
revt2 = Event("transition_day_resolve_end", {
"message": message,
"novictmsg": revt.data["novictmsg"],
"howl": 0,
"dead": dead,
"killers": killers,
})
revt2.dispatch(var, victims)
# flatten message, * goes first then everyone else
to_send = message["*"]
del message["*"]
for msg in message.values():
to_send.extend(msg)
if random.random() < var.GIF_CHANCE:
to_send.append(random.choice(
["https://i.imgur.com/nO8rZ.gifv",
"https://i.imgur.com/uGVfZ.gifv",
"https://i.imgur.com/mUcM09n.gifv",
"https://i.imgur.com/b8HAvjL.gifv",
"https://i.imgur.com/PIIfL15.gifv",
"https://i.imgur.com/nly0Cmm.gifv"]
))
channels.Main.send("\n".join(to_send))
# chilling howl message was played, give roles the opportunity to update !stats
# to account for this
event = Event("reconfigure_stats", {"new": []})
for i in range(revt2.data["howl"]):
newstats = set()
for rs in var.ROLE_STATS:
d = Counter(dict(rs))
event.data["new"] = [d]
event.dispatch(var, d, "howl")
for v in event.data["new"]:
if min(v.values()) >= 0:
newstats.add(frozenset(v.items()))
var.ROLE_STATS = frozenset(newstats)
killer_role = {}
for deadperson in dead:
if killers.get(deadperson):
killer = killers[deadperson][0]
if killer == "@wolves":
killer_role[deadperson] = "wolf"
else:
killer_role[deadperson] = get_main_role(killer)
else:
# no killers, so assume suicide
killer_role[deadperson] = get_main_role(deadperson)
for deadperson in dead:
add_dying(var, deadperson, killer_role[deadperson], "night_kill")
kill_players(var, end_game=False) # temporary hack; end_game=False also prevents kill_players from attempting phase transitions
event_end = Event("transition_day_end", {"begin_day": begin_day})
event_end.dispatch(var)
# make sure that we process ALL of the transition_day events before checking for game end
if chk_win(): # game ending
return
event_end.data["begin_day"]()
@event_listener("transition_day_resolve_end", priority=2.9)
def on_transition_day_resolve_end(evt, var, victims):
if evt.data["novictmsg"] and len(evt.data["dead"]) == 0:
evt.data["message"]["*"].append(random.choice(messages["no_victims"]) + messages["no_victims_append"])
for i in range(evt.data["howl"]):
evt.data["message"]["*"].append(messages["new_wolf"])
def chk_nightdone():
if var.PHASE != "night":
return
event = Event("chk_nightdone", {"actedcount": 0, "nightroles": [], "transition_day": transition_day})
event.dispatch(var)
actedcount = event.data["actedcount"]
# remove all instances of them if they are silenced (makes implementing the event easier)
nightroles = [p for p in event.data["nightroles"] if not is_silent(var, p)]
if var.PHASE == "night" and actedcount >= len(nightroles):
for x, t in var.TIMERS.items():
t[0].cancel()
var.TIMERS = {}
if var.PHASE == "night": # Double check
event.data["transition_day"]()
@hook("featurelist") # For multiple targets with PRIVMSG
def getfeatures(cli, nick, *rest):
for r in rest:
if r.startswith("TARGMAX="):
x = r[r.index("PRIVMSG:"):]
if "," in x:
l = x[x.index(":")+1:x.index(",")]
else:
l = x[x.index(":")+1:]
l = l.strip()
if not l or not l.isdigit():
continue
else:
var.MAX_PRIVMSG_TARGETS = int(l)
continue
if r.startswith("PREFIX="):
prefs = r[7:]
chp = []
nlp = []
finder = True
for char in prefs:
if char == "(":
continue
if char == ")":
finder = False
continue
if finder:
chp.append(char)
else:
nlp.append(char)
allp = zip(chp, nlp)
var.MODES_PREFIXES = {}
for combo in allp:
var.MODES_PREFIXES[combo[1]] = combo[0] # For some reason this needs to be backwards
var.AUTO_TOGGLE_MODES = set(var.AUTO_TOGGLE_MODES)
if var.AUTO_TOGGLE_MODES: # this is ugly, but I'm too lazy to fix it. it works, so that's fine
tocheck = set(var.AUTO_TOGGLE_MODES)
for mode in tocheck:
if not mode in var.MODES_PREFIXES.keys() and not mode in var.MODES_PREFIXES.values():
var.AUTO_TOGGLE_MODES.remove(mode)
continue
if not mode in var.MODES_PREFIXES.values():
for chp in var.MODES_PREFIXES.keys():
| |
# clevr-dataset-gen/question_generation/question_engine.py
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import json, os, math
from collections import defaultdict
"""
Utilities for working with function program representations of questions.
Some of the metadata about what question node types are available etc are stored
in a JSON metadata file.
"""
# Handlers for answering questions. Each handler receives the scene structure
# that was output from Blender, the node, and a list of values that were output
# from each of the node's inputs; the handler should return the computed output
# value from this node.
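# For orientation, a question's function program is evaluated by looking up each
# node's handler and feeding it the outputs of the node's input nodes; the
# dispatch step (the full executor is assumed to live elsewhere in this module)
# looks roughly like:
#
#     handler = execute_handlers[node_type]
#     value = handler(scene_struct,
#                     [node_outputs[idx] for idx in node['inputs']],
#                     node.get('side_inputs', []))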
def scene_handler(scene_struct, inputs, side_inputs):
# Just return all objects in the scene
return list(range(len(scene_struct['objects'])))
def make_filter_handler(attribute):
def filter_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
value = side_inputs[0]
output = []
for idx in inputs[0]:
atr = scene_struct['objects'][idx][attribute]
if value == atr or value in atr:
output.append(idx)
return output
return filter_handler
def unique_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
if len(inputs[0]) != 1:
return '__INVALID__'
return inputs[0][0]
def vg_relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
output = set()
for rel in scene_struct['relationships']:
if rel['predicate'] == side_inputs[0] and rel['subject_idx'] == inputs[0]:
output.add(rel['object_idx'])
return sorted(list(output))
def relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
relation = side_inputs[0]
return scene_struct['relationships'][relation][inputs[0]]
def union_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) | set(inputs[1])))
def intersect_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) & set(inputs[1])))
def count_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
if len(inputs[0]) == 1:
return ("Yes, it is ")
elif len(inputs[0]) == 0:
return ("No (we cannot find the thing meet the requirement)")
else:
return (len(inputs[0]))
def make_same_attr_handler(attribute):
def same_attr_handler(scene_struct, inputs, side_inputs):
cache_key = '_same_%s' % attribute
if cache_key not in scene_struct:
cache = {}
for i, obj1 in enumerate(scene_struct['objects']):
same = []
for j, obj2 in enumerate(scene_struct['objects']):
if i != j and obj1[attribute] == obj2[attribute]:
same.append(j)
cache[i] = same
scene_struct[cache_key] = cache
cache = scene_struct[cache_key]
assert len(inputs) == 1
assert len(side_inputs) == 0
return cache[inputs[0]]
return same_attr_handler
def make_query_handler(attribute):
def query_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
idx = inputs[0]
obj = scene_struct['objects'][idx]
assert attribute in obj
val = obj[attribute]
if type(val) == list and len(val) != 1:
return '__INVALID__'
elif type(val) == list and len(val) == 1:
return val[0]
else:
return val
return query_handler
def exist_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
return len(inputs[0]) > 0
def equal_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] == inputs[1]
def less_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] < inputs[1]
def greater_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] > inputs[1]
def generate_text_handler(scene_struct, inputs, side_inputs):
if inputs[0] == False:
return ("No (we cannot find the thing meet the requirement)")
elif inputs[0] == True:
return (str(str("Yes, It is ")+str(inputs[1])))
def text_handler_oh1(scene_struct, inputs, side_inputs):
if inputs[0] == "No (we cannot find the thing meet the requirement)":
return ("No (we cannot find the thing meet the requirement)")
elif inputs[0] == "Yes, it is ":
return (str(str("It is only one, which is ")+str(inputs[-1])))
else:
return(str(str("The number is ")+str(inputs[0])))
def text_handler_oh2(scene_struct, inputs, side_inputs):
return(inputs[0][0])
def text_handler_oh3(scene_struct, inputs, side_inputs):
if inputs[0] == False:
return ("We cannot find the thing meet the requirement")
elif inputs[0] == True:
return (str(str("It is ")+str(inputs[1])))
def text_handler_th1(scene_struct, inputs, side_inputs):
if inputs[0] == False:
return ("No (we cannot find the thing meet the requirement)")
elif inputs[0] == True:
return (str(str("Yes")))
def text_handler_oh4(scene_struct, inputs, side_inputs):
    # NOTE: the original compared against an undefined name ``child``; the
    # False case is assumed here, mirroring text_handler_th1 above.
    if inputs[0] == False:
return ("No (we cannot find the thing meet the requirement)")
elif inputs[0] == True:
return (str(str("Yes")))
def make_filter_handler1(scene_struct, inputs, side_inputs):
if inputs[0][0] == 'child':
temp_atr = scene_struct["objects"][int(inputs[0][1])]['size']
return temp_atr
if inputs[0][0] == 'parent':
return (str("No title"))
def filter_shape_count(scene_struct, inputs, side_inputs):
if len(inputs[0]) == 1:
return ("There is only one")
elif len(inputs[0]) == 0:
return ("No (we cannot find the thing meet the requirement)")
else:
return (len(inputs[0]))
def all_output(a):
    # Join the stripped item strings with commas. The original accumulated via
    # list.index(), which resets the result when duplicate values are present;
    # a plain join produces the same output without that bug.
    return ','.join(x.rstrip() for x in a)
def xxx(p1, p2, p3):
    # Factory for position-based handlers: p1 is the element category (shape),
    # p2 selects the left/right column test, and p3 is the human-readable
    # position label used in the returned messages.
    def XXX(scene_struct, inputs, side_inputs):
child = ['text', 'table', 'figure','list']
# parent = ['title', 'table_caption', 'figure_caption', 'list']
B=[]
T=""
output=[]
output_top = []
output_bottom = []
output_f = []
output_top_f = []
output_bottom_f = []
hint=""
for idx in inputs[0]:
T = scene_struct['objects'][idx]["shape"]
if T==p1:
B=scene_struct['objects'][idx]["bbox"]
if p2=="left" and (int(B[0])<200 and int(B[2])<300):
output.append(scene_struct['objects'][idx]["size"])
output_f += scene_struct['objects'][idx]["family"]
if int(B[3] < 400):
output_top.append(scene_struct['objects'][idx]["size"])
output_top_f += scene_struct['objects'][idx]["family"]
else:
output_bottom.append(scene_struct['objects'][idx]["size"])
output_bottom_f += scene_struct['objects'][idx]["family"]
elif p2=="right" and (int(B[0])>300 and int(B[2])<300):
output.append(scene_struct['objects'][idx]["size"])
output_f += scene_struct['objects'][idx]["family"]
if int(B[3] < 400):
output_top.append(scene_struct['objects'][idx]["size"])
output_top_f += scene_struct['objects'][idx]["family"]
else:
output_bottom.append(scene_struct['objects'][idx]["size"])
output_bottom_f += scene_struct['objects'][idx]["family"]
if int(B[2])>300:
hint="This page is Centered layout"
else:
hint= "There is no " +p1+" in the "+p3+" of page"
            # Collect the sizes of the related (family) objects
new = set()
new_t = set()
new_b = set()
for i in output_f:
new.add(scene_struct['objects'][int(i[1])]["size"])
for i in output_top_f:
new_t.add(scene_struct['objects'][int(i[1])]["size"])
for i in output_bottom_f:
new_b.add(scene_struct['objects'][int(i[1])]["size"])
new = list(new)
new_t = list(new_t)
new_b = list(new_b)
if p1 in child:
if (p3 == 'left' or p3 == 'right'):
if new ==[]:
return 'It is unclear'
else:
return all_output(new)
if p3 == 'top left' or p3 == 'top right':
if new_t == []:
return 'It is unclear'
else:
return all_output(new_t)
if p3 == 'bottom left' or p3 == 'bottom right':
if new_b == []:
return 'It is unclear'
else:
return all_output(new_b)
else:
if (p3 == 'left' or p3 == 'right'):
if output ==[]:
return hint
else:
return all_output(output)
if p3 == 'top left' or p3 == 'top right':
if output_top == []:
return "There is no " +p1+" in the "+p3+" of page"
else:
return all_output(output_top)
if p3 == 'bottom left' or p3 == 'bottom right':
if output_bottom == []:
return "There is no " +p1+" in the "+p3+" of page"
else:
return all_output(output_bottom)
return XXX
# def filter_shape_count(scene_struct, _output, side_inputs):
# temp_count = 0
# for i in _output:
# if scene_struct["objects"][int(i)]['shape'] == 'title':
# temp_count += 1
# if temp_count == 1:
# return ("There is only one")
# elif temp_count == 0:
# return ("No (we cannot find the thing meet the requirement)")
# else:
# return (temp_count)
# Register all of the answering handlers here.
# TODO maybe this would be cleaner with a function decorator that takes
# care of registration? Not sure. Also what if we want to reuse the same engine
# for different sets of node types?
execute_handlers = {
'make_position_title_l': xxx('title','left','left'),
'make_position_title_l_top': xxx('title','left','top left'),
'make_position_title_l_bottom': xxx('title','left','bottom left'),
'make_position_title_r': xxx('title','right','right'),
'make_position_title_r_top': xxx('title','right','top right'),
'make_position_title_r_bottom': xxx('title','right','bottom right'),
'make_position_text_l': xxx('text','left','left'),
'make_position_text_l_top': xxx('text','left','top left'),
'make_position_text_l_bottom': xxx('text','left','bottom left'),
'make_position_text_r': xxx('text','right','right'),
'make_position_text_r_top': xxx('text','right','top right'),
'make_position_text_r_bottom': xxx('text','right','bottom right'),
'make_position_table_caption_l': xxx('table_caption','left','right'),
'make_position_table_caption_l_top': xxx('table_caption','left','top right'),
'make_position_table_caption_l_bottom': xxx('table_caption','left','bottom right'),
'make_position_table_caption_r': xxx('table_caption','right','right'),
'make_position_table_caption_r_top': xxx('table_caption','right','top right'),
'make_position_table_caption_r_bottom': xxx('table_caption','right','bottom right'),
'make_position_table_l': xxx('table','left','left'),
'make_position_table_l_top': xxx('table','left','top left'),
'make_position_table_l_bottom': xxx('table','left','bottom left'),
'make_position_table_r': xxx('table','right','right'),
'make_position_table_r_top': xxx('table','right','top right'),
'make_position_table_r_bottom': xxx('table','right','bottom right'),
'make_position_list_l': xxx('list','left','left'),
'make_position_list_l_top': xxx('list','left','top left'),
'make_position_list_l_bottom': xxx('list','left','bottom left'),
'make_position_list_r': xxx('list','right','right'),
'make_position_list_r_top': xxx('list','right','top right'),
'make_position_list_r_bottom': xxx('list','right','bottom right'),
'make_position_figure_l': xxx('figure','left','left'),
'make_position_figure_l_top': xxx('figure','left','top left'),
'make_position_figure_l_bottom': xxx('figure','left','bottom left'),
'make_position_figure_r': xxx('figure','right','right'),
'make_position_figure_r_top': xxx('figure','right','top right'),
'make_position_figure_r_bottom': xxx('figure','right','bottom right'),
'make_position_figure_caption_l': xxx('figure_caption','left','left'),
'make_position_figure_caption_l_top': xxx('figure_caption','left','top left'),
'make_position_figure_caption_l_bottom': xxx('figure_caption','left','bottom left'),
'make_position_figure_caption_r': xxx('figure_caption','right','right'),
'make_position_figure_caption_r_top': xxx('figure_caption','right','top right'),
'make_position_figure_caption_r_bottom': xxx('figure_caption','right','bottom right'),
'filter_shape_count': filter_shape_count,
'make_filter_handler1': make_filter_handler1,
    # NOTE: 'text_handler_oh3' was mapped twice in the original dict; the later
    # entry wins, so the text_handler_oh4 binding is kept here only as a comment.
    # 'text_handler_oh3': text_handler_oh4,
'text_handler_oh3': text_handler_oh3,
'text_handler_th1': text_handler_th1,
'text_handler_oh2': text_handler_oh2,
'query_family':make_query_handler('family'),
'text_handler_oh1': text_handler_oh1,
'generate_text': generate_text_handler,
'scene': scene_handler,
'filter_color': make_filter_handler('color'),
    'filter_shape': make_filter_handler('shape'),
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import json
import logging
from unittest.mock import Mock, patch
from argparse import Namespace
import pytest
from bigchaindb import ValidatorElection
from bigchaindb.commands.bigchaindb import run_election_show
from bigchaindb.elections.election import Election
from bigchaindb.lib import Block
from bigchaindb.migrations.chain_migration_election import ChainMigrationElection
from tests.utils import generate_election, generate_validators
def test_make_sure_we_dont_remove_any_command():
# thanks to: http://stackoverflow.com/a/18161115/597097
from bigchaindb.commands.bigchaindb import create_parser
parser = create_parser()
assert parser.parse_args(['configure', 'localmongodb']).command
assert parser.parse_args(['show-config']).command
assert parser.parse_args(['init']).command
assert parser.parse_args(['drop']).command
assert parser.parse_args(['start']).command
assert parser.parse_args(['election', 'new', 'upsert-validator', 'TEMP_PUB_KEYPAIR', '10', 'TEMP_NODE_ID',
'--private-key', 'TEMP_PATH_TO_PRIVATE_KEY']).command
assert parser.parse_args(['election', 'new', 'chain-migration',
'--private-key', 'TEMP_PATH_TO_PRIVATE_KEY']).command
assert parser.parse_args(['election', 'approve', 'ELECTION_ID', '--private-key',
'TEMP_PATH_TO_PRIVATE_KEY']).command
assert parser.parse_args(['election', 'show', 'ELECTION_ID']).command
assert parser.parse_args(['tendermint-version']).command
@patch('bigchaindb.commands.utils.start')
def test_main_entrypoint(mock_start):
from bigchaindb.commands.bigchaindb import main
main()
assert mock_start.called
@patch('bigchaindb.log.setup_logging')
@patch('bigchaindb.commands.bigchaindb._run_init')
@patch('bigchaindb.config_utils.autoconfigure')
def test_bigchain_run_start(mock_setup_logging, mock_run_init,
mock_autoconfigure, mock_processes_start):
from bigchaindb.commands.bigchaindb import run_start
args = Namespace(config=None, yes=True,
skip_initialize_database=False)
run_start(args)
assert mock_setup_logging.called
# TODO Please beware, that if debugging, the "-s" switch for pytest will
# interfere with capsys.
# See related issue: https://github.com/pytest-dev/pytest/issues/128
@pytest.mark.usefixtures('ignore_local_config_file')
def test_bigchain_show_config(capsys):
from bigchaindb.commands.bigchaindb import run_show_config
args = Namespace(config=None)
_, _ = capsys.readouterr()
run_show_config(args)
output_config = json.loads(capsys.readouterr()[0])
# Note: This test passed previously because we were always
# using the default configuration parameters, but since we
# are running with docker-compose now and expose parameters like
# BIGCHAINDB_SERVER_BIND, BIGCHAINDB_WSSERVER_HOST, BIGCHAINDB_WSSERVER_ADVERTISED_HOST
# the default comparison fails i.e. when config is imported at the beginning the
# dict returned is different that what is expected after run_show_config
# and run_show_config updates the bigchaindb.config
from bigchaindb import config
del config['CONFIGURED']
assert output_config == config
def test__run_init(mocker):
from bigchaindb.commands.bigchaindb import _run_init
bigchain_mock = mocker.patch(
'bigchaindb.commands.bigchaindb.bigchaindb.BigchainDB')
init_db_mock = mocker.patch(
'bigchaindb.commands.bigchaindb.schema.init_database',
autospec=True,
spec_set=True,
)
_run_init()
bigchain_mock.assert_called_once_with()
init_db_mock.assert_called_once_with(
connection=bigchain_mock.return_value.connection)
@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_when_assumed_yes(mock_db_drop):
from bigchaindb.commands.bigchaindb import run_drop
args = Namespace(config=None, yes=True)
run_drop(args)
assert mock_db_drop.called
@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_when_interactive_yes(mock_db_drop, monkeypatch):
from bigchaindb.commands.bigchaindb import run_drop
args = Namespace(config=None, yes=False)
monkeypatch.setattr(
'bigchaindb.commands.bigchaindb.input_on_stderr', lambda x: 'y')
run_drop(args)
assert mock_db_drop.called
@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_when_db_does_not_exist(mock_db_drop, capsys):
from bigchaindb import config
from bigchaindb.commands.bigchaindb import run_drop
from bigchaindb.common.exceptions import DatabaseDoesNotExist
args = Namespace(config=None, yes=True)
mock_db_drop.side_effect = DatabaseDoesNotExist
run_drop(args)
output_message = capsys.readouterr()[1]
assert output_message == "Cannot drop '{name}'. The database does not exist.\n".format(
name=config['database']['name'])
@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_does_not_drop_when_interactive_no(mock_db_drop, monkeypatch):
from bigchaindb.commands.bigchaindb import run_drop
args = Namespace(config=None, yes=False)
monkeypatch.setattr(
'bigchaindb.commands.bigchaindb.input_on_stderr', lambda x: 'n')
run_drop(args)
assert not mock_db_drop.called
# TODO Beware if you are putting breakpoints in there, and using the '-s'
# switch with pytest. It will just hang. Seems related to the monkeypatching of
# input_on_stderr.
def test_run_configure_when_config_does_not_exist(monkeypatch,
mock_write_config,
mock_generate_key_pair,
mock_bigchaindb_backup_config):
from bigchaindb.commands.bigchaindb import run_configure
monkeypatch.setattr('os.path.exists', lambda path: False)
monkeypatch.setattr('builtins.input', lambda: '\n')
args = Namespace(config=None, backend='localmongodb', yes=True)
return_value = run_configure(args)
assert return_value is None
def test_run_configure_when_config_does_exist(monkeypatch,
mock_write_config,
mock_generate_key_pair,
mock_bigchaindb_backup_config):
value = {}
def mock_write_config(newconfig):
value['return'] = newconfig
from bigchaindb.commands.bigchaindb import run_configure
monkeypatch.setattr('os.path.exists', lambda path: True)
monkeypatch.setattr('builtins.input', lambda: '\n')
monkeypatch.setattr(
'bigchaindb.config_utils.write_config', mock_write_config)
args = Namespace(config=None, yes=None)
run_configure(args)
assert value == {}
@pytest.mark.skip
@pytest.mark.parametrize('backend', (
'localmongodb',
))
def test_run_configure_with_backend(backend, monkeypatch, mock_write_config):
import bigchaindb
from bigchaindb.commands.bigchaindb import run_configure
value = {}
def mock_write_config(new_config, filename=None):
value['return'] = new_config
monkeypatch.setattr('os.path.exists', lambda path: False)
monkeypatch.setattr('builtins.input', lambda: '\n')
monkeypatch.setattr('bigchaindb.config_utils.write_config',
mock_write_config)
args = Namespace(config=None, backend=backend, yes=True)
expected_config = bigchaindb.config
run_configure(args)
# update the expected config with the correct backend and keypair
backend_conf = getattr(bigchaindb, '_database_' + backend)
expected_config.update({'database': backend_conf,
'keypair': value['return']['keypair']})
assert value['return'] == expected_config
@patch('bigchaindb.commands.utils.start')
def test_calling_main(start_mock, monkeypatch):
from bigchaindb.commands.bigchaindb import main
argparser_mock = Mock()
parser = Mock()
subparsers = Mock()
subsubparsers = Mock()
subparsers.add_parser.return_value = subsubparsers
parser.add_subparsers.return_value = subparsers
argparser_mock.return_value = parser
monkeypatch.setattr('argparse.ArgumentParser', argparser_mock)
main()
assert argparser_mock.called is True
parser.add_subparsers.assert_called_with(title='Commands',
dest='command')
subparsers.add_parser.assert_any_call('configure',
help='Prepare the config file.')
subparsers.add_parser.assert_any_call('show-config',
help='Show the current '
'configuration')
subparsers.add_parser.assert_any_call('init', help='Init the database')
subparsers.add_parser.assert_any_call('drop', help='Drop the database')
subparsers.add_parser.assert_any_call('start', help='Start BigchainDB')
subparsers.add_parser.assert_any_call('tendermint-version',
help='Show the Tendermint supported '
'versions')
assert start_mock.called is True
@patch('bigchaindb.commands.bigchaindb.run_recover')
@patch('bigchaindb.start.start')
def test_recover_db_on_start(mock_run_recover,
mock_start,
mocked_setup_logging):
from bigchaindb.commands.bigchaindb import run_start
args = Namespace(config=None, yes=True,
skip_initialize_database=False)
run_start(args)
assert mock_run_recover.called
assert mock_start.called
@pytest.mark.bdb
def test_run_recover(b, alice, bob):
from bigchaindb.commands.bigchaindb import run_recover
from bigchaindb.models import Transaction
from bigchaindb.lib import Block
from bigchaindb.backend import query
tx1 = Transaction.create([alice.public_key],
[([alice.public_key], 1)],
asset={'cycle': 'hero'},
metadata={'name': 'hohenheim'}) \
.sign([alice.private_key])
tx2 = Transaction.create([bob.public_key],
[([bob.public_key], 1)],
asset={'cycle': 'hero'},
metadata={'name': 'hohenheim'}) \
.sign([bob.private_key])
# store the transactions
b.store_bulk_transactions([tx1, tx2])
# create a random block
block8 = Block(app_hash='random_app_hash1', height=8,
transactions=['txid_doesnt_matter'])._asdict()
b.store_block(block8)
# create the next block
block9 = Block(app_hash='random_app_hash1', height=9,
transactions=[tx1.id])._asdict()
b.store_block(block9)
# create a pre_commit state which is ahead of the commit state
pre_commit_state = dict(height=10, transactions=[tx2.id])
b.store_pre_commit_state(pre_commit_state)
run_recover(b)
assert not query.get_transaction(b.connection, tx2.id)
# Helper
class MockResponse():
def __init__(self, height):
self.height = height
def json(self):
return {'result': {'latest_block_height': self.height}}
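# MockResponse mimics the JSON shape of Tendermint's status RPC. A test that
# needs a fake latest block height could (hypothetically) patch the HTTP call
# along these lines; the exact patch target depends on how the code under test
# issues the request:
#
#     monkeypatch.setattr('requests.get', lambda *a, **kw: MockResponse(10))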
@pytest.mark.abci
def test_election_new_upsert_validator_with_tendermint(b, priv_validator_path, user_sk, validators):
from bigchaindb.commands.bigchaindb import run_election_new_upsert_validator
new_args = Namespace(action='new',
election_type='upsert-validator',
                         public_key='<KEY>',
power=1,
node_id='unique_node_id_for_test_upsert_validator_new_with_tendermint',
sk=priv_validator_path,
config={})
election_id = run_election_new_upsert_validator(new_args, b)
assert b.get_transaction(election_id)
@pytest.mark.bdb
def test_election_new_upsert_validator_without_tendermint(caplog, b, priv_validator_path, user_sk):
from bigchaindb.commands.bigchaindb import run_election_new_upsert_validator
def mock_write(tx, mode):
b.store_bulk_transactions([tx])
return (202, '')
b.get_validators = mock_get_validators
b.write_transaction = mock_write
args = Namespace(action='new',
election_type='upsert-validator',
                     public_key='<KEY>',
power=1,
node_id='fb7140f03a4ffad899fabbbf655b97e0321add66',
sk=priv_validator_path,
config={})
with caplog.at_level(logging.INFO):
election_id = run_election_new_upsert_validator(args, b)
assert caplog.records[0].msg == '[SUCCESS] Submitted proposal with id: ' + election_id
assert b.get_transaction(election_id)
@pytest.mark.abci
def test_election_new_chain_migration_with_tendermint(b, priv_validator_path, user_sk, validators):
from bigchaindb.commands.bigchaindb import run_election_new_chain_migration
new_args = Namespace(action='new',
election_type='migration',
sk=priv_validator_path,
config={})
election_id = run_election_new_chain_migration(new_args, b)
assert b.get_transaction(election_id)
@pytest.mark.bdb
def test_election_new_chain_migration_without_tendermint(caplog, b, priv_validator_path, user_sk):
from bigchaindb.commands.bigchaindb import run_election_new_chain_migration
def mock_write(tx, mode):
b.store_bulk_transactions([tx])
return (202, '')
b.get_validators = mock_get_validators
b.write_transaction = mock_write
args = Namespace(action='new',
election_type='migration',
sk=priv_validator_path,
config={})
with caplog.at_level(logging.INFO):
election_id = run_election_new_chain_migration(args, b)
assert caplog.records[0].msg == '[SUCCESS] Submitted proposal with id: ' + election_id
assert b.get_transaction(election_id)
@pytest.mark.bdb
def test_election_new_upsert_validator_invalid_election(caplog, b, priv_validator_path, user_sk):
from bigchaindb.commands.bigchaindb import run_election_new_upsert_validator
args = Namespace(action='new',
election_type='upsert-validator',
                     public_key='<KEY>',
power=10,
node_id='fb7140f03a4ffad899fabbbf655b97e0321add66',
sk='/tmp/invalid/path/key.json',
config={})
with caplog.at_level(logging.ERROR):
assert not run_election_new_upsert_validator(args, b)
assert caplog.records[0].msg.__class__ == FileNotFoundError
@pytest.mark.bdb
def test_election_new_upsert_validator_invalid_power(caplog, b, priv_validator_path, user_sk):
from bigchaindb.commands.bigchaindb import run_election_new_upsert_validator
from bigchaindb.common.exceptions import InvalidPowerChange
def mock_write(tx, mode):
b.store_bulk_transactions([tx])
return (400, '')
b.write_transaction = mock_write
b.get_validators = mock_get_validators
args = Namespace(action='new',
election_type='upsert-validator',
public_key='<KEY>',
power=10,
node_id='fb7140f03a4ffad899fabbbf655b97e0321add66',
sk=priv_validator_path,
config={})
with caplog.at_level(logging.ERROR):
assert not run_election_new_upsert_validator(args, b)
assert caplog.records[0].msg.__class__ == InvalidPowerChange
@pytest.mark.abci
def test_election_approve_with_tendermint(b, priv_validator_path, user_sk, validators):
from bigchaindb.commands.bigchaindb import (run_election_new_upsert_validator,
run_election_approve)
public_key = '<KEY>'
new_args = Namespace(action='new',
election_type='upsert-validator',
public_key=public_key,
power=1,
node_id='fb7140f03a4ffad899fabbbf655b97e0321add66',
sk=priv_validator_path,
config={})
election_id = run_election_new_upsert_validator(new_args, b)
assert election_id
args = Namespace(action='approve',
election_id=election_id,
sk=priv_validator_path,
config={})
approve = run_election_approve(args, b)
assert b.get_transaction(approve)
@pytest.mark.bdb
def test_election_approve_without_tendermint(caplog, b, priv_validator_path, new_validator, node_key):
from bigchaindb.commands.bigchaindb import run_election_approve
from argparse import Namespace
b, election_id = call_election(b, new_validator, node_key)
# call run_election_approve with args that point to the election
args = Namespace(action='approve',
election_id=election_id,
sk=priv_validator_path,
config={})
# assert returned id is in the db
with caplog.at_level(logging.INFO):
approval_id = run_election_approve(args, b)
assert caplog.records[0].msg == '[SUCCESS] Your vote has been submitted'
assert b.get_transaction(approval_id)
@pytest.mark.bdb
def test_election_approve_failure(caplog, b, priv_validator_path, new_validator, node_key):
from bigchaindb.commands.bigchaindb import run_election_approve
from argparse import Namespace
b, election_id = call_election(b, new_validator, node_key)
def mock_write(tx, mode):
b.store_bulk_transactions([tx])
return (400, '')
b.write_transaction = mock_write
# call run_election_approve with args that point to the election
args = Namespace(action='approve',
election_id=election_id,
sk=priv_validator_path,
config={})
with caplog.at_level(logging.ERROR):
assert not run_election_approve(args, b)
assert caplog.records[0].msg == 'Failed to commit vote'
@pytest.mark.bdb
def test_election_approve_called_with_bad_key(caplog, b, bad_validator_path, new_validator, node_key):
from bigchaindb.commands.bigchaindb import run_election_approve
from argparse import Namespace
b, election_id = call_election(b, new_validator, node_key)
# call run_election_approve with args that point to the election, but a bad signing key
args = Namespace(action='approve',
election_id=election_id,
sk=bad_validator_path,
config={})
with caplog.at_level(logging.ERROR):
assert not run_election_approve(args, b)
assert caplog.records[0].msg == 'The key you provided does not match any of '\
'the eligible voters in this election.'
@pytest.mark.bdb
def test_chain_migration_election_show_shows_inconclusive(b):
validators = generate_validators([1] * 4)
b.store_validator_set(1, [v['storage'] for v in validators])
public_key = validators[0]['public_key']
private_key = validators[0]['private_key']
voter_keys = [v['private_key'] for v in validators]
election, votes = generate_election(b,
ChainMigrationElection,
public_key, private_key,
{},
voter_keys)
assert not run_election_show(Namespace(election_id=election.id), b)
Election.process_block(b, 1, [election])
b.store_bulk_transactions([election])
assert run_election_show(Namespace(election_id=election.id), b) == \
'status=ongoing'
b.store_block(Block(height=1, transactions=[], app_hash='')._asdict())
b.store_validator_set(2, [v['storage'] for v in validators])
assert run_election_show(Namespace(election_id=election.id), b) == \
'status=ongoing'
b.store_block(Block(height=2, transactions=[], app_hash='')._asdict())
# TODO insert yet another block here when upgrading to Tendermint 0.22.4.
assert run_election_show(Namespace(election_id=election.id), b) == \
'status=inconclusive'
@pytest.mark.bdb
def test_chain_migration_election_show_shows_concluded(b):
validators = generate_validators([1] * 4)
b.store_validator_set(1, [v['storage'] for v in validators])
public_key = validators[0]['public_key']
private_key = validators[0]['private_key']
voter_keys = [v['private_key'] for v in validators]
election, votes = generate_election(b,
ChainMigrationElection,
public_key, private_key,
{},
voter_keys)
assert not run_election_show(Namespace(election_id=election.id), b)
b.store_bulk_transactions([election])
Election.process_block(b, 1, [election])
assert run_election_show(Namespace(election_id=election.id), b) == \
'status=ongoing'
b.store_abci_chain(1, 'chain-X')
b.store_block(Block(height=1,
transactions=[v.id for v in votes],
app_hash='last_app_hash')._asdict())
Election.process_block(b, 2, votes)
assert run_election_show(Namespace(election_id=election.id), b) == \
f'''status=concluded
chain_id=chain-X-migrated-at-height-1
app_hash=last_app_hash
validators=[{''.join([f"""
{{
"pub_key": {{
"type": "tendermint/PubKeyEd25519",
"value": "{v['public_key']}"
}},
"power": {v['storage']['voting_power']}
}}{',' if i + | |
positive
"""
N = ZZ(N)
if N <= 0:
raise ValueError("N must be positive")
c = []
for d in divisors(N):
n = num_cusps_of_width(N, d)
if n == 1:
c.append(CuspFamily(N, d))
elif n > 1:
for i in range(n):
c.append(CuspFamily(N, d, label=str(i + 1)))
return c
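# Quick sanity-check sketch (not part of the original module): by construction,
# the number of cusp families returned by AllCusps(N) equals the sum of
# num_cusps_of_width(N, d) over the divisors d of N, e.g. in a Sage session:
#
#   sage: N = 18
#   sage: len(AllCusps(N)) == sum(num_cusps_of_width(N, d) for d in divisors(N))
#   True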
@richcmp_method
class CuspFamily(SageObject):
r"""
A family of elliptic curves parametrising a region of `X_0(N)`.
"""
def __init__(self, N, width, label=None):
r"""
Create the cusp of width d on X_0(N) corresponding to the family
of Tate curves `(\CC_p/q^d, \langle \zeta q\rangle)`.
Here `\zeta` is a primitive root of unity of order `r` with
`\mathrm{lcm}(r,d) = N`. The cusp does not store zeta, so we
store an arbitrary label instead.
EXAMPLES::
sage: CuspFamily(8, 4)
(c_{4})
sage: CuspFamily(16, 4, '1')
(c_{4,1})
"""
N = ZZ(N)
if N <= 0:
raise ValueError("N must be positive")
self._N = N
self._width = width
if N % width:
raise ValueError("bad width")
if num_cusps_of_width(N, width) > 1 and label is None:
raise ValueError("there are %s > 1 cusps of width %s on X_0(%s): specify a label" % (num_cusps_of_width(N, width), width, N))
if num_cusps_of_width(N, width) == 1 and label is not None:
raise ValueError("there is only one cusp of width %s on X_0(%s): no need to specify a label" % (width, N))
self.label = label
@property
def __tuple(self):
"""
The defining data of this ``CuspFamily`` as a tuple, used for
comparisons.
"""
return (self._N, self._width, self.label)
def __richcmp__(self, other, op) -> bool:
"""
EXAMPLES::
sage: a = CuspFamily(16, 4, "1"); a
(c_{4,1})
sage: b = CuspFamily(16, 4, "2"); b
(c_{4,2})
sage: c = CuspFamily(8, 8); c
(0)
sage: a == a
True
sage: a == b
False
sage: a != b
True
sage: a == c
False
sage: a < c
False
sage: a > c
True
sage: a != "foo"
True
"""
if not isinstance(other, CuspFamily):
return NotImplemented
return richcmp(self.__tuple, other.__tuple, op)
def __hash__(self):
"""
EXAMPLES::
sage: hash(CuspFamily(10, 1)) # random
-4769758480201659164
"""
return hash(self.__tuple)
def width(self) -> Integer:
r"""
Return the width of this cusp.
EXAMPLES::
sage: e = CuspFamily(10, 1)
sage: e.width()
1
"""
return self._width
def level(self) -> Integer:
r"""
Return the level of this cusp.
EXAMPLES::
sage: e = CuspFamily(10, 1)
sage: e.level()
10
"""
return self._N
def sage_cusp(self):
r"""
Return the corresponding element of `\mathbb{P}^1(\QQ)`.
EXAMPLES::
sage: CuspFamily(10, 1).sage_cusp() # not implemented
Infinity
"""
raise NotImplementedError
def _repr_(self) -> str:
r"""
Return a string representation of ``self``.
EXAMPLES::
sage: CuspFamily(16, 4, "1")._repr_()
'(c_{4,1})'
"""
if self.width() == 1:
return "(Inf)"
elif self.width() == self.level():
return "(0)"
return "(c_{%s%s})" % (self.width(), ((self.label and ("," + self.label)) or ""))
def qexp_eta(ps_ring, prec):
r"""
Return the q-expansion of `\eta(q) / q^{1/24}`.
Here `\eta(q)` is Dedekind's function
.. MATH::
\eta(q) = q^{1/24}\prod_{n=1}^\infty (1-q^n).
The result is an element of ``ps_ring``, with precision ``prec``.
INPUT:
- ``ps_ring`` -- (PowerSeriesRing): a power series ring
- ``prec`` -- (integer): the number of terms to compute
OUTPUT: An element of ps_ring which is the q-expansion of
`\eta(q)/q^{1/24}` truncated to prec terms.
ALGORITHM: We use the Euler identity
.. MATH::
\eta(q) = q^{1/24}\left( 1 + \sum_{n \ge 1} (-1)^n (q^{n(3n+1)/2} + q^{n(3n-1)/2}) \right)
to compute the expansion.
EXAMPLES::
sage: from sage.modular.etaproducts import qexp_eta
sage: qexp_eta(ZZ[['q']], 100)
1 - q - q^2 + q^5 + q^7 - q^12 - q^15 + q^22 + q^26 - q^35 - q^40 + q^51 + q^57 - q^70 - q^77 + q^92 + O(q^100)
"""
prec = Integer(prec)
if not prec > 0:
raise ValueError("prec must be a positive integer")
v = [Integer(0)] * prec
pm = Integer(1)
v[0] = pm
try:
n = 1
while True:
pm = -pm
v[n * (3 * n - 1) // 2] = pm
v[n * (3 * n + 1) // 2] = pm
n += 1
except IndexError:
pass
return ps_ring(v, prec=prec)
def eta_poly_relations(eta_elements, degree, labels=['x1', 'x2'],
verbose=False):
r"""
Find polynomial relations between eta products.
INPUT:
- ``eta_elements`` - (list): a list of EtaGroupElement objects.
Not implemented unless this list has precisely two elements.
- ``degree`` - (integer): the maximal degree of polynomial to look for.
- ``labels`` - (list of strings): labels to use for the polynomial returned.
- ``verbose`` - (boolean, default False): if True, prints information as
it goes.
OUTPUT: a list of polynomials which is a Groebner basis for the
part of the ideal of relations between eta_elements which is
generated by elements up to the given degree; or None, if no
relations were found.
ALGORITHM: An expression of the form
`\sum_{0 \le i,j \le d} a_{ij} x^i y^j` is zero if and
only if it vanishes at the cusp infinity to degree at least
`v = d(deg(x) + deg(y))`. For all terms up to `q^v`
in the `q`-expansion of this expression to be zero is a
system of `v + k` linear equations in `d^2`
coefficients, where `k` is the number of nonzero negative
coefficients that can appear.
Solving these equations and calculating a basis for the solution
space gives us a set of polynomial relations, but this is generally
far from a minimal generating set for the ideal, so we calculate a
Groebner basis.
As a test, we calculate five extra terms of `q`-expansion
and check that this doesn't change the answer.
EXAMPLES::
sage: from sage.modular.etaproducts import eta_poly_relations
sage: t = EtaProduct(26, {2:2,13:2,26:-2,1:-2})
sage: u = EtaProduct(26, {2:4,13:2,26:-4,1:-2})
sage: eta_poly_relations([t, u], 3)
sage: eta_poly_relations([t, u], 4)
[x1^3*x2 - 13*x1^3 - 4*x1^2*x2 - 4*x1*x2 - x2^2 + x2]
Use ``verbose=True`` to see the details of the computation::
sage: eta_poly_relations([t, u], 3, verbose=True)
Trying to find a relation of degree 3
Lowest order of a term at infinity = -12
Highest possible degree of a term = 15
Trying all coefficients from q^-12 to q^15 inclusive
No polynomial relation of order 3 valid for 28 terms
Check:
Trying all coefficients from q^-12 to q^20 inclusive
No polynomial relation of order 3 valid for 33 terms
::
sage: eta_poly_relations([t, u], 4, verbose=True)
Trying to find a relation of degree 4
Lowest order of a term at infinity = -16
Highest possible degree of a term = 20
Trying all coefficients from q^-16 to q^20 inclusive
Check:
Trying all coefficients from q^-16 to q^25 inclusive
[x1^3*x2 - 13*x1^3 - 4*x1^2*x2 - 4*x1*x2 - x2^2 + x2]
"""
if len(eta_elements) > 2:
raise NotImplementedError("do not know how to find relations between more than two elements")
eta1, eta2 = eta_elements
if verbose:
print("Trying to find a relation of degree %s" % degree)
inf = CuspFamily(eta1.level(), 1)
loterm = -(min([0, eta1.order_at_cusp(inf)]) + min([0, eta2.order_at_cusp(inf)])) * degree
if verbose:
print("Lowest order of a term at infinity = %s" % -loterm)
maxdeg = sum([eta1.degree(), eta2.degree()]) * degree
if verbose:
print("Highest possible degree of a term = %s" % maxdeg)
m = loterm + maxdeg + 1
oldgrob = _eta_relations_helper(eta1, eta2, degree, m, labels, verbose)
if verbose:
print("Check:")
newgrob = _eta_relations_helper(eta1, eta2, degree, m + 5, labels, verbose)
if oldgrob != newgrob:
raise ArithmeticError("Check: answers different!")
return newgrob
def _eta_relations_helper(eta1, eta2, degree, qexp_terms, labels, verbose):
r"""
Helper function used by eta_poly_relations. Finds a basis for the
space of linear relations between the first qexp_terms of the
`q`-expansions of the monomials
`\eta_1^i * \eta_2^j` for `0 \le i,j < degree`,
and calculates a Groebner basis for the ideal generated by these
relations.
Liable to return meaningless results if qexp_terms isn't at least
`1 + d*(m_1,m_2)` where
.. MATH::
m_i = \min(0, \text{degree of the pole of } \eta_i \text{ at } \infty)
as then 1 will be in the ideal.
EXAMPLES::
sage: from sage.modular.etaproducts import _eta_relations_helper
sage: r,s = EtaGroup(4).basis()
sage: _eta_relations_helper(r,s,4,100,['a','b'],False)
[a + 1/16*b - 1/16]
sage: _eta_relations_helper(EtaProduct(26, {2:2,13:2,26:-2,1:-2}),EtaProduct(26, {2:4,13:2,26:-4,1:-2}),3,12,['a','b'],False) # not enough terms, will return rubbish
[1]
"""
indices = [(i, j) for j in range(degree) for i in range(degree)]
inf = CuspFamily(eta1.level(), | |
images
# need to make two passes/invocations of this method: one
# with push_late=False and one with push_late=True.
is_late_push = False
if self.config.push.late is not Missing:
is_late_push = self.config.push.late
if push_late != is_late_push:
return (self.metadata.distgit_key, True)
push_names = []
if push_to_defaults:
push_names.extend(self.metadata.get_default_push_names())
push_names.extend(self.metadata.get_additional_push_names(additional_registries))
# Nothing to push to? We are done.
if not push_names:
return (self.metadata.distgit_key, True)
# The registry config JSON file must be resolved before entering Dir(self.distgit_dir),
# so that a relative path or an env var like DOCKER_CONFIG does not end up pointing into the distgit dir.
registry_config_file = ''
if registry_config_dir is not None:
registry_config_file = util.get_docker_config_json(registry_config_dir)
with Dir(self.distgit_dir):
if version_release_tuple:
version = version_release_tuple[0]
release = version_release_tuple[1]
else:
# History
# We used to rely on the "release" label being set in the Dockerfile, but this is problematic for several reasons.
# (1) If 'release' is not set, OSBS will determine one automatically that does not conflict
# with a pre-existing image build. This is extremely helpful since we don't have to
# worry about bumping the release during image refreshes. This means we generally DON'T
# want the release label in the file and can't, therefore, rely on it being there.
# (2) People have logged into distgit before in order to bump the release field. This happening
# at the wrong time breaks the build.
# If the version & release information was not specified,
# try to detect latest build from brew.
# Read in version information from the Distgit dockerfile
_, version, release = self.metadata.get_latest_build_info()
image_name_and_version = "%s:%s-%s" % (self.config.name, version, release)
brew_image_url = self.runtime.resolve_brew_image_url(image_name_and_version)
push_tags = list(tag_list)
# If no tags were specified, build defaults
if not push_tags:
push_tags = self.metadata.get_default_push_tags(version, release)
all_push_urls = []
for image_name in push_names:
try:
repo = image_name.split('/', 1)
action = "push"
record = {
"distgit_key": self.metadata.distgit_key,
"distgit": '{}/{}'.format(self.metadata.namespace, self.metadata.name),
"repo": repo, # ns/repo
"name": image_name, # full registry/ns/repo
"version": version,
"release": release,
"message": "Unknown failure",
"tags": ", ".join(push_tags),
"status": -1,
# Status defaults to failure until explicitly set by success. This handles raised exceptions.
}
for push_tag in push_tags:
# Collect next SRC=DEST input
url = '{}:{}'.format(image_name, push_tag)
self.logger.info("Adding '{}' to push list".format(url))
all_push_urls.append("{}={}".format(brew_image_url, url))
if dry_run:
for push_url in all_push_urls:
self.logger.info('Would have tagged {} as {}'.format(brew_image_url, push_url.split('=')[1]))
dr = "--dry-run=true"
else:
dr = ""
if self.runtime.group_config.insecure_source:
insecure = "--insecure=true"
else:
insecure = ""
push_config_dir = os.path.join(self.runtime.working_dir, 'push')
if not os.path.isdir(push_config_dir):
try:
os.mkdir(push_config_dir)
except OSError as e:
# The directory already exists; another thread created it first, which is fine.
if e.errno == errno.EEXIST and os.path.isdir(push_config_dir):
pass
else:
raise
push_config = os.path.join(push_config_dir, self.metadata.distgit_key)
if os.path.isfile(push_config):
# just delete it to ease creating new config
os.remove(push_config)
with io.open(push_config, 'w', encoding="utf-8") as pc:
pc.write('\n'.join(all_push_urls))
mirror_cmd = 'oc image mirror '
if filter_by_os is not None:
mirror_cmd += "--filter-by-os={}".format(filter_by_os)
mirror_cmd += " {} {} --filename={}".format(dr, insecure, push_config)
if registry_config_file != '':
mirror_cmd += f" --registry-config={registry_config_file}"
if dry_run: # skip everything else if dry run
continue
else:
for r in range(10):
self.logger.info("Mirroring image [retry={}]".format(r))
rc, out, err = exectools.cmd_gather(mirror_cmd)
if rc == 0:
break
self.logger.info("Error mirroring image -- retrying in 60 seconds.\n{}".format(err))
time.sleep(60)
lstate = self.runtime.state[self.runtime.command] if self.runtime.command == 'images:push' else None
if rc != 0:
if lstate:
state.record_image_fail(lstate, self.metadata, 'Build failure', self.runtime.logger)
# Unable to push to registry
raise IOError('Error pushing image: {}'.format(err))
else:
if lstate:
state.record_image_success(lstate, self.metadata)
self.logger.info('Success mirroring image')
record["message"] = "Successfully pushed all tags"
record["status"] = 0
except Exception as ex:
lstate = self.runtime.state[self.runtime.command] if self.runtime.command == 'images:push' else None
if lstate:
state.record_image_fail(lstate, self.metadata, str(ex), self.runtime.logger)
record["message"] = "Exception occurred: %s" % str(ex)
self.logger.info("Error pushing %s: %s" % (self.metadata.distgit_key, ex))
raise
finally:
self.runtime.add_record(action, **record)
return (self.metadata.distgit_key, True)
def wait_for_build(self, who_is_waiting):
"""
Blocks the calling thread until this image has been built by doozer or throws an exception if this
image cannot be built.
:param who_is_waiting: The caller's distgit_key (i.e. the waiting image).
:return: Returns when the image has been built or throws an exception if the image could not be built.
"""
self.logger.info("Member waiting for me to build: %s" % who_is_waiting)
# This lock is in an acquired state until this image definitively succeeds or fails.
# It is then released. Child images waiting on this image should block here.
with self.build_lock:
if not self.build_status:
raise IOError(
"Error building image: %s (%s was waiting)" % (self.metadata.qualified_name, who_is_waiting))
else:
self.logger.info("Member successfully waited for me to build: %s" % who_is_waiting)
def _set_wait_for(self, image_name, terminate_event):
image = self.runtime.resolve_image(image_name, False)
if image is None:
self.logger.info("Skipping image build since it is not included: %s" % image_name)
return
parent_dgr = image.distgit_repo()
parent_dgr.wait_for_build(self.metadata.qualified_name)
if terminate_event.is_set():
raise KeyboardInterrupt()
def wait_for_rebase(self, image_name, terminate_event):
""" Wait for image_name to be rebased. """
image = self.runtime.resolve_image(image_name, False)
if image is None:
self.logger.info("Skipping image build since it is not included: %s" % image_name)
return
dgr = image.distgit_repo()
dgr.rebase_event.wait()
if not dgr.rebase_status: # failed to rebase
raise IOError(f"Error building image: {self.metadata.qualified_name} ({image_name} was waiting)")
if terminate_event.is_set():
raise KeyboardInterrupt()
pass
def build_container(
self, profile, push_to_defaults, additional_registries, terminate_event,
scratch=False, retries=3, realtime=False, dry_run=False, registry_config_dir=None, filter_by_os=None):
"""
This method is designed to be thread-safe. Multiple builds should take place in brew
at the same time. After a build, images are pushed serially to all mirrors.
DON'T try to change cwd during this time; a cwd change would affect all active threads
:param profile: image build profile
:param push_to_defaults: If default registries should be pushed to.
:param additional_registries: A list of non-default registries resultant builds should be pushed to.
:param terminate_event: Allows the main thread to interrupt the build.
:param scratch: Whether this is a scratch build. UNTESTED.
:param retries: Number of times the build should be retried.
:return: True if the build was successful
"""
if self.org_image_name is None or self.org_version is None:
if not os.path.isfile(os.path.join(self.distgit_dir, 'Dockerfile')):
msg = ('No Dockerfile found in {}'.format(self.distgit_dir))
else:
msg = ('Unknown error loading Dockerfile information')
self.logger.info(msg)
state.record_image_fail(self.runtime.state[self.runtime.command], self.metadata, msg, self.runtime.logger)
return (self.metadata.distgit_key, False)
action = "build"
release = self.org_release if self.org_release is not None else '?'
record = {
"dir": self.distgit_dir,
"dockerfile": "%s/Dockerfile" % self.distgit_dir,
"distgit": self.metadata.distgit_key,
"image": self.org_image_name,
"owners": ",".join(list(self.config.owners) if self.config.owners is not Missing else []),
"version": self.org_version,
"release": release,
"message": "Unknown failure",
"task_id": "n/a",
"task_url": "n/a",
"status": -1,
"push_status": -1,
"has_olm_bundle": 1 if self.config['update-csv'] is not Missing else 0,
# Status defaults to failure until explicitly set by success. This handles raised exceptions.
}
if self.runtime.local and release == '?':
target_tag = self.org_version
else:
target_tag = "{}-{}".format(self.org_version, release)
target_image = ":".join((self.org_image_name, target_tag))
try:
# If this image is FROM another group member, we need to wait on that group member
# Use .get('from',None) since from is a reserved word.
image_from = Model(self.config.get('from', None))
if image_from.member is not Missing:
self._set_wait_for(image_from.member, terminate_event)
for builder in image_from.get('builder', []):
if 'member' in builder:
self._set_wait_for(builder['member'], terminate_event)
if self.runtime.assembly and util.isolate_assembly_in_release(release) != self.runtime.assembly:
# Assemblies should follow its naming convention
raise ValueError(f"Image {self.name} is not rebased with assembly '{self.runtime.assembly}'.")
# Allow an image to wait on an arbitrary image in the group. This is presently
# just a workaround for: https://projects.engineering.redhat.com/browse/OSBS-5592
if self.config.wait_for is not Missing:
self._set_wait_for(self.config.wait_for, terminate_event)
if self.runtime.local:
self.build_status = self._build_container_local(target_image, profile["repo_type"], realtime)
if not self.build_status:
state.record_image_fail(self.runtime.state[self.runtime.command], self.metadata, 'Build failure', self.runtime.logger)
else:
state.record_image_success(self.runtime.state[self.runtime.command], self.metadata)
return (self.metadata.distgit_key, self.build_status) # do nothing more since it's local only
else:
def wait(n):
self.logger.info("Async error in image build thread [attempt #{}]".format(n + 1))
# No need to retry if the failure will just recur
error = self._detect_permanent_build_failures(self.runtime.group_config.image_build_log_scanner)
if error is not None:
for match in re.finditer("No package (.*) available", error):
self._add_missing_pkgs(match.group(1))
raise exectools.RetryException(
"Saw permanent error in build logs:\n{}\nWill not retry after {} failed attempt(s)"
.format(error, n + 1)
)
# Brew does not handle an immediate retry correctly, wait
# before trying another build, terminating if interrupted.
if terminate_event.wait(timeout=5 * 60):
raise KeyboardInterrupt()
if len(self.metadata.targets) > 1:
# FIXME: Currently we don't really support building images against multiple targets,
| |
import csv
import dataclasses
import math
import pathlib
import threading
import time
from typing import Optional, Tuple, Dict, List, Callable, Union, Iterable, TypeVar, Sequence
import numpy as np
import stim
from .decoding import sample_decode_count_correct
from .probability_util import log_binomial, binary_search
CSV_HEADER = ",".join([
"data_width",
"data_height",
"rounds",
"noise",
"circuit_style",
"preserved_observable",
"code_distance",
"num_qubits",
"num_shots",
"num_correct",
"total_processing_seconds",
"decoder",
"version",
])
CSV_HEADER_VERSION = 2
@dataclasses.dataclass(frozen=True, unsafe_hash=True, order=True)
class DecodingProblemDesc:
# noinspection PyUnresolvedReferences
"""Succinct data summarizing a decoding problem.
Attributes:
data_width: The width of the grid of data qubits.
data_height: The height of the grid of data qubits.
code_distance: Number of physical errors required to cause a logical error.
num_qubits: Total number of noisy qubits in the system.
rounds: The number of times measurement qubits are measured.
noise: The strength of noise being applied.
circuit_style: Names the circuit being run.
preserved_observable: Names the observable being preserved against noise.
decoder: The name of the decoder used to correct errors.
"""
data_width: int
data_height: int
code_distance: int
num_qubits: int
rounds: int
noise: float
circuit_style: str
preserved_observable: str
decoder: str
def with_changes(self,
*,
data_width: Optional[int] = None,
data_height: Optional[int] = None,
code_distance: Optional[int] = None,
num_qubits: Optional[int] = None,
rounds: Optional[int] = None,
noise: Optional[float] = None,
circuit_style: Optional[str] = None,
preserved_observable: Optional[str] = None,
decoder: Optional[str] = None,
) -> 'DecodingProblemDesc':
return DecodingProblemDesc(
data_width=self.data_width if data_width is None else data_width,
data_height=self.data_height if data_height is None else data_height,
code_distance=self.code_distance if code_distance is None else code_distance,
num_qubits=self.num_qubits if num_qubits is None else num_qubits,
rounds=self.rounds if rounds is None else rounds,
noise=self.noise if noise is None else noise,
circuit_style=self.circuit_style if circuit_style is None else circuit_style,
preserved_observable=self.preserved_observable if preserved_observable is None else preserved_observable,
decoder=self.decoder if decoder is None else decoder,
)
@dataclasses.dataclass
class DecodingProblem:
# noinspection PyUnresolvedReferences
"""Defines a decoding problem to sample from.
Attributes:
desc: Identifying information about the problem.
circuit_maker: Produces a stim circuit with annotated noise and detectors.
"""
desc: DecodingProblemDesc
circuit_maker: Callable[[], stim.Circuit]
def sample_correct_count(self, shots: int) -> int:
return sample_decode_count_correct(
num_shots=shots,
circuit=self.circuit_maker(),
decoder=self.desc.decoder,
)
class WorkManager:
def __init__(self, num_threads: int):
self.lock = threading.Lock()
self.available = threading.Semaphore(value=num_threads)
self.exceptions = []
def has_failed(self) -> bool:
with self.lock:
return bool(self.exceptions)
def collect_simulated_experiment_data(problems: Iterable[DecodingProblem],
*,
start_batch_size: int,
max_shots: int,
max_batch_size: Optional[int] = None,
max_errors: int,
num_threads: int = 1,
out_path: Optional[Union[str, pathlib.Path]],
alt_in_paths: Sequence[str] = (),
merge_mode: str):
"""
Args:
problems: The decoding problems to collect sample data from.
start_batch_size: The minimum number of samples to take from each case.
This property effectively controls the quality of error rate estimates when the true
error rate is close to 50%.
max_shots: The maximum cutoff number of samples to take from each case.
This property effectively controls the "noise floor" below which error rates cannot
be estimated well. For example, setting this to 1e6 means that error rates below
5e-5 will have estimates with large similar-relative-likelihood regions.
This property overrides all the properties that ask for more samples until some
statistical requirement is met.
max_errors: More samples will be taken until the number of logical errors seen
is at least this large. Set to 10 or 100 for fast estimates. Set to 1000 or 10000 for
good statistical estimates of low probability errors.
out_path: Where to write the CSV sample statistic data. Setting this to None doesn't write
to a file; results are only printed to stdout.
alt_in_paths: Files to read initial CSV stats data from. This CSV data is included when
deciding when to stop collecting (similar to out_path when using saturate mode).
max_batch_size: Defaults to unused. If set, then at most this many shots are collected at one
time.
merge_mode: Determines how new data and previous data are combined.
"replace": Deletes previous data. Generates new data until the new data, by itself,
meets the requested statistical requirements.
"saturate": Generates new data until the new data, combined with previous data, meets
the requested statistical requirements.
"append": Ignores previous data. Generates new data until the new data, by itself, meets
the requested statistical requirements.
num_threads: Number of threads to use for parallel collection. This presumes you're using
a C++ decoder that's safe to invoke separately across multiple threads.
"""
print(CSV_HEADER, flush=True)
all_input_paths_including_saturate_out = list(alt_in_paths)
if out_path is not None:
if merge_mode == "replace" or not pathlib.Path(out_path).exists():
with open(out_path, "w") as f:
print(CSV_HEADER, file=f)
if merge_mode == "saturate":
all_input_paths_including_saturate_out.append(out_path)
previous_data = MultiStats.from_recorded_data(*all_input_paths_including_saturate_out)
if max_batch_size is None:
max_batch_size = max_shots
lock = threading.Lock()
def line_writer(line: str) -> None:
with lock:
if out_path is not None:
with open(out_path, "a") as f:
print(line, file=f)
print(line, flush=True)
manager = WorkManager(num_threads=num_threads)
try:
for problem in problems:
if manager.has_failed():
break
manager.available.acquire()
threaded_collect_for_problem(
problem=problem,
prev_data=previous_data.data.get(problem.desc, CaseStats()),
start_batch_size=start_batch_size,
max_batch_size=max_batch_size,
max_errors=max_errors,
max_shots=max_shots,
line_writer=line_writer,
manager=manager,
)
for k in range(num_threads):
manager.available.acquire()
except BaseException as ex:
with manager.lock:
manager.exceptions.append(ex)
raise
if manager.has_failed():
raise manager.exceptions[0]
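# Usage sketch (not part of the original module). The circuit below comes from
# stim's built-in generator; the "pymatching" decoder name and the num_qubits
# value are assumptions, so substitute whatever sample_decode_count_correct
# actually supports in your setup.
def _example_collect_simulated_experiment_data():
    problem = DecodingProblem(
        desc=DecodingProblemDesc(
            data_width=3, data_height=3, code_distance=3, num_qubits=17,
            rounds=3, noise=1e-3, circuit_style="example_surface_code",
            preserved_observable="Z", decoder="pymatching"),
        circuit_maker=lambda: stim.Circuit.generated(
            "surface_code:rotated_memory_z",
            distance=3, rounds=3, after_clifford_depolarization=1e-3),
    )
    collect_simulated_experiment_data(
        [problem],
        start_batch_size=100,
        max_shots=10_000,
        max_errors=100,
        out_path=None,       # print to stdout only
        merge_mode="append",
    )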
def threaded_collect_for_problem(*,
problem: DecodingProblem,
prev_data: 'CaseStats',
start_batch_size: int,
max_batch_size: Optional[int],
max_errors: int,
max_shots: int,
line_writer: Callable[[str], None],
manager: WorkManager):
def run():
try:
collect_for_problem(
problem=problem,
prev_data=prev_data,
start_batch_size=start_batch_size,
max_batch_size=max_batch_size,
max_errors=max_errors,
max_shots=max_shots,
line_writer=line_writer,
manager=manager,
)
except BaseException as ex:
with manager.lock:
manager.exceptions.append(ex)
raise
finally:
manager.available.release()
threading.Thread(target=run).start()
def collect_for_problem(*,
problem: DecodingProblem,
prev_data: 'CaseStats',
start_batch_size: int,
max_batch_size: Optional[int],
max_errors: int,
max_shots: int,
line_writer: Callable[[str], None],
manager: WorkManager):
new_data = prev_data.copy()
num_next_shots = start_batch_size
if max_batch_size is not None and max_batch_size < start_batch_size:
num_next_shots = max_batch_size
while new_data.num_errors < max_errors and new_data.num_shots < max_shots and not manager.has_failed():
num_next_shots = min(num_next_shots, max_batch_size, max_shots - new_data.num_shots)
t0 = time.monotonic()
num_correct = sample_decode_count_correct(
num_shots=num_next_shots,
circuit=problem.circuit_maker(),
decoder=problem.desc.decoder,
)
t1 = time.monotonic()
record = ",".join(str(e) for e in [
problem.desc.data_width,
problem.desc.data_height,
problem.desc.rounds,
problem.desc.noise,
problem.desc.circuit_style,
problem.desc.preserved_observable,
problem.desc.code_distance,
problem.desc.num_qubits,
num_next_shots,
num_correct,
t1 - t0,
problem.desc.decoder,
CSV_HEADER_VERSION,
])
if manager.has_failed():
# Don't write more results after KeyboardInterrupt.
return
line_writer(record)
new_data.num_shots += num_next_shots
new_data.num_correct += num_correct
new_data.total_processing_seconds += t1 - t0
num_next_shots *= 2
def collect_detection_fraction_data(problems: Iterable[DecodingProblem],
*,
shots: int,
out_path: Optional[Union[str, pathlib.Path]],
discard_previous_data: bool):
print(CSV_HEADER, flush=True)
if out_path is not None:
if discard_previous_data or not pathlib.Path(out_path).exists():
with open(out_path, "w") as f:
print(CSV_HEADER, file=f)
for problem in problems:
t0 = time.monotonic()
samples = problem.circuit_maker().compile_detector_sampler().sample(shots)
num_detections = np.count_nonzero(samples)
num_samples = math.prod(samples.shape)
t1 = time.monotonic()
record = ",".join(str(e) for e in [
problem.desc.data_width,
problem.desc.data_height,
problem.desc.rounds,
problem.desc.noise,
problem.desc.circuit_style,
"-",
problem.desc.code_distance,
problem.desc.num_qubits,
num_samples,
num_samples - num_detections,
t1 - t0,
"detection_fraction",
CSV_HEADER_VERSION,
])
if out_path is not None:
with open(out_path, "a") as f:
print(record, file=f)
print(record, flush=True)
@dataclasses.dataclass
class CaseStats:
num_shots: int = 0
num_correct: int = 0
total_processing_seconds: float = 0
def copy(self) -> 'CaseStats':
return CaseStats(num_shots=self.num_shots,
num_correct=self.num_correct,
total_processing_seconds=self.total_processing_seconds)
def to_csv_line(self, desc: DecodingProblemDesc) -> str:
return (
f"{desc.data_width},"
f"{desc.data_height},"
f"{desc.rounds},"
f"{desc.noise},"
f"{desc.circuit_style},"
f"{desc.preserved_observable},"
f"{desc.code_distance},"
f"{desc.num_qubits},"
f"{self.num_shots},"
f"{self.num_correct},"
f"{self.total_processing_seconds},"
f"{desc.decoder},"
f"{CSV_HEADER_VERSION}"
)
def extrapolate_intersection(self, other: 'CaseStats') -> 'CaseStats':
p0 = self.logical_error_rate
p1 = other.logical_error_rate
p01 = 1 - (1 - p0) * (1 - p1)
n = min(self.num_shots, other.num_shots)
c = int(n * (1 - p01))
return CaseStats(
num_shots=n,
num_correct=c,
total_processing_seconds=self.total_processing_seconds + other.total_processing_seconds,
)
@property
def num_errors(self) -> int:
return self.num_shots - self.num_correct
def likely_error_rate_bounds(self, *, desired_ratio_vs_max_likelihood: float) -> Tuple[float, float]:
"""Compute relative-likelihood bounds.
Returns the min/max error rates whose Bayes factors are within the given ratio of the maximum
likelihood estimate.
"""
actual_errors = self.num_shots - self.num_correct
log_max_likelihood = log_binomial(p=actual_errors / self.num_shots, n=self.num_shots, hits=actual_errors)
target_log_likelihood = log_max_likelihood + math.log(desired_ratio_vs_max_likelihood)
acc = 100
low = binary_search(
func=lambda exp_err: log_binomial(p=exp_err / (acc * self.num_shots), n=self.num_shots, hits=actual_errors),
target=target_log_likelihood,
min_x=0,
max_x=actual_errors * acc) / acc
high = binary_search(
func=lambda exp_err: -log_binomial(p=exp_err / (acc * self.num_shots), n=self.num_shots, hits=actual_errors),
target=-target_log_likelihood,
min_x=actual_errors * acc,
max_x=self.num_shots * acc) / acc
return low / self.num_shots, high / self.num_shots
@property
def logical_error_rate(self) -> float:
if self.num_shots == 0:
return 1
return (self.num_shots - self.num_correct) / self.num_shots
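# Illustrative sketch (not part of the original module): with 100 errors out of
# 100_000 shots, the maximum-likelihood error rate is 1e-3 and the likelihood
# bounds bracket it:
#
#   stats = CaseStats(num_shots=100_000, num_correct=99_900)
#   low, high = stats.likely_error_rate_bounds(desired_ratio_vs_max_likelihood=1e-3)
#   low < stats.logical_error_rate < high   # True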
TKey = TypeVar('TKey')
@dataclasses.dataclass
class MultiStats:
data: Dict[DecodingProblemDesc, CaseStats] = dataclasses.field(default_factory=dict)
def to_csv(self) -> str:
lines = [CSV_HEADER]
ks = sorted(self.data.keys(),
key=lambda e: (
e.circuit_style,
e.preserved_observable,
e.noise,
e.code_distance,
e.rounds,
e.num_qubits,
e.data_width,
e.data_height,
e.decoder,
))
for k in ks:
v = self.data[k]
lines.append(v.to_csv_line(k))
return '\n'.join(lines) + '\n'
def grouped_by(self,
key: Callable[[DecodingProblemDesc], TKey],
*,
reverse: bool = False) -> Dict[TKey, 'MultiStats']:
groups = {}
for k, v in self.data.items():
group = key(k)
groups.setdefault(group, MultiStats({})).data[k] = v
| |
Hash || _sjcl.hash.sha256;
var exKey = [[],[]], i,
bs = Hash.prototype.blockSize / 32;
this._baseHash = [new Hash(), new Hash()];
if (key.length > bs) {
key = Hash.hash(key);
}
for (i=0; i<bs; i++) {
exKey[0][i] = key[i]^0x36363636;
exKey[1][i] = key[i]^0x5C5C5C5C;
}
this._baseHash[0].update(exKey[0]);
this._baseHash[1].update(exKey[1]);
this._resultHash = new Hash(this._baseHash[0]);
};
/** HMAC with the specified hash function. Also called encrypt since it's a prf.
* @param {bitArray|String} data The data to mac.
*/
_sjcl.misc.hmac.prototype.encrypt = _sjcl.misc.hmac.prototype.mac = function (data) {
if (!this._updated) {
this.update(data);
return this.digest(data);
} else {
throw new _sjcl.exception.invalid("encrypt on already updated hmac called!");
}
};
_sjcl.misc.hmac.prototype.reset = function () {
this._resultHash = new this._hash(this._baseHash[0]);
this._updated = false;
};
_sjcl.misc.hmac.prototype.update = function (data) {
this._updated = true;
this._resultHash.update(data);
};
_sjcl.misc.hmac.prototype.digest = function () {
var w = this._resultHash.finalize(), result = new (this._hash)(this._baseHash[1]).update(w).finalize();
this.reset();
return result;
};
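// Usage sketch (not part of the original library): authenticate a message with
// HMAC-SHA256 and hex-encode the tag. Assumes the full SJCL build, so that
// _sjcl.codec.utf8String and _sjcl.codec.hex are available.
function _sjclHmacExample() {
  var key = _sjcl.codec.utf8String.toBits("shared secret");
  var hmac = new _sjcl.misc.hmac(key, _sjcl.hash.sha256);
  var tag = hmac.encrypt("message to authenticate");
  return _sjcl.codec.hex.fromBits(tag); // hex-encoded MAC
}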
/** @fileOverview Password-based key-derivation function, version 2.0.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
/** Password-Based Key-Derivation Function, version 2.0.
*
* Generate keys from passwords using PBKDF2-HMAC-SHA256.
*
* This is the method specified by RSA's PKCS #5 standard.
*
* @param {bitArray|String} password The password.
* @param {bitArray|String} salt The salt. Should have lots of entropy.
 * @param {Number} [count=10000] The number of iterations. Higher numbers make the function slower but more secure.
* @param {Number} [length] The length of the derived key. Defaults to the
output size of the hash function.
* @param {Object} [Prff=_sjcl.misc.hmac] The pseudorandom function family.
* @return {bitArray} the derived key.
*/
_sjcl.misc.pbkdf2 = function (password, salt, count, length, Prff) {
count = count || 10000;
if (length < 0 || count < 0) {
throw new _sjcl.exception.invalid("invalid params to pbkdf2");
}
if (typeof password === "string") {
password = _sjcl.codec.utf8String.toBits(password);
}
if (typeof salt === "string") {
salt = _sjcl.codec.utf8String.toBits(salt);
}
Prff = Prff || _sjcl.misc.hmac;
var prf = new Prff(password),
u, ui, i, j, k, out = [], b = _sjcl.bitArray;
for (k = 1; 32 * out.length < (length || 1); k++) {
u = ui = prf.encrypt(b.concat(salt,[k]));
for (i=1; i<count; i++) {
ui = prf.encrypt(ui);
for (j=0; j<ui.length; j++) {
u[j] ^= ui[j];
}
}
out = out.concat(u);
}
if (length) { out = b.clamp(out, length); }
return out;
};
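// Usage sketch (not part of the original library): derive a 256-bit key from a
// password and a salt. The hex salt shown is illustrative only; real code
// should use a fresh random salt (e.g. from _sjcl.random).
function _sjclPbkdf2Example() {
  var salt = _sjcl.codec.hex.toBits("a5f3c6a11b2e8daf");
  return _sjcl.misc.pbkdf2("correct horse battery staple", salt, 10000, 256);
}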
/** @fileOverview Random number generator.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
/**
* @class Random number generator
* @description
* <b>Use _sjcl.random as a singleton for this class!</b>
* <p>
* This random number generator is a derivative of Ferguson and Schneier's
* generator Fortuna. It collects entropy from various events into several
* pools, implemented by streaming SHA-256 instances. It differs from
* ordinary Fortuna in a few ways, though.
* </p>
*
* <p>
* Most importantly, it has an entropy estimator. This is present because
* there is a strong conflict here between making the generator available
* as soon as possible, and making sure that it doesn't "run on empty".
* In Fortuna, there is a saved state file, and the system is likely to have
* time to warm up.
* </p>
*
* <p>
* Second, because users are unlikely to stay on the page for very long,
* and to speed startup time, the number of pools increases logarithmically:
* a new pool is created when the previous one is actually used for a reseed.
* This gives the same asymptotic guarantees as Fortuna, but gives more
* entropy to early reseeds.
* </p>
*
* <p>
* The entire mechanism here feels pretty klunky. Furthermore, there are
* several improvements that should be made, including support for
* dedicated cryptographic functions that may be present in some browsers;
* state files in local storage; cookies containing randomness; etc. So
* look for improvements in future versions.
* </p>
* @constructor
*/
_sjcl.prng = function(defaultParanoia) {
/* private */
this._pools = [new _sjcl.hash.sha256()];
this._poolEntropy = [0];
this._reseedCount = 0;
this._robins = {};
this._eventId = 0;
this._collectorIds = {};
this._collectorIdNext = 0;
this._strength = 0;
this._poolStrength = 0;
this._nextReseed = 0;
this._key = [0,0,0,0,0,0,0,0];
this._counter = [0,0,0,0];
this._cipher = undefined;
this._defaultParanoia = defaultParanoia;
/* event listener stuff */
this._collectorsStarted = false;
this._callbacks = {progress: {}, seeded: {}};
this._callbackI = 0;
/* constants */
this._NOT_READY = 0;
this._READY = 1;
this._REQUIRES_RESEED = 2;
this._MAX_WORDS_PER_BURST = 65536;
this._PARANOIA_LEVELS = [0,48,64,96,128,192,256,384,512,768,1024];
this._MILLISECONDS_PER_RESEED = 30000;
this._BITS_PER_RESEED = 80;
};
_sjcl.prng.prototype = {
/** Generate several random words, and return them in an array.
* A word consists of 32 bits (4 bytes)
* @param {Number} nwords The number of words to generate.
*/
randomWords: function (nwords, paranoia) {
var out = [], i, readiness = this.isReady(paranoia), g;
if (readiness === this._NOT_READY) {
throw new _sjcl.exception.notReady("generator isn't seeded");
} else if (readiness & this._REQUIRES_RESEED) {
this._reseedFromPools(!(readiness & this._READY));
}
for (i=0; i<nwords; i+= 4) {
if ((i+1) % this._MAX_WORDS_PER_BURST === 0) {
this._gate();
}
g = this._gen4words();
out.push(g[0],g[1],g[2],g[3]);
}
this._gate();
return out.slice(0,nwords);
},
setDefaultParanoia: function (paranoia, allowZeroParanoia) {
if (paranoia === 0 && allowZeroParanoia !== "Setting paranoia=0 will ruin your security; use it only for testing") {
throw new _sjcl.exception.invalid("Setting paranoia=0 will ruin your security; use it only for testing");
}
this._defaultParanoia = paranoia;
},
/**
* Add entropy to the pools.
* @param data The entropic value. Should be a 32-bit integer, array of 32-bit integers, or string
* @param {Number} estimatedEntropy The estimated entropy of data, in bits
* @param {String} source The source of the entropy, eg "mouse"
*/
addEntropy: function (data, estimatedEntropy, source) {
source = source || "user";
var id,
i, tmp,
t = (new Date()).valueOf(),
robin = this._robins[source],
oldReady = this.isReady(), err = 0, objName;
id = this._collectorIds[source];
if (id === undefined) { id = this._collectorIds[source] = this._collectorIdNext ++; }
if (robin === undefined) { robin = this._robins[source] = 0; }
this._robins[source] = ( this._robins[source] + 1 ) % this._pools.length;
switch(typeof(data)) {
case "number":
if (estimatedEntropy === undefined) {
estimatedEntropy = 1;
}
this._pools[robin].update([id,this._eventId++,1,estimatedEntropy,t,1,data|0]);
break;
case "object":
objName = Object.prototype.toString.call(data);
if (objName === "[object Uint32Array]") {
tmp = [];
for (i = 0; i < data.length; i++) {
tmp.push(data[i]);
}
data = tmp;
} else {
if (objName !== "[object Array]") {
err = 1;
}
for (i=0; i<data.length && !err; i++) {
if (typeof(data[i]) !== "number") {
err = 1;
}
}
}
if (!err) {
if (estimatedEntropy === undefined) {
/* horrible entropy estimator */
estimatedEntropy = 0;
for (i=0; i<data.length; i++) {
tmp= data[i];
while (tmp>0) {
estimatedEntropy++;
tmp = tmp >>> 1;
}
}
}
this._pools[robin].update([id,this._eventId++,2,estimatedEntropy,t,data.length].concat(data));
}
break;
case "string":
if (estimatedEntropy === undefined) {
/* English text has just over 1 bit per character of entropy.
* But this might be HTML or something, and have far less
* entropy than English... Oh well, let's just say one bit.
*/
estimatedEntropy = data.length;
}
this._pools[robin].update([id,this._eventId++,3,estimatedEntropy,t,data.length]);
this._pools[robin].update(data);
break;
default:
err=1;
}
if (err) {
throw new _sjcl.exception.bug("random: addEntropy only supports number, array of numbers or string");
}
/* record the new strength */
this._poolEntropy[robin] += estimatedEntropy;
this._poolStrength += estimatedEntropy;
/* fire off events */
if (oldReady === this._NOT_READY) {
if (this.isReady() !== this._NOT_READY) {
this._fireEvent("seeded", Math.max(this._strength, this._poolStrength));
}
this._fireEvent("progress", this.getProgress());
}
},
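// Usage sketch (comments only, since this sits inside the prototype literal):
// feed collected events into the _sjcl.random singleton described above.
//   _sjcl.random.addEntropy([0x12345678, 0x9abcdef0], 16, "hardware-rng");
//   _sjcl.random.addEntropy("mouse and keyboard timings...", 8, "user");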
/** Is the generator ready? */
isReady: function (paranoia) {
var entropyRequired = this._PARANOIA_LEVELS[ (paranoia !== undefined) ? paranoia : this._defaultParanoia ];
if (this._strength && this._strength >= entropyRequired) {
return (this._poolEntropy[0] > this._BITS_PER_RESEED && (new Date()).valueOf() > this._nextReseed) ?
this._REQUIRES_RESEED | this._READY :
this._READY;
} else {
return (this._poolStrength >= entropyRequired) ?
this._REQUIRES_RESEED | this._NOT_READY :
this._NOT_READY;
}
},
/** Get the generator's progress toward readiness, as a fraction | |
angular velocity transformation matrix
:rtype: ndarray(6,6) or ndarray(3,3)
Computes the transformation from spatial velocity :math:`\nu`, where
rotation rate is expressed as angular velocity, to analytical rates
:math:`\dvec{x}` where the rotational part is expressed as rate of change in
some other representation
.. math::
\dvec{x} = \mat{A} \vec{\nu}
where :math:`\mat{A}` is a block diagonal 6x6 matrix
==================  =========================================
``representation``  Rotational representation
==================  =========================================
``'rpy/xyz'``       RPY angular rates in XYZ order (default)
``'rpy/zyx'``       RPY angular rates in ZYX order
``'eul'``           Euler angular rates in ZYZ order
``'exp'``           exponential coordinate rates
==================  =========================================
.. note:: Compared to :func:`eul2jac`, :func:`rpy2jac`, :func:`exp2jac`
- This performs the inverse mapping
- This maps a 6-vector, the others map a 3-vector
Reference:
- ``symbolic/angvelxform.ipynb`` in this Toolbox
- Robot Dynamics Lecture Notes
Robotic Systems Lab, ETH Zurich, 2018
https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
:seealso: :func:`rot2jac`, :func:`eul2jac`, :func:`rpy2r`, :func:`exp2jac`
"""
if representation == "rpy/xyz":
alpha = 𝚪[0]
beta = 𝚪[1]
gamma = 𝚪[2]
# autogenerated by symbolic/angvelxform.ipynb
if inverse:
# analytical rates -> angular velocity
# fmt: off
A = np.array([
[math.sin(beta), 0, 1],
[-math.sin(gamma)*math.cos(beta), math.cos(gamma), 0],
[math.cos(beta)*math.cos(gamma), math.sin(gamma), 0]
])
# fmt: on
else:
# angular velocity -> analytical rates
# fmt: off
A = np.array([
[0, -math.sin(gamma)/math.cos(beta), math.cos(gamma)/math.cos(beta)],
[0, math.cos(gamma), math.sin(gamma)],
[1, math.sin(gamma)*math.tan(beta), -math.cos(gamma)*math.tan(beta)]
])
# fmt: on
elif representation == "rpy/zyx":
alpha = 𝚪[0]
beta = 𝚪[1]
gamma = 𝚪[2]
# autogenerated by symbolic/angvelxform.ipynb
if inverse:
# analytical rates -> angular velocity
# fmt: off
A = np.array([
[math.cos(beta)*math.cos(gamma), -math.sin(gamma), 0],
[math.sin(gamma)*math.cos(beta), math.cos(gamma), 0],
[-math.sin(beta), 0, 1]
])
# fmt: on
else:
# angular velocity -> analytical rates
# fmt: off
A = np.array([
[math.cos(gamma)/math.cos(beta), math.sin(gamma)/math.cos(beta), 0],
[-math.sin(gamma), math.cos(gamma), 0],
[math.cos(gamma)*math.tan(beta), math.sin(gamma)*math.tan(beta), 1]
])
# fmt: on
elif representation == "eul":
phi = 𝚪[0]
theta = 𝚪[1]
psi = 𝚪[2]
# autogenerated by symbolic/angvelxform.ipynb
if inverse:
# analytical rates -> angular velocity
# fmt: off
A = np.array([
[0, -math.sin(phi), math.sin(theta)*math.cos(phi)],
[0, math.cos(phi), math.sin(phi)*math.sin(theta)],
[1, 0, math.cos(theta)]
])
# fmt: on
else:
# angular velocity -> analytical rates
# fmt: off
A = np.array([
[-math.cos(phi)/math.tan(theta), -math.sin(phi)/math.tan(theta), 1],
[-math.sin(phi), math.cos(phi), 0],
[math.cos(phi)/math.sin(theta), math.sin(phi)/math.sin(theta), 0]
])
# fmt: on
elif representation == "exp":
# from ETHZ class notes
sk = base.skew(𝚪)
theta = base.norm(𝚪)
if inverse:
# analytical rates -> angular velocity
# (2.106)
A = (
np.eye(3)
+ sk * (1 - np.cos(theta)) / theta ** 2
+ sk @ sk * (theta - np.sin(theta)) / theta ** 3
)
else:
# angular velocity -> analytical rates
# (2.107)
A = (
np.eye(3)
- sk / 2
+ sk
@ sk
/ theta ** 2
* (1 - (theta / 2) * (np.sin(theta) / (1 - np.cos(theta))))
)
else:
raise ValueError("bad representation specified")
if full:
return sp.linalg.block_diag(np.eye(3, 3), A)
else:
return A
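# Usage sketch (not part of the original module, keyword names assumed from the
# body above): the forward and inverse mappings returned by angvelxform for a
# given representation are matrix inverses of each other.
def _example_angvelxform():
    gamma = np.r_[0.1, 0.2, 0.3]   # XYZ roll-pitch-yaw angles (illustrative)
    A = angvelxform(gamma, full=False, representation="rpy/xyz")
    Ainv = angvelxform(gamma, inverse=True, full=False, representation="rpy/xyz")
    assert np.allclose(A @ Ainv, np.eye(3))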
def angvelxform_dot(𝚪, 𝚪d, full=True, representation="rpy/xyz"):
"""
Angular acceleration transformation
:param 𝚪: angular representation
:type 𝚪: ndarray(3)
:param 𝚪d: angular representation rate
:type 𝚪d: ndarray(3)
:param representation: defaults to 'rpy-xyz'
:type representation: str, optional
:param full: return 6x6 transform for spatial velocity
:type full: bool
:return: angular velocity transformation matrix
:rtype: ndarray(6,6) or ndarray(3,3)
Computes the transformation from spatial acceleration :math:`\dot{\nu}`,
where the rotational part is expressed as angular acceleration, to
analytical rates :math:`\ddvec{x}` where the rotational part is expressed as
acceleration in some other representation
.. math::
\ddvec{x} = \mat{A}_d \dvec{\nu}
where :math:`\mat{A}_d` is a block diagonal 6x6 matrix
==================  =========================================
``representation``  Rotational representation
==================  =========================================
``'rpy/xyz'``       RPY angular rates in XYZ order (default)
``'rpy/zyx'``       RPY angular rates in ZYX order
``'eul'``           Euler angular rates in ZYZ order
``'exp'``           exponential coordinate rates
==================  =========================================
.. note:: Compared to :func:`eul2jac`, :func:`rpy2jac`, :func:`exp2jac`
- This performs the inverse mapping
- This maps a 6-vector, the others map a 3-vector
Reference:
- ``symbolic/angvelxform.ipynb`` in this Toolbox
- ``symbolic/angvelxform_dot.ipynb`` in this Toolbox
:seealso: :func:`rot2jac`, :func:`eul2jac`, :func:`rpy2r`, :func:`exp2jac`
"""
if representation == "rpy/xyz":
# autogenerated by symbolic/angvelxform.ipynb
alpha = 𝚪[0]
beta = 𝚪[1]
gamma = 𝚪[2]
alpha_dot = 𝚪d[0]
beta_dot = 𝚪d[1]
gamma_dot = 𝚪d[2]
Ad = np.array(
[
[
0,
-(
beta_dot * math.sin(beta) * math.sin(gamma) / math.cos(beta)
+ gamma_dot * math.cos(gamma)
)
/ math.cos(beta),
(
beta_dot * math.sin(beta) * math.cos(gamma) / math.cos(beta)
- gamma_dot * math.sin(gamma)
)
/ math.cos(beta),
],
[0, -gamma_dot * math.sin(gamma), gamma_dot * math.cos(gamma)],
[
0,
beta_dot * math.sin(gamma) / math.cos(beta) ** 2
+ gamma_dot * math.cos(gamma) * math.tan(beta),
-beta_dot * math.cos(gamma) / math.cos(beta) ** 2
+ gamma_dot * math.sin(gamma) * math.tan(beta),
],
]
)
elif representation == "rpy/zyx":
# autogenerated by symbolic/angvelxform.ipynb
alpha = 𝚪[0]
beta = 𝚪[1]
gamma = 𝚪[2]
alpha_dot = 𝚪d[0]
beta_dot = 𝚪d[1]
gamma_dot = 𝚪d[2]
Ad = np.array(
[
[
(
beta_dot * math.sin(beta) * math.cos(gamma) / math.cos(beta)
- gamma_dot * math.sin(gamma)
)
/ math.cos(beta),
(
beta_dot * math.sin(beta) * math.sin(gamma) / math.cos(beta)
+ gamma_dot * math.cos(gamma)
)
/ math.cos(beta),
0,
],
[-gamma_dot * math.cos(gamma), -gamma_dot * math.sin(gamma), 0],
[
beta_dot * math.cos(gamma) / math.cos(beta) ** 2
- gamma_dot * math.sin(gamma) * math.tan(beta),
beta_dot * math.sin(gamma) / math.cos(beta) ** 2
+ gamma_dot * math.cos(gamma) * math.tan(beta),
0,
],
]
)
elif representation == "eul":
# autogenerated by symbolic/angvelxform.ipynb
phi = 𝚪[0]
theta = 𝚪[1]
psi = 𝚪[2]
phi_dot = 𝚪d[0]
theta_dot = 𝚪d[1]
psi_dot = 𝚪d[2]
Ad = np.array(
[
[
phi_dot * math.sin(phi) / math.tan(theta)
+ theta_dot * math.cos(phi) / math.sin(theta) ** 2,
-phi_dot * math.cos(phi) / math.tan(theta)
+ theta_dot * math.sin(phi) / math.sin(theta) ** 2,
0,
],
[-phi_dot * math.cos(phi), -phi_dot * math.sin(phi), 0],
[
-(
phi_dot * math.sin(phi)
+ theta_dot * math.cos(phi) * math.cos(theta) / math.sin(theta)
)
/ math.sin(theta),
(
phi_dot * math.cos(phi)
- theta_dot * math.sin(phi) * math.cos(theta) / math.sin(theta)
)
/ math.sin(theta),
0,
],
]
)
elif representation == "exp":
# autogenerated by symbolic/angvelxform_dot.ipynb
v = 𝚪
vd = 𝚪d
sk = base.skew(v)
skd = base.skew(vd)
theta_dot = np.inner(𝚪, 𝚪d) / base.norm(𝚪)
theta = base.norm(𝚪)
Theta = 1 - theta / 2 * np.sin(theta) / (1 - np.cos(theta))
Theta_dot = (
-0.5 * theta * theta_dot * math.cos(theta) / (1 - math.cos(theta))
+ 0.5
* theta
* theta_dot
* math.sin(theta) ** 2
/ (1 - math.cos(theta)) ** 2
- 0.5 * theta_dot * math.sin(theta) / (1 - math.cos(theta))
) / theta ** 2 - 2 * theta_dot * (
-1 / 2 * theta * math.sin(theta) / (1 - math.cos(theta)) + 1
) / theta ** 3
Ad = -0.5 * skd + 2 * sk @ skd * Theta + sk @ sk * Theta_dot
else:
raise ValueError("bad representation specified")
if full:
return sp.linalg.block_diag(np.eye(3, 3), Ad)
else:
return Ad
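# Usage sketch (not part of the original module): evaluate the derivative of
# the mapping matrix for given angles and angle rates (values illustrative).
def _example_angvelxform_dot():
    gamma = np.r_[0.1, 0.2, 0.3]
    gamma_d = np.r_[0.01, -0.02, 0.03]
    Ad = angvelxform_dot(gamma, gamma_d, full=False, representation="rpy/xyz")
    assert Ad.shape == (3, 3)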
def tr2adjoint(T):
r"""
SE(3) adjoint matrix
:param T: SE(3) matrix
:type T: ndarray(4,4)
:return: adjoint matrix
:rtype: ndarray(6,6)
Computes an adjoint matrix that maps spatial velocity between two frames defined by
an SE(3) matrix.
``tr2jac(T)`` is an adjoint matrix (6x6) that maps spatial velocity or
differential motion between frame {B} to frame {A} which are attached to the
same moving body. The pose of {B} relative to {A} is represented by the
homogeneous transform T = :math:`{}^A {\bf T}_B`.
.. runblock:: pycon
>>> from spatialmath.base import *
>>> T = trotx(0.3, t=[4,5,6])
>>> tr2adjoint(T)
:Reference:
- Robotics, Vision & Control: Second Edition, <NAME>, Springer 2016; p65.
- `Lie groups for 2D and 3D Transformations <http://ethaneade.com/lie.pdf>_
:SymPy: supported
"""
Z = np.zeros((3, 3), dtype=T.dtype)
if T.shape == (3, 3):
# SO(3) adjoint: the rotation matrix is the input itself
R = T
# fmt: off
return np.block([
[R, Z],
[Z, R]
])
# fmt: on
elif T.shape == (4, 4):
# SE(3) adjoint
(R, t) = base.tr2rt(T)
# fmt: off
return np.block([
[R, base.skew(t) @ R],
[Z, R]
])
# fmt: on
else:
raise ValueError("bad argument")
def trprint(
T,
orient="rpy/zyx",
label=None,
file=sys.stdout,
fmt="{:.3g}",
degsym=True,
unit="deg",
):
"""
Compact display of SO(3) or SE(3) matrices
:param T: SE(3) or SO(3) matrix
:type T: | |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import hashlib
import logging
import os
import time
import boto3
import click
import yaml
from jinja2 import Environment, FileSystemLoader, Template
from pykwalify.core import Core
import collections.abc
from copy import deepcopy
from betterboto import client as betterboto_client
from threading import Thread
import shutil
import pkg_resources
import requests
CONFIG_PARAM_NAME = "/servicecatalog-factory/config"
PUBLISHED_VERSION = pkg_resources.require("aws-service-catalog-factory")[0].version
VERSION = PUBLISHED_VERSION
BOOTSTRAP_STACK_NAME = 'servicecatalog-factory'
SERVICE_CATALOG_FACTORY_REPO_NAME = 'ServiceCatalogFactory'
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
HOME_REGION = os.environ.get('AWS_DEFAULT_REGION', 'eu-west-1')
NON_RECOVERABLE_STATES = [
"ROLLBACK_COMPLETE",
'CREATE_IN_PROGRESS',
'ROLLBACK_IN_PROGRESS',
'DELETE_IN_PROGRESS',
'UPDATE_IN_PROGRESS',
'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_ROLLBACK_IN_PROGRESS',
'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
'REVIEW_IN_PROGRESS',
]
COMPONENT = 'component.j2'
COMPONENT_GROUP = 'component_group.j2'
ASSOCIATIONS = 'associations.j2'
def resolve_from_site_packages(what):
return os.path.sep.join([
os.path.dirname(os.path.abspath(__file__)),
what
])
def read_from_site_packages(what):
return open(
resolve_from_site_packages(what),
'r'
).read()
TEMPLATE_DIR = resolve_from_site_packages('templates')
ENV = Environment(
loader=FileSystemLoader(TEMPLATE_DIR),
extensions=['jinja2.ext.do'],
)
def get_regions():
with betterboto_client.ClientContextManager('ssm', region_name=HOME_REGION) as ssm:
response = ssm.get_parameter(Name=CONFIG_PARAM_NAME)
config = yaml.safe_load(response.get('Parameter').get('Value'))
return config.get('regions')
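

# Illustrative sketch (an assumption inferred from the `config.get('regions')`
# lookup above, not taken from project documentation): the SSM parameter named
# by CONFIG_PARAM_NAME is expected to hold YAML with at least a top-level
# `regions` list.
def _example_config_shape():  # hypothetical helper, for illustration only
    example_value = "regions:\n  - eu-west-1\n  - us-east-1\n"
    config = yaml.safe_load(example_value)
    return config.get('regions')  # -> ['eu-west-1', 'us-east-1']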
def merge(dict1, dict2):
result = deepcopy(dict1)
for key, value in dict2.items():
if isinstance(value, collections.Mapping):
result[key] = merge(result.get(key, {}), value)
else:
result[key] = deepcopy(dict2[key])
return result
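

# Minimal sketch of how merge() combines nested dictionaries; this mirrors the
# way product-level and version-level Options/Source settings are combined in
# generate_pipeline() below (the concrete values here are illustrative only).
def _example_merge():  # hypothetical helper, for illustration only
    defaults = {'Source': {'Provider': 'CodeCommit', 'Configuration': {'BranchName': 'master'}}}
    override = {'Source': {'Configuration': {'BranchName': 'release'}}}
    merged = merge(defaults, override)
    # nested mappings are merged recursively and values from the second dict win:
    # {'Source': {'Provider': 'CodeCommit', 'Configuration': {'BranchName': 'release'}}}
    return merged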
def find_portfolio(service_catalog, portfolio_searching_for):
LOGGER.info('Searching for portfolio for: {}'.format(portfolio_searching_for))
response = service_catalog.list_portfolios_single_page()
for detail in response.get('PortfolioDetails'):
if detail.get('DisplayName') == portfolio_searching_for:
LOGGER.info('Found portfolio: {}'.format(portfolio_searching_for))
return detail
return {}
def create_portfolio(service_catalog, portfolio_searching_for, portfolios_groups_name, portfolio):
LOGGER.info('Creating portfolio: {}'.format(portfolio_searching_for))
args = {
'DisplayName': portfolio_searching_for,
'ProviderName': portfolios_groups_name,
}
if portfolio.get('Description'):
args['Description'] = portfolio.get('Description')
return service_catalog.create_portfolio(
**args
).get('PortfolioDetail').get('Id')
def product_exists(service_catalog, product, **kwargs):
product_to_find = product.get('Name')
LOGGER.info('Searching for product for: {}'.format(product_to_find))
response = service_catalog.search_products_as_admin_single_page(
Filters={'FullTextSearch': [product_to_find]}
)
for product_view_details in response.get('ProductViewDetails'):
product_view = product_view_details.get('ProductViewSummary')
if product_view.get('Name') == product_to_find:
LOGGER.info('Found product: {}'.format(product_view))
return product_view
def create_product(service_catalog, portfolio, product, s3_bucket_name):
LOGGER.info('Creating a product: {}'.format(product.get('Name')))
args = product.copy()
args.update({
'ProductType': 'CLOUD_FORMATION_TEMPLATE',
'ProvisioningArtifactParameters': {
'Name': "-",
'Type': 'CLOUD_FORMATION_TEMPLATE',
'Description': 'Placeholder version, do not provision',
"Info": {
"LoadTemplateFromURL": "https://s3.amazonaws.com/{}/{}".format(
s3_bucket_name, "empty.template.yaml"
)
}
}
})
del args['Versions']
if args.get('Options'):
del args['Options']
if args.get('Id'):
del args['Id']
if args.get('Source'):
del args['Source']
LOGGER.info("Creating a product: {}".format(args))
response = service_catalog.create_product(
**args
)
product_view = response.get('ProductViewDetail').get('ProductViewSummary')
product_id = product_view.get('ProductId')
    # create_product is not a synchronous request and describe_product doesn't work here
LOGGER.info('Waiting for the product to register: {}'.format(product.get('Name')))
found = False
while not found:
response = service_catalog.search_products_as_admin_single_page(
Filters={'FullTextSearch': [args.get("Name")]}
)
time.sleep(1)
product_view_details = response.get('ProductViewDetails')
for product_view_detail in product_view_details:
found = product_view_detail.get('ProductViewSummary').get('ProductId') == product_id
break
service_catalog.associate_product_with_portfolio(
ProductId=product_id,
PortfolioId=portfolio.get('Id')
)
return product_view
def get_bucket_name():
s3_bucket_url = None
with betterboto_client.ClientContextManager(
'cloudformation', region_name=HOME_REGION
) as cloudformation:
response = cloudformation.describe_stacks(
StackName=BOOTSTRAP_STACK_NAME
)
assert len(response.get('Stacks')) == 1, "There should only be one stack with the name"
outputs = response.get('Stacks')[0].get('Outputs')
for output in outputs:
if output.get('OutputKey') == "CatalogBucketName":
s3_bucket_url = output.get('OutputValue')
assert s3_bucket_url is not None, "Could not find bucket"
return s3_bucket_url
def ensure_portfolio(portfolios_groups_name, portfolio, service_catalog):
portfolio_searching_for = "{}-{}".format(portfolios_groups_name, portfolio.get('DisplayName'))
remote_portfolio = find_portfolio(service_catalog, portfolio_searching_for)
if remote_portfolio.get('Id') is None:
LOGGER.info("Couldn't find portfolio, creating one for: {}".format(portfolio_searching_for))
portfolio['Id'] = create_portfolio(
service_catalog,
portfolio_searching_for,
portfolios_groups_name,
portfolio
)
else:
portfolio['Id'] = remote_portfolio.get('Id')
def ensure_product(product, portfolio, service_catalog):
s3_bucket_name = get_bucket_name()
remote_product = product_exists(service_catalog, product)
if remote_product is None:
remote_product = create_product(
service_catalog,
portfolio,
product,
s3_bucket_name,
)
product['Id'] = remote_product.get('ProductId')
def generate_and_run(portfolios_groups_name, portfolio, what, stack_name, region, portfolio_id):
LOGGER.info("Generating: {} for: {} in region: {}".format(
what, portfolio.get('DisplayName'), region
))
template = ENV.get_template(what).render(
portfolio=portfolio, portfolio_id=portfolio_id
)
stack_name = "-".join([portfolios_groups_name, portfolio.get('DisplayName'), stack_name])
with betterboto_client.ClientContextManager(
'cloudformation', region_name=region
) as cloudformation:
cloudformation.create_or_update(
StackName=stack_name,
TemplateBody=template,
Capabilities=['CAPABILITY_IAM'],
)
LOGGER.info("Finished creating/updating: {}".format(stack_name))
def ensure_product_versions_active_is_correct(product, service_catalog):
LOGGER.info("Ensuring product version active setting is in sync for: {}".format(product.get('Name')))
product_id = product.get('Id')
response = service_catalog.list_provisioning_artifacts(
ProductId=product_id
)
for version in product.get('Versions', []):
LOGGER.info('Checking for version: {}'.format(version.get('Name')))
active = version.get('Active', True)
LOGGER.info("Checking through: {}".format(response))
for provisioning_artifact_detail in response.get('ProvisioningArtifactDetails', []):
if provisioning_artifact_detail.get('Name') == version.get('Name'):
LOGGER.info("Found matching")
if provisioning_artifact_detail.get('Active') != active:
LOGGER.info("Active status needs to change")
service_catalog.update_provisioning_artifact(
ProductId=product_id,
ProvisioningArtifactId=provisioning_artifact_detail.get('Id'),
Active=active,
)
def generate_pipeline(template, portfolios_groups_name, output_path, version, product, portfolio):
LOGGER.info('Generating pipeline for {}:{}'.format(
portfolios_groups_name, product.get('Name')
))
product_ids_by_region = {}
portfolio_ids_by_region = {}
all_regions = get_regions()
for region in all_regions:
with betterboto_client.ClientContextManager(
'servicecatalog', region_name=region
) as service_catalog:
ensure_portfolio(portfolios_groups_name, portfolio, service_catalog)
portfolio_ids_by_region[region] = portfolio.get('Id')
ensure_product(product, portfolio, service_catalog)
ensure_product_versions_active_is_correct(product, service_catalog)
product_ids_by_region[region] = product.get('Id')
friendly_uid = "-".join(
[
portfolios_groups_name,
portfolio.get('DisplayName'),
product.get('Name'),
version.get('Name')
]
)
rendered = template.render(
friendly_uid=friendly_uid,
portfolios_groups_name=portfolios_groups_name,
version=version,
product=product,
portfolio=portfolio,
Options=merge(product.get('Options', {}), version.get('Options', {})),
Source=merge(product.get('Source', {}), version.get('Source', {})),
ProductIdsByRegion=product_ids_by_region,
PortfolioIdsByRegion=portfolio_ids_by_region,
ALL_REGIONS=all_regions,
)
rendered = Template(rendered).render(
friendly_uid=friendly_uid,
portfolios_groups_name=portfolios_groups_name,
version=version,
product=product,
portfolio=portfolio,
Options=merge(product.get('Options', {}), version.get('Options', {})),
Source=merge(product.get('Source', {}), version.get('Source', {})),
ProductIdsByRegion=product_ids_by_region,
PortfolioIdsByRegion=portfolio_ids_by_region,
ALL_REGIONS=all_regions,
)
output_file_path = os.path.sep.join([output_path, friendly_uid + ".template.yaml"])
with open(output_file_path, 'w') as output_file:
output_file.write(rendered)
return portfolio_ids_by_region, product_ids_by_region
def generate_pipelines(portfolios_groups_name, portfolios, output_path):
LOGGER.info('Generating pipelines for {}'.format(portfolios_groups_name))
os.makedirs(output_path)
all_regions = get_regions()
for portfolio in portfolios.get('Portfolios'):
portfolio_ids_by_region = {}
for product in portfolio.get('Components', []):
for version in product.get('Versions'):
portfolio_ids_by_region_for_component, product_ids_by_region = generate_pipeline(
ENV.get_template(COMPONENT),
portfolios_groups_name,
output_path,
version,
product,
portfolio,
)
portfolio_ids_by_region.update(portfolio_ids_by_region_for_component)
for product in portfolio.get('ComponentGroups', []):
for version in product.get('Versions'):
portfolio_ids_by_region_for_group, product_ids_by_region = generate_pipeline(
ENV.get_template(COMPONENT_GROUP),
portfolios_groups_name,
output_path,
version,
product,
portfolio,
)
portfolio_ids_by_region.update(portfolio_ids_by_region_for_group)
threads = []
for region in all_regions:
process = Thread(
name=region,
target=generate_and_run,
args=[
portfolios_groups_name,
portfolio,
ASSOCIATIONS,
'associations',
region,
portfolio_ids_by_region[region]
]
)
process.start()
threads.append(process)
for process in threads:
process.join()
@click.group()
@click.option('--info/--no-info', default=False)
@click.option('--info-line-numbers/--no-info-line-numbers', default=False)
def cli(info, info_line_numbers):
"""cli for pipeline tools"""
if info:
logging.basicConfig(
format='%(levelname)s %(threadName)s %(message)s', level=logging.INFO
)
if info_line_numbers:
logging.basicConfig(
format='%(levelname)s %(threadName)s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO
)
@cli.command()
@click.argument('p', type=click.Path(exists=True))
def validate(p):
for portfolio_file_name in os.listdir(p):
portfolios_file_path = os.path.sep.join([p, portfolio_file_name])
LOGGER.info('Validating {}'.format(portfolios_file_path))
core = Core(
source_file=portfolios_file_path,
schema_files=[resolve_from_site_packages('schema.yaml')]
)
core.validate(raise_exception=True)
click.echo("Finished validating: {}".format(portfolios_file_path))
click.echo("Finished validating: OK")
@cli.command()
@click.argument('p', type=click.Path(exists=True))
def generate(p):
LOGGER.info('Generating')
for portfolio_file_name in os.listdir(p):
p_name = portfolio_file_name.split(".")[0]
output_path = os.path.sep.join(["output", p_name])
portfolios_file_path = os.path.sep.join([p, portfolio_file_name])
with open(portfolios_file_path) as portfolios_file:
portfolios_file_contents = portfolios_file.read()
portfolios = yaml.safe_load(portfolios_file_contents)
generate_pipelines(p_name, portfolios, output_path)
def get_stacks():
with betterboto_client.ClientContextManager('cloudformation') as cloudformation:
stack_summaries = []
args = {
"StackStatusFilter": [
'CREATE_IN_PROGRESS',
'CREATE_FAILED',
'CREATE_COMPLETE',
'ROLLBACK_IN_PROGRESS',
'ROLLBACK_FAILED',
'ROLLBACK_COMPLETE',
'DELETE_IN_PROGRESS',
'DELETE_FAILED',
'UPDATE_IN_PROGRESS',
'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_COMPLETE',
'UPDATE_ROLLBACK_IN_PROGRESS',
'UPDATE_ROLLBACK_FAILED',
'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_ROLLBACK_COMPLETE',
'REVIEW_IN_PROGRESS',
]
}
while True:
response = cloudformation.list_stacks(
**args
)
stack_summaries += response.get('StackSummaries')
if response.get('NextToken'):
args['NextToken'] = response.get('NextToken')
else:
break
results = {}
for stack_summary in stack_summaries:
results[stack_summary.get('StackName')] = stack_summary.get('StackStatus')
return results
@cli.command()
@click.argument('p', type=click.Path(exists=True))
def deploy(p):
stacks = get_stacks()
for portfolio_file_name in os.listdir(p):
p_name = portfolio_file_name.split(".")[0]
output_path = os.path.sep.join(["output", p_name])
portfolios_file_path = os.path.sep.join([p, portfolio_file_name])
with open(portfolios_file_path) as portfolios_file:
portfolios_file_contents = portfolios_file.read()
portfolios = yaml.safe_load(portfolios_file_contents)
for portfolio in portfolios.get('Portfolios'):
for product in portfolio.get('Components', []):
for version in product.get('Versions'):
run_deploy_for_component(
p_name,
output_path,
portfolio,
product,
version,
stacks,
)
for product in portfolio.get('ComponentGroups', []):
for version in product.get('Versions'):
run_deploy_for_component_groups(
p_name,
output_path,
portfolio,
product,
version,
stacks,
)
def get_hash_for_template(template):
hasher = hashlib.md5()
hasher.update(str.encode(template))
return "{}{}".format('a', hasher.hexdigest())
def run_deploy_for_component_groups(group_name, path, portfolio, product, version, stacks):
friendly_uid = "-".join([
group_name, portfolio.get('DisplayName'), product.get('Name'), version.get('Name')
])
first_run_of_stack = stacks.get(friendly_uid, False) is False
LOGGER.info('Running deploy for: {}. Is first run: {}'.format(
friendly_uid, first_run_of_stack
))
staging_template_path = os.path.sep.join([path, "{}.template.yaml".format(friendly_uid)])
with open(staging_template_path) as staging_template:
staging_template_contents = staging_template.read()
s3_bucket_name = get_bucket_name()
s3 = boto3.resource('s3')
template_path = "{}/{}/product.template.yaml".format(product.get('Name'), version.get('Name'))
obj = s3.Object(s3_bucket_name, template_path)
obj.put(Body=staging_template_contents)
with betterboto_client.ClientContextManager('servicecatalog') as service_catalog:
product_to_find = product.get('Name')
response = service_catalog.search_products_as_admin_single_page(
Filters={'FullTextSearch': [product_to_find]}
)
product_id = None
for product_view_details in response.get('ProductViewDetails'):
product_view = product_view_details.get('ProductViewSummary')
if product_view.get('Name') == product_to_find:
LOGGER.info('Found product: {}'.format(product_view))
product_id = product_view.get("ProductId")
break
assert product_id is not None, "Could not find product"
found = False
response = service_catalog.list_provisioning_artifacts_single_page(ProductId=product_id)
for provisioning_artifact_detail in response.get('ProvisioningArtifactDetails'):
if provisioning_artifact_detail.get('Name') == version.get("Name"):
found = True
if not found:
LOGGER.info("Creating version: {}. It didn't exist".format(version.get("Name")))
create_args = {
"ProductId": product_id,
"Parameters": {
'Name': version.get('Name'),
'Info': {
"LoadTemplateFromURL": "https://s3.amazonaws.com/{}/{}".format(
s3_bucket_name, template_path
)
},
'Type': 'CLOUD_FORMATION_TEMPLATE'
}
}
if version.get("Description"):
create_args['Parameters']['Description'] = version.get("Description")
service_catalog.create_provisioning_artifact(**create_args)
else:
LOGGER.info(
'Skipped creating version: {}. It already exists'.format(version.get("Name"))
)
def run_deploy_for_component(group_name, path, portfolio, product, version, stacks):
friendly_uid = "-".join([
group_name,
portfolio.get('DisplayName'),
product.get('Name'),
version.get('Name')
])
first_run_of_stack = stacks.get(friendly_uid, False) is False
LOGGER.info(
'Running deploy for: {}. Is first run: {}'.format(friendly_uid, first_run_of_stack)
)
staging_template_path = os.path.sep.join([path, "{}.template.yaml".format(friendly_uid)])
with open(staging_template_path) as staging_template:
staging_template_contents = staging_template.read()
with betterboto_client.ClientContextManager('cloudformation') as cloudformation:
cloudformation.create_or_update(
StackName=friendly_uid,
TemplateBody=staging_template_contents,
)
LOGGER.info('Finished stack: {}'.format(friendly_uid))
@cli.command()
@click.argument('portfolio-name')
@click.argument('product')
@click.argument('version')
def nuke_product_version(portfolio_name, product, version):
click.echo("Nuking service catalog traces")
with betterboto_client.ClientContextManager('servicecatalog') as servicecatalog:
response = servicecatalog.list_portfolios_single_page()
portfolio_id = None
for portfolio_detail in response.get('PortfolioDetails'):
if portfolio_detail.get('DisplayName') == portfolio_name:
portfolio_id = portfolio_detail.get('Id')
break
if portfolio_id is None:
raise Exception("Could not find your portfolio: {}".format(portfolio_name))
else:
LOGGER.info('Portfolio_id found: {}'.format(portfolio_id))
product_name = "-".join([product, version])
LOGGER.info('Looking for product: {}'.format(product_name))
result = product_exists(servicecatalog, {'Name': product}, PortfolioId=portfolio_id)
if result is None:
click.echo("Could not find product: {}".format(product))
else:
product_id = result.get('ProductId')
LOGGER.info("p: {}".format(product_id))
LOGGER.info('Looking for version: {}'.format(version))
response = servicecatalog.list_provisioning_artifacts(
ProductId=product_id,
)
version_id = None
for provisioning_artifact_detail in response.get('ProvisioningArtifactDetails'):
if provisioning_artifact_detail.get('Name') == version:
version_id = provisioning_artifact_detail.get('Id')
if version_id is | |
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
"""
Created on May 21, 2013
@author: ionadmin
"""
import logging
from django.conf import settings
from django.template.defaultfilters import truncatechars
from django.utils.translation import ugettext_lazy
from iondb.utils import validation
from iondb.rundb.labels import PlanTemplate, Plan
from iondb.rundb.plan.page_plan.application_step_data import ApplicationStepData
from iondb.rundb.plan.page_plan.kits_step_data import KitsStepData, KitsFieldNames
from iondb.rundb.plan.page_plan.monitoring_step_data import MonitoringStepData
from iondb.rundb.plan.page_plan.reference_step_data import ReferenceStepData
from iondb.rundb.plan.page_plan.plugins_step_data import PluginsStepData
from iondb.rundb.plan.page_plan.output_step_data import OutputStepData
from iondb.rundb.plan.page_plan.ionreporter_step_data import IonreporterStepData
from iondb.rundb.plan.page_plan.save_template_step_data import SaveTemplateStepData
from iondb.rundb.plan.page_plan.save_plan_step_data import (
SavePlanStepData,
SavePlanFieldNames,
)
from iondb.rundb.plan.page_plan.barcode_by_sample_step_data import (
BarcodeBySampleStepData,
BarcodeBySampleFieldNames,
)
from iondb.rundb.plan.page_plan.save_plan_by_sample_step_data import (
SavePlanBySampleStepData,
)
from iondb.rundb.plan.page_plan.save_template_by_sample_step_data import (
SaveTemplateBySampleStepData,
)
from iondb.rundb.plan.page_plan.analysis_params_step_data import AnalysisParamsStepData
from iondb.rundb.plan.page_plan.step_names import StepNames
from iondb.rundb.plan.page_plan.application_step_data import ApplicationFieldNames
from iondb.rundb.plan.page_plan.step_helper_types import StepHelperType
from iondb.rundb.plan.views_helper import isOCP_enabled
from collections import OrderedDict
from iondb.rundb.models import (
ApplicationGroup,
PlannedExperiment,
KitInfo,
Chip,
dnaBarcode,
)
logger = logging.getLogger(__name__)
class StepHelper(object):
"""
Helper class for interacting with the plan/template creation steps.
"""
def __init__(
self,
sh_type=StepHelperType.CREATE_NEW_TEMPLATE,
previous_template_id=-1,
previous_plan_id=-1,
experiment_id=-1,
):
self.sh_type = sh_type
self.previous_template_id = previous_template_id
self.previous_plan_id = previous_plan_id
self.experiment_id = experiment_id
self.parentName = None
self.isFromScratch = False
# set to true if we are creating a plan or template from a system template
self.isParentSystem = False
if (
sh_type
in [
StepHelperType.EDIT_PLAN,
StepHelperType.EDIT_PLAN_BY_SAMPLE,
StepHelperType.EDIT_RUN,
StepHelperType.EDIT_TEMPLATE,
]
and previous_template_id == -1
and previous_plan_id == -1
):
logger.error(
"step_helper - StepHelper.init() for EDIT should have an existing ID."
)
raise ValueError(
validation.invalid_required_at_least_one_polymorphic_type_value(
validation.Entity_EntityFieldName(Plan.verbose_name, "id"),
validation.Entity_EntityFieldName(PlanTemplate.verbose_name, "id"),
)
) # "You must pass in a Plan id or a Template id."
self.steps = OrderedDict()
steps_list = []
if settings.FEATURE_FLAGS.IONREPORTERUPLOADER:
steps_list.append(IonreporterStepData(sh_type))
steps_list.append(ApplicationStepData(sh_type))
else:
steps_list.append(IonreporterStepData(sh_type, next_step_url=None))
steps_list.append(ApplicationStepData(sh_type, prev_step_url=None))
if (
sh_type == StepHelperType.CREATE_NEW_PLAN_BY_SAMPLE
or sh_type == StepHelperType.EDIT_PLAN_BY_SAMPLE
or sh_type == StepHelperType.COPY_PLAN_BY_SAMPLE
):
referenceStepData = ReferenceStepData(sh_type)
barcodeBySampleStepData = BarcodeBySampleStepData(sh_type)
analysisParamsStepData = AnalysisParamsStepData(sh_type)
steps_list.extend(
[
KitsStepData(sh_type),
referenceStepData,
analysisParamsStepData,
PluginsStepData(sh_type),
barcodeBySampleStepData,
OutputStepData(sh_type),
SavePlanBySampleStepData(sh_type),
]
)
            # some sections can appear in multiple chevrons; the key is the step name and the value is the step_data object
barcodeBySampleStepData.step_sections.update(
{StepNames.REFERENCE: referenceStepData}
)
barcodeBySampleStepData.step_sections.update(
{StepNames.ANALYSIS_PARAMS: analysisParamsStepData}
)
elif sh_type == StepHelperType.CREATE_NEW_TEMPLATE_BY_SAMPLE:
referenceStepData = ReferenceStepData(sh_type)
saveTemplateBySampleData = SaveTemplateBySampleStepData(sh_type)
analysisParamsStepData = AnalysisParamsStepData(sh_type)
steps_list.extend(
[
KitsStepData(sh_type),
referenceStepData,
analysisParamsStepData,
PluginsStepData(sh_type),
OutputStepData(sh_type),
saveTemplateBySampleData,
]
)
saveTemplateBySampleData.step_sections.update(
{StepNames.REFERENCE: referenceStepData}
)
saveTemplateBySampleData.step_sections.update(
{StepNames.ANALYSIS_PARAMS: analysisParamsStepData}
)
elif (
sh_type == StepHelperType.COPY_TEMPLATE
or sh_type == StepHelperType.CREATE_NEW_TEMPLATE
or sh_type == StepHelperType.EDIT_TEMPLATE
):
referenceStepData = ReferenceStepData(sh_type)
saveTemplateStepData = SaveTemplateStepData(sh_type)
analysisParamsStepData = AnalysisParamsStepData(sh_type)
steps_list.extend(
[
KitsStepData(sh_type),
referenceStepData,
analysisParamsStepData,
PluginsStepData(sh_type),
OutputStepData(sh_type),
saveTemplateStepData,
]
)
saveTemplateStepData.step_sections.update(
{StepNames.REFERENCE: referenceStepData}
)
saveTemplateStepData.step_sections.update(
{StepNames.ANALYSIS_PARAMS: analysisParamsStepData}
)
else:
referenceStepData = ReferenceStepData(sh_type)
# SaveTemplateStepData is needed for the last chevron during plan creation
saveTemplateStepData = SaveTemplateStepData(sh_type)
savePlanStepData = SavePlanStepData(sh_type)
analysisParamsStepData = AnalysisParamsStepData(sh_type)
steps_list.extend(
[
KitsStepData(sh_type),
referenceStepData,
analysisParamsStepData,
PluginsStepData(sh_type),
OutputStepData(sh_type),
saveTemplateStepData,
savePlanStepData,
]
)
savePlanStepData.step_sections.update(
{StepNames.REFERENCE: referenceStepData}
) ###referenceStepData.sectionParentStep = savePlanStepData
savePlanStepData.step_sections.update(
{StepNames.ANALYSIS_PARAMS: analysisParamsStepData}
)
for step in steps_list:
self.steps[step.getStepName()] = step
self.update_dependent_steps(self.steps[StepNames.APPLICATION])
def getStepDict(self):
return self.steps
def updateStepFromRequest(self, request, step_name):
logger.debug(
"updateStepFromRequest... Updating %s with data from %s"
% (step_name, str(request.POST))
)
if step_name in self.getStepDict():
step = self.steps[step_name]
retval = step.updateSavedFieldValuesFromRequest(request)
if retval:
step.updateSavedObjectsFromSavedFields()
self.update_dependent_steps(step)
return retval
return False
def update_dependent_steps(self, updated_step):
"""
        Applies updates to all steps that depend on the updated step.
        If other steps depend on a step that was just updated, they are updated too.
"""
updated_steps = [updated_step]
while updated_steps:
updated_step = updated_steps[0]
for dependent_step in list(self.steps.values()):
# if editing run post-sequencing, don't load defaults when application changes
if (
self.isEditRun()
and updated_step.getStepName() == StepNames.APPLICATION
):
if updated_step.getStepName() in dependent_step._dependsOn:
dependent_step.alternateUpdateFromStep(updated_step)
updated_steps.append(dependent_step)
continue
if updated_step.getStepName() in dependent_step._dependsOn:
dependent_step.updateFromStep(updated_step)
updated_steps.append(dependent_step)
# need to update barcode Set here to avoid circular dependency
if updated_step.getStepName() == StepNames.SAVE_PLAN:
self.steps[StepNames.KITS].savedFields[
KitsFieldNames.BARCODE_ID
] = updated_step.savedFields[SavePlanFieldNames.BARCODE_SET]
updated_steps.remove(updated_step)
def isPlan(self):
return self.sh_type in StepHelperType.PLAN_TYPES
def isPlanBySample(self):
return self.sh_type in StepHelperType.PLAN_BY_SAMPLE_TYPES
def isTemplate(self):
return self.sh_type in StepHelperType.TEMPLATE_TYPES
def isTemplateBySample(self):
return self.sh_type == StepHelperType.CREATE_NEW_TEMPLATE_BY_SAMPLE
def isBarcoded(self):
if self.steps[StepNames.KITS].savedFields[KitsFieldNames.BARCODE_ID]:
return True
return False
def isDualBarcodingBySampleSupported(self):
if self.getApplProduct():
return self.getApplProduct().isDualBarcodingBySampleSupported
return False
def isDynamicDualBarcoding(self):
if not self.isBarcoded() or not self.isDualBarcodingBySampleSupported():
return False
barcode = dnaBarcode.objects.filter(
name=self.steps[StepNames.KITS].savedFields[KitsFieldNames.BARCODE_ID]
).first()
if barcode and not barcode.is_static_pairing():
return True
return False
def isDualBarcoded(self):
if not self.isDynamicDualBarcoding():
return False
step = self.steps.get(StepNames.SAVE_PLAN, None) or self.steps.get(
StepNames.BARCODE_BY_SAMPLE, None
)
if step and step.savedFields[SavePlanFieldNames.END_BARCODE_SET]:
return True
return False
def isCreate(self):
return self.sh_type in [
StepHelperType.CREATE_NEW_PLAN,
StepHelperType.CREATE_NEW_TEMPLATE,
StepHelperType.CREATE_NEW_PLAN_BY_SAMPLE,
]
def isEdit(self):
return self.sh_type in [
StepHelperType.EDIT_PLAN,
StepHelperType.EDIT_TEMPLATE,
StepHelperType.EDIT_PLAN_BY_SAMPLE,
]
def isEditRun(self):
return self.sh_type in [StepHelperType.EDIT_RUN]
def isCopy(self):
return self.sh_type in [
StepHelperType.COPY_PLAN,
StepHelperType.COPY_TEMPLATE,
StepHelperType.COPY_PLAN_BY_SAMPLE,
]
def isIonChef(self):
selectedTemplateKit = self.steps[StepNames.KITS].savedFields[
KitsFieldNames.TEMPLATE_KIT_NAME
]
isIonChef = False
if selectedTemplateKit:
kits = KitInfo.objects.filter(name=selectedTemplateKit)
if kits:
isIonChef = kits[0].kitType == "IonChefPrepKit"
return isIonChef
def isProton(self):
selectedChipType = self.steps[StepNames.KITS].savedFields[
KitsFieldNames.CHIP_TYPE
]
isProton = False
if selectedChipType:
chips = Chip.objects.filter(
name=selectedChipType, instrumentType__iexact="proton"
)
if chips:
isProton = True
return isProton
def isTargetStepAfterOriginal(self, original_step_name, target_step_name):
if original_step_name == StepNames.EXPORT:
return True
if (
original_step_name == StepNames.SAVE_TEMPLATE
or original_step_name == StepNames.SAVE_PLAN
or original_step_name == StepNames.SAVE_TEMPLATE_BY_SAMPLE
or original_step_name == StepNames.SAVE_PLAN_BY_SAMPLE
):
return False
original_index = list(self.steps.keys()).index(original_step_name)
target_index = list(self.steps.keys()).index(target_step_name)
return target_index >= original_index
def getApplProduct(self):
if (
self.steps[StepNames.APPLICATION]
and self.steps[StepNames.APPLICATION].savedObjects[
ApplicationFieldNames.APPL_PRODUCT
]
):
return self.steps[StepNames.APPLICATION].savedObjects[
ApplicationFieldNames.APPL_PRODUCT
]
else:
return None
def getApplProducts(self):
"""
returns a collection of applProduct entries for the selected application and target technique
"""
return self.steps[StepNames.APPLICATION].prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS
]
def getCategorizedApplProducts(self):
"""
returns a collection of categorized applProduct entries for the selected application and target technique
"""
return self.steps[StepNames.APPLICATION].prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS_CATEGORIZED
]
def getApplProductByInstrumentType(self, instrumentType):
applProducts = self.getApplProducts()
if applProducts:
for applProduct in applProducts:
if applProduct.instrumentType.lower() == instrumentType.lower():
return applProduct
return self.getApplProduct()
def isToMandateTargetTechniqueToShow(self):
"""
        this is a workaround until applproduct supports application-group specific rules
"""
if self.getApplicationGroupName():
return True if self.getApplicationGroupName() in ["DNA + RNA"] else False
else:
return True
#
# def getApplProductByRunType(self, runTypeId):
# applProducts = self.steps[StepNames.APPLICATION].prepopulatedFields[ApplicationFieldNames.APPL_PRODUCTS]
#
# for applProduct in applProducts:
# if applProduct.applType_id == runTypeId:
# logger.debug("step_helper.getApplProductByRunType() runTypeId=%s; going to return applProduct=%s" %(runTypeId, applProduct))
# return applProduct
# return None
def getRunTypeObject(self):
# logger.debug("getRunTypeObject nucleotideType=%s" %(self.steps[StepNames.APPLICATION].savedObjects[ApplicationFieldNames.RUN_TYPE].nucleotideType))
# save_plan_step_data.savedFields[SavePlanFieldNames.SAMPLES_TABLE]
return self.steps[StepNames.APPLICATION].savedObjects[
ApplicationFieldNames.RUN_TYPE
]
def getApplicationGroupName(self):
return self.steps[StepNames.APPLICATION].savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
]
def getApplicationCategoryDisplayedName(self):
runType_obj = self.getRunTypeObject()
categories = self.steps[StepNames.APPLICATION].prepopulatedFields[
ApplicationFieldNames.CATEGORIES
]
if categories:
if runType_obj:
return PlannedExperiment.get_validatedApplicationCategoryDisplayedName(
categories, runType_obj.runType
)
else:
return PlannedExperiment.get_applicationCategoryDisplayedName(
categories
)
else:
return ""
def isControlSeqTypeBySample(self):
return self.getApplProduct().isControlSeqTypeBySampleSupported
def isReferenceBySample(self):
return (
self.getApplProduct().isReferenceBySampleSupported
and self.getApplProduct().isReferenceSelectionSupported
)
def isDualNucleotideTypeBySample(self):
return self.getApplProduct().isDualNucleotideTypeBySampleSupported
def isBarcodeKitSelectionRequired(self):
return self.getApplProduct().isBarcodeKitSelectionRequired
def isOCPEnabled(self):
return isOCP_enabled()
def isOCPApplicationGroup(self):
return self.getApplicationGroupName() == "DNA + RNA"
def getNucleotideTypeList(self):
return ["", "DNA", "RNA"]
def getIruQcUploadModeList(self):
iruQcUploadModes = {
"Review results after run completion, then upload to Ion Reporter *": "manual_check",
"Automatically upload to Ion Reporter after run completion": "no_check",
} # TODO: i18n
return iruQcUploadModes
def hasPgsData(self):
if self.isTemplate():
return False
if self.isPlanBySample():
step = self.steps.get(StepNames.BARCODE_BY_SAMPLE, None)
if step:
return step.prepopulatedFields[BarcodeBySampleFieldNames.HAS_PGS_DATA]
else:
step = self.steps.get(StepNames.SAVE_PLAN, None)
if step:
return step.prepopulatedFields[SavePlanFieldNames.HAS_PGS_DATA]
return False
def hasOncoData(self):
if self.isTemplate():
return False
categories = self.steps[StepNames.APPLICATION].prepopulatedFields[
ApplicationFieldNames.CATEGORIES
]
if "Oncomine" in categories or "Onconet" in categories:
return True
if self.isPlanBySample():
step = self.steps.get(StepNames.BARCODE_BY_SAMPLE, None)
if step:
return step.prepopulatedFields[BarcodeBySampleFieldNames.HAS_ONCO_DATA]
else:
step = self.steps.get(StepNames.SAVE_PLAN, None)
if step:
return step.prepopulatedFields[SavePlanFieldNames.HAS_ONCO_DATA]
return False
def validateAll(self):
for step_name, step in list(self.steps.items()):
# do not validate plan step if this is a template helper and vice versa
if (self.isPlan() and step_name != StepNames.SAVE_TEMPLATE) or (
self.isTemplate() and step_name != StepNames.SAVE_PLAN
):
step.validate()
if step.hasErrors():
logger.debug(
"step_helper.validateAll() HAS ERRORS! step_name=%s"
% (step_name)
)
return step_name
return None
def getChangedFields(self):
changed = {}
for step in list(self.steps.values()):
for key, values in list(step._changedFields.items()):
if (
(values[0] or values[1])
and (values[0] != values[1])
and str(values[0] != values[1])
):
changed[key] = values
return changed
def isS5(self):
selectedChipType = self.steps[StepNames.KITS].savedFields[
KitsFieldNames.CHIP_TYPE
]
isS5 = False
if selectedChipType:
chips = Chip.objects.filter(
name=selectedChipType, instrumentType__iexact="s5"
)
if chips:
isS5 = True
return isS5
def getSubNavMenuLabel(self, truncate=40, verbose=True):
label = ""
truncatedParentName = truncatechars(self.parentName, truncate)
if self.isPlan():
if self.isEdit():
# Translators: Dynamic Menu text to Edit Plan
label = ugettext_lazy("plan.workflow.name.edit.label") % {
"planname": truncatedParentName
}
elif self.isEditRun():
# Translators: Dynamic Menu text to Edit Run
label = ugettext_lazy("plan.workflow.name.editrun.label") % {
"planname": truncatedParentName
}
elif self.isCopy():
# Translators: Dynamic Menu text to copy Plan
label = ugettext_lazy("plan.workflow.name.copy.label") % {
"planname": truncatedParentName
}
else:
if not verbose:
# Translators: Dynamic Menu text Create Plan
label = ugettext_lazy("plan.workflow.name.create.label")
else:
# | |
"""Basic specification for a frontend.
See docs on Frontend class for further information.
"""
import doctest
import msgpack
import oxtie
class Frontend(object):
"""Class specifying the main interface for a frontend and some utils.
All other frontend implementations shall inherit from this.
The following illustrates example usage. First we do imports:
>>> import tempfile, os, shutil
>>> from oxtie.fronts import base
>>> from oxtie.backs import simple
Next we create an instance of simple FileBackend and give it a
root directory so it knows where to store things.
>>> backend = simple.FileBackend(root=tempfile.mkdtemp(suffix='.oxtie'))
    Now we create an instance of our Frontend object with the name
'test' and some other facets, and tell it to use our backend.
>>> f = base.Frontend({'name': 'test', '_last_update': '2017', '_bar': 'baz'},
... backend)
We could have sub-classed base.Frontend to have additional properties
or we can just add them whenever we like:
>>> f.big_data = '*' * 10 # You can add properties and they will get saved
Now we simply call the save method and the backend saves it. The nice
    thing here is that the f.save() call would work equally well with any
    backend. This gives you a lot of flexibility since you can choose or
change your backend later.
>>> f.save()
We can load just the header for our saved object if desired. This is
usually very cheap and fast. The header also provides the facets
(which should be kept small) so you can load the header to decide if
you want to do further processing:
>>> hdr = backend.load({'name': 'test'}, only_hdr=True) # Fast load just header
>>> hdr['_facets']['_last_update']
'2017'
Imagine we do want to load the item. We can just call the backend.load
method. Here we explicitly tell the backend what python class to load
the data into. Later we can explore some clever tricks so that you
don't need to specify the frontend because the backend can read it
from the header and find the right class itself.
>>> g = backend.load({'name': 'test'}, front_cls=base.Frontend)
>>> f.big_data == g.big_data # Verify things worked
True
If we wanted to explicitly lookup the frontend class, we could do:
>>> import importlib
>>> mod = importlib.import_module(hdr['mod_name'])
>>> h = backend.load({'name': 'test'}, front_cls=getattr(mod, hdr['cls_name']))
>>> h.big_data == f.big_data
True
If we are OK with implicitly loading modules we can just specify
`allow_load=True` and things will be loaded implicitly. This is
not done by default since loading arbitrary modules is potentially
a security risk and it might get confusing if your paths are not set right.
>>> i = backend.load({'name': 'test'}, allow_load=True)
>>> i.big_data == f.big_data
True
Now cleanup the temporary directory we used for this demo.
>>> shutil.rmtree(backend.root)
"""
def __init__(self, facets, backend=None):
"""Initializer.
:param facets: Dictionary of "facets" for the item. Ideally it
should be a dictionary of string keys and
simple types (e.g., int, float, string, dates)
as values. Which can be serialized with msgpack.
An important feature of the facets is that
they are packed/unpacked in the header so you
can easily/quickly assess whether to do a full
unpack or not or scan packed items. The KEY
for the object must come from the facets.
:param backend=None: Instance of a backend to use in saving. This can
be None if you don't intend to save it or if you
                             want the get_backend method to choose it later.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Main initializer for a Frontend. Sub-classes will almost
certainly have additional properties to store.
"""
self._facets = facets
self._backend = backend
@classmethod
def facets2key(cls, facets):
"""Take in the facets dict and return the key dict.
The `facets` input is a dictionary with string keys and values such
that str(VALUE) is stable (i.e., not something like a float which may
have a different string representation on different systems).
This function shall return a dict which represents the unique key
identifying this object. By default, we just return everything in
`facets` where the key name does not begin with an underscore, but
sub-classes may want to do something more intelligent like return only
`facets['name']` or `'%s/%s' % (facets['category'], facets['name'])`.
The returned value must satisfy the following criteria:
1. It must uniquely identify the object.
2. It must be a dictionary with string keys and string values.
3. The algorithm to get the key from the facets must be a stable
function of only the input facets (e.g., it should not call
random, or use values in the object outside the header).
This key is used to identify the object when saving and loading
to and from the backend.
"""
dummy = cls # Sub-classes may want to use cls, but we ignore it here
return {n: v for n, v in facets.items() if n and n[0] != '_'}
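    # Illustrative example (an addition, mirroring the doctest style used in the
    # class docstring): with the default implementation above, underscore-prefixed
    # facets are treated as metadata and excluded from the key, e.g.
    #
    #   >>> Frontend.facets2key({'name': 'test', '_last_update': '2017'})
    #   {'name': 'test'}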
def get_key(self):
"""Return a dict representing key for this object.
The default version simply does self.facets2key(self.get_facets_dict())
Sub-classes may which to override this or just override facets2key.
"""
return self.facets2key(self.get_facets_dict())
def get_facets_dict(self):
"""Return a reference to the self._facets dict set in __init__.
"""
return self._facets
def serialize(self, body_mode=None):
"""Serialize self into a string.
        :param body_mode=None: Optional string indicating body mode. If None,
                               then we use self.get_default_body_mode().
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: A serialized representation of self.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Workhorse to do the serialization before we save something.
You generally do **NOT** need to override this method in
a sub-class. Instead, you can implement a new mode and
provide a serialize_body_<MODE> method which this method will
lookup and call.
By default we always serialize the header with msgpack but
allow serializing the body in some other mode. This is so
that in deserializing we can always unpack the header using
msgpack and then decide what to do next.
"""
if body_mode is None:
body_mode = self.get_default_body_mode()
hdr = self.serialize_header()
bfunc = getattr(self, 'serialize_body_%s' % body_mode, None)
if bfunc is None:
raise ValueError('Invalid body serialization mode %s' % body_mode)
body = bfunc() # pylint: disable=not-callable
return hdr + msgpack.packb(body, use_bin_type=True)
def serialize_header(self):
"""Serialize the header.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
        :return: A serialized version of the header.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: We generally want to serialize the header and body
separately. This is because:
1. We want the header in a simple standard format that is
easily read. (We use msgpack by default). While there may
be reasons to use different serialization for the body, it is
essential that you can *READ* the header easily so you know
what is going on with the body.
2. You generally want the header to be short and quick so you
can get a little information before deciding whether to
read/parse the body.
"""
header = {'body_mode': 'msgpack', '__oxtie_version__': oxtie.version,
'_facets': self._facets,
'mod_name': self.__module__,
'cls_name': self.__class__.__name__}
return msgpack.packb(header, use_bin_type=True)
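    # Illustrative sketch (an assumption about how a reader might use the format,
    # not part of this class's API): because the header is always msgpack-packed
    # and written before the body, it can be recovered without parsing the body
    # by feeding the raw bytes to a streaming unpacker and taking the first
    # object, e.g.
    #
    #   unpacker = msgpack.Unpacker(raw=False)
    #   unpacker.feed(serialized_bytes)
    #   header = next(unpacker)  # dict with 'body_mode', '_facets', 'cls_name', ...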
def serialize_body_msgpack(self, do_pack=True, skips=()):
"""Serialize the body in msgpack format and return it.
:param do_pack=True: If True, return body in msgpack format, otherwise
return the dictionary we would pack but leave it
as a dict in case the caller wants to modify it
before serializing. This is useful for
sub-classes.
:param skips=(): Tuple of strings indicating properties to skip. We
always skip _backend and _facets since
                         those either go in the header or are not supposed
to be serialized. If you implement a sub-class that
serializes some properties in a special way, you can
                         pass in the name of properties you handle separately
here.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: Bytes representing serialized version of the body of self.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Meant to be called by serialize method and not directly.
The default implementation serializes everything in self.__dict__
except for the given `skips` which are serialized in the header.
If you have special properties, you may want to override this method
to serialize your special items differntly.
NOTE: If you do something special here, you may also need to
override deserialize_body_msgpack as well.
"""
skips = set(skips).union(('_backend', '_facets'))
body = {n: v for n, v in self.__dict__.items() if n not in skips}
if do_pack:
body = msgpack.packb(body, use_bin_type=True)
return body
@classmethod
def get_default_body_mode(cls):
"""Return string indicating default serialization body mode.
"""
| |
else:
if f7 <= 11:
return 7
else:
if f6 <= 23:
return 6
else:
return 3
else:
if f3 <= 19:
if f3 <= 18:
if f2 <= 15:
if f2 <= 14:
if f2 <= 10:
if f1 <= 19:
if f1 <= 18:
return 0
else:
if f2 <= 3:
if f7 <= 1:
return 1
else:
if f4 <= 17:
if f4 <= 14:
return 1
else:
return 0
else:
return 1
else:
if f2 <= 5:
return 2
else:
return 1
else:
return 0
else:
if f4 <= 32:
return 0
else:
return 2
else:
if f4 <= 8:
if f4 <= 6:
if f6 <= 0:
if f8 <= 1:
return 2
else:
return 7
else:
return 2
else:
if f3 <= 17:
return 2
else:
return 1
else:
if f1 <= 19:
return 2
else:
return 0
else:
if f2 <= 17:
if f4 <= 21:
if f2 <= 16:
if f3 <= 11:
if f3 <= 10:
return 2
else:
if f4 <= 12:
if f4 <= 9:
return 3
else:
return 4
else:
return 3
else:
if f3 <= 12:
return 2
else:
if f3 <= 13:
return 3
else:
if f3 <= 14:
return 1
else:
if f3 <= 15:
return 4
else:
return 1
else:
if f4 <= 2:
if f5 <= 15:
if f6 <= 1:
if f5 <= 3:
return 5
else:
return 3
else:
return 3
else:
return 3
else:
if f3 <= 11:
if f3 <= 10:
if f3 <= 8:
if f4 <= 14:
return 3
else:
return 4
else:
return 2
else:
if f4 <= 12:
if f4 <= 9:
return 3
else:
return 4
else:
return 3
else:
if f4 <= 11:
if f5 <= 15:
if f5 <= 10:
if f5 <= 6:
return 4
else:
return 5
else:
return 4
else:
return 3
else:
if f3 <= 14:
if f3 <= 13:
if f5 <= 9:
if f5 <= 6:
if f6 <= 32:
return 3
else:
return 4
else:
return 3
else:
return 3
else:
return 3
else:
return 2
else:
if f4 <= 23:
if f5 <= 21:
if f2 <= 16:
if f3 <= 11:
return 4
else:
if f5 <= 3:
return 2
else:
return 4
else:
return 4
else:
if f5 <= 22:
if f4 <= 22:
return 3
else:
return 5
else:
return 4
else:
if f4 <= 24:
if f5 <= 27:
if f5 <= 9:
return 3
else:
if f5 <= 12:
if f6 <= 5:
return 3
else:
if f6 <= 20:
return 4
else:
return 3
else:
if f2 <= 16:
return 3
else:
if f5 <= 16:
if f7 <= 24:
return 3
else:
if f7 <= 25:
return 4
else:
return 3
else:
return 3
else:
return 3
else:
if f2 <= 16:
if f3 <= 11:
return 3
else:
return 2
else:
if f4 <= 31:
if f3 <= 11:
return 3
else:
if f5 <= 20:
return 4
else:
return 3
else:
if f4 <= 33:
if f5 <= 22:
return 3
else:
if f5 <= 23:
return 2
else:
return 3
else:
return 4
else:
if f3 <= 13:
if f3 <= 10:
if f3 <= 8:
return 3
else:
return 0
else:
if f2 <= 18:
if f1 <= 20:
return 1
else:
return 0
else:
if f2 <= 19:
return 0
else:
if f1 <= 19:
return 1
else:
return 0
else:
if f2 <= 19:
return 0
else:
return 1
else:
if f2 <= 5:
if f2 <= 3:
return 1
else:
if f4 <= 30:
if f4 <= 3:
return 3
else:
if f8 <= 0:
return 3
else:
if f5 <= 34:
if f4 <= 14:
if f4 <= 12:
return 3
else:
return 4
else:
if f6 <= 34:
return 3
else:
return 6
else:
return 5
else:
return 4
else:
if f1 <= 19:
if f2 <= 16:
if f2 <= 11:
return 1
else:
if f2 <= 15:
return 0
else:
return 3
else:
if f1 <= 18:
return 0
else:
return 1
else:
return 0
else:
if f2 <= 14:
if f2 <= 6:
if f2 <= 1:
if f3 <= 27:
return 0
else:
if f1 <= 22:
return 0
else:
return 1
else:
if f2 <= 3:
return 1
else:
if f2 <= 5:
return 2
else:
return 1
else:
if f2 <= 11:
if f2 <= 10:
if f2 <= 9:
return 0
else:
return 1
else:
return 0
else:
return 0
else:
if f2 <= 17:
if f2 <= 15:
if f3 <= 30:
if f1 <= 19:
return 2
else:
return 0
else:
if f4 <= 21:
if f5 <= 18:
if f4 <= 17:
return 2
else:
return 0
else:
return 2
else:
return 1
else:
if f3 <= 32:
if f3 <= 23:
if f2 <= 16:
if f5 <= 16:
return 2
else:
return 4
else:
return 2
else:
if f4 <= 18:
if f4 <= 15:
if f3 <= 30:
return 2
else:
if f4 <= 8:
return 3
else:
return 4
else:
return 3
else:
if f3 <= 30:
if f3 <= 25:
return 1
else:
return 2
else:
if f5 <= 21:
return 4
else:
if f5 <= 22:
return 5
else:
return 4
else:
if f4 <= 14:
return 3
else:
if f4 <= 22:
return 2
else:
return 4
else:
if f2 <= 19:
if f3 <= 32:
return 0
else:
if f4 <= 14:
return 3
else:
return 2
else:
if f1 <= 19:
return 1
else:
return 0
else:
if f3 <= 18:
if f4 <= 22:
if f2 <= 15:
if f4 <= 14:
if f2 <= 14:
if f2 <= 3:
return 1
else:
if f3 <= 6:
if f3 <= 5:
if f2 <= 4:
return 2
else:
return 1
else:
return 2
else:
if f3 <= 7:
return 1
else:
if f1 <= 24:
return 1
else:
return 0
else:
if f4 <= 10:
if f4 <= 1:
return 2
else:
return 1
else:
if f3 <= 3:
return 2
else:
if f3 <= 15:
if f6 <= 21:
return 2
else:
return 1
else:
return 2
else:
if f4 <= 18:
if f3 <= 11:
return 1
else:
if f2 <= 13:
return 1
else:
return 2
else:
if f2 <= 8:
if f2 <= 3:
return 1
else:
if f2 <= 5:
return 2
else:
return 1
else:
if f4 <= 19:
return 2
else:
return 1
else:
if f3 <= 1:
if f7 <= 0:
if f10 <= 9:
return 1
else:
return 10
else:
return 1
else:
if f3 <= 5:
if f4 <= 11:
return 1
else:
if f3 <= 2:
return 1
else:
if f4 <= 14:
return 2
else:
return 1
else:
if f5 <= 31:
if f8 <= 0:
return 1
else:
if f5 <= 1:
return 1
else:
if f6 <= 0:
return 9
else:
if f7 <= 0:
return 8
else:
return 1
else:
if f4 <= 17:
return 1
else:
if f4 <= 18:
return 3
else:
return 1
else:
if f4 <= 23:
if f7 <= 23:
if f2 <= 16:
if f2 <= 13:
return 1
else:
if f2 <= 15:
return 2
else:
return 1
else:
if f2 <= 17:
if f5 <= 3:
if f6 <= 26:
if f6 <= 22:
if f7 <= 2:
if f6 <= 3:
return 6
else:
return 4
else:
return 4
else:
return 4
else:
return 4
else:
if f5 <= 11:
if f6 <= 21:
return 5
else:
if f6 <= 24:
if f7 <= | |
= copy.copy(curr_client_info.output_to_client)
return
ok = True
first_pass = True
can_call__inline_processor__on__output_buffers_are_empty = True
while ok:
try:
if curr_client_info.current_memoryview_output:
nsent = writable_socket.send(curr_client_info.current_memoryview_output)
curr_client_info.current_memoryview_output = curr_client_info.current_memoryview_output[nsent:]
else:
curr_client_info.current_memoryview_output = None
output_fifo_size = curr_client_info.output_to_client.size()
if output_fifo_size > 1:
result_data, result_size, result_qnt = \
curr_client_info.output_to_client.get_at_least_size(524288)
if result_qnt > 1:
curr_client_info.current_memoryview_output = memoryview(b''.join(result_data))
else:
curr_client_info.current_memoryview_output = memoryview(result_data.popleft())
elif output_fifo_size == 1:
curr_client_info.current_memoryview_output = memoryview(curr_client_info.output_to_client.get())
if curr_client_info.current_memoryview_output is None:
# if curr_client_info.ready_to_be_closed:
# if first_pass:
                        #         # I.e. there was no data to send even on the first pass of the loop - i.e. from the start.
                        #         # This means all the data has already been sent, so the connection can be closed.
# self._output_check_sockets.remove(writable_socket)
# self._mark_connection_to_be_closed_immediately(curr_client_info)
                        #     # If the connection is marked as "ready to be closed", we have to wait until the
                        #     # data has been sent and only close the connection at that point. Therefore the
                        #     # socket must be kept in the list of sockets checked for writability.
# else:
# self._output_check_sockets.remove(writable_socket)
if not curr_client_info.ready_to_be_closed:
                            # If the connection is marked as "ready to be closed", we have to wait until the
                            # data has been sent and only close the connection at that point. Therefore the
                            # socket must be kept in the list of sockets checked for writability.
self._output_check_sockets.remove(writable_socket)
if first_pass and curr_client_info.ready_to_be_closed:
                            # I.e. there was no data to send even on the first pass of the loop - i.e. from the start.
                            # This means all the data has already been sent, so the connection can be closed.
self._output_check_sockets.remove(writable_socket)
self._mark_connection_to_be_closed_immediately(curr_client_info)
ok = False
if expected_client_info:
self._io_iteration_result.clients_with_empty_output_fifo.add(
curr_client_info.connected_expected_client_id)
if curr_client_info.has_inline_processor:
if can_call__inline_processor__on__output_buffers_are_empty:
if self._inline_processor__on__output_buffers_are_empty(curr_client_info,
expected_client_info):
ok = True
can_call__inline_processor__on__output_buffers_are_empty = False
except BlockingIOError as err:
ok = False
except InterruptedError as err:
pass
except ConnectionError as err:
# An established connection was aborted by the software in your host machine
if __debug__: self._log('CLOSING {}: Connection reset by peer'.format(curr_client_info.addr.result))
if __debug__: self._log('EXCEPTION: WRITE DATA TO SOCKET: "{}", {}, {}'.format(
curr_client_info.addr.result, err.errno, err.strerror))
self._mark_connection_to_be_closed_immediately(curr_client_info)
ok = False
except (socket.error, OSError) as err:
if (errno.EAGAIN == err.errno) or (errno.EWOULDBLOCK == err.errno):
ok = False
elif errno.EINTR == err.errno:
pass
elif err.errno in SET_OF_CONNECTION_ERRORS:
# Connection reset by peer
if __debug__: self._log(
'CLOSING {}: Connection reset by peer ({})'.format(curr_client_info.addr.result,
err.strerror))
if __debug__: self._log('EXCEPTION: WRITE DATA TO SOCKET: "{}", {}, {}'.format(
curr_client_info.addr.result, err.errno, err.strerror))
self._mark_connection_to_be_closed_immediately(curr_client_info)
ok = False
else:
if 'nt' == os.name:
if errno.WSAECONNRESET == err.errno:
# An existing connection was forcibly closed by the remote host
if __debug__: self._log(
'CLOSING {}: Connection reset by peer'.format(curr_client_info.addr.result))
if __debug__: self._log('EXCEPTION: WRITE DATA TO SOCKET: "{}", {}, {}'.format(
curr_client_info.addr.result, err.errno, err.strerror))
self._mark_connection_to_be_closed_immediately(curr_client_info)
ok = False
else:
raise err
else:
raise err
first_pass = False
def _mark_connection_to_be_closed_immediately(self, client_info: Connection):
client_info.should_be_closed = True
client_info.current_memoryview_input = None
self._connections_marked_to_be_closed_immediately.add(client_info.conn.result)
if client_info.connected_expected_client_id is not None:
self._io_iteration_result.clients_with_disconnected_connection.add(
client_info.connected_expected_client_id)
def _mark_connection_as_ready_to_be_closed(self, client_info: Connection):
client_info.ready_to_be_closed = True
def _mark_connection_as_ready_for_deletion(self, client_info: Connection):
client_info.ready_for_deletion = True
self._connections_marked_as_ready_to_be_deleted.add(client_info.conn.result)
def _handle_connection_error(self, writable_socket: socket.socket):
# Data read from already connected client
curr_client_id = self._connection_by_conn[writable_socket]
curr_client_info = self._connections[curr_client_id]
if curr_client_info.should_be_closed:
return
if __debug__: self._log('handling exceptional condition for {}'.format(curr_client_info.addr.result))
self._mark_connection_to_be_closed_immediately(curr_client_info)
# @profile
def _read_messages_from_raw_input_into_fifo(self, curr_client_info: Connection):
result = False
try:
while True:
if curr_client_info.current_message_length is None:
current_message_length = curr_client_info.raw_input_from_client.get_data(self.message_size_len)
curr_client_info.current_message_length = int.from_bytes(current_message_length, 'little')
current_message = curr_client_info.raw_input_from_client.get_data(
curr_client_info.current_message_length)
if current_message is None:
break
else:
curr_client_info.input_from_client.put(current_message)
curr_client_info.current_message_length = None
result = True
except TypeError:
pass
return result
def _send_message_through_connection(self, client_info: Connection, data):
if client_info.conn.existence:
client_info.output_to_client.put(len(data).to_bytes(self.message_size_len, 'little'))
client_info.output_to_client.put(data)
self._output_check_sockets.add(client_info.conn.result)
else:
if __debug__: self._log('ERROR: SEND MESSAGE TO CLIENT {}: "{}"'.format(client_info.addr.result, data))
raise Exception('EXCEPTION: SEND MESSAGE TO CLIENT: Client is disconnected! You can not send data to him!')
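    # Wire-format note (inferred from _send_message_through_connection above and
    # from _read_messages_from_raw_input_into_fifo): each message is framed as a
    # fixed-size little-endian length prefix of self.message_size_len bytes
    # followed by the payload.  For example, with message_size_len == 4 the
    # message b'ping' travels as
    #
    #   b'\x04\x00\x00\x00' + b'ping'
    #
    # and the reader first consumes message_size_len bytes to learn the payload
    # length, then that many payload bytes.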
def _generate_list_of_messages_with_their_length(self, messages_list):
for message in messages_list:
yield len(message).to_bytes(self.message_size_len, 'little')
yield message
def _send_messages_through_connection(self, client_info: Connection, messages_list):
if client_info.conn.existence:
client_info.output_to_client.extend(self._generate_list_of_messages_with_their_length(messages_list))
self._output_check_sockets.add(client_info.conn.result)
else:
if __debug__: self._log(
'ERROR: SEND MESSAGES TO CLIENT {}: "{}"'.format(client_info.addr.result, messages_list))
raise Exception('EXCEPTION: SEND MESSAGES TO CLIENT: Client is disconnected! You can not send data to him!')
def _send_message_through_connection_raw(self, client_info: Connection, data):
if client_info.conn.existence:
client_info.output_to_client.put(data)
self._output_check_sockets.add(client_info.conn.result)
else:
if __debug__: self._log('ERROR: SEND MESSAGE TO CLIENT {}: "{}"'.format(client_info.addr.result, data))
raise Exception(
'EXCEPTION: SEND MESSAGE TO CLIENT RAW: Client is disconnected! You can not send data to him!')
def _send_messages_through_connection_raw(self, client_info: Connection, messages_list):
if client_info.conn.existence:
client_info.output_to_client.extend(messages_list)
self._output_check_sockets.add(client_info.conn.result)
else:
if __debug__: self._log(
'ERROR: SEND MESSAGES TO CLIENT {}: "{}"'.format(client_info.addr.result, messages_list))
raise Exception(
'EXCEPTION: SEND MESSAGES TO CLIENT RAW: Client is disconnected! You can not send data to him!')
def _move_message_from_fifo_to_memoryview(self, client_info: Connection):
if client_info.current_memoryview_output is None:
if client_info.output_to_client.size():
client_info.current_memoryview_output = memoryview(client_info.output_to_client.get())
# @profile
def _consolidate_and_move_messages_from_fifo_to_memoryview(self, client_info: Connection):
output_fifo_size = client_info.output_to_client.size()
if output_fifo_size > 1:
result_data, result_size, result_qnt = \
client_info.output_to_client.get_at_least_size(524288)
if result_qnt > 1:
client_info.current_memoryview_output = memoryview(b''.join(result_data))
else:
client_info.current_memoryview_output = memoryview(result_data.popleft())
elif output_fifo_size == 1:
client_info.current_memoryview_output = memoryview(client_info.output_to_client.get())
def _process_client_keyword(self, client_socket: socket.socket):
curr_client_id = self._connection_by_conn[client_socket]
curr_client_info = self._connections[curr_client_id]
if curr_client_info.input_from_client.size() >= 0:
expected_client_id = None
expected_client_info = None
this_is_super_server_client = False
if curr_client_info.connected_expected_client is not None:
expected_client_info = curr_client_info.connected_expected_client
expected_client_id = expected_client_info.id
if ConnectionDirectionRole.server == expected_client_info.connection_settings.direction_role:
this_is_super_server_client = True
if this_is_super_server_client:
# This is connection to Super-Server. So we expect an answer like b'OK'
super_server_answer__keyword_accepted = curr_client_info.input_from_client.get()
super_server_answer__keyword_accepted = bytes(super_server_answer__keyword_accepted)
if super_server_answer__keyword_accepted == self.server_answer__keyword_accepted:
# Answer was acceptable
self._unconfirmed_clients.remove(client_socket)
self._io_iteration_result.newly_connected_expected_clients.add(expected_client_id)
if expected_client_info.will_use_raw_client_connection:
curr_client_info.this_is_raw_connection = True
else:
# Answer was NOT acceptable
self._mark_connection_to_be_closed_immediately(curr_client_info)
self._mark_connection_as_ready_for_deletion(curr_client_info)
if __debug__: self._log('ERROR: SUPER SERVER ANSWER - KEYWORD WAS NOT ACCEPTED: {}'.format(
super_server_answer__keyword_accepted))
else:
# This is connection to client. So we expect a keyword
keyword = curr_client_info.input_from_client.get()
keyword = bytes(keyword)
curr_client_info.keyword = keyword
self._unconfirmed_clients.remove(client_socket)
self._send_message_through_connection(curr_client_info, self.server_answer__keyword_accepted)
if keyword in self._keywords_for_expected_clients:
# empty expected client was already registered
expected_client_id = self._keywords_for_expected_clients[keyword]
expected_client_info = self._expected_clients[expected_client_id]
expected_client_info.connection_id = curr_client_id
expected_client_info._Client__connection = curr_client_info
self._conns_of_expected_clients[client_socket] = expected_client_id
curr_client_info.connected_expected_client_id = expected_client_id
curr_client_info.connected_expected_client = expected_client_info
self._io_iteration_result.newly_connected_expected_clients.add(expected_client_id)
if expected_client_info.will_use_raw_client_connection:
curr_client_info.this_is_raw_connection = True
else:
# it is unknown expected client
if self.unexpected_clients_are_allowed:
self._add_unexpected_client(curr_client_id, keyword)
else:
self._mark_connection_to_be_closed_immediately(curr_client_info)
self._mark_connection_as_ready_for_deletion(curr_client_info)
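# Handshake note: the first framed message a connecting client sends is its
# keyword; the server replies with `server_answer__keyword_accepted` (the
# comment above suggests an answer like b'OK'). For a connection to a
# Super-Server the roles flip: this side sends the keyword and waits for that
# answer. A hedged client-side sketch, reusing frame()/unframe() from the
# note after _read_messages_from_raw_input_into_fifo (`recv_exactly` and the
# b'OK' literal are assumptions, not part of this module):
#
#   sock.sendall(frame(b'my-service-keyword'))
#   answer = unframe(recv_exactly(sock))
#   if answer != b'OK':
#       sock.close()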
def _check_is_client_have_data_to_read_in_fifo(self, readable_socket: socket.socket):
client_info = self._connections[self._connection_by_conn[readable_socket]]
if client_info.connected_expected_client_id is not None:
if client_info.input_from_client.size():
self._io_iteration_result.clients_have_data_to_read.add(
client_info.connected_expected_client_id)
if client_info.has_inline_processor:
self._inline_processor__on__data_received(client_info)
def _client_have_data_to_read_in_fifo(self, readable_socket: socket.socket):
if readable_socket in self._conns_of_expected_clients:
expected_client_id = self._conns_of_expected_clients[readable_socket]
expected_client = self._expected_clients[expected_client_id]
self._io_iteration_result.clients_have_data_to_read.add(expected_client_id)
client_info = expected_client._Client__connection
if client_info.has_inline_processor:
self._inline_processor__on__data_received(client_info)
def _inline_processor__apply_parameters(self, connection_info: Connection,
expected_client: Client):
inline_processor = expected_client.obj_for_inline_processing
inline_processor.is_in_raw_mode = inline_processor._InlineProcessor__set__is_in_raw_mode
connection_info.this_is_raw_connection = inline_processor._InlineProcessor__set__is_in_raw_mode
if inline_processor._InlineProcessor__set__mark_socket_as_should_be_closed_immediately:
inline_processor._InlineProcessor__set__mark_socket_as_should_be_closed_immediately = False
self._mark_connection_to_be_closed_immediately(connection_info)
if inline_processor._InlineProcessor__set__mark_socket_as_ready_to_be_closed:
inline_processor._InlineProcessor__set__mark_socket_as_ready_to_be_closed = False
self._mark_connection_as_ready_to_be_closed(connection_info)
def _inline_processor__init_parameters(self, connection_info: Connection,
expected_client: Client):
inline_processor = expected_client.obj_for_inline_processing
inline_processor.is_in_raw_mode = connection_info.this_is_raw_connection
inline_processor._InlineProcessor__set__is_in_raw_mode = connection_info.this_is_raw_connection
def _inline_processor__on__data_received(self, connection_info: Connection):
expected_client = connection_info.connected_expected_client
inline_processor = expected_client.obj_for_inline_processing
try:
while connection_info.input_from_client.size():
inline_processor.on__data_received(connection_info.input_from_client.get())
if inline_processor.output_messages:
while inline_processor.output_messages:
another_message = inline_processor.output_messages.popleft()
if not connection_info.this_is_raw_connection:
connection_info.output_to_client.put(
len(another_message).to_bytes(self.message_size_len, 'little'))
connection_info.output_to_client.put(another_message)
self._output_check_sockets.add(connection_info.conn.result)
if connection_info.output_to_client.get_data_full_size() >= 65536:
self._write_data_to_socket(connection_info)
return True
except:
self.remove_client(expected_client.id)
exc = sys.exc_info()
exception = exc
error_str = '{} {}'.format(str(exception[0]), str(exception[1].args[0]))
formatted_traceback = traceback.format_exception(exception[0], exception[1], exception[2])
exception = exception[:2] + (formatted_traceback,)
trace_str = ''.join(exception[2])
result_string = '\n\tEXCEPTION:{}\n\tTRACE:{}'.format(error_str, trace_str)
if __debug__: self._log('EXCEPTION: INLINE PROCESSOR: ON DATA RECEIVED: {}'.format(result_string))
return False
def _inline_processor__on__output_buffers_are_empty(self, connection_info: Connection,
expected_client: Client):
inline_processor = expected_client.obj_for_inline_processing
if not connection_info.has_inline_processor:
return False
try:
inline_processor.on__output_buffers_are_empty()
if inline_processor.output_messages:
while inline_processor.output_messages:
another_message = inline_processor.output_messages.popleft()
if not connection_info.this_is_raw_connection:
connection_info.output_to_client.put(
len(another_message).to_bytes(self.message_size_len, 'little'))
connection_info.output_to_client.put(another_message)
self._output_check_sockets.add(connection_info.conn.result)
if connection_info.output_to_client.get_data_full_size() >= 65536:
# self._write_data_to_socket(connection_info.conn.result)
self._write_data_to_socket(connection_info)
return True
except:
self.remove_client(expected_client.id)
exc = sys.exc_info()
exception = exc
error_str = '{} {}'.format(str(exception[0]), str(exception[1].args[0]))
formatted_traceback = traceback.format_exception(exception[0], exception[1], exception[2])
exception = exception[:2] + (formatted_traceback,)
trace_str = ''.join(exception[2])
result_string = '\n\tEXCEPTION:{}\n\tTRACE:{}'.format(error_str, trace_str)
if __debug__: self._log(
'EXCEPTION: INLINE PROCESSOR: ON OUTPUT BUFFERS ARE EMPTY: {}'.format(result_string))
return False
def _inline_processor__on__connection_lost(self, connection_info: Connection,
expected_client: Client):
inline_processor = expected_client.obj_for_inline_processing
if not connection_info.has_inline_processor:
return False
try:
inline_processor.on__connection_lost()
except:
exc = sys.exc_info()
exception = exc
error_str = '{} {}'.format(str(exception[0]), str(exception[1].args[0]))
formatted_traceback = traceback.format_exception(exception[0], exception[1], exception[2])
exception = exception[:2] + (formatted_traceback,)
trace_str = ''.join(exception[2])
result_string = '\n\tEXCEPTION:{}\n\tTRACE:{}'.format(error_str, trace_str)
if __debug__: self._log('EXCEPTION: INLINE PROCESSOR: ON CONNECTION LOST: {}'.format(result_string))
self.remove_client(expected_client.id)
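# Judging only by the attribute accesses in the three handlers above, an
# object supplied as `obj_for_inline_processing` needs roughly this shape; a
# hedged sketch, not the library's official base class (the real object also
# carries the _InlineProcessor__set__* flags used in
# _inline_processor__apply_parameters):
#
#   from collections import deque
#
#   class EchoInlineProcessor:
#       def __init__(self):
#           self.output_messages = deque()
#           self.is_in_raw_mode = False
#
#       def on__data_received(self, message):
#           # echo every incoming message back to the peer
#           self.output_messages.append(bytes(message))
#
#       def on__output_buffers_are_empty(self):
#           pass
#
#       def on__connection_lost(self):
#           pass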
def _inline_processor__on__connection_lost_by_connection_id(self, connection_id):
connection_info = self._connections[connection_id]
expected_client_info = connection_info.connected_expected_client
if (expected_client_info is not None) and connection_info.has_inline_processor:
self._inline_processor__on__connection_lost(connection_info, expected_client_info)
self._io_iteration_result.clients_with_disconnected_connection.remove(
expected_client_info.id)
def _unlink_good_af_unix_sockets(self):
if 'posix' == os.name:
for gate_connection_settings in self.gates_connections_settings:
if gate_connection_settings.socket_family == socket.AF_UNIX:
try:
os.unlink(gate_connection_settings.socket_address)
except:
if __debug__: self._log('EXCEPTION: SERVER END: TRYING TO UNLINK GOOD AF_UNIX GATE: {}'.format(
gate_connection_settings.socket_address))
raise
def _check_for_initial_af_unix_socket_unlink(self, connection_settings: ConnectionSettings):
if 'posix' == os.name:
if connection_settings.socket_family == socket.AF_UNIX:
if os.path.exists(connection_settings.socket_address):
if __debug__: self._log('EXCEPTION: INITIATION: GATE: AF_UNIX SOCKET ALREADY EXISTS: {}'.format(
connection_settings.socket_address))
class ThereAreNoGateConections(Exception):
pass
class NotEnoughGateConnections(Exception):
pass
@contextmanager
def asock_io_core_connect(asock_io_core_obj: ASockIOCore, should_have_gate_connections:
from __future__ import division
from googleapiclient.discovery import build
from oauth2client import client, file, tools
from oauth2client.service_account import ServiceAccountCredentials
from sys import stdout
from copy import deepcopy
import pandas as pd
import numpy as np
import httplib2, os
from ._query_parser import QueryParser
no_callback = client.OOB_CALLBACK_URN
default_scope = 'https://www.googleapis.com/auth/analytics.readonly'
default_discovery = 'https://analyticsreporting.googleapis.com/$discovery/rest'
default_token_file = os.path.join(os.path.dirname(__file__), 'analytics.dat')
default_secrets_v3 = os.path.join(os.path.dirname(__file__), 'client_secrets_v3.json')
default_secrets_v4 = os.path.join(os.path.dirname(__file__), 'client_secrets_v4.json')
class OAuthDataReaderV4(object):
'''
Abstract class for handling OAuth2 authentication using the Google
oauth2client library and the V4 Analytics API
'''
def __init__(self, scope, discovery_uri):
'''
Parameters:
-----------
scope : list or string
    Designates the authentication scope(s).
discovery_uri : tuple or string
    Designates discovery uri(s)

Note: the path to the client_secrets.json file (p12 formatted keys are
not supported at this point) is passed to _init_service, not to this
constructor.
'''
self._scope_ = scope
self._discovery_ = discovery_uri
self._api_ = 'v4'
def _init_service(self, secrets):
creds = ServiceAccountCredentials.from_json_keyfile_name(secrets,
scopes=self._scope_)
http = creds.authorize(httplib2.Http())
return build('analytics',
self._api_,
http=http,
discoveryServiceUrl=self._discovery_)
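# A hedged usage sketch for the V4 reader above: only `default_scope` and
# `default_discovery` are real module-level values; the key-file path is a
# placeholder for a service-account JSON key:
#
#   reader = OAuthDataReaderV4(default_scope, default_discovery)
#   service = reader._init_service('service_account_keyfile.json')
#   # `service` is the authenticated Analytics Reporting v4 client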
class OAuthDataReader(object):
'''
Abstract class for handling OAuth2 authentication using the Google
oauth2client library
'''
def __init__(self, scope, token_file_name, redirect):
'''
Parameters:
-----------
scope : str
Designates the authentication scope
token_file_name : str
Location of cache for authenticated tokens
redirect : str
Redirect URL
'''
self._scope_ = scope
self._redirect_url_ = redirect
self._token_store_ = file.Storage(token_file_name)
self._api_ = 'v3'
# NOTE:
# This is a bit rough...
self._flags_ = tools.argparser.parse_args(args=[])
def _authenticate(self, secrets):
'''
Run the authentication process and return an authorized
http object
Parameters
----------
secrets : str
File name for client secrets
Notes
-----
See google documention for format of secrets file
'''
flow = self._create_flow(secrets)
credentials = self._token_store_.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, self._token_store_, self._flags_)
http = credentials.authorize(http=httplib2.Http())
return http
def _create_flow(self, secrets):
'''
Create an authentication flow based on the secrets file
Parameters
----------
secrets : str
File name for client secrets
Notes
-----
See google documentation for format of secrets file
'''
flow = client.flow_from_clientsecrets(secrets,
scope=self._scope_,
message=tools.message_if_missing(secrets))
return flow
def _init_service(self, secrets):
'''
Build an authenticated google api request service using the given
secrets file
'''
http = self._authenticate(secrets)
return build('analytics', self._api_, http=http)
def _reset_default_token_store(self):
os.remove(default_token_file)
class GoogleAnalyticsQuery(OAuthDataReader):
def __init__(self,
scope=default_scope,
token_file_name=default_token_file,
redirect=no_callback,
secrets=default_secrets_v3):
'''
Query the GA API with ease! Simply obtain the 'client_secrets.json' file
as usual and move it to the same directory as this file (default) or
specify the file location when instantiating this class.
If one does not exist, an 'analytics.dat' token file will also be
created / read from the current working directory or whatever has
imported the class (default) or, one may specify the desired
location when instantiating this class. Note that this file requires
write access, so you may need to adjust the file permissions if
using the default value.
API queries must be provided as a dict. object, see the execute_query
docstring for valid options.
'''
super(GoogleAnalyticsQuery, self).__init__(scope,
token_file_name,
redirect)
self._service = self._init_service(secrets)
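# A minimal instantiation sketch (the explicit paths are placeholders; with
# no arguments the module-level defaults defined above are used):
#
#   ga = GoogleAnalyticsQuery()
#   ga = GoogleAnalyticsQuery(secrets='/path/to/client_secrets_v3.json',
#                             token_file_name='/path/to/analytics.dat')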
def execute_query(self, as_dict=False, all_results=False, **query):
'''
Execute **query and translate it to a pandas.DataFrame object.
Parameters:
-----------
as_dict : Boolean
Return the dict object provided by GA instead of the DataFrame
object. Default = False
all_results : Boolean
Obtain the full query results available from GA (up to sampling limit).
This can be VERY time / bandwidth intensive! Default = False
query : dict.
GA query, with some added flexibility in how keys may be specified. Adapted from
https://developers.google.com/analytics/devguides/reporting/core/v3/reference
The valid keys are:
Key Value Reqd. Summary
--------------------------------------------------------------------------------
ids int Y The unique table ID of the form ga:XXXX or simply
XXXX, where XXXX is the Analytics view (profile)
ID for which the query will retrieve the data.
start_date str Y Start date for fetching Analytics data. Requests can
specify a start date formatted as YYYY-MM-DD, or as
a relative date (e.g., today, yesterday, or NdaysAgo
where N is a positive integer).
end_date str Y End date for fetching Analytics data. Request can
specify an end date formatted as YYYY-MM-DD, or as
a relative date (e.g., today, yesterday, or NdaysAgo
where N is a positive integer).
metrics list Y A list of comma-separated metrics, such as
'ga:sessions', 'ga:bounces', or simply 'sessions', etc.
dimensions list N A list of comma-separated dimensions for your
Analytics data, such as 'ga:browser', 'ga:city',
or simply 'browser', etc.
sort list N A list of comma-separated dimensions and metrics
indicating the sorting order and sorting direction
for the returned data.
filters list N Dimension or metric filters that restrict the data
returned for your request. Multiple filters must
be connected with 'and' or 'or' entries, with no
default behaviour prescribed.
segment str N Segments the data returned for your request.
samplingLevel str N The desired sampling level. Allowed Values:
'DEFAULT' - Returns response with a sample size that
balances speed and accuracy.
'FASTER' - Returns a fast response with a smaller
sample size.
'HIGHER_PRECISION' - Returns a more accurate response
using a large sample size, but this may
result in the response being slower.
start_index int N The first row of data to retrieve, starting at 1.
Use this parameter as a pagination mechanism along
with the max-results parameter.
max_results int N The maximum number of rows to include in the response.
output str N The desired output type for the Analytics data returned
in the response. Acceptable values are 'json' and
'dataTable'. Default is 'json'; if this option is
used the 'as_dict' keyword argument is set
to True and a dict object is returned.
fields list N Selector specifying a subset of fields to include in
the response.
***NOT CURRENTLY FORMAT-CHECKED***
userIp str N Specifies IP address of the end user for whom the API
call is being made. Used to cap usage per IP.
***NOT CURRENTLY FORMAT-CHECKED***
quotaUser str N Alternative to userIp in cases when the user's IP
address is unknown.
***NOT CURRENTLY FORMAT-CHECKED***
access_token DISABLED; behaviour is captured in class instantiation.
callback DISABLED; behaviour is captured in class instantiation.
prettyPrint DISABLED.
key DISABLED.
Returns:
-----------
result : pd.DataFrame or dict
metadata : summary data supplied with query result
'''
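# A hedged usage sketch of the query format documented above, with `ga`
# created as in the instantiation sketch in __init__ (the view id, dates,
# metrics and dimensions are placeholders; per the docstring, the 'ga:'
# prefix may be omitted):
#
#   result = ga.execute_query(
#       ids='ga:XXXX',
#       start_date='7daysAgo',
#       end_date='today',
#       metrics=['sessions', 'pageviews'],
#       dimensions=['date', 'deviceCategory'],
#       sort=['-sessions'],
#       max_results=1000)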
try:
formatted_query = QueryParser().parse(**query)
try:
if formatted_query['output']:
as_dict = True
except KeyError as e:
pass
ga_query = self._service.data().ga().get(**formatted_query)
except TypeError as e:
raise ValueError('Error making query: {0}'.format(e))
res = ga_query.execute()
# Fix the 'query' field to be useful to us
for key in list(res['query'].keys()):  # copy keys; the dict is mutated below
res['query'][key.replace('-', '_')] = res['query'].pop(key)
if as_dict:
return res
else:
# re-cast query result (dict) to a pd.DataFrame object
cols = [col[u'name'][3:] for col in res[u'columnHeaders']]
try:
df = pd.DataFrame(res[u'rows'], columns=cols)
# Some kludge to optionally get the complete query result
# up to the sampling limit
if all_results:
print('Obtaining full data set (up to sampling limit).')
print('This can take a VERY long time!')
more = True
temp_qry = formatted_query.copy()
while more:
try:
temp_qry['start_index'] = \
res['nextLink'].split('start-index=')[1].split('&')[0]
# Monitor progress
curr = int(temp_qry['start_index'])
block = int(res['itemsPerPage'])
total = res['totalResults']
stdout.write('\rGetting rows {0} - {1} of {2}'.\
format(curr, curr + block - 1, total))
stdout.flush()
temp_res = self._service.data().ga().get(**temp_qry).execute()
temp_df = pd.DataFrame(temp_res['rows'], columns=cols)
df = pd.concat((df, temp_df), ignore_index=True)
res['nextLink'] = temp_res['nextLink']
except KeyError:
more = False
except KeyError:
df = pd.DataFrame(columns=cols)
pass
# TODO:
# A tool to accurately set the dtype for all columns of df would
# be nice, but is probably far more effort than it's worth.
# This will get the ball rolling, but the end user is likely
# going to be stuck dealing with things on a per-case basis.
def my_mapper(x):
if x == u'INTEGER':
return int
elif x == u'BOOLEAN':
return bool
else:
# this should work with both 2.7 and 3.4
if isinstance(x, str):
return str
else:
return unicode
for hdr in
# TODO: BA This is where we "strip" the invisible stuff from the end
row_text_offset_end = 0
for token in reversed(all_tokens_list[lc:end + 1]):
if token.visible:
break
row_text_offset_end += 1
# if token.token == '<':
# break
end = end - row_text_offset_end
# TODO: BA And we add some stuff to the end of the last one because it causes some issues...
# if idx == len(rows)-1:
# row_text_offset_last = 0
# for token in all_tokens_list[end + 1:]:
# if token.visible:
# break
# row_text_offset_last += 1
# # if token.token == '<':
# # break
# end = end + row_text_offset_last
# get the location info between...
# from all tokens, get all the tokens between and get the string
# then add the markup (include the sequency number)
markup_value = all_tokens_list.getTokensAsString(lc, end + 1, whitespace=True)
# MariaM: 012017
if idx < len(rows) - 1:
# collect all start tags
seen_etags = [s for s in re.findall("<[a-z]+", markup_value)]
prev_start_tags.extend(seen_etags)
markup_data = {
'extract': markup_value,
'sequence_number': idx + 1,
'starting_token_location': lc,
'ending_token_location': end
}
# print "%s: (%s)" % (page, str(markup_data))
if path_for_start not in markup_by_page[page]:
markup_by_page[page][path_for_start] = {
'sequence': []
}
markup_by_page[page][path_for_start]['sequence'].append(markup_data)
for page in markup_by_page:
for path_for_start in markup_by_page[page]:
min_location = 9999999999
max_location = -1
for idx in range(len(markup_by_page[page][path_for_start]['sequence'])):
if markup_by_page[page][path_for_start]['sequence'][idx]['starting_token_location'] < min_location:
min_location = markup_by_page[page][path_for_start]['sequence'][idx]['starting_token_location']
if markup_by_page[page][path_for_start]['sequence'][idx]['ending_token_location'] > max_location:
max_location = markup_by_page[page][path_for_start]['sequence'][idx]['ending_token_location']
markup_by_page[page][path_for_start]['starting_token_location'] = min_location
markup_by_page[page][path_for_start]['ending_token_location'] = max_location
return markup_by_page
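# The structure returned above has roughly this shape (values illustrative):
#
#   {
#       '<page_id>': {
#           '<row start path>': {
#               'starting_token_location': 120,
#               'ending_token_location': 480,
#               'sequence': [
#                   {'extract': '<td>...</td>',
#                    'sequence_number': 1,
#                    'starting_token_location': 120,
#                    'ending_token_location': 170},
#                   # ... one entry per detected row
#               ]
#           }
#       }
#   }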
@staticmethod
def create_row_markups_old(valid_rows, page_manager):
valid_rows_by_page = {}
for path in valid_rows:
for row in valid_rows[path]:
pg = row['page_id']
if pg not in valid_rows_by_page:
valid_rows_by_page[pg] = {}
if path not in valid_rows_by_page[pg]:
valid_rows_by_page[pg][path] = []
valid_rows_by_page[pg][path].append(row)
markup_by_page = {}
for page in valid_rows_by_page:
all_tokens_list = page_manager.getPage(page).tokens
markup_by_page[page] = {}
valid_rows_for_page = valid_rows_by_page[page]
# print "VALID ROWS FOR: %s" % page
# print str(valid_rows_for_page)
earliest_latest_row_locations = {}
for path in valid_rows_for_page: # the path defines the row...
earliest = -1
latest = -1
for row in valid_rows_for_page[path]:
s_loc = row['first_vis_token_loc']
e_loc = row['last_vis_token_loc']
if earliest == -1:
earliest = row['first_vis_token_loc']
latest = row['last_vis_token_loc']
continue
if s_loc < earliest:
earliest = s_loc
if e_loc > latest:
latest = e_loc
earliest_latest_row_locations[path] = (earliest, latest)
# print str(earliest_latest_row_locations)
overlaps = []
for pth in earliest_latest_row_locations:
begin = earliest_latest_row_locations[pth][0]
end = earliest_latest_row_locations[pth][1]
if begin == -1 or end == -1: # ill defined locations
continue
if len(overlaps) == 0: # first guy...
overlaps.append([pth])
continue
overlap_clust = -1
for clust_id in range(len(overlaps)):
cluster = overlaps[clust_id]
for cpath in cluster: # could probably just find min and max of cluster and check w/ that, but easier for now...
p_begin = earliest_latest_row_locations[cpath][0]
p_end = earliest_latest_row_locations[cpath][1]
# now, see if there is not overlap...
if p_end < begin or p_begin > end:
continue
overlap_clust = clust_id
if overlap_clust == -1:
overlaps.append([pth])
else:
overlaps[overlap_clust].append(pth)
# print "OVERLAPS"
# print str(overlaps)
for clust in overlaps:
# print "===oo00 CLUSTER 00oo==="
# print clust
path_for_start = ""
# left most, largest row is the beginning, so use that one as A's'
rows_start_location = 999999999999
rows_end_location = 0
# first, find the member with the most rows
max_rows = max([len(valid_rows_for_page[member]) for member in clust])
# Ok, so the HTML between rows could have been messed up before bc we didn't know that these were
# overlapping lists. For instance, the first row could be alone and now it's merged, so let's remake
# the html between...
by_location_tuples = [] # it will be (start, end, path) just to make it super easy to build the markup
# then once we have this filled in, and we know which path demarcates each row, we simply sort
# then iterate thru making up the rows...
for member in clust:
num_rows = len(
valid_rows_for_page[member]) # its ok that its combined across pages... bc aggregate number
# print "\t--> (%d, %d): %d" % (earliest_latest_row_locations[member][0],
# earliest_latest_row_locations[member][1], num_rows)
# print "\t\t PATH: " + member
for b in valid_rows_for_page[member]:
by_location_tuples.append(
(b['first_vis_token_loc'], b['last_vis_token_loc'], member, b['page_id']))
# print "\t\t\t%s %s %s %s" % (str(b['first_vis_token_loc']), b['visible_text'],
# str(b['last_vis_token_loc']), b['page_id'])
if num_rows == max_rows:
if earliest_latest_row_locations[member][0] < rows_start_location:
rows_start_location = earliest_latest_row_locations[member][0]
path_for_start = member
if earliest_latest_row_locations[member][1] > rows_end_location:
# TODO: BA I think we need to extend this "if it still has overlap with the others??"
rows_end_location = earliest_latest_row_locations[member][1]
print ">> Row starts at: %d and ends at %d (%s) " % (rows_start_location, rows_end_location, path_for_start)
sorted_loc_triples = sorted(by_location_tuples)
# print "SORTED LOCATION TRIPLES"
# print str(by_location_tuples)
# MariaM: 012017
prev_start_tags = []
# <NAME>: 041317
# now we know which path is the "start" and where each one begins and ends, so let's make the structure
# first we want to find all entries of path_for_start
#MariaM: 092717
#this works with shooterswap, but will not work for jair where we have rows <div> <div odd>
rows = [(tpl[0], tpl[3]) for tpl in sorted_loc_triples if tpl[2] == path_for_start]
# Below works with jair, but only with jair; all tuples in sorted_loc_triples are between
# rows_start_location and rows_end_location so below test will choose all tuples
# rows = [(tpl[0], tpl[3]) for tpl in sorted_loc_triples
# if tpl[0] >= rows_start_location and tpl[1] <= rows_end_location]
for idx in range(len(rows)):
lc = rows[idx][0]
if idx < len(rows) - 1:
lc_next = rows[idx + 1][0]
end = lc_next - 1 # we go till right before this guy
else:
end = rows_end_location
#MariaM: 012017
#this is the last row; extend it to the first start tag that did not appear
#in previous rows
prev_start_tags = list(set(prev_start_tags))
for token_index in range(end + 1, len(all_tokens_list)):
#I need just the beginning of the string
last_token = min(token_index + 100, len(all_tokens_list))
after_last_row = all_tokens_list.getTokensAsString(token_index, last_token, whitespace=True)
tag = re.match("<[a-z]+", after_last_row.strip())
if tag is not None and tag.group(0) not in prev_start_tags:
# we stop here with the last row;
end = token_index - 1
break
# TODO: BA This is where we "strip" the invisible stuff from the end
row_text_offset_end = 0
for token in reversed(all_tokens_list[lc:end+1]):
if token.visible:
break
row_text_offset_end += 1
# if token.token == '<':
# break
end = end - row_text_offset_end
# TODO: BA And we add some stuff to the end of the last one because it causes some issues...
# if idx == len(rows)-1:
# row_text_offset_last = 0
# for token in all_tokens_list[end + 1:]:
# if token.visible:
# break
# row_text_offset_last += 1
# # if token.token == '<':
# # break
# end = end + row_text_offset_last
# get the location info between...
# from all tokens, get all the tokens between and get the string
# then add the markup (include the sequency number)
markup_value = all_tokens_list.getTokensAsString(lc, end + 1, whitespace=True)
# MariaM: 012017
if idx < len(rows) - 1:
#collect all start tags
seen_etags = [s for s in re.findall("<[a-z]+", markup_value)]
prev_start_tags.extend(seen_etags)
markup_data = {
'extract': markup_value,
'sequence_number': idx + 1,
'starting_token_location': lc,
'ending_token_location': end
}
# print "%s: (%s)" % (page, str(markup_data))
if path_for_start not in markup_by_page[page]:
markup_by_page[page][path_for_start] = {
'sequence': []
}
markup_by_page[page][path_for_start]['sequence'].append(markup_data)
for page in markup_by_page:
for path_for_start in markup_by_page[page]:
min_location = 9999999999
max_location = -1
for idx in range(len(markup_by_page[page][path_for_start]['sequence'])):
if markup_by_page[page][path_for_start]['sequence'][idx]['starting_token_location'] < min_location:
min_location = markup_by_page[page][path_for_start]['sequence'][idx]['starting_token_location']
if markup_by_page[page][path_for_start]['sequence'][idx]['ending_token_location'] > max_location:
max_location = markup_by_page[page][path_for_start]['sequence'][idx]['ending_token_location']
markup_by_page[page][path_for_start]['starting_token_location'] = min_location
markup_by_page[page][path_for_start]['ending_token_location'] = max_location
return markup_by_page
#If all pages contain the same list, remove that list; changes input markup
@staticmethod
def remove_duplicate_lists(markup):
all_lists = {}
for page in markup:
#for each list
for list in markup[page]:
if list not in all_lists:
all_lists[list] = []
sequence = markup[page][list]['sequence']
#all extracted values for this list
extract_for_page = []
for id in range(len(sequence)):
extract = sequence[id]['extract']
extract_for_page.append(extract)
all_lists[list].append(extract_for_page)
#print "All Lists==================="
#print json.dumps(all_lists, sort_keys=True, indent=2, separators=(',', ': '))
#check if we have duplicate lists
for list in all_lists:
if len(all_lists[list]) == len(markup):
first_extract = ''.join(all_lists[list][0])
same_extract = True
for id in range(1,len(all_lists[list])):
#check if first_extract is the same
default code address to value"""
self._default_code_address = value
def set_default_data_address(self, value):
"""Set the default code address to value"""
self._default_data_address = value
def register_variable_definition(self, definition):
"""Register a new variable definition."""
self._variables.append(definition)
def set_variables_definition(self, definitions):
"""Register a new set of variable definitions."""
self._variables = definitions
def update_register_definition(self, definition):
"""Update a register definition."""
self._registers = [elem for elem in self._registers
if elem.name != definition.name]
self._registers.append(definition)
def register_register_definition(self, definition):
"""Register a new register definition."""
self._registers.append(definition)
def register_instruction_definitions(self, definitions, prepend=False):
"""Register new instruction definitions."""
if prepend:
self._code = definitions + self._code
else:
self._code += definitions
def set_instruction_definitions(self, definitions):
"""Set new instruction definitions."""
self._code = definitions
def register_raw_definition(self, name, value):
"""Register a new raw definition."""
self._raw[name] = self._raw.get(name, '') + value
def register_dat_mapping(self, definition):
"""Register a new DAT mapping."""
if isinstance(definition, list):
self._dat += definition[:]
else:
self._dat.append(definition)
def register_dat_property(self, prop, value):
"""Register a new DAT property."""
try:
self._dat_prop[prop] = value
except MicroprobeDuplicatedValueError:
raise MicroprobeMPTFormatError(
"DAT property '%s' specified twice" % prop
)
def set_roi_ins(self, value):
"""Set region of interest (in instruction)"""
if (value[0] >= value[1]):
raise MicroprobeMPTFormatError(
"Empty instruction region of interest range specified "
"%s" % str(value)
)
self._roi_ins = value
def set_roi_memory_access_trace(self, trace):
"""Set memory access trace"""
if not trace:
raise MicroprobeMPTFormatError(
"Empty memory access trace"
)
self._roi_memory_access_trace = trace
def set_roi_cyc(self, value):
"""Set region of interest (in cycles)"""
if (value[0] >= value[1]):
raise MicroprobeMPTFormatError(
"Empty cycle region of interest range specified "
"'%s'" % list(value)
)
self._roi_cyc = value
def set_instruction_count(self, value):
"""Set instruction count"""
self._instruction_count = value
def set_cycle_count(self, value):
"""Set cycle count"""
self._cycle_count = value
def set_state(self, state):
"""Set state file"""
self._state = state
class MicroprobeTestDefinitionV0x5(MicroprobeTestDefinitionDefault):
"""Class to represent a Microprobe Test configuration (v0.5)"""
version = 0.5
class MicroprobeTestParser(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract class to represent a Microprobe Test configuration parser."""
@abc.abstractmethod
def __init__(self):
""" """
pass
@abc.abstractmethod
def parse_filename(self, filename):
""" """
raise NotImplementedError
@abc.abstractmethod
def parse_contents(self, contents):
""" """
raise NotImplementedError
@abc.abstractmethod
def parse_variable(self, contents):
""" """
raise NotImplementedError
@abc.abstractmethod
def parse_register(self, contents):
""" """
raise NotImplementedError
@abc.abstractmethod
def parse_instruction(self, contents):
""" """
raise NotImplementedError
@abc.abstractmethod
def dump_mpt_config(self, mpt_config, filename):
""" """
raise NotImplementedError
class MicroprobeTestParserDefault(MicroprobeTestParser):
"""Class to represent a Microprobe Test configuration parser."""
version = 0.5
def __init__(self):
""" """
super(MicroprobeTestParserDefault, self).__init__()
self._configparser_cls = six.moves.configparser.SafeConfigParser
self._configparser_default = {}
self._configparser_dict = OrderedDict
self._files_readed = RejectingOrderedDict()
self._filename = None
self._definition_class = None
def parse_filename(self, filename):
""" """
filename = os.path.abspath(filename)
LOG.debug("Start parsing microprobe test file: '%s'", filename)
self._filename = filename
self._basepath = os.path.dirname(filename)
self._files_readed[filename] = 0
contents = self._read_file_contents(filename)
contents = self._expand(contents)
try:
parser = self._parse_contents(contents)
except six.moves.configparser.ParsingError as exc:
raise MicroprobeMPTFormatError(
exc.message.replace(
"???", filename
)
)
self._check_sections(parser)
# Check version number for the parser
try:
required_version = float(parser.get("MPT", "mpt_version"))
except AttributeError as exc:
raise MicroprobeMPTFormatError(
"Unable to process the"
" mpt_version string"
)
except ValueError as exc:
raise MicroprobeMPTFormatError(
"mpt_version should be a numerical"
" value"
)
LOG.debug("Required version: '%s'", required_version)
definition = [
definition_class
for definition_class in get_all_subclasses(
MicroprobeTestDefinition
) if definition_class.version == required_version
]
if len(definition) == 0:
versions = [
definition_class.version
for definition_class in get_all_subclasses(
MicroprobeTestDefinition
)
]
raise MicroprobeMPTFormatError(
"Unable to find the specified test definition for "
"mpt_version: %s. Valid versions: %s" %
(required_version, versions)
)
elif len(definition) > 1:
raise MicroprobeMPTFormatError(
"Multiple test format definitions for mpt_version: %s" %
required_version
)
assert len(definition) == 1
definition = definition[0]
self._definition_class = definition
if required_version != self.version:
# Parse with an appropriate instance version
parser = [
parser_class
for parser_class in get_all_subclasses(MicroprobeTestParser)
if parser_class.version == required_version
]
if len(parser) == 1:
return parser[0]().parse_contents(contents)
elif len(parser) == 0:
versions = [
parser_class.version
for parser_class in get_all_subclasses(
MicroprobeTestParser
)
]
raise MicroprobeMPTFormatError(
"Unable to find the specified parser for mpt_version: %s."
" Valid versions: %s" % (required_version, versions)
)
elif len(parser) > 1:
raise MicroprobeMPTFormatError(
"Multiple parser definitions for mpt_version: %s" %
required_version
)
else:
return self.parse_contents(contents)
def parse_instruction(self, contents):
return contents
def parse_contents(self, contents):
""" """
# Parse the contents
LOG.debug("Start parsing contents: \n%s", contents)
parser = self._parse_contents(contents)
# Minimum format checks
LOG.debug("Check sections")
self._check_sections(parser)
# Create the test definition object
test_definition = self._definition_class()
if parser.has_section("STATE"):
LOG.debug("Parsing [STATE] section")
items = parser.items("STATE")
if "contents" in dict(items):
content_path = dict(items)["contents"]
if not os.path.isabs(content_path):
content_path = os.path.join(self._basepath,
content_path)
if not os.path.isfile(content_path):
raise MicroprobeMPTFormatError(
"Unable to find state content file:"
" %s" % content_path
)
test_definition.set_state(content_path)
with open_generic_fd(content_path, "r") as content_file:
lineno = 0
lines = content_file.readlines()
progress = Progress(
len(lines),
msg="State lines parsed:"
)
for line in lines:
progress()
if not isinstance(line, str):
line = line.decode()
words = line.split(";")[0].split()
lineno += 1
# Empty line
if len(words) == 0:
continue
prefix = words[0]
if prefix == "R":
if len(words) != 3:
raise MicroprobeMPTFormatError(
"Unable to parse content file %s:%d: "
"Bad register format" %
(content_path, lineno)
)
LOG.debug(
"%s:%d: Register %s = %s" %
(content_path, lineno, words[1], words[2]))
name = words[1]
value = words[2]
try:
register_definition = self.parse_register(
(name.upper(), value)
)
except SyntaxError:
raise MicroprobeMPTFormatError(
"Unable to parse content file %s:%d: "
"Bad register format" %
(content_path, lineno)
)
except ValueError:
raise MicroprobeMPTFormatError(
"Unable to parse content file %s:%d: "
"Bad register format" %
(content_path, lineno)
)
if register_definition.name in [
register.name for register
in test_definition.registers
]:
LOG.warning(
"Register '%s' defined multiple times",
name.upper()
)
test_definition.update_register_definition(
register_definition
)
else:
test_definition.register_register_definition(
register_definition
)
elif prefix == "M":
if len(words) != 3:
raise MicroprobeMPTFormatError(
"Unable to parse content file %s:%d: "
"Bad memory format" %
(content_path, lineno)
)
address = words[1]
data = words[2]
var_name = "mem_" + address
# TODO uint32_t instead?
var_type = "uint8_t"
var_chars = 2
var_align = None
var_len = len(data)
var_items = [
int(data[i:i + var_chars], 16)
for i in range(0, var_len, var_chars)
]
var_nelems = len(var_items)
# TODO Show length
LOG.debug(
"%s:%d: Memory %s = [%d]",
content_path, lineno, words[1], var_nelems
)
var_def = MicroprobeTestVariableDefinition(
var_name.upper().strip(), var_type,
var_nelems, int(address, 16),
var_align, var_items
)
test_definition.register_variable_definition(
var_def
)
else:
raise MicroprobeMPTFormatError(
"Unable to parse content file %s:%d: "
"Unknown prefix '%s'" %
(content_path, lineno, prefix)
)
del progress
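# A hedged example of the state contents file parsed above: ';' starts a
# comment, 'R' lines carry <name> <value>, 'M' lines carry
# <address> <hex byte string>. The register names and values shown are
# placeholders, not taken from any real architecture definition:
#
#   ; registers
#   R GPR0 0x0000000000000001
#   R GPR1 0x00000000DEADBEEF
#   ; memory
#   M 0x0000000010000000 0011223344556677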
# Populate the test definition object
if parser.has_section("DATA"):
LOG.debug("Parsing [DATA] section")
items = parser.items("DATA")
for name, value in items:
value = value.replace("\t", " ")
LOG.debug("Parsing '%s = %s'", name, value)
try:
if name == "default_address":
if test_definition.default_data_address is not None:
LOG.warning(
"default address of '[DATA]' specified"
" at least twice"
)
test_definition.set_default_data_address(
_parse_value(value)
)
else:
variable_definition = self.parse_variable(
(name, value)
)
test_definition.register_variable_definition(
variable_definition
)
except SyntaxError:
LOG.critical("Syntax error")
raise MicroprobeMPTFormatError(
"Unable to parse line '%s = %s' in "
"section [DATA] of file: '%s'" %
(name, value, self._filename)
)
except ValueError:
LOG.critical("Value error")
raise MicroprobeMPTFormatError(
"Unable to parse line '%s = %s' in "
"section [DATA] of file: '%s'" %
(name, value, self._filename)
)
if parser.has_section("REGISTERS"):
LOG.debug("Parsing [REGISTERS] section")
items = parser.items("REGISTERS")
for name, value in items:
LOG.debug("Parsing '%s = %s'", name.upper(), value)
value = value.replace("\t", " ")
try:
register_definition = self.parse_register(
(name.upper(), value)
)
except SyntaxError:
raise MicroprobeMPTFormatError(
"Unable to parse line '%s = %s' in "
"section [REGISTERS] of file: '%s'" %
(name, value, self._filename)
)
except ValueError:
raise MicroprobeMPTFormatError(
"Unable to parse line '%s = %s' in "
"section [REGISTERS] of file: '%s'" %
(name, value, self._filename)
)
# TODO Collides with registers in [STATE]
if register_definition.name in [
register.name for register in test_definition.registers
]:
LOG.warning(
"Register '%s' defined multiple times "
" in [REGISTERS] section", name.upper()
)
test_definition.update_register_definition(
register_definition
)
else:
test_definition.register_register_definition(
register_definition
)
if parser.has_section("RAW"):
LOG.debug("Parsing [RAW] section")
items = parser.items("RAW")
for name, value in items:
value = value.replace("\t", " ")
raw_definition = _parse_raw(name.upper(), value)
if raw_definition[0] not in [
'FILE_HEADER', 'FILE_FOOTER', 'CODE_HEADER', 'CODE_FOOTER'
]:
LOG.warning(
"Skipping RAW entry '%s' in [RAW] section",
name.upper()
)
continue
if raw_definition[0] in [raw for raw in test_definition.raw]:
LOG.warning(
"RAW entry '%s' defined multiple times "
" in [RAW] section. Appending.", name.upper()
)
test_definition.register_raw_definition(*raw_definition)
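# A hedged sketch of the MPT layout this parser walks through; only the
# section names and keys handled in this method come from the code, while
# the concrete values and the variable-definition syntax are placeholders:
#
#   [MPT]
#   mpt_version = 0.5
#
#   [STATE]
#   contents = state_dump.txt
#
#   [DATA]
#   default_address = 0x10000000
#
#   [REGISTERS]
#   GPR0 = 0x0
#
#   [RAW]
#   FILE_HEADER = ; generated by the tool
#
#   [CODE]
#   ; instruction definitions (parsing of this section continues below)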
if parser.has_section("CODE"):
LOG.debug("Parsing [CODE] section")
items = parser.items("CODE")
if
window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__Concat (self, wherex, wherey)\n'))
self.objMT_post__Concat.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(10, 10,self.objMT_post__Concat)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__Concat.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__Concat)
self.globalAndLocalPostcondition(self.objMT_post__Concat, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__Constant=ButtonConfig(self)
self.objMT_post__Constant.Contents.Text.setValue('New Constant')
self.objMT_post__Constant.Contents.Image.setValue('')
self.objMT_post__Constant.Contents.lastSelected= 'Text'
self.objMT_post__Constant.Drawing_Mode.setValue(1)
self.objMT_post__Constant.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__Constant (self, wherex, wherey)\n'))
self.objMT_post__Constant.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(135, 80,self.objMT_post__Constant)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__Constant.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__Constant)
self.globalAndLocalPostcondition(self.objMT_post__Constant, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__GenericNode_UMLRT2Kiltera_MM=ButtonConfig(self)
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.Contents.Text.setValue('New GenericNode')
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.Contents.Image.setValue('')
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.Contents.lastSelected= 'Text'
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.Drawing_Mode.setValue(1)
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__GenericNode_UMLRT2Kiltera_MM (self, wherex, wherey)\n'))
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(260, 150,self.objMT_post__GenericNode_UMLRT2Kiltera_MM)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__GenericNode_UMLRT2Kiltera_MM.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__GenericNode_UMLRT2Kiltera_MM)
self.globalAndLocalPostcondition(self.objMT_post__GenericNode_UMLRT2Kiltera_MM, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__paired_with=ButtonConfig(self)
self.objMT_post__paired_with.Contents.Text.setValue('New paired_with')
self.objMT_post__paired_with.Contents.Image.setValue('')
self.objMT_post__paired_with.Contents.lastSelected= 'Text'
self.objMT_post__paired_with.Drawing_Mode.setValue(1)
self.objMT_post__paired_with.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__paired_with (self, wherex, wherey)\n'))
self.objMT_post__paired_with.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(10, 10,self.objMT_post__paired_with)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__paired_with.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__paired_with)
self.globalAndLocalPostcondition(self.objMT_post__paired_with, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__match_contains=ButtonConfig(self)
self.objMT_post__match_contains.Contents.Text.setValue('New match_contains')
self.objMT_post__match_contains.Contents.Image.setValue('')
self.objMT_post__match_contains.Contents.lastSelected= 'Text'
self.objMT_post__match_contains.Drawing_Mode.setValue(1)
self.objMT_post__match_contains.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__match_contains (self, wherex, wherey)\n'))
self.objMT_post__match_contains.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(135, 80,self.objMT_post__match_contains)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__match_contains.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__match_contains)
self.globalAndLocalPostcondition(self.objMT_post__match_contains, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__apply_contains=ButtonConfig(self)
self.objMT_post__apply_contains.Contents.Text.setValue('New apply_contains')
self.objMT_post__apply_contains.Contents.Image.setValue('')
self.objMT_post__apply_contains.Contents.lastSelected= 'Text'
self.objMT_post__apply_contains.Drawing_Mode.setValue(1)
self.objMT_post__apply_contains.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__apply_contains (self, wherex, wherey)\n'))
self.objMT_post__apply_contains.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(260, 150,self.objMT_post__apply_contains)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__apply_contains.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__apply_contains)
self.globalAndLocalPostcondition(self.objMT_post__apply_contains, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__directLink_T=ButtonConfig(self)
self.objMT_post__directLink_T.Contents.Text.setValue('New directLink_T')
self.objMT_post__directLink_T.Contents.Image.setValue('')
self.objMT_post__directLink_T.Contents.lastSelected= 'Text'
self.objMT_post__directLink_T.Drawing_Mode.setValue(1)
self.objMT_post__directLink_T.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__directLink_T (self, wherex, wherey)\n'))
self.objMT_post__directLink_T.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(10, 10,self.objMT_post__directLink_T)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__directLink_T.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__directLink_T)
self.globalAndLocalPostcondition(self.objMT_post__directLink_T, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__directLink_S=ButtonConfig(self)
self.objMT_post__directLink_S.Contents.Text.setValue('New directLink_S')
self.objMT_post__directLink_S.Contents.Image.setValue('')
self.objMT_post__directLink_S.Contents.lastSelected= 'Text'
self.objMT_post__directLink_S.Drawing_Mode.setValue(1)
self.objMT_post__directLink_S.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__directLink_S (self, wherex, wherey)\n'))
self.objMT_post__directLink_S.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(135, 80,self.objMT_post__directLink_S)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__directLink_S.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__directLink_S)
self.globalAndLocalPostcondition(self.objMT_post__directLink_S, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__indirectLink_S=ButtonConfig(self)
self.objMT_post__indirectLink_S.Contents.Text.setValue('New indirectLink_S')
self.objMT_post__indirectLink_S.Contents.Image.setValue('')
self.objMT_post__indirectLink_S.Contents.lastSelected= 'Text'
self.objMT_post__indirectLink_S.Drawing_Mode.setValue(1)
self.objMT_post__indirectLink_S.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__indirectLink_S (self, wherex, wherey)\n'))
self.objMT_post__indirectLink_S.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(260, 150,self.objMT_post__indirectLink_S)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__indirectLink_S.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__indirectLink_S)
self.globalAndLocalPostcondition(self.objMT_post__indirectLink_S, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__backward_link=ButtonConfig(self)
self.objMT_post__backward_link.Contents.Text.setValue('New backward_link')
self.objMT_post__backward_link.Contents.Image.setValue('')
self.objMT_post__backward_link.Contents.lastSelected= 'Text'
self.objMT_post__backward_link.Drawing_Mode.setValue(1)
self.objMT_post__backward_link.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__backward_link (self, wherex, wherey)\n'))
self.objMT_post__backward_link.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(10, 10,self.objMT_post__backward_link)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__backward_link.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__backward_link)
self.globalAndLocalPostcondition(self.objMT_post__backward_link, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__trace_link=ButtonConfig(self)
self.objMT_post__trace_link.Contents.Text.setValue('New trace_link')
self.objMT_post__trace_link.Contents.Image.setValue('')
self.objMT_post__trace_link.Contents.lastSelected= 'Text'
self.objMT_post__trace_link.Drawing_Mode.setValue(1)
self.objMT_post__trace_link.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__trace_link (self, wherex, wherey)\n'))
self.objMT_post__trace_link.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(135, 80,self.objMT_post__trace_link)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__trace_link.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__trace_link)
self.globalAndLocalPostcondition(self.objMT_post__trace_link, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__hasAttribute_S=ButtonConfig(self)
self.objMT_post__hasAttribute_S.Contents.Text.setValue('New hasAttribute_S')
self.objMT_post__hasAttribute_S.Contents.Image.setValue('')
self.objMT_post__hasAttribute_S.Contents.lastSelected= 'Text'
self.objMT_post__hasAttribute_S.Drawing_Mode.setValue(1)
self.objMT_post__hasAttribute_S.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__hasAttribute_S (self, wherex, wherey)\n'))
self.objMT_post__hasAttribute_S.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(260, 150,self.objMT_post__hasAttribute_S)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__hasAttribute_S.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__hasAttribute_S)
self.globalAndLocalPostcondition(self.objMT_post__hasAttribute_S, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__hasAttribute_T=ButtonConfig(self)
self.objMT_post__hasAttribute_T.Contents.Text.setValue('New hasAttribute_T')
self.objMT_post__hasAttribute_T.Contents.Image.setValue('')
self.objMT_post__hasAttribute_T.Contents.lastSelected= 'Text'
self.objMT_post__hasAttribute_T.Drawing_Mode.setValue(1)
self.objMT_post__hasAttribute_T.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__hasAttribute_T (self, wherex, wherey)\n'))
self.objMT_post__hasAttribute_T.graphClass_= graph_ButtonConfig
if self.genGraphics:
from graph_ButtonConfig import *
new_obj = graph_ButtonConfig(10, 10,self.objMT_post__hasAttribute_T)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag('ButtonConfig', new_obj.tag)
else: new_obj = None
self.objMT_post__hasAttribute_T.graphObject_ = new_obj
rootNode.addNode(self.objMT_post__hasAttribute_T)
self.globalAndLocalPostcondition(self.objMT_post__hasAttribute_T, rootNode)
self.globalPrecondition(rootNode)
self.objMT_post__leftExpr=ButtonConfig(self)
self.objMT_post__leftExpr.Contents.Text.setValue('New leftExpr')
self.objMT_post__leftExpr.Contents.Image.setValue('')
self.objMT_post__leftExpr.Contents.lastSelected= 'Text'
self.objMT_post__leftExpr.Drawing_Mode.setValue(1)
self.objMT_post__leftExpr.Action.setValue(('ActionButton1', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1),(['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE OBJECT'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '# This method has as parameters:\n# - wherex : X Position in window coordinates where the user clicked.\n# - wherey : Y Position in window coordinates where the user clicked.\nnewPlace = self.createNewMT_post__leftExpr (self, wherex, wherey)\n'))
# Repository: EulerWong/director
from director import lcmUtils
from director import objectmodel as om
from director import visualization as vis
from director.utime import getUtime
from director import transformUtils
from director.debugVis import DebugData
from director import ioUtils
from director import robotstate
from director import applogic as app
from director import vtkAll as vtk
from director.lcmframe import frameFromPositionMessage, positionMessageFromFrame
from director.simpletimer import SimpleTimer
from director.shallowCopy import shallowCopy
from director import roboturdf
from director import filterUtils
import director.vtkNumpy as vnp
import os
import math
import numpy as np
from director import drcargs
import drc as lcmdrc
from bot_core.pose_t import pose_t
from bot_core.robot_state_t import robot_state_t
import functools
import json
from PythonQt import QtGui, QtCore
_footMeshes = None
_footMeshFiles = []
_modelName = "valkyrie" # either atlas_v3/v4/v5 or valkyrie
_pelvisLink = '' # pelvis
_leftFootLink = '' # l_foot
_rightFootLink = '' # r_foot
with open(drcargs.args().directorConfigFile) as directorConfigFile:
directorConfig = json.load(directorConfigFile)
_modelName = directorConfig['modelName']
directorConfigDirectory = os.path.dirname(os.path.abspath(directorConfigFile.name))
if 'leftFootMeshFiles' in directorConfig:
_footMeshFiles.append( directorConfig['leftFootMeshFiles'] )
_footMeshFiles.append( directorConfig['rightFootMeshFiles'] )
for j in range(0,2):
for i in range(len(_footMeshFiles[j])):
_footMeshFiles[j][i] = os.path.join(directorConfigDirectory, _footMeshFiles[j][i])
if 'pelvisLink' in directorConfig:
_pelvisLink = directorConfig['pelvisLink']
if 'leftFootLink' in directorConfig:
_leftFootLink = directorConfig['leftFootLink']
_rightFootLink = directorConfig['rightFootLink']
DEFAULT_PARAM_SET = 'Drake Nominal'
DEFAULT_STEP_PARAMS = {'BDI': {'Min Num Steps': 0,
'Max Num Steps': 12,
'Min Step Width': 0.20,
'Nominal Step Width': 0.26,
'Nominal Forward Step': 0.15,
'Max Forward Step': 0.40,
'Max Step Width': 0.4,
'Max Upward Step': 0.18,
'Max Downward Step': 0.18,
'Behavior': 0,
'Leading Foot': 0,
'Swing Height': 0.05,
'Drake Swing Speed': 0.2,
'Drake Instep Shift': 0.0275,
'Drake Min Hold Time': 2.0,
'Support Contact Groups': 0,
'Prevent Swing Undershoot': 0,
'Prevent Swing Overshoot': 0,
'Map Mode': 0,
'IHMC Transfer Time': 1.0,
'IHMC Swing Time': 1.2},
'Drake Nominal': {'Min Num Steps': 0,
'Max Num Steps': 16,
'Min Step Width': 0.20,
'Nominal Step Width': 0.26,
'Nominal Forward Step': 0.26,
'Max Forward Step': 0.30,
'Max Step Width': 0.32,
'Max Upward Step': 0.18,
'Max Downward Step': 0.18,
'Behavior': 2,
'Leading Foot': 0,
'Swing Height': 0.03,
'Drake Swing Speed': 0.6,
'Drake Instep Shift': 0.005,
'Drake Min Hold Time': 1.0,
'Support Contact Groups': 0,
'Prevent Swing Undershoot': 0,
'Prevent Swing Overshoot': 0,
'Map Mode': 0,
'IHMC Transfer Time': 1.0,
'IHMC Swing Time': 1.2},
'IHMC Nominal': {'Min Num Steps': 0,
'Max Num Steps': 16,
'Min Step Width': 0.20,
'Nominal Step Width': 0.26,
'Nominal Forward Step': 0.26,
'Max Forward Step': 0.30,
'Max Step Width': 0.32,
'Max Upward Step': 0.18,
'Max Downward Step': 0.18,
'Behavior': 2,
'Leading Foot': 0,
'Swing Height': 0.05,
'Drake Swing Speed': 0.2,
'Drake Instep Shift': 0.0275,
'Drake Min Hold Time': 2.0,
'Support Contact Groups': 0,
'Prevent Swing Undershoot': 0,
'Prevent Swing Overshoot': 0,
'Map Mode': 0,
'IHMC Transfer Time': 1.0,
'IHMC Swing Time': 1.2}}
DEFAULT_STEP_PARAMS['Terrain'] = DEFAULT_STEP_PARAMS['Drake Nominal'].copy()
DEFAULT_STEP_PARAMS['Terrain'].update({'Drake Min Hold Time': 1.0,
'Drake Swing Speed': 0.6,
'Swing Height': 0.05,
'Max Forward Step': 0.36,
'Max Num Steps': 6,
'Nominal Step Width': 0.22,
'Map Mode': 1})
DEFAULT_STEP_PARAMS['Stairs'] = DEFAULT_STEP_PARAMS['Drake Nominal'].copy()
DEFAULT_STEP_PARAMS['Stairs'].update({'Drake Min Hold Time': 2.0,
'Swing Height': 0.05,
'Max Num Steps': 8,
'Min Num Steps': 8,
'Drake Swing Speed': 0.6,
'Support Contact Groups': lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE,
'Map Mode': 2})
DEFAULT_STEP_PARAMS['Polaris Platform'] = DEFAULT_STEP_PARAMS['Drake Nominal'].copy()
DEFAULT_STEP_PARAMS['Polaris Platform'].update({'Drake Min Hold Time': 2.0,
'Prevent Swing Undershoot': 1,
'Swing Height': 0.05,
'Map Mode': 1})
DEFAULT_CONTACT_SLICES = {(0.05, 0.3): np.array([[-0.13, -0.13, 0.13, 0.13],
[0.0562, -0.0562, 0.0562, -0.0562]]),
(0.3, .75): np.array([[-0.13, -0.13, 0.25, 0.25],
[.25, -.25, .25, -.25]]),
(0.75, 1.05): np.array([[-0.2, -0.2, 0.25, 0.25],
[.4, -.4, .4, -.4]]),
(1.05, 1.85): np.array([[-0.35, -0.35, 0.28, 0.28],
[.4, -.4, .4, -.4]])
}
def loadFootMeshes():
meshes = []
for i in range(0,2):
d = DebugData()
for footMeshFile in _footMeshFiles[i]:
d.addPolyData(ioUtils.readPolyData( footMeshFile , computeNormals=True))
t = vtk.vtkTransform()
t.Scale(0.98, 0.98, 0.98)
pd = filterUtils.transformPolyData(d.getPolyData(), t)
meshes.append(pd)
return meshes
def getLeftFootMesh():
return shallowCopy(getFootMeshes()[0])
def getRightFootMesh():
return shallowCopy(getFootMeshes()[1])
def getLeftFootColor():
return [1.0, 1.0, 0.0]
def getRightFootColor():
return [0.33, 1.0, 0.0]
def getFootMeshes():
global _footMeshes
if not _footMeshes:
_footMeshes = loadFootMeshes()
return _footMeshes
def getFootstepsFolder():
obj = om.findObjectByName('footstep plan')
if obj is None:
obj = om.getOrCreateContainer('footstep plan', parentObj=om.getOrCreateContainer('planning'))
obj.setIcon(om.Icons.Feet)
om.collapse(obj)
return obj
def getWalkingVolumesFolder():
obj = om.findObjectByName('walking volumes')
if obj is None:
obj = om.getOrCreateContainer('walking volumes', parentObj=getFootstepsFolder())
om.collapse(obj)
return obj
def getTerrainSlicesFolder():
obj = om.findObjectByName('terrain slices')
if obj is None:
obj = om.getOrCreateContainer('terrain slices', parentObj=getFootstepsFolder())
obj.setProperty('Visible', False)
om.collapse(obj)
return obj
def getBDIAdjustedFootstepsFolder():
obj = om.findObjectByName('BDI adj footstep plan')
if obj is None:
obj = om.getOrCreateContainer('BDI adj footstep plan')
obj.setIcon(om.Icons.Feet)
om.collapse(obj)
return obj
class FootstepsDriver(object):
def __init__(self, jointController):
self.jointController = jointController
self.lastFootstepPlan = None
self.lastFootstepRequest = None
self.goalSteps = None
self.lastWalkingPlan = None
self.walkingPlanCallback = None
self.default_step_params = DEFAULT_STEP_PARAMS
self.contact_slices = DEFAULT_CONTACT_SLICES
self.show_contact_slices = False
self.toolbarWidget = None
### Stuff pertaining to rendering BDI-frame steps
self.poseAlt = None
self.bdi_plan = None
self.bdi_plan_adjusted = None
view = app.getDRCView()
self.altRobotModel, self.altJointController = roboturdf.loadRobotModel('alt model', view, parent='alt model', color=roboturdf.getRobotOrangeColor(), visible=False)
self.altRobotModel.setProperty('Visible', False)
self.showBDIPlan = False # hide the BDI plans when created
self.altChannel = "POSE_BODY_ALT"
self.altSubscribe = None
# enable this to use the alt model to render a different state
#self.altJointController.addLCMUpdater("EST_ROBOT_STATE_ALT")
self._setupSubscriptions()
self._setupProperties()
self.showToolbarWidget()
# If we're a console app and have no main window, execButton won't exist
if hasattr(self, 'execButton'):
self.execButton.setEnabled(False)
self.committedPlans = []
def _setupProperties(self):
self.params = om.ObjectModelItem('Footstep Params')
self.defaults_map = ['Drake Nominal', 'BDI', 'IHMC Nominal', 'Terrain', 'Stairs', 'Polaris Platform']
self.params.addProperty('Defaults', 0, attributes=om.PropertyAttributes(enumNames=self.defaults_map))
self.params.addProperty('Behavior', 0, attributes=om.PropertyAttributes(enumNames=['BDI Stepping', 'BDI Walking', 'Drake Walking']))
self.params.addProperty('Leading Foot', 1, attributes=om.PropertyAttributes(enumNames=['Auto', 'Left', 'Right']))
self.leading_foot_map = [lcmdrc.footstep_plan_params_t.LEAD_AUTO,
lcmdrc.footstep_plan_params_t.LEAD_LEFT,
lcmdrc.footstep_plan_params_t.LEAD_RIGHT]
# self.params.addProperty('Map Command', 0, attributes=om.PropertyAttributes(enumNames=['Full Heightmap', 'Flat Ground', 'Z Normals']))
self.params.addProperty('Map Mode', 0, attributes=om.PropertyAttributes(enumNames=['Foot Plane', 'Terrain Heights & Normals', 'Terrain Heights, Z Normals', 'Horizontal Plane']))
self.map_mode_map = [
lcmdrc.footstep_plan_params_t.FOOT_PLANE,
lcmdrc.footstep_plan_params_t.TERRAIN_HEIGHTS_AND_NORMALS,
lcmdrc.footstep_plan_params_t.TERRAIN_HEIGHTS_Z_NORMALS,
lcmdrc.footstep_plan_params_t.HORIZONTAL_PLANE
]
# self.params.addProperty('Heights Source', attributes=om.PropertyAttributes(enumNames=['Map Data', 'Foot Plane']))
# self.params.addProperty('Normals Source', attributes=om.PropertyAttributes(enumNames=['Map Data', 'Foot Plane']))
self.params.addProperty('Min Num Steps', None, attributes=om.PropertyAttributes(decimals=0, minimum=0, maximum=30, singleStep=1))
self.params.addProperty('Max Num Steps', None, attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=30, singleStep=1))
self.params.addProperty('Min Step Width', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.1, maximum=0.35, singleStep=0.01))
self.params.addProperty('Nominal Step Width', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.21, maximum=0.4, singleStep=0.01))
self.params.addProperty('Max Step Width', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.22, maximum=0.5, singleStep=0.01))
self.params.addProperty('Nominal Forward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
self.params.addProperty('Max Forward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
self.params.addProperty('Swing Height', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.005))
self.params.addProperty('Max Upward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
self.params.addProperty('Max Downward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
self.params.addProperty('Drake Swing Speed', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.05, maximum=5.0, singleStep=0.05))
self.params.addProperty('Drake Min Hold Time', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=10.0, singleStep=0.05))
self.params.addProperty('Drake Instep Shift', None, attributes=om.PropertyAttributes(decimals=4, minimum=-0.3, maximum=0.3, singleStep=0.0005))
self.behavior_lcm_map = {
0: lcmdrc.footstep_plan_params_t.BEHAVIOR_BDI_STEPPING,
1: lcmdrc.footstep_plan_params_t.BEHAVIOR_BDI_WALKING,
2: lcmdrc.footstep_plan_params_t.BEHAVIOR_WALKING}
self.params.addProperty('Planner Mode', 0, attributes=om.PropertyAttributes(enumNames=['Fast MIQP', 'Slow MISOCP']))
self.params.addProperty('Support Contact Groups', 0, attributes=om.PropertyAttributes(enumNames=['Whole Foot', 'Front 2/3', 'Back 2/3']))
self.params.addProperty('Prevent Swing Undershoot', 0, attributes=om.PropertyAttributes(enumNames=['False', 'True']))
self.params.addProperty('Prevent Swing Overshoot', 0, attributes=om.PropertyAttributes(enumNames=['False', 'True']))
self.params.addProperty('IHMC Transfer Time', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.6, maximum=5.0, singleStep=0.05))
self.params.addProperty('IHMC Swing Time', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.6, maximum=5.0, singleStep=0.05))
self.applyDefaults(DEFAULT_PARAM_SET)
def applyDefaults(self, set_name):
defaults = self.default_step_params[set_name]
for k, v in defaults.iteritems():
self.params.setProperty(k, v)
def _setupSubscriptions(self):
useHistoricalLoader = False
historicalLoader = lcmUtils.HistoricalLCMLoader('drc', 'software/drc_lcmtypes/lcmtypes', os.getenv('DRC_BASE')) if useHistoricalLoader else None
lcmUtils.addSubscriber('FOOTSTEP_PLAN_RESPONSE', lcmdrc.footstep_plan_t, self.onFootstepPlan, historicalLoader)
lcmUtils.addSubscriber('WALKING_TRAJ_RESPONSE', lcmdrc.robot_plan_t, self.onWalkingPlan)
lcmUtils.addSubscriber('WALKING_SIMULATION_TRAJ_RESPONSE', lcmdrc.robot_plan_t, self.onWalkingPlan)
### Related to BDI-frame adjustment:
self.altSubscribe = lcmUtils.addSubscriber( self.altChannel , pose_t, self.onPoseAlt)
self.altSubscribe.setSpeedLimit(60)
sub2 = lcmUtils.addSubscriber('BDI_ADJUSTED_FOOTSTEP_PLAN', lcmdrc.footstep_plan_t, self.onBDIAdjustedFootstepPlan)
sub2.setSpeedLimit(1) # was 5, but rendering was slow
def changeSubscriptionAlt(self, newAltChannel="POSE_BODY_ALT"):
# used to monitor a different pose e.g. POSE_BODY_LOGGED in playback
self.altChannel = newAltChannel
lcmUtils.removeSubscriber ( self.altSubscribe )
self.altSubscribe = lcmUtils.addSubscriber( self.altChannel , pose_t, self.onPoseAlt)
self.altSubscribe.setSpeedLimit(60)
##############################
def getDefaultStepParams(self):
default_step_params = lcmdrc.footstep_params_t()
default_step_params.step_speed = self.params.properties.drake_swing_speed
default_step_params.drake_min_hold_time = self.params.properties.drake_min_hold_time
default_step_params.drake_instep_shift = self.params.properties.drake_instep_shift
default_step_params.step_height = self.params.properties.swing_height
default_step_params.constrain_full_foot_pose = True
default_step_params.bdi_step_duration = 2.0
default_step_params.bdi_sway_duration = 0.0
default_step_params.bdi_lift_height = 0.065
default_step_params.bdi_toe_off = 1
default_step_params.bdi_knee_nominal = 0.0
default_step_params.bdi_max_foot_vel = 0.0
default_step_params.bdi_sway_end_dist = 0.02
default_step_params.bdi_step_end_dist = 0.02
default_step_params.mu = 1.0
default_step_params.ihmc_transfer_time = self.params.properties.ihmc_transfer_time
default_step_params.ihmc_swing_time = self.params.properties.ihmc_swing_time
default_step_params.support_contact_groups = self.params.properties.support_contact_groups
default_step_params.prevent_swing_undershoot = self.params.properties.prevent_swing_undershoot
default_step_params.prevent_swing_overshoot = self.params.properties.prevent_swing_overshoot
return default_step_params
def onWalkingPlan(self, msg):
self.lastWalkingPlan = msg
if self.walkingPlanCallback:
self.walkingPlanCallback(self.lastWalkingPlan)
def onBDIAdjustedFootstepPlan(self, msg):
folder = getBDIAdjustedFootstepsFolder()
om.removeFromObjectModel(folder)
folder = getBDIAdjustedFootstepsFolder()
self.drawFootstepPlan(msg, folder)
def onFootstepPlan(self, msg):
#self.clearFootstepPlan()
self.lastFootstepPlan = msg
planFolder = getFootstepsFolder()
self.drawFootstepPlan( self.lastFootstepPlan , planFolder)
self.transformPlanToBDIFrame( self.lastFootstepPlan )
self.showToolbarWidget()
def showToolbarWidget(self):
if app.getMainWindow() is None:
return
if self.toolbarWidget:
self.execButton.setEnabled(True)
return
w = QtGui.QWidget()
l = QtGui.QHBoxLayout(w)
label = QtGui.QLabel('Walk plan:')
execButton = QtGui.QPushButton('')
execButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_MediaPlay))
clearButton = QtGui.QPushButton('')
clearButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_TrashIcon))
stopButton = QtGui.QPushButton('')
stopButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_MediaStop))
l.addWidget(label)
l.addWidget(execButton)
l.addWidget(stopButton)
l.addWidget(clearButton)
l.setContentsMargins(0, 0, 0, 0)
execButton.setShortcut(QtGui.QKeySequence('Ctrl+Return'))
execButton.connect('clicked()', self.onExecClicked)
clearButton.connect('clicked()', self.onClearClicked)
stopButton.connect('clicked()', self.sendStopWalking)
self.execButton = execButton
self.stopButton = stopButton
self.toolbarWidget = app.getMainWindow().toolBar().addWidget(w)
self.execButton.show()
def onExecClicked(self):
self.commitFootstepPlan(self.lastFootstepPlan)
om.removeFromObjectModel(om.findObjectByName('footstep widget'))
walkGoal = om.findObjectByName('walking goal')
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'BranchProtectionAllowedToMerge',
'BranchProtectionAllowedToPush',
'ProjectPushRules',
'GetGroupMembershipMemberResult',
'GetProjectPushRulesResult',
'GetProjectsProjectResult',
'GetProjectsProjectForkedFromProjectResult',
'GetProjectsProjectNamespaceResult',
'GetProjectsProjectOwnerResult',
'GetProjectsProjectPermissionsResult',
'GetProjectsProjectSharedWithGroupResult',
'GetUsersUserResult',
]
@pulumi.output_type
class BranchProtectionAllowedToMerge(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessLevel":
suggest = "access_level"
elif key == "accessLevelDescription":
suggest = "access_level_description"
elif key == "groupId":
suggest = "group_id"
elif key == "userId":
suggest = "user_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BranchProtectionAllowedToMerge. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BranchProtectionAllowedToMerge.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BranchProtectionAllowedToMerge.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_level: Optional[str] = None,
access_level_description: Optional[str] = None,
group_id: Optional[int] = None,
user_id: Optional[int] = None):
"""
:param int group_id: The ID of a GitLab group allowed to perform the relevant action. Mutually exclusive with `user_id`.
:param int user_id: The ID of a GitLab user allowed to perform the relevant action. Mutually exclusive with `group_id`.
"""
if access_level is not None:
pulumi.set(__self__, "access_level", access_level)
if access_level_description is not None:
pulumi.set(__self__, "access_level_description", access_level_description)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="accessLevel")
def access_level(self) -> Optional[str]:
return pulumi.get(self, "access_level")
@property
@pulumi.getter(name="accessLevelDescription")
def access_level_description(self) -> Optional[str]:
return pulumi.get(self, "access_level_description")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[int]:
"""
The ID of a GitLab group allowed to perform the relevant action. Mutually exclusive with `user_id`.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[int]:
"""
The ID of a GitLab user allowed to perform the relevant action. Mutually exclusive with `group_id`.
"""
return pulumi.get(self, "user_id")
@pulumi.output_type
class BranchProtectionAllowedToPush(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessLevel":
suggest = "access_level"
elif key == "accessLevelDescription":
suggest = "access_level_description"
elif key == "groupId":
suggest = "group_id"
elif key == "userId":
suggest = "user_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BranchProtectionAllowedToPush. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BranchProtectionAllowedToPush.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BranchProtectionAllowedToPush.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_level: Optional[str] = None,
access_level_description: Optional[str] = None,
group_id: Optional[int] = None,
user_id: Optional[int] = None):
"""
:param int group_id: The ID of a GitLab group allowed to perform the relevant action. Mutually exclusive with `user_id`.
:param int user_id: The ID of a GitLab user allowed to perform the relevant action. Mutually exclusive with `group_id`.
"""
if access_level is not None:
pulumi.set(__self__, "access_level", access_level)
if access_level_description is not None:
pulumi.set(__self__, "access_level_description", access_level_description)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter(name="accessLevel")
def access_level(self) -> Optional[str]:
return pulumi.get(self, "access_level")
@property
@pulumi.getter(name="accessLevelDescription")
def access_level_description(self) -> Optional[str]:
return pulumi.get(self, "access_level_description")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[int]:
"""
The ID of a GitLab group allowed to perform the relevant action. Mutually exclusive with `user_id`.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[int]:
"""
The ID of a GitLab user allowed to perform the relevant action. Mutually exclusive with `group_id`.
"""
return pulumi.get(self, "user_id")
@pulumi.output_type
class ProjectPushRules(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authorEmailRegex":
suggest = "author_email_regex"
elif key == "branchNameRegex":
suggest = "branch_name_regex"
elif key == "commitCommitterCheck":
suggest = "commit_committer_check"
elif key == "commitMessageNegativeRegex":
suggest = "commit_message_negative_regex"
elif key == "commitMessageRegex":
suggest = "commit_message_regex"
elif key == "denyDeleteTag":
suggest = "deny_delete_tag"
elif key == "fileNameRegex":
suggest = "file_name_regex"
elif key == "maxFileSize":
suggest = "max_file_size"
elif key == "memberCheck":
suggest = "member_check"
elif key == "preventSecrets":
suggest = "prevent_secrets"
elif key == "rejectUnsignedCommits":
suggest = "reject_unsigned_commits"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ProjectPushRules. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ProjectPushRules.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ProjectPushRules.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
author_email_regex: Optional[str] = None,
branch_name_regex: Optional[str] = None,
commit_committer_check: Optional[bool] = None,
commit_message_negative_regex: Optional[str] = None,
commit_message_regex: Optional[str] = None,
deny_delete_tag: Optional[bool] = None,
file_name_regex: Optional[str] = None,
max_file_size: Optional[int] = None,
member_check: Optional[bool] = None,
prevent_secrets: Optional[bool] = None,
reject_unsigned_commits: Optional[bool] = None):
"""
:param str author_email_regex: All commit author emails must match this regex, e.g. `@my-company.com$`.
:param str branch_name_regex: All branch names must match this regex, e.g. `(feature|hotfix)\/*`.
:param bool commit_committer_check: Users can only push commits to this repository that were committed with one of their own verified emails.
:param str commit_message_negative_regex: No commit message is allowed to match this regex, for example `ssh\:\/\/`.
:param str commit_message_regex: All commit messages must match this regex, e.g. `Fixed \d+\..*`.
:param bool deny_delete_tag: Deny deleting a tag.
:param str file_name_regex: All committed filenames must not match this regex, e.g. `(jar|exe)$`.
:param int max_file_size: Maximum file size (MB).
:param bool member_check: Restrict commits by author (email) to existing GitLab users.
:param bool prevent_secrets: GitLab will reject any files that are likely to contain secrets.
:param bool reject_unsigned_commits: Reject commit when it’s not signed through GPG.
"""
if author_email_regex is not None:
pulumi.set(__self__, "author_email_regex", author_email_regex)
if branch_name_regex is not None:
pulumi.set(__self__, "branch_name_regex", branch_name_regex)
if commit_committer_check is not None:
pulumi.set(__self__, "commit_committer_check", commit_committer_check)
if commit_message_negative_regex is not None:
pulumi.set(__self__, "commit_message_negative_regex", commit_message_negative_regex)
if commit_message_regex is not None:
pulumi.set(__self__, "commit_message_regex", commit_message_regex)
if deny_delete_tag is not None:
pulumi.set(__self__, "deny_delete_tag", deny_delete_tag)
if file_name_regex is not None:
pulumi.set(__self__, "file_name_regex", file_name_regex)
if max_file_size is not None:
pulumi.set(__self__, "max_file_size", max_file_size)
if member_check is not None:
pulumi.set(__self__, "member_check", member_check)
if prevent_secrets is not None:
pulumi.set(__self__, "prevent_secrets", prevent_secrets)
if reject_unsigned_commits is not None:
pulumi.set(__self__, "reject_unsigned_commits", reject_unsigned_commits)
@property
@pulumi.getter(name="authorEmailRegex")
def author_email_regex(self) -> Optional[str]:
"""
All commit author emails must match this regex, e.g. `@my-company.com$`.
"""
return pulumi.get(self, "author_email_regex")
@property
@pulumi.getter(name="branchNameRegex")
def branch_name_regex(self) -> Optional[str]:
"""
All branch names must match this regex, e.g. `(feature|hotfix)\/*`.
"""
return pulumi.get(self, "branch_name_regex")
@property
@pulumi.getter(name="commitCommitterCheck")
def commit_committer_check(self) -> Optional[bool]:
"""
Users can only push commits to this repository that were committed with one of their own verified emails.
"""
return pulumi.get(self, "commit_committer_check")
@property
@pulumi.getter(name="commitMessageNegativeRegex")
def commit_message_negative_regex(self) -> Optional[str]:
"""
No commit message is allowed to match this regex, for example `ssh\:\/\/`.
"""
return pulumi.get(self, "commit_message_negative_regex")
@property
@pulumi.getter(name="commitMessageRegex")
def commit_message_regex(self) -> Optional[str]:
"""
All commit messages must match this regex, e.g. `Fixed \d+\..*`.
"""
return pulumi.get(self, "commit_message_regex")
@property
@pulumi.getter(name="denyDeleteTag")
def deny_delete_tag(self) -> Optional[bool]:
"""
Deny deleting a tag.
"""
return pulumi.get(self, "deny_delete_tag")
@property
@pulumi.getter(name="fileNameRegex")
def file_name_regex(self) -> Optional[str]:
"""
All committed filenames must not match this regex, e.g. `(jar|exe)$`.
"""
return pulumi.get(self, "file_name_regex")
@property
@pulumi.getter(name="maxFileSize")
def max_file_size(self) -> Optional[int]:
"""
Maximum file size (MB).
"""
return pulumi.get(self, "max_file_size")
@property
@pulumi.getter(name="memberCheck")
def member_check(self) -> Optional[bool]:
"""
Restrict commits by author (email) to existing GitLab users.
"""
return pulumi.get(self, "member_check")
@property
@pulumi.getter(name="preventSecrets")
def prevent_secrets(self) -> Optional[bool]:
"""
GitLab will reject any files that are likely to contain secrets.
"""
return pulumi.get(self, "prevent_secrets")
@property
@pulumi.getter(name="rejectUnsignedCommits")
def reject_unsigned_commits(self) -> Optional[bool]:
"""
Reject commit when it’s not signed through GPG.
"""
return pulumi.get(self, "reject_unsigned_commits")
@pulumi.output_type
class GetGroupMembershipMemberResult(dict):
def __init__(__self__, *,
access_level: str,
avatar_url: str,
expires_at: str,
id: int,
name: str,
state: str,
username: str,
web_url: str):
"""
:param str access_level: Only return members with the desired access level. Acceptable values are: `guest`, `reporter`, `developer`, `maintainer`, `owner`.
:param str avatar_url: The avatar URL of the user.
:param str expires_at: Expiration date for the group membership.
:param int id: The unique id assigned to the user by the gitlab server.
:param str name: The name of the user.
:param str state: Whether the user is active or blocked.
:param str username: The username of the user.
:param str web_url: User's website URL.
"""
pulumi.set(__self__, "access_level", access_level)
pulumi.set(__self__, "avatar_url", avatar_url)
pulumi.set(__self__, "expires_at", expires_at)
pulumi.set(__self__, "expires_at", expires_at)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "username", username)
pulumi.set(__self__, "web_url", web_url)
"""
Stars module, modified to work with crystal class
Classes to generate star sets, double star sets, and vector star sets; a lot of indexing functionality.
NOTE: The naming follows that of stars; the functionality is extremely similar, and this code
was modified as little as possible to translate that functionality to *crystals* which possess
a basis. In the case of a single atom basis, this should reduce to the stars object functionality.
The big changes are:
* Replacing NNvect star (which represents the jumps) with the jumpnetwork type found in crystal
* Using the jumpnetwork_latt representation from crystal
* Representing a "point" as a solute + vacancy. In this case, it is a tuple (s,v) of unit cell
indices and a vector dx or dR (dx = Cartesian vector pointing from solute to vacancy;
dR = lattice vector pointing from unit cell of solute to unit cell of vacancy). This is equivalent
to our old representation if the tuple (s,v) = (0,0) for all sites. Due to translational invariance,
the solute always stays inside the unit cell
* Using indices into the point list rather than just making lists of the vectors themselves. This
is because the "points" now have a more complex representation (see above).
"""
__author__ = '<NAME>'
import numpy as np
import collections, copy, itertools, yaml
from onsager import crystal
from onsager.crystal import DB_disp, DB_disp4, pureDBContainer, mixedDBContainer
from onsager.DB_structs import *
from onsager.DB_collisions import *
import itertools
from collections import defaultdict
import time
from functools import reduce
# YAML tags
PAIRSTATE_YAMLTAG = '!PairState'
class PairState(collections.namedtuple('PairState', 'i j R dx')):
"""
A class corresponding to a "pair" state; in this case, a solute-vacancy pair, but can
also be a transition state pair. The solute (or initial state) is in unit cell 0, in position
indexed i; the vacancy (or final state) is in unit cell R, in position indexed j.
The cartesian vector dx connects them. We can add and subtract, negate, and "endpoint"
subtract (useful for determining what Green function entry to use)
:param i: index of the first member of the pair (solute)
:param j: index of the second member of the pair (vacancy)
:param R: lattice vector pointing from unit cell of i to unit cell of j
:param dx: Cartesian vector pointing from first to second member of pair
"""
@classmethod
def zero(cls, n=0, dim=3):
"""Return a "zero" state"""
return cls(i=n, j=n, R=np.zeros(dim, dtype=int), dx=np.zeros(dim))
@classmethod
def fromcrys(cls, crys, chem, ij, dx):
"""Convert (i,j), dx into PairState"""
return cls(i=ij[0],
j=ij[1],
R=np.round(np.dot(crys.invlatt, dx) - crys.basis[chem][ij[1]] + crys.basis[chem][ij[0]]).astype(int),
dx=dx)
@classmethod
def fromcrys_latt(cls, crys, chem, ij, R):
"""Convert (i,j), R into PairState"""
return cls(i=ij[0],
j=ij[1],
R=R,
dx=np.dot(crys.lattice, R + crys.basis[chem][ij[1]] - crys.basis[chem][ij[0]]))
def _asdict(self):
"""Return a proper dict"""
return {'i': self.i, 'j': self.j, 'R': self.R, 'dx': self.dx}
def __sane__(self, crys, chem):
"""Determine if the dx value makes sense given everything else..."""
return np.allclose(self.dx, np.dot(crys.lattice, self.R + crys.basis[chem][self.j] - crys.basis[chem][self.i]))
def iszero(self):
"""Quicker than self == PairState.zero()"""
return self.i == self.j and np.all(self.R == 0)
def __eq__(self, other):
"""Test for equality--we don't bother checking dx"""
return isinstance(other, self.__class__) and \
(self.i == other.i and self.j == other.j and np.all(self.R == other.R))
def __ne__(self, other):
"""Inequality == not __eq__"""
return not self.__eq__(other)
def __hash__(self):
"""Hash, so that we can make sets of states"""
# return self.i ^ (self.j << 1) ^ (self.R[0] << 2) ^ (self.R[1] << 3) ^ (self.R[2] << 4)
return hash((self.i, self.j) + tuple(self.R))
def __add__(self, other):
"""Add two states: works if and only if self.j == other.i
(i,j) R + (j,k) R' = (i,k) R+R' : works for thinking about transitions...
Note: a + b != b + a, and may be that only one of those is even defined
"""
if not isinstance(other, self.__class__): return NotImplemented
if self.iszero() and self.j == -1: return other
if other.iszero() and other.i == -1: return self
if self.j != other.i:
raise ArithmeticError(
'Can only add matching endpoints: ({} {})+({} {}) not compatible'.format(self.i, self.j, other.i,
other.j))
return self.__class__(i=self.i, j=other.j, R=self.R + other.R, dx=self.dx + other.dx)
def __neg__(self):
"""Negation of state (swap members of pair)
- (i,j) R = (j,i) -R
Note: a + (-a) == (-a) + a == 0 because we define what "zero" is.
"""
return self.__class__(i=self.j, j=self.i, R=-self.R, dx=-self.dx)
def __sub__(self, other):
"""Add a negative:
a-b points from initial of a to initial of b if same final state
(i,j) R - (k,j) R' = (i,k) R-R'
Note: this means that (a-b) + b = a, but b + (a-b) is an error. (b-a) + a = b
"""
if not isinstance(other, self.__class__): return NotImplemented
return self.__add__(-other)
def __xor__(self, other):
"""Subtraction on the endpoints (sort of the "opposite" of a-b):
a^b points from final of b to final of a if same initial state
(i,j) R ^ (i,k) R' = (k,j) R-R'
Note: b + (a^b) = a but (a^b) + b is an error. a + (b^a) = b
"""
if not isinstance(other, self.__class__): return NotImplemented
# if self.iszero(): raise ArithmeticError('Cannot endpoint substract from zero')
# if other.iszero(): raise ArithmeticError('Cannot endpoint subtract zero')
if self.i != other.i:
raise ArithmeticError(
'Can only endpoint subtract matching starts: ({} {})^({} {}) not compatible'.format(self.i, self.j,
other.i, other.j))
return self.__class__(i=other.j, j=self.j, R=self.R - other.R, dx=self.dx - other.dx)
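# Worked example of the pair algebra above (assumed symbols, for clarity only):
#   a = (i,j) R and b = (j,k) R'  =>  a + b = (i,k) R+R'
#   -a = (j,i) -R, and (a + (-a)).iszero() is True
#   with c = (k,j) R'' (same final state), a - c = (i,k) R-R''
#   with d = (i,k) R''' (same initial state), a ^ d = (k,j) R-R'''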
def g(self, crys, chem, g):
"""
Apply group operation.
:param crys: crystal
:param chem: chemical index
:param g: group operation (from crys)
:return g*PairState: corresponding to group operation applied to self
"""
gRi, (c, gi) = crys.g_pos(g, np.zeros(len(self.R), dtype=int), (chem, self.i))
gRj, (c, gj) = crys.g_pos(g, self.R, (chem, self.j))
gdx = crys.g_direc(g, self.dx)
return self.__class__(i=gi, j=gj, R=gRj - gRi, dx=gdx)
def __str__(self):
"""Human readable version"""
if len(self.R) == 3:
return "{}.[0,0,0]:{}.[{},{},{}] (dx=[{},{},{}])".format(self.i, self.j,
self.R[0], self.R[1], self.R[2],
self.dx[0], self.dx[1], self.dx[2])
else:
return "{}.[0,0]:{}.[{},{}] (dx=[{},{}])".format(self.i, self.j,
self.R[0], self.R[1],
self.dx[0], self.dx[1])
@classmethod
def sortkey(cls, entry):
return np.dot(entry.dx, entry.dx)
@staticmethod
def PairState_representer(dumper, data):
"""Output a PairState"""
# asdict() returns an OrderedDictionary, so pass through dict()
# had to rewrite _asdict() for some reason...?
return dumper.represent_mapping(PAIRSTATE_YAMLTAG, data._asdict())
@staticmethod
def PairState_constructor(loader, node):
"""Construct a GroupOp from YAML"""
# ** turns the dictionary into parameters for PairState constructor
return PairState(**loader.construct_mapping(node, deep=True))
yaml.add_representer(PairState, PairState.PairState_representer)
yaml.add_constructor(PAIRSTATE_YAMLTAG, PairState.PairState_constructor)
# HDF5 conversion routines: PairState, and list-of-list structures
def PSlist2array(PSlist):
"""
Take in a list of pair states; return arrays that can be stored in HDF5 format
:param PSlist: list of pair states
:return ij: int_array[N][2] = (i,j)
:return R: int[N][3]
:return dx: float[N][3]
"""
N = len(PSlist)
ij = np.zeros((N, 2), dtype=int)
dim = len(PSlist[0].R)
R = np.zeros((N, dim), dtype=int)
dx = np.zeros((N, dim))
for n, PS in enumerate(PSlist):
ij[n, 0], ij[n, 1], R[n, :], dx[n, :] = PS.i, PS.j, PS.R, PS.dx
return ij, R, dx
def array2PSlist(ij, R, dx):
"""
Take in arrays of ij, R, dx (from HDF5), return a list of PairStates
:param ij: int_array[N][2] = (i,j)
:param R: int[N][3]
:param dx: float[N][3]
:return PSlist: list of pair states
"""
return [PairState(i=ij0[0], j=ij0[1], R=R0, dx=dx0) for ij0, R0, dx0 in zip(ij, R, dx)]
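# Round-trip sketch (illustrative only, using the two helpers above):
#   ij, R, dx = PSlist2array(PSlist)     # plain arrays, suitable for HDF5 storage
#   PSlist2 = array2PSlist(ij, R, dx)    # PSlist2[n] == PSlist[n] for every n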
def doublelist2flatlistindex(listlist):
"""
Takes a list of lists, returns a flattened list and an index array
:param listlist: list of lists of objects
:return flatlist: flat list of objects (preserving order)
:return indexarray: array indexing which original list it came from
"""
flatlist = []
indexlist = []
for ind, entries in enumerate(listlist):
flatlist += entries
indexlist += [ind for j in entries]
return flatlist, np.array(indexlist)
def flatlistindex2doublelist(flatlist, indexarray):
"""
Takes a flattened list and an index array, returns a list of lists
:param flatlist: flat list of objects (preserving order)
:param indexarray: array indexing which original list it came from
:return listlist: list of lists of objects
"""
Nlist = max(indexarray) + 1
listlist = [[] for n in range(Nlist)]
for entry, ind in zip(flatlist, indexarray):
listlist[ind].append(entry)
return listlist
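# Example of the flattening convention (hypothetical input):
#   doublelist2flatlistindex([['a', 'b'], ['c']])  -> (['a', 'b', 'c'], array([0, 0, 1]))
#   flatlistindex2doublelist(['a', 'b', 'c'], [0, 0, 1])  -> [['a', 'b'], ['c']]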
class StarSet(object):
"""
A class to construct crystal stars, and be able to efficiently index.
Takes in a jumpnetwork, which is used to construct the corresponding stars, a crystal
object, and a chemical index identifying the diffusing species.
"""
# Repository: wclark3/machine-learning
#!/usr/bin/env python
'''Main Program for Training over the Facial Keypoints dataset.
'''
import argparse
import code
import datetime
import json
import os
import re
import subprocess
import sys
import time
import argcomplete
import lasagne
import numpy as np
import pandas as pd
import batch
import data_logger
import fileio
import partition
import perceptron
import preprocess
ACTION_TRAIN_MISSING = "train_missing"
class Tee(object):
'''Tees file descriptors so that writes are made everywhere simultaneously.
'''
def __init__(self, *files):
self.files = files
def write(self, obj):
'''Writes to all file descriptors.
'''
# pylint: disable=invalid-name
for f in self.files:
f.write(obj)
f.flush() # If you want the output to be visible immediately
def flush(self):
'''Flushes all file descriptors.
'''
# pylint: disable=invalid-name
for f in self.files:
f.flush()
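# Typical use of Tee (illustrative sketch, not part of the original file):
#   log_fd = open('run.log', 'w')
#   sys.stdout = Tee(sys.stdout, log_fd)   # every print now also lands in run.log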
def get_version():
'''Returns the current git revision as a string
'''
return "git rev = %s" % (
subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip())
def print_partition_shapes(partitions):
for k in partitions.keys():
assert len(partitions[k]['X']) == len(partitions[k]['Y'])
assert len(partitions[k]['Index']) == len(partitions[k]['X'])
assert (list(partitions[k]['Y'].shape)[1] ==
len(partitions[k]['Y_Labels']))
print("%8s X.shape=%s, Y.shape=%s, indices=%s, labels=%s" % (
k, partitions[k]['X'].shape, partitions[k]['Y'].shape,
len(partitions[k]['Index']), len(partitions[k]['Y_Labels'])))
def load_nnet_config(options):
'''Load the neural network config file and address overrides
'''
print "Loading NNET configuration from %s" % options.config_file
assert os.path.exists(options.config_file)
with open(options.config_file) as json_fp:
nnet_config = json.load(json_fp)
# Handle command-line overrides that affect the config
if options.batchsize is not None:
nnet_config['batchsize'] = options.batchsize
if options.action == ACTION_TRAIN_MISSING:
nnet_config[perceptron.MultiLevelPerceptron.PREDICT_MISSING] = True
else:
nnet_config[perceptron.MultiLevelPerceptron.PREDICT_MISSING] = False
return nnet_config
def select_data(options, feature_cols, partitioned, keep_all_data=False):
#
# Select the columns.
#
if options.action == ACTION_TRAIN_MISSING:
binary = {}
for name in partitioned.keys():
num_vars = len(partitioned[name]['Y_Labels'])
odd_stride = range(1, num_vars, 2)
even_stride = range(0, num_vars, 2)
nans = np.isnan(partitioned[name]['Y'])
even_cols = nans[:, odd_stride]
odd_cols = nans[:, even_stride]
assert np.array_equal(odd_cols, even_cols)
col_names = ([re.sub(r'_[xy]$', "", l) for
l in partitioned[name]['Y_Labels']])
even_colnames = [col_names[i] for i in even_stride]
odd_colnames = [col_names[i] for i in odd_stride]
assert np.array_equal(even_colnames, odd_colnames)
binary[name] = {
'X': partitioned[name]['X'],
'Y': even_cols,
'Index': partitioned[name]['Index'],
'Y_Labels': ['missing_' + c for c in even_colnames]
}
print "Binarized the Missing Values"
return binary
selected = {}
for name in partitioned.keys():
selected[name] = {}
selected[name]['X'] = partitioned[name]['X']
selected[name]['Y'] = partitioned[name]['Y'][:, feature_cols]
selected[name]['Y_Labels'] = (
[partitioned[name]['Y_Labels'][i] for i in feature_cols])
selected[name]['Index'] = partitioned[name]['Index']
if keep_all_data:
return selected
#
# Map/Drop NaNs
#
if options.keep_nans:
replaced = 0
total = 0
for name in partitioned.keys():
to_replace = np.isnan(selected[name]['Y'])
selected[name]['Y'][to_replace] = options.nan_cardinal
replaced += np.sum(to_replace)
total += to_replace.size
print "Replaced NaNs with cardinal=%d [%3.1f%% of data]" % (
options.nan_cardinal, float(replaced)/float(total)*100.)
return selected
else:
dropped = 0
total = 0
for name in partitioned.keys():
to_keep = ~(np.isnan(selected[name]['Y']).any(1))
selected[name]['X'] = selected[name]['X'][to_keep]
selected[name]['Y'] = selected[name]['Y'][to_keep]
selected[name]['Index'] = (
[selected[name]['Index'][i] for (i, keep)
in enumerate(to_keep) if keep])
assert not np.isnan(selected[name]['Y']).any()
dropped += sum(~to_keep)
total += len(to_keep)
print "Dropping samples with NaNs: {:3.1f}% dropped".format(
float(dropped)/float(total)*100.)
return selected
def combine_loss_main(options):
with open(options.feature_group_file) as feat_fd:
feature_groups = json.load(feat_fd)
start_time = time.time()
combine_loss(options, feature_groups)
print "Combining CSVs took {:.3f}s".format(time.time() - start_time)
def combine_loss(options, feature_groups):
assert os.getcwd() == options.run_data_path
loss_dfs = {}
for feature_idx, (feature_name, feature_cols) in enumerate(sorted(
feature_groups.items())):
if not os.path.exists(feature_name):
print "Could not find directory for %s" % feature_name
continue
os.chdir(feature_name)
with open('loss.csv') as loss_fd:
df = pd.read_csv(loss_fd, index_col="epoch")
df.rename(columns={
"train_loss": "train_loss_" + feature_name,
"train_rmse": "train_rmse_" + feature_name
}, inplace=True)
loss_dfs[feature_name] = df
os.chdir(options.run_data_path)
assert os.getcwd() == options.run_data_path
aggregated_loss = pd.concat(loss_dfs.values(), axis=1)
with open("loss.csv", 'w') as write_fd:
aggregated_loss.to_csv(write_fd)
def train_feature(options, selected, nnet_config,
original_data, feature_cols, kaggle):
#
# Instantiate and Build the Convolutional Multi-Level Perceptron
#
start_time = time.time()
if options.amputate:
print "Chose an Amputated MLP"
perceptron_type = perceptron.AmputatedMLP
else:
print "Chose an Convolutional MLP"
perceptron_type = perceptron.ConvolutionalMLP
mlp = perceptron_type(
nnet_config, (1, 96, 96), len(selected['train']['Y_Labels']))
print mlp
mlp.build_network()
print "Building Network Took {:.3f}s".format(time.time() - start_time)
#
# Finally, launch the training loop.
#
print "Starting training..."
loss_log = data_logger.CSVEpochLogger(
"loss_%05d.csv", "loss.csv",
np.concatenate((['train_loss', 'train_rmse'],
selected['train']['Y_Labels'])))
resumer = batch.TrainingResumer(
mlp, "epochs_done.txt", "state_%05d.pkl.gz",
options.save_state_interval)
# if options.action == ACTION_TRAIN_MISSING:
# in_out_scale = None
# else:
# in_out_scale = batch.Scaler(offset=1., scale=48.)
trainer = batch.BatchedTrainer(mlp, nnet_config['batchsize'],
selected, loss_log, resumer)
trainer.train(options.num_epochs)
if options.amputate or options.predict or options.kaggle:
def write_pred(data, filename, header):
data_frame = pd.DataFrame(data, columns=header)
data_frame[['index']] = data_frame[['index']].astype(int)
with open(filename, 'w') as file_desc:
data_frame.to_csv(file_desc, index=False)
start_time = time.time()
print "Saving Predictions"
prediction_header = np.concatenate(
(['index'], selected['validate']['Y_Labels']))
last_layer_header = None
if not options.amputate:
last_layer_header = prediction_header
if options.predict:
index_train = np.transpose([original_data['train']['Index']])
index_valid = np.transpose([original_data['validate']['Index']])
last_layer_train = np.concatenate(
(index_train, trainer.predict_y(original_data['train']['X'])),
axis=1)
last_layer_val = np.concatenate(
(index_valid, trainer.predict_y(original_data['validate']['X'])),
axis=1)
write_pred(last_layer_train, "last_layer_train.csv",
last_layer_header)
write_pred(last_layer_val, "last_layer_val.csv",
last_layer_header)
all_data = select_data(options, feature_cols, original_data, True)
write_pred(np.concatenate(
(index_train, all_data['train']['Y']), axis=1),
"y_train.csv", prediction_header)
write_pred(np.concatenate(
(index_valid, all_data['validate']['Y']), axis=1),
"y_validate.csv", prediction_header)
if options.kaggle:
kaggle_test = np.concatenate(
(np.transpose([kaggle['Index']]),
trainer.predict_y(kaggle['X'])),
axis=1)
write_pred(kaggle_test, "kaggle.csv", prediction_header)
print " took {:.3f}s".format(time.time() - start_time)
def select_features(options):
if options.action == ACTION_TRAIN_MISSING:
return {'all_binary': range(30)}
with open(options.feature_group_file) as feature_fd:
feature_dict = json.load(feature_fd)
if options.feature_groups is None:
features_to_train = feature_dict
else:
if not all(k in feature_dict for k in options.feature_groups):
raise KeyError(
("one or more of the following features cannot be found %s" %
options.feature_groups))
features_to_train = dict((k, feature_dict[k]) for k in
options.feature_groups if k in feature_dict)
return features_to_train
def train_main(options):
'''Loads the data, manipulates it, and trains the model; it's the glue
that does all of the work.
'''
# Load the nnet_config
nnet_config = load_nnet_config(options)
#
# Load the Dataset
#
start_time = time.time()
faces = fileio.FaceReader(options.faces_csv, options.faces_pickled,
fast_nrows=options.num_rows)
faces.load_file()
print "Read Took {:.3f}s".format(time.time() - start_time)
kaggle = None
if options.kaggle:
kaggle = fileio.ReadTestCSV(
"../../data/test.csv", "../../data/test.pkl")
#
# Partition the Dataset
#
# Convert raw data from float64 to floatX
# (32/64-bit depending on GPU/CPU)
typed_data = faces.get_data()
typed_data['X'] = lasagne.utils.floatX(typed_data['X'])
typed_data['Y'] = lasagne.utils.floatX(typed_data['Y'])
for i in range(typed_data['X'].shape[0]):
typed_data['X'][i][0] = np.swapaxes(typed_data['X'][i][0], 0, 1)
start_time = time.time()
partitioner = partition.Partitioner(
typed_data, {'train': 70, 'validate': 30},
os.path.join(options.run_data_path, "partition_indices.pkl"))
partitions = partitioner.run()
print_partition_shapes(partitions)
print "Partition Took {:.3f}s".format(time.time() - start_time)
#
# Run any transformations on the training dataset here.
#
#
# Which features to predict
#
features_to_train = select_features(options)
for feature_index, (feature_name, feature_cols) in enumerate(sorted(
features_to_train.items())):
#
# Setup environment for training a feature
#
print "\n\n%s\nFeature Set %s (%d of %d)\n%s" % (
"#" * 80, feature_name, feature_index+1,
len(features_to_train), "#" * 80)
feature_path = os.path.abspath(feature_name)
print "Changing to %s" % feature_path
if not os.path.exists(feature_path):
os.mkdir(feature_path)
os.chdir(feature_path)
#
# Select the Training data
#
# Select feature columns
start_time = time.time()
print "Selecting features %s" % feature_name
selected = select_data(options, feature_cols, partitions)
print selected['train']['Y_Labels']
print_partition_shapes(selected)
print "Selecting Data Took {:.3f}s".format(time.time() - start_time)
train_feature(options, selected, nnet_config,
partitions, feature_cols, kaggle)
#
# Change back to the run directory for the next run.
#
print "Changing to %s" % options.run_data_path
os.chdir(options.run_data_path)
# Drop into a console so that we do anything additional we need.
if options.drop_to_console:
code.interact(local=locals())
# Finally combine all of the loss-functions to produce a loss output.
start_time = time.time()
combine_loss(options, features_to_train)
print "Combining CSVs took {:.3f}s".format(time.time() - start_time)
def main():
'''Parse Arguments and call real_main
'''
#
# Create list of options and parse them.
#
parser = argparse.ArgumentParser(
version=get_version(),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'action', nargs='?', choices=('train', 'loss', ACTION_TRAIN_MISSING),
default='train', help="action to perform")
parser.add_argument(
'-o', '--output_dir', dest='run_data_path',
metavar="PATH",
default=datetime.datetime.now().strftime('run_%Y-%m-%d__%H_%M_%S'),
help="directory to place run information and state")
parser.add_argument(
'-c', '--config_file', dest='config_file',
metavar="FILE", default="configs/default.cfg",
help="neural network configuration file")
parser.add_argument(
'--console', dest='drop_to_console', action="store_true",
help="drop to console after finishing processing")
parser.add_argument(
'--feature_groups', dest='feature_groups', metavar="FEAT",
default=None, nargs="+",
help="feature groups to train")
parser.add_argument(
'--amputate', dest='amputate', action="store_true",
help="train a neural network and save output of penultimate layer")
parser.add_argument(
'--predict', dest='predict', action="store_true",
help="predict output after training")
parser.add_argument(
'--kaggle', dest='kaggle', action="store_true",
help="load the test data and predict values for kaggle")
train_group = parser.add_argument_group(
"Training Control", "Options for Controlling Training")
train_group.add_argument(
'-e', '--epochs', dest='num_epochs', type=int, metavar="EPOCHS",
default=100,
help="number of epochs to train against")
train_group.add_argument(
'-i', '--interval', dest='save_state_interval', type=int,
metavar="EPOCHS",
default=10,
help="how often (in epochs) to save internal model state")
train_group.add_argument(
'-b', '--batchsize', dest='batchsize', type=int, metavar="ROWS",
default=None,
help="override the batchsize specified in config_file")
data_group = parser.add_argument_group(
"Data Options", "Options for controlling the input data.")
data_group.add_argument(
'--faces_csv', dest='faces_csv',
metavar="PATH", default=os.path.abspath("../data/training.csv"),
help="path to the faces CSV file")
data_group.add_argument(
'--faces_pickle', dest='faces_pickled',
metavar="PATH", default=os.path.abspath("../data/training.pkl.gz"),
help="path to the faces pickle file")
data_group.add_argument(
'--feature_group_file', dest='feature_group_file',
metavar="PATH", default=os.path.abspath("feature_groups.json"),
help="path to the feature groups file")
data_group.add_argument(
'--num_rows', dest='num_rows', type=int, metavar="ROWS",
default=None,
help="limit the number of rows loaded from the faces CSV (all rows if unset)")
# File: pyDubMod.py
#pydub imports
import os
from requests import Request
import requests
import json
import uuid
import string
from pydub import AudioSegment
import io, subprocess, wave, aifc, base64
import math, audioop, collections, threading
import platform, stat, random, uuid
# define exceptions
class TimeoutError(Exception): pass
class RequestError(Exception): pass
class UnknownValueError(Exception): pass
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
class AudioFile(AudioSource):
"""
Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file `filename_or_fileobject`. Subclass of ``AudioSource``.
If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.
Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.
WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.
Both AIFF and AIFF-C (compressed AIFF) formats are supported.
FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
"""
def __init__(self, filename_or_fileobject):
if str is bytes: # Python 2 - if a file path is specified, it must either be a `str` instance or a `unicode` instance
assert isinstance(filename_or_fileobject, (str, unicode)) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
else: # Python 3 - if a file path is specified, it must be a `str` instance
assert isinstance(filename_or_fileobject, str) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
self.filename_or_fileobject = filename_or_fileobject
self.stream = None
self.DURATION = None
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
try:
# attempt to read the file as WAV
self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form)
except wave.Error:
try:
# attempt to read the file as AIFF
self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
self.little_endian = False # AIFF is a big-endian format
except aifc.Error:
# attempt to read the file as FLAC
if hasattr(self.filename_or_fileobject, "read"):
flac_data = self.filename_or_fileobject.read()
else:
with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read()
# run the FLAC converter with the FLAC data to get the AIFF data
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
"--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
aiff_data, stderr = process.communicate(flac_data)
aiff_file = io.BytesIO(aiff_data)
try:
self.audio_reader = aifc.open(aiff_file, "rb")
except aifc.Error:
assert False, "Audio file could not be read as WAV, AIFF, or FLAC; check if file is corrupted"
self.little_endian = False # AIFF is a big-endian format
assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()
# 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
samples_24_bit_pretending_to_be_32_bit = False
if self.SAMPLE_WIDTH == 3: # 24-bit audio
try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit
self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading
self.SAMPLE_RATE = self.audio_reader.getframerate()
self.CHUNK = 4096
self.FRAME_COUNT = self.audio_reader.getnframes()
self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path)
self.audio_reader.close()
self.stream = None
self.DURATION = None
class AudioFileStream(object):
def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance)
self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it)
self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly
def read(self, size = -1):
buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608
sample_width = self.audio_reader.getsampwidth()
if not self.little_endian: # big endian format, convert to little endian on the fly
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
buffer = audioop.byteswap(buffer, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
# workaround for https://bugs.python.org/issue12866
if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
buffer = b"".join("\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
if self.audio_reader.getnchannels() != 1: # stereo audio
buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
return buffer
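# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the classes above fit together: open an audio file with
# ``AudioFile``, read all of its frames, and wrap them in an ``AudioData`` instance
# (defined below). The path 'example.wav' is an assumed placeholder for any PCM WAV,
# AIFF or native FLAC file.
def _example_wav_to_audio_data(path='example.wav'):
    """Read a whole audio file and return its frames as an ``AudioData`` instance."""
    with AudioFile(path) as source:
        print('duration: {:.2f} s, rate: {} Hz, sample width: {} bytes'.format(
            source.DURATION, source.SAMPLE_RATE, source.SAMPLE_WIDTH))
        frame_data = source.stream.read()  # all frames, converted to little-endian mono
    return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)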
class AudioData(object):
def __init__(self, frame_data, sample_rate, sample_width):
assert sample_rate > 0, "Sample rate must be a positive integer"
assert sample_width % 1 == 0 and 1 <= sample_width <= 4, "Sample width must be between 1 and 4 inclusive"
self.frame_data = frame_data
self.sample_rate = sample_rate
self.sample_width = int(sample_width)
def get_raw_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__.
"""
assert convert_rate is None or convert_rate > 0, "Sample rate to convert to must be a positive integer"
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 4), "Sample width to convert to must be between 1 and 4 inclusive"
raw_data = self.frame_data
# make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples)
if self.sample_width == 1:
raw_data = audioop.bias(raw_data, 1, -128) # subtract 128 from every sample to make them act like signed samples
# resample audio at the desired rate if specified
if convert_rate is not None and self.sample_rate != convert_rate:
raw_data, _ = audioop.ratecv(raw_data, self.sample_width, 1, self.sample_rate, convert_rate, None)
# convert samples to desired sample width if specified
if convert_width is not None and self.sample_width != convert_width:
if convert_width == 3: # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866)
raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) # convert audio into 32-bit first, which is always | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['QueueArgs', 'Queue']
@pulumi.input_type
class QueueArgs:
def __init__(__self__, *,
content_based_deduplication: Optional[pulumi.Input[bool]] = None,
deduplication_scope: Optional[pulumi.Input[str]] = None,
delay_seconds: Optional[pulumi.Input[int]] = None,
fifo_queue: Optional[pulumi.Input[bool]] = None,
fifo_throughput_limit: Optional[pulumi.Input[str]] = None,
kms_data_key_reuse_period_seconds: Optional[pulumi.Input[int]] = None,
kms_master_key_id: Optional[pulumi.Input[str]] = None,
maximum_message_size: Optional[pulumi.Input[int]] = None,
message_retention_period: Optional[pulumi.Input[int]] = None,
queue_name: Optional[pulumi.Input[str]] = None,
receive_message_wait_time_seconds: Optional[pulumi.Input[int]] = None,
redrive_allow_policy: Optional[Any] = None,
redrive_policy: Optional[Any] = None,
sqs_managed_sse_enabled: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['QueueTagArgs']]]] = None,
visibility_timeout: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a Queue resource.
:param pulumi.Input[bool] content_based_deduplication: For first-in-first-out (FIFO) queues, specifies whether to enable content-based deduplication. During the deduplication interval, Amazon SQS treats messages that are sent with identical content as duplicates and delivers only one copy of the message.
:param pulumi.Input[str] deduplication_scope: Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.
:param pulumi.Input[int] delay_seconds: The time in seconds for which the delivery of all messages in the queue is delayed. You can specify an integer value of 0 to 900 (15 minutes). The default value is 0.
:param pulumi.Input[bool] fifo_queue: If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue.
:param pulumi.Input[str] fifo_throughput_limit: Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.
:param pulumi.Input[int] kms_data_key_reuse_period_seconds: The length of time in seconds for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. The value must be an integer between 60 (1 minute) and 86,400 (24 hours). The default is 300 (5 minutes).
:param pulumi.Input[str] kms_master_key_id: The ID of an AWS managed customer master key (CMK) for Amazon SQS or a custom CMK. To use the AWS managed CMK for Amazon SQS, specify the (default) alias alias/aws/sqs.
:param pulumi.Input[int] maximum_message_size: The limit of how many bytes that a message can contain before Amazon SQS rejects it. You can specify an integer value from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). The default value is 262,144 (256 KiB).
:param pulumi.Input[int] message_retention_period: The number of seconds that Amazon SQS retains a message. You can specify an integer value from 60 seconds (1 minute) to 1,209,600 seconds (14 days). The default value is 345,600 seconds (4 days).
:param pulumi.Input[str] queue_name: A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the .fifo suffix.
:param pulumi.Input[int] receive_message_wait_time_seconds: Specifies the duration, in seconds, that the ReceiveMessage action call waits until a message is in the queue in order to include it in the response, rather than returning an empty response if a message isn't yet available. You can specify an integer from 1 to 20. Short polling is used as the default or when you specify 0 for this property.
:param Any redrive_allow_policy: The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object.
:param Any redrive_policy: A string that includes the parameters for the dead-letter queue functionality (redrive policy) of the source queue.
:param pulumi.Input[bool] sqs_managed_sse_enabled: Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (e.g. SSE-KMS or SSE-SQS ).
:param pulumi.Input[Sequence[pulumi.Input['QueueTagArgs']]] tags: The tags that you attach to this queue.
:param pulumi.Input[int] visibility_timeout: The length of time during which a message will be unavailable after a message is delivered from the queue. This blocks other components from receiving the same message and gives the initial component time to process and delete the message from the queue. Values must be from 0 to 43,200 seconds (12 hours). If you don't specify a value, AWS CloudFormation uses the default value of 30 seconds.
"""
if content_based_deduplication is not None:
pulumi.set(__self__, "content_based_deduplication", content_based_deduplication)
if deduplication_scope is not None:
pulumi.set(__self__, "deduplication_scope", deduplication_scope)
if delay_seconds is not None:
pulumi.set(__self__, "delay_seconds", delay_seconds)
if fifo_queue is not None:
pulumi.set(__self__, "fifo_queue", fifo_queue)
if fifo_throughput_limit is not None:
pulumi.set(__self__, "fifo_throughput_limit", fifo_throughput_limit)
if kms_data_key_reuse_period_seconds is not None:
pulumi.set(__self__, "kms_data_key_reuse_period_seconds", kms_data_key_reuse_period_seconds)
if kms_master_key_id is not None:
pulumi.set(__self__, "kms_master_key_id", kms_master_key_id)
if maximum_message_size is not None:
pulumi.set(__self__, "maximum_message_size", maximum_message_size)
if message_retention_period is not None:
pulumi.set(__self__, "message_retention_period", message_retention_period)
if queue_name is not None:
pulumi.set(__self__, "queue_name", queue_name)
if receive_message_wait_time_seconds is not None:
pulumi.set(__self__, "receive_message_wait_time_seconds", receive_message_wait_time_seconds)
if redrive_allow_policy is not None:
pulumi.set(__self__, "redrive_allow_policy", redrive_allow_policy)
if redrive_policy is not None:
pulumi.set(__self__, "redrive_policy", redrive_policy)
if sqs_managed_sse_enabled is not None:
pulumi.set(__self__, "sqs_managed_sse_enabled", sqs_managed_sse_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if visibility_timeout is not None:
pulumi.set(__self__, "visibility_timeout", visibility_timeout)
@property
@pulumi.getter(name="contentBasedDeduplication")
def content_based_deduplication(self) -> Optional[pulumi.Input[bool]]:
"""
For first-in-first-out (FIFO) queues, specifies whether to enable content-based deduplication. During the deduplication interval, Amazon SQS treats messages that are sent with identical content as duplicates and delivers only one copy of the message.
"""
return pulumi.get(self, "content_based_deduplication")
@content_based_deduplication.setter
def content_based_deduplication(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "content_based_deduplication", value)
@property
@pulumi.getter(name="deduplicationScope")
def deduplication_scope(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.
"""
return pulumi.get(self, "deduplication_scope")
@deduplication_scope.setter
def deduplication_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deduplication_scope", value)
@property
@pulumi.getter(name="delaySeconds")
def delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The time in seconds for which the delivery of all messages in the queue is delayed. You can specify an integer value of 0 to 900 (15 minutes). The default value is 0.
"""
return pulumi.get(self, "delay_seconds")
@delay_seconds.setter
def delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "delay_seconds", value)
@property
@pulumi.getter(name="fifoQueue")
def fifo_queue(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue.
"""
return pulumi.get(self, "fifo_queue")
@fifo_queue.setter
def fifo_queue(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "fifo_queue", value)
@property
@pulumi.getter(name="fifoThroughputLimit")
def fifo_throughput_limit(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.
"""
return pulumi.get(self, "fifo_throughput_limit")
@fifo_throughput_limit.setter
def fifo_throughput_limit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fifo_throughput_limit", value)
@property
@pulumi.getter(name="kmsDataKeyReusePeriodSeconds")
def kms_data_key_reuse_period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The length of time in seconds for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. The value must be an integer between 60 (1 minute) and 86,400 (24 hours). The default is 300 (5 minutes).
"""
return pulumi.get(self, "kms_data_key_reuse_period_seconds")
@kms_data_key_reuse_period_seconds.setter
def kms_data_key_reuse_period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "kms_data_key_reuse_period_seconds", value)
@property
@pulumi.getter(name="kmsMasterKeyId")
def kms_master_key_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of an AWS managed customer master key (CMK) for Amazon SQS or a custom CMK. To use the AWS managed CMK for Amazon SQS, specify the (default) alias alias/aws/sqs.
"""
return pulumi.get(self, "kms_master_key_id")
@kms_master_key_id.setter
def kms_master_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_master_key_id", value)
@property
@pulumi.getter(name="maximumMessageSize")
def maximum_message_size(self) -> Optional[pulumi.Input[int]]:
"""
The limit of how many bytes that a message can contain before Amazon SQS rejects it. You can specify an integer value from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). The default value is 262,144 (256 KiB).
"""
return pulumi.get(self, "maximum_message_size")
@maximum_message_size.setter
def maximum_message_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "maximum_message_size", value)
@property
@pulumi.getter(name="messageRetentionPeriod")
def message_retention_period(self) -> Optional[pulumi.Input[int]]:
"""
The number of seconds that Amazon SQS retains a message. You can specify an integer value from 60 seconds (1 minute) to 1,209,600 seconds (14 days). The default value is 345,600 seconds (4 days).
"""
return pulumi.get(self, "message_retention_period")
@message_retention_period.setter
def message_retention_period(self, value: | |
< 1:
this_event_type = 1
if this_event_type > n_types:
this_event_type = n_types
event_types[model_name][this_e_range]['true'].append(this_event_type)
if return_partition:
return event_types, event_type_bins
else:
return event_types
def predicted_event_types(dtf_e_test, trained_models, n_types=2):
'''
Get the true and predicted event types for n_types event types.
Two lists of types are returned per model and per energy range, one true and one predicted.
This function is meant to be used only for the classification case.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range,
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
n_types: int (default=2)
The number of types used in the training.
Returns
-------
event_types: nested dict
1st dict:
keys=model names, values=2nd dict
2nd dict:
            keys=energy ranges, values=3rd dict
3rd dict:
keys=true or reco, values=event type
'''
event_types = dict()
for model_name, model in trained_models.items():
event_types[model_name] = dict()
for this_e_range, this_model in model.items():
            event_types[model_name][this_e_range] = defaultdict(list)
# To keep lines short
dtf_this_e = dtf_e_test[this_model['test_data_suffix']][this_e_range]
event_types[model_name][this_e_range]['true'] = dtf_this_e[
'event_type_{:d}'.format(n_types)
]
X_test = dtf_this_e[this_model['train_features']].values
event_types[model_name][this_e_range]['reco'] = this_model['model'].predict(X_test)
return event_types
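# --- Illustrative helper (a sketch, not part of the original module) ---
# Shows how the output of predicted_event_types() could be summarised: for each model and
# energy range, compute the fraction of events whose reconstructed type matches the true type.
def _example_event_type_accuracy(event_types):
    '''Return a nested dict of accuracies keyed like the input (model name -> energy range).'''
    accuracies = dict()
    for model_name, e_ranges in event_types.items():
        accuracies[model_name] = dict()
        for this_e_range, these_types in e_ranges.items():
            true_types = np.asarray(these_types['true'])
            reco_types = np.asarray(these_types['reco'])
            accuracies[model_name][this_e_range] = float(np.mean(true_types == reco_types))
    return accuracies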
def add_event_types_column(dtf_e, labels, n_types=[2, 3, 4]):
'''
Divide the events into n_types event types.
The bins defining the types are calculated from the label values.
    The data will be divided into n types with an equal number of events in each type.
A column with the type will be added to the DataFrame per entry in the n_types list.
Parameters
----------
dtf_e: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data.
The keys of the dict are the energy ranges of the data.
labels: str
The variable to use as a basis on which to divide the data.
n_types: list of ints (default=[2, 3, 4])
        The data will be divided into n types
        with an equal number of events in each type.
A column with the type will be added to the DataFrame per entry in the n_types list.
Returns
-------
dtf_e: dict of pandas DataFrames
The same DataFrame as the input but with added columns for event types,
one column per n_types entry. The column names are event_type_n.
'''
pd.options.mode.chained_assignment = None
for this_n_type in n_types:
for this_e_range, this_dtf in dtf_e.items():
event_types = list()
event_types_bins = mstats.mquantiles(
this_dtf[labels].values,
np.linspace(0, 1, this_n_type + 1)
)
for this_value in this_dtf[labels].values:
this_event_type = np.searchsorted(event_types_bins, this_value)
if this_event_type < 1:
this_event_type = 1
if this_event_type > this_n_type:
this_event_type = this_n_type
event_types.append(this_event_type)
this_dtf.loc[:, 'event_type_{:d}'.format(this_n_type)] = event_types
return dtf_e
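# Illustrative usage (a sketch; 'log_ang_diff' is a hypothetical label column name):
#     dtf_e = add_event_types_column(dtf_e, labels='log_ang_diff', n_types=[2, 3])
#     first_bin = next(iter(dtf_e))
#     print(dtf_e[first_bin]['event_type_2'].value_counts())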
def extract_unique_dataset_names(trained_models):
'''
Extract all test datasets names necessary for the given trained models.
Parameters
----------
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range.
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
Returns
-------
dataset_names: set
Set of unique data set names
'''
dataset_names = set()
for model in trained_models.values():
for this_model in model.values():
dataset_names.add(this_model['test_data_suffix'])
return dataset_names
def plot_pearson_correlation(dtf, title):
'''
Calculate the Pearson correlation between all variables in this DataFrame.
Parameters
----------
dtf: pandas DataFrame
The DataFrame containing the data.
title: str
        A title to add to the plot (will be appended to 'Pearson correlations')
Returns
-------
A pyplot instance with the Pearson correlation plot.
'''
plt.subplots(figsize=[16, 16])
corr_matrix = dtf.corr(method='pearson')
sns.heatmap(
corr_matrix,
vmin=-1.,
vmax=1.,
annot=True,
fmt='.2f',
cmap="YlGnBu",
cbar=True,
linewidths=0.5
)
plt.title('Pearson correlations {}'.format(title))
plt.tight_layout()
return plt
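# Illustrative usage (a sketch; assumes `dtf` is a single energy bin's DataFrame):
#     plt = plot_pearson_correlation(dtf, title='0.1 < E < 0.3 TeV')
#     plt.savefig('pearson_correlations.png')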
def plot_test_vs_predict(dtf_e_test, trained_models, trained_model_name):
'''
Plot true values vs. the predictions of the model for all energy bins.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of one trained sklearn model per energy range.
1st dict:
keys=energy ranges, values=2nd dict
2nd dict:
'model': trained model for this energy range
'train_features': list of variable names trained with.
'labels': Name of the variable used as the labels in the training.
trained_model_name: str
Name of the model trained.
Returns
-------
A pyplot instance with the test vs. prediction plot.
'''
nrows = 5
ncols = 4
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[14, 18])
for i_plot, (this_e_range, this_model) in enumerate(trained_models.items()):
# To keep lines short
dtf_this_e = dtf_e_test[this_model['test_data_suffix']][this_e_range]
X_test = dtf_this_e[this_model['train_features']].values
y_test = dtf_this_e[this_model['labels']].values
if np.any(np.isinf(X_test)):
# Remove positive infs
X_test[X_test > 999999] = 999999
# Remove negative infs
X_test[X_test < -999999] = -999999
y_pred = this_model['model'].predict(X_test)
ax = axs[int(np.floor(i_plot/ncols)), i_plot % ncols]
ax.hist2d(y_pred, y_test, bins=(50, 50), cmap=plt.cm.jet)
ax.plot(
[min(y_test), max(y_test)], [min(y_test), max(y_test)],
linestyle='--',
lw=2,
color='white'
)
ax.set_xlim(np.quantile(y_pred, [0.01, 0.99]))
ax.set_ylim(np.quantile(y_test, [0.01, 0.99]))
ax.set_title(this_e_range)
ax.set_ylabel('True')
ax.set_xlabel('Predicted')
axs[nrows - 1, ncols - 1].axis('off')
axs[nrows - 1, ncols - 1].text(
0.5,
0.5,
trained_model_name,
horizontalalignment='left',
verticalalignment='center',
fontsize=18,
transform=axs[nrows - 1, ncols - 1].transAxes
)
plt.tight_layout()
return plt
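# Illustrative usage (a sketch; 'MLP_small' is a hypothetical model name):
#     plt = plot_test_vs_predict(dtf_e_test, trained_models['MLP_small'], 'MLP_small')
#     plt.savefig('test_vs_predict_MLP_small.png')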
def plot_matrix(dtf, train_features, labels, n_types=2, plot_events=20000):
'''
Plot a matrix of each variable in train_features against another (not all combinations).
The data is divided to n_types bins of equal statistics based on the labels.
Each type is plotted in a different colour.
    This function produces multiple plots, each showing a maximum of 5 variables.
Unlike in most cases in this code, dtf is the DataFrame itself,
not a dict of energy ranges. This function should be called per energy bin.
Parameters
----------
dtf: pandas DataFrames
A DataFrame to add event types to.
train_features: list
List of variable names trained with.
labels: str
Name of the variable used as the labels in the training.
n_types: int (default=2)
The number of types to divide the data in.
plot_events: int (default=20000)
For efficiency, limit the number of events that will be used for the plots
Returns
-------
A list of seaborn.PairGrid instances, each with one matrix plot.
'''
setStyle()
# Check if event_type column already present within dtf:
if "event_type" not in dtf.columns:
dtf = add_event_type_column(dtf, labels, n_types)
# Mask out the events without a clear event type
dtf = dtf[dtf['event_type'] > 0]
type_colors = {
1: "#ba2c54",
2: "#5B90DC",
3: '#FFAB44',
4: '#0C9FB3'
}
vars_to_plot = np.array_split(
[labels] + train_features,
round(len([labels] + train_features)/5)
)
grid_plots = list()
for these_vars in vars_to_plot:
grid_plots.append(
sns.pairplot(
dtf.sample(n=plot_events),
vars=these_vars,
hue='event_type',
palette=type_colors,
corner=True
)
)
return grid_plots
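# Illustrative usage (a sketch; assumes `dtf`, `train_features` and `labels` follow the conventions above):
#     grid_plots = plot_matrix(dtf, train_features, labels, n_types=2, plot_events=5000)
#     for i_grid, grid in enumerate(grid_plots):
#         grid.savefig('matrix_plot_{}.png'.format(i_grid))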
def plot_score_comparison(dtf_e_test, trained_models):
'''
Plot the score of the model as a function of energy.
#TODO add a similar function that plots from saved scores instead of calculating every time.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': dict of trained models for this energy range.
'train_features': list of variable names | |
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_40_3 = settings_1_40_2
settings_1_40_4 = settings_1_40_3
settings_1_41_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_42_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: | |
exit_on_failure: bool = True,
exit_code: int = 3,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 8 bit data
:param data: Input data
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 3)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 8 bit data
"""
try:
return pack('B', data)
except error:
if not quiet:
print('Bad value for 8 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
@staticmethod
def pack16(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 4,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 16 bit data
:param data: Input data
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 4)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 16 bit data
"""
try:
return pack('!H', data)
except error:
if not quiet:
print('Bad value for 16 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
@staticmethod
def pack32(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 5,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 32 bit data
:param data: Input data
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 5)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 32 bit data
"""
try:
return pack('!I', data)
except error:
if not quiet:
print('Bad value for 32 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
@staticmethod
def pack64(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 6,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 64 bit data
:param data: Input data
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 6)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 64 bit data
"""
try:
return pack('!Q', data)
except error:
if not quiet:
print('Bad value for 64 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
# endregion
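    # Illustrative use of the pack helpers above (a sketch; `base` is an assumed instance of
    # this utility class):
    #     transaction_id: bytes = base.pack16(0x1a2b)  # b'\x1a\x2b' in network byte order
    #     address: bytes = base.pack32(3232235521)     # 192.168.0.1 packed as 4 big-endian bytes
    # On a bad value the helpers print an error and either exit or return None, depending on
    # `exit_on_failure`.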
# region Network interface functions
def list_of_network_interfaces(self) -> Union[None, List[str]]:
"""
Get list of network interfaces
:return: list of network interfaces (example: ['lo', 'eth0'])
"""
if self.get_platform().startswith('Windows'):
result_list: List[str] = list()
for adapter in self._windows_adapters:
for ip in adapter.ips:
if ip.nice_name not in result_list:
result_list.append(ip.nice_name)
return result_list
else:
return interfaces()
def list_of_wireless_network_interfaces(self) -> List[str]:
"""
Get list of wireless network interfaces
:return: list of wireless network interfaces (example: ['wlan0', 'wlan1'])
"""
try:
wireless_network_interfaces: List[str] = list()
current_platform: str = self.get_platform()
# Mac OS
if current_platform.startswith('Darwin'):
interfaces_info: sub.CompletedProcess = \
sub.run(['networksetup -listnetworkserviceorder'], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
interfaces_info: str = interfaces_info.stdout.decode('utf-8')
interfaces_info: List[str] = interfaces_info.splitlines()
for output_line in interfaces_info:
if 'Wi-Fi' in output_line and 'Device: ' in output_line:
search_result = search(r'Device: (?P<interface_name>[a-zA-Z0-9]{2,16})\)', output_line)
if search_result is not None:
wireless_network_interfaces.append(search_result.group('interface_name'))
# Linux
elif current_platform.startswith('Linux'):
interfaces_info: sub.CompletedProcess = \
sub.run(['iwconfig'], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
interfaces_info: str = interfaces_info.stdout.decode('utf-8')
interfaces_info: List[str] = interfaces_info.splitlines()
for output_line in interfaces_info:
if 'IEEE 802.11' in output_line:
search_result = search(r'^(?P<interface_name>[a-zA-Z0-9]{2,32}) +IEEE', output_line)
if search_result is not None:
wireless_network_interfaces.append(search_result.group('interface_name'))
# Windows
elif current_platform.startswith('Windows'):
netsh_command: sub.Popen = \
sub.Popen('netsh wlan show interfaces', shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
netsh_command_output, netsh_command_error = netsh_command.communicate()
interfaces_info: str = netsh_command_output.decode('gbk') + \
netsh_command_error.decode('gbk')
interfaces_info: List[str] = interfaces_info.splitlines()
for output_line in interfaces_info:
if 'Name' in output_line:
search_result = search(r'^ +Name +: (?P<interface_name>.*)$', output_line)
if search_result is not None:
wireless_network_interfaces.append(search_result.group('interface_name'))
# Other
else:
pass
return wireless_network_interfaces
except AssertionError:
return list()
def network_interface_selection(self,
interface_name: Union[None, str] = None,
exclude_interface: Union[None, str] = None,
only_wireless: bool = False,
message: Union[None, str] = None) -> str:
"""
Select network interface
:param interface_name: Network interface name (example: 'eth0'; default: None)
:param exclude_interface: Exclude network interface from list of interfaces (example: 'eth1'; default: None)
:param only_wireless: Select network interface only from wireless interfaces (default: False)
        :param message: Message to print before selecting a network interface from the table (example: 'Select network interface from table: ')
:return: Network interface name (example: 'eth0')
"""
network_interface_index: int = 1
if not only_wireless:
available_network_interfaces: List[str] = self.list_of_network_interfaces()
else:
available_network_interfaces: List[str] = self.list_of_wireless_network_interfaces()
if exclude_interface is not None:
available_network_interfaces.remove(exclude_interface)
if interface_name is not None:
if interface_name in available_network_interfaces:
self.get_interface_settings(interface_name=interface_name, required_parameters=[], quiet=True)
return interface_name
else:
if not only_wireless:
self.print_error('Network interface: ', interface_name, ' does not exist!')
else:
self.print_error('Wireless network interface: ', interface_name, ' does not exist!')
exit(1)
else:
if 'lo' in available_network_interfaces:
available_network_interfaces.remove('lo')
if len(available_network_interfaces) > 1:
if message is not None:
self.print_warning(message)
interfaces_pretty_table = PrettyTable([self.info_text('Index'),
self.info_text('Interface name'),
self.info_text('MAC address'),
self.info_text('IPv4 address'),
self.info_text('IPv6 link address')])
for network_interface in available_network_interfaces:
network_interface_settings = self.get_interface_settings(interface_name=network_interface,
required_parameters=[], quiet=True)
network_interface_mac_address: Union[None, str] = \
network_interface_settings['mac-address']
if network_interface_mac_address is None:
network_interface_mac_address = 'None'
network_interface_ipv4_address: Union[None, str] = \
network_interface_settings['ipv4-address']
if network_interface_ipv4_address is None:
network_interface_ipv4_address = 'None'
network_interface_ipv6_link_address: Union[None, str] = \
network_interface_settings['ipv6-link-address']
if network_interface_ipv6_link_address is None:
network_interface_ipv6_link_address = 'None'
interfaces_pretty_table.add_row([str(network_interface_index),
network_interface,
network_interface_mac_address,
network_interface_ipv4_address,
network_interface_ipv6_link_address])
network_interface_index += 1
print(interfaces_pretty_table)
network_interface_index -= 1
print(self.c_warning + 'Select network interface from range (1-' +
str(network_interface_index) + '): ', end='')
current_network_interface_index = input()
if not current_network_interface_index.isdigit():
                    self.print_error('Your input data: ', current_network_interface_index, ' is not a digit!')
exit(1)
else:
current_network_interface_index = int(current_network_interface_index)
if any([int(current_network_interface_index) < 1,
int(current_network_interface_index) > network_interface_index]):
self.print_error('Your number: ', current_network_interface_index,
' is not within range (', '1-' + str(network_interface_index), ')')
exit(1)
current_network_interface = ''
try:
current_network_interface = str(available_network_interfaces[current_network_interface_index - 1])
except:
                    self.print_error('There is a problem with this network interface!')
exit(1)
if not only_wireless:
                    self.print_info('Your chosen network interface: ', current_network_interface)
else:
                    self.print_info('Your chosen wireless network interface: ', current_network_interface)
return current_network_interface
if len(available_network_interfaces) == 1:
self.get_interface_settings(interface_name=available_network_interfaces[0],
required_parameters=[], quiet=True)
if not only_wireless:
self.print_info('You have only one network interface: ', available_network_interfaces[0])
else:
self.print_info('You have only one wireless network interface: ', available_network_interfaces[0])
return available_network_interfaces[0]
if len(available_network_interfaces) == 0:
if not only_wireless:
self.print_error('Network interfaces not found!')
else:
self.print_error('Wireless network interfaces not found!')
exit(1)
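    # Illustrative usage (a sketch; `base` is an assumed instance of this class):
    #     interface = base.network_interface_selection(interface_name='wlan0', only_wireless=True)
    # If `interface_name` is omitted and several interfaces are available, a table of candidates
    # is printed and the user is asked to choose one by index.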
def check_network_interface_is_wireless(self, interface_name: str = 'wlan0') -> bool:
"""
Check network interface is wireless
:param interface_name: Network interface name (example: 'wlan0')
:return: True or False
"""
try:
current_platform: str = self.get_platform()
# Mac OS
if current_platform.startswith('Darwin'):
interface_info: sub.CompletedProcess = \
sub.run(['networksetup -listnetworkserviceorder | grep ' + interface_name],
shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
interface_info: str = interface_info.stdout.decode('utf-8')
assert 'Wi-Fi' in interface_info, 'Is not wireless interface!'
return True
# Linux
elif current_platform.startswith('Linux'):
interface_info: sub.CompletedProcess = \
sub.run(['iwconfig ' + interface_name],
shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
interface_info: str = interface_info.stdout.decode('utf-8')
assert 'no wireless extensions' not in interface_info, 'Is not wireless interface!'
return True
# Windows
elif current_platform.startswith('Windows'):
netsh_command: sub.Popen = \
sub.Popen('netsh wlan show interfaces', shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
netsh_command_output, netsh_command_error = netsh_command.communicate()
interfaces_info: str = netsh_command_output.decode('gbk') + \
netsh_command_error.decode('gbk')
assert 'no wireless extensions' not in interfaces_info, 'Is not wireless interface!'
return True
# Other
else:
return False
except AssertionError:
return False
# @staticmethod
# def get_netiface_essid(interface_name):
# try:
# wifi = Wireless(interface_name)
# essid = wifi.getEssid()
# except:
# essid = None
# return essid
#
# @staticmethod
# def get_netiface_frequency(interface_name):
# try:
# wifi = Wireless(interface_name)
# frequency = wifi.getFrequency()
# except:
# frequency = 0
# return frequency
def get_interface_settings(self,
interface_name: str = 'eth0',
required_parameters: List[str] = ['mac-address'],
quiet: bool = True) -> Dict[str, Union[None, str, List[str]]]:
"""
Get network interface settings
:param interface_name: Network interface name (default: 'eth0')
:param required_parameters: Required Network interface parameters list (default: ['mac-address'])
:param quiet: Quiet mode, if True no console output (default: True)
:return: Network interface settings dictionary
(example: {'network-interface': 'example-network-interface',
'is-wireless': False,
'essid': 'AP',
'bssid': '12:34:56:78:90:ab',
'channel': 1,
'mac-address': '12:34:56:78:90:ab',
'ipv4-address': '192.168.0.1',
'ipv6-link-address': 'fe80::1234:5678:90ab:cdef',
'ipv6-global-address': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'ipv6-global-addresses': ['fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 'fc00:db20:35b:7399::5'],
'ipv4-netmask': '255.255.255.0',
'ipv4-network': '192.168.0.0/24',
'first-ipv4-address': '192.168.0.1',
'second-ipv4-address': '192.168.0.2',
'penultimate-ipv4-address': '192.168.0.253',
'last-ipv4-address': '192.168.0.254',
'ipv4-broadcast': '192.168.0.255',
'ipv4-gateway': '192.168.0.254',
'ipv6-gateway': 'fe80::1234:5678:8765:4321'})
"""
if interface_name not in self._network_interfaces_settings.keys():
wireless_interface_settings: Dict[str, Union[None, int, float, str]] = \
self.get_wireless_interface_settings(interface_name=interface_name)
self._network_interfaces_settings[interface_name]: \
Dict[str, Union[None, bool, int, float, str, List[str]]] = {
'network-interface': interface_name,
'is-wireless': self.check_network_interface_is_wireless(interface_name=interface_name),
'essid': wireless_interface_settings['essid'],
'bssid': wireless_interface_settings['bssid'],
'channel': wireless_interface_settings['channel'],
'mac-address': self.get_interface_mac_address(interface_name=interface_name,
exit_on_failure=False,
quiet=quiet),
'ipv4-address': self.get_interface_ip_address(interface_name=interface_name,
exit_on_failure=False,
quiet=quiet),
'ipv6-link-address': self.get_interface_ipv6_link_address(interface_name=interface_name,
exit_on_failure=False,
quiet=quiet),
'ipv6-global-address': | |
to select the appropriate nested schema for serialization.
"""
if self.__class__.__name__ != raw_node.__class__.__name__:
raise TypeError(f"Cannot serialize {raw_node} with {self}")
return raw_node
class KerasHdf5WeightsEntry(WeightsEntryBase):
bioimageio_description = "Keras HDF5 weights format"
weights_format = fields.String(validate=field_validators.Equal("keras_hdf5"), required=True, load_only=True)
tensorflow_version = fields.StrictVersion() # todo: required=True
class OnnxWeightsEntry(WeightsEntryBase):
bioimageio_description = "ONNX weights format"
weights_format = fields.String(validate=field_validators.Equal("onnx"), required=True, load_only=True)
opset_version = fields.Integer() # todo: required=True
class PytorchStateDictWeightsEntry(WeightsEntryBase):
bioimageio_description = "PyTorch state dictionary weights format"
weights_format = fields.String(validate=field_validators.Equal("pytorch_state_dict"), required=True, load_only=True)
class PytorchScriptWeightsEntry(WeightsEntryBase):
bioimageio_description = "Torch Script weights format"
weights_format = fields.String(validate=field_validators.Equal("pytorch_script"), required=True, load_only=True)
class TensorflowJsWeightsEntry(WeightsEntryBase):
bioimageio_description = "Tensorflow Javascript weights format"
weights_format = fields.String(validate=field_validators.Equal("tensorflow_js"), required=True, load_only=True)
tensorflow_version = fields.StrictVersion() # todo: required=True
class TensorflowSavedModelBundleWeightsEntry(WeightsEntryBase):
bioimageio_description = "Tensorflow Saved Model Bundle weights format"
weights_format = fields.String(
validate=field_validators.Equal("tensorflow_saved_model_bundle"), required=True, load_only=True
)
tensorflow_version = fields.StrictVersion() # todo: required=True
WeightsEntry = typing.Union[
PytorchStateDictWeightsEntry,
PytorchScriptWeightsEntry,
KerasHdf5WeightsEntry,
TensorflowJsWeightsEntry,
TensorflowSavedModelBundleWeightsEntry,
OnnxWeightsEntry,
]
class ModelParent(BioImageIOSchema):
uri = fields.URI(
bioimageio_description="Url of another model available on bioimage.io or path to a local model in the "
"bioimage.io specification. If it is a url, it needs to be a github url linking to the page containing the "
"model (NOT the raw file)."
)
sha256 = fields.SHA256(bioimageio_description="Hash of the weights of the parent model.")
class Model(rdf.schema.RDF):
raw_nodes = raw_nodes
class Meta:
unknown = RAISE
bioimageio_description = f"""# BioImage.IO Model Resource Description File Specification {get_args(base_nodes.FormatVersion)[-1]}
This specification defines the fields used in a BioImage.IO-compliant resource description file (`RDF`) for describing AI models with pretrained weights.
These fields are typically stored in YAML files which we call Model Resource Description Files or `model RDF`.
The model RDFs can be downloaded or uploaded to the bioimage.io website, and produced or consumed by BioImage.IO-compatible consumers (e.g. image analysis software or other websites).
The model RDF YAML file contains mandatory and optional fields. In the following description, optional fields are indicated by _optional_.
_optional*_ with an asterisk indicates the field is optional depending on the value in another field.
"""
# todo: unify authors with RDF (optional or required?)
authors = fields.List(
fields.Nested(Author), required=True, bioimageio_description=rdf.schema.RDF.authors_bioimageio_description
)
badges = missing_ # todo: allow badges for Model (RDF has it)
cite = fields.Nested(
CiteEntry,
many=True,
required=True, # todo: unify authors with RDF (optional or required?)
bioimageio_description=rdf.schema.RDF.cite_bioimageio_description,
)
download_url = missing_ # todo: allow download_url for Model (RDF has it)
dependencies = fields.Dependencies( # todo: add validation (0.4.0?)
bioimageio_description="Dependency manager and dependency file, specified as `<dependency manager>:<relative "
"path to file>`. For example: 'conda:./environment.yaml', 'maven:./pom.xml', or 'pip:./requirements.txt'"
)
format_version = fields.String(
validate=field_validators.OneOf(get_args_flat(base_nodes.FormatVersion)),
required=True,
bioimageio_description_order=0,
bioimageio_description=f"""Version of the BioImage.IO Model Resource Description File Specification used.
This is mandatory, and important for the consumer software to verify before parsing the fields.
The recommended behavior for the implementation is to keep backward compatibility and throw an error if the model yaml
is in an unsupported format version. The current format version described here is
{get_args(base_nodes.FormatVersion)[-1]}""",
)
framework = fields.String(
validate=field_validators.OneOf(get_args(base_nodes.Framework)),
bioimageio_description=f"The deep learning framework of the source code. One of: "
f"{', '.join(get_args(base_nodes.Framework))}. This field is only required if the field `source` is present.",
)
git_repo = fields.String(
validate=field_validators.URL(schemes=["http", "https"]),
bioimageio_description=rdf.schema.RDF.git_repo_bioimageio_description
+ "If the model is contained in a subfolder of a git repository, then a url to the exact folder"
+ "(which contains the configuration yaml file) should be used.",
)
icon = missing_ # todo: allow icon for Model (RDF has it)
kwargs = fields.Kwargs(
bioimageio_description="Keyword arguments for the implementation specified by `source`. "
"This field is only required if the field `source` is present."
)
language = fields.String(
validate=field_validators.OneOf(get_args(base_nodes.Language)),
bioimageio_maybe_required=True,
bioimageio_description=f"Programming language of the source code. One of: "
f"{', '.join(get_args(base_nodes.Language))}. This field is only required if the field `source` is present.",
)
license = fields.String(
required=True, # todo: unify license with RDF (optional or required?)
bioimageio_description=rdf.schema.RDF.license_bioimageio_description,
)
name = fields.String(
# validate=field_validators.Length(max=36), # todo: enforce in future version (0.4.0?)
required=True,
bioimageio_description="Name of this model. It should be human-readable and only contain letters, numbers, "
"`_`, `-` or spaces and not be longer than 36 characters.",
)
packaged_by = fields.List(
fields.Nested(Author),
bioimageio_description=f"The persons that have packaged and uploaded this model. Only needs to be specified if "
f"different from `authors` in root or any entry in `weights`.",
)
parent = fields.Nested(
ModelParent,
bioimageio_description="Parent model from which the trained weights of this model have been derived, e.g. by "
"finetuning the weights of this model on a different dataset. For format changes of the same trained model "
"checkpoint, see `weights`.",
)
run_mode = fields.Nested(
RunMode,
bioimageio_description="Custom run mode for this model: for more complex prediction procedures like test time "
"data augmentation that currently cannot be expressed in the specification. The different run modes should be "
"listed in [supported_formats_and_operations.md#Run Modes]"
"(https://github.com/bioimage-io/configuration/blob/master/supported_formats_and_operations.md#run-modes).",
)
sha256 = fields.String(
validate=field_validators.Length(equal=64),
bioimageio_description="SHA256 checksum of the model source code file."
+ _common_sha256_hint
+ " This field is only required if the field source is present.",
)
source = fields.ImportableSource(
bioimageio_maybe_required=True,
bioimageio_description="Language and framework specific implementation. As some weights contain the model "
"architecture, the source is optional depending on the present weight formats. `source` can either point to a "
"local implementation: `<relative path to file>:<identifier of implementation within the source file>` or the "
"implementation in an available dependency: `<root-dependency>.<sub-dependency>.<identifier>`.\nFor example: "
"`my_function.py:MyImplementation` or `core_library.some_module.some_function`.",
)
timestamp = fields.DateTime(
required=True,
bioimageio_description="Timestamp of the initial creation of this model in [ISO 8601]"
"(#https://en.wikipedia.org/wiki/ISO_8601) format.",
)
weights = fields.Dict(
fields.String(
validate=field_validators.OneOf(get_args(base_nodes.WeightsFormat)),
required=True,
bioimageio_description=f"Format of this set of weights. Weight formats can define additional (optional or "
f"required) fields. See [supported_formats_and_operations.md#Weight Format]"
f"(https://github.com/bioimage-io/configuration/blob/master/supported_formats_and_operations.md#weight_format). "
f"One of: {', '.join(get_args(base_nodes.WeightsFormat))}",
),
fields.Union([fields.Nested(we) for we in get_args(WeightsEntry)]),
required=True,
bioimageio_description="The weights for this model. Weights can be given for different formats, but should "
"otherwise be equivalent. The available weight formats determine which consumers can use this model.",
)
@pre_load
def add_weights_format_key_to_weights_entry_value(self, data: dict, many=False, partial=False, **kwargs):
data = deepcopy(data) # Schema.validate() calls pre_load methods, thus we should not modify the input data
if many or partial:
raise NotImplementedError
for weights_format, weights_entry in data.get("weights", {}).items():
if "weights_format" in weights_entry:
raise ValidationError(f"Got unexpected key 'weights_format' in weights entry {weights_format}")
weights_entry["weights_format"] = weights_format
return data
inputs = fields.Nested(
InputTensor, many=True, bioimageio_description="Describes the input tensors expected by this model."
)
outputs = fields.Nested(
OutputTensor, many=True, bioimageio_description="Describes the output tensors from this model."
)
test_inputs = fields.List(
fields.URI,
required=True,
bioimageio_description="List of URIs to test inputs as described in inputs for a single test case. "
"Supported file formats/extensions: '.npy'",
)
test_outputs = fields.List(fields.URI, required=True, bioimageio_description="Analogous to test_inputs.")
sample_inputs = fields.List(
fields.URI,
bioimageio_description="List of URIs to sample inputs to illustrate possible inputs for the model, for example "
"stored as png or tif images.",
)
sample_outputs = fields.List(
fields.URI, bioimageio_description="List of URIs to sample outputs corresponding to the `sample_inputs`."
)
config = fields.Dict(
bioimageio_description=rdf.schema.RDF.config_bioimageio_description
+ """
For example:
```yaml
config:
# custom config for DeepImageJ, see https://github.com/bioimage-io/configuration/issues/23
deepimagej:
model_keys:
# In principle the tag "SERVING" is used in almost every tf model
model_tag: tf.saved_model.tag_constants.SERVING
# Signature definition to call the model. Again "SERVING" is the most general
signature_definition: tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
test_information:
input_size: [2048x2048] # Size of the input images
output_size: [1264x1264 ]# Size of all the outputs
device: cpu # Device used. In principle either cpu or GPU
memory_peak: 257.7 Mb # Maximum memory consumed by the model in the device
runtime: 78.8s # Time it took to run the model
pixel_size: [9.658E-4µmx9.658E-4µm] # Size of the pixels of the input
```
"""
)
@validates_schema
def language_and_framework_match(self, data, **kwargs):
field_names = ("language", "framework")
valid_combinations = [
("python", "scikit-learn"), # todo: remove
("python", "pytorch"),
("python", "tensorflow"),
("java", "tensorflow"),
]
if "source" not in data:
valid_combinations.append((missing_, missing_))
valid_combinations.append(("python", missing_))
valid_combinations.append(("java", missing_))
combination = tuple(data.get(name, missing_) for name in field_names)
if combination not in valid_combinations:
raise ValidationError(f"invalid combination of {dict(zip(field_names, combination))}")
@validates_schema
def source_specified_if_required(self, data, **kwargs):
if "source" in data:
return
weights_format_requires_source = {
"pytorch_state_dict": True,
"pytorch_script": False,
"keras_hdf5": False,
"tensorflow_js": False,
"tensorflow_saved_model_bundle": False,
"onnx": False,
}
require_source = {wf for wf in data["weights"] if weights_format_requires_source[wf]}
if require_source:
raise ValidationError(
f"These specified weight | |
'''
A generic target capture task
'''
import numpy as np
import time
import os
import math
import traceback
from collections import OrderedDict
from riglib.experiment import traits, Sequence, FSMTable, StateTransitions
from riglib.stereo_opengl import ik
from riglib import plants
from .target_graphics import *
####### CONSTANTS
sec_per_min = 60.0
RED = (1,0,0,.5)
GREEN = (0,1,0,0.5)
GOLD = (1., 0.843, 0., 0.5)
mm_per_cm = 1./10
## Plants
# List of possible "plants" that a subject could control either during manual or brain control
cursor_14x14 = plants.CursorPlant(endpt_bounds=(-14, 14, 0., 0., -14, 14))
shoulder_anchor = np.array([2., 0., -15])
chain_kwargs = dict(link_radii=.6, joint_radii=0.6, joint_colors=(181/256., 116/256., 96/256., 1), link_colors=(181/256., 116/256., 96/256., 1))
chain_20_20_endpt = plants.EndptControlled2LArm(link_lengths=[20, 20], base_loc=shoulder_anchor, **chain_kwargs)
init_pos = np.array([0, 0, 0], np.float64)
chain_20_20_endpt.set_intrinsic_coordinates(init_pos)
chain_20_20 = plants.RobotArmGen2D(link_lengths=[20, 20], base_loc=shoulder_anchor, **chain_kwargs)
init_pos = np.array([ 0.38118002, 2.08145271])
chain_20_20.set_intrinsic_coordinates(init_pos)
plantlist = dict(
cursor_14x14=cursor_14x14,
chain_20_20=chain_20_20,
chain_20_20_endpt=chain_20_20_endpt)
class TargetCapture(Sequence):
'''
This is a generic cued target capture skeleton, to form as a common ancestor to the most
common type of motor control task.
'''
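    # The state machine below runs wait -> target -> hold -> targ_transition, ending each
    # trial in reward, timeout_penalty (target not entered in time) or hold_penalty (target
    # left too early); both penalty states feed back into targ_transition so the target
    # sequence can be retried up to max_attempts times.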
status = FSMTable(
wait = StateTransitions(start_trial="target"),
target = StateTransitions(enter_target="hold", timeout="timeout_penalty"),
hold = StateTransitions(leave_early="hold_penalty", hold_complete="targ_transition"),
targ_transition = StateTransitions(trial_complete="reward", trial_abort="wait", trial_incomplete="target"),
timeout_penalty = StateTransitions(timeout_penalty_end="targ_transition", end_state=True),
hold_penalty = StateTransitions(hold_penalty_end="targ_transition", end_state=True),
reward = StateTransitions(reward_end="wait", stoppable=False, end_state=True)
)
trial_end_states = ['reward', 'timeout_penalty', 'hold_penalty']
# initial state
state = "wait"
target_index = -1 # Helper variable to keep track of which target to display within a trial
tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial.
sequence_generators = []
reward_time = traits.Float(.5, desc="Length of reward dispensation")
hold_time = traits.Float(.2, desc="Length of hold required at targets")
hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
timeout_time = traits.Float(10, desc="Time allowed to go between targets")
timeout_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")
max_attempts = traits.Int(10, desc='The number of attempts at a target before\
skipping to the next one')
def _start_wait(self):
# Call parent method to draw the next target capture sequence from the generator
super()._start_wait()
# number of times this sequence of targets has been attempted
self.tries = 0
# index of current target presented to subject
self.target_index = -1
# number of targets to be acquired in this trial
self.chain_length = len(self.targs)
def _parse_next_trial(self):
'''Check that the generator has the required data'''
self.targs = self.next_trial
# TODO error checking
def _start_target(self):
self.target_index += 1
self.target_location = self.targs[self.target_index]
def _end_target(self):
'''Nothing generic to do.'''
pass
def _start_hold(self):
'''Nothing generic to do.'''
pass
def _while_hold(self):
'''Nothing generic to do.'''
pass
def _end_hold(self):
'''Nothing generic to do.'''
pass
def _start_targ_transition(self):
'''Nothing generic to do. Child class might show/hide targets'''
pass
def _while_targ_transition(self):
'''Nothing generic to do.'''
pass
def _end_targ_transition(self):
'''Nothing generic to do.'''
pass
def _start_timeout_penalty(self):
self.tries += 1
self.target_index = -1
def _while_timeout_penalty(self):
'''Nothing generic to do.'''
pass
def _end_timeout_penalty(self):
'''Nothing generic to do.'''
pass
def _start_hold_penalty(self):
self.tries += 1
self.target_index = -1
def _while_hold_penalty(self):
'''Nothing generic to do.'''
pass
def _end_hold_penalty(self):
'''Nothing generic to do.'''
pass
def _start_reward(self):
'''Nothing generic to do.'''
pass
def _while_reward(self):
'''Nothing generic to do.'''
pass
def _end_reward(self):
'''Nothing generic to do.'''
pass
################## State transition test functions ##################
def _test_start_trial(self, time_in_state):
'''Start next trial automatically. You may want this to instead be
- a random delay
- require some initiation action
'''
return True
def _test_timeout(self, time_in_state):
return time_in_state > self.timeout_time
def _test_hold_complete(self, time_in_state):
'''
Test whether the target is held long enough to declare the
trial a success
Possible options
- Target held for the minimum required time (implemented here)
- Sensorized object moved by a certain amount
- Sensorized object moved to the required location
- Manually triggered by experimenter
'''
return time_in_state > self.hold_time
def _test_trial_complete(self, time_in_state):
'''Test whether all targets in sequence have been acquired'''
return self.target_index == self.chain_length-1
def _test_trial_abort(self, time_in_state):
'''Test whether the target capture sequence should just be skipped due to too many failures'''
return (not self._test_trial_complete(time_in_state)) and (self.tries==self.max_attempts)
def _test_trial_incomplete(self, time_in_state):
'''Test whether the target capture sequence needs to be restarted'''
return (not self._test_trial_complete(time_in_state)) and (self.tries<self.max_attempts)
def _test_timeout_penalty_end(self, time_in_state):
return time_in_state > self.timeout_penalty_time
def _test_hold_penalty_end(self, time_in_state):
return time_in_state > self.hold_penalty_time
def _test_reward_end(self, time_in_state):
return time_in_state > self.reward_time
def _test_enter_target(self, time_in_state):
'''This function is task-specific and not much can be done generically'''
return False
def _test_leave_early(self, time_in_state):
'''This function is task-specific and not much can be done generically'''
return False
def update_report_stats(self):
'''
see experiment.Experiment.update_report_stats for docs
'''
super().update_report_stats()
self.reportstats['Trial #'] = self.calc_trial_num()
self.reportstats['Reward/min'] = np.round(self.calc_events_per_min('reward', 120.), decimals=2)
@classmethod
def get_desc(cls, params, report):
'''Used by the database infrastructure to generate summary stats on this task'''
duration = report[-1][-1] - report[0][-1]
reward_count = 0
for item in report:
if item[0] == "reward":
reward_count += 1
return "{} rewarded trials in {} min".format(reward_count, duration)
class ScreenTargetCapture(TargetCapture, Window):
"""Concrete implementation of TargetCapture task where targets
are acquired by "holding" a cursor in an on-screen target"""
background = (0,0,0,1)
cursor_color = (.5,0,.5,1)
plant_type = traits.OptionsList(*plantlist, desc='', bmi3d_input_options=list(plantlist.keys()))
starting_pos = (5, 0, 5)
target_color = (1,0,0,.5)
cursor_visible = False # Determines when to hide the cursor.
no_data_count = 0 # Counter for number of missing data frames in a row
scale_factor = 3.0 #scale factor for converting hand movement to screen movement (with scale_factor 3.0, 1 cm of hand movement = 3 cm of cursor movement)
limit2d = 1
sequence_generators = ['centerout_2D_discrete']
is_bmi_seed = True
_target_color = RED
# Runtime settable traits
reward_time = traits.Float(.5, desc="Length of juice reward")
target_radius = traits.Float(2, desc="Radius of targets in cm")
hold_time = traits.Float(.2, desc="Length of hold required at targets")
hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
timeout_time = traits.Float(10, desc="Time allowed to go between targets")
timeout_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")
max_attempts = traits.Int(10, desc='The number of attempts at a target before\
skipping to the next one')
plant_hide_rate = traits.Float(0.0, desc='If the plant is visible, specifies a percentage of trials where it will be hidden')
plant_type_options = list(plantlist.keys())
plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=list(plantlist.keys()))
plant_visible = traits.Bool(True, desc='Specifies whether entire plant is displayed or just endpoint')
cursor_radius = traits.Float(.5, desc="Radius of cursor")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cursor_visible = True
# Initialize the plant
if not hasattr(self, 'plant'):
self.plant = plantlist[self.plant_type]
self.plant_vis_prev = True
# Add graphics models for the plant and targets to the window
if hasattr(self.plant, 'graphics_models'):
for model in self.plant.graphics_models:
self.add_model(model)
# Instantiate the targets
instantiate_targets = kwargs.pop('instantiate_targets', True)
if instantiate_targets:
target1 = VirtualCircularTarget(target_radius=self.target_radius, target_color=self._target_color)
target2 = VirtualCircularTarget(target_radius=self.target_radius, target_color=self._target_color)
self.targets = [target1, target2]
for target in self.targets:
for model in target.graphics_models:
self.add_model(model)
# Initialize target location variable
self.target_location = np.array([0, 0, 0])
# Declare any plant attributes which must be saved to the HDF file at the _cycle rate
for attr in self.plant.hdf_attrs:
self.add_dtype(*attr)
def init(self):
self.add_dtype('target', 'f8', (3,))
self.add_dtype('target_index', 'i', (1,))
super().init()
def _cycle(self):
'''
Calls any update functions necessary and redraws screen. Runs 60x per second.
'''
self.task_data['target'] = self.target_location.copy()
self.task_data['target_index'] = self.target_index
## Run graphics commands to show/hide the plant if the visibility has changed
if self.plant_type != 'CursorPlant':
if self.plant_visible != self.plant_vis_prev:
self.plant_vis_prev = self.plant_visible
self.plant.set_visibility(self.plant_visible)
# self.show_object(self.plant, show=self.plant_visible)
self.move_effector()
## Save plant status to HDF file
plant_data = self.plant.get_data_to_save()
for key in plant_data:
self.task_data[key] = plant_data[key]
super()._cycle()
def move_effector(self):
'''Move the end effector, if a robot or similar is being controlled'''
pass
def run(self):
'''
See experiment.Experiment.run for documentation.
'''
# Fire up the plant. For virtual/simulation plants, this does little/nothing.
self.plant.start()
try:
super().run()
finally:
self.plant.stop()
##### HELPER AND UPDATE FUNCTIONS ####
def update_cursor_visibility(self):
''' Update cursor visible flag to hide cursor if there has been no good data for more than 3 frames in a row'''
prev = self.cursor_visible
if self.no_data_count < 3:
self.cursor_visible = True
if prev != self.cursor_visible:
self.show_object(self.cursor, show=True)
else:
self.cursor_visible = False
if prev != self.cursor_visible:
self.show_object(self.cursor, show=False)
#### TEST FUNCTIONS ####
def _test_enter_target(self, ts):
'''
return True if the cursor is entirely inside the target, i.e. the distance between the cursor and target centers is at most target_radius - cursor_radius
'''
cursor_pos = self.plant.get_endpoint_pos()
d = np.linalg.norm(cursor_pos - self.target_location)
return d <= (self.target_radius - self.cursor_radius)
def _test_leave_early(self, ts):
'''
return true if | |
#!/usr/bin/env python
# encode.py
# vim:set ts=8 sw=4 sts=4 et:
# Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
import argparse
import array
import cProfile
import hashlib
import math
import pdb
import pprint
import pstats
import struct
import subprocess
import time
import wave
import crcmod
from bitarray import bitarray
from bitstring import BitArray
# ------------------------------------------------------------------------------
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
EXIT_CMDFAILURE = 2
# TTY Colors
NOCOLOR = '\033[0m'
RED = '\033[01;31m'
GREEN = '\033[01;32m'
YELLOW = '\033[01;33m'
BLUE = '\033[01;34m'
MAGENTA = '\033[01;35m'
CYAN = '\033[01;36m'
WHITE = '\033[01;37m'
def msg(s):
print(GREEN + "*", s, NOCOLOR)
def err(s):
print(RED + "!", s, NOCOLOR)
def dbg(s):
if not __debug__:
return
if isinstance(s, dict) or isinstance(s, list):
print(YELLOW + "%", pprint.pformat(s, indent=2), NOCOLOR)
else:
print(YELLOW + "%", s, NOCOLOR)
def sep():
try:
num_columns = int(subprocess.getoutput('stty size').split()[1])
except IndexError:
num_columns = 80
s = "".join(["-" for i in range(num_columns)])
print(WHITE + s + NOCOLOR)
def run_process(s):
if __debug__:
print(CYAN + ">", s, NOCOLOR)
subprocess.call(s, shell=True)
class Timer(object):
def start(self):
self.start_time = int(time.time())
def stop(self):
self.end_time = int(time.time())
def time_delta(self):
return self.end_time - self.start_time
def string_delta(self):
total = self.time_delta()
days = total // 86400
remain = total % 86400
hours = remain // 3600
remain = remain % 3600
minutes = remain // 60
seconds = remain % 60
return str(days) + "d " + str(hours) + "h " + str(minutes) + "m " + str(seconds) + "s"
# ------------------------------------------------------------------------------
crc8 = crcmod.predefined.mkPredefinedCrcFun('crc-8')
crc16 = crcmod.predefined.mkPredefinedCrcFun('crc-16-buypass')
# TODO: This doesn't support signed integers
# TODO: Clean up or consolidate bitarray_from_int and bitarray_from_signed
# TODO: Maybe call this bitarray_from_unsigned
def bitarray_from_int(i, width):
assert i < 2**width
if width == 0:
return bitarray()
return bitarray(('{:0' + str(width) + 'b}').format(i))
def bitarray_from_signed(i, width):
assert i < 2**(width-1)
assert i >= -2**(width-1)
if width == 0:
assert i == 0
return bitarray()
# TODO: Using BitArray is a quick hack. Do two's complement stuff with bitwise operators instead.
bits = BitArray(int=i, length=width)
return bitarray(str(bits.bin))
def utf8_encoded_bitarray_from_int(i):
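    # FLAC stores frame/sample numbers with a UTF-8-style variable-length code: values
    # below 0x80 fit in a single byte, larger values get a lead byte whose prefix
    # (0xC0, 0xE0, 0xF0, 0xF8, 0xFC) encodes the sequence length, followed by
    # 0x80 | <6 payload bits> continuation bytes, as implemented branch by branch below.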
# i < 2**7
if i < 0x80:
return bitarray_from_int(i, 8)
# i < 2**11
if i < 0x800:
bits = bitarray(16)
bits[0:8] = bitarray_from_int(0xC0 | (i >> 6), 8)
bits[8:16] = bitarray_from_int(0x80 | (i & 0x3F), 8)
return bits
# i < 2**16
if i < 0x10000:
bits = bitarray(24)
bits[0:8] = bitarray_from_int(0xE0 | (i >> 12), 8)
bits[8:16] = bitarray_from_int(0x80 | ((i >> 6) & 0x3F), 8)
bits[16:24] = bitarray_from_int(0x80 | (i & 0x3F), 8)
return bits
# i < 2**21
if i < 0x200000:
bits = bitarray(32)
bits[0:8] = bitarray_from_int(0xF0 | ((i >> 18)), 8)
bits[8:16] = bitarray_from_int(0x80 | ((i >> 12) & 0x3F), 8)
bits[16:24] = bitarray_from_int(0x80 | ((i >> 6) & 0x3F), 8)
bits[24:32] = bitarray_from_int(0x80 | (i & 0x3F), 8)
return bits
    # i < 2**26
    if i < 0x4000000:
        bits = bitarray(40)
        bits[0:8] = bitarray_from_int(0xF8 | (i >> 24), 8)
        bits[8:16] = bitarray_from_int(0x80 | ((i >> 18) & 0x3F), 8)
        bits[16:24] = bitarray_from_int(0x80 | ((i >> 12) & 0x3F), 8)
        bits[24:32] = bitarray_from_int(0x80 | ((i >> 6) & 0x3F), 8)
        bits[32:40] = bitarray_from_int(0x80 | (i & 0x3F), 8)
        return bits
    # i < 2**31
    if i < 0x80000000:
        bits = bitarray(48)
        bits[0:8] = bitarray_from_int(0xFC | (i >> 30), 8)
        bits[8:16] = bitarray_from_int(0x80 | ((i >> 24) & 0x3F), 8)
        bits[16:24] = bitarray_from_int(0x80 | ((i >> 18) & 0x3F), 8)
        bits[24:32] = bitarray_from_int(0x80 | ((i >> 12) & 0x3F), 8)
        bits[32:40] = bitarray_from_int(0x80 | ((i >> 6) & 0x3F), 8)
        bits[40:48] = bitarray_from_int(0x80 | (i & 0x3F), 8)
        return bits
assert False, "We shouldn't need to encode any integers that require more than 31 bits"
# ------------------------------------------------------------------------------
BLOCK_SIZE = 4096 # Num samples per block
SAMPLE_RATE = 44100 # Hz
SAMPLE_SIZE = 16 # Num bits per sample
NUM_CHANNELS = 2
MAX_FIXED_PREDICTOR_ORDER = 4
# ------------------------------------------------------------------------------
BLOCK_TYPE_STREAMINFO = 0
BLOCK_TYPE_PADDING = 1
BLOCK_TYPE_APPLICATION = 2
BLOCK_TYPE_SEEKTABLE = 3
BLOCK_TYPE_VORBIS_COMMENT = 4
BLOCK_TYPE_CUESHEET = 5
BLOCK_TYPE_PICTURE = 6
RESIDUAL_CODING_METHOD_PARTITIONED_RICE = 0
RESIDUAL_CODING_METHOD_PARTITIONED_RICE2 = 1
class Stream:
def __init__(self, metadata_blocks, frames):
self.metadata_blocks = metadata_blocks
self.frames = frames
def get_bytes(self):
return b'fLaC' + \
b''.join([block.get_bytes() for block in self.metadata_blocks]) + \
b''.join([frame.get_bytes() for frame in self.frames])
class MetadataBlock:
def __init__(self, metadata_block_header, metadata_block_data):
self.metadata_block_header = metadata_block_header
self.metadata_block_data = metadata_block_data
def get_bytes(self):
return self.metadata_block_header.get_bytes() + \
self.metadata_block_data.get_bytes()
class MetadataBlockHeader:
def __init__(self, last_metadata_block, block_type, length):
self.last_metadata_block = last_metadata_block
self.block_type = block_type
self.length = length
def get_bytes(self):
bits = bitarray(32)
bits[0] = self.last_metadata_block
bits[1:8] = bitarray_from_int(self.block_type, 7)
bits[8:32] = bitarray_from_int(self.length, 24)
return bits.tobytes()
class MetadataBlockStreamInfo:
def __init__(self, num_samples, md5_digest):
self.num_samples = num_samples
self.md5_digest = md5_digest
def get_bytes(self):
bits = bitarray(144)
bits[0:16] = bitarray_from_int(BLOCK_SIZE, 16) # Min block size in samples
bits[16:32] = bitarray_from_int(BLOCK_SIZE, 16) # Max block size in samples
bits[32:56] = 0 # TODO: Min frame size in bytes
bits[56:80] = 0 # TODO: Max frame size in bytes
bits[80:100] = bitarray_from_int(SAMPLE_RATE, 20) # Sample rate in Hz
bits[100:103] = bitarray_from_int(NUM_CHANNELS - 1, 3) # (Num channels) - 1
bits[103:108] = bitarray_from_int(SAMPLE_SIZE - 1, 5) # (Sample size) - 1 in bits per sample
bits[108:144] = bitarray_from_int(self.num_samples, 36) # Total num samples
# bits[144:272] = md5_bits # MD5 signature of the input stream
return bits.tobytes() + self.md5_digest
class Frame:
def __init__(self, frame_number, num_samples, subframes):
self.frame_number = frame_number
self.num_samples = num_samples
self.subframes = subframes
def get_header_bytes(self):
bits = bitarray(32) # Only the first 32 bits are fixed
bits[0:14] = bitarray('11111111111110') # Sync code
bits[14] = 0 # Mandatory Value
bits[15] = 0 # Fixed blocksize stream
bits[16:20] = bitarray('1100') # Num samples, hardcoded to 4096 samples per block. Per the spec, n = 12 ==> 1100. See below for exception.
bits[20:24] = bitarray('1001') # Sample rate, hardcoded to 44.1 kHz
bits[24:28] = bitarray('0001') # Channel assignment, hardcoded to independent stereo
bits[28:31] = bitarray('100') # Sample size, hardcoded to 16 bits per sample
bits[31] = 0 # Mandatory Value
frame_number_bits = utf8_encoded_bitarray_from_int(self.frame_number)
custom_block_size_bits = bitarray()
# The last block can have less than BLOCK_SIZE samples
if self.num_samples != BLOCK_SIZE:
bits[16:20] = bitarray('0111') # Num samples should be retrieved from a separate 16-bit field (custom_block_size_bits)
custom_block_size_bits = bitarray_from_int(self.num_samples - 1, 16)
# We do not have to specify a custom sample rate because the sample rate is fixed to 44.1 kHz
crc_input = (bits + frame_number_bits + custom_block_size_bits).tobytes()
crc_bytes = bytes((crc8(crc_input),))
return crc_input + crc_bytes
def get_subframe_and_padding_bytes(self):
subframe_bits = sum([subframe.get_bits() for subframe in self.subframes], bitarray())
num_padding_bits = 0
if subframe_bits.length() % 8:
num_padding_bits = 8 - (subframe_bits.length() % 8)
padding_bits = bitarray(num_padding_bits) # Allocate padding bits
padding_bits.setall(0) # Set them all to zero
return (subframe_bits + padding_bits).tobytes()
def get_footer_bytes(self):
crc_input = self.get_header_bytes() + self.get_subframe_and_padding_bytes()
crc_bytes = struct.pack('>H', crc16(crc_input))
return crc_bytes
def get_bytes(self):
return self.get_header_bytes() + \
self.get_subframe_and_padding_bytes() + \
self.get_footer_bytes()
# TODO: This is an abstract class
class Subframe:
def __init__(self):
self.header_bits = bitarray(8)
self.data_bits = bitarray()
self.header_bits[0] = 0 # Mandatory value
self.header_bits[1:7] = 0 # These bits must be filled in by a subclass
self.header_bits[7] = 0 # TODO: Wasted bits
def __len__(self):
return self.get_bits().length()
# TODO: Memoize this value
def get_bits(self):
return self.header_bits + self.data_bits
class SubframeConstant(Subframe):
def __init__(self, constant):
super().__init__()
self.header_bits[1:7] = bitarray('000000') # SUBFRAME_CONSTANT
self.data_bits = bitarray_from_signed(constant, SAMPLE_SIZE)
class SubframeVerbatim(Subframe):
def __init__(self, samples):
super().__init__()
self.header_bits[1:7] = bitarray('000001') # SUBFRAME_VERBATIM
verbatim_sample_bytes = struct.pack('>' + str(len(samples)) + 'h', *samples)
self.data_bits.frombytes(verbatim_sample_bytes)
# ------------------------------------------------------------------------------
class SubframeFixed(Subframe):
def __init__(self, predictor_order, | |
import json
import sys
from ..Resource import Resource
from ..Crawler.Fs import FsPath
from ..Crawler import Crawler
from ..Template import Template
from collections import OrderedDict
# compatibility with python 2/3
try:
basestring
except NameError:
basestring = str
class TaskTypeNotFoundError(Exception):
"""Task type not found error."""
class InvalidCrawlerError(Exception):
"""Invalid crawler Error."""
class TaskInvalidOptionError(Exception):
"""Task invalid option error."""
class TaskInvalidMetadataError(Exception):
"""Task invalid metadata error."""
class TaskInvalidOptionValue(Exception):
"""Task invalid option value error."""
class Task(object):
"""
Abstract Task.
A task is used to operate over file paths resolved by the template runner.
Optional options:
- filterTemplate: This is used when querying the crawlers from the task (Task.crawlers).
It works by filtering out crawlers based on the template defined as the option's value.
When the processed template evaluates to "False", "false" or "0", the crawler is filtered
out of the crawlers result.
- emptyFilterResult: This option should be used in combination with
"filterTemplate" and only takes effect when filterTemplate filters out all crawlers.
When assigned "taskCrawlers", the crawlers held by the task are returned as the result
(similar to what happens in Nuke when a node is disabled). Otherwise, when assigned
"empty" (the default), the task returns an empty list.
Task Metadata:
- output.verbose: boolean used to print out the output of the task (default False)
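Illustrative usage sketch ('copy' is a hypothetical task type that would have to be
registered beforehand via Task.register, and someCrawler stands for any Crawler instance):
    task = Task.create('copy')
    task.setMetadata('output.verbose', True)
    task.add(someCrawler, '/tmp/result.exr')
    resultCrawlers = task.output()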
"""
__registered = {}
def __init__(self, taskType):
"""
Create a task object.
"""
self.__crawlers = OrderedDict()
self.__metadata = {}
self.__taskType = taskType
self.__options = {}
# default options
self.setOption('filterTemplate', '')
self.setOption('emptyFilterResult', 'empty')
def type(self):
"""
Return the task type.
"""
return self.__taskType
def metadata(self, scope=''):
"""
Return the metadata.
The metadata is represented as dictionary. You can query the entire
metadata by passing an empty string as scope (default). Otherwise,
you can pass a scope string separating each level by '.' (for instance:
first.second.third).
"""
if not scope:
return self.__metadata
currentLevel = self.__metadata
for level in scope.split('.'):
if level not in currentLevel:
raise TaskInvalidMetadataError(
'Invalid metadata "{}"'.format(scope)
)
currentLevel = currentLevel[level]
return currentLevel
def setMetadata(self, scope, value):
"""
Set an arbitrary metadata.
In case you want to set a multi-dimension value under the metadata,
you can use the scope for it by passing the levels separated by "."
(The levels are created automatically as new dictionaries in case they
don't exist yet). Make sure the data being set inside of the metadata
can be serialized through json.
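For example (illustrative):
    task.setMetadata('output.verbose', True)
    # stores {'output': {'verbose': True}}, so task.metadata('output.verbose') returns True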
"""
assert scope, "scope cannot be empty"
# we want to store an immutable value under the metadata
safeValue = json.loads(json.dumps(value))
# creating auxiliary levels
levels = scope.split('.')
currentLevel = self.__metadata
for level in levels[:-1]:
if level not in currentLevel:
currentLevel[level] = {}
currentLevel = currentLevel[level]
currentLevel[levels[-1]] = safeValue
def metadataNames(self):
"""
Return a list with the names of the root levels under the metadata.
"""
return list(self.__metadata.keys())
def hasMetadata(self, scope):
"""
Return a boolean telling if the input scope is under the metadata.
In case the scope is empty, the result is based on whether there's
any information under the metadata.
"""
if not scope:
return bool(len(self.__metadata))
levels = scope.split('.')
currentLevel = self.__metadata
found = True
for level in levels[:-1]:
if level not in currentLevel:
found = False
break
currentLevel = currentLevel[level]
if found and levels[-1] in currentLevel:
return True
return False
def option(self, name):
"""
Return a value for an option.
"""
if name not in self.__options:
raise TaskInvalidOptionError(
'Invalid option name: "{0}"'.format(
name
)
)
return self.__options[name]
def templateOption(self, name, crawler=None, vars={}):
"""
Return a value resolved by the Template module.
"""
value = self.option(name)
if crawler:
return Template(value).valueFromCrawler(crawler, vars)
else:
return Template(value).value(vars)
def setOption(self, name, value):
"""
Set an option to the task.
"""
self.__options[name] = value
def optionNames(self):
"""
Return a list of the option names.
"""
return list(self.__options.keys())
def target(self, crawler):
"""
Return the target file path for crawler.
"""
if crawler not in self.__crawlers:
raise InvalidCrawlerError(
'Crawler is not part of the task!'
)
return self.__crawlers[crawler]
def crawlers(self, useFilterTemplateOption=True):
"""
Return a list of crawlers associated with the task.
"""
result = list(self.__crawlers.keys())
# filtering the crawler result based on the "filterTemplate" option
filterTemplate = str(self.option('filterTemplate'))
if useFilterTemplateOption and filterTemplate:
filteredResult = []
for crawler in result:
templateResult = Template(filterTemplate).valueFromCrawler(crawler)
if str(templateResult).lower() not in ['false', '0']:
filteredResult.append(crawler)
result = filteredResult
return result
def add(self, crawler, targetFilePath=''):
"""
Add a crawler to the task.
A target file path can be associated with the crawler. It should be
used by tasks that generate files. This information may be provided
by tasks executed through a task holder where the template in the
task holder is resolved and passed as target when adding
the crawler to the task.
"""
assert isinstance(crawler, Crawler), \
"Invalid Crawler!"
assert isinstance(targetFilePath, basestring), \
"targetFilePath needs to be defined as string"
self.__crawlers[crawler] = targetFilePath
def clear(self):
"""
Remove all crawlers associated with the task.
"""
self.__crawlers.clear()
def output(self):
"""
Perform the task and return the list of crawlers it creates.
"""
verbose = self.hasMetadata('output.verbose') and self.metadata('output.verbose')
if verbose:
sys.stdout.write('{0} output:\n'.format(self.type()))
# in case all crawlers were filtered out, returning right away.
# \TODO: we may want the behaviour of not performing the task
# when the task does not have any crawler. Right now, it's only applied
# when all crawlers were filtered out by the filter template option.
if len(self.crawlers(useFilterTemplateOption=False)) and len(self.crawlers()) == 0:
return self.__emptyFilterResult(verbose)
contextVars = {}
for crawler in self.crawlers():
for ctxVarName in crawler.contextVarNames():
if ctxVarName not in contextVars:
contextVars[ctxVarName] = crawler.var(ctxVarName)
outputCrawlers = self._perform()
# Copy all context variables to output crawlers
for outputCrawler in outputCrawlers:
if verbose:
sys.stdout.write(
' - {}\n'.format(
outputCrawler.var('filePath')
)
)
for ctxVarName in contextVars:
outputCrawler.setVar(ctxVarName, contextVars[ctxVarName], True)
# flushing output stream
if verbose:
sys.stdout.flush()
return outputCrawlers
def clone(self):
"""
Clone the current task.
"""
clone = self.__class__(self.type())
# copying options
for optionName in self.optionNames():
clone.setOption(optionName, self.option(optionName))
# copying metadata
for metadataName in self.metadataNames():
clone.setMetadata(metadataName, self.metadata(metadataName))
# copying crawlers
for crawler in self.crawlers():
clone.add(crawler, self.target(crawler))
return clone
def toJson(self):
"""
Serialize a task to json (it can be loaded later through createFromJson).
"""
contents = {
"type": self.type()
}
# current metadata
metadata = self.metadata()
# current options
options = {}
for optionName in self.optionNames():
options[optionName] = self.option(optionName)
# crawler data
crawlerData = []
for crawler in self.crawlers():
crawlerData.append({
'filePath': self.target(crawler),
'serializedCrawler': crawler.toJson()
})
# custom resources
loadedResources = Resource.get().loaded(ignoreFromEnvironment=True)
# only including them as result if they are not empty
if len(metadata):
contents['metadata'] = metadata
if len(options):
contents['options'] = options
if len(crawlerData):
contents['crawlerData'] = crawlerData
if len(loadedResources):
contents['resources'] = loadedResources
return json.dumps(
contents,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
@staticmethod
def register(name, taskClass):
"""
Register a task type.
"""
assert issubclass(taskClass, Task), \
"Invalid task class!"
Task.__registered[name] = taskClass
@staticmethod
def registeredNames():
"""
Return a list of registered tasks.
"""
return list(Task.__registered.keys())
@staticmethod
def create(taskType, *args, **kwargs):
"""
Create a task object.
"""
if taskType not in Task.__registered:
raise TaskTypeNotFoundError(
'Task name is not registered: "{0}"'.format(
taskType
)
)
return Task.__registered[taskType](taskType, *args, **kwargs)
@staticmethod
def createFromJson(jsonContents):
"""
Create a task based on the jsonContents (serialized via toJson).
"""
contents = json.loads(jsonContents)
taskType = contents["type"]
taskOptions = contents.get("options", {})
taskMetadata = contents.get("metadata", {})
crawlerData = contents.get("crawlerData", [])
loadResources = contents.get("resources", [])
# loading resources
for loadResource in loadResources:
if loadResource in Resource.get().loaded():
continue
Resource.get().load(loadResource)
# loading task
task = Task.create(taskType)
# setting task options
for optionName, optionValue in taskOptions.items():
task.setOption(optionName, optionValue)
# setting task metadata
for metadataName, metadataValue in taskMetadata.items():
task.setMetadata(metadataName, metadataValue)
# adding crawlers
for crawlerDataItem in crawlerData:
filePath = crawlerDataItem['filePath']
crawler = Crawler.createFromJson(
crawlerDataItem['serializedCrawler']
)
task.add(crawler, filePath)
return task
def _perform(self):
"""
For re-implementation: should implement the computation of the task and return a list of crawlers as output.
The default implementation returns a list of crawlers based on the target filePath (the filePath is provided
by the template). In case no file path has been specified, it returns an empty list of crawlers.
"""
filePaths | |
mock_containers_run.return_value = None
self.assertIs(self.obj.launch_job(job_id, job_exec_id, nuvla), None,
'Unable to launch job execution container')
mock_containers_run.assert_called_once()
mock_net_connect.assert_called_once_with(job_exec_id, 'bridge')
def test_collect_container_metrics_cpu(self):
cpu_stat = {
"cpu_stats": {
"cpu_usage": {
"total_usage": "10"
},
"system_cpu_usage": "100",
"online_cpus": 2
},
}
old_cpu_total = 5
old_cpu_system = 50
err = []
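        # with these fixtures the usual Docker formula gives
        # ((10 - 5) / (100 - 50)) * 2 online CPUs * 100 = 20% usage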
# if all is well, we should expect a float value bigger than 0
self.assertIsInstance(self.obj.collect_container_metrics_cpu(cpu_stat, old_cpu_total, old_cpu_system, err),
float,
"Received unexpected type of CPU usage percentage for container")
self.assertEqual(self.obj.collect_container_metrics_cpu(cpu_stat, old_cpu_total, old_cpu_system, err), 20.0,
"The provided default should return a CPU usage of 20%, but that was not the case")
self.assertEqual(len(err), 0,
"There should not have been any CPU collection errors")
# if online_cpus is not reported, then we get 0% usage
cpu_stat['cpu_stats'].pop('online_cpus')
self.assertEqual(self.obj.collect_container_metrics_cpu(cpu_stat, old_cpu_total, old_cpu_system, err), 0.0,
"Expecting 0% CPU usage due to lack of details, but got something else")
# if a mandatory attribute does not exist, then we get 0% again, but with an error
cpu_stat.pop('cpu_stats')
self.assertEqual(self.obj.collect_container_metrics_cpu(cpu_stat, old_cpu_total, old_cpu_system, err), 0.0,
"Expecting 0% CPU usage due to missing mandatory keys, but got something else")
self.assertGreater(len(err), 0,
"Expecting an error due to the lack to CPU info to collect, but did not get any")
def test_collect_container_metrics_mem(self):
mem_stat = {
"memory_stats": {
"usage": 1024*1024,
"limit": 2*1024*1024
}
}
err = []
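        # 1 MiB of usage against a 2 MiB limit is expected back as (50.0, 1, 2),
        # i.e. the usage percentage plus usage/limit (apparently reported in MiB)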
# if all is well, we expect a float value higher than 0.0%
self.assertEqual(self.obj.collect_container_metrics_mem(mem_stat, err), (50.0, 1, 2),
"Expecting a memory usage of 50%, but got something else instead")
self.assertEqual(len(err), 0,
"There should not have been any Memory collection errors")
# if the memory limit is set to 0, then we expect 0%, with no errors
mem_stat['memory_stats']['limit'] = 0
self.assertEqual(self.obj.collect_container_metrics_mem(mem_stat, err), (0.0, 1, 0),
"Expecting a memory usage of 50%, but got something else instead")
self.assertEqual(len(err), 0,
"There should not have been any Memory collection errors, even though the results was 0%")
# if there are missing fields, then an error should be added, and 0% should be returned
mem_stat.pop('memory_stats')
self.assertEqual(self.obj.collect_container_metrics_mem(mem_stat, err), (0.0, 0.0, 0.0),
"Expecting 0% due to missing fields, but got something else")
self.assertGreater(len(err), 0,
"There should have been Memory collection errors since fields are missing")
def test_collect_container_metrics_net(self):
net_stat = {
"networks": {
"iface1": {
"rx_bytes": 1*1000*1000,
"tx_bytes": 1*1000*1000
},
"iface2": {
"rx_bytes": 1*1000*1000,
"tx_bytes": 1*1000*1000
}
}
}
# if all goes well, we expect 2MB received and 2MB sent
self.assertEqual(self.obj.collect_container_metrics_net(net_stat), (2, 2),
'Failed to sum network counters')
def test_collect_container_metrics_block(self):
blk_stat = {
"blkio_stats": {
"io_service_bytes_recursive": [
{
"value": 1*1000*1000
},
{
"value": 2*1000*1000
}]
}
}
err = []
# if all goes well, we expect 1MB blk_in and 2MB blk_out, and no errors
self.assertEqual(self.obj.collect_container_metrics_block(blk_stat, err), (2, 1),
'Failed to get block statistics for a container')
self.assertEqual(err, [],
'Reporting errors on blk stats when there should not be any')
# if the blk_stats are misformatted or there is any other exception during collection,
# then we get 0MBs for the corresponding metric, and an error
blk_stat['blkio_stats']['io_service_bytes_recursive'][0]['value'] = "saasd" # not a number
self.assertEqual(self.obj.collect_container_metrics_block(blk_stat, err), (2, 0),
'Expected 0MBs for blk_in (due to misformatted value, but got something else instead')
self.assertEqual(err, ['blk_in'],
'An error occurred while collecting the container blk_in, but it was not reported')
# if blkio stats are missing a field, then we expect (0,0) MBs
blk_stat.pop('blkio_stats')
err = []
self.assertEqual(self.obj.collect_container_metrics_block(blk_stat, err), (0, 0),
'Expected 0MBs for container block stats (due to missing stats), but got something else')
self.assertEqual(err, [],
'There should be no errors reported when blk stats are not given by Docker')
@mock.patch('agent.common.NuvlaBoxCommon.DockerClient.collect_container_metrics_block')
@mock.patch('agent.common.NuvlaBoxCommon.DockerClient.collect_container_metrics_net')
@mock.patch('agent.common.NuvlaBoxCommon.DockerClient.collect_container_metrics_mem')
@mock.patch('agent.common.NuvlaBoxCommon.DockerClient.collect_container_metrics_cpu')
@mock.patch('docker.api.container.ContainerApiMixin.stats')
@mock.patch('docker.models.containers.ContainerCollection.list')
def test_collect_container_metrics(self, mock_containers_list, mock_container_stats, mock_get_cpu,
mock_get_mem, mock_get_net, mock_get_block):
# if there are no containers, we should get an empty list
mock_containers_list.return_value = []
stats = []
mock_container_stats.return_value = iter(stats)
self.assertEqual(self.obj.collect_container_metrics(), [],
'Get container stats when there are no containers running')
# otherwise...
mock_containers_list.return_value = [fake.MockContainer()]
mock_get_mem.return_value = (1, 2 ,3)
mock_get_cpu.return_value = 50
mock_get_net.return_value = (1, 2)
mock_get_block.return_value = (1, 2)
# if one container has malformed CPU stats, the "old_cpu" variable should be set to (0,0) when collecting CPU
old_cpu_total_usage = 1
old_cpu_system_cpu_usage = 1
cpu_stats = {
"cpu_usage": {
"total_usage": old_cpu_total_usage
},
"system_cpu_usage": old_cpu_system_cpu_usage,
"online_cpus": 2
}
stats = [
'{"cpu_stats": {}}',
'{"cpu_stats": %s}' % json.dumps(cpu_stats)
]
mock_container_stats.return_value = iter(stats)
self.assertIsInstance(self.obj.collect_container_metrics(), list,
'Expecting a list from the container metrics collection, but got something else')
# there is only 1 container, so each collector should only have been called once
mock_get_cpu.assert_called_once_with(json.loads(stats[1]), 0, 0, [])
# if all containers have valid stats though, we should expect the "old_cpu" to be different from (0,0)
# and the output to contain all the expected fields to be included in the telemetry
mock_get_cpu.reset_mock()
stats[0] = stats[1]
mock_container_stats.return_value = iter(stats)
expected_fields = ['id', 'name', 'container-status',
'cpu-percent', 'mem-usage-limit', 'mem-percent',
'net-in-out', 'blk-in-out', 'restart-count']
self.assertTrue(set(expected_fields).issubset(list(self.obj.collect_container_metrics()[0].keys())),
'Received malformed container stats from the statistics collection mechanism')
mock_get_cpu.assert_called_once_with(json.loads(stats[1]), old_cpu_total_usage, old_cpu_system_cpu_usage, [])
@mock.patch('agent.common.NuvlaBoxCommon.socket.gethostname')
@mock.patch('docker.models.containers.ContainerCollection.get')
@mock.patch('docker.models.containers.ContainerCollection.list')
def test_get_installation_parameters(self, mock_containers_list, mock_containers_get, mock_gethostname):
search_label = 'fake-label'
agent_id = 'my-fake-id'
# if the agent container cannot find itself, it raises an exception
mock_containers_list.return_value = [fake.MockContainer(myid=agent_id), fake.MockContainer()]
mock_gethostname.return_value = 'fake-hostname'
mock_containers_get.side_effect = docker.errors.NotFound('', requests.Response())
self.assertRaises(docker.errors.NotFound, self.obj.get_installation_parameters, search_label)
# otherwise...
mock_containers_get.reset_mock(side_effect=True)
mock_containers_get.return_value = fake.MockContainer(myid=agent_id)
# since all labels exist, the output should container the respective fields for the telemetry
expected_fields = ['project-name', 'working-dir', 'config-files', 'environment']
self.assertIsInstance(self.obj.get_installation_parameters(search_label), dict,
'Expecting installation parameters to be a JSON structure')
self.assertTrue(set(expected_fields).issubset(self.obj.get_installation_parameters(search_label)),
f'Installation parameters are missing the required telemetry fields: {expected_fields}')
# if containers have labels that are supposed to be ignored, these should not be in the returned value
new_agent_container = fake.MockContainer(myid=agent_id)
ignore_env = f'{self.obj.ignore_env_variables[0]}=some-fake-env-value-to-ignore'
new_agent_container.attrs['Config']['Env'] = [ignore_env]
mock_containers_list.return_value = [fake.MockContainer(), new_agent_container]
mock_containers_get.return_value = new_agent_container
self.assertNotIn(ignore_env,
self.obj.get_installation_parameters(search_label)['environment'],
'Unwanted environment variables are not being properly ignored')
# other environment variables will be included though
include_env = 'some-env=some-fake-env-value-NOT-to-ignore'
new_agent_container.attrs['Config']['Env'].append(include_env)
mock_containers_list.return_value = [fake.MockContainer(), new_agent_container]
mock_containers_get.return_value = new_agent_container
self.assertIn(include_env,
self.obj.get_installation_parameters(search_label)['environment'],
'Expected environment variables are not in the final parameters')
# and also make sure the config-files are not duplicated, even if there are many containers reporting
# the same filenames
mock_containers_list.return_value = [fake.MockContainer(), new_agent_container, fake.MockContainer()]
self.assertEqual(sorted(self.obj.get_installation_parameters(search_label)['config-files']),
sorted(new_agent_container.labels['com.docker.compose.project.config_files'].split(',')),
'Installation config files are not reported correctly')
# finally, if one of the compose file labels are missing from the agent_container, we get None
new_agent_container.labels['com.docker.compose.project'] = None
mock_containers_get.return_value = new_agent_container
self.assertIsNone(self.obj.get_installation_parameters(search_label),
'Expected no installation parameters due to missing Docker Compose labels, but got something')
def test_read_system_issues(self):
node_info = {
'Swarm': {
'Error': 'some-fake-error'
},
'Warnings': ['fake-warn-1', 'fake-warn-2']
}
# if all is good, we should get 1 error and 2 warnings
self.assertEqual(self.obj.read_system_issues(node_info), ([node_info['Swarm']['Error']], node_info['Warnings']),
'Got unexpected system errors/warnings')
# and if there are no errors nor warnings, we should get two empty lists
self.assertEqual(self.obj.read_system_issues({}), ([], []),
'Expected no errors nor warnings, but got something instead')
def test_get_node_id(self):
node_info = {
'Swarm': {
'NodeID': 'some-fake-id'
}
}
# should always return the ID value indicated in the passed argument
self.assertEqual(self.obj.get_node_id(node_info), node_info['Swarm']['NodeID'],
'Returned NodeID does not match the real one')
def test_get_cluster_id(self):
node_info = {
'Swarm': {
'Cluster': {
'ID': 'some-fake-cluster-id'
}
}
}
# should always return the ID value indicated in the passed argument
# and ignore the named argument
self.assertEqual(self.obj.get_cluster_id(node_info, default_cluster_name='some-name'),
node_info['Swarm']['Cluster']['ID'],
'Returned Cluster ID does not match the real one')
@mock.patch('agent.common.NuvlaBoxCommon.DockerClient.get_node_info')
def test_get_cluster_managers(self, mock_get_node):
node_info = {
'Swarm': {
'RemoteManagers': [{'NodeID': 'manager-1'}, {'NodeID': 'manager-2'}]
}
}
mock_get_node.return_value = node_info
# if all is good, we should get the managers IDs
self.assertEqual(self.obj.get_cluster_managers(),
list(map(lambda x: x['NodeID'], node_info['Swarm']['RemoteManagers'])),
'Did not get the expected cluster managers IDs')
# but if there are none, we get an empty list
mock_get_node.return_value = {}
self.assertEqual(self.obj.get_cluster_managers(), [],
'Did not get the expected cluster managers IDs')
def test_get_host_architecture(self):
node_info = {
'Architecture': 'fake-arch'
}
# simple attribute lookup
self.assertEqual(self.obj.get_host_architecture(node_info), node_info['Architecture'],
'Host architecture does not match the real one')
def test_get_hostname(self):
node_info = {
'Name': 'fake-name'
}
# simple attribute lookup
self.assertEqual(self.obj.get_hostname(node_info), node_info['Name'],
'Hostname does not match the real one')
@mock.patch('agent.common.NuvlaBoxCommon.DockerClient.get_node_info')
def test_get_cluster_join_address(self, mock_get_node):
node_id = 'fake-node-id'
node_info = {
'Swarm': {
'RemoteManagers': [{'NodeID': node_id, 'Addr': 'good-addr'},
{'NodeID': | |
the transition scores
args['output_dir'] - the directory to hold the activity scores
args['activity_weights']: a dictionary of transition names to
dictionary of activity weights, ex: {'transition_name_0': {
'activity0: 1 or 0,
...
},
...}
args['results_suffix'] - a suffix to append to each output filename
args['transition_dictionary'] - a mapping of
{ 'transition_name_0': {'raster_value': id, ...}, ...}
(the transition names are the same as in args['activity_weights'])
returns nothing"""
pygeoprocessing.geoprocessing.create_directories([args['output_dir']])
#Get the basenames but sort them by raster_value in the
#transition dictionary. This sorts the items in
#args['transition_dictionary'] by the raster value, then
#extracts only the key. This way we know transition_basenames
#is in order by raster_value key
transition_basenames = [x[0] for x in sorted(
args['transition_dictionary'].items(),
key=lambda x: x[1]['raster_value'])]
transition_uris = [
os.path.join(args['input_dir'], x + '%s.tif' % args['results_suffix'])
for x in transition_basenames]
activity_basenames = args['activity_weights'][transition_basenames[0]].keys()
for activity_basename in activity_basenames:
_calculate_activity_score(
activity_basename, transition_basenames, transition_uris, args,
args['cell_size'])
def _calculate_activity_score(
activity_basename, transition_basenames, transition_uris, args, cell_size):
activity_weights = [
args['activity_weights'][x][activity_basename]
for x in transition_basenames]
def _weighted_activity_average(*pixels):
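        # Weighted average of the per-transition activity scores for this activity,
        # weighting each transition raster by its activity weight.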
weight = numpy.zeros(pixels[0].shape)
for index, value in enumerate(pixels):
weight += value * activity_weights[index]
return weight / sum(activity_weights)
#Boilerplate for vectorize_datasets
activity_out_uri = os.path.join(
args['output_dir'], activity_basename + '%s.tif' %
args['results_suffix'])
activity_nodata = -1.0
pygeoprocessing.geoprocessing.vectorize_datasets(
transition_uris, _weighted_activity_average, activity_out_uri,
gdal.GDT_Float32, activity_nodata, cell_size, "intersection",
vectorize_op=False)
#Make a max transition raster for a particular activity
transition_activity_out_uri = os.path.join(
args['output_dir'], 'max_transition_%s%s.tif' %
(activity_basename, args['results_suffix']))
transition_nodata = -1
def _max_transition_activity(*pixels):
"""This calculates the maximum weighted activity
occurring on a pixel. If the value is 0.0 we consider it
nodata."""
#Weight all the incoming pixels
weighted_pixels = numpy.array([
value * activity_weights[index] for index, value in
enumerate(pixels)])
#Now find the max indexes
max_index = numpy.argmax(weighted_pixels, axis=0)
indices = numpy.indices(max_index.shape)
max_value = weighted_pixels[max_index, indices[0] ,indices[1]]
return numpy.where(max_value > 0, max_index, transition_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
transition_uris, _max_transition_activity, transition_activity_out_uri,
gdal.GDT_Int32, transition_nodata, cell_size, "intersection",
vectorize_op=False)
def calculate_activity_portfolio(args, report_data=None):
"""Does the portfolio selection given activity scores, budgets and
shapefile restrictions.
args['activities'] - a dictionary describing activity issues like
{'activity_name0':
{'out_id': id that goes in a raster,
'measurement_unit': 'area' or 'linear',
'unit_cost': unit cost,
'prioritization_raster_uri': uri to the activity score layer}
...}
args['budget_config'] - dictionary to describe budget selection
{'years_to_spend': an integer >= 1,
'activity_budget':
{'activity0': {'budget_amount': float >= 0.0},
...}
'if_left_over': 'Report remainder' or 'Proportionally reallocate',
'floating_budget': float >= 0.0}
args['lulc_activity_potential_map'] - a data structure mapping lulc ids
to the activities allowed on the map, ex:
{'general_lucode0':
{'lucode0': '[list of user's lucodes, '32', ...]',
'activities': '[list of allowed activities, 'activity_1', ...]'
},
...},
args['activity_shapefiles']: [prefer/prevent shapefile uris, ...],
args['output_dir']: uri for portfolio outputs,
args['activity_portfolio_uri']: uri for the explicit total activity
portfolio (this must be known at a global level for intercomponent
connectivity)
args['max_transition_activity_portfolio_uri']: uri to the max transition
raster that causes each activity
args['lulc_uri']: a link to the original land cover map
args['results_suffix'] - a suffix to append to each output filename
args['activity_lookup_table_uri'] - a uri to dump the activity lookup table
to.
args['transition_dictionary'] - a python dictionary that maps transition ids
to transition names
report_data - (optional) an input list that, once the function returns, has the form
[{
year_index: n,
floating_budget: n,
activity_budget: {
'activity_n': n,
...},
activity_spent: {
'activity_n': n,
...},
area_converted: { in Ha
'activity_n': n,
...}
},...
]
returns nothing"""
#This will keep track of the budget spending
if report_data == None:
report_data = []
pygeoprocessing.geoprocessing.create_directories([args['output_dir']])
budget_selection_activity_uris = {}
activity_nodata = -1.0
#Calculate the amount to offset the activity costs based on 1+ the min per
#pixel cost. This will let us uniformly offset all the activity scores in
    #a way that all the preferred activities will come first but still be
#relatively sorted by priority and ROI.
min_activity_cost = min([x['unit_cost'] for x in args['activities'].itervalues()])
prefer_boost = min_activity_cost + 1.0
#These will be used for heapq.merge iterators later
activity_iterators = {}
activity_list = sorted(args['activities'].keys())
id_to_activity_dict = {}
for index, activity_name in enumerate(activity_list):
id_to_activity_dict[index] = activity_name
_dump_to_table(
id_to_activity_dict, args['activity_lookup_table_uri'], 'activity_id',
'activity_type')
#This will become a list of per activity costs indexed by
#activity_index
activity_cost = []
    #This will be used to record what activity ID in a raster matches
#to what real world activity.
activity_raster_lookup = {}
for activity_name in activity_list:
args['activities'][activity_name]['prioritization_raster_uri']
budget_selection_activity_uris[activity_name] = (
args['activities'][activity_name]['prioritization_raster_uri'] + '_prioritization.tif')
pixel_size_out = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
args['lulc_uri'])
for activity_index, activity_name in enumerate(activity_list):
activity_dict = args['activities'][activity_name]
activity_raster_lookup[activity_name] = {
'index': activity_index,
'uri': activity_dict['prioritization_raster_uri']
}
#Get the normalized cost of activity per unit area, then multiply by
#the area of a cell to get the per cell cost. Build as an index for
#later usage in activity selection
per_cell_cost = (
args['activities'][activity_name]['unit_cost'] /
args['activities'][activity_name]['measurement_value']
* pixel_size_out ** 2)
activity_cost.append(per_cell_cost)
_mask_activity_areas(
args, activity_dict['prioritization_raster_uri'],
activity_name, activity_index, activity_nodata,
budget_selection_activity_uris[activity_name], per_cell_cost,
prefer_boost, pixel_size_out)
LOGGER.info('sort the prefer/prevent/activity score to disk')
for activity_index, activity_name in enumerate(activity_list):
#Creating the activity iterators here, sorting by highest to lowest.
activity_iterators[activity_index] = natcap.rios.disk_sort.sort_to_disk(
budget_selection_activity_uris[activity_name],
activity_index, score_weight=-1.0)
#This section counts how many pixels TOTAL we have available for setting
total_available_pixels = 0
available_mask_uri = pygeoprocessing.geoprocessing.temporary_filename()
def _mask_maker(*activity_score):
"""Used to make an activity mask"""
nodata_mask = numpy.empty(activity_score[0].shape, dtype=numpy.bool)
nodata_mask[:] = True
for score in activity_score:
nodata_mask = nodata_mask & (score == activity_nodata)
return numpy.where(nodata_mask, activity_nodata, 1)
#if all(activity_nodata == score for score in activity_score):
# return activity_nodata
#return 1
pygeoprocessing.geoprocessing.vectorize_datasets(
budget_selection_activity_uris.values(), _mask_maker,
available_mask_uri, gdal.GDT_Byte, activity_nodata, pixel_size_out,
"intersection", dataset_to_align_index=0, vectorize_op=False)
mask_ds = gdal.Open(available_mask_uri)
mask_band = mask_ds.GetRasterBand(1)
n_rows, n_cols = pygeoprocessing.geoprocessing.get_row_col_from_uri(
available_mask_uri)
#Make a consistent directory registry.
directory_registry = {
'continuous_activity_portfolio': os.path.join(
args['output_dir'], 'continuous_activity_portfolios'),
'yearly_activity_portfolio': os.path.join(
args['output_dir'], 'yearly_activity_portfolios'),
'total_activity_portfolio': os.path.join(
args['output_dir'])
}
pygeoprocessing.geoprocessing.create_directories(
directory_registry.values())
for row_index in range(n_rows):
mask_array = mask_band.ReadAsArray(0, row_index, n_cols, 1)
#count the number of pixels not nodata
total_available_pixels += numpy.sum(mask_array == 1)
mask_band = None
mask_ds = None
activity_array = numpy.memmap(
pygeoprocessing.geoprocessing.temporary_filename(), dtype=numpy.ubyte,
mode='w+', shape=(n_rows * n_cols,))
activity_nodata = 255
activity_array[:] = activity_nodata
for year_index in xrange(args['budget_config']['years_to_spend']):
LOGGER.info('create a portfolio dataset for year %s', year_index + 1)
#Make a copy of the floating and activity budget.
try:
floating_budget = float(args['budget_config']['floating_budget'])
except ValueError:
            # happens in the off chance that the floating_budget is an empty
# string.
floating_budget = 0.
#The activity budget is indexed in the same order as activity
        #list, also the same order in which activity_indexes were stored
#in the heap iterators
activity_budget = [
args['budget_config']['activity_budget'][activity_name]
['budget_amount'] for activity_name in activity_list]
#Record the spending
report_data_dict = {
'year_index': year_index,
'floating_budget': floating_budget,
}
#This makes a dictionary of activity name to activity budget
report_data_dict['activity_budget'] = dict([
(activity_name,
args['budget_config']['activity_budget'][activity_name]['budget_amount']) for
activity_name in activity_list])
#This makes a dictionary of activity name to 0.0, used for spending recording later
report_data_dict['activity_spent'] = dict([
(activity_name, 0.0) for activity_name in activity_list])
report_data_dict['area_converted'] = dict([
(activity_name, 0.0) for activity_name in activity_list])
#We'll use this as a data structure to keep track of how many pixels
#we can spend in each activity
max_possible_activity_pixels = [
int(budget/cost) for budget, cost in
zip(activity_budget, activity_cost)]
heap_empty = False
while (sum(max_possible_activity_pixels) > 0 and
total_available_pixels > 0 and not heap_empty):
#Assemble the activity iterator by only including those iterators
#that have budget on the pixel
valid_activity_iterators = []
for activity_index, pixel_budget in enumerate(
max_possible_activity_pixels):
if pixel_budget > 0:
valid_activity_iterators.append(activity_iterators[activity_index])
if len(valid_activity_iterators) == 0:
                #No activity budget left for any pixels, break
break
activity_iterator = heapq.merge(*valid_activity_iterators)
            #The heap might be empty, if it's not, we'll get inside the
#for loop and reset it. This saves us from the tricky case to see if
#there are any elements left to generate since we can't easily peek
#ahead on the activity_iterator
heap_empty = True
for _, flat_index, activity_index in activity_iterator:
heap_empty = False
#See if the pixel has already been allocated
if activity_array[flat_index] != activity_nodata:
continue
#Otherwise, allocate the pixel
if total_available_pixels % 10000 == 0:
LOGGER.info("year %s activity: allocating pixels for activity %s pixels left %s" % (year_index + 1, activity_index,total_available_pixels))
activity_array[flat_index] = activity_index
activity_budget[activity_index] -= activity_cost[activity_index]
                #This is a complicated index because I set up everything to be
#indexed by activity index, but in the report we dump according
#to activity_name
report_data_dict['activity_spent'][activity_list[activity_index]] += activity_cost[activity_index]
#the 10,000 is to | |
# labelsize = 13
# legendsize = 12
# fig1 = plt.figure(constrained_layout=False)
# gs1 = fig1.add_gridspec(nrows=2, ncols=1, bottom=0.55, top=0.95, left=0.12, right=0.35, height_ratios=[1, 1])
# gs2 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.55, top=0.95, left=0.5, right=0.98)
# gs3 = fig1.add_gridspec(nrows=1, ncols=2, bottom=0.08, top=0.4, left=0.12, right=0.96, wspace=0.3)
# ax_pol = fig1.add_subplot(gs1[0], frame_on=False); ax_pol.get_xaxis().set_visible(False); ax_pol.get_yaxis().set_visible(False)
# ax_bogo = fig1.add_subplot(gs1[1])
# ax_PD = fig1.add_subplot(gs2[0])
# ax_supDist = fig1.add_subplot(gs3[0])
# ax_subDist = fig1.add_subplot(gs3[1])
# fig1.text(0.01, 0.97, '(a)', fontsize=labelsize)
# fig1.text(0.01, 0.75, '(b)', fontsize=labelsize)
# fig1.text(0.43, 0.97, '(c)', fontsize=labelsize)
# fig1.text(0.01, 0.42, '(d)', fontsize=labelsize)
# fig1.text(0.51, 0.42, '(e)', fontsize=labelsize)
# # POLARON GRAPHIC
# polimg = mpimg.imread('images/PolaronGraphic.png')
# imgplot = ax_pol.imshow(polimg)
# # BOGOLIUBOV DISPERSION (SPHERICAL)
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds.coords['k'].values); kgrid.initArray_premade('th', qds.coords['th'].values)
# kVals = kgrid.getArray('k')
# wk_Vals = pfs.omegak(kVals, mB, n0, gBB)
# ax_bogo.plot(kVals, wk_Vals, 'k-', label='')
# ax_bogo.plot(kVals, nu * kVals, 'b--', label=r'$c|k|$')
# ax_bogo.set_xlabel(r'$|k|$', fontsize=labelsize)
# ax_bogo.set_ylabel(r'$\omega_{|k|}$', fontsize=labelsize)
# ax_bogo.set_xlim([0, 2])
# ax_bogo.xaxis.set_major_locator(plt.MaxNLocator(2))
# ax_bogo.set_ylim([0, 3])
# ax_bogo.yaxis.set_major_locator(plt.MaxNLocator(3))
# ax_bogo.legend(loc=2, fontsize=legendsize)
# # PHASE DIAGRAM (SPHERICAL)
# Pcrit = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).isel(t=-1).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# # Pcrit[aind] = Pinf_Vals[np.argwhere(Einf_2ndderiv_Vals < 0)[-2][0] + 3]
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0] # there is a little bit of fudging with the -3 here so that aIBi=-10 gives me Pcrit/(mI*c) = 1 -> I can also just generate data for weaker interactions and see if it's better
# Pcrit_norm = Pcrit / (mI * nu)
# Pcrit_tck = interpolate.splrep(aIBi_Vals, Pcrit_norm, s=0, k=3)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# Pcrit_interpVals = 1 * interpolate.splev(aIBi_interpVals, Pcrit_tck, der=0)
# print(Pcrit_norm)
# print(Pcrit_norm[1], Pcrit_norm[5], Pcrit_norm[-5])
# scalefac = 1.0
# # scalefac = 0.95 # just to align weakly interacting case slightly to 1 (it's pretty much there, would just need higher resolution data)
# Pcrit_norm = scalefac * Pcrit_norm
# Pcrit_interpVals = scalefac * Pcrit_interpVals
# xmin = np.min(aIBi_interpVals / xi)
# xmax = 1.01 * np.max(aIBi_interpVals / xi)
# ymin = 0
# ymax = 1.01 * np.max(Pcrit_interpVals)
# font = {'family': 'serif', 'color': 'black', 'size': legendsize}
# sfont = {'family': 'serif', 'color': 'black', 'size': legendsize - 1}
# ax_PD.plot(aIBi_Vals / xi, Pcrit_norm, 'kx')
# ax_PD.plot(aIBi_interpVals / xi, Pcrit_interpVals, 'k-')
# # f1 = interpolate.interp1d(aIBi_Vals, Pcrit_norm, kind='cubic')
# # ax_PD.plot(aIBi_interpVals, f1(aIBi_interpVals), 'k-')
# ax_PD.set_xlabel(r'$a_{IB}^{-1}$ [$\xi$]', fontsize=labelsize)
# ax_PD.set_ylabel(r'Total Momentum $P$ [$m_{I}c$]', fontsize=labelsize)
# ax_PD.set_xlim([xmin, xmax])
# ax_PD.set_ylim([ymin, ymax])
# ax_PD.fill_between(aIBi_interpVals / xi, Pcrit_interpVals, ymax, facecolor='b', alpha=0.25)
# ax_PD.fill_between(aIBi_interpVals / xi, ymin, Pcrit_interpVals, facecolor='g', alpha=0.25)
# ax_PD.text(-3.2, ymin + 0.175 * (ymax - ymin), 'Polaron', fontdict=font)
# ax_PD.text(-3.1, ymin + 0.1 * (ymax - ymin), '(' + r'$Z>0$' + ')', fontdict=sfont)
# # ax_PD.text(-6.5, ymin + 0.6 * (ymax - ymin), 'Cherenkov', fontdict=font)
# # ax_PD.text(-6.35, ymin + 0.525 * (ymax - ymin), '(' + r'$Z=0$' + ')', fontdict=sfont)
# ax_PD.text(-12.8, ymin + 0.86 * (ymax - ymin), 'Cherenkov', fontdict=font)
# ax_PD.text(-12.65, ymin + 0.785 * (ymax - ymin), '(' + r'$Z=0$' + ')', fontdict=sfont)
# supDist_coords = [-5.0 / xi, 3.0] # is [aIBi/xi, P/(mI*c)]
# subDist_coords = [-5.0 / xi, 0.5] # is [aIBi/xi, P/(mI*c)]
# ax_PD.plot(supDist_coords[0], supDist_coords[1], linestyle='', marker='8', mec='#8f1402', mfc='#8f1402', ms=10)
# ax_PD.plot(subDist_coords[0], subDist_coords[1], linestyle='', marker='8', mec='#8f1402', mfc='#8f1402', ms=10)
# # IMPURITY DISTRIBUTION (CARTESIAN)
# GaussianBroadening = True; sigma = 0.1
# incoh_color = '#8f1402'
# delta_color = '#bf9005'
# def GPDF(xVals, mean, stdev):
# return (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # return (1 / (1 * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# aIBi = -5
# qds_aIBi = xr.open_dataset(innerdatapath_cart + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# nPIm_FWHM_indices = []
# nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# nPIm_FWHM_Vals = np.zeros(PVals.size)
# nPIm_distPeak_Vals = np.zeros(PVals.size)
# nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# nPIm_Tot_Vals = np.zeros(PVals.size)
# nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# PIm_Vec = np.empty(PVals.size, dtype=np.object)
# for ind, P in enumerate(PVals):
# qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# dPIm = PIm_Vals[1] - PIm_Vals[0]
# nPIm_Vec[ind] = qds_nPIm_inf.values
# PIm_Vec[ind] = PIm_Vals
# # # Calculate nPIm(t=inf) normalization
# nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # Calculate FWHM, distribution peak, and delta peak
# nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# indices = np.where(D > 0)[0]
# nPIm_FWHM_indices.append((indices[0], indices[-1]))
# nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# Pnorm = PVals / (mI * nu)
# Pratio_sup = 3.0; Pind_sup = np.abs(Pnorm - Pratio_sup).argmin()
# Pratio_sub = 0.5; Pind_sub = np.abs(Pnorm - Pratio_sub).argmin()
# print(Pnorm[Pind_sup], Pnorm[Pind_sub])
# print(nPIm_deltaPeak_Vals[Pind_sup], nPIm_deltaPeak_Vals[Pind_sub])
# ax_supDist.plot(PIm_Vec[Pind_sup] / (mI * nu), nPIm_Vec[Pind_sup], color=incoh_color, lw=0.5, label='Incoherent Part')
# ax_supDist.set_xlim([-0.01, 10])
# ax_supDist.set_ylim([0, 1.05])
# ax_supDist.set_ylabel(r'$n_{|\vec{P_{I}}|}$', fontsize=labelsize)
# ax_supDist.set_xlabel(r'$|\vec{P_{I}}|/(m_{I}c)$', fontsize=labelsize)
# ax_supDist.fill_between(PIm_Vec[Pind_sup] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sup], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm_sup = PVals[Pind_sup] / (mI * nu)
# deltaPeak_sup = nPIm_deltaPeak_Vals[Pind_sup]
# PIm_norm_sup = PIm_Vec[Pind_sup] / (mI * nu)
# delta_GB_sup = deltaPeak_sup * GPDF(PIm_norm_sup, Pnorm_sup, sigma)
# # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label='')
# ax_supDist.fill_between(PIm_norm_sup, np.zeros(PIm_norm_sup.size), delta_GB_sup, facecolor=delta_color, alpha=0.25)
# else:
# ax_supDist.plot((PVals[Pind_sup] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sup], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# ax_supDist.legend(loc=1, fontsize=legendsize)
# ax_subDist.plot(PIm_Vec[Pind_sub] / (mI * nu), nPIm_Vec[Pind_sub], color=incoh_color, lw=0.5, label='Incoherent Part')
# # ax_subDist.set_xlim([-0.01, np.max(PIm_Vec[Pind_sub] / (mI*nu))])
# ax_subDist.set_xlim([-0.01, 10])
# ax_subDist.set_ylim([0, 1.05])
# ax_subDist.set_ylabel(r'$n_{|\vec{P_{I}}|}$', fontsize=labelsize)
# ax_subDist.set_xlabel(r'$|\vec{P_{I}}|/(m_{I}c)$', fontsize=labelsize)
# ax_subDist.fill_between(PIm_Vec[Pind_sub] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sub], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm_sub = PVals[Pind_sub] / (mI * nu)
# deltaPeak_sub = nPIm_deltaPeak_Vals[Pind_sub]
# PIm_norm_sub = PIm_Vec[Pind_sub] / (mI * nu)
# delta_GB_sub = deltaPeak_sub * GPDF(PIm_norm_sub, Pnorm_sub, sigma)
# ax_subDist.plot(PIm_norm_sub, delta_GB_sub, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# ax_subDist.fill_between(PIm_norm_sub, np.zeros(PIm_norm_sub.size), delta_GB_sub, facecolor=delta_color, alpha=0.25)
# else:
# ax_subDist.plot((PVals[Pind_sub] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sub], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# ax_subDist.legend(loc=1, fontsize=legendsize)
# fig1.set_size_inches(7.8, 9)
# fig1.savefig(figdatapath + '/Fig1.pdf')
# # # # FIG 2 - ENERGY DERIVATIVES + SOUND VELOCITY + EFFECTIVE MASS
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# fig2 = plt.figure(constrained_layout=False)
# # gs1 = fig2.add_gridspec(nrows=3, ncols=1, bottom=0.12, top=0.925, left=0.12, right=0.40, hspace=1.0)
# # gs2 = fig2.add_gridspec(nrows=2, ncols=1, bottom=0.12, top=0.925, left=0.58, right=0.98, hspace=0.7)
# gs1 = fig2.add_gridspec(nrows=3, ncols=1, bottom=0.12, top=0.95, left=0.12, right=0.40, hspace=0.2)
# gs2 = fig2.add_gridspec(nrows=2, ncols=1, bottom=0.12, top=0.95, left=0.58, right=0.98, hspace=0.1)
# ax_GSE0 = fig2.add_subplot(gs1[0])
# ax_GSE1 = fig2.add_subplot(gs1[1])
# ax_GSE2 = fig2.add_subplot(gs1[2])
# ax_Vel = fig2.add_subplot(gs2[0])
# ax_Mass = fig2.add_subplot(gs2[1])
# # fig2.text(0.01, 0.95, '(a)', fontsize=labelsize)
# # fig2.text(0.01, 0.65, '(b)', fontsize=labelsize)
# # fig2.text(0.01, 0.32, '(c)', fontsize=labelsize)
# # fig2.text(0.47, 0.95, '(d)', fontsize=labelsize)
# # fig2.text(0.47, 0.47, '(e)', fontsize=labelsize)
# fig2.text(0.01, 0.95, '(a)', fontsize=labelsize)
# fig2.text(0.01, 0.65, '(b)', fontsize=labelsize)
# fig2.text(0.01, 0.37, '(c)', fontsize=labelsize)
# fig2.text(0.47, 0.95, '(d)', fontsize=labelsize)
# fig2.text(0.47, 0.52, '(e)', fontsize=labelsize)
# # # ENERGY DERIVATIVES (SPHERICAL)
# aIBi = -5
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# print(aIBi * xi)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals = np.zeros((PVals.size, tVals.size))
# for Pind, P in enumerate(PVals):
# for tind, t in enumerate(tVals):
# CSAmp = CSAmp_ds.sel(P=P, t=t).values
# Energy_Vals[Pind, tind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Energy_Vals_inf = Energy_Vals[:, -1]
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 5 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
"<NAME> <<EMAIL>>" formats
'''
)
#####################
#
# END LEGACY SUPPORT
#
#####################
def _check_values(self):
'''
Make sure all values are of the appropriate
type and are not missing.
'''
if not self.__api_key:
raise PMMailMissingValueException('Cannot send an e-mail without a Postmark API Key')
elif not self.__sender:
raise PMMailMissingValueException('Cannot send an e-mail without a sender (.sender field)')
elif not self.__to and not self.__bcc:
raise PMMailMissingValueException('Cannot send an e-mail without at least one recipient (.to field or .bcc field)')
elif (self.__template_id or self.__template_model) and not all([self.__template_id, self.__template_model]):
raise PMMailMissingValueException(
                'Cannot send a template e-mail without both template_id and template_model set')
elif not any([self.__template_id, self.__template_model, self.__subject]):
raise PMMailMissingValueException('Cannot send an e-mail without a subject')
elif not self.__html_body and not self.__text_body and not self.__template_id:
raise PMMailMissingValueException('Cannot send an e-mail without either an HTML or text version of your e-mail body')
if self.__track_opens and not self.__html_body:
print('WARNING: .track_opens set to True with no .html_body set. Tracking opens will not work; message will still send.')
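    # A minimal usage sketch (the constructor keywords mirror the private
    # fields used in this class; the token and addresses are placeholders):
    #
    #   message = PMMail(api_key='your-server-token',
    #                    sender='sender@example.com',
    #                    to='recipient@example.com',
    #                    subject='Hello',
    #                    text_body='Plain-text body')
    #   message.send(test=True)  # prints the JSON payload instead of sending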
def to_json_message(self):
json_message = {
'From': self.__sender,
'To': self.__to,
'Subject': self.__subject,
}
if self.__reply_to:
json_message['ReplyTo'] = self.__reply_to
if self.__cc:
json_message['Cc'] = self.__cc
if self.__bcc:
json_message['Bcc'] = self.__bcc
if self.__tag:
json_message['Tag'] = self.__tag
if self.__html_body:
json_message['HtmlBody'] = self.__html_body
if self.__text_body:
json_message['TextBody'] = self.__text_body
if self.__template_id:
json_message['TemplateId'] = self.__template_id
if self.__template_model:
json_message['TemplateModel'] = self.__template_model
if self.__track_opens:
json_message['TrackOpens'] = True
if len(self.__custom_headers) > 0:
cust_headers = []
for key, value in self.__custom_headers.items():
cust_headers.append({
'Name': key,
'Value': value
})
json_message['Headers'] = cust_headers
if len(self.__metadata) > 0:
json_message['Metadata'] = self.__metadata
if len(self.__attachments) > 0:
attachments = []
for attachment in self.__attachments:
if isinstance(attachment, tuple):
file_item = {
"Name": attachment[0],
"Content": attachment[1],
"ContentType": attachment[2],
}
                    # If needed, add a Content-ID header:
if len(attachment) >= 4 and attachment[3]:
file_item["ContentID"] = attachment[3]
elif isinstance(attachment, MIMEBase):
file_item = {
"Name": attachment.get_filename(),
"Content": attachment.get_payload(),
"ContentType": attachment.get_content_type(),
}
content_id = attachment.get("Content-ID")
if content_id:
                        # The Postmark API requires a bare value, not enclosed in angle brackets:
if content_id.startswith("<") and content_id.endswith(">"):
content_id = content_id[1:-1]
                        # Postmark will mark the attachment as "inline" only if the "ContentID" field starts with "cid":
if (attachment.get("Content-Disposition") or "").startswith("inline"):
content_id = "cid:%s" % content_id
file_item["ContentID"] = content_id
else:
continue
attachments.append(file_item)
json_message['Attachments'] = attachments
return json_message
def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
                raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
            else:
                raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err)
# Simple utility that returns a generator to chunk up a list into equal parts
def _chunks(l, n):
return (l[i:i + n] for i in range(0, len(l), n))
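# Illustrative example: _chunks([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4], [5].
# PMBatchMail below relies on this to stay under the per-request message limit.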
class PMBatchMail(object):
# Maximum number of messages to be sent at once.
# Ref: http://developer.postmarkapp.com/developer-build.html#batching-messages
MAX_MESSAGES = 500
def __init__(self, **kwargs):
self.__api_key = None
self.__messages = []
self.__template = False
acceptable_keys = (
'api_key',
'messages'
)
for key in kwargs:
if key in acceptable_keys:
setattr(self, '_PMBatchMail__%s' % key, kwargs[key])
# Set up the user-agent
self.__user_agent = 'Python/%s (python-postmark library version %s)' % ('_'.join([str(var) for var in sys.version_info]), __version__)
# Try to pull in the API key from Django
try:
from django import VERSION
from django.conf import settings as django_settings
self.__user_agent = '%s (Django %s)' % (self.__user_agent, '_'.join([str(var) for var in VERSION]))
if not self.__api_key and hasattr(django_settings, 'POSTMARK_API_KEY'):
self.__api_key = django_settings.POSTMARK_API_KEY
except ImportError:
pass
api_key = property(
lambda self: self.__api_key,
lambda self, value: setattr(self, '_PMBatchMail__api_key', value),
lambda self: setattr(self, '_PMBatchMail__api_key', None),
'''
The API Key for your rack server on Postmark
'''
)
messages = property(
lambda self: self.__messages,
lambda self, value: setattr(self, '_PMBatchMail__messages', value),
lambda self: setattr(self, '_PMBatchMail__messages', None),
'''
Messages to send
'''
)
template = property(
lambda self: self.__template,
lambda self, value: setattr(self, '_PMBatchMail__template', value),
lambda self: setattr(self, '_PMBatchMail__template', None),
'''
Bool to check send with template
'''
)
def add_message(self, message):
'''
Add a message to the batch
'''
self.__messages.append(message)
def remove_message(self, message):
'''
Remove a message from the batch
'''
if message in self.__messages:
self.__messages.remove(message)
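    # Hypothetical usage sketch: collect several PMMail messages and send them
    # in one batch request (messages are built exactly as for PMMail):
    #
    #   batch = PMBatchMail(api_key='your-server-token',
    #                       messages=[message_one, message_two])
    #   batch.send(test=True)  # prints the JSON payload instead of sending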
def _check_values(self):
'''
Make sure all values are of the appropriate
type and are not missing.
'''
for message in self.__messages:
# Check list of messages to see if sending using templates
if not self.__template and (message.template_id or message.template_model):
self.__template = True
message._check_values()
def send(self, test=None):
# Has one of the messages caused an inactive recipient error?
inactive_recipient = False
# Check messages for completeness prior to attempting to send
self._check_values()
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# Split up into groups of 500 messages for sending
for messages in _chunks(self.messages, PMBatchMail.MAX_MESSAGES):
json_message = []
for message in messages:
json_message.append(message.to_json_message())
if not self.__template:
endpoint_url = __POSTMARK_URL__ + 'email/batch'
payload = json.dumps(json_message, cls=PMJSONEncoder).encode('utf8')
else:
endpoint_url = __POSTMARK_URL__ + 'email/batchWithTemplates'
payload = json.dumps({'Messages': json_message}, cls=PMJSONEncoder).encode('utf8')
req = Request(
endpoint_url,
payload,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
continue
# Attempt send
try:
result = urlopen(req)
jsontxt = result.read().decode()
result.close()
if result.code == 200:
results = json.loads(jsontxt)
for i, res in enumerate(results):
self.__messages[i].message_id = res.get("MessageID", None)
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode()
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
# One of the message recipients was inactive. Postmark still sends the
# rest of the messages that have active recipients. Continue sending
# the rest of the chunks.
inactive_recipient = True
continue
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
                    raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
                else:
                    raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EngineSplitTrafficArgs', 'EngineSplitTraffic']
@pulumi.input_type
class EngineSplitTrafficArgs:
def __init__(__self__, *,
service: pulumi.Input[str],
split: pulumi.Input['EngineSplitTrafficSplitArgs'],
migrate_traffic: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a EngineSplitTraffic resource.
:param pulumi.Input[str] service: The name of the service these settings apply to.
:param pulumi.Input['EngineSplitTrafficSplitArgs'] split: Mapping that defines fractional HTTP traffic diversion to different versions within the service.
Structure is documented below.
:param pulumi.Input[bool] migrate_traffic: If set to true traffic will be migrated to this version.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "service", service)
pulumi.set(__self__, "split", split)
if migrate_traffic is not None:
pulumi.set(__self__, "migrate_traffic", migrate_traffic)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter
def service(self) -> pulumi.Input[str]:
"""
The name of the service these settings apply to.
"""
return pulumi.get(self, "service")
@service.setter
def service(self, value: pulumi.Input[str]):
pulumi.set(self, "service", value)
@property
@pulumi.getter
def split(self) -> pulumi.Input['EngineSplitTrafficSplitArgs']:
"""
Mapping that defines fractional HTTP traffic diversion to different versions within the service.
Structure is documented below.
"""
return pulumi.get(self, "split")
@split.setter
def split(self, value: pulumi.Input['EngineSplitTrafficSplitArgs']):
pulumi.set(self, "split", value)
@property
@pulumi.getter(name="migrateTraffic")
def migrate_traffic(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true traffic will be migrated to this version.
"""
return pulumi.get(self, "migrate_traffic")
@migrate_traffic.setter
def migrate_traffic(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "migrate_traffic", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _EngineSplitTrafficState:
def __init__(__self__, *,
migrate_traffic: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
split: Optional[pulumi.Input['EngineSplitTrafficSplitArgs']] = None):
"""
Input properties used for looking up and filtering EngineSplitTraffic resources.
:param pulumi.Input[bool] migrate_traffic: If set to true traffic will be migrated to this version.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] service: The name of the service these settings apply to.
:param pulumi.Input['EngineSplitTrafficSplitArgs'] split: Mapping that defines fractional HTTP traffic diversion to different versions within the service.
Structure is documented below.
"""
if migrate_traffic is not None:
pulumi.set(__self__, "migrate_traffic", migrate_traffic)
if project is not None:
pulumi.set(__self__, "project", project)
if service is not None:
pulumi.set(__self__, "service", service)
if split is not None:
pulumi.set(__self__, "split", split)
@property
@pulumi.getter(name="migrateTraffic")
def migrate_traffic(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true traffic will be migrated to this version.
"""
return pulumi.get(self, "migrate_traffic")
@migrate_traffic.setter
def migrate_traffic(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "migrate_traffic", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def service(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service these settings apply to.
"""
return pulumi.get(self, "service")
@service.setter
def service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service", value)
@property
@pulumi.getter
def split(self) -> Optional[pulumi.Input['EngineSplitTrafficSplitArgs']]:
"""
Mapping that defines fractional HTTP traffic diversion to different versions within the service.
Structure is documented below.
"""
return pulumi.get(self, "split")
@split.setter
def split(self, value: Optional[pulumi.Input['EngineSplitTrafficSplitArgs']]):
pulumi.set(self, "split", value)
class EngineSplitTraffic(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
migrate_traffic: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
split: Optional[pulumi.Input[pulumi.InputType['EngineSplitTrafficSplitArgs']]] = None,
__props__=None):
"""
Traffic routing configuration for versions within a single service. Traffic splits define how traffic directed to the service is assigned to versions.
To get more information about ServiceSplitTraffic, see:
* [API documentation](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services)
## Example Usage
### App Engine Service Split Traffic
```python
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket")
object = gcp.storage.BucketObject("object",
bucket=bucket.name,
source=pulumi.FileAsset("./test-fixtures/appengine/hello-world.zip"))
liveapp_v1 = gcp.appengine.StandardAppVersion("liveappV1",
version_id="v1",
service="liveapp",
delete_service_on_destroy=True,
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
                    source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda bucket_name, object_name: f"https://storage.googleapis.com/{bucket_name}/{object_name}"),
),
),
env_variables={
"port": "8080",
})
liveapp_v2 = gcp.appengine.StandardAppVersion("liveappV2",
version_id="v2",
service="liveapp",
noop_on_destroy=True,
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
                    source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda bucket_name, object_name: f"https://storage.googleapis.com/{bucket_name}/{object_name}"),
),
),
env_variables={
"port": "8080",
})
liveapp = gcp.appengine.EngineSplitTraffic("liveapp",
service=liveapp_v2.service,
migrate_traffic=False,
split=gcp.appengine.EngineSplitTrafficSplitArgs(
shard_by="IP",
                allocations=pulumi.Output.all(liveapp_v1.version_id, liveapp_v2.version_id).apply(lambda liveapp_v1_version_id, liveapp_v2_version_id: {
liveapp_v1_version_id: 0.75,
liveapp_v2_version_id: 0.25,
}),
))
```
## Import
ServiceSplitTraffic can be imported using any of these accepted formats
```sh
$ pulumi import gcp:appengine/engineSplitTraffic:EngineSplitTraffic default apps/{{project}}/services/{{service}}
```
```sh
$ pulumi import gcp:appengine/engineSplitTraffic:EngineSplitTraffic default {{project}}/{{service}}
```
```sh
$ pulumi import gcp:appengine/engineSplitTraffic:EngineSplitTraffic default {{service}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] migrate_traffic: If set to true traffic will be migrated to this version.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] service: The name of the service these settings apply to.
:param pulumi.Input[pulumi.InputType['EngineSplitTrafficSplitArgs']] split: Mapping that defines fractional HTTP traffic diversion to different versions within the service.
Structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EngineSplitTrafficArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Traffic routing configuration for versions within a single service. Traffic splits define how traffic directed to the service is assigned to versions.
To get more information about ServiceSplitTraffic, see:
* [API documentation](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services)
## Example Usage
### App Engine Service Split Traffic
```python
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket")
object = gcp.storage.BucketObject("object",
bucket=bucket.name,
source=pulumi.FileAsset("./test-fixtures/appengine/hello-world.zip"))
liveapp_v1 = gcp.appengine.StandardAppVersion("liveappV1",
version_id="v1",
service="liveapp",
delete_service_on_destroy=True,
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
                    source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda bucket_name, object_name: f"https://storage.googleapis.com/{bucket_name}/{object_name}"),
),
),
env_variables={
"port": "8080",
})
liveapp_v2 = gcp.appengine.StandardAppVersion("liveappV2",
version_id="v2",
service="liveapp",
noop_on_destroy=True,
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
                    source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda bucket_name, object_name: f"https://storage.googleapis.com/{bucket_name}/{object_name}"),
),
),
env_variables={
"port": "8080",
})
liveapp = gcp.appengine.EngineSplitTraffic("liveapp",
service=liveapp_v2.service,
migrate_traffic=False,
split=gcp.appengine.EngineSplitTrafficSplitArgs(
shard_by="IP",
                allocations=pulumi.Output.all(liveapp_v1.version_id, liveapp_v2.version_id).apply(lambda liveapp_v1_version_id, liveapp_v2_version_id: {
liveapp_v1_version_id: 0.75,
liveapp_v2_version_id: 0.25,
}),
))
```
## Import
ServiceSplitTraffic can be imported using any of these accepted formats
```sh
$ pulumi import gcp:appengine/engineSplitTraffic:EngineSplitTraffic default apps/{{project}}/services/{{service}}
```
```sh
$ pulumi import gcp:appengine/engineSplitTraffic:EngineSplitTraffic default {{project}}/{{service}}
```
```sh
$ pulumi import gcp:appengine/engineSplitTraffic:EngineSplitTraffic default {{service}}
```
:param str resource_name: The name of the resource.
:param EngineSplitTrafficArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EngineSplitTrafficArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
migrate_traffic: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
split: Optional[pulumi.Input[pulumi.InputType['EngineSplitTrafficSplitArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EngineSplitTrafficArgs.__new__(EngineSplitTrafficArgs)
__props__.__dict__["migrate_traffic"] = migrate_traffic
__props__.__dict__["project"] = project
if service is None and not opts.urn:
raise TypeError("Missing required property 'service'")
__props__.__dict__["service"] = service
if split is None and not opts.urn:
raise TypeError("Missing required property 'split'")
__props__.__dict__["split"] = split
super(EngineSplitTraffic, __self__).__init__(
'gcp:appengine/engineSplitTraffic:EngineSplitTraffic',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
migrate_traffic: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
split: Optional[pulumi.Input[pulumi.InputType['EngineSplitTrafficSplitArgs']]] = None) -> 'EngineSplitTraffic':
"""
Get an existing EngineSplitTraffic resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] migrate_traffic: If set to true traffic | |
#
# Copyright 2012 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Core async task wrapper. This module contains the `Async` class, which is
used to create asynchronous jobs, and a `defaults` decorator you may use to
specify default settings for a particular async task. To use,
# Create a task.
work = Async(
target="function.to.run",
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target},
task_args={"appengine": 1, "task": "kwargs"},
queue="yourqueue"
)
# Enqueue the task.
work.start()
*or*, set default arguments for a function:
@defaults(task_args={"appengine": 1, "task": "kwargs"}, queue="yourqueue")
def run_me(*args, **kwargs):
pass
# Create a task.
work = Async(
target=run_me,
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target},
)
# Enqueue the task.
work.start()
You may also update options after instantiation:
# Create a task.
work = Async(
target="function.to.run",
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target}
)
work.update_options(task_args={"appengine":1, "task": "kwargs"},
queue="yourqueue")
# Enqueue the task.
work.start()
The order of precedence is:
1) options specified when calling start.
2) options specified using update_options.
3) options specified in the constructor.
4) options specified by @defaults decorator.
"""
import copy
from functools import partial
from functools import wraps
import json
import os
import time
import uuid
from furious.job_utils import decode_callbacks
from furious.job_utils import encode_callbacks
from furious.job_utils import get_function_path_and_options
from furious.job_utils import path_to_reference
from furious.job_utils import reference_to_path
from furious import errors
__all__ = ['ASYNC_DEFAULT_QUEUE', 'ASYNC_ENDPOINT', 'Async', 'defaults']
ASYNC_DEFAULT_QUEUE = 'default'
ASYNC_ENDPOINT = '/_queue/async'
MAX_DEPTH = 100
MAX_RESTARTS = 10
DISABLE_RECURSION_CHECK = -1
RETRY_SLEEP_SECS = 4
DEFAULT_RETRY_OPTIONS = {
'task_retry_limit': MAX_RESTARTS
}
class Async(object):
def __init__(self, target, args=None, kwargs=None, **options):
self._options = {}
# Make sure nothing is snuck in.
_check_options(options)
self._update_job(target, args, kwargs)
self.update_options(**options)
self._initialize_recursion_depth()
self._context_id = self._get_context_id()
self._parent_id = self._get_parent_id()
self._id = self._get_id()
self._execution_context = None
self._executing = False
self._executed = False
self._persistence_engine = None
self._result = None
@property
def executed(self):
return self._executed
@property
def executing(self):
return self._executing
@executing.setter
def executing(self, executing):
if self._executed:
raise errors.AlreadyExecutedError(
'You can not execute an executed job.')
if self._executing:
raise errors.AlreadyExecutingError(
'Job is already executing, can not set executing.')
self._executing = executing
@property
def result(self):
if not self.executed:
raise errors.NotExecutedError(
'You must execute this Async before getting its result.')
return self._result
@result.setter
def result(self, result):
if not self._executing:
raise errors.NotExecutingError(
'The Async must be executing to set its result.')
self._result = result
self._executing = False
self._executed = True
if self._options.get('persist_result'):
self._persist_result()
def _persist_result(self):
"""Store this Async's result in persistent storage."""
self._prepare_persistence_engine()
return self._persistence_engine.store_async_result(
self.id, self.result)
@property
def function_path(self):
return self.job[0]
@property
def _function_path(self):
# DEPRECATED: Hanging around for backwards compatibility.
return self.function_path
@property
def job(self):
"""job is stored as a (function path, args, kwargs) tuple."""
return self._options['job']
@property
def recursion_depth(self):
"""Get the current recursion depth. `None` indicates uninitialized
recursion info.
"""
recursion_options = self._options.get('_recursion', {})
return recursion_options.get('current', None)
def _initialize_recursion_depth(self):
"""Ensure recursion info is initialized, if not, initialize it."""
from furious.context import get_current_async
recursion_options = self._options.get('_recursion', {})
current_depth = recursion_options.get('current', 0)
max_depth = recursion_options.get('max', MAX_DEPTH)
try:
executing_async = get_current_async()
# If this async is within an executing async, use the depth off
# that async. Otherwise use the depth set in the async's options.
current_depth = executing_async.recursion_depth
# If max_depth does not equal MAX_DEPTH, it is custom. Otherwise
# use the max_depth from the containing async.
if max_depth == MAX_DEPTH:
executing_options = executing_async.get_options().get(
'_recursion', {})
max_depth = executing_options.get('max', max_depth)
except errors.NotInContextError:
# This Async is not being constructed inside an executing Async.
pass
# Store the recursion info.
self.update_options(_recursion={'current': current_depth,
'max': max_depth})
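    # Hypothetical example of overriding these defaults: constructing
    #   Async('some.target', _recursion={'max': 5})
    # caps nesting at five levels (check_recursion_depth below raises
    # AsyncRecursionError beyond that), while
    #   _recursion={'max': DISABLE_RECURSION_CHECK}
    # disables the check entirely.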
def check_recursion_depth(self):
"""Check recursion depth, raise AsyncRecursionError if too deep."""
from furious.async import MAX_DEPTH
recursion_options = self._options.get('_recursion', {})
max_depth = recursion_options.get('max', MAX_DEPTH)
# Check if recursion check has been disabled, then check depth.
if (max_depth != DISABLE_RECURSION_CHECK and
self.recursion_depth > max_depth):
raise errors.AsyncRecursionError('Max recursion depth reached.')
def _update_job(self, target, args, kwargs):
"""Specify the function this async job is to execute when run."""
target_path, options = get_function_path_and_options(target)
assert isinstance(args, (tuple, list)) or args is None
assert isinstance(kwargs, dict) or kwargs is None
if options:
self.update_options(**options)
self._options['job'] = (target_path, args, kwargs)
def set_execution_context(self, execution_context):
"""Set the ExecutionContext this async is executing under."""
if self._execution_context:
raise errors.AlreadyInContextError
self._execution_context = execution_context
def get_options(self):
"""Return this async job's configuration options."""
return self._options
def update_options(self, **options):
"""Safely update this async job's configuration options."""
_check_options(options)
if 'persistence_engine' in options:
options['persistence_engine'] = reference_to_path(
options['persistence_engine'])
if 'id' in options:
self._id = options['id']
self._options.update(options)
def get_callbacks(self):
"""Return this async job's callback map."""
return self._options.get('callbacks', {})
def get_headers(self):
"""Create and return task headers."""
# TODO: Encode some options into a header here.
return self._options.get('headers', {})
def get_queue(self):
"""Return the queue the task should run in."""
return self._options.get('queue', ASYNC_DEFAULT_QUEUE)
def get_task_args(self):
"""Get user-specified task kwargs."""
return self._options.get('task_args', {})
def to_task(self):
"""Return a task object representing this async job."""
from google.appengine.api.taskqueue import Task
from google.appengine.api.taskqueue import TaskRetryOptions
self._increment_recursion_level()
self.check_recursion_depth()
url = "%s/%s" % (ASYNC_ENDPOINT, self.function_path)
kwargs = {
'url': url,
'headers': self.get_headers().copy(),
'payload': json.dumps(self.to_dict())
}
kwargs.update(copy.deepcopy(self.get_task_args()))
# Set task_retry_limit
retry_options = copy.deepcopy(DEFAULT_RETRY_OPTIONS)
retry_options.update(kwargs.pop('retry_options', {}))
kwargs['retry_options'] = TaskRetryOptions(**retry_options)
return Task(**kwargs)
def start(self, transactional=False, async=False, rpc=None):
"""Insert the task into the requested queue, 'default' if non given.
If a TransientError is hit the task will re-insert the task. If a
TaskAlreadyExistsError or TombstonedTaskError is hit the task will
silently fail.
If the async flag is set, then the add will be done asynchronously and
the return value will be the rpc object; otherwise the return value is
the task itself. If the rpc kwarg is provided, but we're not in async
mode, then it is ignored.
"""
from google.appengine.api import taskqueue
task = self.to_task()
queue = taskqueue.Queue(name=self.get_queue())
retry_transient = self._options.get('retry_transient_errors', True)
retry_delay = self._options.get('retry_delay', RETRY_SLEEP_SECS)
add = queue.add
if async:
add = partial(queue.add_async, rpc=rpc)
try:
ret = add(task, transactional=transactional)
except taskqueue.TransientError:
# Always re-raise for transactional insert, or if specified by
# options.
if transactional or not retry_transient:
raise
time.sleep(retry_delay)
ret = add(task, transactional=transactional)
except (taskqueue.TaskAlreadyExistsError,
taskqueue.TombstonedTaskError):
return
# TODO: Return a "result" object.
return ret
def __deepcopy__(self, *args):
"""In order to support callbacks being Async objects, we need to
support being deep copied.
"""
return self
def to_dict(self):
"""Return this async job as a dict suitable for json encoding."""
return encode_async_options(self)
@classmethod
def from_dict(cls, async):
"""Return an async job from a dict output by Async.to_dict."""
async_options = decode_async_options(async)
target, args, kwargs = async_options.pop('job')
return cls(target, args, kwargs, **async_options)
def _prepare_persistence_engine(self):
"""Load the specified persistence engine, or the default if none is
set.
"""
if self._persistence_engine:
return
persistence_engine = self._options.get('persistence_engine')
if persistence_engine:
self._persistence_engine = path_to_reference(persistence_engine)
return
from furious.config import get_default_persistence_engine
self._persistence_engine = get_default_persistence_engine()
def _get_context_id(self):
"""If this async is in a context set the context id."""
from furious.context import get_current_context
context_id = self._options.get('context_id')
if context_id:
return context_id
try:
context = get_current_context()
except errors.NotInContextError:
context = None
self.update_options(context_id=None)
if context:
context_id = context.id
self.update_options(context_id=context_id)
return context_id
def _get_parent_id(self):
"""If this async is in within another async set that async id as the
parent.
"""
parent_id = self._options.get('parent_id')
if parent_id:
return parent_id
from furious.context import get_current_async
try:
async = get_current_async()
except errors.NotInContextError:
async = None
if async:
parent_id = ":".join([async.parent_id.split(":")[0], async.id])
else:
parent_id = self.request_id
self.update_options(parent_id=parent_id)
return parent_id
def _get_id(self):
"""If this async has no id, generate one."""
id = self._options.get('id')
if id:
return id
id = uuid.uuid4().hex
self.update_options(id=id)
return id
@property
def id(self):
"""Return this Async's ID value."""
return self._id
@property
def context_id(self):
"""Return this Async's Context Id if it exists."""
return self._context_id
@property
def parent_id(self):
"""Return this Async's Parent Id if it exists."""
return self._parent_id
@property
def full_id(self):
"""Return the full_id for this Async. Consists of the parent id, id and
context id.
"""
full_id = ""
if self.parent_id:
full_id = ":".join([self.parent_id, self.id])
| |
"""
CHAS builtin output modules.
We contain the built in output modules for CHAS.
These modules are to be used with the OutputHandler,
as it will handle the process of getting, mixing, and sending
the audio information to each module.
We offer some useful output modules:
(An asterisk denotes that a dependency is required)
- PyAudioModule - Outputs audio to speakers using PyAudio *
- WaveModule - Outputs audio to a wave file
- PrintModule - Prints audio data to a terminal
- NullModule - Does nothing with the given audio information
Here are some audio modules I would like to see later:
- NetModule - Server/client, used for sending audio data over a network
- FFMPEGModule - Outputting audio to different audio types(mp3, flac, ogg) *
- Other - Wrappers for other output types(simpleaudio, alsa) *
"""
import queue
import wave
import pathlib
from base64 import b64encode
from chaslib.sound.convert import BaseConvert, NullConvert, Float32, Int16
from chaslib.sound.utils import amp_clamp
from chaslib.misctools import get_chas, get_logger
class BaseOutput(object):
"""
BaseOutput - Class all child output modules must inherit!
An 'Output Module' is a component that adds extra functionality to the 'OutputHandler' class.
For example, if you wanted to write audio data to a wave file,
then you would have to write and add an output module that can do so to the 'Output' class.
We define some useful functionality here,
such as defining the Output Module API,
as well as getting a collection of values.
The Output class will do the dirty work of invoking these modules,
and passing audio information to us.
We only have to worry about sending the audio to a location!
    Each audio module will be put in its own thread,
to prevent locking and allow them to operate efficiently.
We accept signed floats as audio data,
so be sure to configure your output accordingly!
We also allow for the registration of a converter,
which will automatically convert the audio information into something we can understand.
By default, if you ask us to return byte information,
we return the samples in stereo.
    If this is not ideal, then you can call the 'mono()' function,
    and we will then mix the data down into mono.
If you want to do the mixing and conversions yourself,
then you can simply call 'get_sample(raw=True)'.
"""
def __init__(self):
self.queue = queue.Queue() # Queue for getting audio information
self.convert = NullConvert() # Converter instance
self.running = False # Value determining if we are running
self.out = None # Reference to master OutputHandler class
self.special = False
self.stereo = True # Value determining if we should return samples in stereo.
def mono(self):
"""
        Switches this output module to mono,
meaning that 'get_sample()' will return mono data.
"""
self.stereo = False
def add_converter(self, conv):
"""
Adds the given converter to the output module.
The converter MUST inherit BaseConverter,
or an exception will be raised.
:param conv: Converter to add
:type conv: BaseConverter
"""
# Check if the converter inherits BaseConverter
assert isinstance(conv, BaseConvert), "Converter MUST inherit BaseConvert!"
# Otherwise, add it to this module:
self.convert = conv
def get_sample(self, timeout=None, raw=False):
"""
Gets a value from the queue, mixes it, and sends it through the converter.
This returns one sample of audio, the size of which can be determined by
We support the timeout feature, which is the amount of time to wait for values to become available.
Due to threading and our method of synchronization, we will ALWAYS block.
You can optionally disable conversion and mixing by using the 'raw' parameter.
If we are working with stereo, then a tuple of floats will be returned,
the first representing the left channel, then right representing the right channel.
When we are stopped by the Output class,
'None' is added to our queue.
If you encounter 'None', then you should exit and finish up any work you may be doing.
The 'stop()' method will be called shortly after,
so you can put stop code in there.
:param timeout: Timeout value in seconds. Ignored if None, or if we are not blocking
:type timeout: int
:param raw: Value determining if we should operate in raw mode,
where we don't send info to the converter before returning it.
:type raw: bool
"""
if self.special:
# Generate a new frame:
inp = self.out.gen_value()
else:
# Get input from the queue:
inp = self.queue.get(timeout=timeout)
# We are done processing!
# Check if we should convert:
if not raw:
# Process our input:
inp = self._process_input(inp)
# Return the input:
return inp
def get_samples(self, num, timeout=None, raw=False):
"""
Gets a number of inputs from the input queue,
and returns them in a tuple.
        Under the hood we call 'get_sample()' a number of times,
and return all the inputs as a tuple.
Again, when we are stopped, 'None' is added to our audio queue.
        If we encounter 'None', then we will simply return 'None'.
:param num: Number of samples to retrieve
:type num: int
        :param timeout: Timeout value in seconds. Ignored if we are not blocking, or None
:type timeout: int
        :param raw: Determines if we should send input through the converter
:type raw: bool
:return: Tuple containing the samples
:rtype: tuple
"""
final = []
# get a number of inputs:
for _ in range(0, num):
# Get input and add it to the final
inp = self.get_sample(timeout=timeout, raw=raw)
if inp is None:
# Just return None
return None
# Add input to list:
final.append(inp)
# Return the inputs
return tuple(final)
def get_added_samples(self, num, timeout=None, raw=False):
"""
Gets a number of inputs from the input queue,
and adds them together into one value.
Under the hood, we call 'get_sample()' a specified amount of times,
and add all the inputs together.
If your converter returns bytes,
then this is a great way to get a combined bytes object!
Again, when we are stopped, 'None' is added to our audio queue.
If we encounter 'None', then we will simply return None.
:param num: Number of samples to retrieve
:type num: int
:param timeout: Timeout value in seconds. Ignored if we are not blocking, or None
:type timeout: int
:param raw: Determines if we should send the input though the converter
:type raw: bool
:return: Added values
"""
# Iterate a specified number of times:
final = self.get_sample(timeout=timeout, raw=raw)
for _ in range(0, num-1):
# Get input and add it to final:
inp = self.get_sample(timeout=timeout, raw=raw)
# Add input to list:
final = final + inp
# Convert the tuple and return:
return final
def add_input(self, inp):
"""
Adds the given input to the audio queue.
This probably should only be called by 'Output',
        but if a developer has a use for adding values,
and can properly handle any issues that may arise,
then it should be okay to do so.
Unless you have an explicit reason,
floats are the only types that should be added to the queue!
:param inp: Input to add to the queue
:type inp: float
"""
# Add the value to the queue:
self.queue.put(inp)
def _process_input(self, inp):
"""
Processes the given audio info,
which should be a tuple of floats,
0 representing left, and 1 representing right.
We automatically mix down the audio if we are working in mono.
        :param inp: Tuple containing two floats representing audio info
:type inp: tuple
:return: Audio sample in bytes
:rtype: bytes
"""
# Checks if we are in stereo:
if self.stereo:
# Convert the value and add them together:
return self.convert.convert(inp[0]) + self.convert.convert(inp[1])
# Otherwise, add them together and return:
return self.convert.convert(inp[0] + inp[1])
def start(self):
"""
This function is called when the output module is started.
The output module is started when it is added to the thread executor pool.
Feel free to put any setup code you want here.
"""
pass
def stop(self):
"""
        This function is called when the output module is stopped.
        Feel free to put any cleanup code you want here.
        """
        pass
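
# --- Illustrative usage sketch (not part of the original CHAS source) ---
# The docstrings above describe the contract between the OutputHandler and an
# output module: stereo frames arrive as (left, right) float tuples via
# add_input(), and a None sentinel signals shutdown. The demo below exercises
# that contract directly on BaseOutput with raw=True so no assumptions are made
# about a particular converter; the sample values are made up.
def _example_base_output_usage():
    out = BaseOutput()
    out.start()
    out.add_input((0.25, -0.25))   # one stereo frame: (left, right) floats
    out.add_input(None)            # shutdown sentinel queued when we are stopped
    frame = out.get_sample(raw=True)      # -> (0.25, -0.25), untouched
    sentinel = out.get_sample(raw=True)   # -> None: time to finish up
    if sentinel is None:
        out.stop()
    return frame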
# -*- coding: utf-8 -*-
from random import random
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.views.generic import TemplateView
from uncharted.chart import *
class Area100PercentStacked(TemplateView):
template_name = 'area/chart.html'
chartData = [
{
            'year': 1994,
'cars': 1587,
'motorcycles': 650,
'bicycles': 121
}, {
'year': 1995,
'cars': 1567,
'motorcycles': 683,
'bicycles': 146
}, {
'year': 1996,
'cars': 1617,
'motorcycles': 691,
'bicycles': 138
}, {
'year': 1997,
'cars': 1630,
'motorcycles': 642,
'bicycles': 127
}, {
'year': 1998,
'cars': 1660,
'motorcycles': 699,
'bicycles': 105
}, {
'year': 1999,
'cars': 1683,
'motorcycles': 721,
'bicycles': 109
}, {
'year': 2000,
'cars': 1691,
'motorcycles': 737,
'bicycles': 112
}, {
'year': 2001,
'cars': 1298,
'motorcycles': 680,
'bicycles': 101
}, {
'year': 2002,
'cars': 1275,
'motorcycles': 664,
'bicycles': 97
}, {
'year': 2003,
'cars': 1246,
'motorcycles': 648,
'bicycles': 93
}, {
'year': 2004,
'cars': 1218,
'motorcycles': 637,
'bicycles': 101
}, {
'year': 2005,
'cars': 1213,
'motorcycles': 633,
'bicycles': 87
}, {
'year': 2006,
'cars': 1199,
'motorcycles': 621,
'bicycles': 79
}, {
'year': 2007,
'cars': 1110,
'motorcycles': 210,
'bicycles': 81
}, {
'year': 2008,
'cars': 1165,
'motorcycles': 232,
'bicycles': 75
}, {
'year': 2009,
'cars': 1145,
'motorcycles': 219,
'bicycles': 88
}, {
'year': 2010,
'cars': 1163,
'motorcycles': 201,
'bicycles': 82
}, {
'year': 2011,
'cars': 1180,
'motorcycles': 285,
'bicycles': 87
}, {
'year': 2012,
'cars': 1159,
'motorcycles': 277,
'bicycles': 71
}]
def get_context_data(self, *args, **kwargs):
context = super(Area100PercentStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
chart.zoomOutButton = {
'backgroundColor': "#000000",
'backgroundAlpha': 0.15,
}
chart.addTitle("Traffic incidents per year", 15)
# AXES
# Category
chart.categoryAxis.gridAlpha = 0.07
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.startOnAxis = True
# Value
valueAxis = amValueAxis(title="percent", stackType="100%", gridAlpha=0.07)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph = amGraph(
type="line",
title="Cars",
valueField="cars",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=0,
fillAlphas=0.6,
)
chart.addGraph(graph)
# second graph
graph = amGraph(
type="line",
title="Motorcycles",
valueField="motorcycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=0,
fillAlphas=0.6,
)
chart.addGraph(graph)
# third graph
graph = amGraph(
type="line",
title="Bicycles",
valueField="bicycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=0,
fillAlphas=0.6,
)
chart.addGraph(graph)
# LEGEND
legend = amLegend(align="center")
chart.addLegend(legend)
# CURSOR
chartCursor = amChartCursor(zoomable=False, cursorAlpha=0)
chart.addChartCursor(chartCursor)
context['chart'] = chart
return context
area100PercentStacked = Area100PercentStacked.as_view()
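
# --- Illustrative sketch (not part of the original module) ---
# Each chart is exposed as a ready view callable via as_view(). A project
# urls.py could wire one up as below; the route string and URL name are made
# up, and django.urls.path assumes Django >= 2.0.
def _example_urlpatterns():
    from django.urls import path

    return [
        path('charts/area-100-stacked/', area100PercentStacked,
             name='area-100-stacked'),
    ]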
class AreaStacked(Area100PercentStacked):
def get_context_data(self, *args, **kwargs):
context = super(AreaStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
marginTop=10,
dataProvider=self.chartData,
categoryField="year",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
chart.zoomOutButton = {
'backgroundColor': "#000000",
'backgroundAlpha': 0.15,
}
# AXES
# Category
chart.categoryAxis.gridAlpha = 0.07
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.startOnAxis = True
# Value
valueAxis = amValueAxis(
title="Traffic incidents",
stackType="regular", # this line makes the chart "stacked"
gridAlpha=0.07,
)
chart.addValueAxis(valueAxis)
# GUIDES are vertical (can also be horizontal) lines (or areas) marking some event.
# first guide
guide1 = amGuide(
category="2001",
lineColor="#CC0000",
lineAlpha=1,
dashLength=2,
inside=True,
labelRotation=90,
label="fines for speeding increased",
)
        chart.categoryAxis.addGuide(guide1)
# second guide
guide2 = amGuide(
category="2007",
lineColor="#CC0000",
lineAlpha=1,
dashLength=2,
inside=True,
labelRotation=90,
label="motorcycle maintenance fee introduced",
)
        chart.categoryAxis.addGuide(guide2)
# GRAPHS
# first graph
graph = amGraph(
type="line",
title="Cars",
valueField="cars",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=1,
fillAlphas=0.6, # setting fillAlphas to > 0 value makes it area graph
hidden=True,
)
chart.addGraph(graph)
# second graph
graph = amGraph(
type="line",
title="Motorcycles",
valueField="motorcycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=1,
fillAlphas=0.6,
)
chart.addGraph(graph)
# third graph
graph = amGraph(
type="line",
title="Bicycles",
valueField="bicycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=1,
fillAlphas=0.6,
)
chart.addGraph(graph)
# LEGEND
legend = amLegend(position="top")
chart.addLegend(legend)
# CURSOR
chartCursor = amChartCursor(zoomable=False, cursorAlpha=0)
chart.addChartCursor(chartCursor)
context['chart'] = chart
return context
areaStacked = AreaStacked.as_view()
class AreaWithTimeBasedData(Area100PercentStacked):
@property
def chartData(self):
output = []
d = timezone.now() - timedelta(minutes=1000)
        for i in range(0, 1000):
d = d + timedelta(minutes=1)
value = int((random() * 40) + 10)
output.append({
'date': d,#.isoformat(),
'visits': value,
})
return output
def get_context_data(self, *args, **kwargs):
context = super(AreaWithTimeBasedData, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
marginRight=30,
dataProvider=self.chartData,
categoryField="date",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
chart.zoomOutButton = {
'backgroundColor': "#000000",
'backgroundAlpha': 0.15,
}
chart.addListener("dataUpdated", "zoomChart");
# AXES
# Category
chart.categoryAxis.parseDates = True
chart.categoryAxis.minPeriod = "mm"
chart.categoryAxis.gridAlpha = 0.07
chart.categoryAxis.axisColor = "#DADADA"
# Value
valueAxis = amValueAxis(
title="Unique visitors",
gridAlpha=0.07,
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph = amGraph(
type="line",
title="red line",
valueField="visits",
lineAlpha=1,
lineColor="#d1cf2a",
fillAlphas=0.3, # setting fillAlphas to > 0 value makes it area graph
)
chart.addGraph(graph)
# CURSOR
chartCursor = amChartCursor(
cursorPosition="mouse",
categoryBalloonDateFormat="JJ:NN, DD MMMM",
)
chart.addChartCursor(chartCursor)
# SCROLLBAR
chartScrollbar = amChartScrollbar()
chart.addChartScrollbar(chartScrollbar)
context['chart'] = chart
return context
areaWithTimeBasedData = AreaWithTimeBasedData.as_view()
class Bar3D(TemplateView):
template_name = 'bar/chart.html'
chartData = [
{
'year': 2005,
'income': 23.5
}, {
'year': 2006,
'income': 26.2
}, {
'year': 2007,
'income': 30.1
}, {
'year': 2008,
'income': 29.5
}, {
'year': 2009,
'income': 24.6
}]
def get_context_data(self, *args, **kwargs):
context = super(Bar3D, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
rotate=True,
depth3D=20,
angle=30,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.fillAlpha = 1
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.fillColor = "#FAFAFA"
# Value
valueAxis = amValueAxis(title="Income in millions, USD", axisColor="#DADADA", gridAlpha=0.1)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
type="column",
title="Income",
valueField="income",
balloonText="Income in [[category]]:[[value]]",
lineAlpha=0,
fillColors=["#bf1c25"],
fillAlphas=1,
)
chart.addGraph(graph)
context['chart'] = chart
return context
bar3D = Bar3D.as_view()
class BarAndLineMix(Bar3D):
chartData = [
{
'year': 2005,
'income': 23.5,
'expenses': 18.1
}, {
'year': 2006,
'income': 26.2,
'expenses': 22.8
}, {
'year': 2007,
'income': 30.1,
'expenses': 23.9
}, {
'year': 2008,
'income': 29.5,
'expenses': 25.1
}, {
'year': 2009,
'income': 24.6,
'expenses': 25.0
}]
def get_context_data(self, *args, **kwargs):
context = super(BarAndLineMix, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
startDuration=1,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.dashLength = 5
# Value
valueAxis = amValueAxis(
title="Million USD",
dashLength=5,
axisAlpha=0.2,
position="top",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# column graph
graph1 = amGraph(
type="column",
title="Income",
valueField="income",
lineAlpha=0,
fillColors=["#ADD981"],
fillAlphas=1,
)
chart.addGraph(graph1)
# line graph
graph2 = amGraph(
type="line",
title="Expenses",
valueField="expenses",
lineThickness=2,
bullet="round",
fillAlphas=0,
)
chart.addGraph(graph2)
# LEGEND
legend = amLegend()
chart.addLegend(legend)
context['chart'] = chart
return context
barAndLineMix = BarAndLineMix.as_view()
class BarClustered(BarAndLineMix):
def get_context_data(self, *args, **kwargs):
context = super(BarClustered, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
startDuration=1,
plotAreaBorderColor="#DADADA",
plotAreaBorderAlpha=1,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0.1,
position="top",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
type="column",
title="Income",
valueField="income",
balloonText="Income:[[value]]",
lineAlpha=0,
fillColors=["#ADD981"],
fillAlphas=1,
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
type="column",
title="Expenses",
valueField="expenses",
balloonText="Expenses:[[value]]",
lineAlpha=0,
fillColors=["#81acd9"],
fillAlphas=1,
)
chart.addGraph(graph2)
# LEGEND
legend = amLegend()
chart.addLegend(legend)
context['chart'] = chart
return context
barClustered = BarClustered.as_view()
class BarFloating(BarClustered):
template_name = 'area/chart.html'
chartData = [
{
'name': "John",
'startTime': 8,
'endTime': 11,
'color': "#FF0F00"
}, {
'name': "Joe",
'startTime': 10,
'endTime': 13,
'color': "#FF9E01"
}, {
'name': "Susan",
'startTime': 11,
'endTime': 18,
'color': "#F8FF01"
}, {
'name': "Eaton",
'startTime': 15,
'endTime': 19,
'color': "#04D215"
}]
def get_context_data(self, *args, **kwargs):
context = super(BarFloating, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="name",
startDuration=1,
columnWidth=0.9,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0.1,
unit=":00",
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph1 = amGraph(
type="column",
valueField="endTime",
openField="startTime",
balloonText="Income:[[value]]",
lineAlpha=0,
colorField="color",
fillAlphas=0.8,
)
chart.addGraph(graph1)
context['chart'] = chart
return context
barFloating = BarFloating.as_view()
class BarStacked(BarFloating):
template_name = 'bar/3d.html'
chartData = [
{
'year': "2003",
'europe': 2.5,
'namerica': 2.5,
'asia': 2.1,
'lamerica': 0.3,
'meast': 0.2,
'africa': 0.1
}, {
'year': "2004",
'europe': 2.6,
'namerica': 2.7,
'asia': 2.2,
'lamerica': 0.3,
'meast': 0.3,
'africa': 0.1
}, {
'year': "2005",
'europe': 2.8,
'namerica': 2.9,
'asia': 2.4,
'lamerica': 0.3,
'meast': 0.3,
'africa': 0.1
}]
def get_context_data(self, *args, **kwargs):
context = super(BarStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
plotAreaBorderAlpha=0.2,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0.1,
stackType="regular",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
type="column",
title="Europe",
labelText="[[value]]",
valueField="europe",
lineAlpha=0,
fillAlphas=1,
lineColor="#C72C95",
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
type="column",
title="North America",
labelText="[[value]]",
valueField="namerica",
lineAlpha=0,
fillAlphas=1,
lineColor="#D8E0BD",
)
chart.addGraph(graph2)
# third graph
graph3 = amGraph(
type="column",
title="Asia-Pacific",
labelText="[[value]]",
""" File Submission Service and Interfaces.
The Submission service encapsulates the core functionality of accepting,
triaging and forwarding a submission to the dispatcher.
SubmissionServer is typically exposed via HTTP interface implemented by al_ui,
however the core logic is implemented in SubmissionService to provide
seperation between the network rpc interface and the actual submission logic.
There are three primary modes of submission:
two-phase (presubmit + submit)
inline (submit file)
existing (submit a file that is already in the file cache/SAN)
In two-phase mode, submission is a presubmit followed by a submit.
A 'presubmit' is sent to the submission service first. If the server already
has a copy of the sample it indicates as such to the client which saves the
client from copying the file again. Once the client has copied the file
(if required) it then issues a final 'submit'.
"""
import logging
import os
import pprint
import uuid
import tempfile
import time
from assemblyline.al.common import forge
from assemblyline.al.common.task import Task
from assemblyline.al.common.remote_datatypes import ExpiringHash
from assemblyline.al.core.filestore import CorruptedFileStoreException
from assemblyline.common import digests
from assemblyline.common import identify
from assemblyline.common.charset import safe_str
from assemblyline.common.isotime import now_as_iso
log = logging.getLogger('assemblyline.submission')
config = forge.get_config()
SUBMISSION_AUTH = (safe_str(config.submissions.user), safe_str(config.submissions.password))
SHARDS = config.core.dispatcher.shards
class SubmissionException(Exception):
pass
def assert_valid_file(path):
if not os.path.exists(path):
raise Exception('File does not exist: %s' % path)
if os.path.isdir(path):
raise Exception('Expected file. Found directory: %s' % path)
def assert_valid_sha256(sha256):
if len(sha256) != 64:
raise Exception('Invalid SHA256: %s' % sha256)
def effective_ttl(settings):
return settings.get('ttl', config.submissions.ttl)
def max_extracted(settings):
return settings.get('max_extracted', config.services.limits.max_extracted)
def max_supplementary(settings):
return settings.get('max_supplementary', config.services.limits.max_supplementary)
def ttl_to_expiry(ttl):
return now_as_iso(int(ttl) * 24 * 60 * 60)
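
# --- Illustrative sketch (not part of the original module) ---
# The module docstring describes the two-phase mode: presubmit tells the client
# whether the filestore already has the sample, the client uploads only on a
# cache miss, then issues the final submit. The helper below sketches that
# client-side flow against SubmissionWrapper (defined below). The
# classification, priority and submitter values are placeholders, and the
# sha256 is computed locally with hashlib rather than an assemblyline helper.
import hashlib


def _example_two_phase_submit(transport, storage, local_path, submitter="admin"):
    with open(local_path, "rb") as fh:
        sha256 = hashlib.sha256(fh.read()).hexdigest()

    # Phase 1: presubmit. 'exists' says whether the filestore has the file.
    pre = SubmissionWrapper.presubmit(transport, sha256,
                                      classification="UNRESTRICTED")
    if not pre['exists']:
        # Cache miss: transfer the file before issuing the final submit.
        transport.put(local_path, sha256)

    # Phase 2: submit. Dispatches the task and returns the raw task dict.
    return SubmissionWrapper.submit(
        transport, storage, sha256, local_path,
        priority=1000, submitter=submitter, classification="UNRESTRICTED")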
class SubmissionWrapper(object):
@classmethod
def check_exists(cls, transport, sha256_list):
log.debug("CHECK EXISTS (): %s", sha256_list)
existing = []
missing = []
for sha256 in sha256_list:
if not transport.exists(sha256):
missing.append(sha256)
else:
existing.append(sha256)
return {'existing': existing, 'missing': missing}
# noinspection PyBroadException
@classmethod
def identify(cls, transport, storage, sha256, **kw):
""" Identify a file. """
assert_valid_sha256(sha256)
classification = kw['classification']
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
# By the time identify is called, either the file was in our cache
        # and we refreshed its ttl or the client has successfully transferred
# the file to us.
local_path = transport.local_path(sha256)
if not local_path:
path = kw.get("path", None)
if path and os.path.exists(path):
local_path = path
if not transport.exists(sha256):
log.warning('File specified is not on server: %s %s.',
sha256, str(transport))
return None
temporary_path = fileinfo = None
try:
if not local_path:
temporary_path = tempfile.mktemp(prefix="submission.identify")
transport.download(sha256, temporary_path)
local_path = temporary_path
fileinfo = identify.fileinfo(local_path)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
finally:
if temporary_path:
try:
os.unlink(temporary_path)
except: # pylint: disable=W0702
pass
return fileinfo
@classmethod
def presubmit(cls, transport, sha256, **kw):
""" Execute a presubmit.
Checks if this file is already cached.
If not, it returns a location for the client to copy the file.
result dictionary example:
{ 'exists': False,
'sha256': u'012345678....9876543210',
'upload_path': u'/home/aluser/012345678....9876543210'
}
"""
log.debug("PRESUBMIT: %s", sha256)
assert_valid_sha256(sha256)
if transport.exists(sha256):
return SubmissionWrapper.result_dict(transport, sha256, True, None, kw)
# We don't have this file. Tell the client as much and tell it where
# to transfer the file before issuing the final submit.
log.debug('Cache miss. Client should transfer to %s', sha256)
return SubmissionWrapper.result_dict(transport, sha256, False, sha256, kw)
# noinspection PyBroadException
@classmethod
def submit(cls, transport, storage, sha256, path, priority, submitter, **kw):
""" Execute a submit.
Any kw are passed along in the dispatched request.
"""
assert_valid_sha256(sha256)
queue = forge.get_dispatch_queue()
classification = kw['classification']
kw['max_extracted'] = max_extracted(kw)
kw['max_supplementary'] = max_supplementary(kw)
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
# By the time submit is called, either the file was in our cache
        # and we refreshed its ttl or the client has successfully transferred
# the file to us.
local_path = transport.local_path(sha256)
if not transport.exists(sha256):
raise SubmissionException('File specified is not on server: %s %s.' % (sha256, str(transport)))
root_sha256 = sha256
temporary_path = massaged_path = None
try:
if not local_path:
temporary_path = tempfile.mktemp(prefix="submission.submit")
transport.download(sha256, temporary_path)
local_path = temporary_path
fileinfo = identify.fileinfo(local_path)
if fileinfo['sha256'] != sha256:
raise CorruptedFileStoreException('SHA256 mismatch between received '
'and calculated sha256. %s != %s' % (sha256, fileinfo['sha256']))
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
decode_file = forge.get_decode_file()
massaged_path, _, fileinfo, al_meta = decode_file(local_path, fileinfo)
if massaged_path:
local_path = massaged_path
sha256 = fileinfo['sha256']
transport.put(local_path, sha256)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
if fileinfo['size'] > max_size and not ignore_size:
msg = "File too large (%d > %d). Submission failed" % (fileinfo['size'], max_size)
raise SubmissionException(msg)
# We'll just merge the mandatory arguments, fileinfo, and any
# optional kw and pass those all on to the dispatch callback.
task_args = fileinfo
task_args.update(kw)
task_args.update({
'original_selected': kw.get('selected', []),
'root_sha256': root_sha256,
'srl': sha256,
'sha256': sha256,
'priority': priority,
'submitter': submitter,
'path': safe_str(path)})
if 'metadata' in task_args:
task_args['metadata'].update(al_meta)
else:
task_args['metadata'] = al_meta
submit_task = Task.create(**task_args)
if submit_task.is_initial():
storage.create_submission(
submit_task.sid,
submit_task.as_submission_record(),
[(os.path.basename(path), submit_task.srl)])
log.debug("Submission complete. Dispatching: %s", submit_task)
queue.send(submit_task, shards=SHARDS)
return submit_task.raw
finally:
if massaged_path:
try:
os.unlink(massaged_path)
except: # pylint:disable=W0702
pass
if temporary_path:
try:
os.unlink(temporary_path)
except: # pylint:disable=W0702
pass
@classmethod
def submit_inline(cls, storage, transport, file_paths, **kw):
""" Submit local samples to the submission service.
submit_inline can be used when the sample to submit is already
        local to the submission service. It does the presubmit, filestore
upload and submit.
Any kw are passed to the Task created to dispatch this submission.
"""
classification = kw['classification']
kw['max_extracted'] = max_extracted(kw)
kw['max_supplementary'] = max_supplementary(kw)
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
submissions = []
file_tuples = []
dispatch_request = None
# Generate static fileinfo data for each file.
for file_path in file_paths:
file_name = os.path.basename(file_path)
fileinfo = identify.fileinfo(file_path)
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
if fileinfo['size'] > max_size and not ignore_size:
msg = "File too large (%d > %d). Submission Failed" % \
(fileinfo['size'], max_size)
raise SubmissionException(msg)
decode_file = forge.get_decode_file()
temp_path, original_name, fileinfo, al_meta = \
decode_file(file_path, fileinfo)
if temp_path:
file_path = temp_path
if not original_name:
original_name = os.path.splitext(file_name)[0]
file_name = original_name
sha256 = fileinfo['sha256']
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
file_tuples.append((file_name, sha256))
if not transport.exists(sha256):
log.debug('File not on remote filestore. Uploading %s', sha256)
transport.put(file_path, sha256, location='near')
if temp_path:
os.remove(temp_path)
# We'll just merge the mandatory arguments, fileinfo, and any
# optional kw and pass those all on to the dispatch callback.
task_args = fileinfo
task_args['priority'] = 0 # Just a default.
task_args.update(kw)
task_args['srl'] = sha256
task_args['original_filename'] = file_name
task_args['path'] = file_name
if 'metadata' in task_args:
task_args['metadata'].update(al_meta)
else:
task_args['metadata'] = al_meta
dispatch_request = Task.create(**task_args)
submissions.append(dispatch_request)
storage.create_submission(
dispatch_request.sid,
dispatch_request.as_submission_record(),
file_tuples)
dispatch_queue = forge.get_dispatch_queue()
for submission in submissions:
dispatch_queue.submit(submission)
log.debug("Submission complete. Dispatched: %s", dispatch_request)
# Ugly - fighting with task to give UI something that makes sense.
file_result_tuples = \
zip(file_paths, [dispatch_request.raw for dispatch_request in submissions])
result = submissions[0].raw.copy()
fileinfos = []
for filename, result in file_result_tuples:
finfo = result['fileinfo']
finfo['original_filename'] = os.path.basename(filename)
finfo['path'] = finfo['original_filename']
fileinfos.append(finfo)
result['fileinfo'] = fileinfos
return result
# noinspection PyBroadException
@classmethod
def submit_multi(cls, storage, transport, files, **kw):
""" Submit all files into one submission
submit_multi can be used when all the files are already present in the
file storage.
files is an array of (name, sha256) tuples
Any kw are passed to the Task created to dispatch this submission.
"""
sid = str(uuid.uuid4())
classification = kw['classification']
kw['max_extracted'] = max_extracted(kw)
kw['max_supplementary'] = max_supplementary(kw)
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
submissions = []
temporary_path = None
dispatch_request = None
# Generate static fileinfo data for each file.
for name, sha256 in files:
local_path = transport.local_path(sha256)
if not transport.exists(sha256):
raise SubmissionException('File specified is not on server: %s %s.' % (sha256, str(transport)))
try:
if not local_path:
temporary_path = tempfile.mktemp(prefix="submission.submit_multi")
transport.download(sha256, temporary_path)
local_path = temporary_path
fileinfo = identify.fileinfo(local_path)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
decode_file = forge.get_decode_file()
massaged_path, new_name, fileinfo, al_meta = \
decode_file(local_path, fileinfo)
if massaged_path:
name = new_name
local_path = massaged_path
sha256 = fileinfo['sha256']
if not transport.exists(sha256):
transport.put(local_path, sha256)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
if fileinfo['size'] > max_size and not ignore_size:
msg = "File too large (%d > %d). Submission failed" % (fileinfo['size'], max_size)
raise SubmissionException(msg)
# We'll just merge the mandatory arguments, fileinfo, and any
                # optional kw and pass those all on to the dispatch callback.
sync.
:param user: requesting user
:param res_id: resource uuid
:param zip_with_rel_path: the zip file name with relative path under res_id collection to
be unzipped
:param bool_remove_original: a bool indicating whether original zip file will be deleted
after unzipping.
:param bool overwrite: a bool indicating whether to overwrite files on unzip
:return:
"""
if __debug__:
assert(zip_with_rel_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
zip_with_full_path = os.path.join(resource.root_path, zip_with_rel_path)
if not resource.supports_unzip(zip_with_rel_path):
raise ValidationError("Unzipping of this file is not supported.")
zip_fname = os.path.basename(zip_with_rel_path)
working_dir = os.path.dirname(zip_with_full_path)
unzip_path = None
try:
if overwrite:
# irods doesn't allow overwrite, so we have to check if a file exists, delete it and
# then write the new file. Aggregations are treated as single objects. If one file is
# overwritten in an aggregation, the whole aggregation is deleted.
# unzip to a temporary folder
unzip_path = istorage.unzip(zip_with_full_path, unzipped_folder=uuid4().hex)
# list all files to be moved into the resource
unzipped_files = listfiles_recursively(istorage, unzip_path)
unzipped_foldername = os.path.basename(unzip_path)
destination_folders = []
# list all folders to be written into the resource
for folder in listfolders(istorage, unzip_path):
destination_folder = os.path.join(working_dir, folder)
destination_folders.append(destination_folder)
# walk through each unzipped file, delete aggregations if they exist
for file in unzipped_files:
destination_file = _get_destination_filename(file, unzipped_foldername)
if (istorage.exists(destination_file)):
if resource.resource_type == "CompositeResource":
aggregation_object = resource.get_file_aggregation_object(
destination_file)
if aggregation_object:
if aggregation_object.is_single_file_aggregation:
aggregation_object.logical_delete(user)
else:
directory = os.path.dirname(destination_file)
# remove_folder expects path to start with 'data/contents'
directory = directory.replace(res_id + "/", "")
remove_folder(user, res_id, directory)
else:
logger.error("No aggregation object found for " + destination_file)
istorage.delete(destination_file)
else:
istorage.delete(destination_file)
# now move each file to the destination
for file in unzipped_files:
destination_file = _get_destination_filename(file, unzipped_foldername)
istorage.moveFile(file, destination_file)
# and now link them to the resource
res_files = []
for file in unzipped_files:
destination_file = _get_destination_filename(file, unzipped_foldername)
destination_file = destination_file.replace(res_id + "/", "")
destination_file = resource.get_irods_path(destination_file)
res_file = link_irods_file_to_django(resource, destination_file)
res_files.append(res_file)
# scan for aggregations
check_aggregations(resource, destination_folders, res_files)
istorage.delete(unzip_path)
else:
unzip_path = istorage.unzip(zip_with_full_path)
link_irods_folder_to_django(resource, istorage, unzip_path)
except Exception:
logger.exception("failed to unzip")
if unzip_path and istorage.exists:
istorage.delete(unzip_path)
raise
if bool_remove_original:
delete_resource_file(res_id, zip_fname, user)
# TODO: should check can_be_public_or_discoverable here
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
def _get_destination_filename(file, unzipped_foldername):
"""
Returns the destination file path by removing the temp unzipped_foldername from the file path.
Useful for moving files from a temporary unzipped folder to the resource outside of the
temporary folder.
:param file: path to a file
    :param unzipped_foldername: the name of the temporary folder the zip was extracted into
:return:
"""
split = file.split("/" + unzipped_foldername + "/", 1)
destination_file = os.path.join(split[0], split[1])
return destination_file
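
# --- Illustrative sketch (not part of the original module) ---
# _get_destination_filename() simply drops the temporary unzip folder from the
# path so files land beside the original zip. The resource path and folder name
# below are made up to make the behaviour concrete (POSIX path separators
# assumed).
def _example_destination_filename():
    tmp_folder = "a1b2c3d4e5f6"  # hypothetical uuid4().hex folder name
    unzipped = "resid/data/contents/" + tmp_folder + "/docs/readme.txt"
    dest = _get_destination_filename(unzipped, tmp_folder)
    assert dest == "resid/data/contents/docs/readme.txt"
    return dest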
def listfiles_recursively(istorage, path):
files = []
listing = istorage.listdir(path)
for file in listing[1]:
files.append(os.path.join(path, file))
for folder in listing[0]:
files = files + listfiles_recursively(istorage, os.path.join(path, folder))
return files
def listfolders(istorage, path):
return istorage.listdir(path)[0]
def create_folder(res_id, folder_path):
"""
create a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param res_id: resource uuid
:param folder_path: relative path for the new folder to be created under
res_id collection/directory
:return:
"""
if __debug__:
assert(folder_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
coll_path = os.path.join(resource.root_path, folder_path)
if not resource.supports_folder_creation(coll_path):
raise ValidationError("Folder creation is not allowed here. "
"The target folder seems to contain aggregation(s)")
# check for duplicate folder path
if istorage.exists(coll_path):
raise ValidationError("Folder already exists")
istorage.session.run("imkdir", None, '-p', coll_path)
def remove_folder(user, res_id, folder_path):
"""
remove a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param user: requesting user
:param res_id: resource uuid
:param folder_path: the relative path for the folder to be removed under res_id collection.
:return:
"""
if __debug__:
assert(folder_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
coll_path = os.path.join(resource.root_path, folder_path)
# TODO: Pabitra - resource should check here if folder can be removed
istorage.delete(coll_path)
remove_irods_folder_in_django(resource, istorage, coll_path, user)
resource.update_public_and_discoverable() # make private if required
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
def list_folder(res_id, folder_path):
"""
list a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param res_id: resource uuid
:param folder_path: the relative path for the folder to be listed under res_id collection.
:return:
"""
if __debug__:
assert(folder_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
coll_path = os.path.join(resource.root_path, folder_path)
return istorage.listdir(coll_path)
# TODO: modify this to take short paths not including data/contents
def move_or_rename_file_or_folder(user, res_id, src_path, tgt_path, validate_move_rename=True):
"""
Move or rename a file or folder in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param user: requesting user
:param res_id: resource uuid
:param src_path: the relative paths for the source file or folder under res_id collection
:param tgt_path: the relative paths for the target file or folder under res_id collection
:param validate_move_rename: if True, then only ask resource type to check if this action is
allowed. Sometimes resource types internally want to take this action but disallow
this action by a user. In that case resource types set this parameter to False to allow
this action.
:return:
Note: this utilizes partly qualified pathnames data/contents/foo rather than just 'foo'
"""
if __debug__:
assert(src_path.startswith("data/contents/"))
assert(tgt_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
src_full_path = os.path.join(resource.root_path, src_path)
tgt_full_path = os.path.join(resource.root_path, tgt_path)
if validate_move_rename:
# this must raise ValidationError if move/rename is not allowed by specific resource type
if not resource.supports_rename_path(src_full_path, tgt_full_path):
raise ValidationError("File/folder move/rename is not allowed.")
istorage.moveFile(src_full_path, tgt_full_path)
rename_irods_file_or_folder_in_django(resource, src_full_path, tgt_full_path)
if resource.resource_type == "CompositeResource":
org_aggregation_name = src_full_path[len(resource.file_path) + 1:]
new_aggregation_name = tgt_full_path[len(resource.file_path) + 1:]
resource.recreate_aggregation_xml_docs(org_aggregation_name, new_aggregation_name)
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
# TODO: modify this to take short paths not including data/contents
def rename_file_or_folder(user, res_id, src_path, tgt_path, validate_rename=True):
"""
Rename a file or folder in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param user: requesting user
:param res_id: resource uuid
:param src_path: the relative path for the source file or folder under res_id collection
:param tgt_path: the relative path for the target file or folder under res_id collection
:param validate_rename: if True, then only ask resource type to check if this action is
allowed. Sometimes resource types internally want to take this action but disallow
this action by a user. In that case resource types set this parameter to False to allow
this action.
:return:
Note: this utilizes partly qualified pathnames data/contents/foo rather than just 'foo'.
Also, this foregoes extensive antibugging of arguments because that is done in the
REST API.
"""
if __debug__:
assert(src_path.startswith("data/contents/"))
assert(tgt_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
src_full_path = os.path.join(resource.root_path, src_path)
tgt_full_path = os.path.join(resource.root_path, tgt_path)
if validate_rename:
# this must raise ValidationError if move/rename is not allowed by specific resource type
if not resource.supports_rename_path(src_full_path, tgt_full_path):
raise ValidationError("File rename is not allowed. "
"File seems to be part of an aggregation")
istorage.moveFile(src_full_path, tgt_full_path)
rename_irods_file_or_folder_in_django(resource, src_full_path, tgt_full_path)
if resource.resource_type == "CompositeResource":
org_aggregation_name = src_full_path[len(resource.file_path) + 1:]
new_aggregation_name = tgt_full_path[len(resource.file_path) + 1:]
resource.recreate_aggregation_xml_docs(org_aggregation_name, new_aggregation_name)
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
# TODO: modify this to take short paths not including data/contents
def move_to_folder(user, res_id, src_paths, tgt_path, validate_move=True):
"""
Move a file or folder to a folder in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param user: requesting user
:param res_id: resource uuid
:param src_paths: the relative paths for the source files and/or folders under res_id collection
:param tgt_path: the relative path for the target folder under res_id collection
:param validate_move: if True, then only ask resource type to check if this action is
allowed. Sometimes resource types internally want to take this action but disallow
this action by a user. In that case resource types set this parameter to False to allow
this action.
:return:
Note: this utilizes partly qualified pathnames data/contents/foo rather than just 'foo'
Also, this foregoes extensive antibugging of arguments because that is done in the
REST API.
"""
if __debug__:
for s in src_paths:
assert(s.startswith('data/contents/'))
assert(tgt_path == 'data/contents' or tgt_path.startswith("data/contents/"))
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
tgt_full_path = os.path.join(resource.root_path, tgt_path)
if validate_move:
# this must raise ValidationError if move is not allowed by specific resource type
for src_path in src_paths:
src_full_path = os.path.join(resource.root_path, src_path)
if not resource.supports_rename_path(src_full_path, tgt_full_path):
raise ValidationError("File/folder move is not allowed. "
"Target folder seems to | |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Deeptext dataset"""
from __future__ import division
import os
import numpy as np
from numpy import random
import cv2
import mindspore.dataset as de
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as CC
import mindspore.common.dtype as mstype
from mindspore.mindrecord import FileWriter
from model_utils.config import config
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1(ndarray): shape (n, 4)
bboxes2(ndarray): shape (k, 4)
mode(str): iou (intersection over union) or iof (intersection
over foreground)
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(
y_end - y_start + 1, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
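
# --- Illustrative sketch (not part of the original dataset module) ---
# bbox_overlaps() works with inclusive pixel coordinates, so the box
# (0, 0, 9, 9) is 10x10 pixels. The toy boxes below are made up to show the
# (n, k) IoU matrix it returns.
def _example_bbox_overlaps():
    boxes_a = np.array([[0, 0, 9, 9]], dtype=np.float32)
    boxes_b = np.array([[0, 0, 9, 9],      # identical box     -> IoU 1.0
                        [5, 5, 14, 14]],   # 5x5 pixel overlap -> 25 / 175
                       dtype=np.float32)
    ious = bbox_overlaps(boxes_a, boxes_b)
    assert ious.shape == (1, 2)
    assert np.allclose(ious, [[1.0, 25.0 / 175.0]])
    return ious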
class PhotoMetricDistortion:
"""Photo Metric Distortion"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def __call__(self, img, boxes, labels):
# random brightness
img = img.astype('float32')
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
        # mode == 1 --> do random contrast first (before the HSV adjustments)
        # mode == 0 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
return img, boxes, labels
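
# --- Illustrative sketch (not part of the original dataset module) ---
# PhotoMetricDistortion only jitters pixel values (brightness, contrast,
# saturation, hue, channel order); boxes and labels pass through untouched.
# The random image and single box below are stand-ins for a real sample.
def _example_photo_metric_distortion():
    img = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.uint8)
    boxes = np.array([[4.0, 6.0, 30.0, 40.0]], dtype=np.float32)
    labels = np.array([1], dtype=np.int32)
    aug_img, aug_boxes, aug_labels = PhotoMetricDistortion()(img, boxes, labels)
    assert aug_img.shape == img.shape
    assert np.array_equal(aug_boxes, boxes) and np.array_equal(aug_labels, labels)
    return aug_img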
class Expand:
"""expand image"""
def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
def __call__(self, img, boxes, labels):
if random.randint(2):
return img, boxes, labels
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
img = expand_img
boxes += np.tile((left, top), 2)
return img, boxes, labels
def resize_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""resize operation for image"""
img_data = img
h, w = img_data.shape[:2]
img_data = cv2.resize(
img_data, (config.img_width, config.img_height), interpolation=cv2.INTER_LINEAR)
h_scale = config.img_height / h
w_scale = config.img_width / w
scale_factor = np.array(
[w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
img_shape = (config.img_height, config.img_width, 1.0)
img_shape = np.asarray(img_shape, dtype=np.float32)
gt_bboxes = gt_bboxes * scale_factor
gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def resize_column_test(img, img_shape, gt_bboxes, gt_label, gt_num):
"""resize operation for image of eval"""
img_data = img
h, w = img_data.shape[:2]
img_data = cv2.resize(
img_data, (config.img_width, config.img_height), interpolation=cv2.INTER_LINEAR)
h_scale = config.img_height / h
w_scale = config.img_width / w
scale_factor = np.array(
[w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
img_shape = (config.img_height, config.img_width)
img_shape = np.append(img_shape, (h_scale, w_scale))
img_shape = np.asarray(img_shape, dtype=np.float32)
gt_bboxes = gt_bboxes * scale_factor
gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def impad_to_multiple_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""impad operation for image"""
img_data = cv2.copyMakeBorder(img,
0, config.img_height - img.shape[0], 0, config.img_width - img.shape[1],
cv2.BORDER_CONSTANT,
value=0)
img_data = img_data.astype(np.float32)
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def imnormalize_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""imnormalize operation for image"""
mean = np.asarray([123.675, 116.28, 103.53])
std = np.asarray([58.395, 57.12, 57.375])
img_data = img.copy().astype(np.float32)
cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB, img_data) # inplace
cv2.subtract(img_data, np.float64(mean.reshape(1, -1)), img_data) # inplace
cv2.multiply(img_data, 1 / np.float64(std.reshape(1, -1)), img_data) # inplace
img_data = img_data.astype(np.float32)
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def flip_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""flip operation for image"""
img_data = img
img_data = np.flip(img_data, axis=1)
flipped = gt_bboxes.copy()
_, w, _ = img_data.shape
flipped[..., 0::4] = w - gt_bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - gt_bboxes[..., 0::4] - 1
return (img_data, img_shape, flipped, gt_label, gt_num)
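
# --- Illustrative sketch (not part of the original dataset module) ---
# flip_column() mirrors the image horizontally and remaps the box x-coordinates
# as x' = width - x - 1, which keeps inclusive-coordinate boxes inside the
# image. The 10-pixel-wide toy image and single box are made up for the demo.
def _example_flip_column():
    img = np.zeros((4, 10, 3), dtype=np.float32)           # height 4, width 10
    img_shape = np.array([4, 10, 1.0], dtype=np.float32)
    boxes = np.array([[2, 3, 5, 6]], dtype=np.float32)     # x1, y1, x2, y2
    labels = np.array([1], dtype=np.int32)
    valid = np.array([1], dtype=np.int32)
    _, _, flipped, _, _ = flip_column(img, img_shape, boxes, labels, valid)
    assert np.array_equal(flipped, np.array([[4, 3, 7, 6]], dtype=np.float32))
    return flipped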
def flipped_generation(img, img_shape, gt_bboxes, gt_label, gt_num):
"""flipped generation"""
img_data = img
flipped = gt_bboxes.copy()
_, w, _ = img_data.shape
flipped[..., 0::4] = w - gt_bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - gt_bboxes[..., 0::4] - 1
return (img_data, img_shape, flipped, gt_label, gt_num)
def image_bgr_rgb(img, img_shape, gt_bboxes, gt_label, gt_num):
img_data = img[:, :, ::-1]
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def transpose_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""transpose operation for image"""
img_data = img.transpose(2, 0, 1).copy()
img_data = img_data.astype(np.float32)
img_shape = img_shape.astype(np.float32)
gt_bboxes = gt_bboxes.astype(np.float32)
gt_label = gt_label.astype(np.int32)
gt_num = gt_num.astype(np.bool)
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def photo_crop_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""photo crop operation for image"""
random_photo = PhotoMetricDistortion()
img_data, gt_bboxes, gt_label = random_photo(img, gt_bboxes, gt_label)
return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def expand_column(img, img_shape, gt_bboxes, gt_label, gt_num):
"""expand operation for image"""
expand = Expand()
img, gt_bboxes, gt_label = expand(img, gt_bboxes, gt_label)
return (img, img_shape, gt_bboxes, gt_label, gt_num)
def preprocess_fn(image, box, is_training):
"""Preprocess function for dataset."""
def _infer_data(image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert):
image_shape = image_shape[:2]
input_data = image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert
input_data = resize_column_test(*input_data)
input_data = image_bgr_rgb(*input_data)
output_data = input_data
return output_data
def _data_aug(image, box, is_training):
"""Data augmentation function."""
image_bgr = image.copy()
image_bgr[:, :, 0] = image[:, :, 2]
image_bgr[:, :, 1] = image[:, :, 1]
image_bgr[:, :, 2] = image[:, :, 0]
image_shape = image_bgr.shape[:2]
gt_box = box[:, :4]
gt_label = box[:, 4]
gt_iscrowd = box[:, 5]
pad_max_number = 128
if box.shape[0] < 128:
gt_box_new = np.pad(gt_box, ((0, pad_max_number - box.shape[0]), (0, 0)), mode="constant",
constant_values=0)
gt_label_new = np.pad(gt_label, ((0, pad_max_number - box.shape[0])), mode="constant", constant_values=-1)
gt_iscrowd_new = np.pad(gt_iscrowd, ((0, pad_max_number - box.shape[0])), mode="constant",
constant_values=1)
else:
gt_box_new = gt_box[0:pad_max_number]
gt_label_new = gt_label[0:pad_max_number]
gt_iscrowd_new = gt_iscrowd[0:pad_max_number]
gt_iscrowd_new_revert = (~(gt_iscrowd_new.astype(np.bool))).astype(np.int32)
if not is_training:
return _infer_data(image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert)
input_data = image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert
expand = (np.random.rand() < config.expand_ratio)
if expand:
input_data = expand_column(*input_data)
input_data = photo_crop_column(*input_data)
input_data = resize_column(*input_data)
input_data = image_bgr_rgb(*input_data)
output_data = input_data
return output_data
return _data_aug(image, box, is_training)
def get_imgs_and_annos(img_dir, txt_dir, image_files, image_anno_dict):
img_basenames = []
for file in os.listdir(img_dir):
        # Filter out gif files.
if 'gif' not in file:
img_basenames.append(os.path.basename(file))
img_names = []
for item in img_basenames:
temp1, _ = os.path.splitext(item)
img_names.append((temp1, item))
for img, img_basename in img_names:
image_path = img_dir + '/' + img_basename
annos = []
# Parse annotation of dataset in paper.
if len(img) == 6 and '_' not in img_basename:
gt = open(txt_dir + '/' + img + '.txt').read().splitlines()
if img.isdigit() and int(img) > 1200:
continue
for img_each_label in gt:
spt = img_each_label.replace(',', '').split(' ')
if ' ' not in img_each_label:
spt = img_each_label.split(',')
# adapter/i_o.py
import os
import numpy as np
import pandas as pd
from adapter.to_python import Excel, Db, Db_sqlalchemy, Debugger
from adapter.label_map import Labels
from adapter.comm.tools import convert_network_drive_path
from datetime import datetime
import re
import sqlite3
from shutil import copy
import ntpath
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
from pdb import set_trace as bp
class IO(object):
"""Connects to the main input
file that can be an excel sheet,
a csv file or a database,
loads the data, looks for
additional input data paths
and loops through those to
get data as well. Saves a
full input DB and provides the
user with output and db paths,
and database connections. It allows
for large tables to be only queried and
not loaded in python.
Parameters:
path : string
Path to the initial input table. An
initial input table can be of
any type (.xlsx, .csv, .db) and can
contain pointers to further input
files. It is recommended to have
a `run_parameters` table (see test
folders on the master branch
of the adapter repo for examples)
that provides an output path and a
version substring.
        os_mapping: list
list of 2-tuples where 0th entry of each
tuple is the name of a windows network drive
location (e.g. "A:") and the 1st entry is OSX
network drive location (e.g. "/Volumes/A").
Defaults to [("X:","/Volumes/my_folder")].
"""
def __init__(
self,
path,
os_mapping=[("X:", "/Volumes/my_drive")]):
self.os_mapping = os_mapping
path = convert_network_drive_path(path, mapping=os_mapping)
self.input_path = path
if isinstance(path, str):
self.input_type = self.get_file_type(path)
# set labels
self.la = Labels().set_labels()
def get_file_type(self, path):
"""Extracts the file type from the fullpath.
Parameters:
path : string
File path
"""
extns = re.split(r"\.", path)[-1]
file_type = ""
if extns == "xlsx":
file_type += "excel"
elif (extns == "db") or (extns == "sqlite"):
file_type += "database"
# *mig add more file extension checks
# as needed
elif extns == "csv":
file_type += "text"
# If the path contains lbl.gov, it is likely a database that can be used with sqlalchemy
elif "lbl.gov" in path:
file_type += "sqlalchemy"
else:
msg = "Passed an unsupported input file type: {}."
log.error(msg.format(extns))
return file_type
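    # Illustrative mapping (not part of the original file), following the
    # branches above:
    #   "inputs.xlsx"                  -> "excel"
    #   "run.db" / "run.sqlite"        -> "database"
    #   "data.csv"                     -> "text"
    #   "postgresql://db.lbl.gov/..."  -> "sqlalchemy"
    # Any other extension logs an error and returns an empty string.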
def load(
self,
create_db=True,
db_flavor="sqlite",
close_db=True,
save_input=True,
set_first_col_as_index=False,
quick_db_out_filename=None,
clean_labels=True,
to_numeric=None,
):
"""Loads tables from the input file
as a dictionary of python dataframes.
Recognizes any special table names, such
as:
- `run_parameters`, that specifies the output
path, alongside to some further analysis related
specifiers.
- `inputs_from_files`, that specifies a list of
additional input files of file types: csv, excel, db.
See examples in the test folders on the master
branch of the adapter repo for details on the
structure and labels of the table.
Parameters:
create_db: bool
Write all tables read from input files
into a run database
db_flavor: string
Database type. Currently implemented:
'sqlite'
close_db: bool
True: close the database that got
created, False: keep the database open
save_input: bool
Save initial input file under the output
folder
set_first_col_as_index: bool or list of strings
True: Set index for all tables
False: do not set the first column as index
for any tables
List of strings: List of tables that need
their first column set as index
quick_db_out_filename: string, defaults to None
Output filename without the
file extension if one does not
want to use the version tag and outpath
as provided in a run parameters table when
saving a database.
Use with caution as there is no run tag
or timestamp included. This may be useful when
quickly converting an excel file with tables
and named ranges into a database.
to_numeric: list
List of string table names where
values should be converted to
numeric where possible
clean_labels: bool
Process table columns to remove trailing whitespaces
Returns:
res : dict
Keys:
'tables_as_dict_of_dfs' - all input
tables loaded in python as dictionary
of dataframes
'outpath' - output folder path
'run_tag' - version + analysis start time
If db got written:
'db_path' - database fullpath
'db_conn' - database connection
"""
dict_of_dfs = self.get_tables(self.input_path)
# are there any further input files?
# if that is the case, the file paths and further info
# should be placed in an `inputs_from_files` table
qry_flags = dict()
if self.la["extra_files"] in dict_of_dfs.keys():
extra_files = dict_of_dfs[self.la["extra_files"]].reset_index()
for inx in extra_files.index:
file_path = extra_files.loc[inx, self.la["inpath"]].strip()
table_names = extra_files.loc[inx, self.la["tbl_nam"]]
if isinstance(table_names, str):
table_names = re.split(",", table_names)
table_names = [i.strip() for i in table_names]
qry_flags[file_path] = extra_files.loc[inx, self.la["query"]]
if (qry_flags[file_path] is not None) and isinstance(
table_names, str
):
qry_flags[file_path] = re.split(",", qry_flags[file_path])
qry_flags[file_path] = [
i.strip() for i in qry_flags[file_path]
]
dict_of_dfs.update(
self.get_tables(
file_path,
table_names=table_names,
query_only=qry_flags[file_path],
pre_existing_keys=dict_of_dfs.keys(),
)
)
else:
qry_flags = None
# define output path for the analysis run
if quick_db_out_filename is None:
# look for `run_parameters` table to extract the outpath
# and the version
# `run_parameters` table should occur only in one
# of the input files, and only once, so if multiple
# run_parameters{any_text} tables are found, then
# the first one, when names sorted alphabetically,
# will be used
run_params_table = []
for key in dict_of_dfs.keys():
if self.la["run_pars"] in key:
msg = "Identified run parameters table named {}"
log.info(msg.format(key))
run_params_table.append(key)
if len(run_params_table) > 1:
run_params_table.sort()
msg = (
"Run parameters table named {} will be used to set the "
"outpath and the version. "
"Additional run parameters table(s) named: {} "
"will be ignored. Please make sure to remove any"
" unwanted run_parameters tables from the inputs."
)
log.warning(
msg.format(run_params_table[0], run_params_table[1:])
)
if len(run_params_table) != 0:
outpath = convert_network_drive_path(
dict_of_dfs[run_params_table[0]].loc[
0, self.la["outpath"]
],
mapping=self.os_mapping,
)
outpath_base = os.path.join(
os.getcwd(),
outpath,
)
version = dict_of_dfs[run_params_table[0]].loc[
0, self.la["version"]
]
if not isinstance(version, str):
    # If it was read in as a number, as occurs in the test_input.xlsx on OSX,
    # a version such as "123" may arrive as the float 123.0; drop only the
    # trailing ".0" so that legitimate trailing zeros (e.g. "120") survive
    # the period stripping below.
    version = str(version)
    if version.endswith(".0"):
        version = version[:-2]
# Removing '.', '\', '/' characters from version
# to avoid any errors during writing output
version = re.sub(r"[\\./]", "", version)
self.version = version
# otherwise declare current folder + "/output" as the output
# path
else:
outpath_base = os.path.join(os.getcwd(), "output")
version = ""
run_tag = (
version + "_" + datetime.now().strftime("%Y_%m_%d-%Hh_%Mm")
)
outpath = os.path.join(outpath_base, run_tag)
else:
outpath = os.getcwd()
run_tag = quick_db_out_filename
if not os.path.exists(outpath):
os.makedirs(outpath)
if save_input and isinstance(self.input_path, str):
# self.input_path
filename = ntpath.basename(self.input_path)
filename_extns = re.split(r"\.", filename)[-1]
filename_only = re.split(r"\.", filename)[0]
versioned_filename = (
filename_only + "_" + run_tag + "." + filename_extns
)
copy(self.input_path, os.path.join(outpath, versioned_filename))
if create_db == True:
try:
db_res = self.create_db(
dict_of_dfs,
outpath=outpath,
run_tag=run_tag,
flavor=db_flavor,
close=close_db,
)
except Exception:
msg = (
"Not able to create a db of tables "
"that were read in from {}."
)
log.error(msg.format(self.input_path))
if set_first_col_as_index != False:
dict_of_dfs = self.first_col_to_index(
dict_of_dfs, table_names=set_first_col_as_index, drop=True
)
# value type conversion for any tables listed
# as to_numeric and to_float
if to_numeric is not None:
if isinstance(to_numeric, list):
pass
else:
msg = (
"{} passed for to_numeric kwarg."
"Only None or a list of strings are supported."
)
log.error(msg.format(to_numeric))
for key in dict_of_dfs.keys():
if any([key in tb_nm for tb_nm in to_numeric]):
dict_of_dfs[key] = dict_of_dfs[key].apply(
pd.to_numeric, errors="ignore", axis=1
)
res = dict()
res["tables_as_dict_of_dfs"] = dict_of_dfs
res["outpath"] = outpath
res["run_tag"] = run_tag
if create_db == True:
res.update(db_res)
if clean_labels == True:
input_tables_list = res["tables_as_dict_of_dfs"]
for table in input_tables_list:
table_columns = input_tables_list[table].columns
clean_cols = self.process_column_labels(table_columns)
input_tables_list[table].columns = clean_cols
msg = "All table column labels were processed to remove undesired whitespaces."
log.info(msg)
return res
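    # Minimal usage sketch (illustrative; the path below is hypothetical):
    #   io = IO("inputs/test_input.xlsx")
    #   res = io.load(create_db=True, db_flavor="sqlite")
    #   tables = res["tables_as_dict_of_dfs"]      # dict of DataFrames
    #   outpath, run_tag = res["outpath"], res["run_tag"]
    #   db_conn = res.get("db_conn")               # only present if a db was created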
def get_tables(
self,
file_path,
table_names=None,
query_only=None,
pre_existing_keys=None,
):
"""Gets all tables from an input
file. Creates a dictionary
of pandas dataframes, with each dataframe
corresponding to one of the input tables.
If it is a csv file the contents
get loaded as a single table with the
dictionary key being the name of the
file
Parameters:
file_path: str or None
Input file path
No data is read in when None,
creates
# Copyright (C) 2015 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rest_framework.response import Response
from adjutant.common.user_store import IdentityManager
from adjutant.api.models import Task
from django.utils import timezone
from adjutant.api import utils
from adjutant.api.v1.views import APIViewWithLogger
from adjutant.api.v1.utils import (
send_stage_email, create_notification, create_token, create_task_hash,
add_task_id_for_roles)
from adjutant.exceptions import SerializerMissingException
from django.conf import settings
class TaskView(APIViewWithLogger):
"""
Base class for api calls that start a Task.
'default_actions' is a required hardcoded field.
The default_actions are considered the primary actions and
will always run first (in the given order). Additional actions
are defined in the settings file and will run in the order supplied,
but after the default_actions.
Default actions can be overridden in the settings file as well if
needed.
"""
default_actions = []
def get(self, request):
"""
The get method will return a json listing the actions this
view will run, and the data fields that those actions require.
"""
class_conf = settings.TASK_SETTINGS.get(
self.task_type, settings.DEFAULT_TASK_SETTINGS)
actions = (
class_conf.get('default_actions', [])
or self.default_actions[:])
actions += class_conf.get('additional_actions', [])
required_fields = []
for action in actions:
action_class, action_serializer = settings.ACTION_CLASSES[action]
for field in action_class.required:
if field not in required_fields:
required_fields.append(field)
return Response({'actions': actions,
'required_fields': required_fields})
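    # Illustrative response shape (example values, not from the codebase):
    # for a task view whose configured actions are ["NewProjectWithUserAction"],
    # the GET above returns roughly
    #   {"actions": ["NewProjectWithUserAction"],
    #    "required_fields": ["project_name", "email"]}
    # where the field names come entirely from the action classes' `required`
    # attributes.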
def _instantiate_action_serializers(self, request, class_conf):
action_serializer_list = []
action_names = (
class_conf.get('default_actions', [])
or self.default_actions[:])
action_names += class_conf.get('additional_actions', [])
# instantiate all action serializers and check validity
valid = True
for action_name in action_names:
action_class, serializer_class = \
settings.ACTION_CLASSES[action_name]
# instantiate serializer class
if not serializer_class:
raise SerializerMissingException(
"No serializer defined for action %s" % action_name)
serializer = serializer_class(data=request.data)
action_serializer_list.append({
'name': action_name,
'action': action_class,
'serializer': serializer})
if serializer and not serializer.is_valid():
valid = False
if not valid:
errors = {}
for action in action_serializer_list:
if action['serializer']:
errors.update(action['serializer'].errors)
return {'errors': errors}, 400
return action_serializer_list
def _handle_duplicates(self, class_conf, hash_key):
duplicate_tasks = Task.objects.filter(
hash_key=hash_key,
completed=0,
cancelled=0)
if not duplicate_tasks:
return False
duplicate_policy = class_conf.get("duplicate_policy", "")
if duplicate_policy == "cancel":
self.logger.info(
"(%s) - Task is a duplicate - Cancelling old tasks." %
timezone.now())
for task in duplicate_tasks:
task.cancelled = True
task.save()
return False
self.logger.info(
"(%s) - Task is a duplicate - Ignoring new task." %
timezone.now())
return (
{'errors': ['Task is a duplicate of an existing task']},
409)
def process_actions(self, request):
"""
Will ensure the request data contains the required data
based on the action serializer, and if present will create
a Task and the linked actions, attaching notes
based on running of the pre_approve validation
function on all the actions.
If during the pre_approve step at least one of the actions
sets auto_approve to True, and none of them set it to False
the approval steps will also be run.
"""
class_conf = settings.TASK_SETTINGS.get(
self.task_type, settings.DEFAULT_TASK_SETTINGS)
# Action serializers
action_serializer_list = self._instantiate_action_serializers(
request, class_conf)
if isinstance(action_serializer_list, tuple):
return action_serializer_list
hash_key = create_task_hash(self.task_type, action_serializer_list)
# Handle duplicates
duplicate_error = self._handle_duplicates(class_conf, hash_key)
if duplicate_error:
return duplicate_error
# Instantiate Task
ip_address = request.META['REMOTE_ADDR']
keystone_user = request.keystone_user
task = Task.objects.create(
ip_address=ip_address,
keystone_user=keystone_user,
project_id=keystone_user.get('project_id'),
task_type=self.task_type,
hash_key=hash_key)
task.save()
# Instantiate actions with serializers
action_instances = []
for i, action in enumerate(action_serializer_list):
data = action['serializer'].validated_data
# construct the action class
action_instances.append(action['action'](
data=data,
task=task,
order=i
))
# We run pre_approve on the actions once we've setup all of them.
for action_instance in action_instances:
try:
action_instance.pre_approve()
except Exception as e:
return self._handle_task_error(
e, task, error_text='while setting up task')
# send initial confirmation email:
email_conf = class_conf.get('emails', {}).get('initial', None)
send_stage_email(task, email_conf)
approve_list = [act.auto_approve for act in action_instances]
# TODO(amelia): It would be nice to explicitly test this, however
# currently we don't have the right combinations of
# actions to allow for it.
if False in approve_list:
can_auto_approve = False
elif True in approve_list:
can_auto_approve = True
else:
can_auto_approve = False
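        # Illustrative outcomes of the auto-approve rule above:
        #   per-action auto_approve values  ->  can_auto_approve
        #   [True, True]                    ->  True
        #   [True, None]                    ->  True
        #   [True, False]                   ->  False  (any explicit False vetoes)
        #   [None, None] or []              ->  False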
if can_auto_approve:
task_name = self.__class__.__name__
self.logger.info("(%s) - AutoApproving %s request."
% (timezone.now(), task_name))
approval_data, status = self.approve(request, task)
# Additional information that would be otherwise expected
approval_data['task'] = task
approval_data['auto_approved'] = True
return approval_data, status
return {'task': task}, 200
def _create_token(self, task):
token = create_token(task)
try:
class_conf = settings.TASK_SETTINGS.get(
self.task_type, settings.DEFAULT_TASK_SETTINGS)
# will throw a key error if the token template has not
# been specified
email_conf = class_conf['emails']['token']
send_stage_email(task, email_conf, token)
return {'notes': ['created token']}, 200
except KeyError as e:
return self._handle_task_error(
e, task, error_text='while sending token')
def approve(self, request, task):
"""
Approves the task and runs the post_approve steps.
Will create a token if required, otherwise will run the
submit steps.
"""
# cannot approve an invalid task
action_models = task.actions
actions = [act.get_action() for act in action_models]
valid = all([act.valid for act in actions])
if not valid:
return {'errors': ['actions invalid']}, 400
# TODO(amelia): get action invalidation reasons
# We approve the task before running actions,
# that way if something goes wrong we know if it was approved,
# when it was approved, and who approved it.
task.approved = True
task.approved_on = timezone.now()
task.approved_by = request.keystone_user
task.save()
need_token = False
# post_approve all actions
for action in actions:
try:
action.post_approve()
except Exception as e:
return self._handle_task_error(
e, task, error_text='while approving task')
valid = all([act.valid for act in actions])
if not valid:
return {'errors': ['actions invalid']}, 400
need_token = any([act.need_token for act in actions])
if need_token:
return self._create_token(task)
# submit all actions
for action in actions:
try:
action.submit({})
except Exception as e:
self._handle_task_error(
e, task, error_text='while submitting task')
task.completed = True
task.completed_on = timezone.now()
task.save()
# Sending confirmation email:
class_conf = settings.TASK_SETTINGS.get(
self.task_type, settings.DEFAULT_TASK_SETTINGS)
email_conf = class_conf.get(
'emails', {}).get('completed', None)
send_stage_email(task, email_conf)
return {'notes': ["Task completed successfully."]}, 200
# NOTE(adriant): We should deprecate these TaskViews properly and switch tests
# to work against the openstack ones. One option is making these abstract
# classes, so we retain the code here, but make them useless without extension.
class CreateProject(TaskView):
task_type = "create_project"
default_actions = ["NewProjectWithUserAction", ]
def post(self, request, format=None):
"""
Unauthenticated endpoint bound primarily to NewProjectWithUser.
This process requires approval, so this will validate
incoming data and create a task to be approved
later.
"""
self.logger.info("(%s) - Starting new project task." %
timezone.now())
class_conf = settings.TASK_SETTINGS.get(self.task_type, {})
# we need to set the region the resources will be created in:
request.data['region'] = class_conf.get('default_region')
# domain
request.data['domain_id'] = class_conf.get(
'default_domain_id', 'default')
# parent_id for new project, if null defaults to domain:
request.data['parent_id'] = class_conf.get('default_parent_id')
processed, status = self.process_actions(request)
errors = processed.get('errors', None)
if errors:
self.logger.info("(%s) - Validation errors with task." %
timezone.now())
return Response({'errors': errors}, status=status)
notes = {
'notes':
['New task for CreateProject.']
}
create_notification(processed['task'], notes)
self.logger.info("(%s) - Task created." % timezone.now())
response_dict = {'notes': ['task created']}
add_task_id_for_roles(request, processed, response_dict, ['admin'])
return Response(response_dict, status=status)
class InviteUser(TaskView):
task_type = "invite_user"
default_actions = ['MocNewUserAction', ]
@utils.mod_or_admin
def get(self, request):
return super(InviteUser, self).get(request)
@utils.mod_or_admin
def post(self, request, format=None):
"""
Invites a user to the current tenant.
This endpoint requires either Admin access or the
request to come from a project_admin|project_mod.
As such this Task is considered pre-approved.
"""
self.logger.info("(%s) - New AttachUser request." % timezone.now())
# Default project_id to the keystone user's project
if ('project_id' not in request.data
or request.data['project_id'] is None):
request.data['project_id'] = request.keystone_user['project_id']
# Default domain_id to the keystone user's project
if ('domain_id' not in request.data
or request.data['domain_id'] is None):
request.data['domain_id'] = \
request.keystone_user['project_domain_id']
processed, status = self.process_actions(request)
errors = processed.get('errors', None)
if errors:
self.logger.info("(%s) - Validation errors with task." %
timezone.now())
return Response({'errors': errors}, status=status)
response_dict = {'notes': processed['notes']}
add_task_id_for_roles(request, processed, response_dict, ['admin'])
return Response(response_dict, status=status)
class ResetPassword(TaskView):
task_type = "reset_password"
default_actions = ['ResetUserPasswordAction', ]
@utils.minimal_duration(min_time=3)
def post(self, request, format=None):
"""
Unauthenticated endpoint bound to the password reset action.
This
import mock
import mpi4py.MPI
import numpy as np
import pytest
import unittest
import chainer
import chainer.cuda
import chainer.initializers
import chainer.links
import chainer.testing
import chainer.testing.attr
import chainermn
from chainermn.communicators import _communication_utility
from chainermn.communicators.flat_communicator \
import FlatCommunicator
from chainermn.communicators.hierarchical_communicator \
import HierarchicalCommunicator
from chainermn.communicators.naive_communicator \
import NaiveCommunicator
from chainermn.communicators.non_cuda_aware_communicator \
import NonCudaAwareCommunicator
from chainermn.communicators.pure_nccl_communicator \
import PureNcclCommunicator
from chainermn.communicators.single_node_communicator \
import SingleNodeCommunicator
from chainermn.communicators.two_dimensional_communicator \
import TwoDimensionalCommunicator
from chainermn import nccl
class ExampleModel(chainer.Chain):
def __init__(self, dtype=None):
W = None
bias = None
if dtype is not None:
self.dtype = dtype
W = chainer.initializers.Normal(dtype=self.dtype)
bias = chainer.initializers.Zero(dtype=self.dtype)
super(ExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3, initialW=W, initial_bias=bias)
self.b = chainer.links.Linear(3, 4, initialW=W, initial_bias=bias)
self.c = chainer.links.Linear(None, 5, initialW=W,
initial_bias=bias)
class ExampleMixedModel(chainer.Chain):
def __init__(self):
W16 = chainer.initializers.Normal(dtype=np.float16)
W32 = chainer.initializers.Normal(dtype=np.float32)
bias16 = chainer.initializers.Zero(dtype=np.float16)
bias32 = chainer.initializers.Zero(dtype=np.float32)
super(ExampleMixedModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3, initialW=W32,
initial_bias=bias32)
self.b = chainer.links.Linear(3, 4, initialW=W16,
initial_bias=bias16)
self.c = chainer.links.Linear(None, 5, initialW=W16,
initial_bias=bias32)
class Param(object):
def __init__(self, param):
self.gpu = False
self.nccl1 = False
self.model_dtype = None
self.allreduce_grad_dtype = None
self.batched_copy = False
self.global_dtype = None
self.__dict__.update(param)
def __repr__(self):
import pprint
return pprint.pformat(self.__dict__)
cpu_params = [Param(p) for p in [
{
'communicator_class': NaiveCommunicator,
'multi_node': True,
}]]
gpu_params = [Param(p) for p in [
{
'communicator_class': NaiveCommunicator,
'multi_node': True,
}, {
'communicator_class': NaiveCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': HierarchicalCommunicator,
'multi_node': True,
}, {
'communicator_class': HierarchicalCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': TwoDimensionalCommunicator,
'multi_node': True,
}, {
'communicator_class': TwoDimensionalCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': SingleNodeCommunicator,
'multi_node': False,
}, {
'communicator_class': SingleNodeCommunicator,
'model_dtype': np.float16,
'multi_node': False,
}, {
'communicator_class': NonCudaAwareCommunicator,
'multi_node': True,
}, {
'communicator_class': NonCudaAwareCommunicator,
'model_dtype': np.float16,
'multi_node': False,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float64,
'allreduce_grad_dtype': np.float64,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float16,
'batched_copy': True,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float32,
'batched_copy': True,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float32,
'allreduce_grad_dtype': np.float32,
'batched_copy': True,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float32,
'allreduce_grad_dtype': np.float16,
'batched_copy': True,
}]]
gpu_mixed_dtype_params = [Param(p) for p in [
{
'communicator_class': NonCudaAwareCommunicator,
'multi_node': True,
}, {
'communicator_class': NaiveCommunicator,
'multi_node': True,
}, {
'communicator_class': TwoDimensionalCommunicator,
'multi_node': True,
}, {
'communicator_class': HierarchicalCommunicator,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'multi_node': True,
}, {
'communicator_class': SingleNodeCommunicator,
'multi_node': False,
}
]]
for global_dtype in [np.float32, np.float16, chainer.mixed16, None]:
for allreduce_dtype in [np.float32, np.float16, None]:
if global_dtype is None and allreduce_dtype is None:
continue
for batched_copy in [True, False]:
gpu_mixed_dtype_params.append(Param({
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'global_dtype': global_dtype,
'allreduce_grad_dtype': allreduce_dtype,
'batched_copy': batched_copy,
}))
mpi_comm = mpi4py.MPI.COMM_WORLD
def create_communicator(param, use_gpu):
if not param.multi_node:
ranks = _communication_utility.init_ranks(mpi_comm)
inter_size = ranks[4]
if inter_size > 1:
pytest.skip('This test is for single node only')
if use_gpu and not param.nccl1 and nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
if param.communicator_class is PureNcclCommunicator:
communicator = param.communicator_class(
mpi_comm, allreduce_grad_dtype=param.allreduce_grad_dtype,
batched_copy=param.batched_copy)
else:
communicator = param.communicator_class(mpi_comm)
if use_gpu:
chainer.cuda.get_device_from_id(communicator.intra_rank).use()
return communicator
def destroy_communicator(comm):
"""Destroy internal NCCL communicator.
When too many NCCL communicator are alive, NCCL produces
unhandled CUDA error. To avoid this, we need to make sure to
destory NCCL communicator after every use.
"""
if hasattr(comm, 'nccl_comm') and comm.nccl_comm is not None:
comm.nccl_comm.destroy()
comm.nccl_comm = None
if hasattr(comm, 'intra_nccl_comm') and comm.intra_nccl_comm is not None:
comm.intra_nccl_comm.destroy()
comm.intra_nccl_comm = None
def check_send_and_recv(communicator, *shape):
if communicator.size < 2:
pytest.skip('This test is for multiple nodes')
if communicator.rank > 0:
rank_prev = (communicator.rank - 1) % communicator.size
data_recv = communicator.recv(source=rank_prev, tag=0)
chainer.testing.assert_allclose(
data_recv, rank_prev * np.ones((shape)))
if communicator.rank < communicator.size - 1:
rank_next = (communicator.rank + 1) % communicator.size
data_send = communicator.rank * \
np.ones((shape)).astype(np.float32)
communicator.send(data_send, dest=rank_next, tag=0)
def check_send_and_recv_tuple(communicator, data):
if communicator.size < 2:
pytest.skip('This test is for multiple nodes')
if communicator.rank > 0:
rank_prev = (communicator.rank - 1) % communicator.size
data_recv = communicator.recv(source=rank_prev, tag=0)
for array0, array1 in zip(data, data_recv):
chainer.testing.assert_allclose(array0, array1)
if communicator.rank < communicator.size - 1:
rank_next = (communicator.rank + 1) % communicator.size
communicator.send(data, dest=rank_next, tag=0)
def check_bcast_data(communicator, model):
model.a.W.data[:] = communicator.rank
model.b.W.data[:] = communicator.rank + 1
model.c.b.data[:] = communicator.rank + 2
communicator.bcast_data(model)
chainer.testing.assert_allclose(model.a.W.data, 0 * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.data, 1 * np.ones((4, 3)))
chainer.testing.assert_allclose(model.c.b.data, 2 * np.ones((5, )))
def check_allreduce_grad(communicator, model):
# We need to repeat twice for regressions on lazy initialization of
# sub communicators.
for _ in range(2):
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad[:] = communicator.rank + 2
communicator.allreduce_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(model.c.b.grad,
(base + 2) * np.ones((5, )))
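# Why (communicator.size - 1) / 2: rank r contributes the gradient value r and
# allreduce averages across workers, i.e. mean(0, 1, ..., size - 1). For
# example, with size = 4: (0 + 1 + 2 + 3) / 4 = 1.5 = (4 - 1) / 2.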
def check_allreduce_grad_empty(communicator, model):
# We need to repeat twice for regressions on lazy initialization of
# sub communicators.
for _ in range(2):
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad = None
communicator.allreduce_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
def check_allreduce_grad_empty_half(communicator, model):
# We need to repeat twice for regressions on lazy initialization of
# sub communicators.
for _ in range(2):
model.a.W.data[:] = communicator.rank
model.b.W.data[:] = communicator.rank + 1
model.c.b.data[:] = communicator.rank + 2
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
if communicator.rank % 2 == 0:
model.c.b.grad[:] = communicator.rank + 2
else:
model.c.b.grad = None
communicator.allreduce_grad(model, zero_fill=True)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
v = 0
for i in range(communicator.size):
if i % 2 == 0:
v += i + 2
v /= communicator.size
chainer.testing.assert_allclose(model.c.b.grad,
v * np.ones((5, )))
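# Worked example of the zero-fill average above for communicator.size = 4:
# only even ranks set model.c.b.grad (rank 0 -> 2, rank 2 -> 4), odd ranks
# contribute an implicit 0, so v = (2 + 4) / 4 = 1.5.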
def check_send_recv(param, use_gpu):
communicator = create_communicator(param, use_gpu)
assert mpi_comm.Get_rank() == communicator.rank
assert mpi_comm.Get_size() == communicator.size
check_send_and_recv(communicator, 50)
check_send_and_recv(communicator, 50, 20)
check_send_and_recv(communicator, 50, 20, 5)
check_send_and_recv(communicator, 50, 20, 5, 3)
data = [np.ones((50)).astype(np.float32)]
check_send_and_recv_tuple(communicator, data)
data = [
np.ones((50)).astype(np.float32),
np.ones((50, 20)).astype(np.float32),
np.ones((50, 20, 5)).astype(np.float32)]
check_send_and_recv_tuple(communicator, data)
destroy_communicator(communicator)
def check_allreduce_grad_mixed_dtype(param, model, use_gpu):
# Checks the actual allreduce communication is performed
# in the correct data type (FP16 or FP32)
comm_class = param.communicator_class
if not param.multi_node:
ranks = _communication_utility.init_ranks(mpi_comm)
inter_size = ranks[4]
if inter_size > 1:
pytest.skip('This test is for single node only')
if comm_class is PureNcclCommunicator:
communicator = comm_class(
mpi_comm, allreduce_grad_dtype=param.allreduce_grad_dtype,
batched_copy=param.batched_copy)
else:
communicator = comm_class(mpi_comm)
mpi_comm.barrier()
# answer type: see the documentation of `create_communicator`
global_dtype = param.global_dtype
allreduce_dtype = param.allreduce_grad_dtype
# assert test configuration.
assert chainer.get_dtype() == global_dtype
answer_dtype = None
if allreduce_dtype == np.float16:
answer_dtype = np.float16
elif allreduce_dtype == np.float32:
answer_dtype = np.float32
else:
if global_dtype == np.float32:
answer_dtype = np.float32
else:
answer_dtype = np.float16
if use_gpu:
model.to_gpu()
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad[:] = communicator.rank + 2
if isinstance(communicator, PureNcclCommunicator):
communicator._init_comms()
with mock.patch.object(communicator, 'nccl_comm',
wraps=communicator.nccl_comm) as mc:
answer_dtype = _communication_utility._get_nccl_type_id(
answer_dtype)
communicator.allreduce_grad(model)
# dtype that was used in the actual communication,
# which is nccl_comm.allReduce
call_args = mc.allReduce.call_args[0]
actual_dtype = call_args[3]
assert answer_dtype == actual_dtype
else:
# For other MPI-based communicators,
# all communication should happen in FP32 as of now, so
# here we just check the results are correct for
# 16-32 mixed models.
communicator.allreduce_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
mpi_comm.barrier()
destroy_communicator(communicator)
def check_collective_communication(param, use_gpu):
communicator = create_communicator(param, use_gpu)
mpi_comm.barrier()
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_bcast_data(communicator, model)
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_allreduce_grad(communicator, model)
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_allreduce_grad_empty(communicator, model)
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_allreduce_grad_empty_half(communicator, model)
# Check allreduce debug mode
model = ExampleModel()
if use_gpu:
model.to_gpu()
# The example model includes some nan parameters so the debug mode
# must detect it.
chainer.set_debug(True)
with pytest.raises(ValueError, match=r'.* diverged .*'):
check_allreduce_grad(communicator, model)
chainer.set_debug(False)
# barrier() requires before destructor of PureNcclCommunicator
# because communication may not be finished.
mpi_comm.barrier()
destroy_communicator(communicator)
# chainer.testing.parameterize is not available at functions
@pytest.mark.parametrize('param', cpu_params)
def test_communicator_cpu(param):
check_send_recv(param, False)
# Repo: conzty01/RA_Scheduler
from unittest.mock import MagicMock, patch
from scheduleServer import app
from flask import Response
import unittest
import datetime
from helperFunctions.helperFunctions import stdRet, getCurSchoolYear, AuthenticatedUser
class TestSchedule_editSched(unittest.TestCase):
def setUp(self):
# Set up a number of items that will be used for these tests.
# -- Mock the os.environ method so that we can create the server. --
# Helper Dict for holding the os.environ configuration
self.helper_osEnviron = {
"CLIENT_ID": "TEST CLIENT_ID",
"PROJECT_ID": "TEST PROJECT_ID",
"AUTH_URI": "TEST AUTH_URI",
"TOKEN_URI": "TEST TOKEN_URI",
"AUTH_PROVIDER_X509_CERT_URL": "TEST AUTH_PROVIDER_X509_CERT_URL",
"CLIENT_SECRET": "TEST CLIENT_SECRET",
"REDIRECT_URIS": "TEST1,TEST2,TEST3,TEST4",
"JAVASCRIPT_ORIGINS": "TEST5,TEST6",
"EXPLAIN_TEMPLATE_LOADING": "FALSE",
"LOG_LEVEL": "WARNING",
"USE_ADHOC": "FALSE",
"SECRET_KEY": "TEST SECRET KEY",
"OAUTHLIB_RELAX_TOKEN_SCOPE": "1",
"OAUTHLIB_INSECURE_TRANSPORT": "1",
"HOST_URL": "https://localhost:5000",
"DATABASE_URL": "postgres://ra_sched"
}
# Create a dictionary patcher for the os.environ method
self.patcher_osEnviron = patch.dict("os.environ",
self.helper_osEnviron)
# Start the os patchers (No mock object is returned since we used patch.dict())
self.patcher_osEnviron.start()
# -- Create an instance of ScheduleServer that we may test against. --
# Mark the application as being tested
app.config["TESTING"] = True
# Disable the login_required decorator
app.config["LOGIN_DISABLED"] = True
# Reinitialize the Login Manager to accept the new configuration
app.login_manager.init_app(app)
# Create the test server
self.server = app.test_client()
# -- Create a patcher for the getAuth() method from helperFunctions --
# since we have disabled the login manager for testing
# First we must create an object for the auth_level that we can manipulate
# as needed for the tests. By default, the auth_level is set to 1.
self.mocked_authLevel = MagicMock(return_value=1)
# In order for the authLevel to respond to __lt__, __gt__, and __eq__ calls,
# we need to create lambda functions that can effectively implement the
# respective magic methods.
self.mocked_authLevel_ltMock = lambda me, other: me.return_value < other
self.mocked_authLevel_gtMock = lambda me, other: me.return_value > other
self.mocked_authLevel_eqMock = lambda me, other: me.return_value == other
# We then set the auth_level mock to return the __lt__ Mock
self.mocked_authLevel.__lt__ = self.mocked_authLevel_ltMock
# We then set the auth_level mock to return the __gt__ Mock
self.mocked_authLevel.__gt__ = self.mocked_authLevel_gtMock
# We then set the auth_level mock to return the __eq__ Mock
self.mocked_authLevel.__eq__ = self.mocked_authLevel_eqMock
# Set the ra_id and hall_id to values that can be used throughout
self.user_ra_id = 1
self.user_hall_id = 1
self.user_school_id = 1
self.associatedResHalls = [
{
"id": self.user_hall_id,
"auth_level": self.mocked_authLevel,
"name": "Test Hall",
"school_id": self.user_school_id,
"school_name": "Test School"
}
]
# Assemble all of the desired values into an Authenticated User Object
self.helper_getAuth = AuthenticatedUser(
"<EMAIL>",
self.user_ra_id,
"Test",
"User",
self.associatedResHalls
)
# Create the patcher for the getAuth() method
self.patcher_getAuth = patch("schedule.schedule.getAuth", autospec=True)
# Start the patcher - mock returned
self.mocked_getAuth = self.patcher_getAuth.start()
# Configure the mocked_getAuth to return the helper_getAuth dictionary
self.mocked_getAuth.return_value = self.helper_getAuth
# -- Create a patcher for the appGlobals file --
self.patcher_appGlobals = patch("schedule.schedule.ag", autospec=True)
# Start the patcher - mock returned
self.mocked_appGlobals = self.patcher_appGlobals.start()
# Configure the mocked appGlobals as desired
self.mocked_appGlobals.baseOpts = {"HOST_URL": "https://localhost:5000"}
self.mocked_appGlobals.conn = MagicMock()
self.mocked_appGlobals.UPLOAD_FOLDER = "./static"
self.mocked_appGlobals.ALLOWED_EXTENSIONS = {"txt", "csv"}
# Create a patcher for the getCurSchoolYear method
self.patcher_getCurSchoolYear = patch("schedule.schedule.getCurSchoolYear", autospec=True)
# Start the patcher - mock returned
self.mocked_getCurSchoolYear = self.patcher_getCurSchoolYear.start()
# Configure the mocked getCurSchoolYear
self.helper_schoolYearStart = datetime.date(2021, 8, 1)
self.helper_schoolYearEnd = datetime.date(2022, 7, 31)
self.mocked_getCurSchoolYear.return_value = (self.helper_schoolYearStart, self.helper_schoolYearEnd)
# -- Create a patchers for the logging --
self.patcher_loggingDEBUG = patch("logging.debug", autospec=True)
self.patcher_loggingINFO = patch("logging.info", autospec=True)
self.patcher_loggingWARNING = patch("logging.warning", autospec=True)
self.patcher_loggingCRITICAL = patch("logging.critical", autospec=True)
self.patcher_loggingERROR = patch("logging.error", autospec=True)
# Start the patcher - mock returned
self.mocked_loggingDEBUG = self.patcher_loggingDEBUG.start()
self.mocked_loggingINFO = self.patcher_loggingINFO.start()
self.mocked_loggingWARNING = self.patcher_loggingWARNING.start()
self.mocked_loggingCRITICAL = self.patcher_loggingCRITICAL.start()
self.mocked_loggingERROR = self.patcher_loggingERROR.start()
def tearDown(self):
# Stop all of the patchers
self.patcher_getAuth.stop()
self.patcher_appGlobals.stop()
self.patcher_osEnviron.stop()
self.patcher_getCurSchoolYear.stop()
# Stop all of the logging patchers
self.patcher_loggingDEBUG.stop()
self.patcher_loggingINFO.stop()
self.patcher_loggingWARNING.stop()
self.patcher_loggingCRITICAL.stop()
self.patcher_loggingERROR.stop()
def resetAuthLevel(self):
# This function serves to reset the auth_level of the session
# to the default value which is 1.
self.mocked_authLevel.return_value = 1
@patch("schedule.schedule.abort", autospec=True)
def test_withoutAuthorizedUser_returnsNotAuthorizedResponse(self, mocked_abort):
# Test to ensure that when a user that is NOT authorized to view
# the Edit Schedule Portal navigates to the page, they receive a
# response that indicates that they are not authorized. An
# authorized user is a user that has an auth_level of at least
# 2 (AHD).
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
# Reset the auth_level to 1
self.resetAuthLevel()
# Create a custom exception to be used for this test
custException = EOFError
# Configure the mocked_abort object to behave as expected
mocked_abort.side_effect = custException
# -- Act --
# -- Assert --
# Request the desired page and that we received an error
self.assertRaises(custException, self.server.get, "/schedule/editSched",
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# Assert that the mocked_abort was called with the expected value
mocked_abort.assert_called_once_with(403)
# Assert that no additional call to the DB was made
self.mocked_appGlobals.conn.cursor().execute.assert_not_called()
@patch("schedule.schedule.getRAStats", autospec=True)
def test_withAuthorizedUser_callsGetRAStatsFunction(self, mocked_getRAStats):
# Test to ensure that when an AHD that is authorized to view the
# Edit Schedule Portal navigates to the page, this method calls
# the getRAStats API.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
# Set the auth_level of this session to 2
self.mocked_authLevel.return_value = 2
# Generate the various objects that will be used in this test
# Configure the getRAStats mock to behave as expected. This function
# will be thoroughly tested in another test class.
mocked_getRAStats.return_value = {
1: {
"name": "<NAME>",
"pts": 19
},
2: {
"name": "<NAME>",
"pts": 1
}
}
# -- Act --
# Request the desired page.
resp = self.server.get("/schedule/editSched",
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
# Assert that we received a 200 status code
self.assertEqual(resp.status_code, 200)
# Assert that the response is not JSON
self.assertFalse(resp.is_json)
# Assert that the getRABreakStats Function was called as expected
mocked_getRAStats.assert_called_once_with(
self.helper_getAuth.hall_id(),
self.helper_schoolYearStart,
self.helper_schoolYearEnd
)
# Assert that the last call to the DB was queried as expected.
# In this instance, we are unable to use the assert_called_once_with
# method as this function calls out to
self.mocked_appGlobals.conn.cursor().execute.assert_any_call("""
SELECT ra.id, ra.first_name, ra.last_name, ra.color
FROM ra JOIN staff_membership AS sm ON (ra.id = sm.ra_id)
WHERE sm.res_hall_id = %s
ORDER BY ra.first_name ASC;""", (self.helper_getAuth.hall_id(),)
)
# Assert that at least one call to the DB was queried as expected.
self.mocked_appGlobals.conn.cursor().execute.assert_any_call(
"SELECT duty_flag_label FROM hall_settings WHERE res_hall_id = %s",
(self.user_hall_id,)
)
# Assert that at least one call to the DB was queried as expected
self.mocked_appGlobals.conn.cursor().execute.assert_any_call("""
SELECT id, status, created_date
FROM scheduler_queue
WHERE res_hall_id = %s
ORDER BY created_date DESC
LIMIT (10);""", (self.user_hall_id,))
@patch("schedule.schedule.getRAStats", autospec=True)
@patch("schedule.schedule.render_template", autospec=True)
def test_withAuthorizedUser_passesExpectedDataToRenderer(self, mocked_renderTemplate, mocked_getRAStats):
# Test to ensure that when a user that is authorized to view the
# Edit Schedule Portal navigates to the page, the expected information
# is being passed to the render_template function. An authorized user
# is a user that has an auth_level of at least 2 (AHD).
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
# Set the auth_level of this session to 2
self.mocked_authLevel.return_value = 2
# Create some of the objects used in this test
expectedDutyFlagLabel = "Test Label"
expectedGCalIntegration = False
expectedCustomSettingsDict = {
"dutyFlagLabel": expectedDutyFlagLabel,
"gCalConnected": expectedGCalIntegration,
"yearStart": self.helper_schoolYearStart,
"yearEnd": self.helper_schoolYearEnd
}
# Configure the expectedCustomSettingsDict so that it is as it would
# appear to the renderer
expectedCustomSettingsDict.update(self.mocked_appGlobals.baseOpts)
# Generate the various objects that will be used in this test
expectedRAList = []
for i in range(10):
expectedRAList.append((i, "User{}".format(i), "Test", "#{}".format(i)))
# Configure the getRAStats mock to behave as expected. This function
# will be thoroughly tested in another test class.
mocked_getRAStats.return_value = {
1: {
"name": "<NAME>",
"pts": 19
},
2: {
"name": "<NAME>",
"pts": 1
}
}
# Sort alphabetically by last name of RA
ptDictSorted = sorted(
mocked_getRAStats.return_value.items(),
key=lambda kv: kv[1]["name"].split(" ")[1]
)
# Create the scheduler queue list
expectedSchedQueueList = [i for i in range(10)]
# Configure
Services"
self._cm.update_config({"TSQUERY_STREAMS_LIMIT": 1000})
hosts = management.get_hosts(include_cm_host=True)
# pick hostId that match the ipAddress of cm_server
# mgmt_host may be empty then use the 1st host from the -w
try:
mgmt_host = [x for x in hosts if x.ipAddress == socket.gethostbyname(cmx.cm_server)][0]
except IndexError:
mgmt_host = [x for x in hosts if x.id == 0][0]
for role_type in [x for x in self._service.get_role_types() if x in self._role_list]:
try:
if not [x for x in self._service.get_all_roles() if x.type == role_type]:
print "Creating Management Role %s " % role_type
role_name = "mgmt-%s-%s" % (role_type, mgmt_host.md5host)
for cmd in self._service.create_role(role_name, role_type, mgmt_host.hostId).get_commands():
check.status_for_command("Creating %s" % role_name, cmd)
except ApiException as err:
print "ERROR: %s " % err.message
# now configure each role
for group in [x for x in self._service.get_all_role_config_groups() if x.roleType in self._role_list]:
if group.roleType == "ACTIVITYMONITOR":
group.update_config({"firehose_database_host": "%s:5432" % socket.getfqdn(cmx.cm_server),
"firehose_database_user": "amon",
"firehose_database_password": <PASSWORD>,
"firehose_database_type": "postgresql",
"firehose_database_name": "amon",
"mgmt_log_dir": LOG_DIR+"/cloudera-scm-firehose",
"firehose_heapsize": "215964392"})
elif group.roleType == "ALERTPUBLISHER":
group.update_config({"mgmt_log_dir": LOG_DIR+"/cloudera-scm-alertpublisher"})
elif group.roleType == "EVENTSERVER":
group.update_config({"event_server_heapsize": "215964392",
"mgmt_log_dir": LOG_DIR+"/cloudera-scm-eventserver",
"eventserver_index_dir": LOG_DIR+"/lib/cloudera-scm-eventserver"})
elif group.roleType == "HOSTMONITOR":
group.update_config({"mgmt_log_dir": LOG_DIR+"/cloudera-scm-firehose",
"firehose_storage_dir": LOG_DIR+"/lib/cloudera-host-monitor"})
elif group.roleType == "SERVICEMONITOR":
group.update_config({"mgmt_log_dir": LOG_DIR+"/cloudera-scm-firehose",
"firehose_storage_dir": LOG_DIR+"/lib/cloudera-service-monitor"})
elif group.roleType == "NAVIGATOR" and management.licensed():
group.update_config({})
elif group.roleType == "NAVIGATORMETADATASERVER" and management.licensed():
group.update_config({})
elif group.roleType == "REPORTSMANAGER" and management.licensed():
group.update_config({"headlamp_database_host": "%s:5432" % socket.getfqdn(cmx.cm_server),
"headlamp_database_name": "rman",
"headlamp_database_password": <PASSWORD>,
"headlamp_database_type": "postgresql",
"headlamp_database_user": "rman",
"headlamp_scratch_dir": LOG_DIR+"/lib/cloudera-scm-headlamp",
"mgmt_log_dir": LOG_DIR+"/cloudera-scm-headlamp"})
elif group.roleType == "OOZIE":
group.update_config({"oozie_database_host": "%s:5432" % socket.getfqdn(cmx.cm_server),
"oozie_database_name": "oozie",
"oozie_database_password": <PASSWORD>,
"oozie_database_type": "postgresql",
"oozie_database_user": "oozie",
"oozie_log_dir": LOG_DIR+"/oozie" })
@classmethod
def licensed(cls):
"""
Check if Cluster is licensed
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
cm = api.get_cloudera_manager()
try:
return bool(cm.get_license().uuid)
except ApiException as err:
return "Express" not in err.message
@classmethod
def upload_license(cls):
"""
Upload License file
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
cm = api.get_cloudera_manager()
if cmx.license_file and not management.licensed():
print "Upload license"
with open(cmx.license_file, 'r') as f:
license_contents = f.read()
print "Upload CM License: \n %s " % license_contents
cm.update_license(license_contents)
# REPORTSMANAGER required after applying license
management("REPORTSMANAGER").setup()
management("REPORTSMANAGER").start()
@classmethod
def begin_trial(cls):
"""
Begin Trial
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=cmx.password)
print "def begin_trial"
if not management.licensed():
try:
api.post("/cm/trial/begin")
# REPORTSMANAGER required after applying license
management("REPORTSMANAGER").setup()
management("REPORTSMANAGER").start()
except ApiException as err:
print err.message
@classmethod
def get_mgmt_password(cls, role_type):
"""
Get password for "ACTIVITYMONITOR", "REPORTSMANAGER", "NAVIGATOR", "OOZIE", "HIVEMETASTORESERVER"
:param role_type:
:return:
"""
contents = []
mgmt_password = False
if os.path.exists('/etc/cloudera-scm-server'):
file_path = os.path.join('/etc/cloudera-scm-server', 'db.mgmt.properties')
try:
with open(file_path) as f:
contents = f.readlines()
except IOError:
print "Unable to open file %s." % file_path
# role_type expected to be in
# ACTIVITYMONITOR, REPORTSMANAGER, NAVIGATOR, OOZIE, HIVEMETASTORESERVER
if role_type in ['ACTIVITYMONITOR', 'REPORTSMANAGER', 'NAVIGATOR','OOZIE','HIVEMETASTORESERVER']:
idx = "com.cloudera.cmf.%s.db.password=" % role_type
match = [s.rstrip('\n') for s in contents if idx in s][0]
mgmt_password = match[match.index(idx) + len(idx):]
return mgmt_password
@classmethod
def get_cmhost(cls):
"""
return cm host in the same format as other host
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
idx = len(set(enumerate(cmx.host_names)))
_host = [x for x in api.get_all_hosts() if x.ipAddress == socket.gethostbyname(cmx.cm_server)][0]
cmhost={
'id': idx,
'hostId': _host.hostId,
'hostname': _host.hostname,
'md5host': hashlib.md5(_host.hostname).hexdigest(),
'ipAddress': _host.ipAddress,
}
return type('', (), cmhost)
@classmethod
def get_hosts(cls, include_cm_host=False):
"""
because api.get_all_hosts() returns all the hosts as instanceof ApiHost: hostId hostname ipAddress
and cluster.list_hosts() returns all the cluster hosts as instanceof ApiHostRef: hostId
we only need Cluster hosts with instanceof ApiHost: hostId hostname ipAddress + md5host
preserve host order in -w
hashlib.md5(host.hostname).hexdigest()
attributes = {'id': None, 'hostId': None, 'hostname': None, 'md5host': None, 'ipAddress': None, }
return a list of hosts
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
w_hosts = set(enumerate(cmx.host_names))
if include_cm_host and socket.gethostbyname(cmx.cm_server) \
not in [socket.gethostbyname(x) for x in cmx.host_names]:
w_hosts.add((len(w_hosts), cmx.cm_server))
hosts = []
for idx, host in w_hosts:
_host = [x for x in api.get_all_hosts() if x.ipAddress == socket.gethostbyname(host)][0]
hosts.append({
'id': idx,
'hostId': _host.hostId,
'hostname': _host.hostname,
'md5host': hashlib.md5(_host.hostname).hexdigest(),
'ipAddress': _host.ipAddress,
})
return [type('', (), x) for x in hosts]
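    # Illustrative shape of one returned host object (values are made up):
    #   host.id        -> 0
    #   host.hostId    -> "f9f18a22-..."          # CM host identifier
    #   host.hostname  -> "node1.example.com"
    #   host.md5host   -> hashlib.md5("node1.example.com").hexdigest()
    #   host.ipAddress -> "10.0.0.11"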
@classmethod
def restart_management(cls):
"""
Restart Management Services
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
mgmt = api.get_cloudera_manager().get_service()
check.status_for_command("Stop Management services", mgmt.stop())
check.status_for_command("Start Management services", mgmt.start())
class ServiceActions:
"""
Example stopping/starting services ['HBASE', 'IMPALA', 'SPARK', 'SOLR']
:param service_list:
:param action:
:return:
"""
def __init__(self, *service_list):
self._service_list = service_list
self._api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
self._cluster = self._api.get_cluster(cmx.cluster_name)
def stop(self):
self._action('stop')
def start(self):
self._action('start')
def restart(self):
self._action('restart')
def _action(self, action):
state = {'start': ['STOPPED'], 'stop': ['STARTED'], 'restart': ['STARTED', 'STOPPED']}
for services in [x for x in self._cluster.get_all_services()
if x.type in self._service_list and x.serviceState in state[action]]:
check.status_for_command("%s service %s" % (action.upper(), services.type),
getattr(self._cluster.get_service(services.name), action)())
@classmethod
def get_service_type(cls, name):
"""
Returns service based on service type name
:param name:
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
cluster = api.get_cluster(cmx.cluster_name)
try:
service = [x for x in cluster.get_all_services() if x.type == name][0]
except IndexError:
service = None
return service
@classmethod
def deploy_client_config_for(cls, obj):
"""
Example deploying GATEWAY Client Config on each host
Note: only recommended if you need to deploy on a specific hostId.
Use the cluster.deploy_client_config() for normal use.
example usage:
# hostId
for host in get_cluster_hosts(include_cm_host=True):
deploy_client_config_for(host.hostId)
# cdh service
for service in cluster.get_all_services():
deploy_client_config_for(service)
:param host.hostId, or ApiService:
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
# cluster = api.get_cluster(cmx.cluster_name)
if isinstance(obj, str) or isinstance(obj, unicode):
for role_name in [x.roleName for x in api.get_host(obj).roleRefs if 'GATEWAY' in x.roleName]:
service = cdh.get_service_type('GATEWAY')
print "Deploying client config for service: %s - host: [%s]" % \
(service.type, api.get_host(obj).hostname)
check.status_for_command("Deploy client config for role %s" %
role_name, service.deploy_client_config(role_name))
elif isinstance(obj, ApiService):
for role in obj.get_roles_by_type("GATEWAY"):
check.status_for_command("Deploy client config for role %s" %
role.name, obj.deploy_client_config(role.name))
@classmethod
def create_service_role(cls, service, role_type, host):
"""
Helper function to create a role
:return:
"""
service_name = service.name[:4] + hashlib.md5(service.name).hexdigest()[:8] \
if len(role_type) > 24 else service.name
role_name = "-".join([service_name, role_type, host.md5host])[:64]
print "Creating role: %s on host: [%s]" % (role_name, host.hostname)
for cmd in service.create_role(role_name, role_type, host.hostId).get_commands():
check.status_for_command("Creating role: %s on host: [%s]" % (role_name, host.hostname), cmd)
@classmethod
def restart_cluster(cls):
"""
Restart Cluster and Cluster wide deploy client config
:return:
"""
api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
cluster = api.get_cluster(cmx.cluster_name)
print "Restart cluster: %s" % cmx.cluster_name
check.status_for_command("Stop %s" % cmx.cluster_name, cluster.stop())
check.status_for_command("Start %s" % cmx.cluster_name, cluster.start())
# Example deploying cluster wide Client Config
check.status_for_command("Deploy client config for %s" % cmx.cluster_name, cluster.deploy_client_config())
@classmethod
def dependencies_for(cls, service):
"""
Utility function returns dict of service dependencies
:return:
"""
service_config = {}
config_types = {"hue_webhdfs": ['NAMENODE', 'HTTPFS'], "hdfs_service": "HDFS", "sentry_service": "SENTRY",
"zookeeper_service": "ZOOKEEPER", "hbase_service": "HBASE", "solr_service": "SOLR",
"hive_service": "HIVE", "sqoop_service": "SQOOP",
"impala_service": "IMPALA", "oozie_service": "OOZIE",
"mapreduce_yarn_service": ['MAPREDUCE', 'YARN'], "yarn_service": "YARN"}
dependency_list = []
# get required service config
for k, v in service.get_config(view="full")[0].items():
if v.required:
dependency_list.append(k)
# Extended dependence list, adding the optional ones as well
if service.type == 'HUE':
dependency_list.extend(['sqoop_service',
'impala_service'])
if service.type in ['HIVE', 'HDFS', 'HUE', 'HBASE', 'OOZIE', 'MAPREDUCE', 'YARN']:
dependency_list.append('zookeeper_service')
# if service.type in ['HIVE']:
# dependency_list.append('sentry_service')
if service.type == 'OOZIE':
dependency_list.append('hive_service')
# if service.type in ['FLUME', 'IMPALA']:
# dependency_list.append('hbase_service')
if service.type in ['FLUME', 'SPARK', 'SENTRY']:
dependency_list.append('hdfs_service')
# if service.type == 'FLUME':
# dependency_list.append('solr_service')
for key in dependency_list:
if key == "<KEY>":
hdfs = cdh.get_service_type('HDFS')
if hdfs is not None:
service_config[key] = [x.name for x in hdfs.get_roles_by_type('NAMENODE')][0]
# prefer HTTPS over NAMENODE
if [x.name for x in hdfs.get_roles_by_type('HTTPFS')]:
service_config[key] = [x.name for x in hdfs.get_roles_by_type('HTTPFS')][0]
elif key == "mapreduce_yarn_service":
for _type in config_types[key]:
if cdh.get_service_type(_type) is not None:
service_config[key] = cdh.get_service_type(_type).name
# prefer YARN over MAPREDUCE
if cdh.get_service_type(_type) is not None and _type == 'YARN':
service_config[key] = cdh.get_service_type(_type).name
elif key == "hue_hbase_thrift":
hbase = cdh.get_service_type('HBASE')
if hbase is not None:
service_config[key] = [x.name for x in hbase.get_roles_by_type(config_types[key])][0]
else:
if cdh.get_service_type(config_types[key]) is not None:
service_config[key] = cdh.get_service_type(config_types[key]).name
return service_config
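

# Illustrative sketch (added for clarity, not part of the original script): one way the
# dependency map returned by dependencies_for() could be applied to a service. The names
# `cdh.dependencies_for` and `ApiService.update_config` are assumptions based on how the
# surrounding code and the cm_api client are typically used -- verify before relying on it.
def _example_apply_dependencies(service):
    deps = cdh.dependencies_for(service)   # e.g. {'zookeeper_service': 'ZOOKEEPER-1', ...}
    if deps:
        service.update_config(deps)        # push the resolved dependency configuration
    return deps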
class ActiveCommands:
def __init__(self):
self._api = ApiResource(server_host=cmx.cm_server, username=cmx.username, password=<PASSWORD>)
def status_for_command(self, message, command):
"""
Helper to check active command status
:param message:
:param command:
:return:
"""
_state = 0
_bar = ['[|]', '[/]', '[-]', '[\\]']
while True:
if self._api.get("/commands/%s" % command.id)['active']:
sys.stdout.write(_bar[_state] + ' ' + message + ' ' + ('\b' * (len(message) + 5)))
sys.stdout.flush()
_state += 1
if _state > 3:
                    _state = 0
jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 204
self.ebnfSuffix()
self.state = 207
self.match(jsgParser.CBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectMacroContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def EQUALS(self):
return self.getToken(jsgParser.EQUALS, 0)
def membersDef(self):
return self.getTypedRuleContext(jsgParser.MembersDefContext,0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def getRuleIndex(self):
return jsgParser.RULE_objectMacro
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitObjectMacro" ):
return visitor.visitObjectMacro(self)
else:
return visitor.visitChildren(self)
def objectMacro(self):
localctx = jsgParser.ObjectMacroContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_objectMacro)
try:
self.enterOuterAlt(localctx, 1)
self.state = 209
self.match(jsgParser.ID)
self.state = 210
self.match(jsgParser.EQUALS)
self.state = 211
self.membersDef()
self.state = 212
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ValueTypeMacroContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def EQUALS(self):
return self.getToken(jsgParser.EQUALS, 0)
def nonRefValueType(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.NonRefValueTypeContext)
else:
return self.getTypedRuleContext(jsgParser.NonRefValueTypeContext,i)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_valueTypeMacro
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitValueTypeMacro" ):
return visitor.visitValueTypeMacro(self)
else:
return visitor.visitChildren(self)
def valueTypeMacro(self):
localctx = jsgParser.ValueTypeMacroContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_valueTypeMacro)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 214
self.match(jsgParser.ID)
self.state = 215
self.match(jsgParser.EQUALS)
self.state = 216
self.nonRefValueType()
self.state = 221
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.BAR:
self.state = 217
self.match(jsgParser.BAR)
self.state = 218
self.nonRefValueType()
self.state = 223
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 224
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BuiltinValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def JSON_STRING(self):
return self.getToken(jsgParser.JSON_STRING, 0)
def JSON_NUMBER(self):
return self.getToken(jsgParser.JSON_NUMBER, 0)
def JSON_INT(self):
return self.getToken(jsgParser.JSON_INT, 0)
def JSON_BOOL(self):
return self.getToken(jsgParser.JSON_BOOL, 0)
def JSON_NULL(self):
return self.getToken(jsgParser.JSON_NULL, 0)
def JSON_ARRAY(self):
return self.getToken(jsgParser.JSON_ARRAY, 0)
def JSON_OBJECT(self):
return self.getToken(jsgParser.JSON_OBJECT, 0)
def ANY(self):
return self.getToken(jsgParser.ANY, 0)
def getRuleIndex(self):
return jsgParser.RULE_builtinValueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBuiltinValueType" ):
return visitor.visitBuiltinValueType(self)
else:
return visitor.visitChildren(self)
def builtinValueType(self):
localctx = jsgParser.BuiltinValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_builtinValueType)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 226
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ANY) | (1 << jsgParser.JSON_STRING) | (1 << jsgParser.JSON_NUMBER) | (1 << jsgParser.JSON_INT) | (1 << jsgParser.JSON_BOOL) | (1 << jsgParser.JSON_NULL) | (1 << jsgParser.JSON_ARRAY) | (1 << jsgParser.JSON_OBJECT))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def idref(self):
return self.getTypedRuleContext(jsgParser.IdrefContext,0)
def nonRefValueType(self):
return self.getTypedRuleContext(jsgParser.NonRefValueTypeContext,0)
def getRuleIndex(self):
return jsgParser.RULE_valueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitValueType" ):
return visitor.visitValueType(self)
else:
return visitor.visitChildren(self)
def valueType(self):
localctx = jsgParser.ValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_valueType)
try:
self.state = 230
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.ID]:
self.enterOuterAlt(localctx, 1)
self.state = 228
self.idref()
pass
elif token in [jsgParser.LEXER_ID_REF, jsgParser.STRING, jsgParser.ANY, jsgParser.JSON_STRING, jsgParser.JSON_NUMBER, jsgParser.JSON_INT, jsgParser.JSON_BOOL, jsgParser.JSON_NULL, jsgParser.JSON_ARRAY, jsgParser.JSON_OBJECT, jsgParser.OBRACKET, jsgParser.OBRACE, jsgParser.OPREN]:
self.enterOuterAlt(localctx, 2)
self.state = 229
self.nonRefValueType()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NonRefValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID_REF(self):
return self.getToken(jsgParser.LEXER_ID_REF, 0)
def STRING(self):
return self.getToken(jsgParser.STRING, 0)
def builtinValueType(self):
return self.getTypedRuleContext(jsgParser.BuiltinValueTypeContext,0)
def objectExpr(self):
return self.getTypedRuleContext(jsgParser.ObjectExprContext,0)
def arrayExpr(self):
return self.getTypedRuleContext(jsgParser.ArrayExprContext,0)
def OPREN(self):
return self.getToken(jsgParser.OPREN, 0)
def typeAlternatives(self):
return self.getTypedRuleContext(jsgParser.TypeAlternativesContext,0)
def CPREN(self):
return self.getToken(jsgParser.CPREN, 0)
def getRuleIndex(self):
return jsgParser.RULE_nonRefValueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNonRefValueType" ):
return visitor.visitNonRefValueType(self)
else:
return visitor.visitChildren(self)
def nonRefValueType(self):
localctx = jsgParser.NonRefValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_nonRefValueType)
try:
self.state = 241
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.LEXER_ID_REF]:
self.enterOuterAlt(localctx, 1)
self.state = 232
self.match(jsgParser.LEXER_ID_REF)
pass
elif token in [jsgParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 233
self.match(jsgParser.STRING)
pass
elif token in [jsgParser.ANY, jsgParser.JSON_STRING, jsgParser.JSON_NUMBER, jsgParser.JSON_INT, jsgParser.JSON_BOOL, jsgParser.JSON_NULL, jsgParser.JSON_ARRAY, jsgParser.JSON_OBJECT]:
self.enterOuterAlt(localctx, 3)
self.state = 234
self.builtinValueType()
pass
elif token in [jsgParser.OBRACE]:
self.enterOuterAlt(localctx, 4)
self.state = 235
self.objectExpr()
pass
elif token in [jsgParser.OBRACKET]:
self.enterOuterAlt(localctx, 5)
self.state = 236
self.arrayExpr()
pass
elif token in [jsgParser.OPREN]:
self.enterOuterAlt(localctx, 6)
self.state = 237
self.match(jsgParser.OPREN)
self.state = 238
self.typeAlternatives()
self.state = 239
self.match(jsgParser.CPREN)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeAlternativesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def valueType(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.ValueTypeContext)
else:
return self.getTypedRuleContext(jsgParser.ValueTypeContext,i)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_typeAlternatives
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeAlternatives" ):
return visitor.visitTypeAlternatives(self)
else:
return visitor.visitChildren(self)
def typeAlternatives(self):
localctx = jsgParser.TypeAlternativesContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_typeAlternatives)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 243
self.valueType()
self.state = 246
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 244
self.match(jsgParser.BAR)
self.state = 245
self.valueType()
self.state = 248
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==jsgParser.BAR):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdrefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def getRuleIndex(self):
return jsgParser.RULE_idref
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdref" ):
return visitor.visitIdref(self)
else:
return visitor.visitChildren(self)
def idref(self):
localctx = jsgParser.IdrefContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_idref)
try:
self.enterOuterAlt(localctx, 1)
self.state = 250
self.match(jsgParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EbnfSuffixContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def QMARK(self):
return self.getToken(jsgParser.QMARK, 0)
def STAR(self):
return self.getToken(jsgParser.STAR, 0)
def PLUS(self):
return self.getToken(jsgParser.PLUS, 0)
def OBRACE(self):
return self.getToken(jsgParser.OBRACE, 0)
def INT(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.INT)
else:
return self.getToken(jsgParser.INT, i)
def CBRACE(self):
return self.getToken(jsgParser.CBRACE, 0)
def COMMA(self):
return self.getToken(jsgParser.COMMA, 0)
def getRuleIndex(self):
return jsgParser.RULE_ebnfSuffix
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEbnfSuffix" ):
return visitor.visitEbnfSuffix(self)
else:
return visitor.visitChildren(self)
def ebnfSuffix(self):
localctx = jsgParser.EbnfSuffixContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_ebnfSuffix)
self._la = 0 # Token type
try:
self.state = 264
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.QMARK]:
self.enterOuterAlt(localctx, 1)
self.state = 252
self.match(jsgParser.QMARK)
pass
elif token in [jsgParser.STAR]:
self.enterOuterAlt(localctx, 2)
self.state = 253
self.match(jsgParser.STAR)
pass
elif token in [jsgParser.PLUS]:
self.enterOuterAlt(localctx, 3)
self.state = 254
self.match(jsgParser.PLUS)
pass
elif token in [jsgParser.OBRACE]:
self.enterOuterAlt(localctx, 4)
self.state = 255
self.match(jsgParser.OBRACE)
self.state = 256
self.match(jsgParser.INT)
self.state = 261
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.COMMA:
self.state = 257
self.match(jsgParser.COMMA)
self.state = 259
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.INT or _la==jsgParser.STAR:
self.state = 258
_la = self._input.LA(1)
if not(_la==jsgParser.INT or _la==jsgParser.STAR):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 263
self.match(jsgParser.CBRACE)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRulesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TERMINALS(self):
return self.getToken(jsgParser.TERMINALS, 0)
def lexerRuleSpec(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerRuleSpecContext)
else:
return self.getTypedRuleContext(jsgParser.LexerRuleSpecContext,i)
def getRuleIndex(self):
return jsgParser.RULE_lexerRules
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRules" ):
return visitor.visitLexerRules(self)
else:
return visitor.visitChildren(self)
def lexerRules(self):
localctx = jsgParser.LexerRulesContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_lexerRules)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 266
self.match(jsgParser.TERMINALS)
self.state = 270
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.LEXER_ID:
self.state = 267
self.lexerRuleSpec()
self.state = 272
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID(self):
return self.getToken(jsgParser.LEXER_ID, 0)
def COLON(self):
return self.getToken(jsgParser.COLON, 0)
def lexerRuleBlock(self):
return self.getTypedRuleContext(jsgParser.LexerRuleBlockContext,0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerRuleSpec
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleSpec" ):
return visitor.visitLexerRuleSpec(self)
else:
return visitor.visitChildren(self)
def lexerRuleSpec(self):
localctx = jsgParser.LexerRuleSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_lexerRuleSpec)
try:
self.enterOuterAlt(localctx, 1)
self.state = 273
self.match(jsgParser.LEXER_ID)
| |
import os
import traceback
from flask import Flask, request, url_for, render_template, redirect, Response, session, make_response
import StringIO
from data_reader import DataReader
import xlwt #excel writing. used for the excel output.
import sys
import mimetypes
from werkzeug.datastructures import Headers #used for exporting files
import jsonpickle #lets us transfer Pond object between views.
#for graphing
#we need to import matplotlib and set which renderer to use before we use pyplot. This allows it to work without a GUI installed on the OS.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import numpy as np
##############################################################
#IMPORTANT VARIABLES
#
##############################################################
#How to work with file uploads http://flask.pocoo.org/docs/0.10/patterns/fileuploads/
# This is the path to the upload directory
ALLOWED_EXTENSIONS = set(['xls', 'xlsx', 'csv'])
TEMPLATE_FILE = 'template.xls'
TEMPLATE_FILE_ROUTE = '/'+TEMPLATE_FILE
EXAMPLE_FILE = 'example_data.xls'
EXAMPLE_FILE_ROUTE = '/'+EXAMPLE_FILE
INTERNAL_SERVER_ERROR_TEMPLATE_FILE = "500.html"
INTERNAL_SERVER_ERROR_TEMPLATE_ROUTE = '/'+INTERNAL_SERVER_ERROR_TEMPLATE_FILE
FIRST_DATA_ROW_FOR_EXPORT = 1
#SESSION KEYS
PICKLED_POND_LIST_KEY = 'pickled_pond_list'
# Initialize the Flask application
app = Flask(__name__)
random_number = os.urandom(24)
app.secret_key = random_number
# These are the extensions that we accept for upload (defined in ALLOWED_EXTENSIONS above)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 #arbitrary 16 megabyte upload limit
def getPondList():
#SALAMANDER
pond_list = unpickle_pond_list()
return pond_list
#used for making it possible to get numbers from python, and put them in HTML
#Got this from http://blog.bouni.de/blog/2013/04/24/call-functions-out-of-jinjs2-templates/
@app.context_processor
def my_utility_processor():
def ponds():
print "running ponds method"
pond_list = getPondList()
print "length of pond list: ", len(pond_list)
return pond_list
return dict(ponds=ponds)
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def indexView():
'''
Renders the template for the index.
'''
# if 'pond_pic_visible' not in session:
# session['pond_pic_visible']='visible'
#http://runnable.com/UiPcaBXaxGNYAAAL/how-to-upload-a-uploaded_file-to-the-server-in-flask-for-python
if request.method == 'POST': #true if the button "upload" is clicked
# Get the name of the uploaded uploaded_file
uploaded_file = request.files['uploaded_file']
# Check if the uploaded_file is one of the allowed types/extensions
if uploaded_file and allowed_file(uploaded_file.filename):
pond_file = request.files['uploaded_file']
try:
reader = DataReader("") #I don't plan on using this filename, thanks
pond_list = reader.readFile(pond_file.read()) #read method is http://werkzeug.pocoo.org/docs/0.10/datastructures/#werkzeug.datastructures.FileStorage,
except Exception as e:
print "error in getPondList"
print str(e)
return render_template(INTERNAL_SERVER_ERROR_TEMPLATE_ROUTE, error = str(e))
##################################################################
#let's try something. AARDVARK <--easy to search for this
#(this might be more work than making Pond objects serializable)
##################################################################
##trying http://jsonpickle.github.io/
pickle_pond_list(pond_list)
return redirect(url_for("primary_production"))
else:
error_message = "Apologies, that file extension is not allowed. Please try one of the allowed extensions."
return render_template('home_with_error.html', template_file_route = TEMPLATE_FILE_ROUTE, example_file_route = EXAMPLE_FILE_ROUTE,error_message=error_message)
return render_template('home.html', template_file_route = TEMPLATE_FILE_ROUTE, example_file_route = EXAMPLE_FILE_ROUTE)
@app.route(TEMPLATE_FILE_ROUTE, methods=['GET', 'POST'])
def template():
'''
Used to offer template data file
#http://stackoverflow.com/questions/20646822/how-to-serve-static-files-in-flask
'''
try:
return app.send_static_file(TEMPLATE_FILE)
except Exception as e:
print str(e)
return render_template(INTERNAL_SERVER_ERROR_TEMPLATE_ROUTE, error = str(e))
@app.route(EXAMPLE_FILE_ROUTE, methods=['GET', 'POST'])
def example_file_view():
'''
Used to offer example data file
#http://stackoverflow.com/questions/20646822/how-to-serve-static-files-in-flask
'''
try:
return app.send_static_file(EXAMPLE_FILE)
except Exception as e:
print str(e)
return render_template(INTERNAL_SERVER_ERROR_TEMPLATE_ROUTE, error = str(e))
################################################################
#renders the primary_production template.
################################################################
@app.route('/primary_production', methods=['GET', 'POST'])
@app.route('/primary_production.html', methods=['GET', 'POST'])
def primary_production():
'''
Renders the primary_production template, which shows calculated values and a button to download them.
'''
print "primary_production view"
try:
return render_template("primary_production.html")
except Exception as e:
print str(e)
return render_template(INTERNAL_SERVER_ERROR_TEMPLATE_ROUTE, error = str(e))
#c.f. flask quickstart "variable rules"
# @app.route('/graph/<pond_key>/<int:layer>')
# @app.route('/graph')
@app.route('/graph/<pond_key>/<int:layer_index>')
def hourly_ppr_in_layer_graph(pond_key="", layer_index = 0):
'''
#TODO: comments
'''
#get the correct pond from the list in the session dict
print "***************"
print "pond_key is ", pond_key
print "layer_index is ", layer_index
try:
pond = retrieve_pond(pond_key)
times = pond.get_list_of_times()
ppr_values = pond.calculate_hourly_phytoplankton_primary_production_rates_list_over_whole_day_in_thermal_layer(layer_index)
x_values = times
y_values = ppr_values
# print "x values: ", x_values
# print "x length: ", len(x_values)
# print "y values: ", y_values
# print "y length: ", len(y_values)
x_label = "hour"
y_label = "PPPR (mgC*m^-3)"
        graph_title = "PPPR, %s layer %s" % (pond.get_lake_id(), layer_index + 1)  # single title string (was a tuple)
return graph(x_values,y_values,x_label, y_label,graph_title)
except:
print "Unexpected error:", sys.exc_info()[0]
#return error graphic
#TODO: an error graphic
return app.send_static_file('graph_error.png')
@app.route('/export')
def export_view():
'''
Code to make an excel file for download.
Modified from...
http://snipplr.com/view/69344/create-excel-file-with-xlwt-and-insert-in-flask-response-valid-for-jqueryfiledownload/
'''
print "export"
#########################
# Code for creating Flask
# response
#########################
response = Response()
response.status_code = 200
##################################
# Code for creating Excel data and
# inserting into Flask response
##################################
#.... code here for adding worksheets and cells
#Create a new workbook object
workbook = xlwt.Workbook()
#Add a sheet
daily_worksheet = workbook.add_sheet('Daily Statistics')
#columns to write to
year_column = 0
lake_ID_column = year_column+1
day_of_year_column = lake_ID_column+1
bppr_column = day_of_year_column+1
pppr_column = bppr_column+1
#get data from session, write to daily_worksheet
#PLATYPUS
pond_list = unpickle_pond_list()
year_list = []
lake_id_list = []
day_of_year_list = []
bpprList =[]
ppprList = []
for pond in pond_list:
year = pond.get_year()
lake_id = pond.get_lake_id()
day_of_year = pond.get_day_of_year()
bppr = pond.calculate_daily_whole_lake_benthic_primary_production_m2()
pppr = pond.calculate_daily_whole_lake_phytoplankton_primary_production_m2()
year_list.append(year)
lake_id_list.append(lake_id)
day_of_year_list.append(day_of_year)
bpprList.append(bppr)
ppprList.append(pppr)
write_column_to_worksheet(daily_worksheet, year_column, "year", year_list)
write_column_to_worksheet(daily_worksheet, lake_ID_column, "Lake ID", lake_id_list)
write_column_to_worksheet(daily_worksheet, day_of_year_column, "day of year", day_of_year_list)
write_column_to_worksheet(daily_worksheet, bppr_column, "bppr_m2", bpprList)
write_column_to_worksheet(daily_worksheet, pppr_column, "pppr_m2", ppprList)
#Add another sheet
hourly_worksheet = workbook.add_sheet('Hourly Statistics')
#columns
year_column = 0
lake_ID_column = year_column+1
day_of_year_column = lake_ID_column+1
layer_column = day_of_year_column+1
hour_column = layer_column+1
hourly_ppr_rates_column = hour_column+1
#lists
year_list = []
lake_id_list = []
day_of_year_list = []
layer_list = []
hour_list = []
hourly_ppr_rates_list = []
counter = 0
for pond in pond_list:
year = pond.get_year()
lake_id = pond.get_lake_id()
day_of_year = pond.get_day_of_year()
for layer in range (0, len(pond.get_thermal_layer_depths())):
hourly_ppr_in_this_layer_list = []
hourly_ppr_in_this_layer_list = pond.calculate_hourly_phytoplankton_primary_production_rates_list_over_whole_day_in_thermal_layer(layer)
hour = 0.0
time_interval = pond.get_time_interval()
for hourly_ppr in hourly_ppr_in_this_layer_list:
year_list.append(year)
lake_id_list.append(lake_id)
day_of_year_list.append(day_of_year)
layer_list.append(layer)
hour_list.append(hour)
hourly_ppr_rates_list.append(hourly_ppr)
hour+=time_interval
counter+=1
if(counter>10000):
raise Exception("too big! The ouput is too big!!!")
sys.exit()
exit()
#write to columns
write_column_to_worksheet(hourly_worksheet, year_column, "year", year_list)
write_column_to_worksheet(hourly_worksheet, lake_ID_column, "lake", lake_id_list)
write_column_to_worksheet(hourly_worksheet, day_of_year_column, "day", day_of_year_list)
write_column_to_worksheet(hourly_worksheet, layer_column, "layer", layer_list)
write_column_to_worksheet(hourly_worksheet, hour_column, "hour", hour_list)
write_column_to_worksheet(hourly_worksheet, hourly_ppr_rates_column, "ppr_m3", hourly_ppr_rates_list)
#This is the magic. The workbook is saved into the StringIO object,
#then that is passed to response for Flask to use.
output = StringIO.StringIO()
workbook.save(output)
response.data = output.getvalue()
################################
# Code for setting correct
# headers for jquery.fileDownload
#################################
filename = "export.xls"
mimetype_tuple = mimetypes.guess_type(filename)
#HTTP headers for forcing file download
response_headers = Headers({
'Pragma': "public", # required,
'Expires': '0',
'Cache-Control': 'must-revalidate, post-check=0, pre-check=0',
'Cache-Control': 'private', # required for certain browsers,
'Content-Type': mimetype_tuple[0],
'Content-Disposition': 'attachment; filename=\"%s\";' % filename,
'Content-Transfer-Encoding': 'binary',
'Content-Length': len(response.data)
})
if not mimetype_tuple[1] is None:
response.update({
'Content-Encoding': mimetype_tuple[1]
})
response.headers = response_headers
#as per jquery.fileDownload.js requirements
response.set_cookie('fileDownload', 'true', path='/')
################################
# Return the response
#################################
return response
@app.errorhandler(413)
def request_entity_too_large(error):
'''
Error handler view. Should display when files that are too large are uploaded.
'''
return 'File Too Large'
@app.errorhandler(404)
def pageNotFound(error):
return "Page not found"
@app.errorhandler(500)
def internalServerError(internal_exception):
'''
Prints internal program exceptions so they are visible by the user. Stopgap measure for usability.
'''
#TODO: more and better errors, so that when specific parts of the data are wrong, users can figure it out.
traceback.print_exc()
print str(internal_exception)
return render_template(INTERNAL_SERVER_ERROR_TEMPLATE_ROUTE, error = str(internal_exception))
#HELPER METHODS
def write_column_to_worksheet(worksheet,column_number=0, column_header = "", values_list=[]):
'''
Prepends a column header and puts the data in values_list into worksheet at the specified column
    @param worksheet: An xlwt worksheet to write to.
@param column_number: Column number to write to.
@param column_header: Header to put at the top of the column.
@param values_list: list of values to put in the column.
'''
print "writing column to worksheet"
values_list.insert(0, column_header) #stick the column header at the front.
numRows = len(values_list)
for i in range(0, numRows):
row = i
column = column_number
value=values_list[row]
worksheet.write(row,column,value)
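

# Illustrative usage sketch (not part of the original app): writing a couple of columns
# with write_column_to_worksheet() into a fresh xlwt workbook, mirroring export_view().
def _example_write_columns():
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('Example')
    write_column_to_worksheet(sheet, 0, "year", [2014, 2015])
    write_column_to_worksheet(sheet, 1, "pppr_m2", [1.23, 4.56])
    return workbook  # workbook.save(<file or stream>) would persist it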
def retrieve_pond(pond_key = ""):
#pickled pond list from session
print "retrieve pond", pond_key
pond_list = unpickle_pond_list()
try:
pond = next(pond for pond in pond_list if pond.get_key()==pond_key)
except:
raise Exception("Could not find pond")
print "found pond"
return pond
def unpickle_pond_list():
pickled_ponds_list = session[PICKLED_POND_LIST_KEY]
pond_list = []
for pickled_pond in pickled_ponds_list:
pond = jsonpickle.decode(pickled_pond, keys=True) #BEWARE! THIS TURNS ALL THE KEYS IN BATHYMETRIC POND SHAPE TO STRINGS
pond_list.append(pond)
return pond_list
def pickle_pond_list(pond_list = []):
pickled_ponds_list = []
for pond in pond_list:
pickled_pond = jsonpickle.encode(pond,keys=True) #make it NOT SET THE KEYS TO STRINGS
pickled_ponds_list.append(pickled_pond)
session[PICKLED_POND_LIST_KEY] = pickled_ponds_list
def graph(x_vals=[],y_vals=[],x_label = "x label", y_label="y label", graph_title = | |
"""
Numeric Integration Using Tanh-Sinh Variable Transformation
This is the main module which implements the Tanh-Sinh method for numeric integration.
"""
# python import
import logging
import math
import cmath
import traceback
import typing
# tsquad module imports
from . import shanks
from . import tsconfig
from . import generate_py_nodes_weights
generate_py_nodes_weights.run()
from . import nodes_weights
########################################################################################################################
## typedefs
########################################################################################################################
numeric = typing.Union[int, float]
########################################################################################################################
## define module specific exceptions
########################################################################################################################
class TSIntegrationError(Exception):
pass
class TSIntegrationLimitReachedError(Exception):
pass
class TSIntegrationFunctionEvaluationError(Exception):
pass
class TSIntegrationOscLimitReachedError(Exception):
pass
########################################################################################################################
## implementation of the tanh-sinh method
########################################################################################################################
def _f_x_exception_wrapper(f, x, args):
try:
f_x = f(x, *args)
except Exception as e:
logging.error(
"calling function at x={:.8e} failed with exception {}, show traceback:\n".format(
x, e.__class__.__name__
)
)
traceback.print_exc()
raise TSIntegrationFunctionEvaluationError(
"Failed to evaluate function (Exception occurred during function call)"
)
if cmath.isnan(f_x):
raise TSIntegrationFunctionEvaluationError(
"Failed to evaluate function (function returns nan at x={:.8e})".format(x)
)
return f_x
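

# Minimal sketch (added for illustration): _f_x_exception_wrapper evaluates f(x, *args) and
# converts exceptions raised by f, as well as NaN results, into
# TSIntegrationFunctionEvaluationError.
def _example_wrapper_usage():
    f = lambda x, a: a / x                            # raises ZeroDivisionError at x = 0
    y = _f_x_exception_wrapper(f, 2.0, (1.0,))        # fine: returns 0.5
    try:
        _f_x_exception_wrapper(f, 0.0, (1.0,))
    except TSIntegrationFunctionEvaluationError:
        pass                                          # the failure is logged and re-raised as above
    return y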
class QuadRes(object):
__slots__ = ("I", "err", "func_calls", "rec_steps")
def __init__(self, I=0, err=0, func_calls=0, rec_steps=0):
self.I = I
self.err = err
self.func_calls = func_calls
self.rec_steps = rec_steps
def __add__(self, other):
if (self.err is not None) and (other.err is not None):
err = self.err + other.err
else:
err = None
r = QuadRes(
I=self.I + other.I,
err=err,
func_calls=self.func_calls + other.func_calls,
rec_steps=self.rec_steps + other.rec_steps,
)
return r
def __str__(self):
return "QuadRes(I={}, err={}, func_calls={}, rec_steps={})".format(
self.I, self.err, self.func_calls, self.rec_steps
)
def __repr__(self):
return self.__str__()
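

# Illustrative sketch (not in the original module): results from two sub-intervals can be
# combined with "+", which sums I, err, func_calls and rec_steps.
def _example_quadres_sum():
    left = QuadRes(I=0.5, err=1e-13, func_calls=120, rec_steps=1)
    right = QuadRes(I=0.25, err=2e-13, func_calls=80, rec_steps=1)
    return left + right   # QuadRes(I=0.75, err=3e-13, func_calls=200, rec_steps=2)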
class QuadTS(object):
"""
Integrate f(x, *args) from a to b. If a = 0 singularities at x = 0 are treated very well.
The procedure is assumed to have converged if the estimated error `err_est` and the
value of the integral `I` fulfill:
err_est < abs_tol or err_est / |I| < rel_tol
    Note that the routine starts with a t-grid using `2 * tsconfig.N_0 + 1` equally spaced nodes on the t-axis.
    If the tolerance criterion is not met, a finer grid is used by doubling N_0.
    This is repeated at most `subgrid_max` times. Be aware that this requires the nodes and weights to be
    pre-calculated for the finer grids, i.e., `subgrid_max <= tsconfig.num_sub_grids` needs to hold.
If you change the values in `tsconfig.py` simply run `python3 generate_py_nodes_weights` to force an
update of the `nodes_weights.py` file.
    If the tolerance criterion is not met, repeat recursively on sub-intervals.
The maximum number of recursions is given by `rec_limit`.
Note that the integral over [a, 0] is automatically translated to -I([0, a])
in order to better treat a potential singularity at x=0.
:param f: function to integrate, callable of the form f(x, *args)
:param args: arguments passed to `f`
:param abs_tol: absolute tolerance
:param rel_tol: relative tolerance
:param recursive: if True, allow for recursive application of the tanh-sinh scheme
:param rec_limit: maximum number of recursions allowed
:param force_t_max_idx: Set the upper boundary/truncation for the (transformed) t-integral by hand,
this controls the resolution of the lower bound a of the x-integral.
        Mainly needed for testing purposes.
    :param subgrid_max: Set the number of sub-grids to use. If `subgrid_max=0` use the largest number available.
:param osc_threshold: when summing up integrals of single periods of oscillating functions
(see `quad_osc_finite`) this threshold stops the summation
if `|(I_k - s_k) / I_k| < osc_threshold`.
It poses a relative threshold for the new term `s_k` with respect to the partial sum `I_k`.
:param osc_limit: Stops the summation for oscillatory functions (see `quad_osc_finite`) and raises a
`TSIntegrationOscLimitReachedError` when `osc_limit` terms have been added.
Set `osc_limit=0` to have no limit.
:param debug: if True, enable debug messages
"""
def __init__(
self,
f: typing.Callable,
args: tuple = tuple(),
abs_tol: float = 1e-12,
rel_tol: float = 1e-12,
recursive: bool = True,
rec_limit: int = 50,
force_t_max_idx: [None, int] = None,
subgrid_max=0,
osc_threshold=1e-12,
osc_limit=5000,
debug=False,
other=None,
):
# init class members
self.f = f
if other is None:
self.args = args
self.abs_tol = abs_tol
self.rel_tol = rel_tol
self.recursive = recursive
self.rec_limit = rec_limit
self.force_t_max_idx = force_t_max_idx
self.subgrid_max = subgrid_max
self.osc_threshold = osc_threshold
self.osc_limit = osc_limit
self.debug = debug
else:
self.args = other.args
self.abs_tol = other.abs_tol
self.rel_tol = other.rel_tol
self.recursive = other.recursive
self.rec_limit = other.rec_limit
self.force_t_max_idx = other.force_t_max_idx
self.subgrid_max = other.subgrid_max
self.osc_threshold = other.osc_threshold
self.osc_limit = other.osc_limit
self.debug = other.debug
# process data
if self.subgrid_max == 0:
self.subgrid_max = tsconfig.num_sub_grids - 1
elif self.subgrid_max < 3:
logging.info("subgrid_max ({}) set to 3".format(subgrid_max))
self.subgrid_max = 3
elif subgrid_max > tsconfig.num_sub_grids - 1:
raise ValueError("subgrid_max exceeds pre-calculated nodes and weights")
def _get_integral_bound(self, a, b):
"""
chose t_max such that |w_(t_max) I(g(t_max))| < abs_tol
"""
sc = (b - a) / 2
for i in range(tsconfig.N_t_max):
f_x = _f_x_exception_wrapper(
self.f, a + sc * nodes_weights._1mg[i][0][-1], self.args
)
tmp = abs(sc * f_x * nodes_weights._w[i][0][-1])
if tmp < self.abs_tol:
return i
####################################################################################################################
## simple (single run) Tanh-Sinh integration scheme, i.e., the core routine
####################################################################################################################
def _quad(self, a, b) -> QuadRes:
"""
helper function that performs the actual Tanh-Sinh integration and estimates the error
(http://crd-legacy.lbl.gov/~dhbailey/dhbpapers/dhb-tanh-sinh.pdf.)
Perform the numeric integration of int_a^b f(x, *args) dx.
        Sequentially doubles the number of nodes until the desired accuracy is reached.
        If the maximum number of doublings (given by subgrid_max) is reached without achieving
        the desired accuracy, a TSIntegrationError is raised.
:param a: lower integral boundary
:param b: upper integral boundary
:return: a QuadRes result object where `I` contains the value of the numeric integration and `err` the estimate
        of the error. In addition, `func_calls` and `rec_steps` are provided by the result object.
"""
local_func_calls = 0
if self.force_t_max_idx is None:
t_max_idx = self._get_integral_bound(a, b)
local_func_calls += t_max_idx + 1
else:
t_max_idx = self.force_t_max_idx
data_dt = nodes_weights.dt[t_max_idx]
data_1mg = nodes_weights._1mg[t_max_idx]
data_w = nodes_weights._w[t_max_idx]
eps = 10 ** -14
I_res_n2 = 0
I_res_n1 = 0
I_res = 0
sum_tmp = 0
sc = (b - a) / 2
f_x_max = _f_x_exception_wrapper(self.f, a + sc * data_1mg[0][+0], self.args)
w_f_t_max = sc * f_x_max * data_w[0][0]
f_x_min = _f_x_exception_wrapper(self.f, a + sc * data_1mg[0][-1], self.args)
w_f_t_min = sc * f_x_min * data_w[0][-1]
local_func_calls += 2
d4_t_min = abs(w_f_t_min)
d4_t_max = abs(w_f_t_max)
err4 = max(d4_t_min, d4_t_max)
err_est = math.nan
err1 = err2 = err3 = math.nan
N = self.subgrid_max
assert N >= 2
if self.debug:
print("## TS integration in debug mode ##")
print("## " + "-" * 29 + " ##")
print("tmin", a + sc * data_1mg[0][-1])
print("tmax", a + sc * data_1mg[0][+0])
print("f(t_min)", f_x_min)
print("f(t_max)", f_x_max)
print("d4_tmin", d4_t_min)
print("d4_tmax", d4_t_max)
print("## " + "-" * 29 + " ##")
success = False
may_be_success = False
I_res_final = 0
err_est_final = 0
for n in range(N + 1):
max_tmp = 0
for k in range(len(data_w[n])):
f_x = _f_x_exception_wrapper(self.f, a + sc * data_1mg[n][k], self.args)
w_f = sc * f_x * data_w[n][k]
max_tmp = max(max_tmp, abs(w_f))
sum_tmp += w_f
local_func_calls += len(data_w[n])
I_res_n2 = I_res_n1
I_res_n1 = I_res
I_res = sum_tmp * data_dt[n]
err3 = abs(eps * max_tmp)
if self.debug:
print("n", n, "I_n", I_res)
if n >= 2:
d1 = abs(I_res - I_res_n1)
if self.debug:
print(
"d1 = I_n - I_(n-1) {:.8e} -> err {:.16e}".format(
d1, d1 ** 2
)
)
if d1 == 0:
if self.debug:
print("best we can have!")
print("return, {:.16e} +- {:.4e}".format(I_res, err_est))
err_est = max(err3, err4)
return QuadRes(
I=I_res,
err=err_est,
func_calls=local_func_calls,
rec_steps=1,
)
else:
d1_log = math.log10(d1)
d2 = abs(I_res - I_res_n2)
if self.debug:
print("d2 = I_n - I_(n-2) {:.8e}".format(d2))
err1 = d1 ** 2
if self.debug:
print("err1 = {:.8e}".format(err1))
if (d2 > 1e-308) and (d2 < 1):
try:
d2_log = math.log10(d2)
tmp = d1_log ** 2 / d2_log
except ZeroDivisionError:
print("d2", d2)
print("d2_log", d2_log)
raise
if self.debug:
print("d1_log", d1_log)
print("d2_log", d2_log)
print("tmp ", tmp)
if tmp < -308:
err2 = 0
if self.debug:
print("err2 = 0 (due to 10 ** d1log^2/d2log underflow")
elif tmp > 308:
err2 = 10
if self.debug:
print("err2 = 10 (due to 10 ** d1log^2/d2log overflow")
else:
err2 = 10 ** tmp
if self.debug:
print("err2 = {:.8e}".format(err2))
else:
err2 = 10
if self.debug:
print("err2 = 10 (due to d2 < 1e-308)")
if self.debug:
print("err3 = {:.8e}".format(err3))
if err2 >= 10:
if self.debug:
print("err1 >= 10 -> | |
# Copyright (c) 2021 <NAME>.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""GEMPAK calculations."""
import numpy as np
# Gravitational constant
g = 9.80616 # m / s^2
# Dry air gas constant
Rd = 287.04 # J / K / kg
def interp_logp_height(sounding, missing=-9999):
"""Interpolate height linearly with respect to log p.
Parameters
----------
sounding : dict
Sounding dictionary structure.
Notes
-----
This function mimics the functionality of the MR_INTZ
subroutine in GEMPAK.
"""
size = len(sounding['HGHT'])
idx = -1
maxlev = -1
while size + idx != 0:
if sounding['HGHT'][idx] != missing:
maxlev = size + idx
break
else:
idx -= 1
pbot = missing
for i in range(maxlev):
pres = sounding['PRES'][i]
hght = sounding['HGHT'][i]
if pres == missing:
continue
elif hght != missing:
pbot = pres
zbot = hght
ptop = 2000
elif pbot == missing:
continue
else:
ilev = i + 1
while pres <= ptop:
if sounding['HGHT'][ilev] != missing:
ptop = sounding['PRES'][ilev]
ztop = sounding['HGHT'][ilev]
else:
ilev += 1
sounding['HGHT'][i] = (zbot + (ztop - zbot)
* (np.log(pres / pbot) / np.log(ptop / pbot)))
if maxlev < size - 1:
if maxlev > -1:
pb = sounding['PRES'][maxlev]
zb = sounding['HGHT'][maxlev]
tb = sounding['TEMP'][maxlev]
tdb = sounding['DWPT'][maxlev]
else:
pb = missing
zb = missing
tb = missing
tdb = missing
for i in range(maxlev + 1, size):
if sounding['HGHT'][i] == missing:
tt = sounding['TEMP'][i]
tdt = sounding['DWPT'][i]
pt = sounding['PRES'][i]
scale_z = scale_height(tb, tt, tdb, tdt, pb, pt, missing)
sounding['HGHT'][i] = moist_hydrostatic_height(zb, pb, pt, scale_z)
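

# Illustrative sketch (added as an example): filling a missing height with interp_logp_height().
# The middle level's HGHT is interpolated linearly in log(p) between the bracketing levels.
def _example_interp_logp_height():
    sounding = {
        'PRES': [1000.0, 850.0, 700.0],
        'HGHT': [110.0, -9999, 3012.0],
        'TEMP': [25.0, 15.0, 5.0],
        'DWPT': [20.0, 10.0, 0.0],
    }
    interp_logp_height(sounding)
    return sounding['HGHT'][1]   # interpolated height (roughly 1.4 km) instead of -9999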
def interp_logp_pressure(sounding, missing=-9999):
"""Interpolate pressure from heights.
Parameters
----------
sounding : dict
Sounding dictionary structure.
Notes
-----
This function is similar to the MR_INTP subroutine from GEMPAK.
"""
i = 0
ilev = -1
klev = -1
size = len(sounding['PRES'])
pt = missing
pb = missing
zt = missing
zb = missing
while i < size:
p = sounding['PRES'][i]
z = sounding['HGHT'][i]
if p != missing and z != missing:
klev = i
pt = p
zt = z
if ilev != -1 and klev != -1:
for j in range(ilev + 1, klev):
z = sounding['HGHT'][j]
if z != missing and zb != missing and pb != missing:
sounding['PRES'][j] = (
pb * np.exp((z - zb) * np.log(pt / pb) / (zt - zb))
)
ilev = klev
pb = pt
zb = zt
i += 1
def interp_missing_data(sounding, missing=-9999):
"""Interpolate missing sounding data.
Parameters
----------
sounding : dict
Sounding dictionary structure.
Notes
-----
This function is similar to the MR_MISS subroutine in GEMPAK.
"""
size = len(sounding['PRES'])
recipe = [('TEMP', 'DWPT'), ('DRCT', 'SPED'), ('DWPT', None)]
for var1, var2 in recipe:
iabove = 0
i = 1
more = True
while i < (size - 1) and more:
if sounding[var1][i] == missing:
if iabove <= i:
iabove = i + 1
found = False
while not found:
if sounding[var1][iabove] != missing:
found = True
else:
iabove += 1
if iabove >= size:
found = True
iabove = 0
more = False
if (var2 is None and iabove != 0
and sounding['PRES'][i - 1] > 100
and sounding['PRES'][iabove] < 100):
iabove = 0
if iabove != 0:
adata = {}
bdata = {}
for param, val in sounding.items():
if (param in ['PRES', 'TEMP', 'DWPT',
'DRCT', 'SPED', 'HGHT']):
adata[param] = val[i - 1]
bdata[param] = val[iabove]
vlev = sounding['PRES'][i]
outdata = interp_parameters(vlev, adata, bdata, missing)
sounding[var1][i] = outdata[var1]
if var2 is not None:
sounding[var2][i] = outdata[var2]
i += 1
def interp_moist_height(sounding, missing=-9999):
"""Interpolate moist hydrostatic height.
Parameters
----------
sounding : dict
Sounding dictionary structure.
Notes
-----
This function mimics the functionality of the MR_SCMZ
    subroutine in GEMPAK. This is the default behavior when
merging observed sounding data.
"""
hlist = (np.ones(len(sounding['PRES'])) * -9999)
ilev = -1
top = False
found = False
while not found and not top:
ilev += 1
if ilev >= len(sounding['PRES']):
top = True
elif (sounding['PRES'][ilev] != missing
and sounding['TEMP'][ilev] != missing
and sounding['HGHT'][ilev] != missing):
found = True
while not top:
pb = sounding['PRES'][ilev]
plev = sounding['PRES'][ilev]
tb = sounding['TEMP'][ilev]
tdb = sounding['DWPT'][ilev]
zb = sounding['HGHT'][ilev]
zlev = sounding['HGHT'][ilev]
jlev = ilev
klev = 0
mand = False
while not mand:
jlev += 1
if jlev >= len(sounding['PRES']):
mand = True
top = True
else:
pt = sounding['PRES'][jlev]
tt = sounding['TEMP'][jlev]
tdt = sounding['DWPT'][jlev]
zt = sounding['HGHT'][jlev]
if (zt != missing
and tt != missing):
mand = True
klev = jlev
if (sounding['PRES'][ilev] != missing
and sounding['TEMP'][ilev] != missing
and sounding['PRES'][jlev] != missing
and sounding['TEMP'][jlev] != missing):
scale_z = scale_height(tb, tt, tdb, tdt, pb, pt, missing)
znew = moist_hydrostatic_height(zb, pb, pt, scale_z, missing)
tb = tt
tdb = tdt
pb = pt
zb = znew
else:
scale_z = missing
znew = missing
hlist[jlev] = scale_z
if klev != 0:
s = (zt - zlev) / (znew - zlev)
for h in range(ilev + 1, klev + 1):
hlist[h] *= s
hbb = zlev
pbb = plev
for ii in range(ilev + 1, jlev):
p = sounding['PRES'][ii]
scale_z = hlist[ii]
z = moist_hydrostatic_height(hbb, pbb, p, scale_z)
sounding['HGHT'][ii] = z
hbb = z
pbb = p
ilev = klev
def interp_parameters(vlev, adata, bdata, missing=-9999):
"""General interpolation with respect to log-p.
Parameters
----------
vlev : float
Pressure level to interpolate to.
adata : dict
Sounding dictionary containing data below (i.e., greater pressure) the
desired pressure level.
bdata : dict
Sounding dictionary containing the data above (i.e., lesser pressure)
the desired pressure level.
Returns
-------
dict
A sounding dictionary with interpolated values.
Notes
-----
See the PC_INTP subroutine in GEMPAK.
"""
pres1 = adata['PRES']
pres2 = bdata['PRES']
between = (((pres1 < pres2) and (pres1 < vlev)
and (vlev < pres2))
or ((pres2 < pres1) and (pres2 < vlev)
and (vlev < pres1)))
if not between:
raise ValueError('Current pressure does not fall between levels.')
elif pres1 <= 0 or pres2 <= 0:
raise ValueError('Pressure cannot be negative.')
outdata = {}
rmult = np.log(vlev / pres1) / np.log(pres2 / pres1)
outdata['PRES'] = vlev
for param, aval in adata.items():
bval = bdata[param]
if param == 'DRCT':
ang1 = aval % 360
ang2 = bval % 360
if abs(ang1 - ang2) > 180:
if ang1 < ang2:
ang1 += 360
else:
ang2 += 360
ang = ang1 + (ang2 - ang1) * rmult
outdata[param] = ang % 360
else:
outdata[param] = aval + (bval - aval) * rmult
if missing in [aval, bval]:
outdata[param] = missing
return outdata
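

# Illustrative sketch (not part of the GEMPAK port itself): interpolating a 900 hPa level from
# the bracketing 1000 hPa and 850 hPa levels with interp_parameters(). Note how the wind
# direction is interpolated across north (350 deg -> 20 deg).
def _example_interp_parameters():
    adata = {'PRES': 1000.0, 'TEMP': 25.0, 'DWPT': 20.0, 'DRCT': 350.0, 'SPED': 5.0, 'HGHT': 110.0}
    bdata = {'PRES': 850.0, 'TEMP': 15.0, 'DWPT': 10.0, 'DRCT': 20.0, 'SPED': 10.0, 'HGHT': 1457.0}
    return interp_parameters(900.0, adata, bdata)   # dict of values interpolated in log(p)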
def mixing_ratio(dwpc, pres, missing=-9999):
"""Calculate the water vapor mixing ratio.
Parameters
----------
dwpc : float
Dewpoint (degC)
pres : float
Total air pressure (hPa)
missing : float, optional
Missing data flag
Returns
-------
float
        The (mass) mixing ratio (g/kg)
Notes
-----
See GEMPAK function PR_MIXR
"""
if missing in [dwpc, pres]:
mixr = missing
else:
vapr = vapor_pressure(dwpc, missing)
if vapr == missing:
mixr = missing
else:
corr = (1.001 + ((pres - 100.) / 900.) * 0.0034)
e = corr * vapr
if e > (0.5 * pres):
mixr = missing
else:
mixr = 0.62197 * (e / (pres - e)) * 1000.
return mixr
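

# Illustrative sketch (added here): a typical mixing_ratio() call. It relies on the module's
# vapor_pressure() helper, which is defined elsewhere in this file.
def _example_mixing_ratio():
    return mixing_ratio(dwpc=15.0, pres=1013.25)   # (mass) mixing ratio in g/kg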
def moist_hydrostatic_height(z_bot, pres_bot, pres_top, scale_height,
missing=-9999):
"""Calculate the moist hydrostatic height at the top of a layer.
Parameters
----------
z_bot : float
Bottom of layer height (m)
pres_bot : float
Bottom of layer pressure (hPa)
pres_top : float
Top of layer pressure (hPa)
    scale_height : float
Scale height of layer (m)
missing : float, optional
Missing data flag
Returns
-------
float
The moist hydrostatic height (m)
Notes
-----
See GEMPAK function PR_MHGT
"""
if missing in [z_bot, pres_bot, pres_top, scale_height]:
mhgt = missing
else:
mhgt = z_bot + scale_height * np.log(pres_bot / pres_top)
return mhgt
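

# Illustrative sketch (added for clarity): the top-of-layer height follows
# z_top = z_bot + H * ln(p_bot / p_top) for a given scale height H.
def _example_moist_hydrostatic_height():
    return moist_hydrostatic_height(z_bot=110.0, pres_bot=1000.0, pres_top=850.0,
                                    scale_height=8000.0)   # about 1410 m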
def scale_height(tmpc_bot, tmpc_top, dwpc_bot, dwpc_top,
pres_bot, pres_top, missing=-9999):
"""Calculate the scale height of a layer.
Parameters
----------
tmpc_bot : float
Bottom of layer temperature (degC)
tmpc_top : float
Top of layer temperature (degC)
dwpc_bot : float
Bottom | |
to be able to set a list of public keys: call the ``UpdateDevEndpoint`` API with the public key content in the ``deletePublicKeys`` attribute, and the list of new keys in the ``addPublicKeys`` attribute.
- *(string) --*
- **SecurityConfiguration** *(string) --*
The name of the SecurityConfiguration structure to be used with this DevEndpoint.
- **Arguments** *(dict) --*
A map of arguments used to configure the DevEndpoint.
Note that currently, we only support "--enable-glue-datacatalog": "" as a valid argument.
- *(string) --*
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
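

# Illustrative usage sketch (not part of the generated stubs): paginators are obtained from a
# boto3 Glue client and iterated page by page. The job name is a placeholder.
def _example_paginate_job_runs(job_name='my-glue-job'):
    import boto3
    client = boto3.client('glue')
    paginator = client.get_paginator('get_job_runs')
    runs = []
    for page in paginator.paginate(JobName=job_name):
        runs.extend(page.get('JobRuns', []))
    return runs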
class GetJobRuns(Paginator):
def paginate(self, JobName: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Glue.Client.get_job_runs`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRuns>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
JobName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **JobRuns** *(list) --*
              A list of job-run metadata objects.
- *(dict) --*
Contains information about a job run.
- **Id** *(string) --*
The ID of this job run.
- **Attempt** *(integer) --*
The number of the attempt to run this job.
- **PreviousRunId** *(string) --*
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
- **TriggerName** *(string) --*
The name of the trigger that started this job run.
- **JobName** *(string) --*
The name of the job definition being used in this run.
- **StartedOn** *(datetime) --*
The date and time at which this job run was started.
- **LastModifiedOn** *(datetime) --*
The last time this job run was modified.
- **CompletedOn** *(datetime) --*
The date and time this job run completed.
- **JobRunState** *(string) --*
The current state of the job run.
- **Arguments** *(dict) --*
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the `Calling AWS Glue APIs in Python <http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html>`__ topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the `Special Parameters Used by AWS Glue <http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html>`__ topic in the developer guide.
- *(string) --*
- *(string) --*
- **ErrorMessage** *(string) --*
An error message associated with this job run.
- **PredecessorRuns** *(list) --*
A list of predecessors to this job run.
- *(dict) --*
A job run that was used in the predicate of a conditional trigger that triggered this job run.
- **JobName** *(string) --*
The name of the job definition used by the predecessor job run.
- **RunId** *(string) --*
The job-run ID of the predecessor job run.
- **AllocatedCapacity** *(integer) --*
This field is deprecated, use ``MaxCapacity`` instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the `AWS Glue pricing page <https://aws.amazon.com/glue/pricing/>`__ .
- **ExecutionTime** *(integer) --*
The amount of time (in seconds) that the job run consumed resources.
- **Timeout** *(integer) --*
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters ``TIMEOUT`` status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
- **MaxCapacity** *(float) --*
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the `AWS Glue pricing page <https://aws.amazon.com/glue/pricing/>`__ .
Do not set ``Max Capacity`` if using ``WorkerType`` and ``NumberOfWorkers`` .
The value that can be allocated for ``MaxCapacity`` depends on whether you are running a python shell job, or an Apache Spark ETL job:
* When you specify a python shell job (``JobCommand.Name`` ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
* When you specify an Apache Spark ETL job (``JobCommand.Name`` ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
- **NotificationProperty** *(dict) --*
Specifies configuration properties of a job run notification.
- **NotifyDelayAfter** *(integer) --*
After a job run starts, the number of minutes to wait before sending a job run delay notification.
- **WorkerType** *(string) --*
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
* For the ``Standard`` worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
* For the ``G.1X`` worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
* For the ``G.2X`` worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
- **NumberOfWorkers** *(integer) --*
The number of workers of a defined ``workerType`` that are allocated when a job runs.
The maximum number of workers you can define are 299 for ``G.1X`` , and 149 for ``G.2X`` .
- **SecurityConfiguration** *(string) --*
The name of the SecurityConfiguration structure to be used with this job run.
- **LogGroupName** *(string) --*
The name of the log group for secure logging, that can be server-side encrypted in CloudWatch using KMS. This name can be ``/aws-glue/jobs/`` , in which case the default encryption is ``NONE`` . If you add a role name and SecurityConfiguration name (in other words, ``/aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/`` ), then that security configuration will be used to encrypt the log group.
:type JobName: string
:param JobName: **[REQUIRED]**
The name of the job definition for which to retrieve all job runs.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the | |
#!/usr/bin/env python
# coding: utf-8
# # Assignment #1
# ## Student: <NAME>
# In[2]:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from pandas import DataFrame
from matplotlib import colors as mcolors
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns  # required by poder_predictivo_numerica below
# #### Function to compute the prediction quality indices
# In[3]:
def indices_general(MC, nombres = None):
precision_global = np.sum(MC.diagonal()) / np.sum(MC)
error_global = 1 - precision_global
precision_categoria = pd.DataFrame(MC.diagonal()/np.sum(MC,axis = 1)).T
if nombres is not None:
precision_categoria.columns = nombres
return {"Matriz de Confusión":MC,
"Precisión Global":precision_global,
"Error Global":error_global,
"Precisión por categoría":precision_categoria}
# #### Function to plot the distribution of the target variable
# In[4]:
def distribucion_variable_predecir(data:DataFrame,variable_predict:str):
colors = list(dict(**mcolors.CSS4_COLORS))
df = pd.crosstab(index=data[variable_predict],columns="valor") / data[variable_predict].count()
fig = plt.figure(figsize=(10,9))
g = fig.add_subplot(111)
countv = 0
titulo = "Distribución de la variable %s" % variable_predict
for i in range(df.shape[0]):
g.barh(1,df.iloc[i],left = countv, align='center',color=colors[11+i],label= df.iloc[i].name)
countv = countv + df.iloc[i]
vals = g.get_xticks()
g.set_xlim(0,1)
g.set_yticklabels("")
g.set_title(titulo)
g.set_ylabel(variable_predict)
g.set_xticklabels(['{:.0%}'.format(x) for x in vals])
countv = 0
for v in df.iloc[:,0]:
g.text(np.mean([countv,countv+v]) - 0.03, 1 , '{:.1%}'.format(v), color='black', fontweight='bold')
countv = countv + v
g.legend(loc='upper center', bbox_to_anchor=(1.08, 1), shadow=True, ncol=1)
# #### Function to view the distribution of a categorical variable with respect to the target
# In[5]:
def poder_predictivo_categorica(data:DataFrame, var:str, variable_predict:str):
df = pd.crosstab(index= data[var],columns=data[variable_predict])
df = df.div(df.sum(axis=1),axis=0)
titulo = "Distribución de la variable %s según la variable %s" % (var,variable_predict)
g = df.plot(kind='barh',stacked=True,legend = True, figsize = (10,9), xlim = (0,1),title = titulo, width = 0.8)
vals = g.get_xticks()
g.set_xticklabels(['{:.0%}'.format(x) for x in vals])
g.legend(loc='upper center', bbox_to_anchor=(1.08, 1), shadow=True, ncol=1)
for bars in g.containers:
plt.setp(bars, width=.9)
for i in range(df.shape[0]):
countv = 0
for v in df.iloc[i]:
g.text(np.mean([countv,countv+v]) - 0.03, i , '{:.1%}'.format(v), color='black', fontweight='bold')
countv = countv + v
# #### Function to view the distribution of a numeric variable with respect to the target
# In[6]:
def poder_predictivo_numerica(data:DataFrame, var:str, variable_predict:str):
sns.FacetGrid(data, hue=variable_predict, height=6).map(sns.kdeplot, var, shade=True).add_legend()
# ### Exercise 1:
# #### In this exercise we will use the data (voces.csv). This is a gender recognition problem based on the analysis of voice and speech. The database was created to identify a voice as male or female based on the acoustic properties of voice and speech. The dataset consists of 3,168 recorded voice samples collected from male and female speakers.
# #### 1. Load the voces.csv data table into Python.
# In[6]:
voces = pd.read_csv("voces.csv", delimiter = ',', decimal = '.')
# In[7]:
voces.info()
voces.shape
voces.info
# In[8]:
voces.describe()
# Basic statistics are computed to inspect the distributions and check whether the table needs to be
# centered and scaled, since neural networks are a distance-based method.
# In[9]:
# Normalizing and centering the table since the values are on different scales
voices = voces.iloc[:,0:20]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(voices)
voices.loc[:,:] = scaled_values
voices.head()
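# Quick sanity check (added for illustration, not part of the original notebook):
# after StandardScaler the predictor columns should have mean ~0 and std ~1.
voices.describe().loc[["mean", "std"]]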
# #### 2. Randomly generate a testing table with 20% of the data and build a training table with the rest of the data.
# #### Distribution of the target variable
# In[10]:
distribucion_variable_predecir(voces,"genero")
# The target variable is completely balanced, so the predictions on the testing set should come out very similar.
# #### Drop the categorical variable and keep the predictor variables in X
# In[11]:
X = voices.iloc[:,0:20]
X.head()
# #### Keep the target variable in y
# In[12]:
y = voces.iloc[:,20:21]
y.head()
# #### Split the data: 80% for training and 20% for testing
# In[13]:
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
# #### 3. Using MLPClassifier, build a predictive model for the training table. Use enough hidden layers and nodes so that the prediction is good.
# #### Using two hidden layers, with 10,000 and 150 nodes respectively (hidden_layer_sizes=[10000, 150])
# In[14]:
instancia_red = MLPClassifier(solver='lbfgs', random_state=0,hidden_layer_sizes=[10000, 150])
instancia_red.fit(X_train,y_train.iloc[:,0].values)
# #### 4. Using the testing table, compute the confusion matrix, the accuracy, the positive precision, the negative precision, the false positives, the false negatives, the positive assertiveness and the negative assertiveness. Then build a comparison table.
# #### Model quality indices
# In[15]:
prediccion = instancia_red.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
print("\n%s:\n%s"%(k,str(indices[k])))
# In[16]:
# Displaying the hand-coded function
def indices_personalizados(MC):
print(MC)
precision_global = (MC.iloc[0,0] + MC.iloc[1,1]) / (MC.iloc[0,0]
+ MC.iloc[0,1] + MC.iloc[1,0] + MC.iloc[1,1])
error_global = 1 - precision_global
precision_positiva = (MC.iloc[1,1]) / (MC.iloc[1,0] + MC.iloc[1,1])
precision_negativa = (MC.iloc[0,0]) / (MC.iloc[0,0] + MC.iloc[0,1])
falsos_positivos = (MC.iloc[0,1]) / (MC.iloc[0,0] + MC.iloc[0,1])
falsos_negativos = (MC.iloc[1,0]) / (MC.iloc[1,0] + MC.iloc[1,1])
asertividad_positiva = (MC.iloc[1,1]) / (MC.iloc[0,1] + MC.iloc[1,1])
asertividad_negativa = (MC.iloc[0,0]) / (MC.iloc[0,0] + MC.iloc[1,0])
return {"Precisión Global":precision_global,
"Error Global":error_global,
"Precision Positiva (PP)":precision_positiva,
"Precision Negativa (PN)":precision_negativa,
"Falsos Positivos (PFP)":falsos_positivos,
"Falsos Negativos (PFN)":falsos_negativos,
"Asertividad Positiva (AP)":asertividad_positiva,
"Asertividad Negativa (AN)":asertividad_negativa}
# #### Displaying the custom indices
# In[19]:
datos = (([286, 3],[7, 338]))
df = pd.DataFrame(datos, columns = ["Masculino", "Femenino"])
MC = df
indices_personalizados(MC)
# #### 5. Build a comparison table with respect to the assignments from the previous course. Which method is better?
# In[74]:
cadena = "Cuadro Comparativo entre Modelos Supervisados"
print(cadena.center(35," "))
print(" ========================================")
print(" Modelo K Vecinos Mas Cercanos:\n**************************")
print("Precisión Global: 0.9479495268138801\nError Global: 0.05205047318611988\nPrecision Positiva (PP): 0.9779874213836478\nPrecision Negativa (PN): 0.9177215189873418\nFalsos Positivos (PFP): 0.08227848101265822\nFalsos Negativos (PFN): 0.0220125786163522\nAsertividad Positiva (AP): 0.9228486646884273\nAsertividad Negativa (AN): 0.9764309764309764\n**************************")
print(" Arbol de decision:\n**************************")
print("Precisión Global: 0.9684542586750788\nError Global: 0.03154574132492116\nPrecision Positiva (PP): 0.9688473520249221\nPrecision Negativa (PN): 0.9680511182108626\nFalsos Positivos (PFP): 0.03194888178913738\nFalsos Negativos (PFN): 0.03115264797507788\nAsertividad Positiva (AP): 0.9688473520249221\nAsertividad Negativa (AN): 0.9680511182108626\n**************************")
print(" Arboles Aleatorios:\n**************************")
print("Precisión Global: 0.9889589905362776\nError Global: 0.01104100946372244\nPrecision Positiva (PP): 0.99375\nPrecision Negativa (PN): 0.9840764331210191\nFalsos Positivos (PFP): 0.01592356687898089\nFalsos Negativos (PFN): 0.00625\nAsertividad Positiva (AP): 0.9845201238390093\nAsertividad Negativa (AN): 0.9935691318327974\n**************************")
print(" Modelo ADA Boosting:\n**************************")
print("Precisión Global: 0.9810725552050473,\nError Global: 0.018927444794952675\nPrecision Positiva (PP): 0.990625\nPrecision Negativa (PN): 0.9713375796178344\nFalsos Positivos (PFP): 0.028662420382165606\nFalsos Negativos (PFN): 0.009375\nAsertividad Positiva (AP): 0.9723926380368099\nAsertividad Negativa (AN): 0.9902597402597403\n**************************")
print(" Modelo XG Boosting:\n**************************")
print("Precisión Global: 0.9889589905362776,\nError Global: 0.01104100946372244\nPrecision Positiva (PP): 0.99375\nPrecision Negativa (PN): 0.9840764331210191\nFalsos Positivos (PFP): 0.01592356687898089\nFalsos Negativos (PFN): 0.00625\nAsertividad Positiva (AP): 0.9845201238390093\nAsertividad Negativa (AN): 0.9935691318327974\n**************************")
print(" Modelo Maquinas de Soporte Vectorial:\n**************************")
print("Precisión Global: 0.9826498422712934\nError Global: 0.017350157728706628\nPrecision Positiva (PP): 0.9821958456973294\nPrecision Negativa (PN): 0.9831649831649831\nFalsos Positivos (PFP): 0.016835016835016835\nFalsos Negativos (PFN): 0.017804154302670624\nAsertividad Positiva (AP): 0.9851190476190477\nAsertividad Negativa (AN): 0.9798657718120806\n**************************")
print(" Modelo Redes Neuronales - MLPClassifier\n**************************")
print("Precisión Global: 0.9842271293375394\nError Global: 0.01577287066246058\nPrecision Positiva (PP): 0.9797101449275363\nPrecision Negativa (PN): 0.9896193771626297\nFalsos Positivos (PFP): 0.010380622837370242\nFalsos Negativos (PFN): 0.020289855072463767\nAsertividad Positiva (AP): 0.9912023460410557\nAsertividad Negativa (AN): 0.9761092150170648\n**************************")
print(" ========================================")
# ##### Analysis
# * From the comparison table it can be seen that the models giving the best results are Random Forests together with XG Boosting, since both reach the highest global accuracy of almost 99%; in addition, their Positive Assertiveness is above 98% and the Negative one above 99%, which makes them quite reliable models.
# #### Repeat the previous exercises, but this time use the Keras package with the same number of hidden layers and nodes as above. Does the prediction improve?
# #### Rescaling the predictor variables and splitting 80% of the data for training and 20% for testing
# In[89]:
dummy_y = pd.get_dummies(y)
scaler = MinMaxScaler(feature_range = (0, 1))
scaled_X = pd.DataFrame(scaler.fit_transform(X), columns = list(X))
X_train, X_test, y_train, y_test = train_test_split(scaled_X, dummy_y, train_size = 0.8, random_state = 0)
print(X_train.head())
print(dummy_y)
# #### Creating the Keras model and its layers
# In[98]:
model = Sequential()
model.add(Dense(20, input_dim = 20, activation = 'relu')) # first hidden layer with 20 neurons, 20 input features
model.add(Dense(100, activation = 'sigmoid')) # second hidden layer with 100 neurons
model.add(Dense(30, activation = 'relu')) # third hidden layer with 30 neurons
model.add(Dense(2, activation = 'softmax')) # output layer; 2 is the number of categories to predict
# #### Compiling the model
# In[99]:
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
# #### Model summary
# In[100]:
print(model.summary())
# #### Train for 10,000 epochs, updating the weights every 150 processed observations (batch size)
# In[101]:
model.fit(X_train, y_train, epochs = 10000, batch_size = 150, verbose = 0)
y_pred = model.predict(X_test)
y_test_class = np.argmax(np.asanyarray(y_test), axis = 1)
y_pred_class = np.argmax(y_pred, axis = 1)
# #### Predictions and model quality
# In[102]:
scores = model.evaluate(X_test, y_test)
MC = confusion_matrix(y_test_class, y_pred_class)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
print("\n%s:\n%s"%(k,str(indices[k])))
# In[103]:
datos = (([296, | |
conv=e_arch.CALL_CONV_CDECL)
def strcat(self, emu, argv, ctx={}):
"""
char *strcat(
char *strDestination,
const char *strSource
);
"""
_str1, _str2 = argv
s1 = self.read_mem_string(_str1, 1)
s2 = self.read_mem_string(_str2, 1)
argv[0] = s1
argv[1] = s2
new = (s1 + s2).encode("utf-8")
self.mem_write(_str1, new + b"\x00")
return _str1
@apihook("wcscat", argc=2, conv=e_arch.CALL_CONV_CDECL)
def wcscat(self, emu, argv, ctx={}):
"""
wchar_t *wcscat(
wchar_t *strDestination,
const wchar_t *strSource
);
"""
_str1, _str2 = argv
s1 = self.read_mem_string(_str1, 2)
s2 = self.read_mem_string(_str2, 2)
argv[0] = s1
argv[1] = s2
new = (s1 + s2).encode("utf-16le")
self.mem_write(_str1, new + b"\x00\x00")
return _str1
@apihook("wcslen", argc=1, conv=e_arch.CALL_CONV_CDECL)
def wcslen(self, emu, argv, ctx={}):
"""
size_t wcslen(
const wchar_t* wcs
);
"""
(s,) = argv
string = self.read_wide_string(s)
argv[0] = string
rv = len(string)
return rv
@apihook("_lock", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _lock(self, emu, argv, ctx={}):
"""
void __cdecl _lock(
int locknum
);
"""
return
@apihook("_unlock", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _unlock(self, emu, argv, ctx={}):
"""
void __cdecl _unlock(
int locknum
);
"""
return
@apihook("_ltoa", argc=3, conv=e_arch.CALL_CONV_CDECL)
def _ltoa(self, emu, argv, ctx={}):
"""
char *_ltoa(
long value,
char *str,
int radix
);
"""
(
val,
out_str,
radix,
) = argv
v = str(val).encode("utf-8")
self.mem_write(out_str, v)
return
@apihook("__dllonexit", argc=3, conv=e_arch.CALL_CONV_CDECL)
def __dllonexit(self, emu, argv, ctx={}):
"""
onexit_t __dllonexit(
_onexit_t func,
_PVFV ** pbegin,
_PVFV ** pend
)
"""
(
func,
pbegin,
pend,
) = argv
return func
@apihook("strncmp", argc=3, conv=e_arch.CALL_CONV_CDECL)
def strncmp(self, emu, argv, ctx={}):
"""
int strncmp(
const char *string1,
const char *string2,
size_t count
);
"""
s1, s2, c = argv
rv = 1
string1 = self.read_mem_string(s1, 1)
string2 = self.read_mem_string(s2, 1)
if string1 == string2:
rv = 0
argv[0] = string1
argv[1] = string2
return rv
@apihook("strcmp", argc=2, conv=e_arch.CALL_CONV_CDECL)
def strcmp(self, emu, argv, ctx={}):
"""
int strcmp(
const char *string1,
const char *string2,
);
"""
s1, s2 = argv
rv = 1
string1 = self.read_mem_string(s1, 1)
string2 = self.read_mem_string(s2, 1)
if string1 == string2:
rv = 0
argv[0] = string1
argv[1] = string2
return rv
@apihook("strrchr", argc=2, conv=e_arch.CALL_CONV_CDECL)
def strrchr(self, emu, argv, ctx={}):
"""
char *strrchr(
const char *str,
int c
);
"""
cstr, c = argv
cs = self.read_string(cstr)
hay = cs.encode("utf-8")
needle = c.to_bytes(1, "little")
offset = hay.rfind(needle)
if offset < 0:
rv = 0
else:
rv = cstr + offset
argv[0] = cs
argv[1] = needle.decode("utf-8")
return rv
@apihook("_ftol", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _ftol(self, emu, argv, ctx={}):
"""
int _ftol(int);
"""
(f,) = argv
return int(f)
@apihook("_adjust_fdiv", argc=0, conv=e_arch.CALL_CONV_CDECL)
def _adjust_fdiv(self, emu, argv, ctx={}):
"""
void _adjust_fdiv(void)
"""
return
@apihook("tolower", argc=1, conv=e_arch.CALL_CONV_CDECL)
def tolower(self, emu, argv, ctx={}):
"""
int tolower ( int c );
"""
(c,) = argv
# Only ASCII 'A'-'Z' is lowercased; other values pass through unchanged, matching the C runtime.
return c + 0x20 if 0x41 <= c <= 0x5A else c
@apihook("sscanf", argc=e_arch.VAR_ARGS, conv=e_arch.CALL_CONV_CDECL)
def sscanf(self, emu, argv, ctx={}):
"""
int sscanf ( const char * s, const char * format, ...);
"""
return
@apihook("strchr", argc=2, conv=e_arch.CALL_CONV_CDECL)
def strchr(self, emu, argv, ctx={}):
"""
char *strchr(
const char *str,
int c
);
"""
cstr, c = argv
cs = self.read_string(cstr)
hay = cs.encode("utf-8")
needle = c.to_bytes(1, "little")
offset = hay.find(needle)
if offset < 0:
rv = 0
else:
rv = cstr + offset
argv[0] = cs
argv[1] = needle.decode("utf-8")
return rv
@apihook("_set_invalid_parameter_handler", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _set_invalid_parameter_handler(self, emu, argv, ctx={}):
"""
_invalid_parameter_handler _set_invalid_parameter_handler(
_invalid_parameter_handler pNew
);
"""
(pNew,) = argv
return 0
@apihook("__CxxFrameHandler", argc=4, conv=e_arch.CALL_CONV_CDECL)
def __CxxFrameHandler(self, emu, argv, ctx={}):
"""
EXCEPTION_DISPOSITION __CxxFrameHandler(
EHExceptionRecord *pExcept,
EHRegistrationNode *pRN,
void *pContext,
DispatcherContext *pDC
)
"""
(
pExcept,
pRN,
pContext,
pDC,
) = argv
return 0
@apihook("_vsnprintf", argc=4, conv=e_arch.CALL_CONV_CDECL)
def _vsnprintf(self, emu, argv, ctx={}):
"""
int _vsnprintf(
char *buffer,
size_t count,
const char *format,
va_list argptr
);
"""
buffer, count, _format, argptr = argv
rv = 0
fmt_str = self.read_mem_string(_format, 1)
fmt_cnt = self.get_va_arg_count(fmt_str)
vargs = self.va_args(argptr, fmt_cnt)
fin = self.do_str_format(fmt_str, vargs)
fin = fin[:count] + "\x00"
rv = len(fin)
self.mem_write(buffer, fin.encode("utf-8"))
argv[0] = fin.replace("\x00", "")
argv[1] = fmt_str
return rv
@apihook("__stdio_common_vsprintf", argc=7, conv=e_arch.CALL_CONV_CDECL)
def __stdio_common_vsprintf(self, emu, argv, ctx={}):
"""
int __stdio_common_vsprintf(
unsigned int64 Options,
char *Buffer,
unsigned int BufferCount,
const char *format,
locale_t Locale,
va_list argptr
);
"""
options_lo, options_hi, buffer, count, _format, locale, argptr = argv
rv = 0
fmt_str = self.read_mem_string(_format, 1)
fmt_cnt = self.get_va_arg_count(fmt_str)
vargs = self.va_args(argptr, fmt_cnt)
fin = self.do_str_format(fmt_str, vargs)
fin = fin[:count] + "\x00"
rv = len(fin)
self.mem_write(buffer, fin.encode("utf-8"))
argv[0] = fin.replace("\x00", "")
argv[1] = fmt_str
return rv
@apihook("_strcmpi", argc=2, conv=e_arch.CALL_CONV_CDECL)
def _strcmpi(self, emu, argv, ctx={}):
"""
int _strcmpi(
const char *string1,
const char *string2
);
"""
string1, string2 = argv
rv = 1
if not string1 or not string2:
return rv
cs1 = self.read_string(string1)
cs2 = self.read_string(string2)
argv[0] = cs1
argv[1] = cs2
if cs1.lower() == cs2.lower():
rv = 0
return rv
@apihook("_wcsicmp", argc=2, conv=e_arch.CALL_CONV_CDECL)
def _wcsicmp(self, emu, argv, ctx={}):
"""
int _wcsicmp(
const wchar_t *string1,
const wchar_t *string2
);
"""
string1, string2 = argv
rv = 1
if not string1 or not string2:
return rv
cs1 = self.read_wide_string(string1)
cs2 = self.read_wide_string(string2)
argv[0] = cs1
argv[1] = cs2
if cs1.lower() == cs2.lower():
rv = 0
return rv
@apihook("??3@YAXPAX@Z", argc=0, conv=e_arch.CALL_CONV_CDECL)
def __3_YAXPAX_Z(self, emu, argv, ctx={}):
return
@apihook("??2@YAPAXI@Z", argc=0, conv=e_arch.CALL_CONV_CDECL)
def __2_YAPAXI_Z(self, emu, argv, ctx={}):
return
@apihook("__current_exception_context", argc=0, conv=e_arch.CALL_CONV_CDECL)
def __current_exception_context(self, emu, argv, ctx={}):
return
@apihook("__current_exception", argc=0, conv=e_arch.CALL_CONV_CDECL)
def __current_exception(self, emu, argv, ctx={}):
return
@apihook("_set_new_mode", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _set_new_mode(self, emu, argv, ctx={}):
return
@apihook("_configthreadlocale", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _configthreadlocale(self, emu, argv, ctx={}):
return
@apihook("_setusermatherr", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _setusermatherr(self, emu, argv, ctx={}):
return
@apihook("__setusermatherr", argc=1, conv=e_arch.CALL_CONV_CDECL)
def __setusermatherr(self, emu, argv, ctx={}):
return
@apihook("_cexit", argc=0, conv=e_arch.CALL_CONV_CDECL)
def _cexit(self, emu, argv, ctx={}):
# TODO: handle atexit flavor functions
self.exit_process()
@apihook("_c_exit", argc=0, conv=e_arch.CALL_CONV_CDECL)
def _c_exit(self, emu, argv, ctx={}):
self.exit_process()
@apihook(
"_register_thread_local_exe_atexit_callback",
argc=1,
conv=e_arch.CALL_CONV_CDECL,
)
def _register_thread_local_exe_atexit_callback(self, emu, argv, ctx={}):
return
@apihook("_crt_atexit", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _crt_atexit(self, emu, argv, ctx={}):
return
@apihook("_controlfp_s", argc=3, conv=e_arch.CALL_CONV_CDECL)
def _controlfp_s(self, emu, argv, ctx={}):
return
@apihook("terminate", argc=1, conv=e_arch.CALL_CONV_CDECL)
def terminate(self, emu, argv, ctx={}):
self.exit_process()
@apihook("_crt_atexit", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _crt_atexit(self, emu, argv, ctx={}):
return
@apihook("_initialize_narrow_environment", argc=0, conv=e_arch.CALL_CONV_CDECL)
def _initialize_narrow_environment(self, emu, argv, ctx={}):
return
@apihook("_configure_narrow_argv", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _configure_narrow_argv(self, emu, argv, ctx={}):
return
@apihook("_set_fmode", argc=1, conv=e_arch.CALL_CONV_CDECL)
def _set_fmode(self, emu, argv, ctx={}):
return
@apihook("_itoa", argc=3, conv=e_arch.CALL_CONV_CDECL)
def _itoa(self, emu, argv, ctx={}):
return
@apihook("_itow", argc=3, conv=e_arch.CALL_CONV_CDECL)
def _itow(self, emu, argv, ctx={}):
return
@apihook("_EH_prolog", argc=0, conv=e_arch.CALL_CONV_CDECL)
def _EH_prolog(self, emu, argv, ctx={}):
# push -1
emu.push_stack(0xFFFFFFFF)
# push eax
emu.push_stack(emu.reg_read(e_arch.X86_REG_EAX))
# mov eax, DWORD PTR fs:[0]
# push eax
emu.push_stack(emu.read_ptr(emu.fs_addr + 0))
# mov eax, DWORD PTR [esp+12]
eax = emu.read_ptr(emu.reg_read(e_arch.X86_REG_ESP) + 12)
# mov DWORD PTR fs:[0], esp
emu.write_ptr(emu.fs_addr + 0, emu.reg_read(e_arch.X86_REG_ESP))
# mov DWORD PTR [esp+12], ebp
emu.write_ptr(
emu.reg_read(e_arch.X86_REG_ESP) + 12, emu.reg_read(e_arch.X86_REG_EBP)
)
# lea ebp, DWORD PTR [esp+12]
emu.reg_write(e_arch.X86_REG_EBP, emu.reg_read(e_arch.X86_REG_ESP) + 12)
# push eax
# ret 0
emu.push_stack(eax)
return
@apihook("wcstombs", argc=3, conv=e_arch.CALL_CONV_CDECL)
def wcstombs(self, emu, argv, ctx={}):
"""
size_t wcstombs(
char *mbstr,
const wchar_t *wcstr,
size_t count
);
"""
mbstr, wcstr, count = argv
s = self.read_wide_string(wcstr, count)
self.write_string(s, mbstr)
return len(s.encode("ascii"))
@apihook('_stricmp', argc=2, conv=e_arch.CALL_CONV_CDECL)
def _stricmp(self, emu, argv, ctx={}):
"""
int _stricmp(
const char *string1,
const char *string2
);
"""
string1, string2 = argv
rv = 1
if not string1 or not string2:
return rv
cs1 = self.read_string(string1)
cs2 = self.read_string(string2)
argv[0] = cs1
argv[1] = cs2
if cs1.lower() == cs2.lower():
rv = 0
return rv
@apihook('_wcsicmp', argc=2, conv=e_arch.CALL_CONV_CDECL)
def _wcsicmp(self, emu, argv, ctx={}):
"""
int wcsicmp(
const wchar_t *string1,
const wchar_t *string2
);
"""
string1, string2 = argv
rv = 1
ws1 = self.read_wide_string(string1)
ws2 = self.read_wide_string(string2)
argv[0] = ws1
argv[1] = ws2
if ws1.lower() == ws2.lower():
rv = 0
return rv
@apihook('wcscmp', argc=2, conv=e_arch.CALL_CONV_CDECL)
def wcscmp(self, emu, argv, ctx={}):
"""
int wcscmp(
const wchar_t *string1,
const wchar_t *string2,
);
"""
s1, s2 = argv
rv = 1
string1 = self.read_wide_string(s1)
string2 = self.read_wide_string(s2)
if string1 == string2:
rv = 0
argv[0] = string1
argv[1] = string2
return rv
@apihook('_snwprintf', argc=e_arch.VAR_ARGS, conv=e_arch.CALL_CONV_CDECL)
def _snwprintf(self, emu, argv, ctx={}):
"""
int _snwprintf(
wchar_t *buffer,
size_t count,
const wchar_t *format [,
argument] ...
);
"""
| |
= X[:, 1+shift+dim] # 4
global_dim_3 = X[:, 2+shift+dim] # 5
# Sort with respect to ground truth
# data = zip(ground_dim,pred_dim_1, ground_dim_2, ground_dim_3)
# data = sorted(data, key=lambda tup: tup[0])
# ground_dim_sort, pred_dim_sort_1, ground_dim_sort_2, ground_dim_sort_3 = zip(*data)
# sorts all three dimenions for YPR
data = zip(ground_dim_1, pred_dim_1, global_dim_1)
data = sorted(data, key=lambda tup: tup[0])
ground_dim_sort_1, pred_dim_sort_1, global_dim_sort_1 = zip(*data)
data = zip(ground_dim_2, pred_dim_2, global_dim_2)
data = sorted(data, key=lambda tup: tup[0])
ground_dim_sort_2, pred_dim_sort_2, global_dim_sort_2 = zip(*data)
data = zip(ground_dim_3, pred_dim_3, global_dim_3)
data = sorted(data, key=lambda tup: tup[0])
ground_dim_sort_3, pred_dim_sort_3, global_dim_sort_3 = zip(*data)
font = {'size': 18}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=2.5)
# plt.tight_layout()
with sns.axes_style("darkgrid"):
ax1 = plt.subplot(311)
ax2 = plt.subplot(312)
ax3 = plt.subplot(313)
my_dpi = 300
plt.figure(figsize=(1200/my_dpi, 1200/my_dpi), dpi=my_dpi)
ax1.axhline(0, linestyle=':', color='r', linewidth=1)
ax1.plot(ground_dim_sort_1, label='Ground Truth', color='k', linewidth=1.8)
ax1.plot(pred_dim_sort_1, ':', label='Model Prediction',
markersize=.9, linewidth=.8) # , linestyle=':')
# ax1.set_xlabel('Sorted Datapoints')
ax1.set_ylabel('Pitch Step (Deg.)')
# ax1.set_ylim([-5,5])
# ax1.set_yticks(np.arange(-5,5.01,2.5))
# ax1.legend()
# plt.show()
# plt.title('One Step Dim+1')
ax2.axhline(0, linestyle=':', color='r', linewidth=1)
ax2.plot(ground_dim_sort_2, label='Ground Truth', color='k', linewidth=1.8)
ax2.plot(pred_dim_sort_2, ':', label='Model Prediction',
markersize=.9, linewidth=.8) # , linestyle=':')
# ax2.set_xlabel('Sorted Datapoints')
ax2.set_ylabel('Roll Step (Deg.)')
# ax2.set_ylim([-5,5])
# ax2.set_yticks(np.arange(-5,5.01,2.5))
# ax2.set_yticklabels(["-5", "-2.5", "0", "2.5", "5"])
# ax2.legend()
# plt.show()
# plt.title('One Step Dim+2')
ax3.axhline(0, linestyle=':', color='r', linewidth=1)
ax3.plot(ground_dim_sort_3, label='Ground Truth', color='k', linewidth=1.8)
ax3.plot(pred_dim_sort_3, ':', label='Model Prediction',
markersize=.9, linewidth=.8) # , linestyle=':')
ax3.set_xlabel('Sorted Datapoints')
ax3.set_ylabel('Yaw Step (Deg.)')
ax3.set_ylim([-5, 5])
ax3.set_yticks(np.arange(-5, 5.01, 2.5))
leg3 = ax3.legend(loc=8, ncol=2)
for line in leg3.get_lines():
line.set_linewidth(2.5)
plt.show()
def plot_test_train(model, dataset, variances = True):
"""
Takes a dynamics model and plots test vs train predictions on a dataset of the form (X,U,dX)
- variances adds a highlight showing the variance of one step prediction estimates
"""
'''
Some Models:
model_pll = '_models/temp/2018-12-14--10-47-41.7_plot_pll_stack3_.pth'
model_mse = '_models/temp/2018-12-14--10-51-10.9_plot_mse_stack3_.pth'
model_pll_ens = '_models/temp/2018-12-14--10-53-42.9_plot_pll_ensemble_stack3_.pth'
model_pll_ens_10 = '_models/temp/2018-12-14--11-49-21.6_plot_pll_ens_10_stack3_.pth'
model_mse_ens = '_models/temp/2018-12-14--10-52-40.4_plot_mse_ensemble_stack3_.pth'
25Hz models for with variance
new ensemble: '_models/temp/2019-02-23--16-10-00.4_plot_temp__stack3_'
new single: '_models/temp/2019-02-23--17-03-22.0_plot_temp_single_stack3_'
'''
#for crazyflie plots
model_pll_ens_10 = '_models/temp/2018-12-14--11-49-21.6_plot_pll_ens_10_stack3_.pth'
model_testing = '_models/temp/2019-02-25--09-51-49.1_temp_single_debug_stack3_.pth'
# below for iono
model_testing = "_models/temp/2019-05-02--09-43-01.5_temp_stack3_.pth"
if variances:
predictions_means, predictions_vars = gather_predictions(
model_testing, dataset, variances=variances)
else:
predictions_means = gather_predictions(
model_testing, dataset, variances=variances)
X = dataset[0]
U = dataset[1]
dX = dataset[2]
dim = 3
# New plot
font = {'size': 11}
matplotlib.rc('font', **font)
matplotlib.rc('text', usetex=True)
matplotlib.rc('lines', linewidth=1.5)
plt.tight_layout()
# plot for test train compare
fig = plt.figure()
with sns.axes_style("whitegrid"):
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.5
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
plt.subplots_adjust(bottom=.13, top=.93, left=.1, right=1-.03, hspace=.28)
# sort and plot data
if not variances:
# Gather the train/test split data
lx = int(np.shape(dX)[0]*.8)
data_train = zip(dX[:lx, dim], predictions_means[:lx, dim])
data_train = sorted(data_train, key=lambda tup: tup[0])
gt_sort_train, pred_sort_pll_train = zip(*data_train)
data_test = zip(dX[lx:, dim], predictions_means[lx:, dim])
data_test = sorted(data_test, key=lambda tup: tup[0])
gt_sort_test, pred_sort_pll_test = zip(*data_test)
# plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=7)
gt_train = np.linspace(0, 1, len(gt_sort_train))
ax1.plot(gt_train,gt_sort_train, label='Ground Truth', color='k', linewidth=1.8)
ax1.plot(gt_train, pred_sort_pll_train, '-', label='Probabilistic Model Prediction',
markersize=.9, linewidth=.7, alpha=.8) # , linestyle=':')
ax1.set_title("Training Data Predictions")
ax1.legend(prop={'size': 7})
gt_test = np.linspace(0, 1, len(gt_sort_test))
ax2.plot(gt_test, gt_sort_test, label='Ground Truth', color='k', linewidth=1.8)
ax2.plot(gt_test, pred_sort_pll_test, '-', label='Bayesian Model Validation Data Prediction',
markersize=.9, linewidth=1.2, alpha=.8) # , linestyle=':')
ax2.set_title("Test Data Predictions")
else:
# Gather the train/test split data
lx = int(np.shape(dX)[0]*.8)
data_train = zip(dX[:lx, dim], predictions_means[:lx, dim], predictions_vars[:lx, dim])
data_train = sorted(data_train, key=lambda tup: tup[0])
gt_sort_train, pred_sort_pll_train, pred_vars_train = zip(*data_train)
data_test = zip(dX[lx:, dim], predictions_means[lx:,
dim], predictions_vars[lx:, dim])
data_test = sorted(data_test, key=lambda tup: tup[0])
gt_sort_test, pred_sort_pll_test, pred_vars_test = zip(*data_test)
print(np.shape(pred_sort_pll_train))
print(np.shape(pred_vars_train))
# plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=7)
gt_train = np.linspace(0, 1, len(gt_sort_train))
ax1.plot(gt_train, gt_sort_train, label='Ground Truth',
color='k', linewidth=1.8)
ax1.plot(gt_train, pred_sort_pll_train, '-', label='Probabilistic Model Prediction',
markersize=.9, linewidth=.7, alpha=.8) # , linestyle=':')
ax1.plot(gt_train, np.array(pred_sort_pll_train)+np.array(pred_vars_train),
'-', color = 'r', label='Variance of Predictions', linewidth=.4, alpha=.4)
ax1.plot(gt_train,np.array(pred_sort_pll_train)-np.array(pred_vars_train), '-', color='r', linewidth=.4, alpha=.4)
ax1.set_title("Training Data Predictions")
ax1.legend(prop={'size': 7})
gt_test = np.linspace(0, 1, len(gt_sort_test))
ax2.plot(gt_test, gt_sort_test, label='Ground Truth',
color='k', linewidth=1.8)
ax2.plot(gt_test, pred_sort_pll_test, '-', label='Bayesian Model Validation Data Prediction',
markersize=.9, linewidth=1.2, alpha=.8) # , linestyle=':')
ax2.plot(gt_test, np.array(pred_sort_pll_test)+np.array(pred_vars_test),
'-', color = 'r', label='Variance of Predictions', linewidth=.4, alpha=.4)
ax2.plot(gt_test,np.array(pred_sort_pll_test)-np.array(pred_vars_test), '-', color='r', linewidth=.4, alpha=.4)
ax2.set_title("Test Data Predictions")
fontProperties = {'family': 'Times New Roman'}
# a = plt.gca()
# print(a)
# a.set_xticklabels(a.get_xticks(), fontProperties)
# a.set_yticklabels(a.get_yticks(), fontProperties)
ax1.grid(b=True, which='major', color='k',
linestyle='-', linewidth=0, alpha=.75)
ax1.grid(b=True, which='minor', color='b',
linestyle='--', linewidth=0, alpha=.5)
ax1.set_xticks([])
ax2.grid(b=True, which='major', color='k',
linestyle='-', linewidth=0, alpha=.75)
ax2.grid(b=True, which='minor', color='b',
linestyle='--', linewidth=0, alpha=.5)
fig.text(.02, .75, 'One Step Prediction, Pitch (deg)',
rotation=90, family='Times New Roman')
# fig.text(.404, .04, 'Sorted Datapoints, Normalized', family='Times New Roman')
ax2.set_xlabel('Sorted Datapoints, Normalized')
for ax in [ax1, ax2]:
# if ax == ax1:
# loc = matplotlib.ticker.MultipleLocator(base=int(lx/10))
# else:
# loc = matplotlib.ticker.MultipleLocator(
# base=int((np.shape(dX)[0]-lx)/10))
ax.set_ylim([-6.0, 6.0])
ax.set_xlim([0, 1])
fig.set_size_inches(5, 3.5)
# plt.savefig('psoter', edgecolor='black', dpi=100, transparent=True)
plt.savefig('testrain.pdf', format='pdf', dpi=300)
# plt.show()
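# A self-contained mini-example (added for illustration, with synthetic data) of the
# sorted one-step-prediction plot used above: ground-truth deltas are sorted,
# predictions are overlaid, and a +/- variance band is drawn. It assumes numpy (np)
# and matplotlib.pyplot (plt) are imported at the top of this file, as the
# surrounding code implies.
def _demo_sorted_prediction_plot():
    # Synthetic ground truth and noisy "predictions" with a per-point variance.
    gt = np.random.randn(500)
    pred = gt + 0.3 * np.random.randn(500)
    var = 0.3 * np.ones_like(gt)
    order = np.argsort(gt)
    x = np.linspace(0, 1, len(gt))
    plt.plot(x, gt[order], 'k', label='Ground Truth')
    plt.plot(x, pred[order], '-', linewidth=.7, label='Prediction')
    plt.plot(x, pred[order] + var[order], 'r-', linewidth=.4, alpha=.4)
    plt.plot(x, pred[order] - var[order], 'r-', linewidth=.4, alpha=.4)
    plt.legend()
    plt.show()
# _demo_sorted_prediction_plot()  # uncomment to view the synthetic example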
def plot_rollout_compare():
"""
Function to plot the first 4 rollout flights to show rapid learning.
It assumes a specific file structure, so it will run on its own.
Includes some commented code for an old version.
"""
font = {'size': 17}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=3)
matplotlib.rc('text', usetex=True)
fig = plt.figure()
with sns.axes_style("whitegrid"):
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.5
ax1 = plt.subplot(221)
ax2 = plt.subplot(222)
ax3 = plt.subplot(223)
ax4 = plt.subplot(224)
axes = [ax1, ax2, ax3, ax4]
plt.tight_layout()
line1, = ax1.plot([], [])
dim = 3
# original plot
dir1 = "_logged_data_autonomous/_examples/icra-top20/roll0/"
dir2 = "_logged_data_autonomous/_examples/icra-top20/roll1/"
dir3 = "_logged_data_autonomous/_examples/icra-top20/roll2/"
dir4 = "_logged_data_autonomous/_examples/icra-top20/roll3/"
dir5 = "_logged_data_autonomous/_examples/icra-top20/roll4/"
dir6 = "_logged_data_autonomous/_examples/icra-top20/roll5/"
#new plot
dir1 = "_logged_data_autonomous/_newquad1/publ2/c50_rand/"
dir2 = "_logged_data_autonomous/_newquad1/publ2/c50_roll01/"
dir3 = "_logged_data_autonomous/_newquad1/publ2/c50_roll02/"
dir4 = "_logged_data_autonomous/_newquad1/publ2/c50_roll03/"
dir5 = "_logged_data_autonomous/_newquad1/publ2/c50_roll04/"
# dir6 = "_logged_data_autonomous/_newquad1/publ2/c50_roll05/"
# dir7 = "_logged_data_autonomous/_newquad1/publ2/c50_roll06/"
dirs = [dir1, dir2, dir3, dir4] # , dir5]#, dir6]#, dir7]
colors = ['r', 'y', 'g', 'c'] # , 'b']#, 'm']#, 'k' ]
colors = ['r', 'b', 'g', 'k'] # , 'b']#, 'm']#, 'k' ]
best_len = 0
best_time = 3000
load_params = {
'delta_state': True, # normally leave as True, prediction mode
# when true, will include the time plus one in the dataframe (for trying predictions of true state vs delta)
'include_tplus1': True,
# trims high vbat because these points the quad is not moving
'trim_high_vbat': 4200,
# If not trimming data with fast log, need another way to get rid of repeated 0s
'takeoff_points': 180,
# if all the euler angles (floats) don't change, it is not realistic data
'trim_0_dX': True,
'find_move': True,
# if the states change by a large amount, not realistic
'trime_large_dX': True,
# Anything out of here is erroneous anyways. Can be used to focus training
'bound_inputs': [25000, 65500],
# IMPORTANT ONE: stacks the past states and inputs to pass into network
'stack_states': 3,
# looks for sharp changes to throw out items post collision
'collision_flag': False,
# shuffle pre training, makes it hard to plot trajectories
'shuffle_here': False,
'timestep_flags': [], # if you want to filter rostime stamps, do it here
'battery': True, # if battery voltage is in the state data
# adds a column to the dataframe tracking end of trajectories
'terminals': True,
'fastLog': True, # if using the software with the new fast log
# Number of times the control freq you will be using is faster than that at data logging
'contFreq': 1,
'zero_yaw': False
}
# load_params ={
# 'delta_state': True,
# 'takeoff_points': 0,
# 'trim_0_dX': True,
# 'trime_large_dX': True,
# 'bound_inputs': [20000,65500],
# 'stack_states': 4,
# 'collision_flag': False,
# 'shuffle_here': False,
# 'timestep_flags': [],
# 'battery' : False
# }
for k, dir in enumerate(dirs):
axis = axes[k]
for i in range(10):
# file = random.choice(os.listdir(dir))
file = os.listdir(dir)[i]
print(file)
print('Processing File: ', file, 'Dir: ', k, 'File number: ', i)
if dir == dir4 or dir == dir5 or dir == dir6:
takeoff = True
load_params['takeoff_points'] = 170
else:
takeoff = False
X, U, dX, objv, Ts, times, terminal = trim_load_param(
str(dir+file), load_params)
time = np.max(times)
n = len(X[:, dim])
if n > best_len:
best_len = n
if time > best_time:
best_time = time
print(best_time)
|