ext | sha | content
---|---|---|
py | 1a5541ba7b99409850f4bedec10ee7cc360a8ff7 | # qubit number=5
# total number=57
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2, floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = pi (180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
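# For every input x with f(x) == "1": X gates map |x> to |11...1>, a
# multi-controlled phase of pi (equivalent to a CZ over all qubits) flips the
# sign of that state, and the X gates are then undone, which is the standard
# phase-oracle construction.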
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n: int, f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.cx(input_qubit[0],input_qubit[4]) # number=54
prog.x(input_qubit[4]) # number=55
prog.cx(input_qubit[0],input_qubit[4]) # number=56
prog.cx(input_qubit[2],input_qubit[0]) # number=45
prog.z(input_qubit[2]) # number=46
prog.cx(input_qubit[2],input_qubit[0]) # number=47
prog.h(input_qubit[1]) # number=4
prog.rx(2.664070570244145,input_qubit[1]) # number=39
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[2]) # number=49
prog.cz(input_qubit[3],input_qubit[2]) # number=50
prog.h(input_qubit[2]) # number=51
prog.h(input_qubit[4]) # number=21
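# Grover-style amplitude amplification: the oracle is applied roughly
# floor(pi/4 * sqrt(N)) times, where N = 2**n is the size of the search space.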
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[3]) # number=40
prog.y(input_qubit[4]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=25
prog.cz(input_qubit[1],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=36
prog.cz(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=38
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[0]) # number=34
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.cx(input_qubit[2],input_qubit[3]) # number=44
prog.x(input_qubit[1]) # number=30
prog.cx(input_qubit[0],input_qubit[1]) # number=31
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.z(input_qubit[1]) # number=52
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
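# The oracle marks a single bitstring: f(x) = 1 only when x equals the key below.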
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot = 7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1642.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | 1a5542c3dfdfb63caabdf52bc5b956f5a7f996ff | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
from setuptools.extension import Extension
from codecs import open
import os
import re
import sys
from Cython.Build import cythonize
here = os.path.abspath(os.path.dirname(__file__))
sys.path.append(here)
import versioneer # noqa: E402
import cuda_ext # noqa: E402
CLASSIFIERS = """
Development Status :: 4 - Beta
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MINIMUM_VERSIONS = {
"numpy": "1.13",
"requests": "2.18",
"jax": "0.2.10",
}
CONSOLE_SCRIPTS = [
"veros = veros.cli.veros:cli",
"veros-run = veros.cli.veros_run:cli",
"veros-copy-setup = veros.cli.veros_copy_setup:cli",
"veros-resubmit = veros.cli.veros_resubmit:cli",
"veros-create-mask = veros.cli.veros_create_mask:cli",
]
PACKAGE_DATA = ["setups/*/assets.json", "setups/*/*.npy", "setups/*/*.png"]
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
def parse_requirements(reqfile):
requirements = []
with open(os.path.join(here, reqfile), encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
    continue  # skip blank lines and comments; re.match would return None below
pkg = re.match(r"(\w+)\b.*", line).group(1)
if pkg in MINIMUM_VERSIONS:
line = "".join([line, ",>=", MINIMUM_VERSIONS[pkg]])
line = line.replace("==", "<=")
requirements.append(line)
return requirements
INSTALL_REQUIRES = parse_requirements("requirements.txt")
EXTRAS_REQUIRE = {
"test": ["pytest", "pytest-cov", "pytest-forked", "codecov", "xarray"],
}
EXTRAS_REQUIRE["jax"] = parse_requirements("requirements_jax.txt")
def get_extensions(require_cython_ext, require_cuda_ext):
cuda_info = cuda_ext.cuda_info
extension_modules = {
"veros.core.special.tdma_cython_": ["tdma_cython_.pyx"],
"veros.core.special.tdma_cuda_": ["tdma_cuda_.pyx", "cuda_tdma_kernels.cu"],
}
def is_cuda_ext(sources):
return any(source.endswith(".cu") for source in sources)
extensions = []
for module, sources in extension_modules.items():
extension_dir = os.path.join(*module.split(".")[:-1])
kwargs = dict()
if is_cuda_ext(sources):
kwargs.update(
library_dirs=cuda_info["lib64"],
libraries=["cudart"],
runtime_library_dirs=cuda_info["lib64"],
include_dirs=cuda_info["include"],
)
ext = Extension(
name=module,
sources=[os.path.join(extension_dir, f) for f in sources],
extra_compile_args={
"gcc": [],
"nvcc": cuda_info["cflags"],
},
**kwargs,
)
extensions.append(ext)
extensions = cythonize(extensions, language_level=3, exclude_failures=True)
for ext in extensions:
is_required = (not is_cuda_ext(ext.sources) and require_cython_ext) or (
is_cuda_ext(ext.sources) and require_cuda_ext
)
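# Extensions that are not explicitly required are marked optional, so a failed
# build (e.g. no CUDA toolkit available) does not abort the installation.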
if not is_required:
ext.optional = True
return extensions
cmdclass = versioneer.get_cmdclass()
cmdclass.update(build_ext=cuda_ext.custom_build_ext)
def _env_to_bool(envvar):
return os.environ.get(envvar, "").lower() in ("1", "true", "on")
extensions = get_extensions(
require_cython_ext=_env_to_bool("VEROS_REQUIRE_CYTHON_EXT"),
require_cuda_ext=_env_to_bool("VEROS_REQUIRE_CUDA_EXT"),
)
setup(
name="veros",
license="MIT",
author="Dion Häfner (NBI Copenhagen)",
author_email="[email protected]",
keywords="oceanography python parallel numpy multi-core geophysics ocean-model mpi4py jax",
description="The versatile ocean simulator, in pure Python, powered by JAX.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://veros.readthedocs.io",
python_requires=">=3.7",
version=versioneer.get_version(),
cmdclass=cmdclass,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
ext_modules=extensions,
entry_points={"console_scripts": CONSOLE_SCRIPTS, "veros.setup_dirs": ["base = veros.setups"]},
package_data={"veros": PACKAGE_DATA},
classifiers=[c for c in CLASSIFIERS.split("\n") if c],
zip_safe=False,
)
|
py | 1a5543a21870703849a64fed1e3f3a29f93a129f | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import peforth
import argparse
import sys
import time
import numpy as np
import tensorflow as tf
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
input_mean=0, input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels = 3,
name='png_reader')
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
name='gif_reader'))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
else:
image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
name='jpeg_reader')
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
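# Normalize pixels as (x - input_mean) / input_std; with the mean=128, std=128
# values used in __main__ this maps [0, 255] to roughly [-1, 1].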
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
# peforth.ok('11> ',loc=locals(),cmd=":> [0] value locals cr")
if __name__ == "__main__":
file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
model_file = "tf_files/retrained_graph.pb"
label_file = "tf_files/retrained_labels.txt"
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "final_result"
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
graph = load_graph(model_file)
t = read_tensor_from_image_file(file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
with tf.Session(graph=graph) as sess:
start = time.time()
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
end=time.time()
results = np.squeeze(results)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
print('\nEvaluation time (1-image): {:.3f}s\n'.format(end-start))
for i in top_k:
print(labels[i], results[i])
peforth.ok('22> ',loc=locals(),cmd=":> [0] value locals cr")
|
py | 1a55445ca952b8b935ec33d32810cf3798dcaf84 | import os
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import precision_score, recall_score
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from transformers import BertModel
class RNN_RNN(nn.Module):
def __init__(self, device, hps, loss_weights):
super(RNN_RNN, self).__init__()
self._hps = hps
self._device = device
self._loss_weight = loss_weights
now = datetime.now()
dt_str = now.strftime("%d-%m-%Y-%H-%M-%S")
comment = '__'.join([k + '_' + str(v) for k, v in self._hps.items()])
path = os.path.join(self._hps["path"], 'runs')
if not os.path.exists(path):
os.makedirs(path)
self._summary_writer = SummaryWriter(os.path.join(path, dt_str), comment=comment)
self._define_model()
self._optimizer = self._define_optimizer()
self._global_step = 0
def _define_model(self):
self._bert = BertModel.from_pretrained(self._hps['bert'], output_hidden_states=True)
self._emb_bert_dim = 768 * (4 if self._hps['emb_layers'] == 'concat' else 1)
self._fc_emb = nn.Linear(self._emb_bert_dim, self._hps['emb_dim'])
self._fc_emb_relu = nn.ReLU()
if self._hps['units'] == 'lstm':
self._net_1 = nn.LSTM(self._hps['emb_dim'], self._hps['hidden1'], bidirectional=True)
self._net_2 = nn.LSTM(self._hps['hidden1'] * 2, self._hps['hidden2'], bidirectional=False)
else:
self._net_1 = nn.GRU(self._hps['emb_dim'], self._hps['hidden1'], bidirectional=True)
self._net_2 = nn.GRU(self._hps['hidden1'] * 2, self._hps['hidden2'], bidirectional=False)
self._fc = nn.Linear(self._hps['hidden2'], 3)
self._dropout_emb = nn.Dropout(self._hps['prob_emb'])
self._dropout1 = nn.Dropout(self._hps['prob1'])
self._dropout2 = nn.Dropout(self._hps['prob2'])
self._softmax = nn.LogSoftmax(dim=1)
self._criterion = nn.NLLLoss(ignore_index=-1, reduction='mean', weight=self._loss_weight)
def _define_optimizer(self):
opt = torch.optim.SGD(self.parameters(), self._hps['lr'])
if self._hps['optimizer'] == 'ADAM':
opt = torch.optim.Adam(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
elif self._hps['optimizer'] == 'Adadelta':
opt = torch.optim.Adadelta(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
elif self._hps['optimizer'] == 'Adagrad':
opt = torch.optim.Adagrad(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
elif self._hps['optimizer'] == 'RSMProp':
opt = torch.optim.RMSprop(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
return opt
def _pointwise_max(self, tensors):
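# Element-wise max over the message-length dimension: collapses the per-token
# hidden states of each message into a single vector (max pooling over tokens).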
tensors = tensors.view(self._hps['msg_len'], -1, self._hps['batch_size'] * self._hps['conv_len'])
t_prev = tensors[0]
for t in tensors:
t_prev = torch.max(t_prev, t)
t_prev = t_prev.view(self._hps['batch_size'] * self._hps['conv_len'], -1)
return t_prev
def _extract_layer(self, hidden_states):
if self._hps['emb_layers'] == 'last':
return hidden_states[-1]
elif self._hps['emb_layers'] == 'second':
return hidden_states[-2]
elif self._hps['emb_layers'] == 'sum_all':
return torch.sum(torch.stack(hidden_states[1:]), dim=0) # exclude first layer (embedding)
elif self._hps['emb_layers'] == 'sum_four':
return torch.sum(torch.stack(hidden_states[-4:]), dim=0)
elif self._hps['emb_layers'] == 'concat':
return torch.cat(hidden_states[-4:], dim=2)
else:
return hidden_states[-1]
def _decode_softmax(self, pred, msk_conv):
pred = pred.view(self._hps['batch_size'] * self._hps['conv_len'], -1)
msk_conv = msk_conv.view(self._hps['batch_size'] * self._hps['conv_len'])
indices = torch.nonzero(msk_conv, as_tuple=True)
preds = pred[indices]
return list(map(lambda x: np.argmax(x), preds.tolist()))
def close_writer(self):
self._summary_writer.close()
def get_states(self):
return self.state_dict(), self._optimizer.state_dict(), self._global_step
def load_state(self, checkpoint):
self.load_state_dict(checkpoint['state_dict'])
self._optimizer.load_state_dict(checkpoint['optimizer'])
self._global_step = checkpoint['step']
def forward(self, x, msk_conv, msk_msg):
"""
:param x: (batch, conv_len, msg_len+2)
:param msk_conv: (batch, conv_len)
:param msk_msg: (batch, conv_len, msg_len+2)
:return: (conv_len, batch, classes)
"""
input_ids = x.view(self._hps['batch_size'] * self._hps['conv_len'], -1) # batch * conv_len, sequence_length
attention_mask = msk_msg.view(self._hps['batch_size'] * self._hps['conv_len'], -1)
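# BERT is used as a frozen feature extractor: eval() plus no_grad() keeps its
# weights fixed and avoids building a gradient graph for the embedding pass.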
self._bert.eval()
with torch.no_grad():
bert_emb_out = self._bert(input_ids, attention_mask)
# word embeddings
bert_emb_states = bert_emb_out[2] # (batch * conv_len, sequence_length, hidden_size) for each layer (13)
bert_emb = self._extract_layer(bert_emb_states)
bert_emb = bert_emb[:, 1:-1, :] # discard special tokens
msk_msg = msk_msg[:, :, 1:-1]
if self._hps['emb_dim'] != self._emb_bert_dim:
bert_emb = self._fc_emb_relu(self._fc_emb(bert_emb)) # reduce dimensions
embed_x = self._dropout_emb(bert_emb)
# reshape x
embed_x = embed_x.reshape(self._hps['msg_len'], # msg_len
self._hps['batch_size'] * self._hps['conv_len'], # batch * conv_len
self._hps['emb_dim']) # hid_dim
# first net
net_in, _ = self._net_1(embed_x) # (msg_len, batch * conv_len, hidden)
msk_msg = msk_msg.reshape(self._hps['msg_len'], self._hps['batch_size'] * self._hps['conv_len']).unsqueeze(-1)
net_in = net_in * msk_msg # remove padding
dropped = self._dropout1(net_in)
first_net = self._pointwise_max(dropped)
first_net = first_net.view(self._hps['conv_len'], self._hps['batch_size'], self._hps['hidden1'] * 2)
# second net
net_out, _ = self._net_2(first_net) # conv_len, batch, hidden
second_net = self._dropout2(net_out)
# prediction
msgs = []
for msg in second_net:
out = self._fc(msg) # batch, classes
out = self._softmax(out)
msgs.append(out)
output = torch.stack(msgs) # conv_len, batch, classes
msk_conv = msk_conv.view(self._hps['conv_len'], self._hps['batch_size']).unsqueeze(-1)
output = output * msk_conv
return output
def fit(self, x, y, msk_conv, msk_msg):
"""
Train the model
:param x: input sequence (batch, conv_len, msg_len+2)
:param y: target sequence (batch, conv_len)
:param msk_conv: conversation mask (batch, conv_len)
:param msk_msg: message mask (batch, conv_len, msg_len+2)
:return: loss value, step
"""
self.train()
self._optimizer.zero_grad()
preds = self(x, msk_conv, msk_msg) # conv_len, batch, classes
# compute average loss
avg_loss = []
pred_y = preds.view(self._hps['batch_size'], self._hps['conv_len'], -1)
true_y = y.view(self._hps['batch_size'], self._hps['conv_len'])
for i in range(self._hps['batch_size']):
avg_loss.append(self._criterion(pred_y[i], true_y[i]))
loss = torch.mean(torch.stack(avg_loss))
loss_value = loss.item()
# optimization step
loss.backward()
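# Optionally clip the global gradient norm; clip_grad_norm_ rescales all
# gradients so their combined L2 norm does not exceed hps['clip'].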
if self._hps['clip'] != -1:
nn.utils.clip_grad_norm_(self.parameters(), self._hps['clip'])
self._optimizer.step()
# compute metrics
if self._global_step % self._hps['save'] == 0:
y_pred = self._decode_softmax(preds, msk_conv)
y_test = y.view(self._hps['batch_size'] * self._hps['conv_len']).tolist()
y_test = list(filter(lambda z: z != -1, y_test)) # ignore padding
parameters = [p for p in self.parameters() if p.grad is not None]
total_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach()).to(self._device) for p in parameters])) # L2 norm
prec = precision_score(y_test, y_pred, average='macro', zero_division=0)
rec = recall_score(y_test, y_pred, average='macro', zero_division=0)
self._summary_writer.add_scalar('Loss/train', loss_value, self._global_step)
self._summary_writer.add_scalar('Precision/train', prec, self._global_step)
self._summary_writer.add_scalar('Recall/train', rec, self._global_step)
self._summary_writer.add_scalar('Grad norm/train', total_norm, self._global_step)
if self._hps['emb_dim'] != self._emb_bert_dim:
self._summary_writer.add_histogram('fc_emb/bias', self._fc_emb.bias, self._global_step)
self._summary_writer.add_histogram('fc_emb/weight', self._fc_emb.weight, self._global_step)
self._summary_writer.add_histogram('net_1/bias', self._net_1.bias_hh_l0, self._global_step)
self._summary_writer.add_histogram('net_1/weight', self._net_1.weight_hh_l0, self._global_step)
self._summary_writer.add_histogram('net_2/bias', self._net_2.bias_hh_l0, self._global_step)
self._summary_writer.add_histogram('net_2/weight', self._net_2.weight_hh_l0, self._global_step)
self._summary_writer.add_histogram('fc/bias', self._fc.bias, self._global_step)
self._summary_writer.add_histogram('fc/weight', self._fc.weight, self._global_step)
self._summary_writer.flush()
self._global_step += 1
return loss_value, self._global_step
def valid(self, x, y, msk_conv, msk_msg):
"""
Validate the model
:param x: input sequence (batch, conv_len, msg_len+2)
:param y: target sequence (batch, conv_len)
:param msk_conv: conversation mask (batch, conv_len)
:param msk_msg: message mask (batch, conv_len, msg_len+2)
:return: loss value, step
"""
with torch.no_grad():
self.eval()
preds = self(x, msk_conv, msk_msg) # conv_len, batch, classes
# compute average loss
avg_loss = []
pred_y = preds.view(self._hps['batch_size'], self._hps['conv_len'], -1)
true_y = y.view(self._hps['batch_size'], self._hps['conv_len'])
for i in range(self._hps['batch_size']):
avg_loss.append(self._criterion(pred_y[i], true_y[i]))
loss = torch.mean(torch.stack(avg_loss))
loss_value = loss.item()
# compute metrics
if self._global_step % self._hps['save'] == 0:
y_pred = self._decode_softmax(preds, msk_conv)
y_test = y.view(self._hps['batch_size'] * self._hps['conv_len']).tolist()
y_test = list(filter(lambda z: z != -1, y_test)) # ignore padding
prec = precision_score(y_test, y_pred, average='macro', zero_division=0)
rec = recall_score(y_test, y_pred, average='macro', zero_division=0)
self._summary_writer.add_scalar('Loss/valid', loss_value, self._global_step)
self._summary_writer.add_scalar('Precision/valid', prec, self._global_step)
self._summary_writer.add_scalar('Recall/valid', rec, self._global_step)
self._summary_writer.flush()
self._global_step += 1
return loss_value, self._global_step
def predict(self, x, msk_conv, msk_msg, no_batch=False):
"""
Use the model for prediction
:param x: input sequence (batch, conv_len, msg_len+2)
:param msk_conv: conversation mask (batch, conv_len)
:param msk_msg: message mask (batch, conv_len, msg_len+2)
:param no_batch: true if there is only 1 batch
:return: [unpad_conv_len]
"""
if no_batch:
self._hps['batch_size'] = 1
with torch.no_grad():
self.eval()
preds = self(x, msk_conv, msk_msg) # conv_len, batch, classes
return self._decode_softmax(preds, msk_conv)
|
py | 1a5544767bd3580fb1c1f6aa040484871ada5058 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class iStealerURL(Signature):
name = "istealer_url"
description = "Contacts C&C server HTTP check-in (iStealer)"
severity = 3
categories = ["istealer"]
authors = ["RedSocks"]
minimum = "2.0"
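# Matches check-in URLs of the form "index*.php*action*logs" (any characters
# in between), i.e. the HTTP check-in described above.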
urls_re = [
".*index.*php.*action.*logs",
]
def on_complete(self):
for indicator in self.urls_re:
match = self.check_url(pattern=indicator, regex=True)
if match:
self.mark_ioc("url", match)
return self.has_marks()
|
py | 1a554550a1df3c710e4ebe36482d44a195a17504 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
from epicteller.core.dao.message import MessageDAO
from epicteller.core.model.message import TextMessageContent
from epicteller.core.util.enum import MessageType
async def main():
start = 0
limit = 1000
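# Page through all messages `limit` at a time; for every text message, normalize
# Windows line endings (\r\n -> \n) and persist the change via MessageDAO.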
while messages := await MessageDAO.scan_messages(start, limit):
for message in messages:
print(f'message {message.id}', end='')
if message.type != MessageType.TEXT:
print(f'...skip, not text')
continue
assert isinstance(message.content, TextMessageContent)
print(f'[{message.content.text[:30]}]', end='')
replaced_text = message.content.text.replace('\r\n', '\n')
if replaced_text == message.content.text:
print(f'...skip, equally')
continue
content = TextMessageContent(text=replaced_text)
await MessageDAO.update_message(message.id, content=content.dict(), updated=message.created)
print(f'...replaced')
start += limit
print('Done')
if __name__ == '__main__':
asyncio.run(main())
|
py | 1a5545acddc1a6d5d88d33a791512060504f95d2 | import numpy as np
# Divides {dataset} into {k} equal (or nearly equal) partitions
def kdivide(dataset, k):
assert k > 1, "Must have k > 1"
return list(map(np.squeeze, np.array_split(dataset, k)))
# Divides {dataset} into features and labels
# NOTE: Assumes labels are the last column
def fldivide(dataset):
return [dataset[:, :-1], dataset[:, -1]]
def fljoin(features, labels):
return np.hstack([features, labels.reshape((len(labels), 1))])
# Makes {dataset} into an nth-order dataset by horizontally appending columns
# e.g. [x^0 x^1 x^2 ... x^n]
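# For instance, a column vector [[2], [3]] with n=2 becomes [[1, 2, 4], [1, 3, 9]]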
def make_nth_order(dataset, n):
return np.hstack([dataset ** i for i in range(n+1)])
# Expands {dataset} into [n, m * len(args)] features by apply each operation f
# in args onto the dataset. Typically these are nonlinear transformations
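# For instance, basis_expand(X, np.sin, np.exp) stacks sin(X) and exp(X) side by side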
def basis_expand(dataset, *args):
return np.hstack([f(dataset) for f in args])
def add_feature_bias(dataset):
ones = np.ones([len(dataset), 1])
return np.hstack([ones, dataset])
|
py | 1a55467b71d0155c05f20f2b2078256c4e0eb5a9 | # -*- coding: utf-8 -*-
"""
Parse Excel table to output compact JSON.
Always outputs to terms.json
Usage:
python TermExtractor.py <inputfile.xlsx>
Dependencies:
pandas
"""
# Importing the libraries
import argparse
import json
import sys
from collections import namedtuple
import pandas as pd
# Requires filename to read from
parser = argparse.ArgumentParser()
parser.add_argument("inputfile")
args = parser.parse_args()
# Importing the excel data
try:
workbook = pd.read_excel(args.inputfile, sheet_name=None, encoding="utf-8")
except Exception:
sys.exit("ERROR: file could not be read")
# Create list of named tuples for matching terms
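# Each Term records the source spelling (default), its primary replacement (yen)
# and any additional accepted replacements (alts); see the export loop below.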
termlist = []
Term = namedtuple("Term", "default yen alts")
for sheet in workbook.values():
height, width = sheet.shape
for y in range(0, height):
for x in range(0, width):
# Detect cells marked with # or % as the first letter
cell = sheet.iat[y, x]
if isinstance(cell, str):
# Regular term inclusion
if cell[0] == '#' or cell[0] == '%':
xpos = x + 1
default = cell[1:].strip()
# Grab primary replacement if valid
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
yen = sheet.iat[y, xpos].strip()
else:
yen = ""
alts = []
while True:
# Add optional alts
xpos += 1
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
alts.append(sheet.iat[y, xpos].strip())
else:
break
# Have at least one replacement, otherwise discard
if yen or alts:
termlist.append(Term(default=default, yen=yen, alts=alts))
# Automatically add a capitalized version only if starting with %
if cell[0] == '%':
xpos = x + 1
default = cell[1:].strip().capitalize()
# Grab primary replacement if valid
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
yen = sheet.iat[y, xpos].strip().capitalize()
else:
yen = ""
alts = []
while True:
# Add optional alts
xpos += 1
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
alts.append(sheet.iat[y, xpos].strip().capitalize())
else:
break
# Have at least one replacement, otherwise discard
if yen or alts:
termlist.append(Term(default=default, yen=yen, alts=alts))
# Sort by length, descending
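# (Longest terms first, presumably so that downstream replacement prefers a full
# term over any shorter term contained inside it.)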
termlist.sort(key=lambda term: len(getattr(term, "default")), reverse=True)
# Export JSON
jset = {}
# Add every term in form: "Default name": ["Default name", "yen name", "alts"]
for t in termlist:
# Add the Default name, then yen name. Repeat Default if no yen name is given
jset[t.default] = [t.default]
jset[t.default].append(t.yen if t.yen else t.default)
for a in t.alts:
jset[t.default].append(a)
try:
with open('terms.json', 'w', encoding="utf-8") as outfile:
json.dump(jset, outfile, indent=4, ensure_ascii=False)
except Exception:
print("ERROR: Writing the file has failed!")
else:
print("terms.json written successfully")
|
py | 1a5546f01b754b2326dd51bc52a241cb7406d9e0 | #!/bin/python3.5
# call it the regression testing file
# @DEVI-if you wanna pipe the output, run with python -u. buffered output
# screws up the output
import sys
import os
from test_LEB128 import test_signed_LEB128
from test_LEB128 import test_unsigned_LEB128
from leb128s import leb128sencodedecodeexhaustive
from leb128s import leb128uencodedecodeexhaustive
from abc import ABCMeta, abstractmethod
sys.path.append('../')
from utils import Colors
from argparser import *
from TBInit import *
total_test_cnt = int()
expected_pass_cnt = int()
expected_fail_cnt = int()
success = Colors.green + "SUCCESS: " + Colors.ENDC
fail = Colors.red + "FAIL: " + Colors.ENDC
# in order to keep the regression test script clean, the tests will need to
# inherit from this test class, implement the two virtual methods and then call
# it inside the main.
class Void_Spwner(metaclass=ABCMeta):
def __init__(self):
pass
# this is the method that runs your tests
@abstractmethod
def Legacy(self):
pass
# this tells the class what name to use to display your test results
@abstractmethod
def GetName(self):
return(str())
def Spwn(self):
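# Fork a child to run the test in isolation; a non-zero exit status (including a
# crash) is reported as a failure without taking down the parent test driver.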
pid = os.fork()
# I don't have a bellybutton
if pid == 0:
self.Legacy()
sys.exit()
elif pid > 0:
cpid, status = os.waitpid(pid, 0)
if status == 0:
print(success + self.GetName())
else:
print(fail + self.GetName())
else:
# basically we couldnt fork a child
print(fail + 'return code: ' + str(pid))
raise Exception("could not fork child")
def ObjectList():
obj_list = []
cwd = os.getcwd()
for file in os.listdir(cwd + "/testsuite"):
if file.endswith(".wasm"):
obj_list.append(cwd + "/testsuite/" + file)
return(obj_list)
################################################################################
class LEB128EncodeTest(Void_Spwner):
def Legacy(self):
test_unsigned_LEB128()
test_signed_LEB128()
def GetName(self):
return('leb128encodetest')
class LEB128Exhaustive(Void_Spwner):
def Legacy(self):
leb128sencodedecodeexhaustive()
leb128uencodedecodeexhaustive()
def GetName(self):
return('leb128exhaustive')
################################################################################
def main():
return_list = []
# LEB128 tests
leb128encodetest = LEB128EncodeTest()
leb128encodetest.Spwn()
# leb128s exhaustive
leb128sex = LEB128Exhaustive()
leb128sex.Spwn()
# parser test on the WASM testsuite
obj_list = ObjectList()
for testfile in obj_list:
pid = os.fork()
# I dont have a bellybutton
if pid == 0:
# @DEVI-FIXME-pipe stdout and stderr to a file instead of the
# bitbucket
sys.stdout = open('/dev/null', 'w')
sys.stderr = open('/dev/null', 'w')
interpreter = PythonInterpreter()
module = interpreter.parse(testfile)
interpreter.appendmodule(module)
interpreter.dump_sections(module)
interpreter.runValidations()
vm = VM(interpreter.getmodules())
ms = vm.getState()
# interpreter.dump_sections(module)
DumpIndexSpaces(ms)
DumpLinearMems(ms.Linear_Memory, 1000)
sys.exit()
# the parent process
elif pid > 0:
# @DEVI-FIXME-we are intentionally blocking. later i will fix this
# so we can use multicores to run our reg tests faster.
cpid, status = os.waitpid(pid, 0)
return_list.append(status)
if status == 0:
print(success + testfile)
else:
print(fail + testfile)
else:
# basically we couldnt fork a child
print(fail + 'return code: ' + str(pid))
raise Exception("could not fork child")
if __name__ == '__main__':
main()
|
py | 1a5547b6f7135dbbe9cae02af738333c77b35f79 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import mox
import testtools
import urlparse
import uuid
import iso8601
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import availability_zone
from nova.api.openstack.compute.plugins.v3 import ips
from nova.api.openstack.compute.plugins.v3 import keypairs
from nova.api.openstack.compute.plugins.v3 import servers
from nova.api.openstack.compute import views
from nova.api.openstack import xmlutil
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.image import glance
from nova.network import manager
from nova.network.neutronv2 import api as neutron_api
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests import matchers
from nova.tests import utils
from nova import utils as nova_utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
XPATH_NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/compute/api/v1.1'
}
INSTANCE_IDS = {FAKE_UUID: 1}
FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
def fake_gen_uuid():
return FAKE_UUID
def return_servers_empty(context, *args, **kwargs):
return []
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, values, update_cells=True):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return inst
def fake_compute_api(cls, req, id):
return True
def fake_start_stop_not_ready(self, context, instance):
raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_instance_get_by_uuid_not_found(context, uuid, columns_to_join):
raise exception.InstanceNotFound(instance_id=uuid)
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password
class Base64ValidationTest(test.TestCase):
def setUp(self):
super(Base64ValidationTest, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def test_decode_base64(self):
value = "A random string"
result = self.controller._decode_base64(base64.b64encode(value))
self.assertEqual(result, value)
def test_decode_base64_binary(self):
value = "\x00\x12\x75\x99"
result = self.controller._decode_base64(base64.b64encode(value))
self.assertEqual(result, value)
def test_decode_base64_whitespace(self):
value = "A random string"
encoded = base64.b64encode(value)
white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
result = self.controller._decode_base64(white)
self.assertEqual(result, value)
def test_decode_base64_invalid(self):
invalid = "A random string"
result = self.controller._decode_base64(invalid)
self.assertEqual(result, None)
def test_decode_base64_illegal_bytes(self):
value = "A random string"
encoded = base64.b64encode(value)
white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
result = self.controller._decode_base64(white)
self.assertEqual(result, None)
class NeutronV2Subclass(neutron_api.API):
"""Used to ensure that API handles subclasses properly."""
pass
class ControllerTest(test.TestCase):
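# Shared fixture: stubs out rate limiting, key pairs, the image service and the
# relevant DB calls so the servers controller can be exercised without a real
# deployment behind it.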
def setUp(self):
super(ControllerTest, self).setUp()
self.flags(verbose=True, use_ipv6=False)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
return_server = fakes.fake_instance_get()
return_servers = fakes.fake_instance_get_all_by_filters()
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers)
self.stubs.Set(db, 'instance_get_by_uuid',
return_server)
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update_and_get_original)
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
self.ips_controller = ips.IPsController()
policy.reset()
policy.init()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
class ServersControllerTest(ControllerTest):
def setUp(self):
super(ServersControllerTest, self).setUp()
CONF.set_override('glance_host', 'localhost')
nova_utils.reset_is_neutron()
def test_requested_networks_prefix(self):
uuid = 'br-00000000-0000-0000-0000-000000000000'
requested_networks = [{'uuid': uuid}]
res = self.controller._get_requested_networks(requested_networks)
self.assertTrue((uuid, None) in res)
def test_requested_networks_neutronv2_enabled_with_port(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEquals(res, [(None, None, port)])
def test_requested_networks_neutronv2_enabled_with_network(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEquals(res, [(network, None, None)])
def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEquals(res, [(None, None, port)])
def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
addr = '10.0.0.1'
requested_networks = [{'uuid': network,
'fixed_ip': addr,
'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_neutronv2_disabled_with_port(self):
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_api_enabled_with_v2_subclass(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEquals(res, [(None, None, port)])
def test_requested_networks_neutronv2_subclass_with_port(self):
cls = 'nova.tests.api.openstack.compute.test_servers.NeutronV2Subclass'
self.flags(network_api_class=cls)
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEquals(res, [(None, None, port)])
def test_get_server_by_uuid(self):
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
def test_unique_host_id(self):
"""Create two servers with the same host and different
project_ids and check that the host_ids are unique.
"""
def return_instance_with_host(self, *args, **kwargs):
project_id = str(uuid.uuid4())
return fakes.stub_instance(id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid',
return_instance_with_host)
self.stubs.Set(db, 'instance_get',
return_instance_with_host)
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
server1 = self.controller.show(req, FAKE_UUID)
server2 = self.controller.show(req, FAKE_UUID)
self.assertNotEqual(server1['server']['host_id'],
server2['server']['host_id'])
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
return {
"server": {
"id": uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": progress,
"name": "server1",
"status": status,
"access_ip_v4": "",
"access_ip_v6": "",
"host_id": '',
"image": {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {
"seq": "1",
},
"links": [
{
"rel": "self",
"href": "http://localhost/v3/servers/%s" % uuid,
},
{
"rel": "bookmark",
"href": "http://localhost/servers/%s" % uuid,
},
],
}
}
def test_get_server_by_id(self):
self.flags(use_ipv6=True)
image_bookmark = "http://localhost:9292/images/10"
flavor_bookmark = "http://localhost/flavors/1"
uuid = FAKE_UUID
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status="BUILD",
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost:9292/images/10"
flavor_bookmark = "http://localhost/flavors/1"
new_return_server = fakes.fake_instance_get(
vm_state=vm_states.ACTIVE, progress=100)
self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
uuid = FAKE_UUID
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_id_image_ref_by_id(self):
image_ref = "10"
image_bookmark = "http://localhost:9292/images/10"
flavor_id = "1"
flavor_bookmark = "http://localhost/flavors/1"
new_return_server = fakes.fake_instance_get(
vm_state=vm_states.ACTIVE, image_ref=image_ref,
flavor_id=flavor_id, progress=100)
self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
uuid = FAKE_UUID
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_addresses_from_cache(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
pub1 = ('1.2.3.4',)
pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
priv0 = ('192.168.0.3', '192.168.0.4',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'public',
'subnets': [{'cidr': '172.19.0.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': '1.2.3.0/16',
'ips': [_ip(ip) for ip in pub1]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub2]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br1',
'id': 2,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip(ip) for ip in priv0]}]}}]
return_server = fakes.fake_instance_get(nw_cache=nw_cache)
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % FAKE_UUID)
res_dict = self.ips_controller.index(req, FAKE_UUID)
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3',
'type': 'fixed', 'mac_addr': 'bb:bb:bb:bb:bb:bb'},
{'version': 4, 'addr': '192.168.0.4',
'type': 'fixed', 'mac_addr': 'bb:bb:bb:bb:bb:bb'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '172.19.0.2',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '1.2.3.4',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
],
},
}
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_get_server_addresses_nonexistent_network(self):
url = '/v3/servers/%s/ips/network_0' % FAKE_UUID
req = fakes.HTTPRequestV3.blank(url)
self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
req, FAKE_UUID, 'network_0')
def test_get_server_addresses_nonexistent_server(self):
def fake_instance_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
server_id = str(uuid.uuid4())
req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % server_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, server_id)
def test_get_server_list_empty(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_empty)
req = fakes.HTTPRequestV3.blank('/servers')
res_dict = self.controller.index(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_list_with_reservation_id(self):
req = fakes.HTTPRequestV3.blank('/servers?reservation_id=foo')
res_dict = self.controller.index(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_empty(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_details(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list(self):
req = fakes.HTTPRequestV3.blank('/servers')
res_dict = self.controller.index(req)
self.assertEqual(len(res_dict['servers']), 5)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertEqual(s.get('image', None), None)
expected_links = [
{
"rel": "self",
"href": "http://localhost/v3/servers/%s" % s['id'],
},
{
"rel": "bookmark",
"href": "http://localhost/servers/%s" % s['id'],
},
]
self.assertEqual(s['links'], expected_links)
def test_get_servers_with_limit(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=3')
res_dict = self.controller.index(req)
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v3/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_server_details_empty(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_empty)
req = fakes.HTTPRequestV3.blank('/servers/detail')
res_dict = self.controller.index(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_details_with_limit(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?limit=3')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v3/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail, req)
def test_get_server_details_with_limit_and_other_params(self):
req = fakes.HTTPRequestV3.blank('/servers/detail'
'?limit=3&blah=2:t')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v3/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'blah': ['2:t'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=30')
res_dict = self.controller.index(req)
self.assertTrue('servers_links' not in res_dict)
def test_get_servers_with_bad_limit(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_marker(self):
url = '/v3/servers?marker=%s' % fakes.get_fake_uuid(2)
req = fakes.HTTPRequestV3.blank(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
def test_get_servers_with_limit_and_marker(self):
url = '/v3/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
req = fakes.HTTPRequestV3.blank(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
def test_get_servers_with_bad_marker(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=2&marker=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_bad_option(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?unknownoption=whee')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_image(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('image' in search_opts)
self.assertEqual(search_opts['image'], '12345')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?image=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_tenant_id_filter_converts_to_project_id_for_admin(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None):
self.assertNotEqual(filters, None)
self.assertEqual(filters['project_id'], 'newfake')
self.assertFalse(filters.get('tenant_id'))
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers'
'?all_tenants=1&tenant_id=newfake',
use_admin_context=True)
res = self.controller.index(req)
self.assertTrue('servers' in res)
def test_admin_restricted_tenant(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None):
self.assertNotEqual(filters, None)
self.assertEqual(filters['project_id'], 'fake')
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers',
use_admin_context=True)
res = self.controller.index(req)
self.assertTrue('servers' in res)
def test_all_tenants_pass_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None):
self.assertNotEqual(filters, None)
self.assertTrue('project_id' not in filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
rules = {
"compute:get_all_tenants":
common_policy.parse_rule("project_id:fake"),
"compute:get_all":
common_policy.parse_rule("project_id:fake"),
}
common_policy.set_rules(common_policy.Rules(rules))
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
res = self.controller.index(req)
self.assertTrue('servers' in res)
def test_all_tenants_fail_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None):
self.assertNotEqual(filters, None)
return [fakes.stub_instance(100)]
rules = {
"compute:get_all_tenants":
common_policy.parse_rule("project_id:non_fake"),
"compute:get_all":
common_policy.parse_rule("project_id:fake"),
}
common_policy.set_rules(common_policy.Rules(rules))
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_get_servers_allows_flavor(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('flavor' in search_opts)
# flavor is an integer ID
self.assertEqual(search_opts['flavor'], '12345')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?flavor=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_with_bad_flavor(self):
req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_servers_allows_status(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('vm_state' in search_opts)
self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=active')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_task_status(self):
server_uuid = str(uuid.uuid4())
task_state = task_states.REBOOTING
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('task_state' in search_opts)
self.assertEqual(search_opts['task_state'], [task_state])
db_list = [fakes.stub_instance(100, uuid=server_uuid,
task_state=task_state)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=reboot')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_invalid_status(self):
# Test getting servers by invalid status.
req = fakes.HTTPRequestV3.blank('/servers?status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_servers_deleted_status_as_user(self):
req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
use_admin_context=False)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail, req)
def test_get_servers_deleted_status_as_admin(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertTrue('vm_state' in search_opts)
self.assertEqual(search_opts['vm_state'], 'deleted')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_name(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('name' in search_opts)
self.assertEqual(search_opts['name'], 'whee.*')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?name=whee.*')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('changes_since' in search_opts)
changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertEqual(search_opts['changes_since'], changes_since)
self.assertTrue('deleted' not in search_opts)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
params = 'changes_since=2011-01-24T17:08:01Z'
req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since_bad_value(self):
params = 'changes_since=asdf'
req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
def test_get_servers_admin_filters_as_user(self):
"""Test getting servers by admin-only or unknown options when
context is not admin. Make sure the admin and unknown options
are stripped before they get to compute_api.get_all()
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
# Allowed by user
self.assertTrue('name' in search_opts)
self.assertTrue('ip' in search_opts)
# OSAPI converts status to vm_state
self.assertTrue('vm_state' in search_opts)
# Allowed only by admins with admin API on
self.assertFalse('unknown_option' in search_opts)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = fakes.HTTPRequest.blank('/servers?%s' % query_str)
res = self.controller.index(req)
servers = res['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_admin_options_as_admin(self):
"""Test getting servers by admin-only or unknown options when
context is admin. All options should be passed
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
# Allowed by user
self.assertTrue('name' in search_opts)
# OSAPI converts status to vm_state
self.assertTrue('vm_state' in search_opts)
# Allowed only by admins with admin API on
self.assertTrue('ip' in search_opts)
self.assertTrue('unknown_option' in search_opts)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = fakes.HTTPRequestV3.blank('/servers?%s' % query_str,
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_ip(self):
"""Test getting servers by ip."""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('ip' in search_opts)
self.assertEqual(search_opts['ip'], '10\..*')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?ip=10\..*')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_admin_allows_ip6(self):
"""Test getting servers by ip6 with admin_api enabled and
admin context
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False):
self.assertNotEqual(search_opts, None)
self.assertTrue('ip6' in search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, instance_obj.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?ip6=ffff.*',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_all_server_details(self):
expected_flavor = {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": 'http://localhost/flavors/1',
},
],
}
expected_image = {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": 'http://localhost:9292/images/10',
},
],
}
req = fakes.HTTPRequestV3.blank('/servers/detail')
res_dict = self.controller.detail(req)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['host_id'], '')
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertEqual(s['image'], expected_image)
self.assertEqual(s['flavor'], expected_flavor)
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], str(i + 1))
def test_get_all_server_details_with_host(self):
        """Make sure that if two instances are on the same host, then
        they return the same host_id. If two instances are on different
        hosts, they should return different host_ids. In this test, there
        are 5 instances - 2 on one host and 3 on another.
        """
def return_servers_with_host(context, *args, **kwargs):
return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
uuid=fakes.get_fake_uuid(i))
for i in xrange(5)]
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_with_host)
req = fakes.HTTPRequestV3.blank('/servers/detail')
res_dict = self.controller.detail(req)
server_list = res_dict['servers']
host_ids = [server_list[0]['host_id'], server_list[1]['host_id']]
self.assertTrue(host_ids[0] and host_ids[1])
self.assertNotEqual(host_ids[0], host_ids[1])
for i, s in enumerate(server_list):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['host_id'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % (i + 1))
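# Tests for DELETE /servers/<uuid>.  db.instance_destroy is stubbed in setUp
# so each test can check whether the delete path was actually reached.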
class ServersControllerDeleteTest(ControllerTest):
def setUp(self):
super(ServersControllerDeleteTest, self).setUp()
self.server_delete_called = False
def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
def _create_delete_request(self, uuid):
fakes.stub_out_instance_quota(self.stubs, 0, 10)
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
req.method = 'DELETE'
return req
def _delete_server_instance(self, uuid=FAKE_UUID):
req = self._create_delete_request(uuid)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.controller.delete(req, uuid)
def test_delete_server_instance(self):
self._delete_server_instance()
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self._delete_server_instance,
uuid='non-existent-uuid')
def test_delete_server_instance_while_building(self):
req = self._create_delete_request(FAKE_UUID)
self.controller.delete(req, FAKE_UUID)
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_while_resize(self):
req = self._create_delete_request(FAKE_UUID)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_PREP))
self.controller.delete(req, FAKE_UUID)
        # Delete should be allowed in any case, even during resizing,
        # because it may get stuck.
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_if_not_launched(self):
self.flags(reclaim_instance_interval=3600)
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
req.method = 'DELETE'
self.server_delete_called = False
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(launched_at=None))
def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
self.controller.delete(req, FAKE_UUID)
# delete() should be called for instance which has never been active,
# even if reclaim_instance_interval has been set.
self.assertEqual(self.server_delete_called, True)
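# Tests for the rebuild server action.  Each test tweaks self.body['rebuild']
# and checks that invalid input (bad IPs, bad metadata, unsuitable images) is
# rejected; the class also covers the start/stop action error paths.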
class ServersControllerRebuildInstanceTest(ControllerTest):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/fake/images/%s' % image_uuid
def setUp(self):
super(ServersControllerRebuildInstanceTest, self).setUp()
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.body = {
'rebuild': {
'name': 'new_name',
'image_ref': self.image_href,
'metadata': {
'open': 'stack',
},
},
}
self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def test_rebuild_instance_with_access_ipv4_bad_format(self):
# proper local hrefs must start with 'http://localhost/v2/'
self.body['rebuild']['access_ip_v4'] = 'bad_format'
self.body['rebuild']['access_ip_v6'] = 'fead::1234'
self.body['rebuild']['metadata']['hello'] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_with_blank_metadata_key(self):
self.body['rebuild']['access_ip_v4'] = '0.0.0.0'
self.body['rebuild']['access_ip_v6'] = 'fead::1234'
self.body['rebuild']['metadata'][''] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_with_metadata_key_too_long(self):
self.body['rebuild']['access_ip_v4'] = '0.0.0.0'
self.body['rebuild']['access_ip_v6'] = 'fead::1234'
self.body['rebuild']['metadata'][('a' * 260)] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_with_metadata_value_too_long(self):
self.body['rebuild']['access_ip_v4'] = '0.0.0.0'
self.body['rebuild']['access_ip_v6'] = 'fead::1234'
self.body['rebuild']['metadata']['key1'] = ('a' * 260)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_rebuild, self.req,
FAKE_UUID, self.body)
def test_rebuild_instance_fails_when_min_ram_too_small(self):
# make min_ram larger than our instance ram size
def fake_get_image(self, context, image_href):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="4096", min_disk="10")
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_fails_when_min_disk_too_small(self):
# make min_disk larger than our instance disk size
def fake_get_image(self, context, image_href):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="128", min_disk="100000")
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild, self.req,
FAKE_UUID, self.body)
def test_rebuild_instance_image_too_large(self):
# make image size larger than our instance disk size
size = str(1000 * (1024 ** 3))
def fake_get_image(self, context, image_href):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', size=size)
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_name_all_blank(self):
def fake_get_image(self, context, image_href):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True, status='active')
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.body['rebuild']['name'] = ' '
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_with_deleted_image(self):
def fake_get_image(self, context, image_href):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='DELETED')
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_rebuild_instance_with_access_ipv6_bad_format(self):
self.body['rebuild']['access_ip_v4'] = '1.2.3.4'
self.body['rebuild']['access_ip_v6'] = 'bad_format'
self.body['rebuild']['metadata']['hello'] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, self.body)
def test_start(self):
self.mox.StubOutWithMock(compute_api.API, 'start')
compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.controller._start_server(req, FAKE_UUID, body)
def test_start_not_ready(self):
self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_stop(self):
self.mox.StubOutWithMock(compute_api.API, 'stop')
compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.controller._stop_server(req, FAKE_UUID, body)
def test_stop_not_ready(self):
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_start_with_bogus_id(self):
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._start_server, req, 'test_inst', body)
def test_stop_with_bogus_id(self):
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
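# Tests for PUT /servers/<uuid>: server name and access IP updates, request
# validation, and the not-found error paths.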
class ServersControllerUpdateTest(ControllerTest):
def _get_request(self, body=None, options=None):
if options:
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(**options))
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = jsonutils.dumps(body)
return req
def test_update_server_all_attributes(self):
body = {'server': {
'name': 'server_test',
'access_ip_v4': '0.0.0.0',
'access_ip_v6': 'beef::0123',
}}
req = self._get_request(body, {'name': 'server_test',
'access_ipv4': '0.0.0.0',
'access_ipv6': 'beef::0123'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
self.assertEqual(res_dict['server']['access_ip_v4'], '0.0.0.0')
self.assertEqual(res_dict['server']['access_ip_v6'], 'beef::123')
def test_update_server_invalid_xml_raises_lookup(self):
req = webob.Request.blank('/v3/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/xml'
        # XML request which raises LookupError
req.body = """<?xml version="1.0" encoding="TF-8"?>
<metadata
xmlns="http://docs.openstack.org/compute/api/v1.1"
key="Label"></metadata>"""
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res_dict['badRequest']['message'],
"Malformed request body")
def test_update_server_invalid_xml_raises_expat(self):
req = webob.Request.blank('/v3/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/xml'
        # XML request which raises ExpatError
req.body = """<?xml version="1.0" encoding="UTF-8"?>
<metadata
xmlns="http://docs.openstack.org/compute/api/v1.1"
key="Label"></meta>"""
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res_dict['badRequest']['message'],
"Malformed request body")
def test_update_server_name(self):
body = {'server': {'name': 'server_test'}}
req = self._get_request(body, {'name': 'server_test'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_name_too_long(self):
body = {'server': {'name': 'x' * 256}}
req = self._get_request(body, {'name': 'server_test'})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, FAKE_UUID, body)
def test_update_server_name_all_blank_spaces(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/v3/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' ' * 64}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, FAKE_UUID, body)
def test_update_server_access_ipv4(self):
body = {'server': {'access_ip_v4': '0.0.0.0'}}
req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['access_ip_v4'], '0.0.0.0')
def test_update_server_access_ipv4_bad_format(self):
body = {'server': {'access_ip_v4': 'bad_format'}}
req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, FAKE_UUID, body)
def test_update_server_access_ipv4_none(self):
body = {'server': {'access_ip_v4': None}}
req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['access_ip_v4'], '')
def test_update_server_access_ipv4_blank(self):
body = {'server': {'access_ip_v4': ''}}
req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['access_ip_v4'], '')
def test_update_server_access_ipv6(self):
body = {'server': {'access_ip_v6': 'beef::0123'}}
req = self._get_request(body, {'access_ipv6': 'beef::0123'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['access_ip_v6'], 'beef::123')
def test_update_server_access_ipv6_bad_format(self):
body = {'server': {'access_ip_v6': 'bad_format'}}
req = self._get_request(body, {'access_ipv6': 'beef::0123'})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, FAKE_UUID, body)
def test_update_server_access_ipv6_none(self):
body = {'server': {'access_ip_v6': None}}
req = self._get_request(body, {'access_ipv6': 'beef::0123'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['access_ip_v6'], '')
def test_update_server_access_ipv6_blank(self):
body = {'server': {'access_ip_v6': ''}}
req = self._get_request(body, {'access_ipv6': 'beef::0123'})
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['access_ip_v6'], '')
def test_update_server_adminPass_ignored(self):
inst_dict = dict(name='server_test', adminPass='bacon')
body = dict(server=inst_dict)
def server_update(context, id, params):
filtered_dict = {
'display_name': 'server_test',
}
self.assertEqual(params, filtered_dict)
filtered_dict['uuid'] = id
return filtered_dict
self.stubs.Set(db, 'instance_update', server_update)
# FIXME (comstud)
# self.stubs.Set(db, 'instance_get',
# return_server_with_attributes(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dumps(body)
res_dict = self.controller.update(req, FAKE_UUID, body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_not_found(self):
def fake_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'get', fake_get)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body)
def test_update_server_not_found_on_update(self):
def fake_update(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body)
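# Tests that (vm_state, task_state) pairs are mapped to the expected API
# status strings, plus policy failures for the reboot/resize actions.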
class ServerStatusTest(test.TestCase):
def setUp(self):
super(ServerStatusTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
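    # Helper: stub the instance lookup with the given states and return the
    # serialized server from a GET request.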
def _get_with_state(self, vm_state, task_state=None):
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_state,
task_state=task_state))
request = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
return self.controller.show(request, FAKE_UUID)
def test_active(self):
response = self._get_with_state(vm_states.ACTIVE)
self.assertEqual(response['server']['status'], 'ACTIVE')
def test_reboot(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING)
self.assertEqual(response['server']['status'], 'REBOOT')
def test_reboot_hard(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING_HARD)
self.assertEqual(response['server']['status'], 'HARD_REBOOT')
    def test_reboot_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:reboot':
common_policy.parse_rule('role:admin')}
common_policy.set_rules(common_policy.Rules(rule))
req = fakes.HTTPRequestV3.blank('/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_reboot, req, '1234',
{'reboot': {'type': 'HARD'}})
def test_rebuild(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBUILDING)
self.assertEqual(response['server']['status'], 'REBUILD')
def test_rebuild_error(self):
response = self._get_with_state(vm_states.ERROR)
self.assertEqual(response['server']['status'], 'ERROR')
def test_resize(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.RESIZE_PREP)
self.assertEqual(response['server']['status'], 'RESIZE')
def test_confirm_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:confirm_resize':
common_policy.parse_rule('role:admin')}
common_policy.set_rules(common_policy.Rules(rule))
req = fakes.HTTPRequestV3.blank('/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_confirm_resize, req, '1234', {})
def test_verify_resize(self):
response = self._get_with_state(vm_states.RESIZED, None)
self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
def test_revert_resize(self):
response = self._get_with_state(vm_states.RESIZED,
task_states.RESIZE_REVERTING)
self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
def test_revert_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:revert_resize':
common_policy.parse_rule('role:admin')}
common_policy.set_rules(common_policy.Rules(rule))
req = fakes.HTTPRequestV3.blank('/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_revert_resize, req, '1234', {})
def test_password_update(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.UPDATING_PASSWORD)
self.assertEqual(response['server']['status'], 'PASSWORD')
def test_stopped(self):
response = self._get_with_state(vm_states.STOPPED)
self.assertEqual(response['server']['status'], 'SHUTOFF')
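# Tests for POST /servers.  setUp below stubs out the db, network and image
# layers so create() can run end-to-end against fakes.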
class ServersControllerCreateTest(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self.stubs)
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
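        # Fake db.instance_create: build an instance record from the request
        # and cache it by id and uuid so the lookup/update stubs below can
        # find it again.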
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params, update_cells=True):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return inst
def server_update_and_get_original(
context, instance_uuid, params, update_cells=False,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
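        # Wire the fakes defined above into the db, network and image layers.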
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update_and_get_original)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.body = {
'server': {
'name': 'server_test',
'image_ref': self.image_uuid,
'flavor_ref': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
self.bdm = [{'delete_on_termination': 1,
'device_name': 123,
'volume_size': 1,
'volume_id': '11111111-1111-1111-1111-111111111111'}]
self.req = fakes.HTTPRequest.blank('/fake/servers')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def _check_admin_pass_len(self, server_dict):
"""utility function - check server_dict for admin_pass length."""
self.assertEqual(CONF.password_length,
len(server_dict["admin_pass"]))
def _check_admin_pass_missing(self, server_dict):
"""utility function - check server_dict for absence of admin_pass."""
self.assertTrue("admin_pass" not in server_dict)
def _test_create_instance(self, flavor=2):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
self.body['server']['image_ref'] = image_uuid
self.body['server']['flavor_ref'] = flavor
self.req.body = jsonutils.dumps(self.body)
server = self.controller.create(self.req, self.body).obj['server']
self._check_admin_pass_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_private_flavor(self):
values = {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': '1324',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': False,
}
db.flavor_create(context.get_admin_context(), values)
self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
flavor=1324)
def test_create_server_bad_image_href(self):
image_href = 1
self.body['server']['min_count'] = 1
self.body['server']['image_ref'] = image_href,
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, self.body)
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_server_with_invalid_networks_parameter(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# flavor_ref = 'http://localhost/123/flavors/3'
# body = {
# 'server': {
# 'name': 'server_test',
# 'imageRef': image_href,
# 'flavorRef': flavor_ref,
# 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
# }
# }
# req = fakes.HTTPRequest.blank('/v2/fake/servers')
# req.method = 'POST'
# req.body = jsonutils.dumps(body)
# req.headers["content-type"] = "application/json"
# self.assertRaises(webob.exc.HTTPBadRequest,
# self.controller.create,
# req,
# body)
def test_create_server_with_deleted_image(self):
# Get the fake image service so we can set the status to deleted
(image_service, image_id) = glance.get_remote_image_service(
context, '')
image_service.update(context, self.image_uuid, {'status': 'DELETED'})
self.addCleanup(image_service.update, context, self.image_uuid,
{'status': 'active'})
self.body['server']['flavor_ref'] = 2
self.req.body = jsonutils.dumps(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
self.controller.create(self.req, self.body)
def test_create_server_image_too_large(self):
# Get the fake image service so we can set the status to deleted
(image_service, image_id) = glance.get_remote_image_service(
context, self.image_uuid)
image = image_service.show(context, image_id)
orig_size = image['size']
new_size = str(1000 * (1024 ** 3))
image_service.update(context, self.image_uuid, {'size': new_size})
self.addCleanup(image_service.update, context, self.image_uuid,
{'size': orig_size})
self.body['server']['flavor_ref'] = 2
self.req.body = jsonutils.dumps(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
"Instance type's disk is too small for requested image."):
self.controller.create(self.req, self.body)
def test_create_instance_image_ref_is_bookmark(self):
image_href = 'http://localhost/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_image_ref_is_invalid(self):
image_uuid = 'this_is_not_a_valid_uuid'
image_href = 'http://localhost/images/%s' % image_uuid
flavor_ref = 'http://localhost/flavors/3'
self.body['server']['image_ref'] = image_href
self.body['server']['flavor_ref'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, self.body)
def test_create_instance_no_key_pair(self):
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
self._test_create_instance()
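    # Helper: merge extra parameters into the create body and issue the
    # request; used by tests that only check what reaches compute_api.create.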
def _test_create_extra(self, params, no_image=False):
self.body['server']['flavor_ref'] = 2
if no_image:
self.body['server'].pop('image_ref', None)
self.body['server'].update(params)
self.req.body = jsonutils.dumps(self.body)
self.req.headers["content-type"] = "application/json"
server = self.controller.create(self.req, self.body).obj['server']
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-keypairs extension tests
# def test_create_instance_with_keypairs_enabled(self):
# self.ext_mgr.extensions = {'os-keypairs': 'fake'}
# key_name = 'green'
#
# params = {'key_name': key_name}
# old_create = compute_api.API.create
#
# # NOTE(sdague): key pair goes back to the database,
# # so we need to stub it out for tests
# def key_pair_get(context, user_id, name):
# return {'public_key': 'FAKE_KEY',
# 'fingerprint': 'FAKE_FINGERPRINT',
# 'name': name}
#
# def create(*args, **kwargs):
# self.assertEqual(kwargs['key_name'], key_name)
# return old_create(*args, **kwargs)
#
# self.stubs.Set(db, 'key_pair_get', key_pair_get)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
#
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_instance_with_networks_enabled(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# requested_networks = [{'uuid': net_uuid}]
# params = {'networks': requested_networks}
# old_create = compute_api.API.create
# def create(*args, **kwargs):
# result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
# self.assertEqual(kwargs['requested_networks'], result)
# return old_create(*args, **kwargs)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
def test_create_instance_with_networks_disabled_neutronv2(self):
nova_utils.reset_is_neutron()
self.flags(network_api_class='nova.network.neutronv2.api.API')
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
None)]
self.assertEqual(kwargs['requested_networks'], result)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_networks_disabled(self):
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['requested_networks'], None)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_access_ip(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['access_ip_v4'] = '1.2.3.4'
self.body['server']['access_ip_v6'] = 'fead::1234'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self._check_admin_pass_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_with_access_ip_pass_disabled(self):
        # test with admin passwords disabled, see lp bug 921814
        self.flags(enable_instance_password=False)
        # proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['access_ip_v4'] = '1.2.3.4'
self.body['server']['access_ip_v6'] = 'fead::1234'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self._check_admin_pass_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_bad_format_access_ip_v4(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['access_ip_v4'] = 'bad_format'
self.body['server']['access_ip_v6'] = 'fead::1234'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, self.body)
def test_create_instance_bad_format_access_ip_v6(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['access_ip_v4'] = '1.2.3.4'
self.body['server']['access_ip_v6'] = 'bad_format'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, self.body)
def test_create_instance_name_too_long(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = 'X' * 256
self.body['server']['image_ref'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, self.body)
def test_create_instance_name_all_blank_spaces(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/images/%s' % image_uuid
flavor_ref = 'http://localhost/flavors/3'
body = {
'server': {
'name': ' ' * 64,
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'personality': [
{
"path": "/etc/banner.txt",
"contents": "MQ==",
},
],
},
}
req = fakes.HTTPRequest.blank('/v3/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_create_instance(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self._check_admin_pass_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_extension_create_exception(self):
def fake_keypair_server_create(self, server_dict,
create_kwargs):
raise KeyError
self.stubs.Set(keypairs.Keypairs, 'server_create',
fake_keypair_server_create)
# proper local hrefs must start with 'http://localhost/v3/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/images/%s' % image_uuid
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(KeyError, self.controller.create, req, body)
def test_create_instance_pass_disabled(self):
self.flags(enable_instance_password=False)
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self._check_admin_pass_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_too_much_metadata(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['metadata']['vote'] = 'fiddletown'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, self.req, self.body)
def test_create_instance_metadata_key_too_long(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['metadata'] = {('a' * 260): '12345'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, self.req, self.body)
def test_create_instance_metadata_value_too_long(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['metadata'] = {'key1': ('a' * 260)}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, self.req, self.body)
def test_create_instance_metadata_key_blank(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['image_ref'] = image_href
self.body['server']['metadata'] = {'': 'abcd'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, self.body)
def test_create_instance_invalid_key_name(self):
image_href = 'http://localhost/v2/images/2'
self.body['server']['image_ref'] = image_href
self.body['server']['key_name'] = 'nonexistentkey'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, self.body)
def test_create_instance_valid_key_name(self):
self.body['server']['key_name'] = 'key'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_pass_len(res["server"])
def test_create_instance_invalid_flavor_href(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = 'http://localhost/v2/flavors/asdf'
self.body['server']['image_ref'] = image_href
self.body['server']['flavor_ref'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, self.body)
def test_create_instance_invalid_flavor_id_int(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = -1
self.body['server']['image_ref'] = image_href
self.body['server']['flavor_ref'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, self.body)
def test_create_instance_bad_flavor_href(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = 'http://localhost/v2/flavors/17'
self.body['server']['image_ref'] = image_href
self.body['server']['flavor_ref'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, self.body)
def test_create_instance_bad_href(self):
image_href = 'asdf'
self.body['server']['image_ref'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, self.body)
def test_create_instance_local_href(self):
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_admin_pass(self):
self.body['server']['flavor_ref'] = 3,
self.body['server']['admin_pass'] = 'testpass'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
self.assertEqual(server['admin_pass'],
self.body['server']['admin_pass'])
def test_create_instance_admin_pass_pass_disabled(self):
self.flags(enable_instance_password=False)
self.body['server']['flavor_ref'] = 3,
self.body['server']['admin_pass'] = 'testpass'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
        self.assertTrue('admin_pass' in self.body['server'])
        self._check_admin_pass_missing(server)
def test_create_instance_admin_pass_empty(self):
self.body['server']['flavor_ref'] = 3,
self.body['server']['admin_pass'] = ''
self.req.body = jsonutils.dumps(self.body)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, self.body)
def test_create_location(self):
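        # The Location header of the create response should be the versioned
        # self link rather than the bookmark link.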
selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
bookhref = 'http://localhost/fake/servers/%s' % FAKE_UUID
self.req.body = jsonutils.dumps(self.body)
robj = self.controller.create(self.req, self.body)
self.assertEqual(robj['Location'], selfhref)
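    # Helper for the over-quota tests below: stub the quota so the create is
    # rejected with a 413 and check the explanation text.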
def _do_test_create_instance_above_quota(self, resource, allowed, quota,
expected_msg):
fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
self.body['server']['flavor_ref'] = 3
self.req.body = jsonutils.dumps(self.body)
try:
server = self.controller.create(self.req, self.body).obj['server']
self.fail('expected quota to be exceeded')
except webob.exc.HTTPRequestEntityTooLarge as e:
self.assertEquals(e.explanation, expected_msg)
def test_create_instance_above_quota_instances(self):
msg = _('Quota exceeded for instances: Requested 1, but'
' already used 10 of 10 instances')
self._do_test_create_instance_above_quota('instances', 0, 10, msg)
def test_create_instance_above_quota_ram(self):
msg = _('Quota exceeded for ram: Requested 4096, but'
' already used 8192 of 10240 ram')
self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
def test_create_instance_above_quota_cores(self):
msg = _('Quota exceeded for cores: Requested 2, but'
' already used 9 of 10 cores')
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
def test_create_instance_with_neutronv2_port_in_use(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortInUse(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
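# Tests for the XML deserializer used for server create requests: each test
# feeds in a raw XML document and checks the resulting request body dict.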
class TestServerCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestServerCreateRequestXMLDeserializer, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
servers_controller = servers.ServersController(extension_info=ext_info)
self.deserializer = servers.CreateDeserializer(servers_controller)
def test_minimal_request(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
},
}
self.assertEquals(request['body'], expected)
def test_xml_create_exception(self):
        def fake_availability_extract_xml_deserialize(
                self, server_node, server_dict):
            raise KeyError
self.stubs.Set(availability_zone.AvailabilityZone,
'server_xml_extract_server_deserialize',
                       fake_availability_extract_xml_deserialize)
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2"/>"""
self.assertRaises(KeyError, self.deserializer.deserialize,
serial_request)
def test_request_with_alternate_namespace_prefix(self):
serial_request = """
<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2">
<ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
</ns2:server>
"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
'metadata': {"hello": "world"},
},
}
self.assertEquals(request['body'], expected)
def test_access_ipv4(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2"
access_ip_v4="1.2.3.4"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
"access_ip_v4": "1.2.3.4",
},
}
self.assertEquals(request['body'], expected)
def test_access_ipv6(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2"
access_ip_v6="fead::1234"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
"access_ip_v6": "fead::1234",
},
}
self.assertEquals(request['body'], expected)
def test_access_ip(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2"
access_ip_v4="1.2.3.4"
access_ip_v6="fead::1234"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
},
}
self.assertEquals(request['body'], expected)
def test_admin_pass(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2"
admin_pass="1234"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
"admin_pass": "1234",
},
}
self.assertEquals(request['body'], expected)
def test_image_link(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="http://localhost:8774/v3/images/2"
flavor_ref="3"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "http://localhost:8774/v3/images/2",
"flavor_ref": "3",
},
}
self.assertEquals(request['body'], expected)
def test_flavor_link(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="http://localhost:8774/v3/flavors/3"/>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "http://localhost:8774/v3/flavors/3",
},
}
self.assertEquals(request['body'], expected)
def test_multiple_metadata_items(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test"
image_ref="1"
flavor_ref="2">
<metadata>
<meta key="one">two</meta>
<meta key="open">snack</meta>
</metadata>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
"metadata": {"one": "two", "open": "snack"},
},
}
self.assertEquals(request['body'], expected)
def test_spec_request(self):
image_bookmark_link = ("http://servers.api.openstack.org/1234/"
"images/52415800-8b69-11e0-9b19-734f6f006e54")
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
image_ref="%s"
flavor_ref="52415800-8b69-11e0-9b19-734f1195ff37"
name="new-server-test">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
</server>""" % (image_bookmark_link)
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
"name": "new-server-test",
"image_ref": ("http://servers.api.openstack.org/1234/"
"images/52415800-8b69-11e0-9b19-734f6f006e54"),
"flavor_ref": "52415800-8b69-11e0-9b19-734f1195ff37",
"metadata": {"My Server Name": "Apache1"},
},
}
self.assertEquals(request['body'], expected)
def test_request_with_empty_networks(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks/>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [],
}}
self.assertEquals(request['body'], expected)
def test_request_with_one_network(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="1" fixed_ip="10.0.1.12"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_two_networks(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="1" fixed_ip="10.0.1.12"/>
<network uuid="2" fixed_ip="10.0.2.12"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
{"uuid": "2", "fixed_ip": "10.0.2.12"}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_second_network_node_ignored(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="1" fixed_ip="10.0.1.12"/>
</networks>
<networks>
<network uuid="2" fixed_ip="10.0.2.12"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_one_network_missing_id(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network fixed_ip="10.0.1.12"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"fixed_ip": "10.0.1.12"}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_one_network_missing_fixed_ip(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="1"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "1"}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_one_network_empty_id(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="" fixed_ip="10.0.1.12"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "", "fixed_ip": "10.0.1.12"}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_one_network_empty_fixed_ip(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="1" fixed_ip=""/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "1", "fixed_ip": ""}],
}}
self.assertEquals(request['body'], expected)
def test_request_with_networks_duplicate_ids(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
name="new-server-test" image_ref="1" flavor_ref="1">
<networks>
<network uuid="1" fixed_ip="10.0.1.12"/>
<network uuid="1" fixed_ip="10.0.2.12"/>
</networks>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
{"uuid": "1", "fixed_ip": "10.0.2.12"}],
}}
self.assertEquals(request['body'], expected)
def test_corrupt_xml(self):
"""Should throw a 400 error on corrupt xml."""
self.assertRaises(
exception.MalformedRequestBody,
self.deserializer.deserialize,
utils.killer_xml_body())
class TestAddressesXMLSerialization(test.TestCase):
index_serializer = ips.AddressesTemplate()
show_serializer = ips.NetworkTemplate()
def test_xml_declaration(self):
fixture = {
'network_2': [
{'addr': '192.168.0.1', 'version': 4,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'addr': 'fe80::beef', 'version': 6,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
],
}
output = self.show_serializer.serialize(fixture)
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_show(self):
fixture = {
'network_2': [
{'addr': '192.168.0.1', 'version': 4,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'addr': 'fe80::beef', 'version': 6,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
],
}
output = self.show_serializer.serialize(fixture)
root = etree.XML(output)
network = fixture['network_2']
self.assertEqual(str(root.get('id')), 'network_2')
ip_elems = root.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
def test_index(self):
fixture = {
'addresses': {
'network_1': [
{'addr': '192.168.0.3', 'version': 4,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'addr': '192.168.0.5', 'version': 4,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
],
'network_2': [
{'addr': '192.168.0.1', 'version': 4,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'addr': 'fe80::beef', 'version': 6,
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
],
},
}
output = self.index_serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'addresses', version='v3')
addresses_dict = fixture['addresses']
network_elems = root.findall('{0}network'.format(NS))
self.assertEqual(len(network_elems), 2)
for i, network_elem in enumerate(network_elems):
network = addresses_dict.items()[i]
self.assertEqual(str(network_elem.get('id')), str(network[0]))
ip_elems = network_elem.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[1][z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
class ServersViewBuilderTest(test.TestCase):
def setUp(self):
super(ServersViewBuilderTest, self).setUp()
CONF.set_override('glance_host', 'localhost')
self.flags(use_ipv6=True)
self.instance = fakes.stub_instance(
id=1,
image_ref="5",
uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
display_name="test_server",
include_fake_metadata=False)
privates = ['172.19.0.1']
publics = ['192.168.0.3']
public6s = ['b33f::fdee:ddff:fecc:bbaa']
def nw_info(*args, **kwargs):
return [(None, {'label': 'public',
'ips': [dict(ip=ip) for ip in publics],
'ip6s': [dict(ip=ip) for ip in public6s]}),
(None, {'label': 'private',
'ips': [dict(ip=ip) for ip in privates]})]
def floaters(*args, **kwargs):
return []
fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
floaters)
self.uuid = self.instance['uuid']
self.view_builder = views.servers.ViewBuilderV3()
self.request = fakes.HTTPRequestV3.blank("")
def test_get_flavor_valid_instance_type(self):
flavor_bookmark = "http://localhost/flavors/1"
expected = {"id": "1",
"links": [{"rel": "bookmark",
"href": flavor_bookmark}]}
result = self.view_builder._get_flavor(self.request, self.instance)
self.assertEqual(result, expected)
def test_build_server(self):
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": "http://localhost/v3/servers/%s" %
self.uuid,
},
{
"rel": "bookmark",
"href": "http://localhost/servers/%s" % self.uuid,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail(self):
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"access_ip_v4": "",
"access_ip_v6": "",
"host_id": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_no_image(self):
self.instance["image_ref"] = ""
output = self.view_builder.show(self.request, self.instance)
self.assertEqual(output['server']['image'], "")
def test_build_server_detail_with_fault(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = {
'code': 404,
'instance_uuid': self.uuid,
'message': "HTTPNotFound",
'details': "Stock details for test",
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"name": "test_server",
"status": "ERROR",
"access_ip_v4": "",
"access_ip_v6": "",
"host_id": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
"fault": {
"code": 404,
"created": "2010-10-10T12:00:00Z",
"message": "HTTPNotFound",
"details": "Stock details for test",
},
}
}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = {
'code': 500,
'instance_uuid': self.uuid,
'message': "Error",
'details': 'Stock details for test',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = {
'code': 500,
'instance_uuid': self.uuid,
'message': "Error",
'details': 'Stock details for test',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error",
'details': 'Stock details for test'}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = {
'code': 500,
'instance_uuid': self.uuid,
'message': "Error",
'details': '',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
self.instance['fault'] = {
'code': 404,
'instance_uuid': self.uuid,
'message': "HTTPNotFound",
'details': "Stock details for test",
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
output = self.view_builder.show(self.request, self.instance)
self.assertFalse('fault' in output['server'])
def test_build_server_detail_active_status(self):
        # Set the power state of the instance to running.
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
"name": "test_server",
"status": "ACTIVE",
"access_ip_v4": "",
"access_ip_v6": "",
"host_id": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_accessipv4(self):
self.instance['access_ip_v4'] = '1.2.3.4'
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"host_id": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
]
},
"metadata": {},
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "",
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_accessipv6(self):
self.instance['access_ip_v6'] = 'fead::1234'
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"host_id": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
]
},
"metadata": {},
"access_ip_v4": "",
"access_ip_v6": "fead::1234",
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_metadata(self):
metadata = []
metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
self.instance['metadata'] = metadata
image_bookmark = "http://localhost:9292/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"access_ip_v4": "",
"access_ip_v6": "",
"host_id": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
]
},
"metadata": {"Open": "Stack"},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
class ServerXMLSerializationTest(test.TestCase):
TIMESTAMP = "2010-10-11T10:30:22Z"
SERVER_HREF = 'http://localhost/v3/servers/%s' % FAKE_UUID
SERVER_NEXT = 'http://localhost/v3/servers?limit=%s&marker=%s'
SERVER_BOOKMARK = 'http://localhost/servers/%s' % FAKE_UUID
IMAGE_BOOKMARK = 'http://localhost:9292/images/5'
FLAVOR_BOOKMARK = 'http://localhost/flavors/1'
def test_xml_declaration(self):
serializer = servers.ServerTemplate()
fixture = {
"server": {
'id': FAKE_UUID,
'user_id': 'fake_user_id',
'tenant_id': 'fake_tenant_id',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"host_id": 'e4d909c290d0fb1ca068ffaddf22cbd0',
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": self.IMAGE_BOOKMARK,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": self.FLAVOR_BOOKMARK,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
"network_two": [
{
"version": 4,
"addr": "67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Open": "Stack",
"Number": "1",
},
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
}
}
output = serializer.serialize(fixture)
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_show(self):
serializer = servers.ServerTemplate()
fixture = {
"server": {
"id": FAKE_UUID,
"user_id": "fake",
"tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"host_id": 'e4d909c290d0fb1ca068ffaddf22cbd0',
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": self.IMAGE_BOOKMARK,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": self.FLAVOR_BOOKMARK,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
"network_two": [
{
"version": 4,
"addr": "67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Open": "Stack",
"Number": "1",
},
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
}
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'server', version='v3')
server_dict = fixture['server']
for key in ['name', 'id', 'created', 'access_ip_v4',
'updated', 'progress', 'status', 'host_id',
'access_ip_v6']:
self.assertEqual(root.get(key), str(server_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = server_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
image_root = root.find('{0}image'.format(NS))
self.assertEqual(image_root.get('id'), server_dict['image']['id'])
link_nodes = image_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['image']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
flavor_root = root.find('{0}flavor'.format(NS))
self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['flavor']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
addresses_root = root.find('{0}addresses'.format(NS))
addresses_dict = server_dict['addresses']
network_elems = addresses_root.findall('{0}network'.format(NS))
self.assertEqual(len(network_elems), 2)
for i, network_elem in enumerate(network_elems):
network = addresses_dict.items()[i]
self.assertEqual(str(network_elem.get('id')), str(network[0]))
ip_elems = network_elem.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[1][z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
def test_create(self):
serializer = servers.FullServerTemplate()
fixture = {
"server": {
"id": FAKE_UUID,
"user_id": "fake",
"tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"host_id": "e4d909c290d0fb1ca068ffaddf22cbd0",
"admin_pass": "test_password",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": self.IMAGE_BOOKMARK,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": self.FLAVOR_BOOKMARK,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
"network_two": [
{
"version": 4,
"addr": "67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Open": "Stack",
"Number": "1",
},
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
}
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'server', version='v3')
server_dict = fixture['server']
for key in ['name', 'id', 'created', 'access_ip_v4',
'updated', 'progress', 'status', 'host_id',
'access_ip_v6', 'admin_pass']:
self.assertEqual(root.get(key), str(server_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = server_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
image_root = root.find('{0}image'.format(NS))
self.assertEqual(image_root.get('id'), server_dict['image']['id'])
link_nodes = image_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['image']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
flavor_root = root.find('{0}flavor'.format(NS))
self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['flavor']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
addresses_root = root.find('{0}addresses'.format(NS))
addresses_dict = server_dict['addresses']
network_elems = addresses_root.findall('{0}network'.format(NS))
self.assertEqual(len(network_elems), 2)
for i, network_elem in enumerate(network_elems):
network = addresses_dict.items()[i]
self.assertEqual(str(network_elem.get('id')), str(network[0]))
ip_elems = network_elem.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[1][z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
def test_index(self):
serializer = servers.MinimalServersTemplate()
uuid1 = fakes.get_fake_uuid(1)
uuid2 = fakes.get_fake_uuid(2)
expected_server_href = 'http://localhost/v3/servers/%s' % uuid1
expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
expected_server_href_2 = 'http://localhost/v3/servers/%s' % uuid2
expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
fixture = {"servers": [
{
"id": fakes.get_fake_uuid(1),
"name": "test_server",
'links': [
{
'href': expected_server_href,
'rel': 'self',
},
{
'href': expected_server_bookmark,
'rel': 'bookmark',
},
],
},
{
"id": fakes.get_fake_uuid(2),
"name": "test_server_2",
'links': [
{
'href': expected_server_href_2,
'rel': 'self',
},
{
'href': expected_server_bookmark_2,
'rel': 'bookmark',
},
],
},
]}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'servers_index', version='v3')
server_elems = root.findall('{0}server'.format(NS))
self.assertEqual(len(server_elems), 2)
for i, server_elem in enumerate(server_elems):
server_dict = fixture['servers'][i]
for key in ['name', 'id']:
self.assertEqual(server_elem.get(key), str(server_dict[key]))
link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_index_with_servers_links(self):
serializer = servers.MinimalServersTemplate()
uuid1 = fakes.get_fake_uuid(1)
uuid2 = fakes.get_fake_uuid(2)
expected_server_href = 'http://localhost/v3/servers/%s' % uuid1
expected_server_next = self.SERVER_NEXT % (2, 2)
expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
expected_server_href_2 = 'http://localhost/v3/servers/%s' % uuid2
expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
fixture = {"servers": [
{
"id": fakes.get_fake_uuid(1),
"name": "test_server",
'links': [
{
'href': expected_server_href,
'rel': 'self',
},
{
'href': expected_server_bookmark,
'rel': 'bookmark',
},
],
},
{
"id": fakes.get_fake_uuid(2),
"name": "test_server_2",
'links': [
{
'href': expected_server_href_2,
'rel': 'self',
},
{
'href': expected_server_bookmark_2,
'rel': 'bookmark',
},
],
},
],
"servers_links": [
{
'rel': 'next',
'href': expected_server_next,
},
]}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'servers_index', version='v3')
server_elems = root.findall('{0}server'.format(NS))
self.assertEqual(len(server_elems), 2)
for i, server_elem in enumerate(server_elems):
server_dict = fixture['servers'][i]
for key in ['name', 'id']:
self.assertEqual(server_elem.get(key), str(server_dict[key]))
link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
# Check servers_links
servers_links = root.findall('{0}link'.format(ATOMNS))
for i, link in enumerate(fixture['servers_links']):
for key, value in link.items():
self.assertEqual(servers_links[i].get(key), value)
def test_detail(self):
serializer = servers.ServersTemplate()
uuid1 = fakes.get_fake_uuid(1)
expected_server_href = 'http://localhost/v3/servers/%s' % uuid1
expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
expected_image_bookmark = self.IMAGE_BOOKMARK
expected_flavor_bookmark = self.FLAVOR_BOOKMARK
uuid2 = fakes.get_fake_uuid(2)
expected_server_href_2 = 'http://localhost/v3/servers/%s' % uuid2
expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
fixture = {"servers": [
{
"id": fakes.get_fake_uuid(1),
"user_id": "fake",
"tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"host_id": 'e4d909c290d0fb1ca068ffaddf22cbd0',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": expected_image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": expected_flavor_bookmark,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Number": "1",
},
"links": [
{
"href": expected_server_href,
"rel": "self",
},
{
"href": expected_server_bookmark,
"rel": "bookmark",
},
],
},
{
"id": fakes.get_fake_uuid(2),
"user_id": 'fake',
"tenant_id": 'fake',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 100,
"name": "test_server_2",
"status": "ACTIVE",
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"host_id": 'e4d909c290d0fb1ca068ffaddf22cbd0',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": expected_image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": expected_flavor_bookmark,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Number": "2",
},
"links": [
{
"href": expected_server_href_2,
"rel": "self",
},
{
"href": expected_server_bookmark_2,
"rel": "bookmark",
},
],
},
]}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'servers', version='v3')
server_elems = root.findall('{0}server'.format(NS))
self.assertEqual(len(server_elems), 2)
for i, server_elem in enumerate(server_elems):
server_dict = fixture['servers'][i]
for key in ['name', 'id', 'created', 'access_ip_v4',
'updated', 'progress', 'status', 'host_id',
'access_ip_v6']:
self.assertEqual(server_elem.get(key), str(server_dict[key]))
link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = server_elem.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = server_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(),
str(meta_value))
image_root = server_elem.find('{0}image'.format(NS))
self.assertEqual(image_root.get('id'), server_dict['image']['id'])
link_nodes = image_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['image']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
flavor_root = server_elem.find('{0}flavor'.format(NS))
self.assertEqual(flavor_root.get('id'),
server_dict['flavor']['id'])
link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['flavor']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
addresses_root = server_elem.find('{0}addresses'.format(NS))
addresses_dict = server_dict['addresses']
network_elems = addresses_root.findall('{0}network'.format(NS))
for i, network_elem in enumerate(network_elems):
network = addresses_dict.items()[i]
self.assertEqual(str(network_elem.get('id')), str(network[0]))
ip_elems = network_elem.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[1][z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
def test_update(self):
serializer = servers.ServerTemplate()
fixture = {
"server": {
"id": FAKE_UUID,
"user_id": "fake",
"tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"host_id": 'e4d909c290d0fb1ca068ffaddf22cbd0',
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": self.IMAGE_BOOKMARK,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": self.FLAVOR_BOOKMARK,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
"network_two": [
{
"version": 4,
"addr": "67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Open": "Stack",
"Number": "1",
},
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
"fault": {
"code": 500,
"created": self.TIMESTAMP,
"message": "Error Message",
"details": "Fault details",
}
}
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'server', version='v3')
server_dict = fixture['server']
for key in ['name', 'id', 'created', 'access_ip_v4',
'updated', 'progress', 'status', 'host_id',
'access_ip_v6']:
self.assertEqual(root.get(key), str(server_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = server_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
image_root = root.find('{0}image'.format(NS))
self.assertEqual(image_root.get('id'), server_dict['image']['id'])
link_nodes = image_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['image']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
flavor_root = root.find('{0}flavor'.format(NS))
self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['flavor']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
addresses_root = root.find('{0}addresses'.format(NS))
addresses_dict = server_dict['addresses']
network_elems = addresses_root.findall('{0}network'.format(NS))
self.assertEqual(len(network_elems), 2)
for i, network_elem in enumerate(network_elems):
network = addresses_dict.items()[i]
self.assertEqual(str(network_elem.get('id')), str(network[0]))
ip_elems = network_elem.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[1][z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
fault_root = root.find('{0}fault'.format(NS))
fault_dict = server_dict['fault']
self.assertEqual(fault_root.get("code"), str(fault_dict["code"]))
self.assertEqual(fault_root.get("created"), fault_dict["created"])
msg_elem = fault_root.find('{0}message'.format(NS))
self.assertEqual(msg_elem.text, fault_dict["message"])
det_elem = fault_root.find('{0}details'.format(NS))
self.assertEqual(det_elem.text, fault_dict["details"])
def test_action(self):
serializer = servers.FullServerTemplate()
fixture = {
"server": {
"id": FAKE_UUID,
"user_id": "fake",
"tenant_id": "fake",
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
"progress": 0,
"name": "test_server",
"status": "BUILD",
"access_ip_v4": "1.2.3.4",
"access_ip_v6": "fead::1234",
"host_id": "e4d909c290d0fb1ca068ffaddf22cbd0",
"admin_pass": "test_password",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": self.IMAGE_BOOKMARK,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": self.FLAVOR_BOOKMARK,
},
],
},
"addresses": {
"network_one": [
{
"version": 4,
"addr": "67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.138",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
"network_two": [
{
"version": 4,
"addr": "67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
{
"version": 6,
"addr": "::babe:67.23.10.139",
"type": "fixed",
"mac_addr": "aa:aa:aa:aa:aa:aa"
},
],
},
"metadata": {
"Open": "Stack",
"Number": "1",
},
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
}
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'server', version='v3')
server_dict = fixture['server']
for key in ['name', 'id', 'created', 'access_ip_v4',
'updated', 'progress', 'status', 'host_id',
'access_ip_v6', 'admin_pass']:
self.assertEqual(root.get(key), str(server_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(server_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = server_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
image_root = root.find('{0}image'.format(NS))
self.assertEqual(image_root.get('id'), server_dict['image']['id'])
link_nodes = image_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['image']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
flavor_root = root.find('{0}flavor'.format(NS))
self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 1)
for i, link in enumerate(server_dict['flavor']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
addresses_root = root.find('{0}addresses'.format(NS))
addresses_dict = server_dict['addresses']
network_elems = addresses_root.findall('{0}network'.format(NS))
self.assertEqual(len(network_elems), 2)
for i, network_elem in enumerate(network_elems):
network = addresses_dict.items()[i]
self.assertEqual(str(network_elem.get('id')), str(network[0]))
ip_elems = network_elem.findall('{0}ip'.format(NS))
for z, ip_elem in enumerate(ip_elems):
ip = network[1][z]
self.assertEqual(str(ip_elem.get('version')),
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
self.assertEqual(str(ip_elem.get('type')),
str(ip['type']))
self.assertEqual(str(ip_elem.get('mac_addr')),
str(ip['mac_addr']))
class ServersAllExtensionsTestCase(test.TestCase):
"""
Servers tests using default API router with all extensions enabled.
The intent here is to catch cases where extensions end up throwing
an exception because of a malformed request before the core API
gets a chance to validate the request and return a 422 response.
For example, ServerDiskConfigController extends servers.Controller:
@wsgi.extends
def create(self, req, body):
if 'server' in body:
self._set_disk_config(body['server'])
resp_obj = (yield)
self._show(req, resp_obj)
we want to ensure that the extension isn't barfing on an invalid
body.
"""
def setUp(self):
super(ServersAllExtensionsTestCase, self).setUp()
self.app = compute.APIRouterV3()
def test_create_missing_server(self):
# Test create with malformed body.
def fake_create(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
self.stubs.Set(compute_api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(422, res.status_int)
def test_update_missing_server(self):
        # Test update with malformed body.
def fake_update(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
        self.stubs.Set(compute_api.API, 'update', fake_update)
req = fakes.HTTPRequestV3.blank('/servers/1')
req.method = 'PUT'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(422, res.status_int)
class ServersUnprocessableEntityTestCase(test.TestCase):
"""
    Tests for the places where we return 422 Unprocessable Entity.
"""
def setUp(self):
super(ServersUnprocessableEntityTestCase, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def _unprocessable_server_create(self, body):
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
def test_create_server_no_body(self):
self._unprocessable_server_create(body=None)
def test_create_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._unprocessable_server_create(body=body)
def test_create_server_malformed_entity(self):
body = {'server': 'string'}
self._unprocessable_server_create(body=body)
def _unprocessable_server_update(self, body):
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
req.method = 'PUT'
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update, req, FAKE_UUID, body)
def test_update_server_no_body(self):
self._unprocessable_server_update(body=None)
def test_update_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._unprocessable_server_update(body=body)
def test_create_update_malformed_entity(self):
body = {'server': 'string'}
self._unprocessable_server_update(body=body)
|
py | 1a5549241b6aa9cd829726d306f3212ca51e2235 | """
National Control Devices
http://www.controlanything.com/
The Complete ProXR Command Set:
http://www.controlanything.com/Relay/Device/A0010
http://assets.controlanything.com/manuals/ProXR.pdf
"""
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pychron.hardware.core.core_device import CoreDevice
# ============= standard library imports ========================
# ============= local library imports ==========================
class NCDDevice(CoreDevice):
def initialize(self, *args, **kw):
super(NCDDevice, self).initialize(*args, **kw)
if self.communicator:
self.communicator.write_terminator = None
return True
    def _make_cmdstr(self, *args):
        # Pack the integer command arguments into the raw byte string the
        # ProXR command set expects on the wire.
        return bytes([int(a) for a in args])
# formatter = lambda x:chr(x)
# return b''.join(map(chr, args))
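# --- Illustrative usage (added for documentation; hypothetical) --------------
# A hedged sketch of how a subclass might assemble a ProXR-style command with
# _make_cmdstr(); the byte values are placeholders, NOT verified ProXR command
# codes -- see the ProXR manual linked in the module docstring.
#
#     class RelayBoard(NCDDevice):
#         def turn_on_relay(self, relay_index):
#             # hypothetical header/command bytes, for illustration only
#             cmd = self._make_cmdstr(254, 108, relay_index)
#             # e.g. _make_cmdstr(254, 108, 1) == b'\xfe\x6c\x01'
#             return cmd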
# ============= EOF =============================================
|
py | 1a554986ea41e6501edc2c6fffbe5cb71052d6d7 | try: from Tkinter import *
except ImportError: from tkinter import *
win = Tk()
win.title('Reality - Game')
win.iconbitmap(r'C:\Windows\System32')  # NOTE: iconbitmap expects a path to an .ico file
win.geometry('400x200+100+100')
from os import startfile as s
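# Each handler below opens one chapter file of the game via os.startfile;
# the Korean labels/filenames mean: prologue, episodes 1-8, epilogue.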
fungtion_0 = lambda : s('R프롤로그')
fungtion_1 = lambda : s('R1화')
fungtion_2 = lambda : s('R2화')
fungtion_3 = lambda : s('R3화')
fungtion_4 = lambda : s('R4화')
fungtion_5 = lambda : s('R5화')
fungtion_6 = lambda : s('R6화')
fungtion_7 = lambda : s('R7화')
fungtion_8 = lambda : s('R8화')
fungtion_9 = lambda : s('R에필로그')
btn_0 = Button(win, text = '프롤로그', command = fungtion_0)
btn_0.pack()
btn_1 = Button(win, text = '1화', command = fungtion_1)
btn_1.pack()
btn_2 = Button(win, text = '2화', command = fungtion_2)
btn_2.pack()
btn_3 = Button(win, text = '3화', command = fungtion_3)
btn_3.pack()
btn_4 = Button(win, text = '4화', command = fungtion_4)
btn_4.pack()
btn_5 = Button(win, text = '5화', command = fungtion_5)
btn_5.pack()
btn_6 = Button(win, text = '6화', command = fungtion_6)
btn_6.pack()
btn_7 = Button(win, text = '7화', command = fungtion_7)
btn_7.pack()
btn_8 = Button(win, text = '8화', command = fungtion_8)
btn_8.pack()
btn_9 = Button(win, text = '에필로그', command = fungtion_9)
btn_9.pack()
win.mainloop() |
py | 1a5549be92cc47511f207fc15e1aa6a7f0369a45 | from gym.spaces import Discrete, Box, MultiDiscrete, Space
import numpy as np
import tree
from typing import Union, Optional
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils import force_tuple
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
TensorType
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class Random(Exploration):
"""A random action selector (deterministic/greedy for explore=False).
If explore=True, returns actions randomly from `self.action_space` (via
Space.sample()).
If explore=False, returns the greedy/max-likelihood action.
"""
def __init__(self, action_space: Space, *, model: ModelV2,
framework: Optional[str], **kwargs):
"""Initialize a Random Exploration object.
Args:
action_space (Space): The gym action space used by the environment.
            model (ModelV2): The policy model; only its model config is
                consulted here (to infer the required action output shape).
            framework (Optional[str]): One of None, "tf", "tfe", "torch".
"""
super().__init__(
action_space=action_space,
model=model,
framework=framework,
**kwargs)
self.action_space_struct = get_base_struct_from_space(
self.action_space)
@override(Exploration)
def get_exploration_action(self,
*,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True):
# Instantiate the distribution object.
if self.framework in ["tf2", "tf", "tfe"]:
return self.get_tf_exploration_action_op(action_distribution,
explore)
else:
return self.get_torch_exploration_action(action_distribution,
explore)
def get_tf_exploration_action_op(
self, action_dist: ActionDistribution,
explore: Optional[Union[bool, TensorType]]):
def true_fn():
batch_size = 1
req = force_tuple(
action_dist.required_model_output_shape(
self.action_space, self.model.model_config))
# Add a batch dimension?
if len(action_dist.inputs.shape) == len(req) + 1:
batch_size = tf.shape(action_dist.inputs)[0]
# Function to produce random samples from primitive space
# components: (Multi)Discrete or Box.
def random_component(component):
if isinstance(component, Discrete):
return tf.random.uniform(
shape=(batch_size, ) + component.shape,
maxval=component.n,
dtype=component.dtype)
elif isinstance(component, MultiDiscrete):
return tf.concat(
[
tf.random.uniform(
shape=(batch_size, 1),
maxval=n,
dtype=component.dtype) for n in component.nvec
],
axis=1)
elif isinstance(component, Box):
if component.bounded_above.all() and \
component.bounded_below.all():
return tf.random.uniform(
shape=(batch_size, ) + component.shape,
minval=component.low,
maxval=component.high,
dtype=component.dtype)
else:
return tf.random.normal(
shape=(batch_size, ) + component.shape,
dtype=component.dtype)
actions = tree.map_structure(random_component,
self.action_space_struct)
return actions
def false_fn():
return action_dist.deterministic_sample()
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool) else explore,
true_fn=true_fn,
false_fn=false_fn)
# TODO(sven): Move into (deterministic_)sample(logp=True|False)
batch_size = tf.shape(tree.flatten(action)[0])[0]
logp = tf.zeros(shape=(batch_size, ), dtype=tf.float32)
return action, logp
def get_torch_exploration_action(self, action_dist: ActionDistribution,
explore: bool):
if explore:
req = force_tuple(
action_dist.required_model_output_shape(
self.action_space, self.model.model_config))
# Add a batch dimension?
if len(action_dist.inputs.shape) == len(req) + 1:
batch_size = action_dist.inputs.shape[0]
a = np.stack(
[self.action_space.sample() for _ in range(batch_size)])
else:
a = self.action_space.sample()
# Convert action to torch tensor.
action = torch.from_numpy(a).to(self.device)
else:
action = action_dist.deterministic_sample()
logp = torch.zeros(
(action.size()[0], ), dtype=torch.float32, device=self.device)
return action, logp
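# --- Usage sketch (added for documentation; not part of the original module) -
# A hedged example of selecting this exploration class through a trainer
# config. The trainer import, env name, and config keys below are assumptions
# that may differ across Ray/RLlib versions; treat this as a sketch only.
#
#     from ray.rllib.agents.ppo import PPOTrainer
#
#     config = {
#         "env": "CartPole-v0",
#         # Route action selection through the Random exploration above;
#         # calls with explore=False still return the deterministic sample.
#         "exploration_config": {"type": "Random"},
#     }
#     trainer = PPOTrainer(config=config)
#     print(trainer.train()["episode_reward_mean"])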
|
py | 1a5549dc7a5f3970f0c692fdfca8b471aa330a5c | """Compare the standard bound with the h-mitigator bound."""
from math import nan
from timeit import default_timer as timer
from typing import Tuple
from nc_operations.perform_enum import PerformEnum
from optimization.initial_simplex import InitialSimplex
from optimization.opt_method import OptMethod
from optimization.optimize import Optimize
from h_mitigator.optimize_mitigator import OptimizeMitigator
from h_mitigator.setting_mitigator import SettingMitigator
def compare_mitigator(setting: SettingMitigator,
opt_method: OptMethod,
number_l=1) -> Tuple[float, float]:
"""Compare standard_bound with the new Lyapunov standard_bound."""
if opt_method == OptMethod.GRID_SEARCH:
delta_val = 0.1
theta_bounds = [(delta_val, 4.0)]
standard_bound = Optimize(setting=setting, number_param=1).grid_search(
grid_bounds=theta_bounds, delta=delta_val)
bound_array = theta_bounds[:]
for _i in range(1, number_l + 1):
bound_array.append((1.0 + delta_val, 4.0))
h_mit_bound = OptimizeMitigator(setting_h_mit=setting,
number_param=number_l + 1).grid_search(
grid_bounds=bound_array,
delta=delta_val)
elif opt_method == OptMethod.PATTERN_SEARCH:
theta_start = 0.5
start_list = [theta_start]
standard_bound = Optimize(setting=setting,
number_param=1).pattern_search(
start_list=start_list,
delta=3.0,
delta_min=0.01)
start_list_new = [theta_start] + [1.0] * number_l
h_mit_bound = OptimizeMitigator(setting_h_mit=setting,
number_param=number_l +
1).pattern_search(
start_list=start_list_new,
delta=3.0,
delta_min=0.01)
# This part is there to overcome opt_method issues
if h_mit_bound > standard_bound:
h_mit_bound = standard_bound
elif opt_method == OptMethod.NELDER_MEAD:
theta_start = 0.5
start_list = [theta_start]
start_simplex = InitialSimplex(parameters_to_optimize=1).gao_han(
start_list=start_list)
standard_bound = Optimize(setting=setting, number_param=1).nelder_mead(
simplex=start_simplex, sd_min=10**(-2))
start_list_new = [theta_start] + [1.0] * number_l
start_simplex_new = InitialSimplex(parameters_to_optimize=number_l +
1).gao_han(
start_list=start_list_new)
h_mit_bound = OptimizeMitigator(setting_h_mit=setting,
number_param=number_l + 1).nelder_mead(
simplex=start_simplex_new,
sd_min=10**(-2))
# This part is there to overcome opt_method issues
if h_mit_bound > standard_bound:
h_mit_bound = standard_bound
elif opt_method == OptMethod.BASIN_HOPPING:
theta_start = 0.5
start_list = [theta_start]
standard_bound = Optimize(
setting=setting,
number_param=1).basin_hopping(start_list=start_list)
start_list_new = [theta_start] + [1.0] * number_l
h_mit_bound = OptimizeMitigator(
setting_h_mit=setting,
number_param=number_l + 1).basin_hopping(start_list=start_list_new)
# This part is there to overcome opt_method issues
if h_mit_bound > standard_bound:
h_mit_bound = standard_bound
elif opt_method == OptMethod.DUAL_ANNEALING:
theta_bounds = [(0.1, 4.0)]
standard_bound = Optimize(
setting=setting,
number_param=1).dual_annealing(bound_list=theta_bounds)
bound_array = theta_bounds[:]
for _i in range(1, number_l + 1):
bound_array.append((0.9, 4.0))
h_mit_bound = OptimizeMitigator(
setting_h_mit=setting,
number_param=number_l + 1).dual_annealing(bound_list=bound_array)
# This part is there to overcome opt_method issues
if h_mit_bound > standard_bound:
h_mit_bound = standard_bound
elif opt_method == OptMethod.DIFFERENTIAL_EVOLUTION:
theta_bounds = [(0.1, 8.0)]
standard_bound = Optimize(
setting=setting,
number_param=1).diff_evolution(bound_list=theta_bounds)
bound_array = theta_bounds[:]
for _i in range(1, number_l + 1):
bound_array.append((0.9, 8.0))
h_mit_bound = OptimizeMitigator(
setting_h_mit=setting,
number_param=number_l + 1).diff_evolution(bound_list=bound_array)
else:
raise NameError(
f"Optimization parameter {opt_method.name} is infeasible")
# Guard against optimizer convergence issues: the mitigated bound must not exceed the standard bound
if h_mit_bound > standard_bound:
h_mit_bound = standard_bound
if standard_bound == 0 or h_mit_bound == 0:
standard_bound = nan
h_mit_bound = nan
return standard_bound, h_mit_bound
def compare_time(setting: SettingMitigator,
opt_method: OptMethod,
number_l=1) -> tuple:
"""Compare computation times."""
if opt_method == OptMethod.GRID_SEARCH:
bound_array = [(0.1, 4.0)]
start = timer()
Optimize(setting=setting,
number_param=1).grid_search(grid_bounds=bound_array,
delta=0.1)
stop = timer()
time_standard = stop - start
for _ in range(1, number_l + 1):
bound_array.append((0.9, 4.0))
start = timer()
OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
1).grid_search(grid_bounds=bound_array, delta=0.1)
stop = timer()
time_lyapunov = stop - start
elif opt_method == OptMethod.PATTERN_SEARCH:
start_list = [0.5]
start = timer()
Optimize(setting=setting,
number_param=1).pattern_search(start_list=start_list,
delta=3.0,
delta_min=0.01)
stop = timer()
time_standard = stop - start
start_list = [0.5] + [1.0] * number_l
start = timer()
OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
1).pattern_search(start_list=start_list,
delta=3.0,
delta_min=0.01)
stop = timer()
time_lyapunov = stop - start
elif opt_method == OptMethod.NELDER_MEAD:
start_simplex = InitialSimplex(parameters_to_optimize=1).uniform_dist(
max_theta=1.0)
start = timer()
Optimize(setting=setting,
number_param=1).nelder_mead(simplex=start_simplex,
sd_min=10**(-2))
stop = timer()
time_standard = stop - start
start_simplex_new = InitialSimplex(parameters_to_optimize=number_l +
1).uniform_dist(max_theta=1.0,
max_l=2.0)
start = timer()
OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
1).nelder_mead(simplex=start_simplex_new,
sd_min=10**(-2))
stop = timer()
time_lyapunov = stop - start
elif opt_method == OptMethod.DUAL_ANNEALING:
bound_array = [(0.1, 4.0)]
start = timer()
Optimize(setting=setting,
number_param=1).dual_annealing(bound_list=bound_array)
stop = timer()
time_standard = stop - start
for _ in range(1, number_l + 1):
bound_array.append((0.9, 4.0))
start = timer()
OptimizeMitigator(setting_h_mit=setting, number_param=number_l +
1).dual_annealing(bound_list=bound_array)
stop = timer()
time_lyapunov = stop - start
else:
raise NameError(
f"Optimization parameter {opt_method.name} is infeasible")
return time_standard, time_lyapunov
if __name__ == '__main__':
from nc_arrivals.iid import DM1
from nc_server.constant_rate_server import ConstantRateServer
from utils.perform_parameter import PerformParameter
from h_mitigator.fat_cross_perform import FatCrossPerform
from h_mitigator.single_server_mit_perform import SingleServerMitPerform
OUTPUT_TIME = PerformParameter(perform_metric=PerformEnum.OUTPUT, value=4)
SETTING1 = SingleServerMitPerform(arr_list=[DM1(lamb=4.4)],
server=ConstantRateServer(rate=0.24),
perform_param=OUTPUT_TIME)
# print(
# compare_mitigator(
# setting=SETTING1, opt_method=OptMethod.GRID_SEARCH,
# print_x=True))
DELAY_PROB = PerformParameter(perform_metric=PerformEnum.DELAY_PROB,
value=4)
ARR_LIST = [DM1(lamb=11.0), DM1(lamb=9.0)]
SER_LIST = [ConstantRateServer(rate=5.0), ConstantRateServer(rate=4.0)]
SETTING2 = FatCrossPerform(arr_list=ARR_LIST,
ser_list=SER_LIST,
perform_param=DELAY_PROB)
print("compare bounds\n")
print(compare_mitigator(setting=SETTING2,
opt_method=OptMethod.GRID_SEARCH))
print(
compare_mitigator(setting=SETTING2,
opt_method=OptMethod.PATTERN_SEARCH))
print(
compare_mitigator(setting=SETTING2,
opt_method=OptMethod.DUAL_ANNEALING))
print("\ncompare runtimes\n")
print(
compare_time(setting=SETTING2,
opt_method=OptMethod.GRID_SEARCH,
number_l=1))
print(
compare_time(setting=SETTING2,
opt_method=OptMethod.DUAL_ANNEALING,
number_l=1))
|
py | 1a554a4ea4a02938e607b9481d1c39d1896e7126 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAffine(PythonPackage):
"""Matrices describing affine transformation of the plane."""
homepage = "https://github.com/sgillies/affine"
url = "https://github.com/sgillies/affine/archive/2.1.0.zip"
depends_on('py-setuptools', type='build')
version('2.1.0', sha256='b67b7dee9a9865185a931758a3e347ad8583d0ac985895b90985a477ccfa4745')
|
py | 1a554a7a31de937d943bd823ad4598fb9d500240 | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.asset_device_registration_relationship import AssetDeviceRegistrationRelationship
globals()['AssetDeviceRegistrationRelationship'] = AssetDeviceRegistrationRelationship
class NiatelemetryLcAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'NIATELEMETRY.LC': "niatelemetry.Lc",
},
('object_type',): {
'NIATELEMETRY.LC': "niatelemetry.Lc",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'description': (str,), # noqa: E501
'dn': (str,), # noqa: E501
'hardware_version': (str,), # noqa: E501
'model': (str,), # noqa: E501
'node_id': (int,), # noqa: E501
'operational_state': (str,), # noqa: E501
'power_state': (str,), # noqa: E501
'record_type': (str,), # noqa: E501
'record_version': (str,), # noqa: E501
'redundancy_state': (str,), # noqa: E501
'serial_number': (str,), # noqa: E501
'site_name': (str,), # noqa: E501
'vid': (str,), # noqa: E501
'registered_device': (AssetDeviceRegistrationRelationship,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'description': 'Description', # noqa: E501
'dn': 'Dn', # noqa: E501
'hardware_version': 'HardwareVersion', # noqa: E501
'model': 'Model', # noqa: E501
'node_id': 'NodeId', # noqa: E501
'operational_state': 'OperationalState', # noqa: E501
'power_state': 'PowerState', # noqa: E501
'record_type': 'RecordType', # noqa: E501
'record_version': 'RecordVersion', # noqa: E501
'redundancy_state': 'RedundancyState', # noqa: E501
'serial_number': 'SerialNumber', # noqa: E501
'site_name': 'SiteName', # noqa: E501
'vid': 'Vid', # noqa: E501
'registered_device': 'RegisteredDevice', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NiatelemetryLcAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "niatelemetry.Lc", must be one of ["niatelemetry.Lc", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "niatelemetry.Lc", must be one of ["niatelemetry.Lc", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): Description of the line cards present.. [optional] # noqa: E501
dn (str): Dn value for the line cards present.. [optional] # noqa: E501
hardware_version (str): Hardware version of the line cards present.. [optional] # noqa: E501
model (str): Model of the line cards present.. [optional] # noqa: E501
node_id (int): Node Id of the line card present.. [optional] # noqa: E501
operational_state (str): Operational state of the line cards present.. [optional] # noqa: E501
power_state (str): Power state of the line cards present.. [optional] # noqa: E501
record_type (str): Type of record DCNM / APIC / SE. This determines the type of platform where inventory was collected.. [optional] # noqa: E501
record_version (str): Version of record being pushed. This determines what was the API version for data available from the device.. [optional] # noqa: E501
redundancy_state (str): Redundancy state of the line cards present.. [optional] # noqa: E501
serial_number (str): Serial number of the line card present.. [optional] # noqa: E501
site_name (str): The Site name represents an APIC cluster. Service Engine can onboard multiple APIC clusters / sites.. [optional] # noqa: E501
vid (str): VID for the line card in the inventory.. [optional] # noqa: E501
registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "niatelemetry.Lc")
object_type = kwargs.get('object_type', "niatelemetry.Lc")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
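# Usage sketch (illustrative, not part of the generated file): assuming the
# `intersight` SDK is installed, an instance can be built from made-up field
# values like so:
#
#     lc = NiatelemetryLcAllOf(description="line card 1",
#                              serial_number="FDO12345678",
#                              node_id=101)
#     print(lc.serial_number)
#
# class_id and object_type default to "niatelemetry.Lc" as documented above.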
|
py | 1a554b7308ef873cc7beaf4e245048bb8f6d6ec3 | import logging
import time
from xml.etree.ElementTree import fromstring
import declxml as xml
import requests
from requests_cache import CachedSession
logger = logging.getLogger(__name__)
class BGGClient:
BASE_URL = "https://www.boardgamegeek.com/xmlapi2"
def __init__(self, cache=None, debug=False):
if not cache:
self.requester = requests.Session()
else:
self.requester = cache.cache
if debug:
logging.basicConfig(level=logging.DEBUG)
def collection(self, user_name, **kwargs):
params = kwargs.copy()
params["username"] = user_name
data = self._make_request("/collection?version=1", params)
collection = self._collection_to_games(data)
return collection
def plays(self, user_name):
params = {
"username": user_name,
"page": 1,
}
all_plays = []
data = self._make_request("/plays?version=1", params)
new_plays = self._plays_to_games(data)
while (len(new_plays) > 0):
all_plays = all_plays + new_plays
params["page"] += 1
data = self._make_request("/plays?version=1", params)
new_plays = self._plays_to_games(data)
return all_plays
def game_list(self, game_ids):
if not game_ids:
return []
# Split game_ids into smaller chunks to avoid "414 URI too long"
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
games = []
for game_ids_subset in chunks(game_ids, 100):
url = "/thing/?stats=1&id=" + ",".join([str(id_) for id_ in game_ids_subset])
data = self._make_request(url)
games += self._games_list_to_games(data)
return games
def _make_request(self, url, params={}, tries=0):
try:
response = self.requester.get(BGGClient.BASE_URL + url, params=params)
except requests.exceptions.ConnectionError:
if tries < 3:
time.sleep(2)
return self._make_request(url, params=params, tries=tries + 1)
raise BGGException("BGG API closed the connection prematurely, please try again...")
logger.debug("REQUEST: " + response.url)
logger.debug("RESPONSE: \n" + prettify_if_xml(response.text))
if response.status_code != 200:
# Handle 202 Accepted
if response.status_code == 202:
if tries < 10:
time.sleep(5)
return self._make_request(url, params=params, tries=tries + 1)
# Handle 504 Gateway Timeout
if response.status_code == 504:
if tries < 3:
time.sleep(2)
return self._make_request(url, params=params, tries=tries + 1)
raise BGGException(
f"BGG returned status code {response.status_code} when "
f"requesting {response.url}"
)
tree = fromstring(response.text)
if tree.tag == "errors":
raise BGGException(
f"BGG returned errors while requesting {response.url} - " +
str([subnode.text for node in tree for subnode in node])
)
return response.text
def _plays_to_games(self, data):
def after_players_hook(_, status):
return status["name"]
plays_processor = xml.dictionary("plays", [
xml.array(
xml.dictionary('play', [
xml.integer(".", attribute="id", alias="playid"),
xml.dictionary('item', [
xml.string(".", attribute="name", alias="gamename"),
xml.integer(".", attribute="objectid", alias="gameid")
], alias='game'),
xml.array(
xml.dictionary('players/player', [
xml.string(".", attribute="name")
], required=False, alias='players', hooks=xml.Hooks(after_parse=after_players_hook))
)
], required=False, alias="plays")
)
])
plays = xml.parse_from_string(plays_processor, data)
plays = plays["plays"]
return plays
def _collection_to_games(self, data):
def after_status_hook(_, status):
return [tag for tag, value in status.items() if value == "1"]
game_in_collection_processor = xml.dictionary("items", [
xml.array(
xml.dictionary('item', [
xml.integer(".", attribute="objectid", alias="id"),
xml.string("name"),
xml.string("thumbnail", required=False, alias="image"),
xml.string("version/item/thumbnail", required=False, alias="image_version"),
xml.dictionary("status", [
xml.string(".", attribute="fortrade"),
xml.string(".", attribute="own"),
xml.string(".", attribute="preordered"),
xml.string(".", attribute="prevowned"),
xml.string(".", attribute="want"),
xml.string(".", attribute="wanttobuy"),
xml.string(".", attribute="wanttoplay"),
xml.string(".", attribute="wishlist"),
], alias='tags', hooks=xml.Hooks(after_parse=after_status_hook)),
xml.string("status", attribute="lastmodified", alias="lastmodified"),
xml.integer("numplays"),
], required=False, alias="items"),
)
])
collection = xml.parse_from_string(game_in_collection_processor, data)
collection = collection["items"]
return collection
def _games_list_to_games(self, data):
def numplayers_to_result(_, results):
result = {result["value"].lower().replace(" ", "_"): int(result["numvotes"]) for result in results}
if not result:
result = {'best': 0, 'recommended': 0, 'not_recommended': 0}
is_recommended = result['best'] + result['recommended'] > result['not_recommended']
if not is_recommended:
return "not_recommended"
is_best = result['best'] > 10 and result['best'] > result['recommended']
if is_best:
return "best"
return "recommended"
def suggested_numplayers(_, numplayers):
# Remove not_recommended player counts
numplayers = [players for players in numplayers if players["result"] != "not_recommended"]
# If there's only one player count, that's the best one
if len(numplayers) == 1:
numplayers[0]["result"] = "best"
# Just return the numbers
return [
(players["numplayers"], players["result"])
for players in numplayers
]
def log_item(_, item):
logger.debug("Successfully parsed: {} (id: {}).".format(item["name"], item["id"]))
return item
game_processor = xml.dictionary("items", [
xml.array(
xml.dictionary(
"item",
[
xml.integer(".", attribute="id"),
xml.string(".", attribute="type"),
xml.string("name[@type='primary']", attribute="value", alias="name"),
xml.string("description"),
xml.array(
xml.string(
"link[@type='boardgamecategory']",
attribute="value",
required=False
),
alias="categories",
),
xml.array(
xml.string(
"link[@type='boardgamemechanic']",
attribute="value",
required=False
),
alias="mechanics",
),
xml.array(
xml.dictionary(
"link[@type='boardgameexpansion']", [
xml.integer(".", attribute="id"),
xml.boolean(".", attribute="inbound", required=False),
],
required=False
),
alias="expansions",
),
xml.array(
xml.dictionary("poll[@name='suggested_numplayers']/results", [
xml.string(".", attribute="numplayers"),
xml.array(
xml.dictionary("result", [
xml.string(".", attribute="value"),
xml.integer(".", attribute="numvotes"),
], required=False),
hooks=xml.Hooks(after_parse=numplayers_to_result)
)
]),
alias="suggested_numplayers",
hooks=xml.Hooks(after_parse=suggested_numplayers),
),
xml.string(
"statistics/ratings/averageweight",
attribute="value",
alias="weight"
),
xml.string(
"statistics/ratings/ranks/rank[@friendlyname='Board Game Rank']",
attribute="value",
required=False,
alias="rank"
),
xml.string(
"statistics/ratings/usersrated",
attribute="value",
alias="usersrated"
),
xml.string(
"statistics/ratings/owned",
attribute="value",
alias="numowned"
),
xml.string(
"statistics/ratings/bayesaverage",
attribute="value",
alias="rating"
),
xml.string("playingtime", attribute="value", alias="playing_time"),
],
required=False,
alias="items",
hooks=xml.Hooks(after_parse=log_item),
)
)
])
games = xml.parse_from_string(game_processor, data)
games = games["items"]
return games
class CacheBackendSqlite:
def __init__(self, path, ttl):
self.cache = CachedSession(
cache_name=path,
backend="sqlite",
expire_after=ttl,
extension="",
fast_save=True,
allowable_codes=(200,)
)
class BGGException(Exception):
pass
def prettify_if_xml(xml_string):
import xml.dom.minidom
import re
xml_string = re.sub(r"\s+<", "<", re.sub(r">\s+", ">", re.sub(r"\s+", " ", xml_string)))
if not xml_string.startswith("<?xml"):
return xml_string
parsed = xml.dom.minidom.parseString(xml_string)
return parsed.toprettyxml()
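# Usage sketch (illustrative; "some_user" is a placeholder BGG username):
#
#     client = BGGClient(cache=CacheBackendSqlite("bgg_cache", ttl=3600))
#     collection = client.collection("some_user", own=1)
#     games = client.game_list([item["id"] for item in collection])
#
# The optional CacheBackendSqlite makes repeated requests hit the local
# sqlite cache instead of the BGG XML API.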
|
py | 1a554be3baf4b39ec562439888770dadaf6b2610 | # Copyright 2011 OpenStack Foundation
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import network
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'floating_ips')
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError, AttributeError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['fixed_ip']['instance_uuid']
except (TypeError, KeyError, AttributeError):
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
for ip in floating_ips]}
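# Illustrative example of the view format (values are made up): a record
# {'id': 1, 'address': '10.0.0.5', 'pool': 'public',
#  'fixed_ip': {'address': '192.168.0.3', 'instance_uuid': 'abc-123'}}
# is translated by _translate_floating_ip_view into
# {'floating_ip': {'id': 1, 'ip': '10.0.0.5', 'pool': 'public',
#                  'fixed_ip': '192.168.0.3', 'instance_id': 'abc-123'}}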
def get_instance_by_floating_ip_addr(self, context, address):
snagiibfa = self.network_api.get_instance_id_by_floating_address
instance_id = snagiibfa(context, address)
if instance_id:
return common.get_instance(self.compute_api, context, instance_id)
def disassociate_floating_ip(self, context, instance, address):
try:
self.network_api.disassociate_floating_ip(context, instance, address)
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(FloatingIPController, self).__init__()
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
authorize(context)
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except (exception.NotFound, exception.InvalidID):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
return _translate_floating_ip_view(floating_ip)
def index(self, req):
"""Return a list of floating ips allocated to a project."""
context = req.environ['nova.context']
authorize(context)
floating_ips = self.network_api.get_floating_ips_by_project(context)
return _translate_floating_ips_view(floating_ips)
def create(self, req, body=None):
context = req.environ['nova.context']
authorize(context)
pool = None
if body and 'pool' in body:
pool = body['pool']
try:
address = self.network_api.allocate_floating_ip(context, pool)
ip = self.network_api.get_floating_ip_by_address(context, address)
except exception.NoMoreFloatingIps:
if pool:
msg = _("No more floating ips in pool %s.") % pool
else:
msg = _("No more floating ips available.")
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.FloatingIpLimitExceeded:
if pool:
msg = _("IP allocation over quota in pool %s.") % pool
else:
msg = _("IP allocation over quota.")
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.FloatingIpPoolNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return _translate_floating_ip_view(ip)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except (exception.NotFound, exception.InvalidID):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
address = floating_ip['address']
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
try:
self.network_api.disassociate_and_release_floating_ip(
context, instance, floating_ip)
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
return webob.Response(status_int=202)
class FloatingIPActionController(wsgi.Controller):
def __init__(self, ext_mgr=None, *args, **kwargs):
super(FloatingIPActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.network_api = network.API()
self.ext_mgr = ext_mgr
@wsgi.action('addFloatingIp')
def _add_floating_ip(self, req, id, body):
"""Associate floating_ip to an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['addFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
instance = common.get_instance(self.compute_api, context, id)
cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
if not cached_nwinfo:
LOG.warning(
_LW('Info cache is %r during associate') % instance.info_cache,
instance=instance)
msg = _('No nw_info cache associated with instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_ips = cached_nwinfo.fixed_ips()
if not fixed_ips:
msg = _('No fixed ips associated to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_address = None
if self.ext_mgr.is_loaded('os-extended-floating-ips'):
if 'fixed_address' in body['addFloatingIp']:
fixed_address = body['addFloatingIp']['fixed_address']
for fixed in fixed_ips:
if fixed['address'] == fixed_address:
break
else:
msg = _('Specified fixed address not assigned to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
if not fixed_address:
try:
fixed_address = next(ip['address'] for ip in fixed_ips
if netaddr.valid_ipv4(ip['address']))
except StopIteration:
msg = _('Unable to associate floating ip %(address)s '
'to any fixed IPs for instance %(id)s. '
'Instance has no fixed IPv4 addresses to '
'associate.') % (
{'address': address, 'id': id})
raise webob.exc.HTTPBadRequest(explanation=msg)
if len(fixed_ips) > 1:
LOG.warning(_LW('multiple fixed_ips exist, using the first '
'IPv4 fixed_ip: %s'), fixed_address)
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=address,
fixed_address=fixed_address)
except exception.FloatingIpAssociated:
msg = _('floating ip is already associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.FloatingIpNotFoundForAddress:
msg = _('floating ip not found')
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
except Exception as e:
msg = _('Unable to associate floating ip %(address)s to '
'fixed ip %(fixed_address)s for instance %(id)s. '
'Error: %(error)s') % (
{'address': address, 'fixed_address': fixed_address,
'id': id, 'error': e})
LOG.exception(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.action('removeFloatingIp')
def _remove_floating_ip(self, req, id, body):
"""Dissociate floating_ip from an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['removeFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip_by_address(context,
address)
except exception.FloatingIpNotFoundForAddress:
msg = _("floating ip not found")
raise webob.exc.HTTPNotFound(explanation=msg)
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
if (instance and
floating_ip.get('fixed_ip_id') and
(uuidutils.is_uuid_like(id) and
[instance.uuid == id] or
[instance.id == id])[0]):
try:
disassociate_floating_ip(self, context, instance, address)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
else:
msg = _("Floating ip %(address)s is not associated with instance "
"%(id)s.") % {'address': address, 'id': id}
raise webob.exc.HTTPConflict(explanation=msg)
class Floating_ips(extensions.ExtensionDescriptor):
"""Floating IPs support."""
name = "FloatingIps"
alias = "os-floating-ips"
namespace = "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1"
updated = "2011-06-16T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ips',
FloatingIPController(),
member_actions={})
resources.append(res)
return resources
def get_controller_extensions(self):
controller = FloatingIPActionController(self.ext_mgr)
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
py | 1a554c85e018b73d01d8173112b6c6dab152a902 | import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.common import is_datetime64tz_dtype, needs_i8_conversion
import pandas as pd
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_unique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.unique()
# dict.fromkeys preserves the order
unique_values = list(dict.fromkeys(obj.values))
if isinstance(obj, pd.MultiIndex):
expected = pd.MultiIndex.from_tuples(unique_values)
expected.names = obj.names
tm.assert_index_equal(result, expected)
elif isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_unique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
result = obj.unique()
unique_values_raw = dict.fromkeys(obj.values)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)]
unique_values = [null_obj] + unique_values_not_null
if isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
elif isinstance(obj, pd.CategoricalIndex):
expected = expected.set_categories(unique_values_not_null)
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values, dtype=obj.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_nunique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
expected = len(obj.unique())
assert obj.nunique(dropna=False) == expected
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_nunique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
if isinstance(obj, pd.CategoricalIndex):
assert obj.nunique() == len(obj.categories)
assert obj.nunique(dropna=False) == len(obj.categories) + 1
else:
num_unique_values = len(obj.unique())
assert obj.nunique() == max(0, num_unique_values - 1)
assert obj.nunique(dropna=False) == max(0, num_unique_values)
@pytest.mark.parametrize(
"idx_or_series_w_bad_unicode", [pd.Index(["\ud83d"] * 2), pd.Series(["\ud83d"] * 2)]
)
def test_unique_bad_unicode(idx_or_series_w_bad_unicode):
# regression test for #34550
obj = idx_or_series_w_bad_unicode
result = obj.unique()
if isinstance(obj, pd.Index):
expected = pd.Index(["\ud83d"], dtype=object)
tm.assert_index_equal(result, expected)
else:
expected = np.array(["\ud83d"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_nunique_dropna(dropna):
# GH37566
s = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT])
res = s.nunique(dropna)
assert res == (1 if dropna else 5)
|
py | 1a554cade9e3839a173abd0eefa991d3acf0f269 | """
Receive and send messages according to the protocol:
messages are separated by the null byte \0.
"""
import select
import socket
HOST = "127.0.0.1"
PORT = 9999
clients = {}
SEP = b"\0"
class Client:
def __init__(self, sock):
self.sock = sock
self._out_stream = bytes()
self._accumulated_data = bytes()
def send(self, message):
self._out_stream += message + SEP
def recv(self):
data = self.sock.recv(1)
if not data:
self.sock.close()
return None
self._accumulated_data += data
messages = []
while True:
if SEP in self._accumulated_data:
msg, rest = self._accumulated_data.split(SEP, 1)
self._accumulated_data = rest
messages.append(msg)
else:
break
return messages
def flush(self):
sent = self.sock.send(self._out_stream)
self._out_stream = self._out_stream[sent:]
return len(self._out_stream) == 0
def broadcast(poll, message):
for client in clients.values():
client.send(message)
poll.register(client.sock, select.POLLOUT)
def main():
listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.bind((HOST, PORT))
listen_sock.listen(5)
poll = select.poll()
poll.register(listen_sock, select.POLLIN)
while True:
for fd, event in poll.poll():
# socket reported an error or the connection was closed
if event & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
poll.unregister(fd)
client = clients[fd]
print('Client {} disconnected'.format(client.sock.getpeername()))
del clients[fd]
# the listening socket: accept a new connection
elif fd == listen_sock.fileno():
client_sock, addr = listen_sock.accept()
client_sock.setblocking(0)
fd = client_sock.fileno()
clients[fd] = Client(client_sock)
poll.register(fd, select.POLLIN)
print('Connection from {}'.format(addr))
# new data from a client
elif event & select.POLLIN:
client = clients[fd]
messages = client.recv()
if messages:
for message in messages:
broadcast(poll, message)
# the client socket is ready for writing
elif event & select.POLLOUT:
client = clients[fd]
is_empty = client.flush()
if is_empty:
poll.modify(client.sock, select.POLLIN)
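# Minimal client sketch (illustrative, not part of the original module): it
# frames outgoing messages with the same null-byte separator the server
# expects and reads until one complete message comes back. Reuses HOST,
# PORT and SEP defined above.
def example_client(message=b"hello"):
    client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_sock.connect((HOST, PORT))
    # every outgoing message must be terminated by the protocol separator
    client_sock.sendall(message + SEP)
    buffer = bytes()
    while SEP not in buffer:
        chunk = client_sock.recv(1024)
        if not chunk:
            break
        buffer += chunk
    client_sock.close()
    # the server broadcasts each message to every connected client, including
    # the sender, so the first complete message is our own message echoed back
    return buffer.split(SEP, 1)[0]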
if __name__ == '__main__':
main()
|
py | 1a554cf6b52dd5910e73fcb113103fc314d0fb95 | #! /usr/bin/env python
"""
Module containing functions for cubes frame registration.
"""
__author__ = 'C. A. Gomez Gonzalez, V. Christiaens, G. Ruane, R. Farkas'
__all__ = ['frame_shift',
'cube_shift',
'shift_fft',
'frame_center_radon',
'frame_center_satspots',
'cube_recenter_satspots',
'cube_recenter_radon',
'cube_recenter_dft_upsampling',
'cube_recenter_2dfit',
'cube_recenter_via_speckles']
import numpy as np
import warnings
from packaging import version
try:
import cv2
no_opencv = False
except ImportError:
msg = "Opencv python bindings are missing."
warnings.warn(msg, ImportWarning)
no_opencv = True
from hciplot import plot_frames
from scipy.ndimage import fourier_shift
from scipy.ndimage import shift
import skimage
from skimage.transform import radon
if version.parse(skimage.__version__) <= version.parse('0.17.0'):
from skimage.feature import register_translation as cc_center
else:
from skimage.registration import phase_cross_correlation as cc_center
from multiprocessing import cpu_count
from matplotlib import pyplot as plt
from . import frame_crop
from ..conf import time_ini, timing, Progressbar
from ..conf.utils_conf import vip_figsize, check_array
from ..conf.utils_conf import pool_map, iterable
from ..stats import frame_basic_stats
from ..var import (get_square, frame_center, get_annulus_segments,
fit_2dmoffat, fit_2dgaussian, fit_2dairydisk,
fit_2d2gaussian, cube_filter_lowpass, cube_filter_highpass)
from ..preproc import cube_crop_frames
def frame_shift(array, shift_y, shift_x, imlib='vip-fft',
interpolation='lanczos4', border_mode='reflect'):
""" Shifts a 2D array by shift_y, shift_x. Boundaries are filled with zeros.
Parameters
----------
array : numpy ndarray
Input 2d array.
shift_y, shift_x: float
Shifts in y and x directions.
imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
Library or method used for performing the image shift.
'ndimage-fourier', does a fourier shift operation and preserves better
the pixel values (therefore the flux and photometry). Interpolation
based shift ('opencv' and 'ndimage-interp') is faster than the fourier
shift. 'opencv' is recommended when speed is critical.
interpolation : str, optional
Only used in case of imlib is set to 'opencv' or 'ndimage-interp'
(Scipy.ndimage), where the images are shifted via interpolation.
For Scipy.ndimage the options are: 'nearneig', bilinear', 'biquadratic',
'bicubic', 'biquartic' or 'biquintic'. The 'nearneig' interpolation is
the fastest and the 'biquintic' the slowest. The 'nearneig' is the
poorest option for interpolation of noisy astronomical images.
For Opencv the options are: 'nearneig', 'bilinear', 'bicubic' or
'lanczos4'. The 'nearneig' interpolation is the fastest and the
'lanczos4' the slowest and accurate. 'lanczos4' is the default for
Opencv and 'biquartic' for Scipy.ndimage.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
Returns
-------
array_shifted : numpy ndarray
Shifted 2d array.
"""
check_array(array, dim=2)
image = array.copy()
if imlib == 'ndimage-fourier':
shift_val = (shift_y, shift_x)
array_shifted = fourier_shift(np.fft.fftn(image), shift_val)
array_shifted = np.fft.ifftn(array_shifted)
array_shifted = array_shifted.real
elif imlib == 'ndimage-interp':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'biquadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise ValueError('Scipy.ndimage interpolation method not '
'recognized')
if border_mode not in ['reflect', 'nearest', 'constant', 'mirror',
'wrap']:
raise ValueError('`border_mode` not recognized')
array_shifted = shift(image, (shift_y, shift_x), order=order,
mode=border_mode)
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv or'
msg += ' set imlib to ndimage-fourier or ndimage-interp'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp = cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise ValueError('Opencv interpolation method not recognized')
if border_mode == 'mirror':
bormo = cv2.BORDER_REFLECT_101 # gfedcb|abcdefgh|gfedcba
elif border_mode == 'reflect':
bormo = cv2.BORDER_REFLECT # fedcba|abcdefgh|hgfedcb
elif border_mode == 'wrap':
bormo = cv2.BORDER_WRAP # cdefgh|abcdefgh|abcdefg
elif border_mode == 'constant':
bormo = cv2.BORDER_CONSTANT # iiiiii|abcdefgh|iiiiiii
elif border_mode == 'nearest':
bormo = cv2.BORDER_REPLICATE # aaaaaa|abcdefgh|hhhhhhh
else:
raise ValueError('`border_mode` not recognized')
image = np.float32(image)
y, x = image.shape
M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
array_shifted = cv2.warpAffine(image, M, (x, y), flags=intp,
borderMode=bormo)
elif imlib == 'vip-fft':
array_shifted = shift_fft(array, shift_x, shift_y)
else:
raise ValueError('Image transformation library not recognized')
return array_shifted
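# Example call (sketch): shift a square 2d frame by half a pixel in y and a
# quarter pixel in x with the FFT-based method:
#
#     shifted = frame_shift(img, shift_y=0.5, shift_x=0.25, imlib='vip-fft')
#
# ('img' stands for any square 2d numpy array; 'vip-fft' requires a square
# input because it delegates to shift_fft below.)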
def cube_shift(cube, shift_y, shift_x, imlib='vip-fft',
interpolation='lanczos4'):
""" Shifts the X-Y coordinates of a cube or 3D array by x and y values.
Parameters
----------
cube : numpy ndarray, 3d
Input cube.
shift_y, shift_x: float, list of floats or np.ndarray of floats
Shifts in y and x directions for each frame. If the a single value is
given then all the frames will be shifted by the same amount.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
Returns
-------
cube_out : numpy ndarray, 3d
Cube with shifted frames.
"""
check_array(cube, dim=3)
nfr = cube.shape[0]
cube_out = np.zeros_like(cube)
if isinstance(shift_x, (int, float)):
shift_x = np.ones((nfr)) * shift_x
if isinstance(shift_y, (int, float)):
shift_y = np.ones((nfr)) * shift_y
for i in range(cube.shape[0]):
cube_out[i] = frame_shift(cube[i], shift_y[i], shift_x[i], imlib,
interpolation)
return cube_out
def frame_center_satspots(array, xy, subi_size=19, sigfactor=6, shift=False,
imlib='vip-fft', interpolation='lanczos4',
fit_type='moff', debug=False, verbose=True):
""" Finds the center of a frame with waffle/satellite spots (e.g. for
VLT/SPHERE). The method used to determine the center is by centroiding the
4 spots via a 2d Gaussian fit and finding the intersection of the
lines they create (see Notes). This method is very sensitive to the SNR of
the satellite spots, therefore thresholding of the background pixels is
performed. If the results look wrong, the debug parameter lets you inspect
the fit in detail (you may need to adjust the sigfactor used to threshold
the background pixels).
Parameters
----------
array : numpy ndarray, 2d
Image or frame.
xy : tuple of 4 tuples of 2 elements
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
shift : bool, optional
If True the image is shifted.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
debug : bool, optional
If True debug information is printed and plotted.
verbose : bool, optional
If True the intersection and shifts information is printed out.
Returns
-------
array_rec
Shifted images. *Only returned if ``shift=True``.*
shifty, shiftx
Shift Y,X to get to the true center.
Notes
-----
linear system:
.. code-block:: none
A1 * x + B1 * y = C1
A2 * x + B2 * y = C2
Cramer's rule - solution can be found in determinants:
.. code-block:: none
x = Dx/D
y = Dy/D
where D is main determinant of the system:
.. code-block:: none
A1 B1
A2 B2
and Dx and Dy can be found from matrices:
.. code-block:: none
C1 B1
C2 B2
and
.. code-block:: none
A1 C1
A2 C2
The C column successively replaces the coefficient columns of x and y.
L stores the coefficients A, B, C of the line equations.
.. code-block:: none
For D: L1[0] L1[1] for Dx: L1[2] L1[1] for Dy: L1[0] L1[2]
L2[0] L2[1] L2[2] L2[1] L2[0] L2[2]
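As an illustrative numeric check (example values): the line through
(0, 0) and (1, 1) gives A1, B1, C1 = -1, 1, 0 and the line through
(0, 1) and (1, 0) gives A2, B2, C2 = 1, 1, 1, so D = -2, Dx = Dy = -1
and the intersection is (x, y) = (0.5, 0.5).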
"""
def line(p1, p2):
""" produces coefs A, B, C of line equation by 2 points
"""
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
""" finds intersection point (if any) of 2 lines provided by coefs
"""
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return None
# --------------------------------------------------------------------------
check_array(array, dim=2)
if fit_type not in ['gaus','moff']:
raise TypeError('fit_type is not recognized')
if not isinstance(xy, (tuple, list)) or len(xy) != 4:
raise TypeError('Input waffle spot coordinates in wrong format (must '
'be a tuple of 4 tuples')
cy, cx = frame_center(array)
centx = []
centy = []
subims = []
for i in range(len(xy)):
sim, y, x = get_square(array, subi_size, xy[i][1], xy[i][0],
position=True, verbose=False)
if fit_type=='gaus':
cent2dgy, cent2dgx = fit_2dgaussian(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
else:
cent2dgy, cent2dgx = fit_2dmoffat(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
centx.append(cent2dgx + x)
centy.append(cent2dgy + y)
subims.append(sim)
cent2dgx_1, cent2dgx_2, cent2dgx_3, cent2dgx_4 = centx
cent2dgy_1, cent2dgy_2, cent2dgy_3, cent2dgy_4 = centy
si1, si2, si3, si4 = subims
if debug:
plot_frames((si1, si2, si3, si4), colorbar=True)
print('Centroids X,Y:')
print(cent2dgx_1, cent2dgy_1)
print(cent2dgx_2, cent2dgy_2)
print(cent2dgx_3, cent2dgy_3)
print(cent2dgx_4, cent2dgy_4)
L1 = line([cent2dgx_1, cent2dgy_1], [cent2dgx_4, cent2dgy_4])
L2 = line([cent2dgx_2, cent2dgy_2], [cent2dgx_3, cent2dgy_3])
R = intersection(L1, L2)
msgerr = "Check that the order of the tuples in `xy` is correct and"
msgerr += " the satellite spots have good S/N"
if R is not None:
shiftx = cx - R[0]
shifty = cy - R[1]
if np.abs(shiftx) < cx * 2 and np.abs(shifty) < cy * 2:
if debug or verbose:
print('Intersection coordinates (X,Y):', R[0], R[1], '\n')
print('Shifts (X,Y): {:.3f}, {:.3f}'.format(shiftx, shifty))
if shift:
array_rec = frame_shift(array, shifty, shiftx, imlib=imlib,
interpolation=interpolation)
return array_rec, shifty, shiftx, centy, centx
else:
return shifty, shiftx
else:
raise RuntimeError("Too large shifts. " + msgerr)
else:
raise RuntimeError("Something went wrong, no intersection found. " +
msgerr)
def shift_fft(array, xshift, yshift):
"""
Subpixel shifting of ``image`` using Fourier transformation.
Parameters
----------
array : 2d numpy ndarray
The image to be shifted.
xshift : float
Amount of desired shift in X direction.
yshift : float
Amount of desired shift in Y direction.
Returns
-------
shifted_array : 2d ndarray
Input ``image`` shifted by ``xshift`` and ``yshift``.
Notes
-----
based on ``LibAndromeda/oneralib/subpixel_shift.pro``, v1.3 2009/05/28
"""
npix = array.shape[0]
if npix != array.shape[1]:
raise ValueError("Input array must be square")
if npix%2:
cte = npix/2-0.5
else:
cte = npix/2
ramp = np.outer(np.ones(npix), np.arange(npix) - cte)
tilt = (-2*np.pi / npix) * (xshift*ramp + yshift*ramp.T)
fact = np.fft.fftshift(np.cos(tilt) + 1j*np.sin(tilt))
array_ft = np.fft.fft2(array) # no np.fft.fftshift applied!
shifted_array = np.fft.ifft2(array_ft * fact).real
return shifted_array
def cube_recenter_satspots(array, xy, subi_size=19, sigfactor=6, plot=True,
fit_type='moff', lbda=None, debug=False, verbose=True,
full_output=False):
""" Function analog to frame_center_satspots but for image sequences. It
actually will call frame_center_satspots for each image in the cube. The
function also returns the shifted images (not recommended to use when the
shifts are of a few percents of a pixel) and plots the histogram of the
shifts and calculate its statistics. This is important to assess the
dispersion of the star center by using artificial waffle/satellite spots
(like those in VLT/SPHERE images) and evaluate the uncertainty of the
position of the center. The use of the shifted images is not recommended.
Parameters
----------
array : numpy ndarray, 3d
Input cube.
xy : tuple of 4 tuples of 2 elements
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
If wavelength vector is not provided, assumes all sat spots of the cube
are at a similar location. If wavelength is provided, only coordinates
of the sat spots in the first channel should be provided. The boxes
location in other channels will be scaled accordingly.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
plot : bool, optional
Whether to plot the shifts.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
lbda: 1d array or list, opt
Wavelength vector. If provided, the subimages will be scaled accordingly
to follow the motion of the satellite spots.
debug : bool, optional
If True debug information is printed and plotted (fit and residuals,
intersections and shifts). This has to be used carefully as it can
produce too much output and plots.
verbose : bool, optional
Whether to print to stdout the timing and additional info.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
Returns
-------
array_rec
The shifted cube.
shift_y, shift_x
[full_output==True] Shifts Y,X to get to the true center for each image.
sat_y, sat_x
[full_output==True] Y,X positions of the satellite spots in each image.
Order: top-left, top-right, bottom-left and bottom-right.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
shift_x = np.zeros((n_frames))
shift_y = np.zeros((n_frames))
sat_y = np.zeros([n_frames,4])
sat_x = np.zeros([n_frames,4])
array_rec = []
if lbda is not None:
cy, cx = frame_center(array[0])
final_xy = []
rescal = lbda/lbda[0]
for i in range(n_frames):
xy_new = []
for s in range(4):
xy_new.append((cx+rescal[i]*(xy[s][0]-cx),cy+rescal[i]*(xy[s][1]-cy)))
xy_new = tuple(xy_new)
final_xy.append(xy_new)
else:
final_xy = [xy for i in range(n_frames)]
if verbose:
print("Final xy positions for sat spots:", final_xy)
print('Looping through the frames, fitting the intersections:')
for i in Progressbar(range(n_frames), verbose=verbose):
res = frame_center_satspots(array[i], final_xy[i], debug=debug, shift=True,
subi_size=subi_size, sigfactor=sigfactor,
fit_type=fit_type, verbose=False)
array_rec.append(res[0])
shift_y[i] = res[1]
shift_x[i] = res[2]
sat_y[i] = res[3]
sat_x[i] = res[4]
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(shift_x, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(shift_y, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(shift_x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(shift_y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if verbose:
msg1 = 'MEAN X,Y: {:.3f}, {:.3f}'
print(msg1.format(np.mean(shift_x), np.mean(shift_y)))
msg2 = 'MEDIAN X,Y: {:.3f}, {:.3f}'
print(msg2.format(np.median(shift_x), np.median(shift_y)))
msg3 = 'STDDEV X,Y: {:.3f}, {:.3f}'
print(msg3.format(np.std(shift_x), np.std(shift_y)))
array_rec = np.array(array_rec)
if full_output:
return array_rec, shift_y, shift_x, sat_y, sat_x
else:
return array_rec
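# Example call (sketch; the coordinates are made-up pixel positions of four
# satellite spots in an 'x' configuration, ordered top-left, top-right,
# bottom-left, bottom-right as (X, Y) tuples):
#
#     spots = ((55, 95), (95, 95), (55, 55), (95, 55))
#     cube_rec, sy, sx, sat_y, sat_x = cube_recenter_satspots(
#         cube, spots, subi_size=11, full_output=True)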
def frame_center_radon(array, cropsize=None, hsize=0.4, step=0.01,
mask_center=None, nproc=None, satspots_cfg=None,
full_output=False, verbose=True,
plot=True, debug=False):
""" Finding the center of a broadband (co-added) frame with speckles and
satellite spots elongated towards the star (center). We use the radon
transform implementation from scikit-image.
Parameters
----------
array : numpy ndarray
Input 2d array or image.
cropsize : None or odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the bright elongated
speckle or satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted to each
direction from the center in a hsize length with a given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
satspots_cfg: None or str ('x' or '+'), opt
If satellite spots are present, provide a string corresponding to the
configuration of the satellite spots: either as a cross ('x') or as a
plus sign ('+'). Leave to None if no satellite spots present. Usually
the Radon transform centering works better if bright satellite spots
are present.
verbose : bool optional
Whether to print to stdout some messages and info.
plot : bool, optional
Whether to plot the radon cost function.
debug : bool, optional
Whether to print and plot intermediate info.
Returns
-------
[full_output=True] 2d np array
Radon cost function surface is returned if full_output set to True
optimy, optimx : float
Values of the Y, X coordinates of the center of the frame based on the
radon optimization. (always returned)
Notes
-----
Based on Pueyo et al. 2014: http://arxiv.org/abs/1409.6388
"""
from .cosmetics import frame_crop
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if verbose:
start_time = time_ini()
frame = array.copy()
ori_cent, _ = frame_center(frame)
if cropsize is not None:
if not cropsize%2:
raise TypeError("If not None, cropsize should be odd integer")
frame = frame_crop(frame, cropsize, verbose=False)
listyx = np.linspace(start=-hsize, stop=hsize, num=int(2*hsize/step)+1,
endpoint=True)
if not mask_center:
radint = 0
else:
if not isinstance(mask_center, int):
raise TypeError
radint = mask_center
coords = [(y, x) for y in listyx for x in listyx]
cent, _ = frame_center(frame)
frame = get_annulus_segments(frame, radint, cent-radint, mode="mask")[0]
if debug:
if satspots_cfg is not None:
samples = 10
if satspots_cfg == 'x':
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
elif satspots_cfg == '+':
theta = np.hstack((np.linspace(start=-5, stop=5, num=samples,
endpoint=False),
np.linspace(start=85, stop=95, num=samples,
endpoint=False),
np.linspace(start=175, stop=185, num=samples,
endpoint=False),
np.linspace(start=265, stop=275, num=samples,
endpoint=False)))
else:
msg = "If not None, satspots_cfg can only be 'x' or '+'."
raise ValueError(msg)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
else:
theta = np.linspace(start=0, stop=360, num=int(cent*2),
endpoint=False)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
costf = []
for coord in coords:
res = _radon_costf(frame, cent, radint, coord, satspots_cfg)
costf.append(res)
costf = np.array(costf)
elif nproc > 1:
res = pool_map(nproc, _radon_costf, frame, cent, radint,
iterable(coords), satspots_cfg)
costf = np.array(res)
if verbose:
msg = 'Done {} radon transform calls distributed in {} processes'
print(msg.format(len(coords), nproc))
cost_bound = costf.reshape(listyx.shape[0], listyx.shape[0])
if plot:
        plt.contour(cost_bound, cmap='CMRmap', origin='lower', linewidths=1)
plt.imshow(cost_bound, cmap='CMRmap', origin='lower',
interpolation='nearest')
plt.colorbar()
        plt.grid(False)
plt.show()
# argm = np.argmax(costf) # index of 1st max in 1d cost function 'surface'
# optimy, optimx = coords[argm]
# maxima in the 2d cost function surface
num_max = np.where(cost_bound == cost_bound.max())[0].shape[0]
ind_maximay, ind_maximax = np.where(cost_bound == cost_bound.max())
argmy = ind_maximay[int(np.ceil(num_max/2)) - 1]
argmx = ind_maximax[int(np.ceil(num_max/2)) - 1]
y_grid = np.array(coords)[:, 0].reshape(listyx.shape[0], listyx.shape[0])
x_grid = np.array(coords)[:, 1].reshape(listyx.shape[0], listyx.shape[0])
optimy = y_grid[argmy, 0]+(ori_cent-cent)/2
optimx = x_grid[0, argmx]+(ori_cent-cent)/2
if verbose:
print('Cost function max: {}'.format(costf.max()))
print('Cost function # maxima: {}'.format(num_max))
msg = 'Finished grid search radon optimization. Y={:.5f}, X={:.5f}'
print(msg.format(optimy, optimx))
timing(start_time)
if full_output:
return cost_bound, optimy, optimx
else:
return optimy, optimx
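# Illustrative usage sketch (not part of the original module; `frame` is assumed to be
# a 2d frame loaded elsewhere, cropped to an odd size around the star):
# >>> opt_y, opt_x = frame_center_radon(frame, cropsize=101, hsize=0.4, step=0.01,
# ...                                   satspots_cfg=None, verbose=True, plot=False)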
def _radon_costf(frame, cent, radint, coords, satspots_cfg=None):
""" Radon cost function used in frame_center_radon().
"""
frame_shifted = frame_shift(frame, coords[0], coords[1])
frame_shifted_ann = get_annulus_segments(frame_shifted, radint,
cent-radint, mode="mask")[0]
if satspots_cfg is None:
theta = np.linspace(start=0, stop=360, num=frame_shifted_ann.shape[0],
endpoint=False)
elif satspots_cfg == 'x':
samples = 10
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
else:
samples = 10
theta = np.hstack((np.linspace(start=-5, stop=5, num=samples,
endpoint=False),
np.linspace(start=85, stop=95, num=samples,
endpoint=False),
np.linspace(start=175, stop=185, num=samples,
endpoint=False),
np.linspace(start=265, stop=275, num=samples,
endpoint=False)))
sinogram = radon(frame_shifted_ann, theta=theta, circle=True)
costf = np.sum(np.abs(sinogram[int(cent), :]))
return costf
def cube_recenter_radon(array, full_output=False, verbose=True, imlib='vip-fft',
interpolation='lanczos4', **kwargs):
""" Recenters a cube looping through its frames and calling the
``frame_center_radon`` function.
Parameters
----------
array : numpy ndarray
Input 3d array or cube.
full_output : {False, True}, bool optional
If True the recentered cube is returned along with the y and x shifts.
verbose : {True, False}, bool optional
Whether to print timing and intermediate information to stdout.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
cropsize : odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted to each
direction from the center in a hsize length with a given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
debug : bool, optional
Whether to print and plot intermediate info from ``frame_center_radon``.
Returns
-------
array_rec : 3d ndarray
Recentered cube.
y, x : 1d arrays of floats
[full_output] Shifts in y and x.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
y[i], x[i] = frame_center_radon(array[i], verbose=False, plot=False,
**kwargs)
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation)
if verbose:
timing(start_time)
if full_output:
return array_rec, y, x
else:
return array_rec
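# Illustrative usage sketch (assumption: `cube` is a 3d ADI cube already loaded);
# extra keyword arguments are forwarded to frame_center_radon:
# >>> cube_reg, yshifts, xshifts = cube_recenter_radon(cube, full_output=True,
# ...                                                  cropsize=101, hsize=0.4)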
def cube_recenter_dft_upsampling(array, center_fr1=None, negative=False,
fwhm=4, subi_size=None, upsample_factor=100,
imlib='vip-fft', interpolation='lanczos4',
mask=None, full_output=False, verbose=True,
nproc=1, save_shifts=False, debug=False,
plot=True):
""" Recenters a cube of frames using the DFT upsampling method as
proposed in Guizar et al. 2008 and implemented in the
``register_translation`` function from scikit-image.
The algorithm (DFT upsampling) obtains an initial estimate of the
cross-correlation peak by an FFT and then refines the shift estimation by
upsampling the DFT only in a small neighborhood of that estimate by means
of a matrix-multiply DFT.
Parameters
----------
array : numpy ndarray
Input cube.
center_fr1 = (cy_1, cx_1) : Tuple, optional
Coordinates of the center of the subimage for fitting a 2d Gaussian and
centroiding the 1st frame.
negative : bool, optional
        If True the centroiding of the 1st frame is done with a negative
2d Gaussian fit.
fwhm : float, optional
FWHM size in pixels.
subi_size : int or None, optional
        Size of the square subimage sides in pixels, used to centroid the first
frame. If subi_size is None then the first frame is assumed to be
centered already.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
upsample_factor : int, optional
Upsampling factor (default 100). Images will be registered to within
1/upsample_factor of a pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
        [Note: only used if version of skimage >= 0.18.0]
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
debug : bool, optional
Whether to print to stdout the shifts or not.
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_recentered : numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
Notes
-----
Using the implementation from scikit-image of the algorithm described in
Guizar-Sicairos et al. "Efficient subpixel image registration algorithms,"
Opt. Lett. 33, 156-158 (2008). This algorithm registers two images (2-D
rigid translation) within a fraction of a pixel specified by the user.
Instead of computing a zero-padded FFT (fast Fourier transform), this code
uses selective upsampling by a matrix-multiply DFT (discrete FT) to
dramatically reduce computation time and memory without sacrificing
accuracy. With this procedure all the image points are used to compute the
upsampled cross-correlation in a very small neighborhood around its peak.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
if mask is not None:
if mask.shape[-1]!=array.shape[-1] or mask.shape[-2]!=array.shape[-2]:
msg = "If provided, mask should have same shape as frames"
raise TypeError(msg)
n_frames, sizey, sizex = array.shape
if subi_size is not None:
if center_fr1 is None:
            print('`cx_1` or `cy_1` not provided')
            print('Using the coordinates of the 1st frame center for '
'the Gaussian 2d fit')
cy_1, cx_1 = frame_center(array[0])
else:
cy_1, cx_1 = center_fr1
if not isinstance(subi_size, int):
raise ValueError('subi_size must be an integer or None')
if subi_size < fwhm:
raise ValueError('`subi_size` (value in pixels) is too small')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
cy, cx = frame_center(array[0])
# Centroiding first frame with 2d gaussian and shifting
msg0 = "The rest of the frames will be shifted by cross-correlation wrt the" \
" 1st"
if subi_size is not None:
y1, x1 = _centroid_2dg_frame(array_rec, 0, subi_size, cy_1, cx_1,
negative, debug, fwhm)
x[0] = cx - x1
y[0] = cy - y1
array_rec[0] = frame_shift(array_rec[0], shift_y=y[0], shift_x=x[0],
imlib=imlib, interpolation=interpolation)
if verbose:
msg = "Shift for first frame X,Y=({:.3f}, {:.3f})"
print(msg.format(x[0], y[0]))
print(msg0)
if debug:
titd = "original / shifted 1st frame subimage"
plot_frames((frame_crop(array[0], subi_size, verbose=False),
frame_crop(array_rec[0], subi_size, verbose=False)),
grid=True, title=titd)
else:
if verbose:
print("The first frame is assumed to be well centered wrt the"
"center of the array")
print(msg0)
x[0] = 0
y[0] = 0
    # Finding the shifts with DFT upsampling of each frame wrt the first
if nproc == 1:
for i in Progressbar(range(1, n_frames), desc="frames", verbose=verbose):
y[i], x[i], array_rec[i] = _shift_dft(array_rec, array, i,
upsample_factor, mask,
interpolation, imlib)
elif nproc > 1:
        res = pool_map(nproc, _shift_dft, array_rec, array,
                       iterable(range(1, n_frames)),
                       upsample_factor, mask, interpolation, imlib)
res = np.array(res)
y[1:] = res[:,0]
x[1:] = res[:,1]
array_rec[1:] = [frames for frames in res[:,2]]
if debug:
print("\nShifts in X and Y")
for i in range(n_frames):
print(x[i], y[i])
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
return array_rec, y, x
else:
return array_rec
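# Illustrative usage sketch (assumption: `cube` is a 3d cube whose first frame is
# already well centered, hence subi_size=None):
# >>> cube_reg, yshifts, xshifts = cube_recenter_dft_upsampling(cube, subi_size=None,
# ...                                                           upsample_factor=100,
# ...                                                           full_output=True)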
def _shift_dft(array_rec, array, frnum, upsample_factor, mask, interpolation,
imlib):
"""
    Function used in cube_recenter_dft_upsampling().
"""
if version.parse(skimage.__version__) > version.parse('0.17.0'):
shift_yx = cc_center(array_rec[0], array[frnum],
upsample_factor=upsample_factor, reference_mask=mask,
return_error=False)
else:
shift_yx = cc_center(array_rec[0], array[frnum],
upsample_factor=upsample_factor)
y_i, x_i = shift_yx
array_rec_i = frame_shift(array[frnum], shift_y=y_i, shift_x=x_i,
imlib=imlib, interpolation=interpolation)
return y_i, x_i, array_rec_i
def cube_recenter_2dfit(array, xy=None, fwhm=4, subi_size=5, model='gauss',
nproc=1, imlib='vip-fft', interpolation='lanczos4',
offset=None, negative=False, threshold=False,
sigfactor=2, fix_neg=False, params_2g=None,
save_shifts=False, full_output=False, verbose=True,
debug=False, plot=True):
""" Recenters the frames of a cube. The shifts are found by fitting a 2d
Gaussian or Moffat to a subimage centered at ``xy``. This assumes the frames
don't have too large shifts (>5px). The frames are shifted using the
function frame_shift().
Parameters
----------
array : numpy ndarray
Input cube.
xy : tuple of integers or floats
Integer coordinates of the center of the subimage (wrt the original frame).
For the double gaussian fit with fixed negative gaussian, this should
        correspond to the exact location of the center of the negative gaussian
(e.g. the center of the coronagraph mask) - in that case a tuple of
floats is also accepted.
fwhm : float or numpy ndarray
FWHM size in pixels, either one value (float) that will be the same for
the whole cube, or an array of floats with the same dimension as the
0th dim of array, containing the fwhm for each channel (e.g. in the case
of an ifs cube, where the fwhm varies with wavelength)
subi_size : int, optional
Size of the square subimage sides in pixels.
model : str, optional
Sets the type of fit to be used. 'gauss' for a 2d Gaussian fit,
'moff' for a 2d Moffat fit, 'airy' for a 2d Airy disk fit, and
'2gauss' for a 2d double Gaussian (positive+negative) fit.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
offset : tuple of floats, optional
If None the region of the frames used for the 2d Gaussian/Moffat fit is
shifted to the center of the images (2d arrays). If a tuple is given it
serves as the offset of the fitted area wrt the center of the 2d arrays.
negative : bool, optional
If True a negative 2d Gaussian/Moffat fit is performed.
fix_neg: bool, optional
In case of a double gaussian fit, whether to fix the parameters of the
        negative gaussian. If True, they should be provided in params_2g.
params_2g: None or dictionary, optional
In case of a double gaussian fit, dictionary with either fixed or first
guess parameters for the double gaussian. E.g.:
params_2g = {'fwhm_neg': 3.5, 'fwhm_pos': (3.5,4.2), 'theta_neg': 48.,
'theta_pos':145., 'neg_amp': 0.5}
fwhm_neg: float or tuple with fwhm of neg gaussian
fwhm_pos: can be a tuple for x and y axes of pos gaussian (replaces fwhm)
theta_neg: trigonometric angle of the x axis of the neg gaussian (deg)
theta_pos: trigonometric angle of the x axis of the pos gaussian (deg)
neg_amp: amplitude of the neg gaussian wrt the amp of the positive one
Note: it is always recommended to provide theta_pos and theta_neg for a
better fit.
threshold : bool, optional
If True the background pixels (estimated using sigma clipped statistics)
will be replaced by small random Gaussian noise (recommended for 2g).
sigfactor: float, optional
        If thresholding is performed, set the threshold in terms of
gaussian sigma in the subimage (will depend on your cropping size).
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
debug : bool, optional
If True the details of the fitting are shown. Won't work when the cube
contains >20 frames (as it might produce an extremely long output).
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_rec: numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
n_frames, sizey, sizex = array.shape
if not isinstance(subi_size, int):
raise ValueError('`subi_size` must be an integer')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
if isinstance(fwhm, (float, int, np.float32, np.float64)):
fwhm = np.ones(n_frames) * fwhm
if debug and array.shape[0] > 20:
msg = 'Debug with a big array will produce a very long output. '
msg += 'Try with less than 20 frames in debug mode'
raise RuntimeWarning(msg)
if xy is not None:
pos_x, pos_y = xy
cond = model != '2gauss'
if (not isinstance(pos_x, int) or not isinstance(pos_y, int)) and cond:
raise TypeError('`xy` must be a tuple of integers')
else:
pos_y, pos_x = frame_center(array[0])
cy, cx = frame_center(array[0])
array_rec = np.empty_like(array)
if model == 'gauss':
func = _centroid_2dg_frame
elif model == 'moff':
func = _centroid_2dm_frame
elif model == 'airy':
func = _centroid_2da_frame
elif model == '2gauss':
func = _centroid_2d2g_frame
else:
raise ValueError('model not recognized')
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
res = []
print('2d {}-fitting'.format(model))
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
if model == "2gauss":
args = [array, i, subi_size, pos_y, pos_x, debug, fwhm[i],
fix_neg, params_2g, threshold, sigfactor]
else:
args = [array, i, subi_size, pos_y, pos_x, negative, debug,
fwhm[i], threshold, sigfactor]
res.append(func(*args))
res = np.array(res)
elif nproc > 1:
if model == "2gauss":
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
debug, iterable(fwhm), fix_neg, params_2g, threshold,
sigfactor]
else:
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
negative, debug, iterable(fwhm), threshold, sigfactor]
res = pool_map(nproc, func, *args)
res = np.array(res)
y = cy - res[:, 0]
x = cx - res[:, 1]
if model == "2gauss" and not fix_neg:
y_neg = res[:, 2]
x_neg = res[:, 3]
fwhm_x = res[:, 4]
fwhm_y = res[:, 5]
fwhm_neg_x = res[:, 6]
fwhm_neg_y = res[:, 7]
theta = res[:, 8]
theta_neg = res[:, 9]
amp_pos = res[:,10]
amp_neg = res[:, 11]
if offset is not None:
offx, offy = offset
y -= offy
x -= offx
for i in Progressbar(range(n_frames), desc="Shifting", verbose=verbose):
if debug:
print("\nShifts in X and Y")
print(x[i], y[i])
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation)
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
if model == "2gauss" and not fix_neg:
_ = plt.hist(cx-x_neg, bins=b, alpha=0.5,
label=la + ' shifts X (neg gaussian)')
_ = plt.hist(cy-y_neg, bins=b, alpha=0.5,
label=la + ' shifts Y (neg gaussian)')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_gauss_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
if model == "2gauss" and not fix_neg:
return (array_rec, y, x, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x,
fwhm_neg_y, theta, theta_neg, amp_pos, amp_neg)
return array_rec, y, x
else:
return array_rec
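# Illustrative usage sketch (assumption: `cube` is a 3d cube with the star close to
# pixel (63, 63); integer coordinates are required for all models except '2gauss'):
# >>> cube_reg = cube_recenter_2dfit(cube, xy=(63, 63), fwhm=4.2, subi_size=5,
# ...                                model='gauss', negative=False)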
def _centroid_2dg_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d gaussian fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative gaussian fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False, fwhmx=fwhm, fwhmy=fwhm,
threshold=threshold, sigfactor=sigfactor, debug=debug,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2dm_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d moffat fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dmoffat(sub_image, crop=False, fwhm=fwhm, debug=debug,
threshold=threshold, sigfactor=sigfactor,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2da_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d Airy disk fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dairydisk(sub_image, crop=False, fwhm=fwhm,
threshold=threshold, sigfactor=sigfactor,
full_output=False, debug=debug)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2d2g_frame(cube, frnum, size, pos_y, pos_x, debug=False, fwhm=4,
fix_neg=True, params_2g=None, threshold=False,
sigfactor=1):
""" Finds the centroid by using a 2d double gaussian (positive+negative)
fitting in one frame from a cube. To be called from within
cube_recenter_doublegauss2d_fit().
"""
size = min(cube[frnum].shape[0],cube[frnum].shape[1],size)
#sub_image, y1, x1 = get_square_robust(cube[frnum], size=size, y=pos_y,
# x=pos_x, position=True)
if isinstance(params_2g,dict):
fwhm_neg = params_2g.get('fwhm_neg', 0.8*fwhm)
fwhm_pos = params_2g.get('fwhm_pos', 2*fwhm)
theta_neg = params_2g.get('theta_neg', 0.)
theta_pos = params_2g.get('theta_pos', 0.)
neg_amp = params_2g.get('neg_amp', 1)
res_DF = fit_2d2gaussian(cube[frnum], crop=True, cent=(pos_x,pos_y),
cropsize=size, fwhm_neg=fwhm_neg, fwhm_pos=fwhm_pos,
neg_amp=neg_amp, fix_neg=fix_neg, theta_neg=theta_neg,
theta_pos=theta_pos, threshold=threshold,
sigfactor=sigfactor, full_output=True, debug=debug)
y_i = res_DF['centroid_y']
x_i = res_DF['centroid_x']
if not fix_neg:
y_neg = res_DF['centroid_y_neg']
x_neg = res_DF['centroid_x_neg']
fwhm_x = res_DF['fwhm_x']
fwhm_y = res_DF['fwhm_y']
fwhm_neg_x = res_DF['fwhm_x_neg']
fwhm_neg_y = res_DF['fwhm_y_neg']
theta = res_DF['theta']
theta_neg = res_DF['theta_neg']
amp_pos = res_DF['amplitude']
amp_neg = res_DF['amplitude_neg']
return (y_i, x_i, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x, fwhm_neg_y,
theta, theta_neg, amp_pos, amp_neg)
return y_i, x_i
# TODO: make parameter names match the API
def cube_recenter_via_speckles(cube_sci, cube_ref=None, alignment_iter=5,
gammaval=1, min_spat_freq=0.5, max_spat_freq=3,
fwhm=4, debug=False, recenter_median=False,
fit_type='gaus', negative=True, crop=True,
subframesize=21, mask=None, imlib='vip-fft',
interpolation='lanczos4', plot=True,
full_output=False):
""" Registers frames based on the median speckle pattern. Optionally centers
based on the position of the vortex null in the median frame. Images are
filtered to isolate speckle spatial frequencies.
Parameters
----------
cube_sci : numpy ndarray
Science cube.
cube_ref : numpy ndarray
Reference cube (e.g. for NIRC2 data in RDI mode).
alignment_iter : int, optional
Number of alignment iterations (recomputes median after each iteration).
gammaval : int, optional
Applies a gamma correction to emphasize speckles (useful for faint
stars).
min_spat_freq : float, optional
Spatial frequency for low pass filter.
max_spat_freq : float, optional
Spatial frequency for high pass filter.
fwhm : float, optional
Full width at half maximum.
debug : bool, optional
Outputs extra info.
recenter_median : bool, optional
Recenter the frames at each iteration based on a 2d fit.
fit_type : str, optional
If recenter_median is True, this is the model to which the image is
        fitted for recentering. 'gaus' works well for NIRC2_AGPM data.
'ann' works better for NACO+AGPM data.
negative : bool, optional
If True, uses a negative gaussian fit to determine the center of the
median frame.
crop: bool, optional
Whether to calculate the recentering on a cropped version of the cube
that is speckle-dominated (recommended).
subframesize : int, optional
Sub-frame window size used. Should cover the region where speckles are
the dominant noise source.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
imlib : str, optional
Image processing library to use.
interpolation : str, optional
Interpolation method to use.
plot : bool, optional
If True, the shifts are plotted.
    full_output: bool, optional
        Whether to return more variables, useful for debugging.
Returns
-------
if full_output is False, returns:
cube_reg_sci: Registered science cube (numpy 3d ndarray)
If cube_ref is not None, also returns:
cube_reg_ref: Ref. cube registered to science frames (np 3d ndarray)
If full_output is True, returns in addition to the above:
cube_sci_lpf: Low+high-pass filtered science cube (np 3d ndarray)
cube_stret: Cube with stretched values used for cross-corr (np 3d ndarray)
cum_x_shifts_sci: Vector of x shifts for science frames (np 1d array)
cum_y_shifts_sci: Vector of y shifts for science frames (np 1d array)
And if cube_ref is not None, also returns:
cum_x_shifts_ref: Vector of x shifts for ref. frames.
cum_y_shifts_ref: Vector of y shifts for ref. frames.
"""
n, y, x = cube_sci.shape
check_array(cube_sci, dim=3)
if recenter_median and fit_type not in {'gaus','ann'}:
raise TypeError("fit type not recognized. Should be 'ann' or 'gaus'")
if crop and not subframesize < y/2.:
raise ValueError('`Subframesize` is too large')
if cube_ref is not None:
ref_star = True
nref = cube_ref.shape[0]
else:
ref_star = False
if crop:
cube_sci_subframe = cube_crop_frames(cube_sci, subframesize,
verbose=False)
if ref_star:
cube_ref_subframe = cube_crop_frames(cube_ref, subframesize,
verbose=False)
else:
subframesize = cube_sci.shape[-1]
cube_sci_subframe = cube_sci.copy()
if ref_star:
cube_ref_subframe = cube_ref.copy()
ceny, cenx = frame_center(cube_sci_subframe[0])
print('Sub frame shape: {}'.format(cube_sci_subframe.shape))
print('Center pixel: ({}, {})'.format(ceny, cenx))
# Filtering cubes. Will be used for alignment purposes
cube_sci_lpf = cube_sci_subframe.copy()
if ref_star:
cube_ref_lpf = cube_ref_subframe.copy()
cube_sci_lpf = cube_sci_lpf + np.abs(np.min(cube_sci_lpf))
if ref_star:
cube_ref_lpf = cube_ref_lpf + np.abs(np.min(cube_ref_lpf))
median_size = int(fwhm * max_spat_freq)
# Remove spatial frequencies <0.5 lam/D and >3lam/D to isolate speckles
cube_sci_hpf = cube_filter_highpass(cube_sci_lpf, 'median-subt',
median_size=median_size, verbose=False)
if min_spat_freq>0:
cube_sci_lpf = cube_filter_lowpass(cube_sci_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_sci_lpf = cube_sci_hpf
if ref_star:
cube_ref_hpf = cube_filter_highpass(cube_ref_lpf, 'median-subt',
median_size=median_size,
verbose=False)
if min_spat_freq>0:
cube_ref_lpf = cube_filter_lowpass(cube_ref_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_ref_lpf = cube_ref_hpf
if ref_star:
alignment_cube = np.zeros((1 + n + nref, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
alignment_cube[(n + 1):(n + 2 + nref), :, :] = cube_ref_lpf
else:
alignment_cube = np.zeros((1 + n, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
n_frames = alignment_cube.shape[0] # 1+n or 1+n+nref
cum_y_shifts = 0
cum_x_shifts = 0
for i in range(alignment_iter):
alignment_cube[0] = np.median(alignment_cube[1:(n + 1)], axis=0)
if recenter_median:
# Recenter the median frame using a 2d fit
if fit_type == 'gaus':
crop_sz = int(fwhm)
else:
crop_sz = int(6*fwhm)
if not crop_sz%2:
crop_sz+=1
sub_image, y1, x1 = get_square(alignment_cube[0], size=crop_sz,
y=ceny, x=cenx, position=True)
if fit_type == 'gaus':
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False,
threshold=False, sigfactor=1,
debug=debug, full_output=False)
elif fit_type == 'ann':
y_i, x_i, rad = _fit_2dannulus(sub_image, fwhm=fwhm, crop=False,
hole_rad=0.5, sampl_cen=0.1,
sampl_rad=0.2, ann_width=0.5,
unc_in=2.)
yshift = ceny - (y1 + y_i)
xshift = cenx - (x1 + x_i)
alignment_cube[0] = frame_shift(alignment_cube[0, :, :], yshift,
xshift, imlib=imlib,
interpolation=interpolation)
# center the cube with stretched values
cube_stret = np.log10((np.abs(alignment_cube) + 1) ** gammaval)
if mask is not None and crop:
mask_tmp = frame_crop(mask, subframesize)
else:
mask_tmp = mask
res = cube_recenter_dft_upsampling(cube_stret, (ceny, cenx), fwhm=fwhm,
subi_size=None, full_output=True,
verbose=False, plot=False,
mask=mask_tmp, imlib=imlib,
interpolation=interpolation)
_, y_shift, x_shift = res
sqsum_shifts = np.sum(np.sqrt(y_shift ** 2 + x_shift ** 2))
print('Square sum of shift vecs: ' + str(sqsum_shifts))
for j in range(1, n_frames):
alignment_cube[j] = frame_shift(alignment_cube[j], y_shift[j],
x_shift[j], imlib=imlib,
interpolation=interpolation)
cum_y_shifts += y_shift
cum_x_shifts += x_shift
cube_reg_sci = cube_sci.copy()
cum_y_shifts_sci = cum_y_shifts[1:(n + 1)]
cum_x_shifts_sci = cum_x_shifts[1:(n + 1)]
for i in range(n):
cube_reg_sci[i] = frame_shift(cube_sci[i], cum_y_shifts_sci[i],
cum_x_shifts_sci[i], imlib=imlib,
interpolation=interpolation)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(cum_x_shifts_sci, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(cum_y_shifts_sci, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n))
la = 'Histogram'
_ = plt.hist(cum_x_shifts_sci, bins=b, alpha=0.5, label=la+' shifts X')
_ = plt.hist(cum_y_shifts_sci, bins=b, alpha=0.5, label=la+' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if ref_star:
cube_reg_ref = cube_ref.copy()
cum_y_shifts_ref = cum_y_shifts[(n + 1):]
cum_x_shifts_ref = cum_x_shifts[(n + 1):]
for i in range(nref):
cube_reg_ref[i] = frame_shift(cube_ref[i], cum_y_shifts_ref[i],
cum_x_shifts_ref[i], imlib=imlib,
interpolation=interpolation)
if ref_star:
if full_output:
return (cube_reg_sci, cube_reg_ref, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci, cum_x_shifts_ref,
cum_y_shifts_ref)
else:
return (cube_reg_sci, cube_reg_ref)
else:
if full_output:
return (cube_reg_sci, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci)
else:
return cube_reg_sci
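# Illustrative usage sketch (assumption: `cube_sci` and `cube_ref` are 3d science and
# reference cubes loaded elsewhere):
# >>> cube_reg_sci, cube_reg_ref = cube_recenter_via_speckles(cube_sci,
# ...                                                         cube_ref=cube_ref,
# ...                                                         fwhm=4.2, plot=False)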
def _fit_2dannulus(array, fwhm=4, crop=False, cent=None, cropsize=15,
hole_rad=0.5, sampl_cen=0.1, sampl_rad=None, ann_width=0.5,
unc_in=2.):
"""Finds the center the center of a donut-shape signal (e.g. a coronagraphic
PSF) by fitting an annulus, using a grid of positions for the center and
radius of the annulus. The best fit is found by maximizing the mean flux
measured in the annular mask. Requires the image to be already roughly
centered (by an uncertainty provided by unc_in).
Parameters
----------
array : array_like
Image with a single donut-like source, already approximately at the
center of the frame.
fwhm : float
Gaussian PSF full width half maximum from fitting (in pixels).
hole_rad: float, opt
First estimate of the hole radius (in terms of fwhm). The grid search
on the radius of the optimal annulus goes from 0.5 to 2 times hole_rad.
Note: for the AGPM PSF of VLT/NACO, the optimal hole_rad ~ 0.5FWHM.
sampl_cen: float, opt
Precision of the grid sampling to find the center of the annulus (in
pixels)
sampl_rad: float, opt or None.
Precision of the grid sampling to find the optimal radius of the
annulus (in pixels). If set to None, there is no grid search for the
optimal radius of the annulus, the value given by hole_rad is used.
ann_width: float, opt
Width of the annulus in FWHM; default is 0.5 FWHM.
unc_in: float, opt
Initial uncertainty on the center location (with respect to center of
input subframe) in pixels; this will set the grid width.
Returns
-------
mean_y : float
Source centroid y position on the full image from fitting.
mean_x : float
Source centroid x position on the full image from fitting.
if sampl_rad is not None, also returns final_hole_rad:
final_hole_rad : float
Best fit radius of the hole, in terms of fwhm.
"""
if cent is None:
ceny, cenx = frame_center(array)
else:
cenx, ceny = cent
if crop:
x_sub_px = cenx%1
y_sub_px = ceny%1
imside = array.shape[0]
psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
int(ceny), int(cenx),
position=True)
ceny, cenx = frame_center(psf_subimage)
ceny+=y_sub_px
cenx+=x_sub_px
else:
psf_subimage = array.copy()
ann_sz = ann_width*fwhm
grid_sh_x = np.arange(-unc_in,unc_in,sampl_cen)
grid_sh_y = np.arange(-unc_in,unc_in,sampl_cen)
if sampl_rad is None:
rads = [hole_rad*fwhm]
else:
rads = np.arange(0.5*hole_rad*fwhm,2*hole_rad*fwhm,sampl_rad)
flux_ann = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
best_rad = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
for ii, xx in enumerate(grid_sh_x):
for jj, yy in enumerate(grid_sh_y):
tmp_tmp = frame_shift(array,yy,xx)
for rr, rad in enumerate(rads):
# mean flux in the annulus
tmp = frame_basic_stats(tmp_tmp, 'annulus',inner_radius=rad,
size=ann_sz, plot=False)
if tmp > flux_ann[ii,jj]:
flux_ann[ii,jj] = tmp
best_rad[ii,jj] = rad
i_max,j_max = np.unravel_index(np.argmax(flux_ann),flux_ann.shape)
mean_x = cenx - grid_sh_x[i_max]
mean_y = ceny - grid_sh_y[j_max]
if sampl_rad is None:
return mean_y, mean_x
else:
final_hole_rad = best_rad[i_max,j_max]/fwhm
return mean_y, mean_x, final_hole_rad
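# Illustrative usage sketch (assumption: `subim` is a roughly centred subframe with a
# donut-shaped coronagraphic PSF); with sampl_rad=None only the centroid is returned:
# >>> cy, cx = _fit_2dannulus(subim, fwhm=4.2, hole_rad=0.5, sampl_cen=0.1,
# ...                         sampl_rad=None, ann_width=0.5, unc_in=2.)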
|
py | 1a554d1782257e14492963eeb3b8a4ad4b1a1731 | #!/bin/env python
"""Log file
Author: Friedrich Schotte, Mar 2, 2016 - Oct 7, 2017
"""
__version__ = "1.1.5" # caching
from logging import debug,warn,info,error
class LogFile(object):
name = "logfile"
from persistent_property3 import persistent_property
filename = persistent_property("filename","")
def __init__(self,name="logfile",columns=["date time","value"],filename=None):
"""filename: where to save
columns: list of strings"""
self.name = name
self.columns = columns
if filename is not None: self.filename = filename
from _thread import allocate_lock
self.lock = allocate_lock()
def log(self,*args,**kwargs):
"""Append to logfile
time: time in seconds since 1970-01-01 00:00:00 UTC
"""
from time import time
from time_string import date_time
from normpath3 import normpath
from os.path import exists,dirname; from os import makedirs
values = args
if "time" in kwargs: timestamp = kwargs["time"]
else: timestamp = time()
with self.lock: # Allow only one thread at a time inside this function.
filename = normpath(self.filename)
if not exists(dirname(filename)): makedirs(dirname(filename))
if not exists(filename): header = "#"+"\t".join(self.columns)+"\n"
else: header = ""
fields = [date_time(timestamp)]+[str(v) for v in values]
line = "\t".join(fields)+"\n"
file(filename,"ab").write(header+line)
def history(self,*args,**kwargs):
"""time_range: tmin,tmax: time in seconds since 1970-01-01 00:00:00 UTC
range: imin,imax: all vaues from imin to imax, including imax
(Negative integers count from the end, -1 = last.)
count: last N
*args: column names"""
from numpy import nan
if "count" in kwargs:
count = kwargs["count"]
lines = self.lines(self.last_lines_range(count))
if "time_range" in kwargs:
time_range = kwargs["time_range"]
lines = self.lines(self.timestamp_range(time_range))
column_names = args
column_indices = [self.columns.index(name) for name in column_names]
values = []
for i in range(0,len(lines)):
if len(lines[i]) == len(column_names):
try:
row_values = [convert(lines[i][j],name) for (j,name)
in zip(column_indices,column_names)]
values += [row_values]
except Exception as msg:
warn("logfile: line %d/%d %r: %s" % (i+1,len(lines),lines[i],msg))
        values = list(zip(*values)) # organize data in rows
if values == []: values = [[]]*len(column_names)
return values
def lines(self,start_end):
"""Part of the file.
        start: byte offset (self.content[start] is the first character included)
        end: byte offset (self.content[end] will not be included)
Return value: list of lists of strings, each list representing a line
"""
        start = int(start_end[0]) # for Python 3 compatibility
        end = int(start_end[1]) # for Python 3 compatibility
lines = self.content[start:end].split("\n")
# Get rid of empty lines.
if lines[:1] == ['']: lines = lines[1:]
if lines[-1:] == ['']: lines = lines[:-1]
# Get rid of comment lines.
while len(lines)>0 and lines[0].startswith("#"): lines = lines[1:]
lines = [l.split("\t") for l in lines]
return lines
def last_lines_range(self,count):
"""Where are the last n lines from the end of the file?
Return value: tuple of byte offsets: begin,end
"""
content = self.content
j = len(content)
if content[j-1:j] == "\n": j -= 1
i = j
for n in range(0,count):
i2 = content.rfind("\n",0,i)
if i2<0: break
i = i2
i += 1
return i,j
def timestamp_range(self,t): # (t1,t2) -> t for python 3
"""Start and end byte offsets of a time range
t1: seconds since 1970-01-01T00:00:00+00
t2: seconds since 1970-01-01T00:00:00+00
"""
t1 = t[0]
t2 = t[1]
return [self.timestamp_location(t) for t in (t1,t2)]
def timestamp_location(self,timestamp):
"""First line with a time stamp later to the given time stamp.
Return value: byte offset from the beginning of the file.
Length of file if all timestamp in the file are earlier
timestamp: seconds since 1970-01-01T00:00:00+00"""
from numpy import isnan,clip
text = self.content
        offset = len(text)//2
        step = len(text)//4
while step > 0:
##debug("offset %r, step %r" % (offset,step))
t = self.next_timestamp(text,offset)
if isnan(t): offset = len(text); break
if t <= timestamp: offset += step
else: offset -= step
offset = clip(offset,0,len(text))
            step = (step+1)//2 if step > 1 else 0
return offset
@staticmethod
def next_timestamp(text,offset):
from time_string import timestamp
from numpy import nan
i = text.find("\n",offset)+1
if i < 0: t = nan
else:
j = text.find("\t",i)
if j < 0: t = nan
else: t = timestamp(text[i:j])
return t
@property
def content(self):
from normpath3 import normpath
filename = normpath(self.filename)
from mmap import mmap,ACCESS_READ
try:
            f = open(filename)
content = mmap(f.fileno(),0,access=ACCESS_READ)
except IOError: content = ""
return content
@property
def content(self):
from os.path import exists,getsize
from normpath3 import normpath
filename = normpath(self.filename)
if exists(filename):
size_change = getsize(filename) - len(self.cached_content)
if size_change > 0:
##debug("Logfile: Reading %d bytes" % size_change)
                f = open(filename)
f.seek(len(self.cached_content))
self.cached_content += f.read()
elif size_change < 0:
##debug("Logfile: Reading %d bytes" % getsize(filename))
                self.cached_content = open(filename).read()
else: self.cached_content = ""
return self.cached_content
def get_cached_content(self):
if self.filename in self.file_cache:
content = self.file_cache[self.filename]
else: content = ""
return content
def set_cached_content(self,content):
self.file_cache[self.filename] = content
cached_content = property(get_cached_content,set_cached_content)
file_cache = {}
@property
def start_time(self):
from time_string import timestamp
from time import time
lines = self.lines((0,80))
try: t = timestamp(lines[0][0])
except: t = time()
return t
def __len__(self): return self.content[:].count("\n")-1
logfile = LogFile
def convert(x,name):
"""Try to convert string to a Python object.
if not possible return a string
name: if "date time", force conversion from string to seconds"""
if name == "date time": return timestamp(x)
try: return float(x)
except: pass
try: return timestamp(x)
except: pass
return x
def timestamp(date_time):
"""Convert a date string to number of seconds since 1 Jan 1970 00:00 UTC
date_time: e.g. "2017-10-04 20:17:34.286479-0500"
or "2017-10-04 20:17:34-0500"
"""
from datetime import datetime
if date_time[-5] in "+-": date_time,TZ = date_time[:-5],date_time[-5:]
else: TZ = "+0000"
if "." in date_time: format = "%Y-%m-%d %H:%M:%S.%f"
else: format = "%Y-%m-%d %H:%M:%S"
utc_dt = datetime.strptime(date_time,format)
timestamp = (utc_dt - datetime(1970, 1, 1)).total_seconds()
TZ_offset = int(TZ[0:3])*3600
timestamp -= TZ_offset
return timestamp
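# Illustrative check (not in the original file): the conversion above should give
# timestamp("2017-10-04 20:17:34-0500") == 1507166254.0, i.e. 2017-10-05 01:17:34 UTC.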
##from time_string import timestamp
if __name__ == "__main__":
from pdb import pm # for debugging
import logging; logging.basicConfig(level=logging.DEBUG)
from channel_archiver3 import channel_archiver
from time import time
self = channel_archiver.logfile("NIH:TEMP.RBV")
print('t=time(); x=self.history("date time","value",time_range=(time()-10*60,time())); time()-t')
print('len(self.content)')
print('t=time(); x=self.content; time()-t')
|
py | 1a554ec8e3ccd13d9d45094eadb53005ada0ed0d | from WMCore.Configuration import Configuration
import os,sys
config = Configuration()
reqNamedFromArg = [ arg for arg in sys.argv if arg.startswith( 'General.requestName=' ) ][0].split( '=' )[-1]
puFromArg = reqNamedFromArg[ reqNamedFromArg.find('PU')+2:]
generationInfo = {'0p5':[0.5 , 200 , 500] ,
'1' : [1.0 , 200 , 500] ,
'1p5' : [1.5 , 200 , 500 ] ,
'10' : [10 , 200 , 500 ] ,
'50' : [50 , 200 , 200 ] ,
'100' : [100 , 500 , 100],
'140' : [140 , 600 , 80 ] ,
'200' : [200 , 1000 , 50 ] }
config.section_('General')
config.General.requestName = ''
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.section_('JobType')
config.JobType.pluginName = 'PrivateMC'
config.JobType.psetName = 'GEN_SIM_DIGI_cfg.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.maxJobRuntimeMin = 3000
config.JobType.sendPythonFolder = True
config.JobType.numCores = 2
config.JobType.maxMemoryMB = 5000
config.JobType.maxJobRuntimeMin = 5000
config.JobType.pyCfgParams = ["pu={0}".format(generationInfo[puFromArg][0])]
config.section_('Data')
config.Data.outputPrimaryDataset = 'NuGun'
config.Data.splitting = 'EventBased'
config.Data.unitsPerJob = generationInfo[puFromArg][2]
config.Data.totalUnits = generationInfo[puFromArg][2] * generationInfo[puFromArg][1]
config.Data.publication = True
config.Data.outputDatasetTag = 'FBCMNuGunPU{0}'.format(puFromArg)
config.Data.outLFNDirBase = '/store/group/dpg_bril/comm_bril/phase2-sim/FBCM/'
config.section_("Site")
config.Site.storageSite = "T2_CH_CERN"
config.Site.whitelist = ["T2_CH_CERN"]
|
py | 1a554f1fd7690443e078fca71dcab0fea96956d6 | import sublime
from .event_handler import EventHandler
from .settings import Settings
package_control_installed = False
LOCAL_PACKAGES_VERSION = "0.1.3"
evaluating = False
already_evaluate = False
retry_times = 3
def plugin_loaded():
Settings.reset()
Settings.startup()
print("[Local Packages] v%s" % (LOCAL_PACKAGES_VERSION))
check_package_control()
def check_package_control():
try:
__import__("Package Control").package_control
global package_control_installed
package_control_installed = True
except:
global retry_times
if retry_times > 0:
retry_times -= 1
sublime.set_timeout(check_package_control, 3000)
else:
sublime.error_message(
"Package Control is not found.\n\n" +
"Local Packages will now disabled"
)
return
EventHandler().register_handler(
evaluate_install,
EventHandler().ON_LOAD
)
evaluate_install()
def evaluate_install(view=None):
global evaluating, already_evaluate
if evaluating:
return
if not already_evaluate:
print("[Local Packages] Evaluating missing packages")
from .package_evaluator import PackageEvaluatorThread
evaluating = True
PackageEvaluatorThread(
window=sublime.active_window(),
callback=on_installed
).start()
def on_installed(failed_packages=[]):
global evaluating, already_evaluate
evaluating = False
if already_evaluate:
return
if len(failed_packages) > 0:
msg = "Local Packages failed to install %s missing packages...\n" % (
len(failed_packages)
)
limit = 10
for package in failed_packages:
limit -= 1
if limit < 0:
break
msg += " - %s\n" % (package)
if limit < 0:
msg += "and more..."
sublime.error_message(msg)
else:
print("[Local Packages] Dependencies already installed")
already_evaluate = True
|
py | 1a5550315410643e621f97426d34682bc895e9a5 | """
Authorization for Admin API
"""
import re
from shared.models.dashboard_entities import AdminDashboardUser
from shared.service.jwt_auth_wrapper import JWTAuthManager
SCHOOL_REGEX: re.Pattern = re.compile('(?P<school>.+)-admin', flags=re.I)
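# e.g. an assumed role of "example-school-admin" (hypothetical) would yield school == "example-school"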
# JWT Authentication Manager
AUTH_MANAGER = JWTAuthManager(oidc_vault_secret="oidc/admin-jwt",
object_creator=lambda claims, assumed_role, user_roles: AdminDashboardUser(
last_name=claims['family_name'],
first_name=claims['given_name'],
email=claims['email'],
roles=user_roles,
school=SCHOOL_REGEX.match(assumed_role).group('school')
))
OIDC_COOKIE = AUTH_MANAGER.auth_cookie('kc-access', allow_role_switching=True)
# KeyCloak Access Token set by OIDC Proxy (Auth0 Lock)
|
py | 1a555146a2927a6e11de6a11ea4cf08b931b6845 | from urllib.robotparser import RobotFileParser
from urllib.request import urlopen, Request
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
rp = RobotFileParser()
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:67.0) Gecko/20100101 Firefox/67.0'}
req = Request(url='https://www.jianshu.com/robots.txt', headers=headers)
rp.parse(urlopen(req).read().decode('utf-8').split('\n'))
print(rp.can_fetch('*', 'https://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*', "https://www.jianshu.com/search?q=python&page=1&type=collections")) |
py | 1a5551c564a93ec6b4002aad98ca08c102016d96 | import bin.htmlPython as py
import json
import sys
def test_generateHtml():
ha = open("files/htmlTest.txt")
ha = ha.read().strip()
j = json.load(open("output/aaa/aaa.json"))
htmlFile = open("aaa.html", "w")
sha256 = j['analysis']['res']
artefacts = j['analysis']['data']['art']
metas = j['analysis']['met']
py.generateHTML(metas, artefacts, sha256, htmlFile)
htmlFile.close()
htmlContent = open("aaa.html")
assert htmlContent.read() == ha
|
py | 1a5551f408b3455dfbebdb9e6041941c9a4e3fed | # Import Python Libs
from __future__ import absolute_import
import os
import logging
import shutil
# Local imports
from . import constants
from . import util_which
from . import keyring
from . import ops_pool
from . import rados_client
log = logging.getLogger(__name__)
class Error(Exception):
"""
Error
"""
def __str__(self):
doc = self.__doc__.strip()
return ': '.join([doc] + [str(a) for a in self.args])
class rgw_ctrl(rados_client.ctrl_rados_client):
def __init__(self, **kwargs):
super(rgw_ctrl, self).__init__(**kwargs)
self.service_name = "ceph-radosgw"
# Set path to rgw binary
self.path_service_bin = util_which.which_ceph_rgw.path
self.bootstrap_keyring_type = 'rgw'
self.keyring_service_name = 'client.{name}'.format(name=self.ceph_client_id)
self.keyring_service_capabilities = [
'osd', 'allow rwx',
'mon', 'allow rw'
]
def _set_rgw_path_lib(self):
if self.ceph_client_id == None:
raise Error("rgw name not specified")
self.rgw_path_lib = '{path}/{cluster}-{name}'.format(
path=constants._path_ceph_lib_rgw,
cluster=self.model.cluster_name,
name=self.ceph_client_id
)
def rgw_pools_missing(self):
requiredPools = set([".rgw",
".rgw.control",
".rgw.gc",
".log",
".intent-log",
".usage",
".users",
".users.email",
".users.swift",
".users.uid"
])
pool_ops = ops_pool.ops_pool(self.model)
pool_ops.pool_list()
if self.model.pool_list == None:
log.error("Failed to list available pools")
return False
foundnames = set()
for pool in self.model.pool_list:
foundnames.add(pool)
return list(requiredPools.difference(foundnames))
def rgw_pools_create(self):
rc = True
pool_ops = ops_pool.ops_pool(self.model)
pool_ops.pool_list()
for name in self.rgw_pools_missing():
log.info("Adding missing pool:%s" % (name))
try:
pool_ops.pool_add(name, pg_num=16)
except (ops_pool.Error) as err:
log.error(err)
log.error("Failed to add pool '%s'" % (name))
rc = False
return rc
def prepare(self):
# Due to the way keyring profiles work and the init scripts for rgw we need to
# force users to only create rgw with a 'rgw.' prefix. The reason we dont hide
# this from the user is due to both the systemd files and rgw deployments may
# exist without the prefix if the bootstrap keyring was not used in the key
# creation for the rgw service.
if not self.ceph_client_id.startswith("rgw."):
raise Error("rgw name must start with 'rgw.'")
self.service_available()
self._set_rgw_path_lib()
path_bootstrap_keyring = keyring._get_path_keyring_rgw(self.model.cluster_name)
if not os.path.isfile(path_bootstrap_keyring):
raise Error("Keyring not found at %s" % (path_bootstrap_keyring))
if not os.path.isdir(self.rgw_path_lib):
log.info("Make missing directory:%s" % (self.rgw_path_lib))
os.makedirs(self.rgw_path_lib)
self.keyring_service_path = os.path.join(self.rgw_path_lib, 'keyring')
self.keyring_service_create()
def remove(self):
self._set_rgw_path_lib()
if not os.path.isdir(self.rgw_path_lib):
return
rgw_path_keyring = os.path.join(self.rgw_path_lib, 'keyring')
if os.path.isfile(rgw_path_keyring):
log.info("Remove from auth list keyring:%s" % (rgw_path_keyring))
try:
self.keyring_auth_remove()
except Error:
log.error("Failed to remote from auth list")
removetree = "%s/" % (self.rgw_path_lib)
log.info("Remove directory content:%s" % (removetree))
shutil.rmtree(removetree)
|
py | 1a55521a00d5110fc2b2782d382205b113b1bc63 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple endpoint that returns an email or an attachment from one"""
""" THIS ONLY DEALS WITH PUBLIC EMAILS FOR NOW - AAA IS BEING WORKED ON"""
import plugins.server
import plugins.session
import plugins.messages
import plugins.database
import aiohttp.web
import plugins.aaa
import base64
import typing
async def process(
server: plugins.server.BaseServer, session: plugins.session.SessionObject, indata: dict,
) -> typing.Union[dict, aiohttp.web.Response]:
# First, assume permalink and look up the email based on that
email = await plugins.messages.get_email(session, permalink=indata.get("id"))
# If not found via permalink, it might be message-id instead, so try that
if email is None:
email = await plugins.messages.get_email(session, messageid=indata.get("id"))
# If email was found, process the request if we are allowed to display it
if email and isinstance(email, dict) and not email.get("deleted"):
if plugins.aaa.can_access_email(session, email):
# Are we fetching an attachment?
if not indata.get("attachment"):
email["gravatar"] = plugins.messages.gravatar(email)
return email
else:
fid = indata.get("file")
for entry in email.get("attachments", []):
if entry.get("hash") == fid:
ct = entry.get("content_type") or "application/binary"
headers = {
"Content-Type": ct,
"Content-Length": str(entry.get("size")),
}
if "image/" not in ct and "text/" not in ct:
headers["Content-Disposition"] = f"attachment; filename=\"{entry.get('filename')}\""
try:
assert session.database, "Database not connected!"
attachment = await session.database.get(
index=session.database.dbs.attachment, id=indata.get("file")
)
if attachment:
blob = base64.decodebytes(attachment["_source"].get("source").encode("utf-8"))
return aiohttp.web.Response(headers=headers, status=200, body=blob)
except plugins.database.DBError:
pass # attachment not found
return aiohttp.web.Response(headers={}, status=404, text="Attachment not found")
return aiohttp.web.Response(headers={}, status=404, text="Email not found")
def register(server: plugins.server.BaseServer):
return plugins.server.Endpoint(process)
|
py | 1a5552c0f93c3838670a54e9a32d4cd489b16a69 | import re
def URL_Maker(ID):
URL_Info_Data = 'http://www.tsetmc.com/loader.aspx?ParTree=151311&i=' + ID[0]
URL_Realtime_Data = 'http://www.tsetmc.com/tsev2/data/instinfofast.aspx?i=' + ID[0] + ID[1]
Stock_URLs = [URL_Info_Data, URL_Realtime_Data]
return Stock_URLs
def Negative_Detector(i):
return - float(re.findall(r'(\d+)',i)[0]) if i[0] == '(' else float(i)
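# Illustrative note (not in the original file): values wrapped in parentheses are read as
# negatives, e.g. Negative_Detector('(123)') -> -123.0 and Negative_Detector('45.6') -> 45.6;
# the integer-only regex drops the decimal part of parenthesised values ('(45.6)' -> -45.0).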
|
py | 1a555501169ff50d2ca4ae28fb3842ae14a31695 | # -*- coding: utf-8 -*-
# Radproc - A GIS-compatible Python-Package for automated RADOLAN Composite Processing and Analysis.
# Copyright (c) 2018, Jennifer Kreklow.
# DOI: https://doi.org/10.5281/zenodo.1313701
#
# Distributed under the MIT License (see LICENSE.txt for more information), complemented with the following provision:
# For the scientific transparency and verification of results obtained and communicated to the public after
# using a modified version of the work, You (as the recipient of the source code and author of this modified version,
# used to produce the published results in scientific communications) commit to make this modified source code available
# in a repository that is easily and freely accessible for a duration of five years after the communication of the obtained results.
"""
================================
DWD MR90 Gauge Data Processing
================================
Collection of functions for processing DWD rain gauge data in MR90 format.
Convert gauge data to pandas DataFrames with same format as RADOLAN data and saves them as HDF5 datasets.
.. autosummary::
:nosignatures:
:toctree: generated/
stationfile_to_df
summarize_metadata_files
dwd_gauges_to_hdf5
.. module:: radproc.dwd_gauge
:platform: Windows
:synopsis: Python package radproc (Radar data processing), Module arcgis
.. moduleauthor:: Jennifer Kreklow
"""
import numpy as np
import pandas as pd
import os, gc
from datetime import datetime
from multiprocessing import Pool
import warnings, tables
def _read_line(line):
"""
Read in one line (= 1 hour) of gauge data according to MR90 format description.
10-minute-blocks are merged to 60-minute-blocks and time index is shifted to make data hours begin at hh:50 and convert time zone from MEZ to UTC.
:Parameters:
------------
line : string
data line containing station number, date and minute measurement data of weighing (Tropfer) and seesaw (Wippe) method in 10-minute-blocks.
:Returns:
---------
data : dictionary
with data collected from line.
        keys: statnr, startdatum_dt (datetime object), startdatum (string), dateIndex_UTC, wippe, tropfer, N_gefallen and qualitaetsbyte
"""
data = dict(wippe = "", tropfer = "")
#data['kennung'] = line[0:2]
data['statnr'] = line[2:7].strip()
year = int(line[7:11])
month = int(line[11:13])
day = int(line[13:15])
hour = int(line[15:17])
data['startdatum_dt'] = datetime(year, month, day, hour)
data['startdatum'] = data['startdatum_dt'].strftime("%Y-%m-%d %H:%M")
    # Build a one-hour pandas time series with a fixed one-minute frequency.
    # Shift by -70 minutes: the hour starts at xx:50 of the previous hour and MEZ - 1 h = UTC.
data['dateIndex_UTC'] = pd.date_range(start = data['startdatum'], periods = 60, freq = '1min').shift(-70).tolist()
#leerfeld = line[17:19]
    # Concatenate the measurements from the 10-minute blocks into one 60-minute string.
    # List positions (each marking the start of a block) according to the DWD format description.
for wippe_start in [19, 100, 181, 262, 343, 424]:
        tropfer_start = wippe_start + 30  # Wippe record: 30 characters
        N_gefallen_start = wippe_start + 70  # Tropfer record: 40 characters
        #qualitaetsbyte = wippe_start + 80  # indicator record: 10 characters, quality byte = 1 character --> 81 characters in total
data['wippe'] = data['wippe'] + line[wippe_start:tropfer_start]
data['tropfer'] = data['tropfer'] + line[tropfer_start:N_gefallen_start]
#daten['N_gefallen'] = daten['N_gefallen'] + line[N_gefallen_start:qualitaetsbyte]
#daten['qualitaetsbyte'] = daten['qualitaetsbyte'] + line[qualitaetsbyte]
return data
def _interpret_line(data_dict):
"""
Convert and decode data line of one hour from dictionary to pandas DataFrame.
Decode data to precipitation values in mm,
insert np.nan as NoData value where necessary and
convert data to one-column DataFrame with time index
:Parameters:
------------
data_dict : dictionary
with data collected from data line.
necessary keys: statnr, dateIndex_UTC, wippe, tropfer
dictionary can be read in with function _read_line()
:Returns:
---------
df : one-column pandas DataFrame
with precipitation data of one hour in mm
"""
wippe = data_dict['wippe']
tropfer = data_dict['tropfer']
dateIndex = data_dict['dateIndex_UTC']
arr = np.zeros(60, dtype = np.float32)
arr.fill(np.nan)
s = pd.Series(arr, index = dateIndex)
tropferNoData = 60 * "-999"
#wippeNoData = 60 * "-99"
    # Interpretation of the data:
    # By default the Tropfer (drop counter) measurements are evaluated and written into the pandas Series s.
    # Exception: if all 60 time steps have the value -999 (NoData), the Wippe (tipping) values are used instead.
    # Each Tropfer value consists of four characters, which are read and interpreted one after another.
    # -999 = error code --> np.nan --> pass, since the Series s is already initialised with NaN
    # -001 = no precipitation --> 0.0
    # xx = xx * 0.01 mm precipitation
    # Some time steps are faulty and have the value "0000"; these are assigned a precipitation of 0.0.
if tropfer != tropferNoData:
        # Tropfer measurement available
k = 0
for i in range(0, len(tropfer), 4):
value = tropfer[i:i+4]
if value == "-999":
pass
elif value == "-001" or value == "0000":
s[dateIndex[k]] = 0.0
else:
try:
s[dateIndex[k]] = float(value)*0.01
except:
s[dateIndex[k]] = np.nan
k += 1
else:
        # Wippe measurement available.
        # Each Wippe value consists of three characters, which are read and interpreted one after another.
        # -99 = error code --> np.nan
        # -01 = no precipitation --> 0.0
        # xx = xx * 0.1 mm precipitation
        # Some time steps are faulty and have the value "000"; these are assigned a precipitation of 0.0.
k = 0
for i in range(0, len(wippe), 3):
value = wippe[i:i+3]
if value == "-99":
pass
elif value == "-01" or value == "000":
s[dateIndex[k]] = 0.0
else:
try:
s[dateIndex[k]] = float(value)*0.1
except:
s[dateIndex[k]] = np.nan
k += 1
    # Convert the Series into a one-column DataFrame.
    # Necessary so that the column name can store the station number.
df = pd.DataFrame(s.values, index = s.index, columns = [data_dict['statnr']])
return df
def stationfile_to_df(stationfile):
"""
Import a textfile with DWD rain gauge data in MR90 format into a one-column pandas DataFrame.
    Downsample frequency from 1 to 5-minute intervals to adjust temporal resolution to the best-resolved RADOLAN data product YW.
Convert time zone to UTC.
:Parameters:
------------
stationfile : string
Path and name of textfile containing rain gauge measurements.
:Returns:
---------
df : one-column pandas DataFrame
with data imported from stationfile downsampled to 5-minute intervals.
"""
#fails = []
    #for stationfile in stationfiles: --> not needed, since map() replaces this loop during parallel processing
f = open(stationfile, "r")
lines = f.readlines()
f.close()
df = pd.DataFrame()
for line in lines:
        dataline = _read_line(line)  # build the data dictionary
try:
df_hour = _interpret_line(dataline)
            df_5min = df_hour.resample('5min', closed = 'left', label = 'left').sum()
df = pd.concat([df,df_5min], axis = 0)
except:
            # Note: printing errors does not work when using parallel processing, therefore commented out.
            #print("Problem in the hour starting at %s UTC for station %s." % (str(dataline['dateIndex_UTC'][0]), dataline['statnr']))
            #fails.append((str(dataline['dateIndex_UTC'][0]), dataline['statnr']))
continue
del lines, df_hour, df_5min
gc.collect()
df = df.tz_localize('UTC')
#print "Datei %s erfolgreich bearbeitet. Dauer: %.2f Minuten" % (stationfile, (time.time() - t0)/60)
return df
def summarize_metadata_files(inFolder):
"""
    Import all metadata files and summarize the metadata in a single text file.
Metadata include information on station number and name, geographic coordinates and height above sea level.
:Parameters:
------------
inFolder : string
Path of directory containing metadata files for DWD gauges.
:Returns:
---------
summaryFile : string
Path and name of output summary file created.
"""
metaFiles = [os.path.join(inFolder, mf) for mf in os.listdir(inFolder)]
summaryFile = os.path.join(os.path.split(inFolder)[0], "metadata_summary.txt")
outFile = open(summaryFile, "w")
i = 0
for f in metaFiles:
infile = open(f, "r")
while True:
line = infile.readline().strip()
if line.startswith("Station="):
break
#print line
line = line.replace(":", " ")
outFile.write(line[:-1] + "\n")
infile.close()
i += 1
outFile.close()
return summaryFile
def dwd_gauges_to_hdf5(inFolder, HDFFile):
"""
Import all textfiles containing DWD rain gauge data in MR90 format from input folder into a DataFrame and save it as monthly HDF5 datasets.
Frequency is downsampled from 1 to 5-minute intervals to adjust temporal resolution to RADOLAN product YW.
Time zone is converted from MEZ to UTC.
:Parameters:
------------
inFolder : string
Path of directory containing textfiles with DWD rain gauge data in MR90 format.
HDFFile : string
Path and name of the HDF5 file.
If the specified HDF5 file already exists, the new dataset will be appended; if the HDF5 file doesn't exist, it will be created.
:Returns:
---------
None
Save monthly DataFrames to specified HDF5 file.
:Note:
------
    To import gauge data from HDF5, you can use the same functions from radproc.core as for RADOLAN data since both are stored in the same data format and structure.
"""
stationfiles = [os.path.join(inFolder, f) for f in os.listdir(inFolder)]
#stationframes = []
    # Process the station files with parallel processing to speed things up.
    # Pool() from the multiprocessing module spawns several subprocesses on different CPU cores,
    # which bypass the global interpreter lock (GIL) that normally applies in Python.
    # map() takes a function and a list as input arguments and returns a list as output.
    # The function is executed for every list element on different CPU cores.
    # Optionally, the number x of cores to use can be passed as Pool(x).
    # The result, stationframes, is a list of one-column DataFrames, one per gauge station.
p = Pool()
stationframes = p.map(stationfile_to_df, stationfiles)
    # Concatenate the DataFrames into a single DataFrame with one column per station
gaugeDF = pd.concat(stationframes, axis = 1, join = 'outer', copy=False)
#ombroDF = ombroDF.asfreq('5min')
gaugeDF.columns.name = 'DWD gauges'
gaugeDF.index.name = 'Date (UTC)'
#summaryFile = summarize_metadata_files(inFolder_metadata)
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
hdf = pd.HDFStore(HDFFile, mode = "a")
for year in np.unique(gaugeDF.index.year):
for month in range(1, 13):
try:
ind = "%i-%02i" %(year, month)
HDFDataset = "%i/%i" %(year, month)
hdf.put(HDFDataset, gaugeDF.loc[ind], data_columns = True, index = True)
except:
# in case of unavailable months
continue
hdf.close()
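# ---------------------------------------------------------------------------
# Usage sketch (the paths below are hypothetical placeholders). Because
# dwd_gauges_to_hdf5() uses multiprocessing.Pool, it should be called from a
# __main__ guard, especially on Windows.
if __name__ == '__main__':
    in_folder = r'C:\data\dwd_mr90_stations'  # hypothetical folder with MR90 station files
    hdf_file = r'C:\data\dwd_gauges.h5'       # hypothetical output HDF5 file
    dwd_gauges_to_hdf5(in_folder, hdf_file)
    # Read one monthly dataset back; keys follow the "year/month" pattern used above.
    with pd.HDFStore(hdf_file, mode='r') as hdf:
        print(hdf['2016/1'].head())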
|
py | 1a5555298c8914acc2d26ec14c2175967165d253 | import random
# DICTIONARIES
# A dictionary is a data structure that holds an unordered collection.
# While list elements are ordered by index, dictionary items are stored as key-value pairs.
# Dictionaries resemble lists, but there is one fundamental difference: they consist of keys and values.
# The key is the element by which we look up a value.
dictionaty_list = {'name': 'Ariel'}  # first the key 'name', then the value 'Ariel', inside curly braces
# A key must be an immutable object, while a value can be any object.
# A dictionary cannot contain duplicate keys, whereas values may repeat (coincide).
# Any object type can be used as a value, including lists and other dictionaries.
# In a dictionary values are accessed by key, whereas in lists they are accessed by index.
# The car brand is the key, the car price is the value
car_prices = {'opel': 5000, 'toyota': 7000, 'bmw': 10000}
# Just like a printed dictionary: find the word (key) and get its meaning (value).
print('Printing car prices', car_prices)  # Output: {'opel': 5000, 'toyota': 7000, 'bmw': 10000}
# This data structure is called a dictionary
# All dictionary operations are very fast; it is an efficiently implemented structure.
# ---------------------- Ways to initialize dictionaries ----------------------
# Way 1
# Dictionaries are declared (initialized) with curly braces {}
# dict_temp = {}
# Fill the dictionary with key-value pairs
dict_temp = {'dict1': 1, 'dict2': 2.1, 'dict3': 'name', 'dict4': [1, 2, 3]}
print(type(dict_temp), dict_temp)  # print the dictionary's type and the dictionary itself
# Way 2
# Using the fromkeys() method
dict_temp = dict.fromkeys(['a', 'b'])  # dict refers to the built-in class
# This creates a dictionary with the keys a and b and empty values.
print(type(dict_temp), dict_temp)  # output: <class 'dict'> {'a': None, 'b': None}
# To set the values, pass a second argument with the default value
dict_temp = dict.fromkeys(['a', 'b'], [12, '2020'])  # dict refers to the built-in class
print('Using the fromkeys() method', type(dict_temp),
      dict_temp)  # output: <class 'dict'> {'a': [12, '2020'], 'b': [12, '2020']}
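# Note: with fromkeys() every key shares the *same* value object, so a mutable
# default such as a list is shared by all keys.
shared_default = dict.fromkeys(['a', 'b'], [])
shared_default['a'].append(1)
print(shared_default)  # {'a': [1], 'b': [1]} - both keys see the change
# A dict comprehension creates an independent list for every key instead:
independent_default = {key: [] for key in ['a', 'b']}
independent_default['a'].append(1)
print(independent_default)  # {'a': [1], 'b': []}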
# Way 3
# Initializing a dictionary with a comprehension
dict_temp = {a: a ** 2 for a in range(10)}  # the same construct as in list comprehensions.
# a is the key, a ** 2 is the value, followed by the loop clause
print('Initialization with a comprehension', type(dict_temp), dict_temp)
# output: <class 'dict'> {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}
# Way 4 (rarely used, since every key-value pair has to be typed in manually)
dict_temp = dict(brend='volvo', price=5000)
print(type(dict_temp), dict_temp)  # output: <class 'dict'> {'brend': 'volvo', 'price': 5000}
# Way 5: there is also functionality for creating a dictionary from sequences
# There is a dictionary class, dict. It is a built-in class, so it is written in lowercase.
# Our own classes should be written with a capital letter.
# The dict class has its own class-level method fromkeys()
# With this method we can build a dictionary from two sequences.
# For example, we can pass it two tuples
my_dict = dict.fromkeys((1, 2, 3), ('apple', 'orange', 'banana'))
print(my_dict)
# We get a dictionary where every key is assigned the second argument as its value
# {1: ('apple', 'orange', 'banana'), 2: ('apple', 'orange', 'banana'), 3: ('apple', 'orange', 'banana')}
# That is, dictionary creation is automated here so that we do not have to write it out by hand every time.
# ----------------------- Accessing dictionary contents --------------------------
# Contents are accessed by key, given in square brackets.
print("Printing the car brand: ", dict_temp['brend'])  # print the value by specifying its key
print('Printing the Toyota price: ', car_prices['toyota'])
# ------------------ Dictionary functions ------------------------------
# We often need to know all the keys and all the values of a dictionary
# Get all the keys of the dictionary
print(dict_temp.keys())  # returns the special dict_keys type
# output: dict_keys(['brend', 'price'])
# As a rule this type is not worked with directly; it is converted to a list:
print(list(dict_temp.keys()))  # output: ['brend', 'price']
# After that, any operations needed on the keys are done with the list type
# The values are retrieved via the values() method
print(list(dict_temp.values()))  # output: ['volvo', 5000]
# In this form, all the methods available for lists can be used when working with keys and values.
# You can also work with key-value pairs; for that, use the items() method
# items() returns a list of tuples, each a key-value pair (a tuple is the same as a list, only immutable)
print(list(dict_temp.items()))  # output: [('brend', 'volvo'), ('price', 5000)]
# ------------------------ Working with elements ---------------------------------
# We need to read values, change them (a dictionary is a mutable type) and add new ones
# Re-initialize the variable for the next examples
dict_temp = {a: a for a in range(10)}
print('Re-initialization', dict_temp)  # output: {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
# CHANGING a value.
# Assign a different value to some key
dict_temp[0] = 100
print(dict_temp)  # output: {0: 100, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
# Key 0 now maps to the value 100
# ADDING key-value pairs
dict_temp['name'] = 'Alex'  # create a new key and assign it a value.
print(dict_temp)  # output: {0: 100, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 'name': 'Alex'}
car_prices['mazda'] = 4000
print('Adding the new key \'mazda\' to the car list: ', car_prices)
# Output: {'opel': 5000, 'toyota': 7000, 'bmw': 10000, 'mazda': 4000}
# In a dictionary a value can be changed, but a key cannot
car_prices['opel'] = 2000  # here we try to add an element with the key 'opel', which already exists in the dictionary
print('Trying to add a second key \'opel\' to the dictionary ', car_prices)
# Output: {'opel': 2000, 'toyota': 7000, 'bmw': 10000, 'mazda': 4000}
# But nothing is added; the old value is simply replaced with the new one
# This is how a value is changed in a dictionary
# Values are removed from a dictionary with the del statement
# ----------------------- DICTIONARY METHODS ---------------------------------
# Dictionaries have a large number of methods. Some we have already seen: keys(), values(), items()
# With methods a dictionary can be cleared, copied or deleted
# -------------------- del -----------------------
# Deletes a dictionary entry by its key
del car_prices['toyota']
print('Removed Toyota from the car list ', car_prices)
# Output: {'opel': 2000, 'bmw': 10000, 'mazda': 4000}
# Be careful with del: if you forget to specify the key whose value you want to delete,
# this statement DELETES THE WHOLE DICTIONARY TOGETHER WITH THE VARIABLE!
# del does not return the value. If you need the removed value, use the pop() method instead
# --------------------------- pop() ------------------------
# Removes a value by its key
temp = dict_temp.pop('name')  # remove the key 'name'; its value 'Alex' is removed with it
# The removed value is returned and stored in the new variable
print(dict_temp)  # output: {0: 100, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
print(temp)  # output: Alex
# If the key does not exist, the given default is returned without raising an exception
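# For example, pop() with a default never raises, even for a missing key:
print(dict_temp.pop('missing key', 'no such key'))  # output: no such key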
# ------------------ clear() ----------------------
# The clear() method empties the dictionary while keeping the variable
car_prices.clear()
print('Clearing the car_prices dictionary ', car_prices)  # Output: {}
# ------------------------- Nested structures in dictionaries -----------------------------
# Dictionaries can hold not only simple key-value pairs but also more complex structures,
# such as, for example, another dictionary.
# Dictionaries are often used to describe objects of some kind.
# For readability they can be written across several lines
person = {
    'first name': 'Jack',
    'second name': "Brown",
    'age': 23,
    'hobbies': ['footbal', 'singing', 'photo'],  # a list
    'children': {'son': 'Alex', 'daugter': 'Marry'}  # another dictionary
}
# How do we access the contents of this dictionary?
print("Getting information from the dictionary: the person's age ", person['age'])  # Output: 23
# That is, we give the key and get its value by that key.
# How do we get a list value?
print('Getting the list value from the dictionary ', person['hobbies'])  # Output: ['footbal', 'singing', 'photo']
# Here we get the whole list. But what if we want just one of the items in the list?
# For example the last one, 'photo'? This can be done in two steps.
# Step 1: assign the list to a new variable
hobbies = person['hobbies']  # put the whole list into a variable
# Step 2: get the item from the variable by its index
print('Getting the item from the variable by index', hobbies[2])  # Output: photo
# The same can be written more compactly
print('Getting the item from the nested list the short way ',
      person['hobbies'][2])
# How do we access the nested dictionary?
# Again in two steps:
# Step 1: declare a new variable and assign it the values of the nested dictionary
childrens = person['children']
# Step 2: print the value by specifying the required field of the nested dictionary
print('Accessing the nested dictionary ', childrens['son'])  # use the variable declared above
# A shorter way of writing it:
print('Getting the value from the nested dictionary the short way ',
      person['children']['son'])
# Adding data to a nested dictionary
person['car'] = 'Mazda'  # add a new key-value pair
print('Printing the whole nested dictionary', person)
# Output: {'first name': 'Jack', 'second name': 'Brown', 'age': 23, 'hobbies': ['footbal', 'singing', 'photo'],
# 'children': {'son': 'Alex', 'daugter': 'Marry'}, 'car': 'Mazda'}
# the car information is appended at the end.
# Adding data in a loop
lst_name = ['Marry', 'Alex', 'Kate', 'Jack', 'Anna', 'Kate', 'Ronald', 'Maria', 'Tatyana', 'Evgeniy',
'Alex', 'Maria', 'Svetlana', 'Artem', 'Igor', 'Ilya']
names_dict = {}
for i in range(len(lst_name)):
names_dict[lst_name[i]] = lst_name.count(lst_name[i])
# "Marry" 1
# "Alex" 2
# "Kate" 2
# "Jack" 1
# "Anna" 1
# "Ronald" 1
# "Maria" 2
# "Tatyana" 1
# "Evgeniy" 1
# "Svetlana" 1
# "Artem" 1
# "Igor" 1
# "Ilya" 1
# Changing data in a nested dictionary
# Suppose we want to change an item in the nested list: 'footbal' to 'basketbal'
person['hobbies'][0] = 'basketbal'  # access the nested list and change its item by index
print('Changing a value in the nested list', person)
# Output: {'first name': 'Jack', 'second name': 'Brown', 'age': 23, 'hobbies': ['basketbal', 'singing', 'photo'],
# 'children': {'son': 'Alex', 'daugter': 'Marry'}, 'car': 'Mazda'}
# One more way to inspect a nested dictionary
print('Getting the list of keys of the nested dictionary', person.keys())  # using the keys() method
# Output: dict_keys(['first name', 'second name', 'age', 'hobbies', 'children', 'car'])
print('Getting the list of values of the nested dictionary', person.values())  # using the values() method
# Output:
# dict_values(['Jack', 'Brown', 23, ['basketbal', 'singing', 'photo'], {'son': 'Alex', 'daugter': 'Marry'}, 'Mazda'])
print('Getting the list of items of the nested dictionary', person.items())  # using the items() method
# Output: dict_items([('first name', 'Jack'), ('second name', 'Brown'), ('age', 23),
# ('hobbies', ['basketbal', 'singing', 'photo']), ('children', {'son': 'Alex', 'daugter': 'Marry'}), ('car', 'Mazda')])
# A structure in parentheses like ('first name', 'Jack') is called a tuple.
# --------------------- Iterating over a dictionary ------------------------------------
# Dictionaries are iterated over in the same way as lists
for pair in dict_temp.items():
    print(pair)
# The output is the list of key-value pairs:
# (0, 100)
# (1, 1)
# (2, 2)
# (3, 3)
# (4, 4)
# (5, 5)
# (6, 6)
# (7, 7)
# (8, 8)
# (9, 9)
# You can also iterate while unpacking the pairs into separate variables
for key, value in dict_temp.items():  # key is the first element of the pair, value is the second
    print(key, value)
# Output:
# 0 100
# 1 1
# 2 2
# 3 3
# 4 4
# 5 5
# 6 6
# 7 7
# 8 8
# 9 9
# You can iterate over the individual parts of the structure in the same way
# Over the keys:
for key in dict_temp.keys():
    print(key)
# Over the values:
for value in dict_temp.values():
    print(value)
# Operations on the values
for value in dict_temp.values():
    print(value + 10, end=' ')  # 110 11 12 13 14 15 16 17 18 19
# Over several dictionaries at once
jack = {
'name': 'jack',
'car': 'bmw'
}
john = {
'name': 'john',
'car': 'audi'
}
# Say we need to collect, for example, the information about the cars.
# create a list of dictionaries
drivers = [jack, john]
cars = []
for persons in drivers:
cars.append(persons['car'])
print(cars)
# A shorter version using a list comprehension
new_cars = [persons['car'] for persons in drivers]
print(new_cars)
# Writing persons['car'] like this is essentially an anti-pattern: we access the key 'car' assuming it exists.
# But what if it is not there? What if the list contains a person who has no car? Accessing a missing
# key would raise an exception and the script would stop. That is why it is considered good practice to use
# the dictionaries' special get() method, whose first argument is the key of the data we want.
new_cars = [persons.get('car', '') for persons in drivers]
# That is, we know or assume that the dictionary has a 'car' key and we need to get it.
# So we pass that key name to get(), and as the second argument '' we pass the default value
# for the case when the key is missing. Instead of raising an exception, it returns an empty string.
# ------------------------- Sorting a dictionary -------------------------------------
# Strictly speaking the contents of a dictionary cannot be sorted: a Python dictionary has no sorted order
# (since Python 3.7 insertion order is preserved, but that is still not a sorted order).
# Even if you add items to the dictionary in order, for example alphabetically,
# they may come out in a different order when printed.
#
# However, when extracting items from a dictionary you can make this happen in a specific order.
# For that, an additional ordered structure that can be sorted is used, for example a list.
#
# Sorting by keys
# Sorting a dictionary by its keys is the simplest case. The algorithm for printing the contents:
#
# Build a list of the dictionary's keys.
# Sort it.
# Loop over the list with for, using each list element as a dictionary key.
d = {'a': 10, 'b': 15, 'c': 4}
list_keys = list(d.keys())
list_keys.sort()
for i in list_keys:
print(i, ':', d[i])
# a : 10
# b : 15
# c : 4
# Sorting by values
# Sorting a dictionary by its values is harder, since dictionary items can only be accessed by key.
# However, you can build a list of ("key", "value") tuples and sort it by the second element of each pair.
# The program then works with this ordered structure rather than with the original dictionary itself.
d = {'a': 10, 'b': 15, 'c': 4}
list_d = list(d.items())
print(list_d)
# [('a', 10), ('b', 15), ('c', 4)]
list_d.sort(key=lambda i: i[1])
print(list_d)
# [('c', 4), ('a', 10), ('b', 15)]
for i in list_d:
print(i[0], ':', i[1])
# c : 4
# a : 10
# b : 15
# If we used the sort() method without the key parameter, the sort would be done by the first elements of the tuples.
# key takes a function as its value. Here a lambda function is used, which keeps the code short.
# The tuples are passed into it and their second elements are returned, and the sort is done on those.
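# Since Python 3.7 plain dicts preserve insertion order, so a sorted copy can also
# be built directly in one line:
sorted_by_value = dict(sorted(d.items(), key=lambda kv: kv[1]))
print(sorted_by_value)  # {'c': 4, 'a': 10, 'b': 15}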
#
# The OrderedDict class of the collections module
# The collections module contains the OrderedDict class, a subclass of dict, i.e. of the ordinary built-in
# Python dictionary. OrderedDict creates dictionary objects that remember the order of their elements, and the class has several methods that can change the order of the elements.
from collections import OrderedDict
a = OrderedDict({1: 10, 0: 5})
print(a)
# OrderedDict([(1, 10), (0, 5)])
a[2] = 20
print(a)
# OrderedDict([(1, 10), (0, 5), (2, 20)])
for i in a:
print(i, ':', a[i])
# 1 : 10
# 0 : 5
# 2 : 20
# ---------------------------- Solving problems with dictionaries ------------------------------
# How to count the number of repetitions of identical elements in a sequence
text = "Ну и деревня! С роду таких деревень не видел и не знал, что такие такие деревни бывают."
count_dict = {}
# In the first loop, initialize the dictionary
for i in range(len(text)):
    count_dict[text[i]] = 0  # every key (a character) is assigned the value 0
# In the second loop, fill in the counts (variant 1)
# for i in range(len(text)):
# count_dict[text[i]] += 1
#
# print(count_dict)
# {'Н': 1, 'у': 2, ' ': 16, 'и': 7, 'д': 5, 'е': 12, 'р': 4, 'в': 5, 'н': 6, 'я': 1, '!': 1, 'С': 1, 'о': 2, 'т': 5,
# 'а': 5, 'к': 3, 'х': 1, 'ь': 1, 'л': 2, 'з': 1, ',': 1, 'ч': 1, 'б': 1, 'ы': 1, 'ю': 1, '.': 1}
# Variant 2
for i in text:  # here we walk over the text itself and i holds a single character
    count_dict[i] += 1
print(count_dict)
# Report a specific character on screen
sym = input('Enter a character: ')
if sym in count_dict:
    print('The character', sym, 'occurs in the text', count_dict[sym], 'times.')
else:
    print('There is no such character in the text.')
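# The standard library provides the same counting pattern via collections.Counter,
# which builds the character counts in a single call:
from collections import Counter
counter_dict = Counter(text)
print(counter_dict.most_common(3))  # the three most frequent characters with their counts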
# How to look at the list of keys
keys_dict = count_dict.keys()
print('List of keys ', keys_dict)
# List of keys: dict_keys(['Н', 'у', ' ', 'и', 'д', 'е', 'р', 'в', 'н', 'я', '!', 'С', 'о', 'т', 'а', 'к', 'х', 'ь',
# 'л', 'з', ',', 'ч', 'б', 'ы', 'ю', '.'])
# List of values
val_dict = count_dict.values()
print('List of values', val_dict)
# List of values: dict_values([1, 2, 16, 7, 5, 12, 4, 5, 6, 1, 1, 1, 2, 5, 5, 3, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1])
# Exercise: write a function (F) that takes a list of names and an integer N;
# it returns a list of length N with random names from the input list (names may repeat;
# you can use these values: 20 names, N = 100;
# using the random module is recommended);
lst_name = ['Nancy', 'Alice', 'Mary', 'Hanna', 'Dolores', 'Brian', 'Stanley', 'Andrew', 'Michael', 'Nickolas',
'Johnathan', 'Angeline']
N = 100
def f(lst, n):
    rand_list = []  # empty list for the result
    for i in range(n):  # loop for n iterations
        # rand_name = random.choice(lst)  # pick a random name from the list and assign it to a variable
        # rand_list.append(rand_name)  # append the random name to the result list
        rand_list.append(random.choice(lst))  # the two lines above merged into one
    return rand_list  # return the list of n random names
fin_list = f(lst_name, N)  # call the function, passing it the parameters
print(fin_list)  # print the result
# We now have the resulting list fin_list, which we will keep working with below
# Exercise: write a function that prints the most frequent name in the output of F (the list fin_list);
# Solution using a loop
names_dict = {}
for i in range(len(fin_list)):
    names_dict[fin_list[i]] = fin_list.count(fin_list[i])  # fill the dictionary inside the loop
top_names_list = list(names_dict.items())  # move the data from the dictionary into a list
top_names_list.sort(key=lambda i: i[1], reverse=True)  # sort the list by the counts
# from the largest to the smallest
print(f'The name {top_names_list[0][0]} occurs more often than the others, namely {top_names_list[0][1]} times.')
# Solution using a function
def top(fin_list):
    # Get the unique values of the list via set
    # Wrap the result in a dict, looping over the list and counting the repetitions of each name
    pop_name = dict((fin_list.count(i), i) for i in set(fin_list))
    return pop_name[max(pop_name.keys())]
print(f'The name {top(fin_list)} occurs more often than the others, namely {fin_list.count(top(fin_list))} times.')
# 3. Exercise: write a function that prints the rarest first letter among the names in the output of F.
# Solution using a loop
letters_dict = {}
for i in range(len(fin_list)):
    letters_dict[fin_list[i][0]] = letters_dict.get(fin_list[i][0], 0) + 1  # count how many names start with this letter
letters_list = sorted(letters_dict.items(), key=lambda i: i[1])
# print(letters_list)
print('The first letter', letters_list[0][0], 'occurs in the names less often than the others, namely', letters_list[0][1], 'times.')
# solution using a function
letters_list = [fin_list[i][0] for i in range(len(fin_list))]
# print(letters_list)
def rare(letters_list):
letters_dict = dict((letters_list[i], letters_list.count(letters_list[i])) for i in range(len(letters_list)))
letters_dict_sort = sorted(letters_dict.items(), key = lambda i: i[1])
return letters_dict_sort[0]
# print(rare(letters_list))
print(f'The first letter {(rare(letters_list))[0]} occurs in the names less often than the others, namely {(rare(letters_list))[1]} times.')
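# The same two exercises can also be solved with collections.Counter, the idiomatic
# tool for frequency counting:
from collections import Counter
name_counts = Counter(fin_list)
most_common_name, most_common_count = name_counts.most_common(1)[0]
print(f'The name {most_common_name} occurs more often than the others, namely {most_common_count} times.')
letter_counts = Counter(name[0] for name in fin_list)
rarest_letter, rarest_count = letter_counts.most_common()[-1]
print(f'The first letter {rarest_letter} occurs in the names less often than the others, namely {rarest_count} times.')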
|
py | 1a5555e64005ea4f888ce291312e74901c70006f | # Generated by Django 3.2 on 2021-04-11 17:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oyt', '0014_auto_20210411_1656'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='likes',
),
migrations.AddField(
model_name='video',
name='likes',
field=models.JSONField(default=[]),
preserve_default=False,
),
]
|
py | 1a5556628bc9983b84f177a8bb3a5acf13e80ee9 | import pytest
import os
from g_code_parsing.g_code_engine import GCodeEngine
from g_code_parsing.g_code_program.supported_text_modes import (
SupportedTextModes,
)
from opentrons.hardware_control.emulation.settings import (
Settings,
SmoothieSettings,
PipetteSettings,
)
from g_code_parsing.utils import get_configuration_dir
CONFIG = Settings(
host="0.0.0.0",
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
right=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
),
)
PROTOCOL_PATH = os.path.join(
get_configuration_dir(), "protocol", "protocols", "smoothie_protocol.py"
)
@pytest.fixture
def protocol_g_code_engine() -> GCodeEngine:
return GCodeEngine(CONFIG)
async def test_watcher_command_list_is_cleared(protocol_g_code_engine: GCodeEngine):
"""
    If everything cleans up correctly, then two runs of the same protocol
    should return exactly the same G-Code.
"""
with protocol_g_code_engine.run_protocol(PROTOCOL_PATH) as run_1:
run_1_desc = run_1.get_text_explanation(SupportedTextModes.G_CODE)
with protocol_g_code_engine.run_protocol(PROTOCOL_PATH) as run_2:
run_2_desc = run_2.get_text_explanation(SupportedTextModes.G_CODE)
assert run_1_desc == run_2_desc
|
py | 1a5557cd28cd0baefb4f33371ee8bf982878fd9e | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_profileprotocoloptions_mailsignature
short_description: Configure Mail signature.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
profile-protocol-options:
description: the parameter (profile-protocol-options) in requested url
type: str
required: true
firewall_profileprotocoloptions_mailsignature:
description: the top level parameters set
required: false
type: dict
suboptions:
signature:
type: str
description: 'Email signature to be added to outgoing email (if the signature contains spaces, enclose with quotation marks).'
status:
type: str
description: 'Enable/disable adding an email signature to SMTP email messages as they pass through the FortiGate.'
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure Mail signature.
fmgr_firewall_profileprotocoloptions_mailsignature:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
profile-protocol-options: <your own value>
firewall_profileprotocoloptions_mailsignature:
signature: <value of string>
status: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/profile-protocol-options/{profile-protocol-options}/mail-signature',
'/pm/config/global/obj/firewall/profile-protocol-options/{profile-protocol-options}/mail-signature'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/profile-protocol-options/{profile-protocol-options}/mail-signature/{mail-signature}',
'/pm/config/global/obj/firewall/profile-protocol-options/{profile-protocol-options}/mail-signature/{mail-signature}'
]
url_params = ['adom', 'profile-protocol-options']
module_primary_key = None
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'adom': {
'required': True,
'type': 'str'
},
'profile-protocol-options': {
'required': True,
'type': 'str'
},
'firewall_profileprotocoloptions_mailsignature': {
'required': False,
'type': 'dict',
'options': {
'signature': {
'required': False,
'type': 'str'
},
'status': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_profileprotocoloptions_mailsignature'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd()
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
|
py | 1a5558adb0f0fcd9916e832b34ab4daa523f009e | # Generated by Django 3.0.3 on 2020-10-05 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0020_auto_20201004_0413'),
]
operations = [
migrations.AddField(
model_name='profile',
name='gender',
field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='Your gender', max_length=12),
),
]
|
py | 1a555949bc8c5e4aed02ed897a57da50bbb19de5 | # -*- coding: utf-8 -*-
"""Plugin to create a Quantum Espresso neb.x input file."""
import copy
import os
from aiida import orm
from aiida.common import InputValidationError, CalcInfo, CodeInfo
from aiida.common.lang import classproperty
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.calculations import _lowercase_dict, _uppercase_dict, _pop_parser_options
from aiida_quantumespresso.utils.convert import convert_input_to_namelist_entry
from .base import CalcJob
class NebCalculation(CalcJob):
"""Nudged Elastic Band code (neb.x) of Quantum ESPRESSO distribution."""
_PREFIX = 'aiida'
# in restarts, will not copy but use symlinks
_default_symlink_usage = False
# Default input and output file names
_DEFAULT_INPUT_FILE = 'neb.dat'
_DEFAULT_OUTPUT_FILE = 'aiida.out'
_PSEUDO_SUBFOLDER = PwCalculation._PSEUDO_SUBFOLDER # pylint: disable=protected-access
_OUTPUT_SUBFOLDER = PwCalculation._OUTPUT_SUBFOLDER # pylint: disable=protected-access
# Keywords that cannot be set (for the PW input)
_blocked_keywords = []
_use_kpoints = True
@classproperty
def _internal_retrieve_list(cls):
# pylint: disable=no-self-argument
# I retrieve them all, even if I don't parse all of them
_neb_ext_list = ['path', 'dat', 'int']
return ['{}.{}'.format(cls._PREFIX, ext) for ext in _neb_ext_list]
@classproperty
def xml_filepaths(cls):
"""Return a list of relative filepaths of XML files."""
# pylint: disable=no-self-argument,not-an-iterable
filepaths = []
for filename in PwCalculation.xml_filenames:
filepath = os.path.join(cls._OUTPUT_SUBFOLDER, cls._PREFIX + '_*[0-9]', cls._PREFIX + '.save', filename)
filepaths.append(filepath)
return filepaths
@classmethod
def define(cls, spec):
"""Define the process specification."""
# yapf: disable
super().define(spec)
spec.input('metadata.options.input_filename', valid_type=str, default=cls._DEFAULT_INPUT_FILE)
spec.input('metadata.options.output_filename', valid_type=str, default=cls._DEFAULT_OUTPUT_FILE)
spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.neb')
spec.input('first_structure', valid_type=orm.StructureData, help='Initial structure')
spec.input('last_structure', valid_type=orm.StructureData, help='Final structure')
spec.input('parameters', valid_type=orm.Dict, help='NEB-specific input parameters')
spec.input('settings', valid_type=orm.Dict, required=False,
help='Optional parameters to affect the way the calculation job and the parsing are performed.')
spec.input('parent_folder', valid_type=orm.RemoteData, required=False,
help='An optional working directory of a previously completed calculation to restart from.')
# We reuse some inputs from PwCalculation to construct the PW-specific parts of the input files
spec.expose_inputs(PwCalculation, namespace='pw', include=('parameters', 'pseudos', 'kpoints', 'vdw_table'))
spec.output('output_parameters', valid_type=orm.Dict,
help='The output parameters dictionary of the NEB calculation')
spec.output('output_trajectory', valid_type=orm.TrajectoryData)
spec.output('iteration_array', valid_type=orm.ArrayData, required=False)
spec.output('output_mep', valid_type=orm.ArrayData,
help='The original and interpolated energy profiles along the minimum-energy path (mep)')
spec.default_output_node = 'output_parameters'
spec.exit_code(300, 'ERROR_NO_RETRIEVED_FOLDER',
message='The retrieved folder data node could not be accessed.')
spec.exit_code(303, 'ERROR_MISSING_XML_FILE',
message='The required XML file is not present in the retrieved folder.')
spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ',
message='The stdout output file could not be read.')
spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE',
message='The output file contains invalid output.')
spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE',
message='The stdout output file was incomplete probably because the calculation got interrupted.')
spec.exit_code(320, 'ERROR_OUTPUT_XML_READ',
message='The XML output file could not be read.')
spec.exit_code(321, 'ERROR_OUTPUT_XML_PARSE',
message='The XML output file could not be parsed.')
spec.exit_code(322, 'ERROR_OUTPUT_XML_FORMAT',
message='The XML output file has an unsupported format.')
spec.exit_code(350, 'ERROR_UNEXPECTED_PARSER_EXCEPTION',
message='The parser raised an unexpected exception.')
@classmethod
def _generate_input_files(cls, neb_parameters, settings_dict):
"""Generate the input data for the NEB part of the calculation."""
# I put the first-level keys as uppercase (i.e., namelist and card names)
# and the second-level keys as lowercase
# (deeper levels are unchanged)
input_params = _uppercase_dict(neb_parameters.get_dict(), dict_name='parameters')
input_params = {k: _lowercase_dict(v, dict_name=k) for k, v in input_params.items()}
# Force default values for blocked keywords. NOTE: this is different from PW/CP
for blocked in cls._blocked_keywords:
namelist = blocked[0].upper()
key = blocked[1].lower()
value = blocked[2]
if namelist in input_params:
if key in input_params[namelist]:
raise InputValidationError(
"You cannot specify explicitly the '{}' key in the '{}' namelist.".format(key, namelist)
)
else:
input_params[namelist] = {}
input_params[namelist][key] = value
# Create an empty dictionary for the compulsory namelist 'PATH' if not present
if 'PATH' not in input_params:
input_params['PATH'] = {}
# In case of climbing image, we need the corresponding card
ci_scheme = input_params['PATH'].get('ci_scheme', 'no-ci').lower()
climbing_image_list = settings_dict.pop('CLIMBING_IMAGES', None)
if ci_scheme == 'manual':
manual_climbing_image = True
if climbing_image_list is None:
raise InputValidationError("'ci_scheme' is {}, but no climbing images were specified for this "
'calculation.'.format(ci_scheme))
if not isinstance(climbing_image_list, list):
raise InputValidationError('Climbing images should be provided as a list.')
num_of_images = input_params['PATH'].get('num_of_images', 2)
if any([(i < 2 or i >= num_of_images) for i in climbing_image_list]):
raise InputValidationError('The climbing images should be in the range between the first '
'and the last image (excluded).')
climbing_image_card = 'CLIMBING_IMAGES\n'
climbing_image_card += ', '.join([str(_) for _ in climbing_image_list]) + '\n'
else:
manual_climbing_image = False
if climbing_image_list is not None:
raise InputValidationError("Climbing images are not accepted when 'ci_scheme' is {}.".format(ci_scheme))
input_data = '&PATH\n'
# namelist content; set to {} if not present, so that we leave an empty namelist
namelist = input_params.pop('PATH', {})
for key, value in sorted(namelist.items()):
input_data += convert_input_to_namelist_entry(key, value)
input_data += '/\n'
# Write CI cards now
if manual_climbing_image:
input_data += climbing_image_card
if input_params:
raise InputValidationError(
'The following namelists are specified in input_params, but are '
'not valid namelists for the current type of calculation: '
'{}'.format(','.join(list(input_params.keys()))))
return input_data
def prepare_for_submission(self, folder):
"""Prepare the calculation job for submission by transforming input nodes into input files.
In addition to the input files being written to the sandbox folder, a `CalcInfo` instance will be returned that
contains lists of files that need to be copied to the remote machine before job submission, as well as file
lists that are to be retrieved after job completion.
:param folder: a sandbox folder to temporarily write files on disk.
        :return: :py:class:`~aiida.common.datastructures.CalcInfo` instance.
"""
# pylint: disable=too-many-branches,too-many-statements
import numpy as np
local_copy_list = []
remote_copy_list = []
remote_symlink_list = []
# Convert settings dictionary to have uppercase keys, or create an empty one if none was given.
if 'settings' in self.inputs:
settings_dict = _uppercase_dict(self.inputs.settings.get_dict(), dict_name='settings')
else:
settings_dict = {}
first_structure = self.inputs.first_structure
last_structure = self.inputs.last_structure
# Check that the first and last image have the same cell
if abs(np.array(first_structure.cell)-
np.array(last_structure.cell)).max() > 1.e-4:
            raise InputValidationError('Different cell in the first and last image')
# Check that the first and last image have the same number of sites
if len(first_structure.sites) != len(last_structure.sites):
            raise InputValidationError('Different number of sites in the first and last image')
# Check that sites in the initial and final structure have the same kinds
if first_structure.get_site_kindnames() != last_structure.get_site_kindnames():
raise InputValidationError('Mismatch between the kind names and/or order between '
'the first and final image')
# Check that a pseudo potential was specified for each kind present in the `StructureData`
# self.inputs.pw.pseudos is a plumpy.utils.AttributesFrozendict
kindnames = [kind.name for kind in first_structure.kinds]
if set(kindnames) != set(self.inputs.pw.pseudos.keys()):
raise InputValidationError(
'Mismatch between the defined pseudos and the list of kinds of the structure.\nPseudos: {};\n'
'Kinds: {}'.format(', '.join(list(self.inputs.pw.pseudos.keys())), ', '.join(list(kindnames))))
##############################
# END OF INITIAL INPUT CHECK #
##############################
# Create the subfolder that will contain the pseudopotentials
folder.get_subfolder(self._PSEUDO_SUBFOLDER, create=True)
# Create the subfolder for the output data (sometimes Quantum ESPRESSO codes crash if the folder does not exist)
folder.get_subfolder(self._OUTPUT_SUBFOLDER, create=True)
# We first prepare the NEB-specific input file.
neb_input_filecontent = self._generate_input_files(self.inputs.parameters, settings_dict)
with folder.open(self.inputs.metadata.options.input_filename, 'w') as handle:
handle.write(neb_input_filecontent)
# We now generate the PW input files for each input structure
local_copy_pseudo_list = []
for i, structure in enumerate([first_structure, last_structure]):
# We need to a pass a copy of the settings_dict for each structure
this_settings_dict = copy.deepcopy(settings_dict)
pw_input_filecontent, this_local_copy_pseudo_list = PwCalculation._generate_PWCPinputdata( # pylint: disable=protected-access
self.inputs.pw.parameters, this_settings_dict, self.inputs.pw.pseudos, structure, self.inputs.pw.kpoints
)
local_copy_pseudo_list += this_local_copy_pseudo_list
with folder.open('pw_{}.in'.format(i + 1), 'w') as handle:
handle.write(pw_input_filecontent)
# We need to pop the settings that were used in the PW calculations
for key in list(settings_dict.keys()):
if key not in list(this_settings_dict.keys()):
settings_dict.pop(key)
        # We avoid copying the same pseudopotential twice to the same filename
local_copy_pseudo_list = set(local_copy_pseudo_list)
# We check that two different pseudopotentials are not copied
# with the same name (otherwise the first is overwritten)
if len({filename for (uuid, filename, local_path) in local_copy_pseudo_list}) < len(local_copy_pseudo_list):
raise InputValidationError('Same filename for two different pseudopotentials')
local_copy_list += local_copy_pseudo_list
# If present, add also the Van der Waals table to the pseudo dir. Note that the name of the table is not checked
# but should be the one expected by Quantum ESPRESSO.
vdw_table = self.inputs.get('pw.vdw_table', None)
if vdw_table:
local_copy_list.append((
vdw_table.uuid,
vdw_table.filename,
os.path.join(self._PSEUDO_SUBFOLDER, vdw_table.filename)
))
# operations for restart
parent_calc_folder = self.inputs.get('parent_folder', None)
symlink = settings_dict.pop('PARENT_FOLDER_SYMLINK', self._default_symlink_usage) # a boolean
if symlink:
if parent_calc_folder is not None:
# I put the symlink to the old parent ./out folder
remote_symlink_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
self._OUTPUT_SUBFOLDER, '*'), # asterisk: make individual symlinks for each file
self._OUTPUT_SUBFOLDER
))
# and to the old parent prefix.path
remote_symlink_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
'{}.path'.format(self._PREFIX)),
'{}.path'.format(self._PREFIX)
))
else:
# copy remote output dir and .path file, if specified
if parent_calc_folder is not None:
remote_copy_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
self._OUTPUT_SUBFOLDER, '*'),
self._OUTPUT_SUBFOLDER
))
# and copy the old parent prefix.path
remote_copy_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
'{}.path'.format(self._PREFIX)),
'{}.path'.format(self._PREFIX)
))
# here we may create an aiida.EXIT file
create_exit_file = settings_dict.pop('ONLY_INITIALIZATION', False)
if create_exit_file:
exit_filename = '{}.EXIT'.format(self._PREFIX)
with folder.open(exit_filename, 'w') as handle:
handle.write('\n')
calcinfo = CalcInfo()
codeinfo = CodeInfo()
calcinfo.uuid = self.uuid
cmdline_params = settings_dict.pop('CMDLINE', [])
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
calcinfo.remote_symlink_list = remote_symlink_list
# In neb calculations there is no input read from standard input!!
codeinfo.cmdline_params = (['-input_images', '2'] + list(cmdline_params))
codeinfo.stdout_name = self.inputs.metadata.options.output_filename
codeinfo.code_uuid = self.inputs.code.uuid
calcinfo.codes_info = [codeinfo]
# Retrieve the output files and the xml files
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append(self.inputs.metadata.options.output_filename)
calcinfo.retrieve_list.append((
os.path.join(self._OUTPUT_SUBFOLDER, self._PREFIX + '_*[0-9]', 'PW.out'), # source relative path (globbing)
'.', # destination relative path
2 # depth to preserve
))
for xml_filepath in self.xml_filepaths: # pylint: disable=not-an-iterable
calcinfo.retrieve_list.append([xml_filepath, '.', 3])
calcinfo.retrieve_list += settings_dict.pop('ADDITIONAL_RETRIEVE_LIST', [])
calcinfo.retrieve_list += self._internal_retrieve_list
# We might still have parser options in the settings dictionary: pop them.
_pop_parser_options(self, settings_dict)
if settings_dict:
unknown_keys = ', '.join(list(settings_dict.keys()))
raise InputValidationError('`settings` contained unexpected keys: {}'.format(unknown_keys))
return calcinfo
|
py | 1a555a49126b62673f8367f5ba342870b3960a1e |
import this # => display the Zen of Py
# 1. Any Python file can be imported as a module
# to load a module from another folder:
import sys
sys.path += ["path_to_folder"] # and import MyModule
if __name__ == "__main__":
    pass  # this code executes only when the script is run directly; if loaded as a module, it will not run
# PACKAGES
# MathOps - one must define a top-level directory for the package
# __init__.py - each package directory must contain __init__.py; it runs when the package is imported
# Simple
# __init__.py
# Arithmetic.py
# Bits.py
# One can access these as import MathOps.Simple or import MathOps.Bits etc.
# for `from MathOps import *` to work, one must define __all__ = ["PyFileName1", ..] (relative to the package dir)
# one can dynamically import a module with m = importlib.import_module("name")
# with exec(string) one can execute dynamic code
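# A short sketch of the layout described above; "MathOps" and its submodules are the
# hypothetical names from these notes, not an installed package, so the lines stay commented.
#
# MathOps/__init__.py could contain:
#     __all__ = ["Arithmetic", "Bits"]          # what `from MathOps import *` exposes
#
# import importlib
# arithmetic = importlib.import_module("MathOps.Simple.Arithmetic")   # dynamic import by dotted name
# from MathOps import *                                               # imports only the names listed in __all__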
|
py | 1a555ac2b550f082ca72457f29001f21d700c2c0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-07-03 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_elastic_migrations', '0004_auto_20180703_1232'),
]
operations = [
migrations.AlterField(
model_name='indexaction',
name='status',
field=models.CharField(choices=[('queued', 'queued'), ('in_progress', 'in_progress'), ('complete', 'complete'), ('aborted', 'aborted')],
default='queued', max_length=32),
),
]
|
py | 1a555b3cdfd939a15ce403edeb74bf0c9dea4cd0 | #!/usr/bin/python
from urlparse import urlparse
import sys
for line in sys.stdin:
    # Split on the HTTP method; if the method is present, index 1 holds the request part.
    data1 = line.strip().split("GET")
    data2 = line.strip().split("POST")
    if len(data1) == 2:
        path1 = urlparse(data1[1].split()[0]).path
        if path1:
            print str(path1)
    if len(data2) == 2:
        path2 = urlparse(data2[1].split()[0]).path
        if path2:
            print str(path2)
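# Usage note: this filter is meant for a shell pipeline; the log file and script names
# below are placeholders, e.g.
#   cat access.log | python extract_paths.py | sort | uniq -c | sort -rn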
|
py | 1a555c24c3034ffc1a88e267b3163d8e64da086e | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines an explainable lightgbm model."""
import inspect
import json
import logging
from packaging import version
from scipy.sparse import issparse
from ...common.constants import (ExplainableModelType, Extension,
LightGBMSerializationConstants,
ShapValuesOutput)
from ...common.warnings_suppressor import shap_warnings_suppressor
from .explainable_model import (BaseExplainableModel, _clean_doc,
_get_initializer_args)
from .tree_model_utils import (_expected_values_tree_surrogate,
_explain_local_tree_surrogate)
with shap_warnings_suppressor():
import shap
try:
import lightgbm
from lightgbm import Booster, LGBMClassifier, LGBMRegressor
if (version.parse(lightgbm.__version__) <= version.parse('2.2.1')):
print("Using older than supported version of lightgbm, please upgrade to version greater than 2.2.1")
except ImportError:
print("Could not import lightgbm, required if using LGBMExplainableModel")
DEFAULT_RANDOM_STATE = 123
_N_FEATURES = '_n_features'
_N_CLASSES = '_n_classes'
NUM_ITERATIONS = 'num_iterations'
_FITTED = 'fitted_'
class _LGBMFunctionWrapper(object):
"""Decorate the predict method, temporary workaround for sparse case until TreeExplainer support is added.
:param function: The prediction function to wrap.
:type function: function
"""
def __init__(self, function):
"""Wraps a function to reshape the input data.
:param function: The prediction function to wrap.
:type function: function
"""
self._function = function
def predict_wrapper(self, X, *args, **kwargs):
"""Wraps a prediction function from lightgbm learner.
If version is ==3.0.0, densifies the input dataset.
:param X: The model evaluation examples.
:type X: numpy.ndarray
:return: Prediction result.
:rtype: numpy.ndarray
"""
if issparse(X):
X = X.toarray()
return self._function(X, *args, **kwargs)
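# Usage sketch for the wrapper above; `model` is assumed to be an already-fitted
# lightgbm.LGBMClassifier and `sparse_X` a scipy.sparse matrix (neither exists here,
# so the lines stay commented):
#
#     wrapped = _LGBMFunctionWrapper(model.predict_proba)
#     probabilities = wrapped.predict_wrapper(sparse_X)  # densifies sparse_X, then delegates to predict_proba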
class _SparseTreeExplainer(object):
"""Wraps the lightgbm model to enable sparse feature contributions.
If version is >=3.1.0, runs on sparse input data by calling predict function directly.
:param lgbm: The lightgbm model to wrap.
:type lgbm: LGBMModel
:param tree_explainer: The tree_explainer used for dense data.
:type tree_explainer: shap.TreeExplainer
"""
def __init__(self, lgbm, tree_explainer):
"""Wraps the lightgbm model to enable sparse feature contributions.
:param lgbm: The lightgbm model to wrap.
:type lgbm: LGBMModel
:param tree_explainer: The tree_explainer used for dense data.
:type tree_explainer: shap.TreeExplainer
"""
self._lgbm = lgbm
self._tree_explainer = tree_explainer
self._num_iters = -1
# Get the number of iterations trained for from the booster
if hasattr(self._lgbm._Booster, 'params'):
if NUM_ITERATIONS in self._lgbm._Booster.params:
self._num_iters = self._lgbm._Booster.params[NUM_ITERATIONS]
# If best iteration specified, use that
if self._lgbm._best_iteration is not None:
self._num_iters = self._lgbm._best_iteration
self.expected_value = None
def shap_values(self, X):
"""Calls lightgbm predict directly for sparse case.
If lightgbm version is >=3.1.0, runs on sparse input data
by calling predict function directly with pred_contrib=True.
Uses tree explainer for dense input data.
:param X: The model evaluation examples.
:type X: numpy.ndarray or scipy.sparse.csr_matrix
:return: The feature importance values.
:rtype: numpy.ndarray, scipy.sparse or list of scipy.sparse
"""
if issparse(X):
shap_values = self._lgbm.predict(X,
num_iteration=self._num_iters,
pred_contrib=True)
if isinstance(shap_values, list):
shape = shap_values[0].shape
self.expected_value = shap_values[0][0, shape[1] - 1]
for idx, class_values in enumerate(shap_values):
shap_values[idx] = class_values[:, :shape[1] - 1]
else:
shape = shap_values.shape
self.expected_value = shap_values[0, shape[1] - 1]
shap_values = shap_values[:, :shape[1] - 1]
else:
shap_values = self._tree_explainer.shap_values(X)
self.expected_value = self._tree_explainer.expected_value
return shap_values
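# Note (descriptive comment, not in the original source): with pred_contrib=True,
# LightGBM returns one contribution column per feature plus a trailing column that
# holds the expected (base) value, which is why shap_values() above slices off the
# last column and stores it in self.expected_value.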
class LGBMExplainableModel(BaseExplainableModel):
available_explanations = [Extension.GLOBAL, Extension.LOCAL]
explainer_type = Extension.GLASSBOX
"""LightGBM (fast, high performance framework based on decision tree) explainable model.
Please see documentation for more details: https://github.com/Microsoft/LightGBM
Additional arguments to LightGBMClassifier and LightGBMRegressor can be passed through kwargs.
:param multiclass: Set to true to generate a multiclass model.
:type multiclass: bool
:param random_state: Int to seed the model.
:type random_state: int
:param shap_values_output: The type of the output from explain_local when using TreeExplainer.
Currently only types 'default', 'probability' and 'teacher_probability' are supported. If
'probability' is specified, then we approximately scale the raw log-odds values from the
TreeExplainer to probabilities.
:type shap_values_output: interpret_community.common.constants.ShapValuesOutput
:param classification: Indicates if this is a classification or regression explanation.
:type classification: bool
"""
def __init__(self, multiclass=False, random_state=DEFAULT_RANDOM_STATE,
shap_values_output=ShapValuesOutput.DEFAULT, classification=True, **kwargs):
"""Initialize the LightGBM Model.
Additional arguments to LightGBMClassifier and LightGBMRegressor can be passed through kwargs.
:param multiclass: Set to true to generate a multiclass model.
:type multiclass: bool
:param random_state: Int to seed the model.
:type random_state: int
:param shap_values_output: The type of the output from explain_local when using TreeExplainer.
Currently only types 'default', 'probability' and 'teacher_probability' are supported. If
'probability' is specified, then we approximately scale the raw log-odds values from the
TreeExplainer to probabilities.
:type shap_values_output: interpret_community.common.constants.ShapValuesOutput
:param classification: Indicates if this is a classification or regression explanation.
:type classification: bool
"""
self.multiclass = multiclass
initializer_args = _get_initializer_args(kwargs)
if self.multiclass:
initializer = LGBMClassifier
else:
initializer = LGBMRegressor
self._lgbm = initializer(random_state=random_state, **initializer_args)
super(LGBMExplainableModel, self).__init__(**kwargs)
self._logger.debug('Initializing LGBMExplainableModel')
self._method = 'lightgbm'
self._tree_explainer = None
self._shap_values_output = shap_values_output
self._classification = classification
try:
__init__.__doc__ = (__init__.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.__init__.__doc__) +
'\nOtherwise, if multiclass=False, uses the parameters for LGBMRegressor:\n' +
_clean_doc(LGBMRegressor.__init__.__doc__))
except Exception:
pass
def fit(self, dataset, labels, **kwargs):
"""Call lightgbm fit to fit the explainable model.
:param dataset: The dataset to train the model on.
:type dataset: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:param labels: The labels to train the model on.
:type labels: numpy.ndarray
"""
self._lgbm.fit(dataset, labels, **kwargs)
try:
fit.__doc__ = (fit.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.fit.__doc__) +
'\nOtherwise, if multiclass=False, uses the parameters for LGBMRegressor:\n' +
_clean_doc(LGBMRegressor.fit.__doc__))
except Exception:
pass
def predict(self, dataset, **kwargs):
"""Call lightgbm predict to predict labels using the explainable model.
:param dataset: The dataset to predict on.
:type dataset: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:return: The predictions of the model.
:rtype: list
"""
return self._lgbm.predict(dataset, **kwargs)
try:
predict.__doc__ = (predict.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.predict.__doc__) +
'\nOtherwise, if multiclass=False, uses the parameters for LGBMRegressor:\n' +
_clean_doc(LGBMRegressor.predict.__doc__))
except Exception:
pass
def predict_proba(self, dataset, **kwargs):
"""Call lightgbm predict_proba to predict probabilities using the explainable model.
:param dataset: The dataset to predict probabilities on.
:type dataset: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:return: The predictions of the model.
:rtype: list
"""
if self.multiclass:
return self._lgbm.predict_proba(dataset, **kwargs)
else:
raise Exception("predict_proba not supported for regression or binary classification dataset")
try:
predict_proba.__doc__ = (predict_proba.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.predict_proba.__doc__) +
'\nOtherwise predict_proba is not supported for ' +
'regression or binary classification.\n')
except Exception:
pass
def explain_global(self, **kwargs):
"""Call lightgbm feature importances to get the global feature importances from the explainable model.
:return: The global explanation of feature importances.
:rtype: numpy.ndarray
"""
return self._lgbm.feature_importances_
def _init_tree_explainer(self):
"""Creates the TreeExplainer.
Includes a temporary fix for lightgbm 3.0 by wrapping predict method
for sparse case to output dense data.
Includes another temporary fix for lightgbm >= 3.1 to call predict
function directly for sparse input data until shap TreeExplainer
support is added.
"""
if self._tree_explainer is None:
self._tree_explainer = shap.TreeExplainer(self._lgbm)
if version.parse('3.1.0') <= version.parse(lightgbm.__version__):
self._tree_explainer = _SparseTreeExplainer(self._lgbm, self._tree_explainer)
elif version.parse('3.0.0') == version.parse(lightgbm.__version__):
wrapper = _LGBMFunctionWrapper(self._tree_explainer.model.original_model.predict)
self._tree_explainer.model.original_model.predict = wrapper.predict_wrapper
def explain_local(self, evaluation_examples, probabilities=None, **kwargs):
"""Use TreeExplainer to get the local feature importances from the trained explainable model.
:param evaluation_examples: The evaluation examples to compute local feature importances for.
:type evaluation_examples: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:param probabilities: If output_type is probability, can specify the teacher model's
probability for scaling the shap values.
:type probabilities: numpy.ndarray
:return: The local explanation of feature importances.
:rtype: Union[list, numpy.ndarray]
"""
self._init_tree_explainer()
return _explain_local_tree_surrogate(self._lgbm, evaluation_examples, self._tree_explainer,
self._shap_values_output, self._classification,
probabilities, self.multiclass)
@property
def expected_values(self):
"""Use TreeExplainer to get the expected values.
:return: The expected values of the LightGBM tree model.
:rtype: list
"""
self._init_tree_explainer()
return _expected_values_tree_surrogate(self._lgbm, self._tree_explainer, self._shap_values_output,
self._classification, self.multiclass)
@property
def model(self):
"""Retrieve the underlying model.
:return: The lightgbm model, either classifier or regressor.
:rtype: Union[LGBMClassifier, LGBMRegressor]
"""
return self._lgbm
@staticmethod
def explainable_model_type():
"""Retrieve the model type.
:return: Tree explainable model type.
:rtype: ExplainableModelType
"""
return ExplainableModelType.TREE_EXPLAINABLE_MODEL_TYPE
def _save(self):
"""Return a string dictionary representation of the LGBMExplainableModel.
:return: A serialized dictionary representation of the LGBMExplainableModel.
:rtype: dict
"""
properties = {}
# Save all of the properties
for key, value in self.__dict__.items():
if key in LightGBMSerializationConstants.nonify_properties:
properties[key] = None
elif key in LightGBMSerializationConstants.save_properties:
# Save booster model to string representation
# This is not recommended but can be necessary to get around pickle being not secure
# See here for more info:
# https://github.com/Microsoft/LightGBM/issues/1942
# https://github.com/Microsoft/LightGBM/issues/1217
properties[key] = value.booster_.model_to_string()
else:
properties[key] = json.dumps(value)
# Need to add _n_features
properties[_N_FEATURES] = self._lgbm._n_features
# And if classification case need to add _n_classes
if self.multiclass:
properties[_N_CLASSES] = self._lgbm._n_classes
if hasattr(self._lgbm, _FITTED):
properties[_FITTED] = json.dumps(getattr(self._lgbm, _FITTED))
return properties
@staticmethod
def _load(properties):
"""Load a LGBMExplainableModel from the given properties.
:param properties: A serialized dictionary representation of the LGBMExplainableModel.
:type properties: dict
:return: The deserialized LGBMExplainableModel.
:rtype: interpret_community.mimic.models.LGBMExplainableModel
"""
# create the LGBMExplainableModel without any properties using the __new__ function, similar to pickle
lgbm_model = LGBMExplainableModel.__new__(LGBMExplainableModel)
# Get _n_features
_n_features = properties.pop(_N_FEATURES)
# If classification case get _n_classes
if json.loads(properties[LightGBMSerializationConstants.MULTICLASS]):
_n_classes = properties.pop(_N_CLASSES)
fitted_ = None
if _FITTED in properties:
fitted_ = json.loads(properties[_FITTED])
elif version.parse('3.3.1') <= version.parse(lightgbm.__version__):
# If deserializing older model in newer version set this to true to prevent errors on calls
fitted_ = True
# load all of the properties
for key, value in properties.items():
# Regenerate the properties on the fly
if key in LightGBMSerializationConstants.nonify_properties:
if key == LightGBMSerializationConstants.LOGGER:
parent = logging.getLogger(__name__)
lightgbm_identity = json.loads(properties[LightGBMSerializationConstants.IDENTITY])
lgbm_model.__dict__[key] = parent.getChild(lightgbm_identity)
elif key == LightGBMSerializationConstants.TREE_EXPLAINER:
lgbm_model.__dict__[key] = None
else:
raise Exception("Unknown nonify key on deserialize in LightGBMExplainableModel: {}".format(key))
elif key in LightGBMSerializationConstants.save_properties:
# Load the booster from file and re-create the LGBMClassifier or LGBMRegressor
# This is not recommended but can be necessary to get around pickle being not secure
# See here for more info:
# https://github.com/Microsoft/LightGBM/issues/1942
# https://github.com/Microsoft/LightGBM/issues/1217
booster_args = {LightGBMSerializationConstants.MODEL_STR: value}
is_multiclass = json.loads(properties[LightGBMSerializationConstants.MULTICLASS])
if is_multiclass:
objective = LightGBMSerializationConstants.MULTICLASS
else:
objective = LightGBMSerializationConstants.REGRESSION
if LightGBMSerializationConstants.MODEL_STR in inspect.getargspec(Booster).args:
extras = {LightGBMSerializationConstants.OBJECTIVE: objective}
lgbm_booster = Booster(**booster_args, params=extras)
else:
# For backwards compatibility with older versions of lightgbm
booster_args[LightGBMSerializationConstants.OBJECTIVE] = objective
lgbm_booster = Booster(params=booster_args)
if is_multiclass:
new_lgbm = LGBMClassifier()
new_lgbm._Booster = lgbm_booster
new_lgbm._n_classes = _n_classes
else:
new_lgbm = LGBMRegressor()
new_lgbm._Booster = lgbm_booster
# Specify fitted_ for newer versions of lightgbm on deserialize
if fitted_ is not None:
new_lgbm.fitted_ = fitted_
new_lgbm._n_features = _n_features
lgbm_model.__dict__[key] = new_lgbm
elif key in LightGBMSerializationConstants.enum_properties:
# NOTE: If more enums added in future, will need to handle this differently
lgbm_model.__dict__[key] = ShapValuesOutput(json.loads(value))
else:
lgbm_model.__dict__[key] = json.loads(value)
return lgbm_model
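# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module); the synthetic data and
# regression settings below are assumptions, only meant to show the fit/explain flow.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 4)
    y_demo = 3.0 * X_demo[:, 0] + 0.1 * rng.rand(200)
    surrogate = LGBMExplainableModel(multiclass=False, classification=False)
    surrogate.fit(X_demo, y_demo)
    print("global importances:", surrogate.explain_global())   # feature_importances_
    print("predictions:", surrogate.predict(X_demo[:3]))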
|
py | 1a555cf04820b570e0e517fcd97c35a91b7b4cfe | version = (0, 5, 2)
version_string = "0.5.2" |
py | 1a555dbab4934cbb44924de122f9efe3c82a67b6 | import datetime
def printAppBanner():
print("--------------------------------------")
print(" BIRTHDAY APP ")
print("--------------------------------------")
def getBirthday():
print("What is your birth date?")
year = int(input("Year [YYY]? "))
month = int(input("Month [MM]? "))
day = int(input("Day [DD]? "))
return datetime.date(year, month, day)
def currDate():
return datetime.date.today()
def get_days_difference(current_date, birthday):
#print(current_date)
#print(birthday)
this_year_birthday = datetime.date(current_date.year, birthday.month, birthday.day)
next_year_birthday = datetime.date(current_date.year+1, birthday.month, birthday.day)
#print(this_year_birthday)
diffT = this_year_birthday - current_date
diffN = next_year_birthday - current_date
if (abs(diffT.days) > diffN.days):
return diffN.days
return diffT.days
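# Worked example (illustrative): with current_date = 2021-06-01, a June 4th birthday
# gives this_year_birthday - current_date = 3 days versus ~368 days to next year's
# date, so 3 is returned; a May 30th birthday instead returns -2 (it was 2 days ago).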
def printMessage(days_difference):
if days_difference == 0:
print("Today is your birthday, Happy Birthday!!!")
elif days_difference > 0:
print("Looks like your birthday is in {} days".format(days_difference))
else:
print("Looks like your birthday was {} days before".format(abs(days_difference)))
def main():
printAppBanner()
birthday = getBirthday()
current_date = currDate()
days_difference = get_days_difference(current_date, birthday)
printMessage(days_difference)
main()
|
py | 1a555e260fba5cad14964ff418941f2ec2c4b2b4 | """INIT
Revision ID: 0be9593348ba
Revises:
Create Date: 2021-01-08 20:02:32.175135
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "0be9593348ba"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"anubis_config",
sa.Column("key", sa.String(length=128), nullable=False),
sa.Column("value", sa.String(length=2048), nullable=True),
sa.PrimaryKeyConstraint("key"),
)
op.create_table(
"course",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("name", sa.String(length=256), nullable=False),
sa.Column("course_code", sa.String(length=256), nullable=False),
sa.Column("semester", sa.String(length=256), nullable=True),
sa.Column("section", sa.String(length=256), nullable=True),
sa.Column("professor", sa.String(length=256), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"static_file",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("filename", sa.String(length=256), nullable=True),
sa.Column("path", sa.String(length=256), nullable=True),
sa.Column("content_type", sa.String(length=128), nullable=True),
sa.Column("blob", sa.LargeBinary(length=(2 ** 32) - 1), nullable=True),
sa.Column("hidden", sa.Boolean(), nullable=True),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"user",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("netid", sa.String(length=128), nullable=False),
sa.Column("github_username", sa.String(length=128), nullable=True),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("is_admin", sa.Boolean(), nullable=False),
sa.Column("is_superuser", sa.Boolean(), nullable=False),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id", "netid"),
)
op.create_index(
op.f("ix_user_github_username"), "user", ["github_username"], unique=False
)
op.create_index(op.f("ix_user_netid"), "user", ["netid"], unique=True)
op.create_table(
"assignment",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("course_id", sa.String(length=128), nullable=True),
sa.Column("name", sa.String(length=256), nullable=False),
sa.Column("hidden", sa.Boolean(), nullable=True),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("github_classroom_url", sa.String(length=256), nullable=True),
sa.Column("pipeline_image", sa.String(length=256), nullable=True),
sa.Column("unique_code", sa.String(length=8), nullable=True),
sa.Column("ide_enabled", sa.Boolean(), nullable=True),
sa.Column("release_date", sa.DateTime(), nullable=False),
sa.Column("due_date", sa.DateTime(), nullable=False),
sa.Column("grace_date", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["course_id"],
["course.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
sa.UniqueConstraint("pipeline_image"),
sa.UniqueConstraint("unique_code"),
)
op.create_index(
op.f("ix_assignment_course_id"), "assignment", ["course_id"], unique=False
)
op.create_table(
"in_course",
sa.Column("owner_id", sa.String(length=128), nullable=False),
sa.Column("course_id", sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(
["course_id"],
["course.id"],
),
sa.ForeignKeyConstraint(
["owner_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("owner_id", "course_id"),
)
op.create_table(
"assignment_question",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("assignment_id", sa.String(length=128), nullable=True),
sa.Column("question", sa.Text(), nullable=False),
sa.Column("solution", sa.Text(), nullable=True),
sa.Column("sequence", sa.Integer(), nullable=False),
sa.Column("code_question", sa.Boolean(), nullable=True),
sa.Column("code_language", sa.String(length=128), nullable=True),
sa.Column("placeholder", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["assignment_id"],
["assignment.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_assignment_question_assignment_id"),
"assignment_question",
["assignment_id"],
unique=False,
)
op.create_index(
op.f("ix_assignment_question_sequence"),
"assignment_question",
["sequence"],
unique=False,
)
op.create_table(
"assignment_repo",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("owner_id", sa.String(length=128), nullable=True),
sa.Column("assignment_id", sa.String(length=128), nullable=False),
sa.Column("github_username", sa.String(length=256), nullable=False),
sa.Column("repo_url", sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(
["assignment_id"],
["assignment.id"],
),
sa.ForeignKeyConstraint(
["owner_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"assignment_test",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("assignment_id", sa.String(length=128), nullable=True),
sa.Column("name", sa.String(length=128), nullable=True),
sa.ForeignKeyConstraint(
["assignment_id"],
["assignment.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_assignment_test_name"), "assignment_test", ["name"], unique=False
)
op.create_table(
"assigned_student_question",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("response", sa.Text(), nullable=False),
sa.Column("owner_id", sa.String(length=128), nullable=True),
sa.Column("assignment_id", sa.String(length=128), nullable=False),
sa.Column("question_id", sa.String(length=128), nullable=False),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["assignment_id"],
["assignment.id"],
),
sa.ForeignKeyConstraint(
["owner_id"],
["user.id"],
),
sa.ForeignKeyConstraint(
["question_id"],
["assignment_question.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_assigned_student_question_assignment_id"),
"assigned_student_question",
["assignment_id"],
unique=False,
)
op.create_index(
op.f("ix_assigned_student_question_question_id"),
"assigned_student_question",
["question_id"],
unique=False,
)
op.create_table(
"submission",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("owner_id", sa.String(length=128), nullable=True),
sa.Column("assignment_id", sa.String(length=128), nullable=False),
sa.Column("assignment_repo_id", sa.String(length=128), nullable=False),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.Column("commit", sa.String(length=128), nullable=False),
sa.Column("processed", sa.Boolean(), nullable=True),
sa.Column("state", sa.String(length=128), nullable=True),
sa.Column("errors", sa.JSON(), nullable=True),
sa.Column("token", sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(
["assignment_id"],
["assignment.id"],
),
sa.ForeignKeyConstraint(
["assignment_repo_id"],
["assignment_repo.id"],
),
sa.ForeignKeyConstraint(
["owner_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_submission_assignment_id"),
"submission",
["assignment_id"],
unique=False,
)
op.create_index(op.f("ix_submission_commit"), "submission", ["commit"], unique=True)
op.create_index(
op.f("ix_submission_owner_id"), "submission", ["owner_id"], unique=False
)
op.create_table(
"theia_session",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("owner_id", sa.String(length=128), nullable=False),
sa.Column("assignment_id", sa.String(length=128), nullable=False),
sa.Column("repo_id", sa.String(length=128), nullable=False),
sa.Column("active", sa.Boolean(), nullable=True),
sa.Column("state", sa.String(length=128), nullable=True),
sa.Column("cluster_address", sa.String(length=256), nullable=True),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("ended", sa.DateTime(), nullable=True),
sa.Column("last_heartbeat", sa.DateTime(), nullable=True),
sa.Column("last_proxy", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["assignment_id"],
["assignment.id"],
),
sa.ForeignKeyConstraint(
["owner_id"],
["user.id"],
),
sa.ForeignKeyConstraint(
["repo_id"],
["assignment_repo.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"submission_build",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("submission_id", sa.String(length=128), nullable=True),
sa.Column("stdout", sa.Text(), nullable=True),
sa.Column("passed", sa.Boolean(), nullable=True),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_submission_build_submission_id"),
"submission_build",
["submission_id"],
unique=False,
)
op.create_table(
"submission_test_result",
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("submission_id", sa.String(length=128), nullable=False),
sa.Column("assignment_test_id", sa.String(length=128), nullable=False),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("last_updated", sa.DateTime(), nullable=True),
sa.Column("stdout", sa.Text(), nullable=True),
sa.Column("message", sa.Text(), nullable=True),
sa.Column("passed", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["assignment_test_id"],
["assignment_test.id"],
),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
),
sa.PrimaryKeyConstraint("id", "submission_id", "assignment_test_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("submission_test_result")
op.drop_index(
op.f("ix_submission_build_submission_id"), table_name="submission_build"
)
op.drop_table("submission_build")
op.drop_table("theia_session")
op.drop_index(op.f("ix_submission_owner_id"), table_name="submission")
op.drop_index(op.f("ix_submission_commit"), table_name="submission")
op.drop_index(op.f("ix_submission_assignment_id"), table_name="submission")
op.drop_table("submission")
op.drop_index(
op.f("ix_assigned_student_question_question_id"),
table_name="assigned_student_question",
)
op.drop_index(
op.f("ix_assigned_student_question_assignment_id"),
table_name="assigned_student_question",
)
op.drop_table("assigned_student_question")
op.drop_index(op.f("ix_assignment_test_name"), table_name="assignment_test")
op.drop_table("assignment_test")
op.drop_table("assignment_repo")
op.drop_index(
op.f("ix_assignment_question_sequence"), table_name="assignment_question"
)
op.drop_index(
op.f("ix_assignment_question_assignment_id"), table_name="assignment_question"
)
op.drop_table("assignment_question")
op.drop_table("in_course")
op.drop_index(op.f("ix_assignment_course_id"), table_name="assignment")
op.drop_table("assignment")
op.drop_index(op.f("ix_user_netid"), table_name="user")
op.drop_index(op.f("ix_user_github_username"), table_name="user")
op.drop_table("user")
op.drop_table("static_file")
op.drop_table("course")
op.drop_table("config")
# ### end Alembic commands ###
|
py | 1a555eb15362f1895f934e7b274d06bcf666fa00 | '''
Trains a convolutional neural network, using a pre-trained ImageNet model,
to infer the name of a flower given its image.
'''
# -------------------- IMPORT PACKAGES --------------------
import argparse
import os
from copy import deepcopy
from time import time
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
# -------------------- SETUP MAJOR INPUT VALUES --------------------
# Setup major inputs required from command line
parser = argparse.ArgumentParser(description='Trains a neural network')
parser.add_argument('data_directory', type=str,
help='Filepath for input data of format "data_dir/". \
Expected to be the parent directory with \
folders "train", "validation", and "test" inside, with each structured \
according to torchivision.datasets.ImageFolder requirements')
# Setup optional parameters that can be entered from the command line
parser.add_argument('-s', '--save_dir', type=str, metavar='',
default = 'model_checkpoints/',
help = 'Filepath indicating where trained model checkpoint files \
should be stored')
parser.add_argument('-a', '--arch', type=str, metavar='',
default = 'inception',
help = 'Pre-trained model from torchivision.models to use \
for the feature detector layers of your model')
parser.add_argument('-l', '--learning_rate', type=float,
default = 0.0005, metavar='',
help = 'Learning rate to use for the Adam optimizer')
parser.add_argument('-u', '--hidden_units', nargs='+', type=int,
default = [512, 256], metavar='',
help = 'Number of nodes to use in each hidden layer, ordered from \
earliest to latest layer. Not inclusive of the input layer \
(node count dictated by model architecture chosen) and \
output layer (always 102 = number of flower labels). \
Note that usage is --hidden_units count1 count2 count3...')
parser.add_argument('-d', '--dropout', type=bool,
default = True, metavar='',
help = 'Determines if dropout with p=0.2 will be used for \
each hidden layer')
parser.add_argument('-e', '--epochs', type=int,
default = 30, metavar='',
help = 'Number of epochs to use for training and validation')
parser.add_argument('-g', '--gpu', type=bool,
default = True, metavar='',
help = 'If GPU is available, indicates that it should be used')
parser.add_argument('-t', '--quick_test', type=bool,
default = False, metavar='',
help = 'If you just want to test the base code functionality quickly, \
set this to True. Will only load first batch for all training \
and testing.')
args = parser.parse_args()
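# Example invocation (illustrative; the script filename "train.py" and the data path
# are hypothetical). Note that with type=bool, argparse treats any non-empty string
# (including "False") as True, so only pass --gpu/--dropout/--quick_test when you
# actually want them enabled:
#   python train.py flowers/ --arch densenet --learning_rate 0.001 \
#       --hidden_units 512 256 --epochs 20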
# -------------------- ARCHITECTURE-SPECIFIC SETUP --------------------
# Sets parameters for various items that are not model-architecture-agnostic
# torchvision.models.inception_v3()
if args.arch == 'inception':
crop_size = 299
model = models.inception_v3(pretrained=True)
#classifier = model.fc
input_nodes = 2048
elif args.arch == 'densenet':
crop_size = 224
model = models.densenet161(pretrained=True)
#classifier = model.classifier
input_nodes = 2208
else:
print("An unsupported model architecture was supplied. \
Program terminating...")
exit()
# Freeze parameters so we don't backprop through the pre-trained
# feature detector
for param in model.parameters():
param.requires_grad = False
# -------------------- DATA LOADING AND TRANSFORMATIONS --------------------
# Initial transformations for cropping should be dictated by
# model architecture chosen (resize should always be the same 512)
# Means and stdevs common for pre-trained networks
means = [0.485, 0.456, 0.406]
stdevs = [0.229, 0.224, 0.225]
data_dir = args.data_directory
# Code here adapted from https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
image_transforms = {'train': \
transforms.Compose([transforms.RandomResizedCrop(crop_size),
transforms.ColorJitter(brightness=0.15,
contrast=0.15,
saturation=0.15,
hue=0),
transforms.RandomAffine(30),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(means, stdevs)]),
'valid': transforms.Compose([transforms.Resize(512),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(means, stdevs)]),
'test': transforms.Compose([transforms.Resize(512),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(means, stdevs)])
}
phases = ['train', 'valid', 'test']
data = {phase: datasets.ImageFolder(os.path.join(data_dir, phase),
image_transforms[phase]) for phase in phases}
dataloaders = {phase: torch.utils.data.DataLoader(data[phase],
batch_size=64) for phase in phases}
# Set training dataloader to have shuffle = True
dataloaders['train'] = torch.utils.data.DataLoader(data['train'],
batch_size=64, shuffle = True)
# -------------------- CLASSIFIER BUILDING --------------------
# Build classifier portion of convolutional neural net to replace
# original ImageNet classifier
classifier = nn.Sequential()
nodes = args.hidden_units
classifier.add_module('hidden1', nn.Linear(input_nodes, nodes[0]))
for i, _ in enumerate(nodes):
if i+1 >= len(nodes): break
classifier.add_module('activation' + str(i+1), nn.ReLU())
if args.dropout: classifier.add_module('dropout' + str(i+1),
nn.Dropout(0.2))
classifier.add_module('hidden' + str(i+2), nn.Linear(nodes[i], nodes[i+1]))
classifier.add_module('activation' + str(i+1), nn.ReLU())
classifier.add_module('output', nn.Linear(nodes[-1], 102))
classifier.add_module('activation_output', nn.LogSoftmax(dim=1))
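# For the default --hidden_units 512 256 with the inception backbone (2048 input
# features) and dropout enabled (the default), the Sequential built above is:
# Linear(2048, 512) -> ReLU -> Dropout(0.2) -> Linear(512, 256) -> ReLU
# -> Linear(256, 102) -> LogSoftmax.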
if args.arch == 'inception':
model.fc = classifier
model_params = model.fc.parameters()
print(f"Classifier architecture:")
print(model.fc)
elif args.arch == 'densenet':
model.classifier = classifier
model_params = model.classifier.parameters()
print(f"Classifier architecture:")
print(model.classifier)
# -------------------- START EPOCHS --------------------
# If GPU is enabled, set device = 'cuda'. Otherwise use CPU
if torch.cuda.is_available() and args.gpu:
device = torch.device("cuda:0")
elif args.gpu and not torch.cuda.is_available():
print("\nGPU unavailable, using CPU\n")
device = torch.device("cpu")
else:
device = torch.device("cpu")
model.to(device)
# Good loss function to use for LogSoftMax activation layer
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model_params, lr=args.learning_rate)
t0 = time()
# Prep for saving the best epoch's model weights
# Code for this adapted from https://medium.com/datadriveninvestor/creating-a-pytorch-image-classifier-da9db139ba80
from copy import deepcopy
best = {'acc': 0.0, 'epoch': 0, 'weights': deepcopy(model.state_dict())}
epochs = args.epochs
# Used to keep the Udacity online workspace from
# disconnecting/going to sleep
from workspace_utils import keep_awake
# Keep GPU session awake in Udacity workspace until done training
epoch_iter = keep_awake(range(epochs))
#epoch_iter = range(epochs)
for e in epoch_iter:
# -------------------- TRAINING --------------------
# Make sure model is in training mode
model.train()
training_loss = 0
training_batch_counter = 0
for images, labels in dataloaders['train']:
# Move input and label tensors to the GPU or CPU
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(images)
if args.arch == 'inception':
loss = criterion(outputs.logits, labels)
elif args.arch == 'densenet':
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
training_loss += loss.item()
# Monitor every 10 batches and final batch
if training_batch_counter % 10 == 0 or \
training_batch_counter == (len(dataloaders['train']) - 1):
print(f"Training batch {training_batch_counter}\nLoss = \
{training_loss/(training_batch_counter + 1)}\n")
training_batch_counter += 1
if args.quick_test: break
# -------------------- VALIDATION --------------------
# turn off gradients for speedup in validation
with torch.no_grad():
# set model to evaluation mode and remove un-needed things
# like Dropout layers
model.eval()
accuracy = 0
valid_loss = 0
val_batch_counter = 0
for images, labels in dataloaders['valid']:
# Move input and label tensors to the GPU or CPU
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
probs = torch.exp(outputs)
_, top_class = probs.topk(1, dim = 1)
equals = top_class == labels.view(*top_class.shape)
valid_loss += loss.item()
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Monitor every 3 batches and final batch
if val_batch_counter % 3 == 0 or \
val_batch_counter == (len(dataloaders['valid']) - 1):
print(f"Validation batch {val_batch_counter}\nLoss = \
{valid_loss/(val_batch_counter + 1)}\n and \
accuracy = {accuracy/(val_batch_counter + 1)}\n")
val_batch_counter += 1
if args.quick_test: break
# -------------------- EPOCH REPORTING --------------------
# Note that normalizing to train/validloader length is due to
# need to divide by batch size to effectively average the
# quantity in question
training_loss /= len(dataloaders['train'])
valid_loss /= len(dataloaders['valid'])
accuracy /= len(dataloaders['valid'])
print(f"For epoch {e+1}/{epochs}...")
print(f"{round((time()-t0)/60, 3)} minutes since training started")
print(f"Training loss = {training_loss}")
print(f"Validation loss = {valid_loss}")
print(f"Accuracy = {accuracy}\n\n")
# Update best accuracy and weights if new superior model is found
if accuracy > best['acc']:
best['acc'] = accuracy
best['epoch'] = e+1
best['weights'] = deepcopy(model.state_dict())
print("Best accuracy updated this epoch \
to {}\n\n\n".format(best['acc']))
if args.quick_test: break
# -------------------- END EPOCHS --------------------
print("Best accuracy found was {} in epoch {}".format(best['acc'],
best['epoch']))
# Set model weights to the optimal ones found across all epochs
# NOTE: you may get an error
# IncompatibleKeys(missing_keys=[], unexpected_keys=[])
# This error can be ignored. Model weights were still set properly.
model.load_state_dict(best['weights'])
# -------------------- TESTING --------------------
# turn off gradients for speedup in testing
with torch.no_grad():
# set model to evaluation mode and remove
# un-needed things like Dropout layers
model.eval()
test_accuracy = 0
test_loss = 0
for images, labels in dataloaders['test']:
# Move input and label tensors to the GPU or CPU
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
probs = torch.exp(outputs)
_, top_class = probs.topk(1, dim = 1)
equals = top_class == labels.view(*top_class.shape)
test_loss += loss.item()
test_accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
if args.quick_test: break
# Note that normalizing to train/validloader length is due to need to
# divide by batch size to effectively average the quantity in question
test_loss /= len(dataloaders['test'])
test_accuracy /= len(dataloaders['test'])
print(f"Testing loss = {test_loss}")
print(f"Testing accuracy = {test_accuracy}\n\n")
# -------------------- SAVING THE MODEL --------------------
# Note that class_to_idx provides the mapping of my folder names to the
# index used in the model
if args.arch == 'inception':
model_arch = models.inception_v3(pretrained=True)
elif args.arch == 'densenet':
model_arch = models.densenet161(pretrained=True)
checkpoint = {
'arch': model_arch,
'classifier': classifier,
'model_state': model.state_dict(),
'epoch_count': best['epoch'],
'training_loss': training_loss,
'validation_accuracy': best['acc'],
'test_loss': test_loss,
'test_accuracy': test_accuracy,
'opt_state': optimizer.state_dict(),
'class_to_idx': data['train'].class_to_idx,
'idx_to_class': {v: k for k,v \
in data['train'].class_to_idx.items()}}
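# Reload sketch (illustrative, not part of this script): a saved checkpoint can later
# be restored roughly as follows; the key names match the dict built above.
#   checkpoint = torch.load('model_checkpoints/checkpoint0.pth')
#   model = checkpoint['arch']
#   model.fc = checkpoint['classifier']        # model.classifier for densenet
#   model.load_state_dict(checkpoint['model_state'])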
# Determine the highest number X among the existing checkpoints
# which are assumed to have filenames of the format checkpointX.pth
# Code adapted from
# https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
from os import listdir
from os.path import isfile, join
existing_chkpts = [f for f in listdir(args.save_dir) \
if isfile(join(args.save_dir, f))]
# Code adapted from
# https://stackoverflow.com/questions/4666973/how-to-extract-the-substring-between-two-markers
# Take list of existing checkpoint filenames and
# generate string "checkpointn+1" where n is the highest
# value used for checkpoint filenames. Guarantees we won't
# overwrite an existing checkpoint
import re
file_indices = []
for e in existing_chkpts:
m = re.search('checkpoint(.+).pth', e)
if m:
file_indices.append(int(m.group(1)))
# Check that there are any files of proper name scheme in there at all
if file_indices:
file_n = max(file_indices) + 1
else:
file_n = 0
save_path = args.save_dir + 'checkpoint' + str(file_n) + '.pth'
torch.save(checkpoint, save_path) |
py | 1a555fe6d97e186a81e384f284f5ddc01d39a6d5 | import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('EMAIL_USER')
MAIL_PASSWORD = os.environ.get('EMAIL_PASS')
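# Minimal wiring sketch (illustrative, not part of the original file); assumes a
# standard Flask / Flask-Mail setup elsewhere in the project:
#   from flask import Flask
#   from flask_mail import Mail
#   app = Flask(__name__)
#   app.config.from_object(Config)
#   mail = Mail(app)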
|
py | 1a556073a5636fb5497ceaf3b166bd2435ea69d7 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import json
import logging
import re
from collections import defaultdict, OrderedDict
from dataclasses import dataclass, field # pylint: disable=wrong-import-order
from datetime import datetime, timedelta
from typing import (
Any,
cast,
Dict,
Hashable,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
import pandas as pd
import sqlalchemy as sa
import sqlparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_babel import lazy_gettext as _
from jinja2.exceptions import TemplateError
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
Enum,
ForeignKey,
Integer,
or_,
select,
String,
Table,
Text,
update,
)
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm import backref, Query, relationship, RelationshipProperty, Session
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.sql import column, ColumnElement, literal_column, table, text
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.sql.expression import Label, Select, TextAsFrom, TextClause
from sqlalchemy.sql.selectable import Alias, TableClause
from superset import app, db, is_feature_enabled, security_manager
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.connectors.sqla.utils import (
get_physical_table_metadata,
get_virtual_table_metadata,
)
from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression
from superset.exceptions import QueryObjectValidationError
from superset.jinja_context import (
BaseTemplateProcessor,
ExtraCache,
get_template_processor,
)
from superset.models.annotations import Annotation
from superset.models.core import Database
from superset.models.helpers import AuditMixinNullable, QueryResult
from superset.sql_parse import ParsedQuery
from superset.typing import AdhocMetric, Metric, OrderBy, QueryObjectDict
from superset.utils import core as utils
from superset.utils.core import GenericDataType, remove_duplicates
config = app.config
metadata = Model.metadata # pylint: disable=no-member
logger = logging.getLogger(__name__)
VIRTUAL_TABLE_ALIAS = "virtual_table"
class SqlaQuery(NamedTuple):
extra_cache_keys: List[Any]
labels_expected: List[str]
prequeries: List[str]
sqla_query: Select
class QueryStringExtended(NamedTuple):
labels_expected: List[str]
prequeries: List[str]
sql: str
@dataclass
class MetadataResult:
added: List[str] = field(default_factory=list)
removed: List[str] = field(default_factory=list)
modified: List[str] = field(default_factory=list)
class AnnotationDatasource(BaseDatasource):
"""Dummy object so we can query annotations using 'Viz' objects just like
regular datasources.
"""
cache_timeout = 0
changed_on = None
type = "annotation"
column_names = [
"created_on",
"changed_on",
"id",
"start_dttm",
"end_dttm",
"layer_id",
"short_descr",
"long_descr",
"json_metadata",
"created_by_fk",
"changed_by_fk",
]
def query(self, query_obj: QueryObjectDict) -> QueryResult:
error_message = None
qry = db.session.query(Annotation)
qry = qry.filter(Annotation.layer_id == query_obj["filter"][0]["val"])
if query_obj["from_dttm"]:
qry = qry.filter(Annotation.start_dttm >= query_obj["from_dttm"])
if query_obj["to_dttm"]:
qry = qry.filter(Annotation.end_dttm <= query_obj["to_dttm"])
status = utils.QueryStatus.SUCCESS
try:
df = pd.read_sql_query(qry.statement, db.engine)
except Exception as ex: # pylint: disable=broad-except
df = pd.DataFrame()
status = utils.QueryStatus.FAILED
logger.exception(ex)
error_message = utils.error_msg_from_exception(ex)
return QueryResult(
status=status,
df=df,
duration=timedelta(0),
query="",
error_message=error_message,
)
def get_query_str(self, query_obj: QueryObjectDict) -> str:
raise NotImplementedError()
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
raise NotImplementedError()
class TableColumn(Model, BaseColumn):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = "table_columns"
__table_args__ = (UniqueConstraint("table_id", "column_name"),)
table_id = Column(Integer, ForeignKey("tables.id"))
table = relationship(
"SqlaTable",
backref=backref("columns", cascade="all, delete-orphan"),
foreign_keys=[table_id],
)
is_dttm = Column(Boolean, default=False)
expression = Column(Text)
python_date_format = Column(String(255))
export_fields = [
"table_id",
"column_name",
"verbose_name",
"is_dttm",
"is_active",
"type",
"groupby",
"filterable",
"expression",
"description",
"python_date_format",
]
update_from_object_fields = [s for s in export_fields if s not in ("table_id",)]
export_parent = "table"
@property
def is_boolean(self) -> bool:
"""
Check if the column has a boolean datatype.
"""
return self.type_generic == GenericDataType.BOOLEAN
@property
def is_numeric(self) -> bool:
"""
Check if the column has a numeric datatype.
"""
return self.type_generic == GenericDataType.NUMERIC
@property
def is_string(self) -> bool:
"""
Check if the column has a string datatype.
"""
return self.type_generic == GenericDataType.STRING
@property
def is_temporal(self) -> bool:
"""
Check if the column has a temporal datatype. If column has been set as
temporal/non-temporal (`is_dttm` is True or False respectively), return that
value. This usually happens during initial metadata fetching or when a column
is manually set as temporal (for this `python_date_format` needs to be set).
"""
if self.is_dttm is not None:
return self.is_dttm
return self.type_generic == GenericDataType.TEMPORAL
@property
def db_engine_spec(self) -> Type[BaseEngineSpec]:
return self.table.db_engine_spec
@property
def type_generic(self) -> Optional[utils.GenericDataType]:
if self.is_dttm:
return GenericDataType.TEMPORAL
column_spec = self.db_engine_spec.get_column_spec(self.type)
return column_spec.generic_type if column_spec else None
def get_sqla_col(self, label: Optional[str] = None) -> Column:
label = label or self.column_name
db_engine_spec = self.db_engine_spec
column_spec = db_engine_spec.get_column_spec(self.type)
type_ = column_spec.sqla_type if column_spec else None
if self.expression:
tp = self.table.get_template_processor()
expression = tp.process_template(self.expression)
col = literal_column(expression, type_=type_)
else:
col = column(self.column_name, type_=type_)
col = self.table.make_sqla_column_compatible(col, label)
return col
@property
def datasource(self) -> RelationshipProperty:
return self.table
def get_time_filter(
self,
start_dttm: DateTime,
end_dttm: DateTime,
time_range_endpoints: Optional[
Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint]
],
) -> ColumnElement:
col = self.get_sqla_col(label="__time")
l = []
if start_dttm:
l.append(
col >= text(self.dttm_sql_literal(start_dttm, time_range_endpoints))
)
if end_dttm:
if (
time_range_endpoints
and time_range_endpoints[1] == utils.TimeRangeEndpoint.EXCLUSIVE
):
l.append(
col < text(self.dttm_sql_literal(end_dttm, time_range_endpoints))
)
else:
l.append(col <= text(self.dttm_sql_literal(end_dttm, None)))
return and_(*l)
def get_timestamp_expression(
self, time_grain: Optional[str], label: Optional[str] = None
) -> Union[TimestampExpression, Label]:
"""
Return a SQLAlchemy Core element representation of self to be used in a query.
:param time_grain: Optional time grain, e.g. P1Y
:param label: alias/label that column is expected to have
:return: A TimeExpression object wrapped in a Label if supported by db
"""
label = label or utils.DTTM_ALIAS
pdf = self.python_date_format
is_epoch = pdf in ("epoch_s", "epoch_ms")
if not self.expression and not time_grain and not is_epoch:
sqla_col = column(self.column_name, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
if self.expression:
col = literal_column(self.expression)
else:
col = column(self.column_name)
time_expr = self.db_engine_spec.get_timestamp_expr(
col, pdf, time_grain, self.type
)
return self.table.make_sqla_column_compatible(time_expr, label)
def dttm_sql_literal(
self,
dttm: DateTime,
time_range_endpoints: Optional[
Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint]
],
) -> str:
"""Convert datetime object to a SQL expression string"""
dttm_type = self.type or ("DATETIME" if self.is_dttm else None)
sql = self.db_engine_spec.convert_dttm(dttm_type, dttm) if dttm_type else None
if sql:
return sql
tf = self.python_date_format
# Fallback to the default format (if defined) only if the SIP-15 time range
# endpoints, i.e., [start, end) are enabled.
if not tf and time_range_endpoints == (
utils.TimeRangeEndpoint.INCLUSIVE,
utils.TimeRangeEndpoint.EXCLUSIVE,
):
tf = (
self.table.database.get_extra()
.get("python_date_format_by_column_name", {})
.get(self.column_name)
)
if tf:
if tf in ["epoch_ms", "epoch_s"]:
seconds_since_epoch = int(dttm.timestamp())
if tf == "epoch_s":
return str(seconds_since_epoch)
return str(seconds_since_epoch * 1000)
return f"'{dttm.strftime(tf)}'"
# TODO(john-bodley): SIP-15 will explicitly require a type conversion.
return f"""'{dttm.strftime("%Y-%m-%d %H:%M:%S.%f")}'"""
@property
def data(self) -> Dict[str, Any]:
attrs = (
"id",
"column_name",
"verbose_name",
"description",
"expression",
"filterable",
"groupby",
"is_dttm",
"type",
"type_generic",
"python_date_format",
)
return {s: getattr(self, s) for s in attrs if hasattr(self, s)}
class SqlMetric(Model, BaseMetric):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = "sql_metrics"
__table_args__ = (UniqueConstraint("table_id", "metric_name"),)
table_id = Column(Integer, ForeignKey("tables.id"))
table = relationship(
"SqlaTable",
backref=backref("metrics", cascade="all, delete-orphan"),
foreign_keys=[table_id],
)
expression = Column(Text, nullable=False)
extra = Column(Text)
export_fields = [
"metric_name",
"verbose_name",
"metric_type",
"table_id",
"expression",
"description",
"d3format",
"extra",
"warning_text",
]
    update_from_object_fields = [s for s in export_fields if s not in ("table_id",)]
export_parent = "table"
def get_sqla_col(self, label: Optional[str] = None) -> Column:
label = label or self.metric_name
tp = self.table.get_template_processor()
sqla_col: ColumnClause = literal_column(tp.process_template(self.expression))
return self.table.make_sqla_column_compatible(sqla_col, label)
@property
def perm(self) -> Optional[str]:
return (
("{parent_name}.[{obj.metric_name}](id:{obj.id})").format(
obj=self, parent_name=self.table.full_name
)
if self.table
else None
)
def get_perm(self) -> Optional[str]:
return self.perm
def get_extra_dict(self) -> Dict[str, Any]:
try:
return json.loads(self.extra)
except (TypeError, json.JSONDecodeError):
return {}
@property
def is_certified(self) -> bool:
return bool(self.get_extra_dict().get("certification"))
@property
def certified_by(self) -> Optional[str]:
return self.get_extra_dict().get("certification", {}).get("certified_by")
@property
def certification_details(self) -> Optional[str]:
return self.get_extra_dict().get("certification", {}).get("details")
@property
def warning_markdown(self) -> Optional[str]:
return self.get_extra_dict().get("warning_markdown")
@property
def data(self) -> Dict[str, Any]:
attrs = (
"is_certified",
"certified_by",
"certification_details",
"warning_markdown",
)
attr_dict = {s: getattr(self, s) for s in attrs}
attr_dict.update(super().data)
return attr_dict
sqlatable_user = Table(
"sqlatable_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("table_id", Integer, ForeignKey("tables.id")),
)
class SqlaTable( # pylint: disable=too-many-public-methods,too-many-instance-attributes
Model, BaseDatasource
):
"""An ORM object for SqlAlchemy table references"""
type = "table"
query_language = "sql"
is_rls_supported = True
columns: List[TableColumn] = []
metrics: List[SqlMetric] = []
metric_class = SqlMetric
column_class = TableColumn
owner_class = security_manager.user_model
__tablename__ = "tables"
# Note this uniqueness constraint is not part of the physical schema, i.e., it does
# not exist in the migrations, but is required by `import_from_dict` to ensure the
# correct filters are applied in order to identify uniqueness.
#
# The reason it does not physically exist is MySQL, PostgreSQL, etc. have a
# different interpretation of uniqueness when it comes to NULL which is problematic
# given the schema is optional.
__table_args__ = (UniqueConstraint("database_id", "schema", "table_name"),)
table_name = Column(String(250), nullable=False)
main_dttm_col = Column(String(250))
database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
fetch_values_predicate = Column(String(1000))
owners = relationship(owner_class, secondary=sqlatable_user, backref="tables")
database: Database = relationship(
"Database",
backref=backref("tables", cascade="all, delete-orphan"),
foreign_keys=[database_id],
)
schema = Column(String(255))
sql = Column(Text)
is_sqllab_view = Column(Boolean, default=False)
template_params = Column(Text)
extra = Column(Text)
baselink = "tablemodelview"
export_fields = [
"table_name",
"main_dttm_col",
"description",
"default_endpoint",
"database_id",
"offset",
"cache_timeout",
"schema",
"sql",
"params",
"template_params",
"filter_select_enabled",
"fetch_values_predicate",
"extra",
]
update_from_object_fields = [f for f in export_fields if f != "database_id"]
export_parent = "database"
export_children = ["metrics", "columns"]
sqla_aggregations = {
"COUNT_DISTINCT": lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
"COUNT": sa.func.COUNT,
"SUM": sa.func.SUM,
"AVG": sa.func.AVG,
"MIN": sa.func.MIN,
"MAX": sa.func.MAX,
}
def __repr__(self) -> str:
return self.name
@property
def db_engine_spec(self) -> Type[BaseEngineSpec]:
return self.database.db_engine_spec
@property
def changed_by_name(self) -> str:
if not self.changed_by:
return ""
return str(self.changed_by)
@property
def changed_by_url(self) -> str:
if not self.changed_by:
return ""
return f"/superset/profile/{self.changed_by.username}"
@property
def connection(self) -> str:
return str(self.database)
@property
def description_markeddown(self) -> str:
return utils.markdown(self.description)
@property
def datasource_name(self) -> str:
return self.table_name
@property
def datasource_type(self) -> str:
return self.type
@property
def database_name(self) -> str:
return self.database.name
@classmethod
def get_datasource_by_name(
cls,
session: Session,
datasource_name: str,
schema: Optional[str],
database_name: str,
) -> Optional["SqlaTable"]:
schema = schema or None
query = (
session.query(cls)
.join(Database)
.filter(cls.table_name == datasource_name)
.filter(Database.database_name == database_name)
)
# Handling schema being '' or None, which is easier to handle
# in python than in the SQLA query in a multi-dialect way
for tbl in query.all():
if schema == (tbl.schema or None):
return tbl
return None
@property
def link(self) -> Markup:
name = escape(self.name)
anchor = f'<a target="_blank" href="{self.explore_url}">{name}</a>'
return Markup(anchor)
def get_schema_perm(self) -> Optional[str]:
"""Returns schema permission if present, database one otherwise."""
return security_manager.get_schema_perm(self.database, self.schema)
def get_perm(self) -> str:
return f"[{self.database}].[{self.table_name}](id:{self.id})"
@property
def name(self) -> str:
if not self.schema:
return self.table_name
return "{}.{}".format(self.schema, self.table_name)
@property
def full_name(self) -> str:
return utils.get_datasource_full_name(
self.database, self.table_name, schema=self.schema
)
@property
def dttm_cols(self) -> List[str]:
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col and self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self) -> List[str]:
return [c.column_name for c in self.columns if c.is_numeric]
@property
def any_dttm_col(self) -> Optional[str]:
cols = self.dttm_cols
return cols[0] if cols else None
@property
def html(self) -> str:
df = pd.DataFrame((c.column_name, c.type) for c in self.columns)
df.columns = ["field", "type"]
return df.to_html(
index=False,
classes=("dataframe table table-striped table-bordered " "table-condensed"),
)
@property
def sql_url(self) -> str:
return self.database.sql_url + "?table_name=" + str(self.table_name)
def external_metadata(self) -> List[Dict[str, str]]:
if self.sql:
return get_virtual_table_metadata(dataset=self)
return get_physical_table_metadata(
database=self.database, table_name=self.table_name, schema_name=self.schema,
)
@property
def time_column_grains(self) -> Dict[str, Any]:
return {
"time_columns": self.dttm_cols,
"time_grains": [grain.name for grain in self.database.grains()],
}
@property
def select_star(self) -> Optional[str]:
# show_cols and latest_partition set to false to avoid
# the expensive cost of inspecting the DB
return self.database.select_star(
self.table_name, schema=self.schema, show_cols=False, latest_partition=False
)
@property
def health_check_message(self) -> Optional[str]:
check = config["DATASET_HEALTH_CHECK"]
return check(self) if check else None
@property
def data(self) -> Dict[str, Any]:
data_ = super().data
if self.type == "table":
data_["granularity_sqla"] = utils.choicify(self.dttm_cols)
data_["time_grain_sqla"] = [
(g.duration, g.name) for g in self.database.grains() or []
]
data_["main_dttm_col"] = self.main_dttm_col
data_["fetch_values_predicate"] = self.fetch_values_predicate
data_["template_params"] = self.template_params
data_["is_sqllab_view"] = self.is_sqllab_view
data_["health_check_message"] = self.health_check_message
data_["extra"] = self.extra
return data_
@property
def extra_dict(self) -> Dict[str, Any]:
try:
return json.loads(self.extra)
except (TypeError, json.JSONDecodeError):
return {}
def get_fetch_values_predicate(self) -> TextClause:
tp = self.get_template_processor()
try:
return text(tp.process_template(self.fetch_values_predicate))
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in fetch values predicate: %(msg)s",
msg=ex.message,
)
)
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
"""Runs query against sqla to retrieve some
sample values for the given column.
"""
cols = {col.column_name: col for col in self.columns}
target_col = cols[column_name]
tp = self.get_template_processor()
qry = (
select([target_col.get_sqla_col()])
.select_from(self.get_from_clause(tp))
.distinct()
)
if limit:
qry = qry.limit(limit)
if self.fetch_values_predicate:
qry = qry.where(self.get_fetch_values_predicate())
engine = self.database.get_sqla_engine()
sql = "{}".format(qry.compile(engine, compile_kwargs={"literal_binds": True}))
sql = self.mutate_query_from_config(sql)
df = pd.read_sql_query(sql=sql, con=engine)
return df[column_name].to_list()
def mutate_query_from_config(self, sql: str) -> str:
"""Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context"""
sql_query_mutator = config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
username = utils.get_username()
sql = sql_query_mutator(sql, username, security_manager, self.database)
return sql
def get_template_processor(self, **kwargs: Any) -> BaseTemplateProcessor:
return get_template_processor(table=self, database=self.database, **kwargs)
def get_query_str_extended(self, query_obj: QueryObjectDict) -> QueryStringExtended:
sqlaq = self.get_sqla_query(**query_obj)
sql = self.database.compile_sqla_query(sqlaq.sqla_query)
sql = sqlparse.format(sql, reindent=True)
sql = self.mutate_query_from_config(sql)
return QueryStringExtended(
labels_expected=sqlaq.labels_expected, sql=sql, prequeries=sqlaq.prequeries
)
def get_query_str(self, query_obj: QueryObjectDict) -> str:
query_str_ext = self.get_query_str_extended(query_obj)
all_queries = query_str_ext.prequeries + [query_str_ext.sql]
return ";\n\n".join(all_queries) + ";"
def get_sqla_table(self) -> TableClause:
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
return tbl
def get_from_clause(
self, template_processor: Optional[BaseTemplateProcessor] = None
) -> Union[TableClause, Alias]:
"""
Return where to select the columns and metrics from. Either a physical table
or a virtual table with its own subquery.
"""
if not self.is_virtual:
return self.get_sqla_table()
from_sql = self.get_rendered_sql(template_processor)
parsed_query = ParsedQuery(from_sql)
if not (
parsed_query.is_unknown()
or self.db_engine_spec.is_readonly_query(parsed_query)
):
raise QueryObjectValidationError(
_("Virtual dataset query must be read-only")
)
return TextAsFrom(sa.text(from_sql), []).alias(VIRTUAL_TABLE_ALIAS)
def get_rendered_sql(
self, template_processor: Optional[BaseTemplateProcessor] = None
) -> str:
"""
Render sql with template engine (Jinja).
"""
sql = self.sql
if template_processor:
try:
sql = template_processor.process_template(sql)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error while rendering virtual dataset query: %(msg)s",
msg=ex.message,
)
)
sql = sqlparse.format(sql.strip("\t\r\n; "), strip_comments=True)
if not sql:
raise QueryObjectValidationError(_("Virtual dataset query cannot be empty"))
if len(sqlparse.split(sql)) > 1:
raise QueryObjectValidationError(
_("Virtual dataset query cannot consist of multiple statements")
)
return sql
def adhoc_metric_to_sqla(
self, metric: AdhocMetric, columns_by_name: Dict[str, TableColumn]
) -> ColumnElement:
"""
Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict columns_by_name: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
"""
expression_type = metric.get("expressionType")
label = utils.get_metric_name(metric)
if expression_type == utils.AdhocMetricExpressionType.SIMPLE:
column_name = cast(str, metric["column"].get("column_name"))
table_column: Optional[TableColumn] = columns_by_name.get(column_name)
if table_column:
sqla_column = table_column.get_sqla_col()
else:
sqla_column = column(column_name)
sqla_metric = self.sqla_aggregations[metric["aggregate"]](sqla_column)
elif expression_type == utils.AdhocMetricExpressionType.SQL:
tp = self.get_template_processor()
expression = tp.process_template(cast(str, metric["sqlExpression"]))
sqla_metric = literal_column(expression)
else:
raise QueryObjectValidationError("Adhoc metric expressionType is invalid")
return self.make_sqla_column_compatible(sqla_metric, label)
def make_sqla_column_compatible(
self, sqla_col: ColumnElement, label: Optional[str] = None
) -> ColumnElement:
"""Takes a sqlalchemy column object and adds label info if supported by engine.
:param sqla_col: sqlalchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
"""
label_expected = label or sqla_col.name
db_engine_spec = self.db_engine_spec
# label the column when the engine supports aliases in SELECT
if db_engine_spec.allows_alias_in_select:
label = db_engine_spec.make_label_compatible(label_expected)
sqla_col = sqla_col.label(label)
sqla_col.key = label_expected
return sqla_col
def make_orderby_compatible(
self, select_exprs: List[ColumnElement], orderby_exprs: List[ColumnElement]
) -> None:
"""
If needed, make sure aliases for selected columns are not used in
`ORDER BY`.
In some databases (e.g. Presto), `ORDER BY` clause is not able to
automatically pick the source column if a `SELECT` clause alias is named
the same as a source column. In this case, we update the SELECT alias to
another name to avoid the conflict.
"""
if self.db_engine_spec.allows_alias_to_source_column:
return
def is_alias_used_in_orderby(col: ColumnElement) -> bool:
if not isinstance(col, Label):
return False
regexp = re.compile(f"\\(.*\\b{re.escape(col.name)}\\b.*\\)", re.IGNORECASE)
return any(regexp.search(str(x)) for x in orderby_exprs)
# Iterate through selected columns, if column alias appears in orderby
# use another `alias`. The final output columns will still use the
# original names, because they are updated by `labels_expected` after
# querying.
for col in select_exprs:
if is_alias_used_in_orderby(col):
col.name = f"{col.name}__"
def _get_sqla_row_level_filters(
self, template_processor: BaseTemplateProcessor
) -> List[str]:
"""
Return the appropriate row level security filters for
this table and the current user.
:param BaseTemplateProcessor template_processor: The template
processor to apply to the filters.
:returns: A list of SQL clauses to be ANDed together.
:rtype: List[str]
"""
filters_grouped: Dict[Union[int, str], List[str]] = defaultdict(list)
try:
for filter_ in security_manager.get_rls_filters(self):
clause = text(
f"({template_processor.process_template(filter_.clause)})"
)
filters_grouped[filter_.group_key or filter_.id].append(clause)
return [or_(*clauses) for clauses in filters_grouped.values()]
except TemplateError as ex:
raise QueryObjectValidationError(
_("Error in jinja expression in RLS filters: %(msg)s", msg=ex.message,)
)
def get_sqla_query( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
self,
metrics: Optional[List[Metric]] = None,
granularity: Optional[str] = None,
from_dttm: Optional[datetime] = None,
to_dttm: Optional[datetime] = None,
columns: Optional[List[str]] = None,
groupby: Optional[List[str]] = None,
filter: Optional[ # pylint: disable=redefined-builtin
List[Dict[str, Any]]
] = None,
is_timeseries: bool = True,
timeseries_limit: int = 15,
timeseries_limit_metric: Optional[Metric] = None,
row_limit: Optional[int] = None,
row_offset: Optional[int] = None,
inner_from_dttm: Optional[datetime] = None,
inner_to_dttm: Optional[datetime] = None,
orderby: Optional[List[OrderBy]] = None,
extras: Optional[Dict[str, Any]] = None,
order_desc: bool = True,
is_rowcount: bool = False,
apply_fetch_values_predicate: bool = False,
) -> SqlaQuery:
"""Querying any sqla table from this common interface"""
template_kwargs = {
"from_dttm": from_dttm.isoformat() if from_dttm else None,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"row_offset": row_offset,
"to_dttm": to_dttm.isoformat() if to_dttm else None,
"filter": filter,
"columns": [col.column_name for col in self.columns],
}
template_kwargs.update(self.template_params_dict)
extra_cache_keys: List[Any] = []
template_kwargs["extra_cache_keys"] = extra_cache_keys
removed_filters: List[str] = []
template_kwargs["removed_filters"] = removed_filters
template_processor = self.get_template_processor(**template_kwargs)
db_engine_spec = self.db_engine_spec
prequeries: List[str] = []
orderby = orderby or []
extras = extras or {}
need_groupby = bool(metrics is not None or groupby)
metrics = metrics or []
# For backward compatibility
if granularity not in self.dttm_cols and granularity is not None:
granularity = self.main_dttm_col
# Database spec supports join-free timeslot grouping
time_groupby_inline = db_engine_spec.time_groupby_inline
columns_by_name: Dict[str, TableColumn] = {
col.column_name: col for col in self.columns
}
metrics_by_name: Dict[str, SqlMetric] = {m.metric_name: m for m in self.metrics}
if not granularity and is_timeseries:
raise QueryObjectValidationError(
_(
"Datetime column not provided as part table configuration "
"and is required by this type of chart"
)
)
if not metrics and not columns and not groupby:
raise QueryObjectValidationError(_("Empty query?"))
metrics_exprs: List[ColumnElement] = []
for metric in metrics:
if utils.is_adhoc_metric(metric):
assert isinstance(metric, dict)
metrics_exprs.append(self.adhoc_metric_to_sqla(metric, columns_by_name))
elif isinstance(metric, str) and metric in metrics_by_name:
metrics_exprs.append(metrics_by_name[metric].get_sqla_col())
else:
raise QueryObjectValidationError(
_("Metric '%(metric)s' does not exist", metric=metric)
)
if metrics_exprs:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
# To ensure correct handling of the ORDER BY labeling we need to reference the
# metric instance if defined in the SELECT clause.
# use the key of the ColumnClause for the expected label
metrics_exprs_by_label = {m.key: m for m in metrics_exprs}
metrics_exprs_by_expr = {str(m): m for m in metrics_exprs}
# Since orderby may use adhoc metrics too, we need to process them first
orderby_exprs: List[ColumnElement] = []
for orig_col, ascending in orderby:
col: Union[AdhocMetric, ColumnElement] = orig_col
if isinstance(col, dict):
col = cast(AdhocMetric, col)
if utils.is_adhoc_metric(col):
# convert the adhoc sort-by metric into a SQLAlchemy column expression
col = self.adhoc_metric_to_sqla(col, columns_by_name)
# if the adhoc metric has been defined before
# use the existing instance.
col = metrics_exprs_by_expr.get(str(col), col)
need_groupby = True
elif col in columns_by_name:
col = columns_by_name[col].get_sqla_col()
elif col in metrics_exprs_by_label:
col = metrics_exprs_by_label[col]
need_groupby = True
elif col in metrics_by_name:
col = metrics_by_name[col].get_sqla_col()
need_groupby = True
if isinstance(col, ColumnElement):
orderby_exprs.append(col)
else:
# Could not convert a column reference to valid ColumnElement
raise QueryObjectValidationError(
_("Unknown column used in orderby: %(col)s", col=orig_col)
)
select_exprs: List[Union[Column, Label]] = []
groupby_exprs_sans_timestamp = OrderedDict()
# filter out the pseudo column __timestamp from columns
columns = columns or []
columns = [col for col in columns if col != utils.DTTM_ALIAS]
if need_groupby:
# dedup columns while preserving order
columns = groupby or columns
for selected in columns:
# if groupby field/expr equals granularity field/expr
if selected == granularity:
time_grain = extras.get("time_grain_sqla")
sqla_col = columns_by_name[selected]
outer = sqla_col.get_timestamp_expression(time_grain, selected)
# if groupby field equals a selected column
elif selected in columns_by_name:
outer = columns_by_name[selected].get_sqla_col()
else:
outer = literal_column(f"({selected})")
outer = self.make_sqla_column_compatible(outer, selected)
groupby_exprs_sans_timestamp[outer.name] = outer
select_exprs.append(outer)
elif columns:
for selected in columns:
select_exprs.append(
columns_by_name[selected].get_sqla_col()
if selected in columns_by_name
else self.make_sqla_column_compatible(literal_column(selected))
)
metrics_exprs = []
time_range_endpoints = extras.get("time_range_endpoints")
groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
if granularity:
if granularity not in columns_by_name:
raise QueryObjectValidationError(
_(
'Time column "%(col)s" does not exist in dataset',
col=granularity,
)
)
dttm_col = columns_by_name[granularity]
time_grain = extras.get("time_grain_sqla")
time_filters = []
if is_timeseries:
timestamp = dttm_col.get_timestamp_expression(time_grain)
# always put timestamp as the first column
select_exprs.insert(0, timestamp)
groupby_exprs_with_timestamp[timestamp.name] = timestamp
# Use main dttm column to support index with secondary dttm columns.
if (
db_engine_spec.time_secondary_columns
and self.main_dttm_col in self.dttm_cols
and self.main_dttm_col != dttm_col.column_name
):
time_filters.append(
columns_by_name[self.main_dttm_col].get_time_filter(
from_dttm, to_dttm, time_range_endpoints
)
)
time_filters.append(
dttm_col.get_time_filter(from_dttm, to_dttm, time_range_endpoints)
)
# Always remove duplicates by column name, as sometimes `metrics_exprs`
# can have the same name as a groupby column (e.g. when users use
# raw columns as custom SQL adhoc metric).
select_exprs = remove_duplicates(
select_exprs + metrics_exprs, key=lambda x: x.name
)
# Expected output columns
labels_expected = [c.key for c in select_exprs]
# Order-by columns are "hidden" columns; some databases require them to
# always be present in SELECT if an aggregation function is used
if not db_engine_spec.allows_hidden_ordeby_agg:
select_exprs = remove_duplicates(select_exprs + orderby_exprs)
qry = sa.select(select_exprs)
tbl = self.get_from_clause(template_processor)
if groupby_exprs_with_timestamp:
qry = qry.group_by(*groupby_exprs_with_timestamp.values())
where_clause_and = []
having_clause_and = []
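# Translate each simple filter (col / op / val) from the query object into a
# SQLAlchemy condition appended to the WHERE clause below.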
for flt in filter: # type: ignore
if not all([flt.get(s) for s in ["col", "op"]]):
continue
col = flt["col"]
val = flt.get("val")
op = flt["op"].upper()
col_obj = columns_by_name.get(col)
if is_feature_enabled("ENABLE_TEMPLATE_REMOVE_FILTERS"):
if col in removed_filters:
# Skip generating SQLA filter when the jinja template handles it.
continue
if col_obj:
col_spec = db_engine_spec.get_column_spec(col_obj.type)
is_list_target = op in (
utils.FilterOperator.IN.value,
utils.FilterOperator.NOT_IN.value,
)
if col_spec:
target_type = col_spec.generic_type
else:
target_type = GenericDataType.STRING
eq = self.filter_values_handler(
values=val,
target_column_type=target_type,
is_list_target=is_list_target,
)
if is_list_target:
assert isinstance(eq, (tuple, list))
if len(eq) == 0:
raise QueryObjectValidationError(
_("Filter value list cannot be empty")
)
if None in eq:
eq = [x for x in eq if x is not None]
is_null_cond = col_obj.get_sqla_col().is_(None)
if eq:
cond = or_(is_null_cond, col_obj.get_sqla_col().in_(eq))
else:
cond = is_null_cond
else:
cond = col_obj.get_sqla_col().in_(eq)
if op == utils.FilterOperator.NOT_IN.value:
cond = ~cond
where_clause_and.append(cond)
elif op == utils.FilterOperator.IS_NULL.value:
where_clause_and.append(col_obj.get_sqla_col().is_(None))
elif op == utils.FilterOperator.IS_NOT_NULL.value:
where_clause_and.append(col_obj.get_sqla_col().isnot(None))
elif op == utils.FilterOperator.IS_TRUE.value:
where_clause_and.append(col_obj.get_sqla_col().is_(True))
elif op == utils.FilterOperator.IS_FALSE.value:
where_clause_and.append(col_obj.get_sqla_col().is_(False))
else:
if eq is None:
raise QueryObjectValidationError(
_(
"Must specify a value for filters "
"with comparison operators"
)
)
if op == utils.FilterOperator.EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() == eq)
elif op == utils.FilterOperator.NOT_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() != eq)
elif op == utils.FilterOperator.GREATER_THAN.value:
where_clause_and.append(col_obj.get_sqla_col() > eq)
elif op == utils.FilterOperator.LESS_THAN.value:
where_clause_and.append(col_obj.get_sqla_col() < eq)
elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() >= eq)
elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() <= eq)
elif op == utils.FilterOperator.LIKE.value:
where_clause_and.append(col_obj.get_sqla_col().like(eq))
elif op == utils.FilterOperator.ILIKE.value:
where_clause_and.append(col_obj.get_sqla_col().ilike(eq))
else:
raise QueryObjectValidationError(
_("Invalid filter operation type: %(op)s", op=op)
)
if is_feature_enabled("ROW_LEVEL_SECURITY"):
where_clause_and += self._get_sqla_row_level_filters(template_processor)
if extras:
where = extras.get("where")
if where:
try:
where = template_processor.process_template(where)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in WHERE clause: %(msg)s",
msg=ex.message,
)
)
where_clause_and += [sa.text("({})".format(where))]
having = extras.get("having")
if having:
try:
having = template_processor.process_template(having)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in HAVING clause: %(msg)s",
msg=ex.message,
)
)
having_clause_and += [sa.text("({})".format(having))]
if apply_fetch_values_predicate and self.fetch_values_predicate:
qry = qry.where(self.get_fetch_values_predicate())
if granularity:
qry = qry.where(and_(*(time_filters + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
self.make_orderby_compatible(select_exprs, orderby_exprs)
for col, (orig_col, ascending) in zip(orderby_exprs, orderby):
if not db_engine_spec.allows_alias_in_orderby and isinstance(col, Label):
# if engine does not allow using SELECT alias in ORDER BY
# revert to the underlying column
col = col.element
direction = asc if ascending else desc
qry = qry.order_by(direction(col))
if row_limit:
qry = qry.limit(row_limit)
if row_offset:
qry = qry.offset(row_offset)
if (
is_timeseries # pylint: disable=too-many-boolean-expressions
and timeseries_limit
and not time_groupby_inline
and groupby_exprs_sans_timestamp
):
if db_engine_spec.allows_joins:
# some sql dialects require for order by expressions
# to also be in the select clause -- others, e.g. vertica,
# require a unique inner alias
inner_main_metric_expr = self.make_sqla_column_compatible(
main_metric_expr, "mme_inner__"
)
inner_groupby_exprs = []
inner_select_exprs = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
inner_select_exprs += [inner_main_metric_expr]
subq = select(inner_select_exprs).select_from(tbl)
inner_time_filter = dttm_col.get_time_filter(
inner_from_dttm or from_dttm,
inner_to_dttm or to_dttm,
time_range_endpoints,
)
subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
subq = subq.group_by(*inner_groupby_exprs)
ob = inner_main_metric_expr
if timeseries_limit_metric:
ob = self._get_timeseries_orderby(
timeseries_limit_metric, metrics_by_name, columns_by_name
)
direction = desc if order_desc else asc
subq = subq.order_by(direction(ob))
subq = subq.limit(timeseries_limit)
on_clause = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
# in this case the column name, not the alias, needs to be
# conditionally mutated, as it refers to the column alias in
# the inner query
col_name = db_engine_spec.make_label_compatible(gby_name + "__")
on_clause.append(gby_obj == column(col_name))
tbl = tbl.join(subq.alias(), and_(*on_clause))
else:
if timeseries_limit_metric:
orderby = [
(
self._get_timeseries_orderby(
timeseries_limit_metric,
metrics_by_name,
columns_by_name,
),
False,
)
]
# run prequery to get top groups
prequery_obj = {
"is_timeseries": False,
"row_limit": timeseries_limit,
"metrics": metrics,
"granularity": granularity,
"groupby": groupby,
"from_dttm": inner_from_dttm or from_dttm,
"to_dttm": inner_to_dttm or to_dttm,
"filter": filter,
"orderby": orderby,
"extras": extras,
"columns": columns,
"order_desc": True,
}
result = self.query(prequery_obj)
prequeries.append(result.query)
dimensions = [
c
for c in result.df.columns
if c not in metrics and c in groupby_exprs_sans_timestamp
]
top_groups = self._get_top_groups(
result.df, dimensions, groupby_exprs_sans_timestamp
)
qry = qry.where(top_groups)
qry = qry.select_from(tbl)
if is_rowcount:
if not db_engine_spec.allows_subqueries:
raise QueryObjectValidationError(
_("Database does not support subqueries")
)
label = "rowcount"
col = self.make_sqla_column_compatible(literal_column("COUNT(*)"), label)
qry = select([col]).select_from(qry.alias("rowcount_qry"))
labels_expected = [label]
return SqlaQuery(
extra_cache_keys=extra_cache_keys,
labels_expected=labels_expected,
sqla_query=qry,
prequeries=prequeries,
)
def _get_timeseries_orderby(
self,
timeseries_limit_metric: Metric,
metrics_by_name: Dict[str, SqlMetric],
columns_by_name: Dict[str, TableColumn],
) -> Column:
if utils.is_adhoc_metric(timeseries_limit_metric):
assert isinstance(timeseries_limit_metric, dict)
ob = self.adhoc_metric_to_sqla(timeseries_limit_metric, columns_by_name)
elif (
isinstance(timeseries_limit_metric, str)
and timeseries_limit_metric in metrics_by_name
):
ob = metrics_by_name[timeseries_limit_metric].get_sqla_col()
else:
raise QueryObjectValidationError(
_("Metric '%(metric)s' does not exist", metric=timeseries_limit_metric)
)
return ob
def _get_top_groups( # pylint: disable=no-self-use
self,
df: pd.DataFrame,
dimensions: List[str],
groupby_exprs: "OrderedDict[str, Any]",
) -> ColumnElement:
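# Builds a clause of the form (dim1 = 'a' AND dim2 = 'x') OR (dim1 = 'b' AND ...)
# matching the top groups returned by the prequery.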
groups = []
for _unused, row in df.iterrows():
group = []
for dimension in dimensions:
group.append(groupby_exprs[dimension] == row[dimension])
groups.append(and_(*group))
return or_(*groups)
def query(self, query_obj: QueryObjectDict) -> QueryResult:
qry_start_dttm = datetime.now()
query_str_ext = self.get_query_str_extended(query_obj)
sql = query_str_ext.sql
status = utils.QueryStatus.SUCCESS
errors = None
error_message = None
def assign_column_label(df: pd.DataFrame) -> Optional[pd.DataFrame]:
"""
Some engines change the case or generate bespoke column names, either by
default or due to lack of support for aliasing. This function ensures that
the column names in the DataFrame correspond to what is expected by
the viz components.
Sometimes a query may also contain only order-by columns that are not used
as metrics or groupby columns but still need to be present in the SQL `SELECT`;
filtering by `labels_expected` makes sure we only return the columns users want.
:param df: Original DataFrame returned by the engine
:return: Mutated DataFrame
"""
labels_expected = query_str_ext.labels_expected
if df is not None and not df.empty:
if len(df.columns) < len(labels_expected):
raise QueryObjectValidationError(
_("Db engine did not return all queried columns")
)
if len(df.columns) > len(labels_expected):
df = df.iloc[:, 0 : len(labels_expected)]
df.columns = labels_expected
return df
try:
df = self.database.get_df(sql, self.schema, mutator=assign_column_label)
except Exception as ex: # pylint: disable=broad-except
df = pd.DataFrame()
status = utils.QueryStatus.FAILED
logger.warning(
"Query %s on schema %s failed", sql, self.schema, exc_info=True
)
db_engine_spec = self.db_engine_spec
errors = [
dataclasses.asdict(error) for error in db_engine_spec.extract_errors(ex)
]
error_message = utils.error_msg_from_exception(ex)
return QueryResult(
status=status,
df=df,
duration=datetime.now() - qry_start_dttm,
query=sql,
errors=errors,
error_message=error_message,
)
def get_sqla_table_object(self) -> Table:
return self.database.get_table(self.table_name, schema=self.schema)
def fetch_metadata(self, commit: bool = True) -> MetadataResult:
"""
Fetches the metadata for the table and merges it in
:param commit: should the changes be committed or not.
:return: A MetadataResult with lists of added, removed and modified column names.
"""
new_columns = self.external_metadata()
metrics = []
any_date_col = None
db_engine_spec = self.db_engine_spec
old_columns = db.session.query(TableColumn).filter(TableColumn.table == self)
old_columns_by_name: Dict[str, TableColumn] = {
col.column_name: col for col in old_columns
}
results = MetadataResult(
removed=[
col
for col in old_columns_by_name
if col not in {col["name"] for col in new_columns}
]
)
# clear old columns before adding modified columns back
self.columns = []
for col in new_columns:
old_column = old_columns_by_name.pop(col["name"], None)
if not old_column:
results.added.append(col["name"])
new_column = TableColumn(
column_name=col["name"], type=col["type"], table=self
)
new_column.is_dttm = new_column.is_temporal
db_engine_spec.alter_new_orm_column(new_column)
else:
new_column = old_column
if new_column.type != col["type"]:
results.modified.append(col["name"])
new_column.type = col["type"]
new_column.expression = ""
new_column.groupby = True
new_column.filterable = True
self.columns.append(new_column)
if not any_date_col and new_column.is_temporal:
any_date_col = col["name"]
self.columns.extend(
[col for col in old_columns_by_name.values() if col.expression]
)
metrics.append(
SqlMetric(
metric_name="count",
verbose_name="COUNT(*)",
metric_type="count",
expression="COUNT(*)",
)
)
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
self.add_missing_metrics(metrics)
# Apply config supplied mutations.
config["SQLA_TABLE_MUTATOR"](self)
db.session.merge(self)
if commit:
db.session.commit()
return results
@classmethod
def query_datasources_by_name(
cls,
session: Session,
database: Database,
datasource_name: str,
schema: Optional[str] = None,
) -> List["SqlaTable"]:
query = (
session.query(cls)
.filter_by(database_id=database.id)
.filter_by(table_name=datasource_name)
)
if schema:
query = query.filter_by(schema=schema)
return query.all()
@staticmethod
def default_query(qry: Query) -> Query:
return qry.filter_by(is_sqllab_view=False)
def has_extra_cache_key_calls(self, query_obj: QueryObjectDict) -> bool:
"""
Detects the presence of calls to `ExtraCache` methods in items in query_obj that
can be templated. If any are present, the query must be evaluated to extract
additional keys for the cache key. This method is needed to avoid executing the
template code unnecessarily, as it may contain expensive calls, e.g. to extract
the latest partition of a database.
:param query_obj: query object to analyze
:return: True if there are call(s) to an `ExtraCache` method, False otherwise
"""
templatable_statements: List[str] = []
if self.sql:
templatable_statements.append(self.sql)
if self.fetch_values_predicate:
templatable_statements.append(self.fetch_values_predicate)
extras = query_obj.get("extras", {})
if "where" in extras:
templatable_statements.append(extras["where"])
if "having" in extras:
templatable_statements.append(extras["having"])
if is_feature_enabled("ROW_LEVEL_SECURITY") and self.is_rls_supported:
templatable_statements += [
f.clause for f in security_manager.get_rls_filters(self)
]
for statement in templatable_statements:
if ExtraCache.regex.search(statement):
return True
return False
def get_extra_cache_keys(self, query_obj: QueryObjectDict) -> List[Hashable]:
"""
The cache key of a SqlaTable needs to consider any keys added by the parent
class and any keys added via `ExtraCache`.
:param query_obj: query object to analyze
:return: The extra cache keys
"""
extra_cache_keys = super().get_extra_cache_keys(query_obj)
if self.has_extra_cache_key_calls(query_obj):
sqla_query = self.get_sqla_query(**query_obj)
extra_cache_keys += sqla_query.extra_cache_keys
return extra_cache_keys
@staticmethod
def before_update(
mapper: Mapper, # pylint: disable=unused-argument
connection: Connection, # pylint: disable=unused-argument
target: "SqlaTable",
) -> None:
"""
Check, before the update, whether the target table already exists.
Note this listener is called when any fields are being updated and thus it is
necessary to first check whether the reference table is being updated.
Note this logic is temporary, given uniqueness is handled via the dataset DAO,
but is necessary until both the legacy datasource editor and datasource/save
endpoints are deprecated.
:param mapper: The table mapper
:param connection: The DB-API connection
:param target: The mapped instance being persisted
:raises Exception: If the target table is not unique
"""
from superset.datasets.commands.exceptions import get_dataset_exist_error_msg
from superset.datasets.dao import DatasetDAO
# Check whether the relevant attributes have changed.
state = db.inspect(target) # pylint: disable=no-member
for attr in ["database_id", "schema", "table_name"]:
history = state.get_history(attr, True)
if history.has_changes():
break
else:
return None
if not DatasetDAO.validate_uniqueness(
target.database_id, target.schema, target.table_name
):
raise Exception(get_dataset_exist_error_msg(target.full_name))
@staticmethod
def update_table(
_mapper: Mapper, _connection: Connection, obj: Union[SqlMetric, TableColumn]
) -> None:
"""
Forces an update to the table's changed_on value when a metric or column on the
table is updated. This busts the cache key for all charts that use the table.
:param _mapper: Unused.
:param _connection: Unused.
:param obj: The metric or column that was updated.
"""
db.session.execute(update(SqlaTable).where(SqlaTable.id == obj.table.id))
sa.event.listen(SqlaTable, "after_insert", security_manager.set_perm)
sa.event.listen(SqlaTable, "after_update", security_manager.set_perm)
sa.event.listen(SqlaTable, "before_update", SqlaTable.before_update)
sa.event.listen(SqlMetric, "after_update", SqlaTable.update_table)
sa.event.listen(TableColumn, "after_update", SqlaTable.update_table)
RLSFilterRoles = Table(
"rls_filter_roles",
metadata,
Column("id", Integer, primary_key=True),
Column("role_id", Integer, ForeignKey("ab_role.id"), nullable=False),
Column("rls_filter_id", Integer, ForeignKey("row_level_security_filters.id")),
)
RLSFilterTables = Table(
"rls_filter_tables",
metadata,
Column("id", Integer, primary_key=True),
Column("table_id", Integer, ForeignKey("tables.id")),
Column("rls_filter_id", Integer, ForeignKey("row_level_security_filters.id")),
)
class RowLevelSecurityFilter(Model, AuditMixinNullable):
"""
Custom where clauses attached to Tables and Roles.
"""
__tablename__ = "row_level_security_filters"
id = Column(Integer, primary_key=True)
filter_type = Column(
Enum(*[filter_type.value for filter_type in utils.RowLevelSecurityFilterType])
)
group_key = Column(String(255), nullable=True)
roles = relationship(
security_manager.role_model,
secondary=RLSFilterRoles,
backref="row_level_security_filters",
)
tables = relationship(
SqlaTable, secondary=RLSFilterTables, backref="row_level_security_filters"
)
clause = Column(Text, nullable=False)
|
py | 1a556080170c3e9f8f639e320fdd54f3a8feea36 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['JobAgentArgs', 'JobAgent']
@pulumi.input_type
class JobAgentArgs:
def __init__(__self__, *,
database_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
server_name: pulumi.Input[str],
job_agent_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a JobAgent resource.
:param pulumi.Input[str] database_id: Resource ID of the database to store job metadata in.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[str] job_agent_name: The name of the job agent to be created or updated.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input['SkuArgs'] sku: The name and tier of the SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "database_id", database_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server_name", server_name)
if job_agent_name is not None:
pulumi.set(__self__, "job_agent_name", job_agent_name)
if location is not None:
pulumi.set(__self__, "location", location)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="databaseId")
def database_id(self) -> pulumi.Input[str]:
"""
Resource ID of the database to store job metadata in.
"""
return pulumi.get(self, "database_id")
@database_id.setter
def database_id(self, value: pulumi.Input[str]):
pulumi.set(self, "database_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
The name of the server.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter(name="jobAgentName")
def job_agent_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the job agent to be created or updated.
"""
return pulumi.get(self, "job_agent_name")
@job_agent_name.setter
def job_agent_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_agent_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
The name and tier of the SKU.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class JobAgent(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
database_id: Optional[pulumi.Input[str]] = None,
job_agent_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
An Azure SQL job agent.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] database_id: Resource ID of the database to store job metadata in.
:param pulumi.Input[str] job_agent_name: The name of the job agent to be created or updated.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The name and tier of the SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: JobAgentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An Azure SQL job agent.
:param str resource_name: The name of the resource.
:param JobAgentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(JobAgentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
database_id: Optional[pulumi.Input[str]] = None,
job_agent_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = JobAgentArgs.__new__(JobAgentArgs)
if database_id is None and not opts.urn:
raise TypeError("Missing required property 'database_id'")
__props__.__dict__["database_id"] = database_id
__props__.__dict__["job_agent_name"] = job_agent_name
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__.__dict__["server_name"] = server_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:JobAgent"), pulumi.Alias(type_="azure-native:sql:JobAgent"), pulumi.Alias(type_="azure-nextgen:sql:JobAgent"), pulumi.Alias(type_="azure-native:sql/v20170301preview:JobAgent"), pulumi.Alias(type_="azure-nextgen:sql/v20170301preview:JobAgent"), pulumi.Alias(type_="azure-native:sql/v20200202preview:JobAgent"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:JobAgent"), pulumi.Alias(type_="azure-native:sql/v20200801preview:JobAgent"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:JobAgent")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(JobAgent, __self__).__init__(
'azure-native:sql/v20201101preview:JobAgent',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'JobAgent':
"""
Get an existing JobAgent resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = JobAgentArgs.__new__(JobAgentArgs)
__props__.__dict__["database_id"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return JobAgent(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="databaseId")
def database_id(self) -> pulumi.Output[str]:
"""
Resource ID of the database to store job metadata in.
"""
return pulumi.get(self, "database_id")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The name and tier of the SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The state of the job agent.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
|
py | 1a556089dc8d99efd4a3318637778b37085fc0ce | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def _add_classification(df):
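# Tag each row as "fragment" or "lead" depending on which column of inputs.csv
# (assumed to hold the respective identifiers) contains its other_id.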
i = pd.read_csv("inputs.csv")
frags = set(i['fragment'])
leads = set(i['lead'])
classification = []
for a in list(df['other_id']):
if a in frags:
classification.append("fragment")
elif a in leads:
classification.append("lead")
df["classification"] = classification
return df
def barplot1(df):
ligs = set(df["apo"])
order_dic = {np.mean([float(a) for a in list(df.loc[df["apo"] == l]["volume_overlap"])]): l for l in ligs}
ks = sorted(order_dic.keys(), reverse=True)
ord = [order_dic[k] for k in ks]
g = sns.FacetGrid(df, col="buriedness_method", height=7, aspect=1, legend_out=True)
g = (g.map(sns.barplot, 'apo', 'volume_overlap', 'lig_class', order=ord, hue_order=["fragment", "lead"],
ci=None, palette="Reds").add_legend())
g.set_xticklabels(rotation=45)
g.set(xlabel='', ylabel="", title="")
plt.savefig("barplot1.png")
plt.close()
def barplot2(df, clas = "fragment"):
def over_threshold(l, threshold):
return [item for item in l if item >= threshold]
df = df.loc[df["atom_type"] != 'apolar']
df = df.loc[df["classification"] == clas]
buriedness_method = ["ligsite", "ghecom", "ghecom_internal"]
data = {}
for b in buriedness_method:
x = df.loc[df["buriedness_method"] == b]
x = x.loc[x["atom_type"] != "apolar"]
data.update({b: list(x["atomic_overlap"])})
total = max(len(v) for v in data.values())  # largest number of atomic overlap values across methods
bm = []
perc = []
thres = []
thresholds = [1, 5, 10, 50, 100]
for t in thresholds:
for b, d in data.items():
bm.append(b)
thres.append(t)
perc.append((len(over_threshold(d, t)) / total) * 100)
ndf = pd.DataFrame({"thresholds": thres, "passed": perc, "buriedness_method": bm})
ax = sns.barplot(x=ndf["thresholds"], y=ndf["passed"], hue=ndf["buriedness_method"],
hue_order= ['ligsite', 'ghecom', 'ghecom_internal'], palette='Reds')
ax.set(xlabel="Overlap threshold (%)", ylabel="Atomic overlap greater than threshold (%)", title="", ylim=[0, 100])
plt.savefig("barplot2_{}.png".format(clas))
plt.close()
def boxplot1(df):
ax = sns.boxplot(x='buriedness_method', y='volume_overlap', hue="classification", data=df, order=["ligsite", "ghecom", "ghecom_internal"],
palette="Reds")
ax.set(xlabel='Buriedness Method', ylabel="Percentage Volume Overlap", title="", ylim=[-5,100])
plt.savefig("boxplot1.png")
plt.close()
if __name__ == "__main__":
sns.set(style="ticks", color_codes=True)
df = pd.read_csv("analysis.csv")
df = df.loc[df["ligand_cavity"] == True]
# barplot1(df)
# barplot1(df)
barplot2(df, clas="fragment")
barplot2(df, clas="lead")
|
py | 1a556107ec49f6c3b86972efb965ec190c356706 | """ Space Rangers 2: Reboot
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Install vb6run
"""
util.protontricks('vb6run')
|
py | 1a55612dc2e39df29c485f4f72b4bca54b4a5d0a | from newton.db.seed import Trades
from .base import DaoBase
class TradesDao(DaoBase):
def _get_model(self):
return Trades
def history(self, from_datetime, to_datetime, filters=None):
with self._session() as s:
q = s.query(self._Model)
if from_datetime is not None:
q = q.filter(self._Model.entry_datetime >= from_datetime)
if to_datetime is not None:
q = q.filter(self._Model.exit_datetime <= to_datetime)
if filters is None:
return q.all()
if not isinstance(filters, dict):
raise TypeError("params should be 'dict'")
q = self._custom_filters(q, filters)
return q.all()
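# Illustrative usage sketch; assumes DaoBase supplies the session factory and
# _custom_filters, and the filter key below is a placeholder:
#   dao = TradesDao()
#   trades = dao.history(from_datetime, to_datetime, filters={"symbol": "BTCUSD"})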
|
py | 1a5561c6e0ed7992c83cc4b42ddb42a4e0607ea9 | from __future__ import division, print_function
from astropy.io import fits as pyfits
from astropy.utils.data import get_pkg_data_filename
from astropy import units as u
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_equal,
assert_allclose)
import pytest
import tempfile
import warnings
from ..lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from ..lightcurvefile import LightCurveFile, KeplerLightCurveFile, TessLightCurveFile
from ..targetpixelfile import KeplerTargetPixelFile, TessTargetPixelFile
from ..utils import LightkurveWarning
from .test_targetpixelfile import TABBY_TPF
# 8th Quarter of Tabby's star
TABBY_Q8 = ("https://archive.stsci.edu/missions/kepler/lightcurves"
"/0084/008462852/kplr008462852-2011073133259_llc.fits")
K2_C08 = ("https://archive.stsci.edu/missions/k2/lightcurves/c8/"
"220100000/39000/ktwo220139473-c08_llc.fits")
KEPLER10 = ("https://archive.stsci.edu/missions/kepler/lightcurves/"
"0119/011904151/kplr011904151-2010009091648_llc.fits")
TESS_SIM = ("https://archive.stsci.edu/missions/tess/ete-6/tid/00/000/"
"004/104/tess2019128220341-0000000410458113-0016-s_lc.fits")
filename_tess = get_pkg_data_filename("data/tess25155310-s01-first-cadences.fits.gz")
filename_tess_custom = get_pkg_data_filename("data/test_TESS_interact_generated_custom-lc.fits")
filename_K2_custom = get_pkg_data_filename("data/test_K2_interact_generated_custom-lc.fits")
def test_invalid_lightcurve():
"""Invalid LightCurves should not be allowed."""
err_string = ("Input arrays have different lengths."
" len(time)=5, len(flux)=4")
time = np.array([1, 2, 3, 4, 5])
flux = np.array([1, 2, 3, 4])
with pytest.raises(ValueError) as err:
LightCurve(time=time, flux=flux)
assert err_string == err.value.args[0]
def test_empty_lightcurve():
"""LightCurves with no data should not be allowed."""
err_string = ("either time or flux must be given")
with pytest.raises(ValueError) as err:
LightCurve()
assert err_string == err.value.args[0]
def test_lc_nan_time():
time = np.array([1, 2, 3, np.nan])
flux = np.array([1, 2, 3, 4])
with pytest.warns(LightkurveWarning, match='contains NaN times'):
LightCurve(time=time, flux=flux)
def test_math_operators():
lc = LightCurve(time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5))
lc_add = lc + 1
lc_sub = lc - 1
lc_mul = lc * 2
lc_div = lc / 2
assert_array_equal(lc_add.flux, lc.flux + 1)
assert_array_equal(lc_sub.flux, lc.flux - 1)
assert_array_equal(lc_mul.flux, lc.flux * 2)
assert_array_equal(lc_div.flux, lc.flux / 2)
def test_rmath_operators():
lc = LightCurve(time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5))
lc_add = 1 + lc
lc_sub = 1 - lc
lc_mul = 2 * lc
lc_div = 2 / lc
assert_array_equal(lc_add.flux, lc.flux + 1)
assert_array_equal(lc_sub.flux, 1 - lc.flux)
assert_array_equal(lc_mul.flux, lc.flux * 2)
assert_array_equal(lc_div.flux, 2 / lc.flux)
@pytest.mark.remote_data
@pytest.mark.parametrize("path, mission", [(TABBY_Q8, "Kepler"), (K2_C08, "K2")])
def test_KeplerLightCurveFile(path, mission):
lcf = KeplerLightCurveFile(path, quality_bitmask=None)
assert lcf.obsmode == 'long cadence'
assert len(lcf.pos_corr1) == len(lcf.pos_corr2)
# The liberal bitmask will cause the lightcurve to contain NaN times
with pytest.warns(LightkurveWarning, match='NaN times'):
lc = lcf.get_lightcurve('SAP_FLUX')
assert lc.channel == lcf.channel
assert lc.mission.lower() == mission.lower()
if lc.mission.lower() == 'kepler':
assert lc.campaign is None
assert lc.quarter == 8
elif lc.mission.lower() == 'k2':
assert lc.campaign == 8
assert lc.quarter is None
assert lc.time_format == 'bkjd'
assert lc.time_scale == 'tdb'
assert lc.astropy_time.scale == 'tdb'
# Does the data match what one would obtain using pyfits.open?
hdu = pyfits.open(path)
assert lc.label == hdu[0].header['OBJECT']
assert_array_equal(lc.time, hdu[1].data['TIME'])
assert_array_equal(lc.flux, hdu[1].data['SAP_FLUX'] / ((hdu[1].header['CROWDSAP'] * hdu[1].header['FLFRCSAP'])))
with pytest.raises(KeyError):
lcf.get_lightcurve('BLABLA')
@pytest.mark.remote_data
@pytest.mark.parametrize("quality_bitmask",
['hardest', 'hard', 'default', None,
1, 100, 2096639])
def test_TessLightCurveFile(quality_bitmask):
tess_file = TessLightCurveFile(TESS_SIM, quality_bitmask=quality_bitmask)
hdu = pyfits.open(TESS_SIM)
lc = tess_file.SAP_FLUX
assert lc.mission == 'TESS'
assert lc.label == hdu[0].header['OBJECT']
assert lc.time_format == 'btjd'
assert lc.time_scale == 'tdb'
assert_array_equal(lc.time[0:10], hdu[1].data['TIME'][0:10])
assert_array_equal(lc.flux[0:10], hdu[1].data['SAP_FLUX'][0:10])
# Regression test for https://github.com/KeplerGO/lightkurve/pull/236
assert np.isnan(lc.time).sum() == 0
with pytest.raises(KeyError):
tess_file.get_lightcurve('DOESNOTEXIST')
@pytest.mark.remote_data
@pytest.mark.parametrize("quality_bitmask, answer", [('hardest', 2661),
('hard', 2706), ('default', 3113), (None, 3279),
(1, 3279), (100, 3252), (2096639, 2661)])
def test_bitmasking(quality_bitmask, answer):
"""Test whether the bitmasking behaves like it should"""
lcf = KeplerLightCurveFile(TABBY_Q8, quality_bitmask=quality_bitmask)
with warnings.catch_warnings():
# Ignore "LightCurve contains NaN times" warnings triggered by liberal masks
warnings.simplefilter("ignore", LightkurveWarning)
flux = lcf.get_lightcurve('SAP_FLUX').flux
assert len(flux) == answer
def test_lightcurve_fold():
"""Test the ``LightCurve.fold()`` method."""
lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100)+1,
targetid=999, label='mystar', meta={'ccd': 2}, time_format='bkjd')
fold = lc.fold(period=1)
assert_almost_equal(fold.phase[0], -0.5, 2)
assert_almost_equal(np.min(fold.phase), -0.5, 2)
assert_almost_equal(np.max(fold.phase), 0.5, 2)
assert fold.targetid == lc.targetid
assert fold.label == lc.label
assert fold.meta == lc.meta
assert_array_equal(np.sort(fold.time_original), lc.time)
assert len(fold.time_original) == len(lc.time)
fold = lc.fold(period=1, t0=-0.1)
assert_almost_equal(fold.time[0], -0.5, 2)
assert_almost_equal(np.min(fold.phase), -0.5, 2)
assert_almost_equal(np.max(fold.phase), 0.5, 2)
with warnings.catch_warnings():
# `transit_midpoint` is deprecated and its use will emit a warning
warnings.simplefilter("ignore", LightkurveWarning)
fold = lc.fold(period=1, transit_midpoint=-0.1)
assert_almost_equal(fold.time[0], -0.5, 2)
ax = fold.plot()
assert (ax.get_xlabel() == 'Phase')
ax = fold.scatter()
assert (ax.get_xlabel() == 'Phase')
ax = fold.errorbar()
assert (ax.get_xlabel() == 'Phase')
plt.close('all')
# bad transit midpoint should give a warning
# if user tries a t0 in JD but time is in BKJD
with pytest.warns(LightkurveWarning, match='appears to be given in JD'):
lc.fold(10, 2456600)
def test_lightcurve_fold_issue520():
"""Regression test for #520; accept quantities in `fold()`."""
lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100)+1)
lc.fold(period=1*u.day, t0=5*u.day)
def test_lightcurve_append():
"""Test ``LightCurve.append()``."""
lc = LightCurve(time=[1, 2, 3], flux=[1, .5, 1], flux_err=[0.1, 0.2, 0.3])
lc = lc.append(lc)
assert_array_equal(lc.time, 2*[1, 2, 3])
assert_array_equal(lc.flux, 2*[1, .5, 1])
assert_array_equal(lc.flux_err, 2*[0.1, 0.2, 0.3])
# KeplerLightCurve has extra data
lc = KeplerLightCurve(time=[1, 2, 3], flux=[1, .5, 1],
centroid_col=[4, 5, 6], centroid_row=[7, 8, 9],
cadenceno=[10, 11, 12], quality=[10, 20, 30])
lc = lc.append(lc)
assert_array_equal(lc.time, 2*[1, 2, 3])
assert_array_equal(lc.flux, 2*[1, .5, 1])
assert_array_equal(lc.centroid_col, 2*[4, 5, 6])
assert_array_equal(lc.centroid_row, 2*[7, 8, 9])
assert_array_equal(lc.cadenceno, 2*[10, 11, 12])
assert_array_equal(lc.quality, 2*[10, 20, 30])
def test_lightcurve_append_multiple():
"""Test ``LightCurve.append()`` for multiple lightcurves at once."""
lc = LightCurve(time=[1, 2, 3], flux=[1, .5, 1])
lc = lc.append([lc, lc, lc])
assert_array_equal(lc.flux, 4*[1, .5, 1])
assert_array_equal(lc.time, 4*[1, 2, 3])
def test_lightcurve_copy():
"""Test ``LightCurve.copy()``."""
time = np.array([1, 2, 3, 4])
flux = np.array([1, 2, 3, 4])
error = np.array([0.1, 0.2, 0.3, 0.4])
lc = LightCurve(time=time, flux=flux, flux_err=error)
nlc = lc.copy()
assert_array_equal(lc.time, nlc.time)
assert_array_equal(lc.flux, nlc.flux)
assert_array_equal(lc.flux_err, nlc.flux_err)
nlc.time[1] = 5
nlc.flux[1] = 6
nlc.flux_err[1] = 7
# By changing 1 of the 4 data points in the new lightcurve's array-like
# attributes, we expect assert_array_equal to raise an AssertionError
# indicating a mismatch of 1/4 (or 25%).
with pytest.raises(AssertionError, match=r'ismatch.* 25'):
assert_array_equal(lc.time, nlc.time)
with pytest.raises(AssertionError, match=r'ismatch.* 25'):
assert_array_equal(lc.flux, nlc.flux)
with pytest.raises(AssertionError, match=r'ismatch.* 25'):
assert_array_equal(lc.flux_err, nlc.flux_err)
# KeplerLightCurve has extra data
lc = KeplerLightCurve(time=[1, 2, 3], flux=[1, .5, 1],
centroid_col=[4, 5, 6], centroid_row=[7, 8, 9],
cadenceno=[10, 11, 12], quality=[10, 20, 30])
nlc = lc.copy()
assert_array_equal(lc.time, nlc.time)
assert_array_equal(lc.flux, nlc.flux)
assert_array_equal(lc.centroid_col, nlc.centroid_col)
assert_array_equal(lc.centroid_row, nlc.centroid_row)
assert_array_equal(lc.cadenceno, nlc.cadenceno)
assert_array_equal(lc.quality, nlc.quality)
nlc.time[1] = 6
nlc.flux[1] = 7
nlc.centroid_col[1] = 8
nlc.centroid_row[1] = 9
nlc.cadenceno[1] = 10
nlc.quality[1] = 11
# As before, by changing 1/3 data points, we expect a mismatch of 33.3%
# with a repeating decimal. However, float precision for python 2.7 is 10
# decimal digits, while python 3.6's is 13 decimal digits. Therefore,
# a regular expression is needed for both versions.
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.time, nlc.time)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.flux, nlc.flux)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.centroid_col, nlc.centroid_col)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.centroid_row, nlc.centroid_row)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.cadenceno, nlc.cadenceno)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.quality, nlc.quality)
@pytest.mark.parametrize("path, mission", [(filename_tess_custom, "TESS"),
(filename_K2_custom, "K2")])
def test_custom_lightcurve_file(path, mission):
"""Test whether we can read in custom interact()-produced lightcurvefiles"""
if mission == "K2":
lcf_custom = KeplerLightCurveFile(path)
elif mission == "TESS":
lcf_custom = TessLightCurveFile(path)
assert lcf_custom.hdu[2].name == 'APERTURE'
assert lcf_custom.cadenceno[0] >= 0
assert lcf_custom.dec == lcf_custom.dec
assert lcf_custom.time[-1] > lcf_custom.time[0]
# .interact() files currently define FLUX, and not SAP_FLUX nor PDCSAP_FLUX
lc = lcf_custom.get_lightcurve('FLUX')
assert len(lc.flux) > 0
with pytest.raises(KeyError):
lcf_custom.get_lightcurve('BLABLA')
with pytest.raises(KeyError):
lcf_custom.SAP_FLUX
with pytest.raises(KeyError):
lcf_custom.PDCSAP_FLUX
assert lc.mission.lower() == mission.lower()
# Does the data match what one would obtain using pyfits.open?
hdu = pyfits.open(path)
assert lc.label == hdu[0].header['OBJECT']
assert_array_equal(lc.time, hdu[1].data['TIME'])
assert_array_equal(lc.flux, hdu[1].data['FLUX'])
# TESS has QUALITY while Kepler/K2 has SAP_QUALITY:
if mission == "TESS":
assert "QUALITY" in lcf_custom.hdu[1].columns.names
assert_array_equal(lc.quality, hdu[1].data['QUALITY'])
if mission in ["K2", "Kepler"]:
assert "SAP_QUALITY" in lcf_custom.hdu[1].columns.names
assert_array_equal(lc.quality, hdu[1].data['SAP_QUALITY'])
@pytest.mark.remote_data
def test_lightcurve_plots():
"""Sanity check to verify that lightcurve plotting works"""
for lcf in [KeplerLightCurveFile(TABBY_Q8), TessLightCurveFile(TESS_SIM)]:
lcf.plot()
lcf.plot(flux_types=['SAP_FLUX', 'PDCSAP_FLUX'])
lcf.scatter()
lcf.errorbar()
lcf.SAP_FLUX.plot()
lcf.SAP_FLUX.plot(normalize=False, title="Not the default")
lcf.SAP_FLUX.scatter()
lcf.SAP_FLUX.scatter(c='C3')
lcf.SAP_FLUX.scatter(c=lcf.SAP_FLUX.time, show_colorbar=True, colorbar_label='Time')
lcf.SAP_FLUX.errorbar()
plt.close('all')
@pytest.mark.remote_data
def test_lightcurve_scatter():
"""Sanity check to verify that lightcurve scatter plotting works"""
lcf = KeplerLightCurveFile(KEPLER10)
lc = lcf.PDCSAP_FLUX.flatten()
# get an array of original times, in the same order as the folded lightcurve
foldkw = dict(period=0.837491)
originaltime = LightCurve(lc.time, lc.time)
foldedtimeinorder = originaltime.fold(**foldkw).flux
# plot a grid of phase-folded and not, with colors
    fig, ax = plt.subplots(2, 2, figsize=(10, 6), sharey=True, sharex='col')
    scatterkw = dict(s=5, cmap='winter')
lc.scatter(ax=ax[0,0])
lc.fold(**foldkw).scatter(ax=ax[0,1])
lc.scatter(ax=ax[1,0], c=lc.time, **scatterkw)
lc.fold(**foldkw).scatter(ax=ax[1,1], c=foldedtimeinorder, **scatterkw)
plt.ylim(0.999, 1.001)
def test_cdpp():
"""Test the basics of the CDPP noise metric."""
# A flat lightcurve should have a CDPP close to zero
assert_almost_equal(LightCurve(np.arange(200), np.ones(200)).estimate_cdpp(), 0)
# An artificial lightcurve with sigma=100ppm should have cdpp=100ppm
lc = LightCurve(np.arange(10000), np.random.normal(loc=1, scale=100e-6, size=10000))
assert_almost_equal(lc.estimate_cdpp(transit_duration=1), 100, decimal=-0.5)
# Transit_duration must be an integer (cadences)
with pytest.raises(ValueError):
lc.estimate_cdpp(transit_duration=6.5)
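# Informal sanity note (added for clarity, not part of the original test suite):
# for pure white noise with per-cadence scatter sigma, averaging over a window of
# n cadences reduces the scatter by roughly sqrt(n), so estimate_cdpp should give
# roughly sigma / sqrt(n); with sigma = 100 ppm and transit_duration=1 that is
# ~100 ppm, which is what the assertion above checks.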
@pytest.mark.remote_data
def test_cdpp_tabby():
"""Compare the cdpp noise metric against the pipeline value."""
lcf = KeplerLightCurveFile(TABBY_Q8)
# Tabby's star shows dips after cadence 1000 which increase the cdpp
lc = LightCurve(lcf.PDCSAP_FLUX.time[:1000], lcf.PDCSAP_FLUX.flux[:1000])
assert(np.abs(lc.estimate_cdpp() - lcf.header(ext=1)['CDPP6_0']) < 30)
def test_bin():
"""Does binning work?"""
lc = LightCurve(time=np.arange(10),
flux=2*np.ones(10),
flux_err=2**.5*np.ones(10))
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.flux, 2*np.ones(5))
assert_allclose(binned_lc.flux_err, np.ones(5))
assert len(binned_lc.time) == 5
with pytest.raises(ValueError):
lc.bin(method='doesnotexist')
# If `flux_err` is missing, the errors on the bins should be the stddev
lc = LightCurve(time=np.arange(10),
flux=2*np.ones(10))
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.flux_err, np.zeros(5))
# Regression test for #377
lc = KeplerLightCurve(time=np.arange(10),
flux=2*np.ones(10))
lc.bin(5).remove_outliers()
# Second regression test for #377
lc = KeplerLightCurve(time=np.arange(1000) * 0.02,
flux=1*np.ones(1000) + np.random.normal(0, 1e-6, 1000),
cadenceno=np.arange(1000))
assert np.isclose(lc.bin(2).estimate_cdpp(), 1, rtol=1)
# Regression test for #500
lc = LightCurve(time=np.arange(2000),
flux=np.random.normal(loc=42, scale=0.01, size=2000))
assert np.round(lc.bin(2000).flux_err[0], 2) == 0.01
def test_bin_quality():
"""Binning must also revise the quality and centroid columns."""
lc = KeplerLightCurve(time=[1, 2, 3, 4],
flux=[1, 1, 1, 1],
quality=[0, 1, 2, 3],
centroid_col=[0, 1, 0, 1],
centroid_row=[0, 2, 0, 2])
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.quality, [1, 3]) # Expect bitwise or
assert_allclose(binned_lc.centroid_col, [0.5, 0.5]) # Expect mean
assert_allclose(binned_lc.centroid_row, [1, 1]) # Expect mean
def test_normalize():
"""Does the `LightCurve.normalize()` method normalize the flux?"""
lc = LightCurve(time=np.arange(10), flux=5*np.ones(10), flux_err=0.05*np.ones(10))
assert_allclose(np.median(lc.normalize().flux), 1)
assert_allclose(np.median(lc.normalize().flux_err), 0.05/5)
def test_to_pandas():
"""Test the `LightCurve.to_pandas()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time, flux, flux_err)
try:
df = lc.to_pandas()
assert_allclose(df.index, time)
assert_allclose(df.flux, flux)
assert_allclose(df.flux_err, flux_err)
        df.describe()  # Will fail if there are endianness bugs
except ImportError:
# pandas is an optional dependency
pass
def test_to_pandas_kepler():
"""When to_pandas() is executed on a KeplerLightCurve, it should include
extra columns such as `quality`."""
time, flux, quality = range(3), np.ones(3), np.zeros(3)
lc = KeplerLightCurve(time, flux, quality=quality)
try:
df = lc.to_pandas()
assert_allclose(df.quality, quality)
except ImportError:
# pandas is an optional dependency
pass
def test_to_table():
"""Test the `LightCurve.to_table()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time, flux, flux_err)
tbl = lc.to_table()
assert_allclose(tbl['time'], time)
assert_allclose(tbl['flux'], flux)
assert_allclose(tbl['flux_err'], flux_err)
def test_to_csv():
"""Test the `LightCurve.to_csv()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
try:
lc = LightCurve(time, flux, flux_err)
assert(lc.to_csv(index=False, line_terminator='\n') == 'time,flux,flux_err\n0,1.0,0.0\n1,1.0,0.0\n2,1.0,0.0\n')
except ImportError:
# pandas is an optional dependency
pass
@pytest.mark.remote_data
def test_to_fits():
"""Test the KeplerLightCurve.to_fits() method"""
lcf = KeplerLightCurveFile(TABBY_Q8)
hdu = lcf.PDCSAP_FLUX.to_fits()
KeplerLightCurveFile(hdu) # Regression test for #233
    assert type(hdu).__name__ == 'HDUList'
assert len(hdu) == 2
assert hdu[0].header['EXTNAME'] == 'PRIMARY'
assert hdu[1].header['EXTNAME'] == 'LIGHTCURVE'
assert hdu[1].header['TTYPE1'] == 'TIME'
assert hdu[1].header['TTYPE2'] == 'FLUX'
assert hdu[1].header['TTYPE3'] == 'FLUX_ERR'
assert hdu[1].header['TTYPE4'] == 'CADENCENO'
hdu = LightCurve([0, 1, 2, 3, 4], [1, 1, 1, 1, 1]).to_fits()
# Test "round-tripping": can we read-in what we write
lcf_new = LightCurveFile(hdu) # Regression test for #233
assert hdu[0].header['EXTNAME'] == 'PRIMARY'
assert hdu[1].header['EXTNAME'] == 'LIGHTCURVE'
assert hdu[1].header['TTYPE1'] == 'TIME'
assert hdu[1].header['TTYPE2'] == 'FLUX'
# Test aperture mask support in to_fits
for tpf in [KeplerTargetPixelFile(TABBY_TPF), TessTargetPixelFile(filename_tess)]:
random_mask = np.random.randint(0, 2, size=tpf.flux[0].shape, dtype=bool)
thresh_mask = tpf.create_threshold_mask(threshold=3)
lc = tpf.to_lightcurve(aperture_mask=random_mask)
lc.to_fits(path=tempfile.NamedTemporaryFile().name, aperture_mask=random_mask)
lc.to_fits(path=tempfile.NamedTemporaryFile().name, overwrite=True,
flux_column_name='SAP_FLUX')
lc = tpf[0:2].to_lightcurve(aperture_mask=thresh_mask)
lc.to_fits(aperture_mask=thresh_mask, path=tempfile.NamedTemporaryFile().name)
# Test the extra data kwargs
bkg_mask = ~tpf.create_threshold_mask(threshold=0.1)
bkg_lc = tpf.to_lightcurve(aperture_mask=bkg_mask)
lc = tpf.to_lightcurve(aperture_mask=tpf.hdu['APERTURE'].data)
lc = tpf.to_lightcurve(aperture_mask=None)
lc = tpf.to_lightcurve(aperture_mask=thresh_mask)
lc_out = lc - bkg_lc.flux * (thresh_mask.sum()/bkg_mask.sum())
lc_out.to_fits(aperture_mask=thresh_mask, path=tempfile.NamedTemporaryFile().name,
overwrite=True, extra_data={'BKG': bkg_lc.flux})
@pytest.mark.remote_data
def test_astropy_time():
    """Test the `astropy_time` property."""
lcf = KeplerLightCurveFile(TABBY_Q8)
astropy_time = lcf.astropy_time
iso = astropy_time.iso
assert astropy_time.scale == 'tdb'
assert len(iso) == len(lcf.time)
#assert iso[0] == '2011-01-06 20:45:08.811'
#assert iso[-1] == '2011-03-14 20:18:16.734'
def test_astropy_time_bkjd():
"""Does `LightCurve.astropy_time` support bkjd?"""
bkjd = np.array([100, 200])
lc = LightCurve(time=[100, 200], time_format='bkjd')
assert_allclose(lc.astropy_time.jd, bkjd + 2454833.)
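# Note (added for clarity): BKJD is the Barycentric Kepler Julian Date, defined as
# BJD - 2454833.0, which is why adding 2454833 above recovers the plain JD value.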
def test_lightcurve_repr():
"""Do __str__ and __repr__ work?"""
time, flux = range(3), np.ones(3)
str(LightCurve(time, flux))
str(KeplerLightCurve(time, flux))
str(TessLightCurve(time, flux))
repr(LightCurve(time, flux))
repr(KeplerLightCurve(time, flux))
repr(TessLightCurve(time, flux))
@pytest.mark.remote_data
def test_lightcurvefile_repr():
"""Do __str__ and __repr__ work?"""
lcf = KeplerLightCurveFile(TABBY_Q8)
str(lcf)
repr(lcf)
lcf = TessLightCurveFile(TESS_SIM)
str(lcf)
repr(lcf)
def test_slicing():
"""Does LightCurve.__getitem__() allow slicing?"""
time = np.linspace(0, 10, 10)
flux = np.linspace(100, 200, 10)
flux_err = np.linspace(5, 50, 10)
lc = LightCurve(time, flux, flux_err)
assert_array_equal(lc[0:5].time, time[0:5])
assert_array_equal(lc[2::2].flux, flux[2::2])
assert_array_equal(lc[5:9:-1].flux_err, flux_err[5:9:-1])
# KeplerLightCurves contain additional data arrays that need to be sliced
centroid_col = np.linspace(40, 50, 10)
centroid_row = np.linspace(50, 60, 10)
quality = np.linspace(70, 80, 10)
cadenceno = np.linspace(90, 100, 10)
lc = KeplerLightCurve(time, flux, flux_err,
centroid_col=centroid_col,
centroid_row=centroid_row,
cadenceno=cadenceno,
quality=quality)
assert_array_equal(lc[::3].centroid_col, centroid_col[::3])
assert_array_equal(lc[4:].centroid_row, centroid_row[4:])
assert_array_equal(lc[10:2].quality, quality[10:2])
assert_array_equal(lc[3:6].cadenceno, cadenceno[3:6])
# The same is true for TessLightCurve
lc = TessLightCurve(time, flux, flux_err,
centroid_col=centroid_col,
centroid_row=centroid_row,
cadenceno=cadenceno,
quality=quality)
assert_array_equal(lc[::4].centroid_col, centroid_col[::4])
assert_array_equal(lc[5:].centroid_row, centroid_row[5:])
assert_array_equal(lc[10:3].quality, quality[10:3])
assert_array_equal(lc[4:6].cadenceno, cadenceno[4:6])
def test_boolean_masking():
lc = KeplerLightCurve(time=[1, 2, 3], flux=[1, 1, 10],
quality=[0, 0, 200], cadenceno=[5, 6, 7])
assert_array_equal(lc[lc.flux < 5].time, [1, 2])
assert_array_equal(lc[lc.flux < 5].flux, [1, 1])
assert_array_equal(lc[lc.flux < 5].quality, [0, 0])
assert_array_equal(lc[lc.flux < 5].cadenceno, [5, 6])
def test_remove_nans():
"""Does LightCurve.__getitem__() allow slicing?"""
time, flux = [1, 2, 3, 4], [100, np.nan, 102, np.nan]
lc_clean = LightCurve(time, flux).remove_nans()
assert_array_equal(lc_clean.time, [1, 3])
assert_array_equal(lc_clean.flux, [100, 102])
def test_remove_outliers():
# Does `remove_outliers()` remove outliers?
lc = LightCurve([1, 2, 3, 4], [1, 1, 1000, 1])
lc_clean = lc.remove_outliers(sigma=1)
assert_array_equal(lc_clean.time, [1, 2, 4])
assert_array_equal(lc_clean.flux, [1, 1, 1])
# It should also be possible to return the outlier mask
lc_clean, outlier_mask = lc.remove_outliers(sigma=1, return_mask=True)
assert(len(outlier_mask) == len(lc.flux))
assert(outlier_mask.sum() == 1)
# Can we set sigma_lower and sigma_upper?
lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
lc_clean = lc.remove_outliers(sigma_lower=float('inf'), sigma_upper=1)
assert_array_equal(lc_clean.time, [1, 3, 4, 5])
assert_array_equal(lc_clean.flux, [1, 1, -1000, 1])
@pytest.mark.remote_data
def test_properties(capfd):
    """Test if the show_properties() method produces an output.
    The output is 624 characters at the moment, but we might add more properties."""
lcf = KeplerLightCurveFile(TABBY_Q8)
kplc = lcf.get_lightcurve('SAP_FLUX')
kplc.show_properties()
out, _ = capfd.readouterr()
assert len(out) > 500
def test_flatten_with_nans():
"""Flatten should not remove NaNs."""
lc = LightCurve(time=[1, 2, 3, 4, 5],
flux=[np.nan, 1.1, 1.2, np.nan, 1.4],
flux_err=[1.0, np.nan, 1.2, 1.3, np.nan])
flat_lc = lc.flatten(window_length=3)
assert(len(flat_lc.time) == 5)
assert(np.isfinite(flat_lc.flux).sum() == 3)
assert(np.isfinite(flat_lc.flux_err).sum() == 3)
def test_flatten_robustness():
"""Test various special cases for flatten()."""
# flatten should work with integer fluxes
lc = LightCurve([1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60])
expected_result = np.array([1., 1., 1., 1., 1., 1.])
flat_lc = lc.flatten(window_length=3, polyorder=1)
assert_allclose(flat_lc.flux, expected_result)
# flatten should work even if `window_length > len(flux)`
flat_lc = lc.flatten(window_length=7, polyorder=1)
assert_allclose(flat_lc.flux, flat_lc.flux / np.median(flat_lc.flux))
# flatten should work even if `polyorder >= window_length`
flat_lc = lc.flatten(window_length=3, polyorder=3)
assert_allclose(flat_lc.flux, expected_result)
flat_lc = lc.flatten(window_length=3, polyorder=5)
assert_allclose(flat_lc.flux, expected_result)
# flatten should work even if `break_tolerance = None`
flat_lc = lc.flatten(window_length=3, break_tolerance=None)
assert_allclose(flat_lc.flux, expected_result)
flat_lc, trend_lc = lc.flatten(return_trend=True)
assert_allclose(flat_lc.time, trend_lc.time)
assert_allclose(lc.flux, flat_lc.flux * trend_lc.flux)
def test_iterative_flatten():
    """Test the iterative sigma clipping in flatten()."""
# Test a light curve with a single, buried outlier.
x = np.arange(2000)
y = np.sin(x/200)/100 + 1
y[250] -= 0.01
lc = LightCurve(x, y)
# Flatten it
c, f = lc.flatten(window_length=25, niters=2, sigma=3, return_trend=True)
# Only one outlier should remain.
assert np.isclose(c.flux, 1, rtol=0.00001).sum() == 1999
mask = np.zeros(2000, dtype=bool)
mask[250] = True
# Flatten it using a mask to remove the bad data point.
c, f = lc.flatten(window_length=25, niters=1, sigma=3, mask=mask,
return_trend=True)
# Only one outlier should remain.
assert np.isclose(c.flux, 1, rtol=0.00001).sum() == 1999
def test_fill_gaps():
lc = LightCurve([1,2,3,4,6,7,8], [1,1,1,1,1,1,1])
nlc = lc.fill_gaps()
assert(len(lc.time) < len(nlc.time))
assert(np.any(nlc.time == 5))
assert(np.all(nlc.flux == 1))
lc = LightCurve([1,2,3,4,6,7,8], [1,1,np.nan,1,1,1,1])
nlc = lc.fill_gaps()
assert(len(lc.time) < len(nlc.time))
assert(np.any(nlc.time == 5))
assert(np.all(nlc.flux == 1))
assert(np.all(np.isfinite(nlc.flux)))
# Because fill_gaps() uses pandas, check that it works regardless of endianness
# For details see https://github.com/KeplerGO/lightkurve/issues/188
lc = LightCurve(np.array([1, 2, 3, 4, 6, 7, 8], dtype='>f8'),
np.array([1, 1, 1, np.nan, np.nan, 1, 1], dtype='>f8'))
lc.fill_gaps()
lc = LightCurve(np.array([1, 2, 3, 4, 6, 7, 8], dtype='<f8'),
np.array([1, 1, 1, np.nan, np.nan, 1, 1], dtype='<f8'))
lc.fill_gaps()
def test_targetid():
"""Is a generic targetid available on each type of LighCurve object?"""
lc = LightCurve(time=[], targetid=5)
assert lc.targetid == 5
# Can we assign a new value?
lc.targetid = 99
assert lc.targetid == 99
# Does it work for Kepler?
lc = KeplerLightCurve(time=[], targetid=10)
assert lc.targetid == 10
# Can we assign a new value?
lc.targetid = 99
assert lc.targetid == 99
# Does it work for TESS?
lc = TessLightCurve(time=[], targetid=20)
assert lc.targetid == 20
def test_regression_346():
"""Regression test for https://github.com/KeplerGO/lightkurve/issues/346"""
# This previously triggered an IndexError:
KeplerLightCurveFile(K2_C08).PDCSAP_FLUX.to_corrector().correct().estimate_cdpp()
def test_to_timeseries():
"""Test the `LightCurve.to_timeseries()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time, flux, flux_err, time_format="jd")
try:
ts = lc.to_timeseries()
assert_allclose(ts['time'].value, time)
assert_allclose(ts['flux'], flux)
assert_allclose(ts['flux_err'], flux_err)
except ImportError:
# Requires AstroPy v3.2 or later
pass
|
py | 1a556220545cd76ec37892f2e21e3dfdb49214f2 | """Add a nullable 'confidence' column to the 'predictions' table.
Revision ID: 5b4a3e3232c8
Revises: 6b071c7c748f
Create Date: 2021-03-13 23:07:54.586777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5b4a3e3232c8'
down_revision = '6b071c7c748f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('predictions', sa.Column('confidence', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('predictions', 'confidence')
# ### end Alembic commands ###
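# Illustrative usage (assumes the project's standard alembic.ini / env.py setup):
#     alembic upgrade 5b4a3e3232c8    # apply this migration
#     alembic downgrade 6b071c7c748f  # revert to the previous revision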
|
py | 1a5562a2cd3dd309de303d0e610178c884ab7065 | from vizdoomgymmaze.envs.vizdoomenv import VizdoomEnv
class VizdoomTakeCover(VizdoomEnv):
def __init__(self):
super(VizdoomTakeCover, self).__init__(7)
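# Illustrative usage sketch (assumes ViZDoom and this package's scenario files are
# installed, and that scenario index 7 selects the "take cover" configuration):
#     env = VizdoomTakeCover()
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())
#     env.close()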
|
py | 1a5562a5292dd7f8dab02f67228c6a99c616d022 | # Generated by Django 3.0.1 on 2020-06-21 17:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('properties', '0003_comment'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='property_comment',
field=models.ManyToManyField(related_name='comments', to='properties.Property'),
),
]
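# Illustrative usage (assumes the usual Django project settings for this app):
#     python manage.py migrate properties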
|
py | 1a5562a9b2a4d6156e516af54f81e7eb8385a548 | # engine/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
import contextlib
import sys
from .interfaces import Connectable
from .interfaces import ExceptionContext
from .util import _distill_params
from .. import exc
from .. import inspection
from .. import log
from .. import util
from ..sql import compiler
from ..sql import util as sql_util
"""Defines :class:`.Connection` and :class:`.Engine`.
"""
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
    from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
_schema_translate_map = None
def __init__(
self,
engine,
connection=None,
close_with_result=False,
_branch_from=None,
_execution_options=None,
_dispatch=None,
_has_events=None,
):
"""Construct a new Connection.
"""
self.engine = engine
self.dialect = engine.dialect
self.__branch_from = _branch_from
self.__branch = _branch_from is not None
if _branch_from:
self.__connection = connection
self._execution_options = _execution_options
self._echo = _branch_from._echo
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
self._schema_translate_map = _branch_from._schema_translate_map
else:
self.__connection = (
connection
if connection is not None
else engine.raw_connection()
)
self.__transaction = None
self.__savepoint_seq = 0
self.should_close_with_result = close_with_result
self.__invalid = False
self.__can_reconnect = True
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events
)
assert not _execution_options
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, self.__branch)
def schema_for_object(self, obj):
"""return the schema name for the given schema item taking into
account current schema translate map.
"""
name = obj.schema
schema_translate_map = self._schema_translate_map
if (
schema_translate_map
and name in schema_translate_map
and obj._use_schema_map
):
return schema_translate_map[name]
else:
return name
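    # Illustrative sketch (hypothetical names): after
    #     conn = conn.execution_options(schema_translate_map={"per_user": "account_one"})
    # a Table declared with schema="per_user" is rendered against "account_one"
    # whenever its SQL is compiled on this connection, via the lookup above.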
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect()
method is called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
if self.__branch_from:
return self.__branch_from._branch()
else:
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch_from=self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch,
)
@property
def _root(self):
"""return the 'root' connection.
Returns 'self' if this connection is not a branch, else
returns the root connection from which we ultimately branched.
"""
if self.__branch_from:
return self.__branch_from
else:
return self
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def execution_options(self, **opt):
r""" Set non-SQL options for the connection which take effect
during execution.
The method returns a copy of this :class:`.Connection` which references
the same underlying DBAPI connection, but also defines the given
execution options which will take effect for a call to
:meth:`execute`. As the new :class:`.Connection` references the same
underlying resource, it's usually a good idea to ensure that the copies
will be discarded immediately, which is implicit if used as in::
result = connection.execution_options(stream_results=True).\
execute(stmt)
Note that any key/value can be passed to
:meth:`.Connection.execution_options`, and it will be stored in the
``_execution_options`` dictionary of the :class:`.Connection`. It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such), and an explicit
transaction is not in progress.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`.Connection` compiles a clause
expression into a :class:`.Compiled` object.
It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param isolation_level: Available on: :class:`.Connection`.
Set the transaction isolation level for the lifespan of this
:class:`.Connection` object. Valid values include those string
values accepted by the :paramref:`.create_engine.isolation_level`
parameter passed to :func:`.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**, not just the copy that is
returned by the call to :meth:`.Connection.execution_options`
method. The isolation level will remain at the given setting until
the DBAPI connection itself is returned to the connection pool, i.e.
the :meth:`.Connection.close` method on the original
:class:`.Connection` is called, where an event handler will emit
additional statements on the DBAPI connection in order to revert the
isolation level change.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`.Connection.begin` method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`.Connection` is invalidated, e.g. via
the :meth:`.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after
the invalidation will not have the isolation level re-applied
to it automatically.
.. seealso::
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:meth:`.Connection.get_isolation_level` - view current level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`SQL Server Transaction Isolation <mssql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2, mysqldb and pymysql dialects.
:param schema_translate_map: Available on: Connection, Engine.
A dictionary mapping schema names to schema names, that will be
applied to the :paramref:`.Table.schema` element of each
:class:`.Table` encountered when SQL or DDL expression elements
are compiled into strings; the resulting schema name will be
converted based on presence in the map of the original name.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
.. seealso::
:meth:`.Engine.execution_options`
:meth:`.Executable.execution_options`
:meth:`.Connection.get_execution_options`
""" # noqa
c = self._clone()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
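    # Usage sketch (illustrative, not part of the original source); the returned
    # copy shares the same DBAPI connection but carries the extra options:
    #     streaming_conn = conn.execution_options(stream_results=True)
    #     result = streaming_conn.execute(text("SELECT * FROM big_table"))
    # "big_table" is a hypothetical name; text() refers to sqlalchemy.text().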
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Connection.execution_options`
"""
return self._execution_options
@property
def closed(self):
"""Return True if this connection is closed."""
return (
"_Connection__connection" not in self.__dict__
and not self.__can_reconnect
)
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
return self._root.__invalid
@property
def connection(self):
"""The underlying DB-API connection managed by this Connection.
.. seealso::
:ref:`dbapi_connections`
"""
try:
return self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
pass
try:
return self._revalidate_connection()
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def get_isolation_level(self):
"""Return the current isolation level assigned to this
:class:`.Connection`.
This will typically be the default isolation level as determined
by the dialect, unless if the
:paramref:`.Connection.execution_options.isolation_level`
feature has been used to alter the isolation level on a
per-:class:`.Connection` basis.
This attribute will typically perform a live SQL operation in order
to procure the current isolation level, so the value returned is the
actual level on the underlying DBAPI connection regardless of how
this state was set. Compare to the
:attr:`.Connection.default_isolation_level` accessor
which returns the dialect-level setting without performing a SQL
query.
.. versionadded:: 0.9.9
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
"""
try:
return self.dialect.get_isolation_level(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self):
"""The default isolation level assigned to this :class:`.Connection`.
This is the isolation level setting that the :class:`.Connection`
has when first procured via the :meth:`.Engine.connect` method.
This level stays in place until the
:paramref:`.Connection.execution_options.isolation_level` is used
to change the setting on a per-:class:`.Connection` basis.
Unlike :meth:`.Connection.get_isolation_level`, this attribute is set
ahead of time from the first connection procured by the dialect,
so SQL query is not invoked when this accessor is called.
.. versionadded:: 0.9.9
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
"""
return self.dialect.default_isolation_level
def _revalidate_connection(self):
if self.__branch_from:
return self.__branch_from._revalidate_connection()
if self.__can_reconnect and self.__invalid:
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Can't reconnect until invalid "
"transaction is rolled back"
)
self.__connection = self.engine.raw_connection(_connection=self)
self.__invalid = False
return self.__connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _connection_is_valid(self):
# use getattr() for is_valid to support exceptions raised in
# dialect initializer, where the connection is not wrapped in
# _ConnectionFairy
return getattr(self.__connection, "is_valid", False)
@property
def _still_open_and_connection_is_valid(self):
return (
not self.closed
and not self.invalidated
and getattr(self.__connection, "is_valid", False)
)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`.Connection`.
"""
return self.connection.info
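    # Illustrative sketch: the dict can carry arbitrary user data that travels with
    # the DBAPI connection back into the pool, e.g.
    #     conn.info["request_id"] = "abc123"  # hypothetical key and value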
@util.deprecated_20(":meth:`.Connection.connect`")
def connect(self, close_with_result=False):
"""Returns a branched version of this :class:`.Connection`.
The :meth:`.Connection.close` method on the returned
:class:`.Connection` can be called and this
:class:`.Connection` will remain open.
This method provides usage symmetry with
:meth:`.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`.Connection`.
The underlying DBAPI connection is literally closed (if
possible), and is discarded. Its source connection pool will
typically lazily create a new connection to replace it.
Upon the next use (where "use" typically means using the
:meth:`.Connection.execute` method or similar),
this :class:`.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`.Pool` as a source of connectivity (e.g. a "reconnection").
If a transaction was in progress (e.g. the
:meth:`.Connection.begin` method has been called) when
:meth:`.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
continuing an ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`.Connection.invalidate` method, just like auto-invalidation,
will at the connection pool level invoke the
:meth:`.PoolEvents.invalidate` event.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._root._connection_is_valid:
self._root.__connection.invalidate(exception)
del self._root.__connection
self._root.__invalid = True
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute(text("SET search_path TO schema1, schema2"))
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`.Connection` instance will remain usable. When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
.. seealso::
:meth:`.Connection.begin_nested` - use a SAVEPOINT
        :meth:`.Connection.begin_twophase` -
        use a two-phase / XID transaction
:meth:`.Engine.begin` - context manager available from
:class:`.Engine`
"""
if self.__branch_from:
return self.__branch_from.begin()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction of a whole.
.. seealso::
:meth:`.Connection.begin`
:meth:`.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_nested()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
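    # Usage sketch (illustrative): a nested transaction maps to a SAVEPOINT, so
    # rolling it back only undoes work done since the savepoint was created:
    #     outer = conn.begin()
    #     conn.execute(table.insert(), {"id": 1})  # "table" is a hypothetical Table
    #     nested = conn.begin_nested()
    #     conn.execute(table.insert(), {"id": 2})
    #     nested.rollback()  # discards only the second INSERT
    #     outer.commit()     # the first INSERT is committed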
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
.. seealso::
:meth:`.Connection.begin`
:meth:`.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_twophase(xid=xid)
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress."
)
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
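    # Usage sketch (illustrative; requires a dialect/DBAPI with XA support):
    #     xa = conn.begin_twophase()
    #     conn.execute(table.insert(), {"id": 1})  # "table" is a hypothetical Table
    #     xa.prepare()  # phase one
    #     xa.commit()   # phase two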
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
return (
self._root.__transaction is not None
and self._root.__transaction.is_active
)
def _begin_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self, deactivate_only=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_connection_is_valid:
if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
def _commit_impl(self, autocommit=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
self.__transaction = None
def _savepoint_impl(self, name=None):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = "sa_savepoint_%s" % self.__savepoint_seq
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _discard_transaction(self, trans):
if trans is self.__transaction:
if trans._is_root:
assert trans._parent is trans
self.__transaction = None
else:
assert trans._parent is not trans
self.__transaction = trans._parent
def _rollback_to_savepoint_impl(
self, name, context, deactivate_only=False
):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
def _release_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
self.__transaction = context
def _begin_twophase_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared
)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _autorollback(self):
if not self._root.in_transaction():
self._root._rollback_impl()
def close(self):
"""Close this :class:`.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`.Pool` referenced
by the :class:`.Engine` that produced this
:class:`.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`.Connection`.
After :meth:`~.Connection.close` is called, the
:class:`.Connection` is permanently in a closed state,
and will allow no further operations.
"""
if self.__branch_from:
util.warn_deprecated_20(
"The .close() method on a so-called 'branched' connection is "
"deprecated as of 1.4, as are 'branched' connections overall, "
"and will be removed in a future release. If this is a "
"default-handling function, don't close the connection."
)
try:
del self.__connection
except AttributeError:
pass
finally:
self.__can_reconnect = False
return
try:
conn = self.__connection
except AttributeError:
pass
else:
conn.close()
if conn._reset_agent is self.__transaction:
conn._reset_agent = None
# the close() process can end up invalidating us,
# as the pool will call our transaction as the "reset_agent"
# for rollback(), which can then cause an invalidation
if not self.__invalid:
del self.__connection
self.__can_reconnect = False
self.__transaction = None
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(object_, *multiparams, **params).scalar()
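    # Usage sketch (illustrative): returns the first column of the first row, e.g.
    #     count = conn.scalar(text("SELECT count(*) FROM my_table"))
    # "my_table" is a hypothetical name; text() refers to sqlalchemy.text().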
def execute(self, object_, *multiparams, **params):
r"""Executes a SQL statement construct and returns a
:class:`.ResultProxy`.
:param object: The statement to be executed. May be
one of:
* a plain string (deprecated)
* any :class:`.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`~.expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :data:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
.. deprecated:: 2.0 passing a string to :meth:`.Connection.execute` is
deprecated and will be removed in version 2.0. Use the
:func:`~.expression.text` construct with
:meth:`.Connection.execute`, or the
:meth:`.Connection.exec_driver_sql` method to invoke a driver-level
SQL string.
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
Note above, the usage of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
for details on paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`~.expression.text` construct.
.. deprecated:: 2.0 use of tuple or scalar positional parameters
is deprecated. All params should be dicts or sequences of dicts.
Use :meth:`.exec_driver_sql` to execute a plain string with
tuple or scalar positional parameters.
"""
if isinstance(object_, util.string_types):
util.warn_deprecated_20(
"Passing a string to Connection.execute() is "
"deprecated and will be removed in version 2.0. Use the "
"text() construct, "
"or the Connection.exec_driver_sql() method to invoke a "
"driver-level SQL string."
)
distilled_params = _distill_params(multiparams, params)
return self._exec_driver_sql_distilled(object_, distilled_params)
try:
meth = object_._execute_on_connection
except AttributeError as err:
util.raise_(
exc.ObjectNotExecutableError(object_), replace_context=err
)
else:
return meth(self, multiparams, params)
def _execute_function(self, func, multiparams, params):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(func.select(), multiparams, params)
def _execute_default(self, default, multiparams, params):
"""Execute a schema.ColumnDefault object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
default, multiparams, params = fn(
self, default, multiparams, params
)
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(dialect, self, conn)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(None, default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, default, multiparams, params, ret
)
return ret
def _execute_ddl(self, ddl, multiparams, params):
"""Execute a schema.DDL object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
ddl, multiparams, params = fn(self, ddl, multiparams, params)
dialect = self.dialect
compiled = ddl.compile(
dialect=dialect, schema_translate_map=self._schema_translate_map
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
compiled,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, ddl, multiparams, params, ret)
return ret
def _execute_clauseelement(self, elem, multiparams, params):
"""Execute a sql.ClauseElement object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
elem, multiparams, params = fn(self, elem, multiparams, params)
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# ensure we don't retain a link to the view object for keys()
# which links to the values, which we don't want to cache
keys = list(distilled_params[0].keys())
else:
keys = []
dialect = self.dialect
if "compiled_cache" in self._execution_options:
elem_cache_key, extracted_params = elem._generate_cache_key()
key = (
dialect,
elem_cache_key,
tuple(sorted(keys)),
bool(self._schema_translate_map),
len(distilled_params) > 1,
)
cache = self._execution_options["compiled_cache"]
compiled_sql = cache.get(key)
if compiled_sql is None:
compiled_sql = elem.compile(
dialect=dialect,
cache_key=(elem_cache_key, extracted_params),
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self._schema_translate_map,
linting=self.dialect.compiler_linting
| compiler.WARN_LINTING,
)
cache[key] = compiled_sql
else:
extracted_params = None
compiled_sql = elem.compile(
dialect=dialect,
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self._schema_translate_map,
linting=self.dialect.compiler_linting | compiler.WARN_LINTING,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
compiled_sql,
distilled_params,
elem,
extracted_params,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, elem, multiparams, params, ret)
return ret
def _execute_compiled(self, compiled, multiparams, params):
"""Execute a sql.Compiled object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
compiled, multiparams, params = fn(
self, compiled, multiparams, params
)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
parameters,
compiled,
parameters,
None,
None,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, compiled, multiparams, params, ret
)
return ret
def _exec_driver_sql_distilled(self, statement, parameters):
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
statement, multiparams, params = fn(
self, statement, parameters, {}
)
dialect = self.dialect
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
parameters,
statement,
parameters,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, statement, parameters, {})
return ret
def exec_driver_sql(self, statement, parameters=None):
r"""Executes a SQL statement construct and returns a
:class:`.ResultProxy`.
:param statement: The statement str to be executed. Bound parameters
must use the underlying DBAPI's paramstyle, such as "qmark",
"pyformat", "format", etc.
:param parameters: represent bound parameter values to be used in the
execution. The format is one of: a dictionary of named parameters,
a tuple of positional parameters, or a list containing either
dictionaries or tuples for multiple-execute support.
E.g. multiple dictionaries::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
[{"id":1, "value":"v1"}, {"id":2, "value":"v2"}]
)
Single dictionary::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
dict(id=1, value="v1")
)
Single tuple::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, 'v1')
)
.. seealso::
:pep:`249`
"""
if isinstance(parameters, list) and parameters:
if not isinstance(parameters[0], (dict, tuple)):
raise exc.ArgumentError(
"List argument must consist only of tuples or dictionaries"
)
elif isinstance(parameters, (dict, tuple)):
parameters = [parameters]
return self._exec_driver_sql_distilled(statement, parameters or ())
def _execute_context(
self, dialect, constructor, statement, parameters, *args
):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`.ResultProxy`."""
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
context = constructor(dialect, self, conn, *args)
except BaseException as e:
self._handle_dbapi_exception(
e, util.text_type(statement), parameters, None, None
)
if self._root.__transaction and not self._root.__transaction.is_active:
raise exc.InvalidRequestError(
"This connection is on an inactive %stransaction. "
"Please rollback() fully before proceeding."
% (
"savepoint "
if isinstance(self.__transaction, NestedTransaction)
else ""
),
code="8s2a",
)
if context.compiled:
context.pre_exec()
cursor, statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if self._echo:
self.engine.logger.info(statement)
if not self.engine.hide_parameters:
self.engine.logger.info(
"%r",
sql_util._repr_params(
parameters, batches=10, ismulti=context.executemany
),
)
else:
self.engine.logger.info(
"[SQL parameters hidden due to hide_parameters=True]"
)
evt_handled = False
try:
if context.executemany:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor, statement, parameters, context
)
elif not parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, statement, context
)
else:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor, statement, parameters, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if context.compiled:
context.post_exec()
result = context._setup_result_proxy()
if context.should_autocommit and self._root.__transaction is None:
self._root._commit_impl(autocommit=True)
# for "connectionless" execution, we have to close this
# Connection after the statement is complete.
if self.should_close_with_result:
assert not context._is_future_result
# ResultProxy already exhausted rows / has no rows.
# close us now
if result._soft_closed:
self.close()
else:
# ResultProxy will close this Connection when no more
# rows to fetch.
result._autoclose_connection = True
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self, cursor, statement, parameters, context, False
)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
for fn in (
()
if not self.dialect._has_events
else self.dialect.dispatch.do_execute
):
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(cursor, statement, parameters, context)
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self, cursor, statement, parameters, context, False
)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True
)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(
self, e, statement, parameters, cursor, context
):
exc_info = sys.exc_info()
is_exit_exception = not isinstance(e, Exception)
if not self._is_disconnect:
self._is_disconnect = (
isinstance(e, self.dialect.dbapi.Error)
and not self.closed
and self.dialect.is_disconnect(
e,
self.__connection if not self.invalidated else None,
cursor,
)
) or (is_exit_exception and not self.closed)
invalidate_pool_on_disconnect = not is_exit_exception
if self._reentrant_error:
util.raise_(
exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
),
with_traceback=exc_info[2],
from_=e,
)
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.dbapi.Error) or (
statement is not None
and context is None
and not is_exit_exception
)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
connection_invalidated=self._is_disconnect,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
)
else:
sqlalchemy_exception = None
newraise = None
if (
self._has_events or self.engine._has_events
) and not self._execution_options.get(
"skip_user_error_events", False
):
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
self.engine,
self,
cursor,
statement,
parameters,
context,
self._is_disconnect,
invalidate_pool_on_disconnect,
)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if self._is_disconnect != ctx.is_disconnect:
self._is_disconnect = ctx.is_disconnect
if sqlalchemy_exception:
sqlalchemy_exception.connection_invalidated = (
ctx.is_disconnect
)
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = (
ctx.invalidate_pool_on_disconnect
)
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
with util.safe_reraise(warn_only=True):
self._autorollback()
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self.__connection
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
self.close()
@classmethod
def _handle_dbapi_exception_noconnection(cls, e, dialect, engine):
exc_info = sys.exc_info()
is_disconnect = dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
e,
dialect.dbapi.Error,
hide_parameters=engine.hide_parameters,
connection_invalidated=is_disconnect,
)
else:
sqlalchemy_exception = None
newraise = None
if engine._has_events:
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
engine,
None,
None,
None,
None,
None,
is_disconnect,
True,
)
for fn in engine.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = (
is_disconnect
) = ctx.is_disconnect
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
def _run_ddl_visitor(self, visitorcallable, element, **kwargs):
"""run a DDL visitor.
This method is only here so that the MockConnection can change the
options given to the visitor so that "checkfirst" is skipped.
"""
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
@util.deprecated(
"1.4",
"The :meth:`.Connection.transaction` method is deprecated and will be "
"removed in a future release. Use the :meth:`.Engine.begin` "
"context manager instead.",
)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed this :class:`.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute(text("some statement"), {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Connection.begin`::
with conn.begin():
conn.execute(text("some statement"), {'x':5, 'y':10})
As well as with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute(text("some statement"), {'x':5, 'y':10})
.. seealso::
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Engine.transaction` - engine-level version of
:meth:`.Connection.transaction`
"""
kwargs["_sa_skip_warning"] = True
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
@util.deprecated(
"1.4",
"The :meth:`.Connection.run_callable` method is deprecated and will "
"be removed in a future release. Use a context manager instead.",
)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Engine.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
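E.g., a brief illustrative sketch, mirroring the example shown for
:meth:`.Connection.transaction` (``do_something`` is a hypothetical
function used only for demonstration)::

    def do_something(conn, x, y):
        return conn.execute(text("some statement"), {'x': x, 'y': y})

    conn.run_callable(do_something, 5, 10)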
"""
return callable_(self, *args, **kwargs)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(
self,
exception,
sqlalchemy_exception,
engine,
connection,
cursor,
statement,
parameters,
context,
is_disconnect,
invalidate_pool_on_disconnect,
):
self.engine = engine
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect
class Transaction(object):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`~.Connection.begin` method of
:class:`.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute(text("insert into x (a, b) values (1, 2)"))
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`.Connection.begin` method::
with connection.begin():
connection.execute(text("insert into x (a, b) values (1, 2)"))
The Transaction object is **not** threadsafe.
.. seealso::
:meth:`.Connection.begin`
:meth:`.Connection.begin_twophase`
:meth:`.Connection.begin_nested`
.. index::
single: thread safety; Transaction
"""
_is_root = False
def __init__(self, connection, parent):
self.connection = connection
self._actual_parent = parent
self.is_active = True
def _deactivate(self):
self.is_active = False
@property
def _parent(self):
return self._actual_parent or self
def close(self):
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will rollback(). Otherwise, the
method returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if not self._parent.is_active:
return
if self._parent is self:
self.rollback()
def rollback(self):
"""Roll back this :class:`.Transaction`.
"""
if self._parent.is_active:
self._do_rollback()
self.is_active = False
self.connection._discard_transaction(self)
def _do_rollback(self):
self._parent._deactivate()
def commit(self):
"""Commit this :class:`.Transaction`."""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
self.is_active = False
def _do_commit(self):
pass
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_ is None and self.is_active:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class RootTransaction(Transaction):
_is_root = True
def __init__(self, connection):
super(RootTransaction, self).__init__(connection, None)
self.connection._begin_impl(self)
def _deactivate(self):
self._do_rollback(deactivate_only=True)
self.is_active = False
def _do_rollback(self, deactivate_only=False):
if self.is_active:
self.connection._rollback_impl(deactivate_only=deactivate_only)
def _do_commit(self):
if self.is_active:
self.connection._commit_impl()
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
A new :class:`.NestedTransaction` object may be procured
using the :meth:`.Connection.begin_nested` method.
The interface is the same as that of :class:`.Transaction`.
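A brief illustrative sketch (``connection`` is assumed to be an open
:class:`.Connection`)::

    trans = connection.begin()
    connection.execute(text("insert into x (a, b) values (1, 2)"))
    nested = connection.begin_nested()
    connection.execute(text("insert into x (a, b) values (3, 4)"))
    nested.commit()   # releases the SAVEPOINT
    trans.commit()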
"""
def __init__(self, connection, parent):
super(NestedTransaction, self).__init__(connection, parent)
self._savepoint = self.connection._savepoint_impl()
def _deactivate(self):
self._do_rollback(deactivate_only=True)
self.is_active = False
def _do_rollback(self, deactivate_only=False):
if self.is_active:
self.connection._rollback_to_savepoint_impl(
self._savepoint, self._parent
)
def _do_commit(self):
if self.is_active:
self.connection._release_savepoint_impl(
self._savepoint, self._parent
)
class TwoPhaseTransaction(Transaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
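A brief illustrative sketch (assumes a backend that supports two-phase
commit)::

    trans = connection.begin_twophase()
    connection.execute(text("insert into x (a, b) values (1, 2)"))
    trans.prepare()
    trans.commit()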
"""
def __init__(self, connection, xid):
super(TwoPhaseTransaction, self).__init__(connection, None)
self._is_prepared = False
self.xid = xid
self.connection._begin_twophase_impl(self)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _do_rollback(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _do_commit(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
An :class:`.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
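For example (the database URL shown is illustrative)::

    from sqlalchemy import create_engine
    engine = create_engine("postgresql://scott:tiger@localhost/test")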
.. seealso::
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = util.immutabledict()
_has_events = False
_connection_cls = Connection
_schema_translate_map = None
def __init__(
self,
pool,
dialect,
url,
logging_name=None,
echo=None,
execution_options=None,
hide_parameters=False,
):
self.pool = pool
self.url = url
self.dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.hide_parameters = hide_parameters
log.instance_logger(self, echoflag=echo)
if execution_options:
self.update_execution_options(**execution_options)
@property
def engine(self):
return self
def update_execution_options(self, **opt):
r"""Update the default execution_options dictionary
of this :class:`.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`.create_engine`.
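E.g., an illustrative sketch (``shard_id`` is an arbitrary, user-defined
option name, as in the example under :meth:`.Engine.execution_options`)::

    engine.update_execution_options(shard_id="shard1")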
.. seealso::
:meth:`.Connection.execution_options`
:meth:`.Engine.execution_options`
"""
self._execution_options = self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`.Engine` that will provide
:class:`.Connection` objects with the given execution options.
The returned :class:`.Engine` remains related to the original
:class:`.Engine` in that it shares the same connection pool and
other state:
* The :class:`.Pool` used by the new :class:`.Engine` is the
same instance. The :meth:`.Engine.dispose` method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new :class:`.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`.Engine` individually.
* The logging configuration and logging_name is copied from the parent
:class:`.Engine`.
The intent of the :meth:`.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`.Connection` objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`.Connection.info` dictionary, which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
shards = {"default": "base", shard_1: "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. seealso::
:meth:`.Connection.execution_options` - update execution options
on a :class:`.Connection` object.
:meth:`.Engine.update_execution_options` - update the execution
options for a given :class:`.Engine` in place.
:meth:`.Engine.get_execution_options`
"""
return OptionEngine(self, opt)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Engine.execution_options`
"""
return self._execution_options
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return "Engine(%r)" % self.url
def dispose(self):
"""Dispose of the connection pool used by this :class:`.Engine`.
This has the effect of fully closing all **currently checked in**
database connections. Connections that are still checked out
will **not** be closed, however they will no longer be associated
with this :class:`.Engine`, so when they are closed individually,
eventually the :class:`.Pool` which they are associated with will
be garbage collected and they will be closed out fully, if
not already closed on checkin.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested, so as long as the :class:`.Engine` isn't used again,
no new connections will be made.
.. seealso::
:ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
self.dispatch.engine_disposed(self)
def _execute_default(self, default):
with self.connect() as conn:
return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self.connect() as conn:
yield conn
else:
yield connection
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
return self.conn
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.transaction.rollback()
else:
self.transaction.commit()
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute(
text("insert into table (x, y, z) values (1, 2, 3)")
)
conn.execute(text("my_special_procedure(5)"))
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
The ``close_with_result`` flag is normally ``False``, and indicates
that the :class:`.Connection` will be closed when the operation
is complete. When set to ``True``, it indicates the
:class:`.Connection` is in "single use" mode, where the
:class:`.ResultProxy` returned by the first call to
:meth:`.Connection.execute` will close the :class:`.Connection` when
that :class:`.ResultProxy` has exhausted all result rows.
.. seealso::
:meth:`.Engine.connect` - procure a :class:`.Connection` from
an :class:`.Engine`.
:meth:`.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`.Connection`.
"""
conn = self.connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
@util.deprecated(
"1.4",
"The :meth:`.Engine.transaction` method is deprecated and will be "
"removed in a future release. Use the :meth:`.Engine.begin` context "
"manager instead.",
)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed a :class:`.Connection` newly procured
from :meth:`.Engine.connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute(text("some statement"), {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute(text("some statement"), {'x':5, 'y':10})
.. seealso::
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Connection.transaction` - connection-level version of
:meth:`.Engine.transaction`
"""
kwargs["_sa_skip_warning"] = True
with self.connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
@util.deprecated(
"1.4",
"The :meth:`.Engine.run_callable` method is deprecated and will be "
"removed in a future release. Use the :meth:`.Engine.connect` "
"context manager instead.",
)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Connection.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
kwargs["_sa_skip_warning"] = True
with self.connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def _run_ddl_visitor(self, visitorcallable, element, **kwargs):
with self.connect() as conn:
conn._run_ddl_visitor(visitorcallable, element, **kwargs)
@util.deprecated_20(
":meth:`.Engine.execute`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`.Connection.execute` method of :class:`.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`.",
)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a :class:`.ResultProxy`.
The arguments are the same as those used by
:meth:`.Connection.execute`.
Here, a :class:`.Connection` is acquired using the
:meth:`~.Engine.connect` method, and the statement executed
with that connection. The returned :class:`.ResultProxy` is flagged
such that when the :class:`.ResultProxy` is exhausted and its
underlying cursor is closed, the :class:`.Connection` created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self.connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
@util.deprecated_20(
":meth:`.Engine.scalar`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`.Connection.execute` method of :class:`.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`; the :meth:`.Result.scalar` method can then be "
"used to return a scalar result.",
)
def scalar(self, statement, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
connection = self.connect(close_with_result=True)
return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
connection = self.connect(close_with_result=True)
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, close_with_result=False):
"""Return a new :class:`.Connection` object.
The :class:`.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`.Pool`
referenced by this :class:`.Engine`. When the
:meth:`~.Connection.close` method of the :class:`.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`~.Engine.connect`.
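E.g., a minimal illustrative sketch::

    with engine.connect() as conn:
        result = conn.execute(text("some statement"))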
"""
return self._connection_cls(self, close_with_result=close_with_result)
@util.deprecated(
"1.4",
"The :meth:`.Engine.table_names` method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`.Inspector.get_table_names`.",
)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection.
"""
with self._optional_conn_ctx_manager(connection) as conn:
insp = inspection.inspect(conn)
return insp.get_table_names(schema)
@util.deprecated(
"1.4",
"The :meth:`.Engine.has_table` method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`.Inspector.has_table`.",
)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
with self._optional_conn_ctx_manager(None) as conn:
insp = inspection.inspect(conn)
return insp.has_table(table_name, schema=schema)
def _wrap_pool_connect(self, fn, connection):
dialect = self.dialect
try:
return fn()
except dialect.dbapi.Error as e:
if connection is None:
Connection._handle_dbapi_exception_noconnection(
e, dialect, self
)
else:
util.raise_(
sys.exc_info()[1], with_traceback=sys.exc_info()[2]
)
def raw_connection(self, _connection=None):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations when the API provided by :class:`.Connection`
is not needed. When a :class:`.Connection` object is already
present, the DBAPI connection is available using
the :attr:`.Connection.connection` accessor.
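E.g., an illustrative sketch (the exact cursor behavior depends on the
DBAPI driver in use)::

    dbapi_conn = engine.raw_connection()
    try:
        cursor = dbapi_conn.cursor()
        cursor.execute("select 1")
        rows = cursor.fetchall()
        cursor.close()
    finally:
        dbapi_conn.close()  # returns the connection to the pool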
.. seealso::
:ref:`dbapi_connections`
"""
return self._wrap_pool_connect(self.pool.connect, _connection)
class OptionEngine(Engine):
_sa_propagate_class_events = False
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
self.hide_parameters = proxied.hide_parameters
log.instance_logger(self, echoflag=self.echo)
# note: this will propagate events that are assigned to the parent
# engine after this OptionEngine is created. Since we share
# the events of the parent we also disallow class-level events
# to apply to the OptionEngine class directly.
#
# the other way this can work would be to transfer existing
# events only, using:
# self.dispatch._update(proxied.dispatch)
#
# that might be more appropriate however it would be a behavioral
# change for logic that assigns events to the parent engine and
# would like it to take effect for the already-created sub-engine.
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or self.__dict__.get(
"_has_events", False
)
def _set_has_events(self, value):
self.__dict__["_has_events"] = value
_has_events = property(_get_has_events, _set_has_events)
|
py | 1a55638c1be9e7c8c154d9115f3c812fc36fe1fe | """Test the TcEx API Module."""
# standard library
import datetime
import os
import time
from random import randint
# third-party
import pytest
from pytest import FixtureRequest
# first-party
from tcex.api.tc.v3.tql.tql_operator import TqlOperator
from tests.api.tc.v3.v3_helpers import TestV3, V3Helper
class TestNotes(TestV3):
"""Test TcEx API Interface."""
v3 = None
def setup_method(self):
"""Configure setup before all tests."""
print('') # ensure any following print statements will be on new line
self.v3_helper = V3Helper('notes')
self.v3 = self.v3_helper.v3
self.tcex = self.v3_helper.tcex
def teardown_method(self):
"""Configure teardown before all tests."""
if os.getenv('TEARDOWN_METHOD') is None:
self.v3_helper.cleanup()
def test_note_api_options(self):
"""Test filter keywords."""
super().obj_api_options()
def test_note_filter_keywords(self):
"""Test filter keywords."""
super().obj_filter_keywords()
def test_note_object_properties(self):
"""Test properties."""
super().obj_properties()
def test_note_object_properties_extra(self):
"""Test properties."""
super().obj_properties_extra()
def _test_note_on_obj(self, request, cm_object):
common_note_data = {
'text': 'Generic Note Data. This is auto generated to ensure that adding a note '
'does not remove already existing notes.'
}
notes = self.v3.notes()
# [Pre-Requisite] - Add a note to the provided cm object to ensure that it does not get
# replaced/removed
cm_object.stage_note(common_note_data)
cm_object.update()
# [Pre-Requisite] - Add the note data to the appropriate object
note_data = {'text': f'sample note for {request.node.name} test.'}
# [Pre-Requisite] - Add the appropriate filter for the notes object
if cm_object.type_.lower() == 'artifact':
notes.filter.artifact_id(TqlOperator.EQ, cm_object.model.id)
elif cm_object.type_.lower() == 'case':
notes.filter.case_id(TqlOperator.EQ, cm_object.model.id)
elif cm_object.type_.lower() == 'task':
notes.filter.task_id(TqlOperator.EQ, cm_object.model.id)
else:
assert False, f'Invalid value {cm_object.type_} passed into _test_note_on_obj.'
# [Create Testing] create the object
note = self._stage_note(cm_object, note_data, specify_type=True)
note = self.v3.note(id=note.model.id)
note.get()
# [Retrieve Testing] validate the object returned is the same object
assert note.model.text == note_data.get('text')
# [Retrieve Testing] validate the object got added to the object
assert len(notes) == 2
for note in cm_object.notes:
if note.model.text == note_data.get('text'):
break
else:
assert False, f'No note found -> {note.model.id}'
# [Update Testing] validate the object got updated
note.model.text = 'updated note value'
note.update()
assert len(notes) == 2
for note in cm_object.notes:
if note.model.text == 'updated note value':
break
else:
assert False, f'Note on {cm_object.type_} not updated -> {note.model.id}'
note.delete()
# [Delete Testing] validate the object got deleted to the object
assert len(notes) == 1
for remaining_note in cm_object.notes:
if remaining_note.model.id == note.model.id:
assert (
False
), f'Note found on {cm_object.type_} when it should not have been present.'
# [Delete Testing] validate the object is removed
with pytest.raises(RuntimeError) as exc_info:
note.get()
# [Delete Testing] assert error message contains the correct code
# error -> "(952, 'Error during GET. API status code: 404, ..."
assert '952' in str(exc_info.value)
def test_note_on_case(self, request: FixtureRequest):
"""Test Note functions on a Case Object"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
self._test_note_on_obj(request, case)
def test_note_on_artifact(self, request: FixtureRequest):
"""Test Note functions on a Artifact Object"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
# [Pre-Requisite] - create artifact
artifact_data = {
'case_id': case.model.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
artifact = self.v3.artifact(**artifact_data)
artifact.create()
self._test_note_on_obj(request, artifact)
def test_note_on_task(self, request: FixtureRequest):
"""Test Note functions on a Task Object"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
# [Pre-Requisite] - create task
task_data = {
'case_id': case.model.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
task = self.v3.task(**task_data)
task.create()
self._test_note_on_obj(request, task)
def test_note_get_many(self):
"""Test Artifact Get Many"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
note_count = 10
note_ids = []
for _ in range(0, note_count):
# [Create Testing] define object data
note_data = {
'case_id': case.model.id,
'text': f'sample note randomint - {randint(100, 999)}',
}
# [Create Testing] create the object
note = self.v3.note(**note_data)
# [Create Testing] create the object to the TC API
note.create()
note_ids.append(note.model.id)
# [Retrieve Testing] iterate over all objects looking for the created notes
notes = self.v3.notes(params={'resultLimit': 5})
notes.filter.case_id(TqlOperator.EQ, case.model.id)
assert len(notes) == note_count
for note in notes:
assert note.model.id in note_ids
note_ids.remove(note.model.id)
assert not note_ids, 'Not all notes were returned.'
def test_note_get_by_tql_filter_fail_tql(self):
"""Test Artifact Get by TQL"""
# retrieve object using TQL
notes = self.v3.notes()
notes.filter.tql = 'Invalid TQL'
# [Fail Testing] validate the object is removed
with pytest.raises(RuntimeError) as exc_info:
for _ in notes:
pass
# [Fail Testing] assert error message contains the correct code
# error -> "(950, 'Error during pagination. API status code: 400, ..."
assert '950' in str(exc_info.value)
assert notes.request.status_code == 400
def _stage_note(self, cm_object, note_data, specify_type=False):
"""Update the note object to include either the artifact/case/task/workflow_event field."""
keys = ['artifact_id', 'case_id', 'task_id', 'workflow_event_id']
for key in keys:
if key in note_data:
note_data.pop(key)
if specify_type:
note_data['text'] = note_data['text'] + f'Type -> {cm_object.type_}'
if cm_object.type_.lower() == 'artifact':
note_data['artifact_id'] = cm_object.model.id
elif cm_object.type_.lower() == 'case':
note_data['case_id'] = cm_object.model.id
elif cm_object.type_.lower() == 'task':
note_data['task_id'] = cm_object.model.id
elif cm_object.type_.lower() == 'workflow event':
note_data['workflow_event_id'] = cm_object.model.id
else:
assert False, f'Invalid value {cm_object.type_} passed into _stage_note'
note = self.v3.note(**note_data)
note.create()
return note
def test_note_all_filters(self, request: FixtureRequest):
"""Test TQL Filters for Notes"""
# [Pre-Requisite] - create case
note_data = {'text': f'sample note for {request.node.name} test.'}
case = self.v3_helper.create_case()
# [Pre-Requisite] - create workflow_event
workflow_event_data = {
'case_id': case.model.id,
'summary': 'pytest test workflow event',
}
workflow_event = self.v3.workflow_event(**workflow_event_data)
workflow_event.create()
# [Pre-Requisite] - create task
task_data = {
'case_id': case.model.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
task = self.v3.task(**task_data)
task.create()
# [Pre-Requisite] - create artifact
artifact_data = {
'case_id': case.model.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
artifact = self.v3.artifact(**artifact_data)
artifact.create()
note = self._stage_note(case, note_data, specify_type=True)
notes = self.v3.notes()
future = datetime.datetime.now() + datetime.timedelta(days=10)
future = future.strftime('%Y-%m-%dT%H:%M:%S')
past = datetime.datetime.now() + datetime.timedelta(days=-10)
past = past.strftime('%Y-%m-%dT%H:%M:%S')
# [Filter Testing] case_id
notes.filter.case_id(TqlOperator.EQ, case.model.id)
# [Filter Testing] author
# TODO: [PLAT-????] This fails if the user does not exist on the system
# notes.filter.author(TqlOperator.NE, 'Invalid Author')
# [Filter Testing] date_added
notes.filter.date_added(TqlOperator.GT, past)
# [Filter Testing] has_case -> using id since it's available
notes.filter.has_case.id(TqlOperator.EQ, case.model.id)
# [Filter Testing] id
notes.filter.id(TqlOperator.EQ, note.model.id)
# [Filter Testing] last_modified
notes.filter.last_modified(TqlOperator.LT, future)
# [Filter Testing] summary
notes.filter.summary(TqlOperator.NE, 'Invalid Summary')
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
notes = self.v3.notes()
note = self._stage_note(artifact, note_data, specify_type=True)
notes.filter.artifact_id(TqlOperator.EQ, artifact.model.id)
notes.filter.has_artifact.id(TqlOperator.EQ, artifact.model.id)
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
notes = self.v3.notes()
note = self._stage_note(task, note_data, specify_type=True)
notes.filter.task_id(TqlOperator.EQ, task.model.id)
notes.filter.has_task.id(TqlOperator.EQ, task.model.id)
assert len(notes) == 1, f'Invalid number of notes retrieved for tql -> {notes.tql.as_str}'
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
notes = self.v3.notes()
note = self._stage_note(workflow_event, note_data, specify_type=True)
notes.filter.workflow_event_id(TqlOperator.EQ, workflow_event.model.id)
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
|
py | 1a55640d421bb60fa7a7dd7a74ec47779c7251c2 | import qrcode
qrcode_data = 'https://www.youtube.com/c/EmbeddedProgrammer'
image = qrcode.make(qrcode_data)
image.save('QR.png')
|
py | 1a5566fbe10574b7f6156673b691b4863eded904 | from collections import Counter
import logging
from .kad_peerinfo import KadPeerHeap, create_kad_peerinfo
from .utils import gather_dict
log = logging.getLogger(__name__)
class SpiderCrawl:
"""Crawl the network and look for given 160-bit keys."""
def __init__(self, protocol, node, peers, ksize, alpha):
"""
Create a new SpiderCrawler.
Args:
protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
node: A :class:`~kademlia.node.Node` representing the key we're
looking for
peers: A list of :class:`~kademlia.node.Node` instances that
provide the entry point for the network
ksize: The value for k based on the paper
alpha: The value for alpha based on the paper
"""
self.protocol = protocol
self.ksize = ksize
self.alpha = alpha
self.node = node
self.nearest = KadPeerHeap(self.node, self.ksize)
self.last_ids_crawled = []
log.info("creating spider with peers: %s", peers)
self.nearest.push(peers)
async def _find(self, rpcmethod):
"""
Get either a value or list of nodes.
Args:
rpcmethod: The protocol's call_find_value or call_find_node.
The process:
1. calls find_* to current ALPHA nearest not already queried nodes,
adding results to current nearest list of k nodes.
2. current nearest list needs to keep track of who has been queried
already; sort by nearest, keep KSIZE
3. if the list is the same as last time, the next call should be to
everyone not yet queried
4. repeat, unless the nearest list has all been queried, then we're done
"""
log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
count = self.alpha
if self.nearest.get_ids() == self.last_ids_crawled:
count = len(self.nearest)
self.last_ids_crawled = self.nearest.get_ids()
dicts = {}
for peer in self.nearest.get_uncontacted()[:count]:
dicts[peer.peer_id_bytes] = rpcmethod(peer, self.node)
self.nearest.mark_contacted(peer)
found = await gather_dict(dicts)
return await self._nodes_found(found)
async def _nodes_found(self, responses):
raise NotImplementedError
class ValueSpiderCrawl(SpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha):
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
# keep track of the single nearest node without value - per
# section 2.3 so we can set the key there if found
self.nearest_without_value = KadPeerHeap(self.node, 1)
async def find(self):
"""Find either the closest nodes or the value requested."""
return await self._find(self.protocol.call_find_value)
async def _nodes_found(self, responses):
"""Handle the result of an iteration in _find."""
toremove = []
found_values = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
elif response.has_value():
found_values.append(response.get_value())
else:
peer = self.nearest.get_node(peerid)
self.nearest_without_value.push(peer)
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
if found_values:
return await self._handle_found_values(found_values)
if self.nearest.have_contacted_all():
# not found!
return None
return await self.find()
async def _handle_found_values(self, values):
"""
We got some values!
Exciting. But let's make sure they're all the same or freak out
a little bit. Also, make sure we tell the nearest node that
*didn't* have the value to store it.
"""
value_counts = Counter(values)
if len(value_counts) != 1:
log.warning(
"Got multiple values for key %i: %s", self.node.xor_id, str(values)
)
value = value_counts.most_common(1)[0][0]
peer = self.nearest_without_value.popleft()
if peer:
await self.protocol.call_store(peer, self.node.peer_id_bytes, value)
return value
class NodeSpiderCrawl(SpiderCrawl):
async def find(self):
"""Find the closest nodes."""
return await self._find(self.protocol.call_find_node)
async def _nodes_found(self, responses):
"""Handle the result of an iteration in _find."""
toremove = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
else:
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
if self.nearest.have_contacted_all():
return list(self.nearest)
return await self.find()
class RPCFindResponse:
def __init__(self, response):
"""
A wrapper for the result of an RPC find.
Args:
response: This will be a tuple of (<response received>, <value>)
where <value> will be a list of tuples if not found or
a dictionary of {'value': v} where v is the value desired
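Example values (illustrative only; the concrete contents are made up)::
    (True, {'value': b'stored value'})        # the value was found
    (True, [(node_id, '127.0.0.1', 8468)])    # only nearer nodes were returned
    (False, None)                             # the RPC did not complete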
"""
self.response = response
def happened(self):
"""Did the other host actually respond?"""
return self.response[0]
def has_value(self):
return isinstance(self.response[1], dict)
def get_value(self):
return self.response[1]["value"]
def get_node_list(self):
"""
Get the node list in the response.
If there's no value, this should be set.
"""
nodelist = self.response[1] or []
return [create_kad_peerinfo(*nodeple) for nodeple in nodelist]
|
py | 1a55680c40bf9c3a5daa1dc2dc5a279271d3fe48 | from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
import os
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'lyse.svg'))
splash.show()
splash.update_text('importing standard library modules')
# stdlib imports
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# 3rd party imports:
splash.update_text('importing numpy')
import numpy as np
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
splash.update_text('importing pandas')
import pandas
splash.update_text('importing Qt')
check_version('qtutils', '2.2.2', '3.0.0')
splash.update_text('importing labscript suite modules')
check_version('labscript_utils', '2.12.4', '3')
from labscript_utils.ls_zprocess import ZMQServer, ProcessTree
import zprocess
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
from labscript_utils.qtwidgets.outputbox import OutputBox
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
get_dataframe_from_shot,
replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
from lyse import LYSE_DIR
process_tree = ProcessTree.instance()
# Set a meaningful name for zlock client id:
process_tree.zlock_client.set_process_name('lyse')
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.join(LYSE_DIR, 'lyse.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.join(LYSE_DIR, '__main__.py')
relaunch_display_name = app_descriptions['lyse']
set_appusermodel(window_id, appids['lyse'], icon_path, relaunch_command, relaunch_display_name)
@inmain_decorator()
def error_dialog(message):
QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
reply = QtWidgets.QMessageBox.question(app.ui, 'lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
return (reply == QtWidgets.QMessageBox.Yes)
def scientific_notation(x, sigfigs=4, mode='eng'):
"""Returns a unicode string of the float f in scientific notation"""
times = u'\u00d7'
thinspace = u'\u2009'
hairspace = u'\u200a'
sups = {u'-': u'\u207b',
u'0': u'\u2070',
u'1': u'\xb9',
u'2': u'\xb2',
u'3': u'\xb3',
u'4': u'\u2074',
u'5': u'\u2075',
u'6': u'\u2076',
u'7': u'\u2077',
u'8': u'\u2078',
u'9': u'\u2079'}
prefixes = {
-24: u"y",
-21: u"z",
-18: u"a",
-15: u"f",
-12: u"p",
-9: u"n",
-6: u"\u03bc",
-3: u"m",
0: u"",
3: u"k",
6: u"M",
9: u"G",
12: u"T",
15: u"P",
18: u"E",
21: u"Z",
24: u"Y"
}
if not isinstance(x, float):
raise TypeError('x must be floating point number')
if np.isnan(x) or np.isinf(x):
return str(x)
if x != 0:
exponent = int(np.floor(np.log10(np.abs(x))))
# Only multiples of 10^3
exponent = int(np.floor(exponent / 3) * 3)
else:
exponent = 0
significand = x / 10 ** exponent
pre_decimal, post_decimal = divmod(significand, 1)
digits = sigfigs - len(str(int(pre_decimal)))
significand = round(significand, digits)
result = str(significand)
if exponent:
if mode == 'exponential':
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
elif mode == 'eng':
try:
# If our number has an SI prefix then use it
prefix = prefixes[exponent]
result += hairspace + prefix
except KeyError:
# Otherwise display in scientific notation
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
return result
def get_screen_geometry():
"""Return the a list of the geometries of each screen: each a tuple of
left, top, width and height"""
geoms = []
desktop = qapplication.desktop()
for i in range(desktop.screenCount()):
sg = desktop.screenGeometry(i)
geoms.append((sg.left(), sg.top(), sg.width(), sg.height()))
return geoms
class WebServer(ZMQServer):
def handler(self, request_data):
logger.info('WebServer request: %s' % str(request_data))
if request_data == 'hello':
return 'hello'
elif request_data == 'get dataframe':
# infer_objects() picks fixed datatypes for columns that are compatible with
# fixed datatypes, dramatically speeding up pickling. It is called here
# rather than when updating the dataframe as calling it during updating may
# call it needlessly often, whereas it only needs to be called prior to
# sending the dataframe to a client requesting it, as we're doing now.
app.filebox.shots_model.infer_objects()
return app.filebox.shots_model.dataframe
elif isinstance(request_data, dict):
if 'filepath' in request_data:
h5_filepath = shared_drive.path_to_local(request_data['filepath'])
if isinstance(h5_filepath, bytes):
h5_filepath = h5_filepath.decode('utf8')
if not isinstance(h5_filepath, str):
raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
app.filebox.incoming_queue.put(h5_filepath)
return 'added successfully'
elif isinstance(request_data, str):
# Just assume it's a filepath:
app.filebox.incoming_queue.put(shared_drive.path_to_local(request_data))
return "Experiment added successfully\n"
return ("error: operation not supported. Recognised requests are:\n "
"'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
# A signal to show that the window is shown and painted.
firstPaint = Signal()
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self._previously_painted = False
self.closing = False
def closeEvent(self, event):
if self.closing:
return QtWidgets.QMainWindow.closeEvent(self, event)
if app.on_close_event():
self.closing = True
timeout_time = time.time() + 2
self.delayedClose(timeout_time)
event.ignore()
def delayedClose(self, timeout_time):
if not all(app.workers_terminated().values()) and time.time() < timeout_time:
QtCore.QTimer.singleShot(50, lambda: self.delayedClose(timeout_time))
else:
QtCore.QTimer.singleShot(0, self.close)
def event(self, event):
result = QtWidgets.QMainWindow.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def paintEvent(self, event):
result = QtWidgets.QMainWindow.paintEvent(self, event)
if not self._previously_painted:
self._previously_painted = True
self.firstPaint.emit()
return result
class AnalysisRoutine(object):
def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
self.filepath = filepath
self.shortname = os.path.basename(self.filepath)
self.model = model
self.output_box_port = output_box_port
self.COL_ACTIVE = RoutineBox.COL_ACTIVE
self.COL_STATUS = RoutineBox.COL_STATUS
self.COL_NAME = RoutineBox.COL_NAME
self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH
self.error = False
self.done = False
self.to_worker, self.from_worker, self.worker = self.start_worker()
# Make a row to put into the model:
active_item = QtGui.QStandardItem()
active_item.setCheckable(True)
active_item.setCheckState(checked)
info_item = QtGui.QStandardItem()
name_item = QtGui.QStandardItem(self.shortname)
name_item.setToolTip(self.filepath)
name_item.setData(self.filepath, self.ROLE_FULLPATH)
self.model.appendRow([active_item, info_item, name_item])
self.exiting = False
def start_worker(self):
# Start a worker process for this analysis routine:
worker_path = os.path.join(LYSE_DIR, 'analysis_subprocess.py')
child_handles = process_tree.subprocess(
worker_path,
output_redirection_port=self.output_box_port,
startup_timeout=30,
)
to_worker, from_worker, worker = child_handles
# Tell the worker what script it will be executing:
to_worker.put(self.filepath)
return to_worker, from_worker, worker
def do_analysis(self, filepath):
self.to_worker.put(['analyse', filepath])
signal, data = self.from_worker.get()
if signal == 'error':
return False, data
elif signal == 'done':
return True, data
else:
raise ValueError('invalid signal %s'%str(signal))
@inmain_decorator()
def set_status(self, status):
index = self.get_row_index()
if index is None:
# Yelp, we've just been deleted. Nothing to do here.
return
status_item = self.model.item(index, self.COL_STATUS)
if status == 'done':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
self.done = True
self.error = False
elif status == 'working':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
self.done = False
self.error = False
elif status == 'error':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
self.error = True
self.done = False
elif status == 'clear':
status_item.setData(None, QtCore.Qt.DecorationRole)
self.done = False
self.error = False
else:
raise ValueError(status)
@inmain_decorator()
def enabled(self):
index = self.get_row_index()
if index is None:
# Yelp, we've just been deleted.
return False
enabled_item = self.model.item(index, self.COL_ACTIVE)
return (enabled_item.checkState() == QtCore.Qt.Checked)
def get_row_index(self):
"""Returns the row index for this routine's row in the model"""
for row in range(self.model.rowCount()):
name_item = self.model.item(row, self.COL_NAME)
fullpath = name_item.data(self.ROLE_FULLPATH)
if fullpath == self.filepath:
return row
def restart(self):
# TODO set status to 'restarting' or an icon or something, and gray out the item?
self.end_child(restart=True)
def remove(self):
"""End the child process and remove from the treeview"""
self.end_child()
index = self.get_row_index()
if index is None:
# Already gone
return
self.model.removeRow(index)
def end_child(self, restart=False):
self.to_worker.put(['quit', None])
timeout_time = time.time() + 2
self.exiting = True
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))
def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
worker.poll()
if worker.returncode is None and time.time() < timeout_time:
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill, restart))
return
elif worker.returncode is None:
if not kill:
worker.terminate()
app.output_box.output('%s worker not responding.\n'%self.shortname)
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
return
else:
worker.kill()
app.output_box.output('%s worker killed\n'%self.shortname, red=True)
elif kill:
app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
else:
app.output_box.output('%s worker exited cleanly\n'%self.shortname)
# if analysis was running notify analysisloop that analysis has failed
self.from_worker.put(('error', {}))
if restart:
self.to_worker, self.from_worker, self.worker = self.start_worker()
app.output_box.output('%s worker restarted\n'%self.shortname)
self.exiting = False
class TreeView(QtWidgets.QTreeView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTreeView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click."""
def __init__(self, *args):
QtWidgets.QTreeView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTreeView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTreeView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class RoutineBox(object):
COL_ACTIVE = 0
COL_STATUS = 1
COL_NAME = 2
ROLE_FULLPATH = QtCore.Qt.UserRole + 1
# This data (stored in the name item) does not necessarily match
# the position in the model. It will be set just
# prior to sort() being called with this role as the sort data.
# This is how we will reorder the model's rows instead of
# using remove/insert.
ROLE_SORTINDEX = QtCore.Qt.UserRole + 2
def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
self.multishot = multishot
self.filebox = filebox
self.exp_config = exp_config
self.from_filebox = from_filebox
self.to_filebox = to_filebox
self.output_box_port = output_box_port
self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))
loader = UiLoader()
loader.registerCustomWidget(TreeView)
self.ui = loader.load(os.path.join(LYSE_DIR, 'routinebox.ui'))
container.addWidget(self.ui)
if multishot:
self.ui.groupBox.setTitle('Multishot routines')
else:
self.ui.groupBox.setTitle('Singleshot routines')
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.ui.treeView.setHeader(self.header)
self.ui.treeView.setModel(self.model)
active_item = QtGui.QStandardItem()
active_item.setToolTip('Whether the analysis routine should run')
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
status_item.setToolTip('The status of this analysis routine\'s execution')
name_item = QtGui.QStandardItem('name')
name_item.setToolTip('The name of the python script for the analysis routine')
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setToolTip('Whether all analysis routines should run')
self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
self.header.setStretchLastSection(True)
self.select_all_checkbox.setTristate(False)
self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
self.model.setSortRole(self.ROLE_SORTINDEX)
self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
self.ui.treeView.setColumnWidth(self.COL_NAME, 200)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_active = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
self.action_set_selected_inactive = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
self.action_restart_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)
self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')
self.routines = []
self.connect_signals()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
self.model.itemChanged.connect(self.on_model_item_changed)
self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_active.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_inactive.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)
def on_add_routines_clicked(self):
routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select analysis routines',
self.last_opened_routine_folder,
"Python scripts (*.py)")
if type(routine_files) is tuple:
routine_files, _ = routine_files
if not routine_files:
# User cancelled selection
return
# Convert to standard platform-specific paths, since Qt returns paths with forward slashes:
routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_routine_folder = os.path.dirname(routine_files[0])
self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])
def add_routines(self, routine_files, clear_existing=False):
"""Add routines to the routine box, where routine_files is a list of
tuples containing the filepath and whether the routine is enabled or
not when it is added. If clear_existing == True, then any existing
analysis routines will be cleared before the new ones are added."""
if clear_existing:
for routine in self.routines[:]:
routine.remove()
self.routines.remove(routine)
# Queue the files to be opened:
for filepath, checked in routine_files:
if filepath in [routine.filepath for routine in self.routines]:
app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
continue
routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
self.routines.append(routine)
self.update_select_all_checkstate()
def on_treeview_double_left_clicked(self, index):
# If double clicking on the name item, open
# the routine in the specified text editor:
if index.column() != self.COL_NAME:
return
name_item = self.model.item(index.row(), self.COL_NAME)
routine_filepath = name_item.data(self.ROLE_FULLPATH)
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
# Check that an editor is configured:
if not editor_path:
error_dialog("No editor specified in the labconfig.")
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the routine filepath
editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [routine_filepath] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
if not selected_rows:
return
if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
return
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines[:]:
if routine.filepath in filepaths:
routine.remove()
self.routines.remove(routine)
self.update_select_all_checkstate()
def on_model_item_changed(self, item):
if item.column() == self.COL_ACTIVE:
self.update_select_all_checkstate()
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
with self.model_item_changed_disconnected:
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(state)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui.treeView)
menu.addAction(self.action_set_selected_active)
menu.addAction(self.action_set_selected_inactive)
menu.addAction(self.action_restart_selected)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, active):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(active)
self.update_select_all_checkstate()
def on_move_to_top_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = 0
i_unselected = len(selected_rows)
order = []
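# Build a permutation where order[i] is the new position of row i: selected
# rows are assigned positions 0, 1, ... in their current relative order, and
# unselected rows follow. For example, with five rows and rows {2, 3}
# selected, order == [2, 3, 0, 1, 4].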
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_move_up_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
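# Build a permutation where order[i] is the new position of row i: each
# selected row swaps with the nearest unselected row above it, so selected
# rows move up one place (rows already at the top stay put). For example,
# with five rows and rows {2, 3} selected, order == [0, 3, 1, 2, 4].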
for i in range(n):
if i in selected_rows:
if last_unselected_index is None:
order.append(i)
else:
order.append(i - 1)
order[last_unselected_index] += 1
else:
last_unselected_index = i
order.append(i)
self.reorder(order)
def on_move_down_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
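# Same idea as moving up, but iterating from the bottom: each selected row
# swaps with the nearest unselected row below it, so selected rows move down
# one place (rows already at the bottom stay put). For example, with five
# rows and rows {1, 2} selected, order == [0, 2, 3, 1, 4].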
for i in reversed(range(n)):
if i in selected_rows:
if last_unselected_index is None:
order.insert(0, i)
else:
order.insert(0, i + 1)
order[last_unselected_index - n] -= 1
else:
last_unselected_index = i
order.insert(0, i)
self.reorder(order)
def on_move_to_bottom_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = n - len(selected_rows)
i_unselected = 0
order = []
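# Mirror image of 'move to top': selected rows are assigned the last
# positions in their current relative order, and unselected rows keep their
# relative order at the top. For example, with five rows and rows {1, 2}
# selected, order == [0, 3, 4, 1, 2].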
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_restart_selected_triggered(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines:
if routine.filepath in filepaths:
routine.restart()
self.update_select_all_checkstate()
def analysis_loop(self):
while True:
filepath = self.from_filebox.get()
if self.multishot:
assert filepath is None
# TODO: get the filepath of the output h5 file:
# filepath = self.filechooserentry.get_text()
self.logger.info('got a file to process: %s'%filepath)
self.do_analysis(filepath)
def todo(self):
"""How many analysis routines are not done?"""
return len([r for r in self.routines if r.enabled() and not r.done])
def do_analysis(self, filepath):
"""Run all analysis routines once on the given filepath,
which is a shot file if we are a singleshot routine box"""
for routine in self.routines:
routine.set_status('clear')
remaining = self.todo()
error = False
updated_data = {}
while remaining:
self.logger.debug('%d routines left to do'%remaining)
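# Pick the first enabled routine that hasn't been run yet; the for/else
# leaves routine as None if every enabled routine is already done.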
for routine in self.routines:
if routine.enabled() and not routine.done:
break
else:
routine = None
if routine is not None:
self.logger.info('running analysis routine %s'%routine.shortname)
routine.set_status('working')
success, updated_data = routine.do_analysis(filepath)
if success:
routine.set_status('done')
self.logger.debug('success')
else:
routine.set_status('error')
self.logger.debug('failure')
error = True
break
# Race conditions here, but it's only for reporting percent done
# so it doesn't matter if it's wrong briefly:
remaining = self.todo()
total = len([r for r in self.routines if r.enabled()])
done = total - remaining
try:
status_percent = 100*float(done)/(remaining + done)
except ZeroDivisionError:
# All routines got deleted mid-analysis, we're done here:
status_percent = 100.0
self.to_filebox.put(['progress', status_percent, updated_data])
if error:
self.to_filebox.put(['error', None, updated_data])
else:
self.to_filebox.put(['done', 100.0, {}])
self.logger.debug('completed analysis of %s'%filepath)
def reorder(self, order):
assert len(order) == len(set(order)), 'ordering contains non-unique elements'
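# 'order' maps old row index -> new row index. Rather than removing and
# re-inserting rows, we write each row's new index into its name item as
# sort data and then sort the model on that role.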
# Apply the reordering to the liststore:
for old_index, new_index in enumerate(order):
name_item = self.model.item(old_index, self.COL_NAME)
name_item.setData(new_index, self.ROLE_SORTINDEX)
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
# Apply new order to our list of routines too:
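# order.index(i) is the old position of the routine whose new position is i: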
self.routines = [self.routines[order.index(i)] for i in range(len(order))]
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
all_states.append(active_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
class EditColumnsDialog(QtWidgets.QDialog):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
close_signal = Signal()
def __init__(self):
QtWidgets.QDialog.__init__(self, None, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
def event(self, event):
result = QtWidgets.QDialog.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def closeEvent(self, event):
self.close_signal.emit()
event.ignore()
class EditColumns(object):
ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
COL_VISIBLE = 0
COL_NAME = 1
def __init__(self, filebox, column_names, columns_visible):
self.filebox = filebox
self.column_names = column_names.copy()
self.columns_visible = columns_visible.copy()
self.old_columns_visible = columns_visible.copy()
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'edit_columns.ui'), EditColumnsDialog())
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setTristate(False)
self.ui.treeView.setHeader(self.header)
self.proxy_model = QtCore.QSortFilterProxyModel()
self.proxy_model.setSourceModel(self.model)
self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.proxy_model.setFilterKeyColumn(self.COL_NAME)
self.ui.treeView.setSortingEnabled(True)
self.header.setStretchLastSection(True)
self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
self.ui.treeView.setModel(self.proxy_model)
self.ui.setWindowModality(QtCore.Qt.ApplicationModal)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_visible = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
self.action_set_selected_hidden = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)
self.connect_signals()
self.populate_model(column_names, self.columns_visible)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.ui.close_signal.connect(self.close)
self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
self.ui.pushButton_cancel.clicked.connect(self.cancel)
self.model.itemChanged.connect(self.on_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_visible.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_hidden.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
def populate_model(self, column_names, columns_visible):
self.model.clear()
self.model.setHorizontalHeaderLabels(['', 'Name'])
self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
# Which indices in self.columns_visible the row numbers correspond to
self.column_indices = {}
# Remove our special columns from the dict of column names by keeping only tuples:
column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
# Sort the column names as comma separated values, converting to lower case:
sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
for column_index, name in sorted(column_names.items(), key=sortkey):
visible = columns_visible[column_index]
visible_item = QtGui.QStandardItem()
visible_item.setCheckable(True)
if visible:
visible_item.setCheckState(QtCore.Qt.Checked)
visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
else:
visible_item.setCheckState(QtCore.Qt.Unchecked)
visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
name_as_string = ', '.join(name).strip(', ')
name_item = QtGui.QStandardItem(name_as_string)
name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
self.model.appendRow([visible_item, name_item])
self.column_indices[self.model.rowCount() - 1] = column_index
self.ui.treeView.resizeColumnToContents(self.COL_NAME)
self.update_select_all_checkstate()
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_set_selected_visible)
menu.addAction(self.action_set_selected_hidden)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, visible):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
for row in selected_rows:
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, visible)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def on_filter_text_edited(self, text):
self.proxy_model.setFilterWildcard(text)
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, state)
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def update_visible_state(self, item, state):
assert item.column() == self.COL_VISIBLE, "unexpected column"
row = item.row()
with self.model_item_changed_disconnected:
item.setCheckState(state)
item.setData(state, self.ROLE_SORT_DATA)
if state == QtCore.Qt.Checked:
self.columns_visible[self.column_indices[row]] = True
else:
self.columns_visible[self.column_indices[row]] = False
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
all_states.append(visible_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
def on_model_item_changed(self, item):
state = item.checkState()
self.update_visible_state(item, state)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def do_sort(self):
header = self.ui.treeView.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView.sortByColumn(sort_column, sort_order)
def update_columns(self, column_names, columns_visible):
# Index/name mapping may have changed. Get a mapping by *name* of
# which columns were previously visible, so we can update our by-index
# mapping in a moment:
old_columns_visible_by_name = {}
for old_column_number, visible in self.old_columns_visible.items():
column_name = self.column_names[old_column_number]
old_columns_visible_by_name[column_name] = visible
self.columns_visible = columns_visible.copy()
self.column_names = column_names.copy()
# Update the by-index mapping of which columns were visible before editing:
self.old_columns_visible = {}
for index, name in self.column_names.items():
try:
self.old_columns_visible[index] = old_columns_visible_by_name[name]
except KeyError:
# A new column. If editing is cancelled, any new columns
# should be set to visible:
self.old_columns_visible[index] = True
self.populate_model(column_names, self.columns_visible)
def show(self):
self.old_columns_visible = self.columns_visible.copy()
self.ui.show()
def close(self):
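# 'Cancel': restore the visibility snapshot taken in show(), since changes
# are applied to the filebox live as checkboxes are toggled.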
self.columns_visible = self.old_columns_visible.copy()
self.filebox.set_columns_visible(self.columns_visible)
self.populate_model(self.column_names, self.columns_visible)
self.ui.hide()
def cancel(self):
self.ui.close()
def make_it_so(self):
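# 'OK': visibility changes have already been applied live, so accepting the
# dialog only needs to hide it.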
self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
"""An item delegate with a fixed height and a progress bar in one column"""
EXTRA_ROW_HEIGHT = 2
def __init__(self, view, model, col_status, role_status_percent):
self.view = view
self.model = model
self.COL_STATUS = col_status
self.ROLE_STATUS_PERCENT = role_status_percent
QtWidgets.QStyledItemDelegate.__init__(self)
def sizeHint(self, *args):
fontmetrics = QtGui.QFontMetrics(self.view.font())
text_height = fontmetrics.height()
row_height = text_height + self.EXTRA_ROW_HEIGHT
size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
return QtCore.QSize(size.width(), row_height)
def paint(self, painter, option, index):
if index.column() == self.COL_STATUS:
status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
if status_percent == 100:
# Render as a normal item - this shows whatever icon is set instead of a progress bar.
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
else:
# Method of rendering a progress bar into the view copied from
# Qt's 'network-torrent' example:
# http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
# Set up a QStyleOptionProgressBar to precisely mimic the
# environment of a progress bar.
progress_bar_option = QtWidgets.QStyleOptionProgressBar()
progress_bar_option.state = QtWidgets.QStyle.State_Enabled
progress_bar_option.direction = qapplication.layoutDirection()
progress_bar_option.rect = option.rect
progress_bar_option.fontMetrics = qapplication.fontMetrics()
progress_bar_option.minimum = 0
progress_bar_option.maximum = 100
progress_bar_option.textAlignment = QtCore.Qt.AlignCenter
progress_bar_option.textVisible = True
# Set the progress and text values of the style option.
progress_bar_option.progress = status_percent
progress_bar_option.text = '%d%%' % status_percent
# Draw the progress bar onto the view.
qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, progress_bar_option, painter)
else:
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
class UneditableModel(QtGui.QStandardItemModel):
def flags(self, index):
"""Return flags as normal except that the ItemIsEditable
flag is always False"""
result = QtGui.QStandardItemModel.flags(self, index)
return result & ~QtCore.Qt.ItemIsEditable
class TableView(QtWidgets.QTableView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTableView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click. Multiple inheritance of QObjects is not possible, so we
are forced to duplicate code instead of sharing code with the extremely
similar TreeView class in this module"""
def __init__(self, *args):
QtWidgets.QTableView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTableView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTableView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class DataFrameModel(QtCore.QObject):
COL_STATUS = 0
COL_FILEPATH = 1
ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2
columns_changed = Signal()
def __init__(self, view, exp_config):
QtCore.QObject.__init__(self)
self._view = view
self.exp_config = exp_config
self._model = UneditableModel()
self.row_number_by_filepath = {}
self._previous_n_digits = 0
self._header = HorizontalHeaderViewWithWidgets(self._model)
self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
# Smaller font for headers:
font = self._vertheader.font()
font.setPointSize(10 if sys.platform == 'darwin' else 8)
self._header.setFont(font)
font.setFamily('Ubuntu Mono')
self._vertheader.setFont(font)
self._vertheader.setHighlightSections(True)
self._vertheader.setSectionsClickable(True)
self._view.setModel(self._model)
self._view.setHorizontalHeader(self._header)
self._view.setVerticalHeader(self._vertheader)
self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
self._view.setItemDelegate(self._delegate)
self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Check if integer indexing is to be used
try:
self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.integer_indexing = False
# This dataframe will contain all the scalar data
# from the shot files that are currently open:
index = pandas.MultiIndex.from_tuples([('filepath', '')])
self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
# How many levels the dataframe's multiindex has:
self.nlevels = self.dataframe.columns.nlevels
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
status_item.setToolTip('status/progress of single-shot analysis')
self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
filepath_item = QtGui.QStandardItem('filepath')
filepath_item.setToolTip('filepath')
self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
self._view.setColumnWidth(self.COL_STATUS, 70)
self._view.setColumnWidth(self.COL_FILEPATH, 100)
# Column indices to names and vice versa for fast lookup:
self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
# Whether or not a deleted column was visible at the time it was deleted (by name):
self.deleted_columns_visible = {}
# Make the actions for the context menu:
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
self.connect_signals()
def connect_signals(self):
self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selection_model = self._view.selectionModel()
selected_indexes = selection_model.selectedRows()
selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
if not selected_name_items:
return
if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
return
# Remove from DataFrame first:
self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
self.dataframe.index = pandas.Index(range(len(self.dataframe)))
# Delete one at a time from Qt model:
for name_item in selected_name_items:
row = name_item.row()
self._model.removeRow(row)
self.renumber_rows()
def mark_selection_not_done(self):
selected_indexes = self._view.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_DELETED_OFF_DISK):
# If the shot was previously not readable on disk, check to
# see if it's readable now. It may have been undeleted or
# perhaps it being unreadable before was due to a network
# glitch or similar.
filepath = self._model.item(row, self.COL_FILEPATH).text()
if not os.path.exists(filepath):
continue
# Shot file is accessible again:
status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
status_item.setToolTip(None)
status_item.setData(0, self.ROLE_STATUS_PERCENT)
def on_view_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self._view)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_double_click(self, index):
filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
shot_filepath = filepath_item.text()
# Get the path to the hdf5 viewer:
viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
# Check that a viewer is configured:
if not viewer_path:
error_dialog("No hdf5 viewer specified in the labconfig.")
if '{file}' in viewer_args:
# Split the args on spaces into a list, replacing {file} with the shot filepath
viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
viewer_args = [shot_filepath] + viewer_args.split()
try:
subprocess.Popen([viewer_path] + viewer_args)
except Exception as e:
error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def set_columns_visible(self, columns_visible):
self.columns_visible = columns_visible
for column_index, visible in columns_visible.items():
self._view.setColumnHidden(column_index, not visible)
def update_column_levels(self):
"""Pads the keys and values of our lists of column names so that
they still match those in the dataframe after the number of
levels in its multiindex has increased (the number of levels never
decreases, given the current implementation of concat_with_padding())"""
extra_levels = self.dataframe.columns.nlevels - self.nlevels
if extra_levels > 0:
self.nlevels = self.dataframe.columns.nlevels
column_indices = {}
column_names = {}
for column_name in self.column_indices:
if not isinstance(column_name, tuple):
# It's one of our special columns
new_column_name = column_name
else:
new_column_name = column_name + ('',) * extra_levels
column_index = self.column_indices[column_name]
column_indices[new_column_name] = column_index
column_names[column_index] = new_column_name
self.column_indices = column_indices
self.column_names = column_names
@inmain_decorator()
def mark_as_deleted_off_disk(self, filepath):
# Confirm the shot hasn't been removed from lyse (we are in the main
# thread so there is no race condition in checking first)
if not filepath in self.dataframe['filepath'].values:
# Shot has been removed from FileBox, nothing to do here:
return
row_number = self.row_number_by_filepath[filepath]
status_item = self._model.item(row_number, self.COL_STATUS)
already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
if already_marked_as_deleted:
return
# Icon only displays if percent completion is 100. This is also
# important so that the shot is not picked up as analysis
# incomplete and analysis re-attempted on it.
status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
status_item.setData(100, self.ROLE_STATUS_PERCENT)
status_item.setToolTip("Shot has been deleted off disk or is unreadable")
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)
@inmain_decorator()
def infer_objects(self):
"""Convert columns in the dataframe with dtype 'object' into compatible, more
specific types, if possible. This improves pickling performance and ensures
multishot analysis code does not encounter columns with dtype 'object' for
non-mixed numerical data, which it might choke on.
"""
self.dataframe = self.dataframe.infer_objects()
@inmain_decorator()
def update_row(self, filepath, dataframe_already_updated=False, status_percent=None, new_row_data=None, updated_row_data=None):
""""Updates a row in the dataframe and Qt model
to the data in the HDF5 file for that shot. Also sets the percent done, if specified"""
# To speed things up block signals to the model during update
self._model.blockSignals(True)
# Update the row in the dataframe first:
if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
try:
row_number = self.row_number_by_filepath[filepath]
except KeyError:
# Row has been deleted, nothing to do here:
return
filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
assert filepath == self.dataframe.at[row_number, filepath_colname]
if updated_row_data is not None and not dataframe_already_updated:
for group, name in updated_row_data:
column_name = (group, name) + ('',) * (self.nlevels - 2)
value = updated_row_data[group, name]
try:
self.dataframe.at[row_number, column_name] = value
except ValueError:
# Did the column not already exist when we tried to set an iterable value?
if not column_name in self.dataframe.columns:
# create it with a non-iterable and then overwrite with the iterable value:
self.dataframe.at[row_number, column_name] = None
else:
# Incompatible datatype - convert the datatype of the column to
# 'object'
self.dataframe[column_name] = self.dataframe[column_name].astype('object')
# Now that the column exists and has dtype object, we can set the value:
self.dataframe.at[row_number, column_name] = value
dataframe_already_updated = True
if not dataframe_already_updated:
if new_row_data is None:
raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
"by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
self.update_column_levels()
# Check and create necessary new columns in the Qt model:
new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
new_columns_start = self._model.columnCount()
self._model.insertColumns(new_columns_start, len(new_column_names))
for i, column_name in enumerate(sorted(new_column_names)):
# Set the header label of the new column:
column_number = new_columns_start + i
self.column_names[column_number] = column_name
self.column_indices[column_name] = column_number
if column_name in self.deleted_columns_visible:
# Restore the former visibility of this column if we've
# seen one with its name before:
visible = self.deleted_columns_visible[column_name]
self.columns_visible[column_number] = visible
self._view.setColumnHidden(column_number, not visible)
else:
# new columns are visible by default:
self.columns_visible[column_number] = True
column_name_as_string = '\n'.join(column_name).strip()
header_item = QtGui.QStandardItem(column_name_as_string)
header_item.setToolTip(column_name_as_string)
self._model.setHorizontalHeaderItem(column_number, header_item)
# Check and remove any no-longer-needed columns in the Qt model:
defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
- {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
for column_number in sorted(defunct_column_indices, reverse=True):
# Remove columns from the Qt model. In reverse order so that
# removals do not change the position of columns yet to be
# removed.
self._model.removeColumn(column_number)
# Save whether or not the column was visible when it was
# removed (so that if it is re-added the visibility will be retained):
self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
del self.column_names[column_number]
del self.columns_visible[column_number]
if defunct_column_indices:
# Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
# Update the inverse mapping of self.column_names:
self.column_indices = {name: index for index, name in self.column_names.items()}
# Update the data in the Qt model:
dataframe_row = self.dataframe.iloc[row_number].to_dict()
for column_number, column_name in self.column_names.items():
if not isinstance(column_name, tuple):
# One of our special columns, does not correspond to a column in the dataframe:
continue
if updated_row_data is not None and column_name not in updated_row_data:
continue
value = dataframe_row[column_name]
if isinstance(value, float):
value_str = scientific_notation(value)
else:
value_str = str(value)
lines = value_str.splitlines()
if len(lines) > 1:
short_value_str = lines[0] + ' ...'
else:
short_value_str = value_str
item = self._model.item(row_number, column_number)
if item is None:
# This is the first time we've written a value to this part of the model:
item = QtGui.QStandardItem(short_value_str)
item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
self._model.setItem(row_number, column_number, item)
else:
item.setText(short_value_str)
item.setToolTip(repr(value))
for i, column_name in enumerate(sorted(new_column_names)):
# Resize any new columns to fit contents:
column_number = new_columns_start + i
self._view.resizeColumnToContents(column_number)
if status_percent is not None:
status_item = self._model.item(row_number, self.COL_STATUS)
status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)
if new_column_names or defunct_column_names:
self.columns_changed.emit()
# unblock signals to the model and tell it to update
self._model.blockSignals(False)
self._model.layoutChanged.emit()
def new_row(self, filepath, done=False):
status_item = QtGui.QStandardItem()
if done:
status_item.setData(100, self.ROLE_STATUS_PERCENT)
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
else:
status_item.setData(0, self.ROLE_STATUS_PERCENT)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
name_item = QtGui.QStandardItem(filepath)
return [status_item, name_item]
def renumber_rows(self, add_from=0):
"""Add/update row indices - the rows are numbered in simple sequential
order for easy comparison with the dataframe. add_from allows you to
only add numbers for new rows from the given index as a performance
optimisation, though if the number of digits changes, all rows will
still be renumbered. add_from should not be used if rows have been
deleted."""
n_digits = len(str(self._model.rowCount()))
if n_digits != self._previous_n_digits:
# All labels must be updated:
add_from = 0
self._previous_n_digits = n_digits
if add_from == 0:
self.row_number_by_filepath = {}
for row_number in range(add_from, self._model.rowCount()):
vertical_header_item = self._model.verticalHeaderItem(row_number)
row_number_str = str(row_number).rjust(n_digits)
vert_header_text = '{}. '.format(row_number_str)
filepath_item = self._model.item(row_number, self.COL_FILEPATH)
filepath = filepath_item.text()
self.row_number_by_filepath[filepath] = row_number
if self.integer_indexing:
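# Label rows with zero-padded sequence index, run number and run repeat,
# e.g. '0001 | 0002 | 0000', falling back to '----' for missing values.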
header_cols = ['sequence_index', 'run number', 'run repeat']
header_strings = []
for col in header_cols:
val = self.dataframe[col].values[row_number]
if pandas.notna(val):
header_strings.append('{:04d}'.format(val))
else:
header_strings.append('----')
vert_header_text += ' | '.join(header_strings)
else:
basename = os.path.splitext(os.path.basename(filepath))[0]
vert_header_text += basename
vertical_header_item.setText(vert_header_text)
@inmain_decorator()
def add_files(self, filepaths, new_row_data, done=False):
"""Add files to the dataframe model. New_row_data should be a
dataframe containing the new rows."""
to_add = []
# Check for duplicates:
for filepath in filepaths:
if filepath in self.row_number_by_filepath or filepath in to_add:
app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
if new_row_data is not None:
df_row_index = np.where(new_row_data['filepath'].values == filepath)
new_row_data = new_row_data.drop(df_row_index[0])
new_row_data.index = pandas.Index(range(len(new_row_data)))
else:
to_add.append(filepath)
assert len(new_row_data) == len(to_add)
if to_add:
# Update the dataframe:
self.dataframe = concat_with_padding(self.dataframe, new_row_data)
self.update_column_levels()
app.filebox.set_add_shots_progress(None, None, "updating filebox")
for filepath in to_add:
# Add the new rows to the Qt model:
self._model.appendRow(self.new_row(filepath, done=done))
vert_header_item = QtGui.QStandardItem('...loading...')
self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
self._view.resizeRowToContents(self._model.rowCount() - 1)
self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
# Update the Qt model:
for filepath in to_add:
self.update_row(filepath, dataframe_already_updated=True)
@inmain_decorator()
def get_first_incomplete(self):
"""Returns the filepath of the first shot in the model that has not
been analysed"""
for row in range(self._model.rowCount()):
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
filepath_item = self._model.item(row, self.COL_FILEPATH)
return filepath_item.text()
class FileBox(object):
def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
self.exp_config = exp_config
self.to_singleshot = to_singleshot
self.to_multishot = to_multishot
self.from_singleshot = from_singleshot
self.from_multishot = from_multishot
self.logger = logging.getLogger('lyse.FileBox')
self.logger.info('starting')
loader = UiLoader()
loader.registerCustomWidget(TableView)
self.ui = loader.load(os.path.join(LYSE_DIR, 'filebox.ui'))
self.ui.progressBar_add_shots.hide()
container.addWidget(self.ui)
self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')
self.connect_signals()
self.analysis_paused = False
self.multishot_required = False
# An Event to let the analysis thread know to check for shots that
# need analysing, rather than using a time.sleep:
self.analysis_pending = threading.Event()
# The folder that the 'add shots' dialog will open to:
self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
# A queue for storing incoming files from the ZMQ server so
# the server can keep receiving files even if analysis is slow
# or paused:
self.incoming_queue = queue.Queue()
# Start the thread to handle incoming files, and store them in
# a buffer if processing is paused:
self.incoming = threading.Thread(target=self.incoming_buffer_loop)
self.incoming.daemon = True
self.incoming.start()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
self.shots_model.columns_changed.connect(self.on_columns_changed)
self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)
def on_edit_columns_clicked(self):
self.edit_columns_dialog.show()
def on_columns_changed(self):
column_names = self.shots_model.column_names
columns_visible = self.shots_model.columns_visible
self.edit_columns_dialog.update_columns(column_names, columns_visible)
def on_add_shot_files_clicked(self):
shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if type(shot_files) is tuple:
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform-specific paths, since Qt returns paths with forward slashes:
shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
self.incoming_queue.put(filepath)
def on_analysis_running_toggled(self, pressed):
if pressed:
self.analysis_paused = True
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis paused')
else:
self.analysis_paused = False
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis running')
self.analysis_pending.set()
def on_mark_selection_not_done_clicked(self):
self.shots_model.mark_selection_not_done()
# Let the analysis loop know to look for these shots:
self.analysis_pending.set()
def on_run_multishot_analysis_clicked(self):
self.multishot_required = True
self.analysis_pending.set()
def set_columns_visible(self, columns_visible):
self.shots_model.set_columns_visible(columns_visible)
@inmain_decorator()
def set_add_shots_progress(self, completed, total, message):
self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
if completed == total and message is None:
self.ui.progressBar_add_shots.hide()
else:
if total is not None:
self.ui.progressBar_add_shots.setMaximum(total)
if completed is not None:
self.ui.progressBar_add_shots.setValue(completed)
if self.ui.progressBar_add_shots.isHidden():
self.ui.progressBar_add_shots.show()
if completed is None and total is None and message is not None:
# Ensure a repaint when only the message changes:
self.ui.progressBar_add_shots.repaint()
def incoming_buffer_loop(self):
"""We use a queue as a buffer for incoming shots. We don't want to hang and not
respond to a client submitting shots, so we just let shots pile up here until we can get to them.
The downside to this is that we can't return errors to the client if the shot cannot be added,
but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
the experiment on account of errors from the analysis stage, so what's the point of passing errors to it?
We'll just raise errors here and the user can decide what to do with them."""
logger = logging.getLogger('lyse.FileBox.incoming')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
n_shots_added = 0
while True:
try:
filepaths = []
filepath = self.incoming_queue.get()
filepaths.append(filepath)
if self.incoming_queue.qsize() == 0:
# Wait momentarily in case more arrive so we can batch process them:
time.sleep(0.1)
# Batch process to decrease number of dataframe concatenations:
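# batch_size grows with the number of shots already loaded (minimum 1), so
# later batches are larger and fewer dataframe concatenations are needed
# overall.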
batch_size = len(self.shots_model.dataframe) // 3 + 1
while True:
try:
filepath = self.incoming_queue.get(False)
except queue.Empty:
break
else:
filepaths.append(filepath)
if len(filepaths) >= batch_size:
break
logger.info('adding:\n%s' % '\n'.join(filepaths))
if n_shots_added == 0:
total_shots = self.incoming_queue.qsize() + len(filepaths)
self.set_add_shots_progress(1, total_shots, "reading shot files")
# Remove duplicates from the list (preserving order) in case the
# client sent the same filepath multiple times:
filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
# We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
dataframes = []
indices_of_files_not_found = []
for i, filepath in enumerate(filepaths):
try:
dataframe = get_dataframe_from_shot(filepath)
dataframes.append(dataframe)
except IOError:
app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
indices_of_files_not_found.append(i)
n_shots_added += 1
shots_remaining = self.incoming_queue.qsize()
total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
if dataframes:
new_row_data = concat_with_padding(*dataframes)
else:
new_row_data = None
# Do not add the shots that were not found on disk. Reverse
# loop so that removing an item doesn't change the indices of
# subsequent removals:
for i in reversed(indices_of_files_not_found):
del filepaths[i]
if filepaths:
self.shots_model.add_files(filepaths, new_row_data)
# Let the analysis loop know to look for new shots:
self.analysis_pending.set()
if shots_remaining == 0:
self.set_add_shots_progress(n_shots_added, total_shots, None)
n_shots_added = 0 # reset our counter for the next batch
except Exception:
# Keep this incoming loop running at all costs, but make the
# otherwise uncaught exception visible to the user:
zprocess.raise_exception_in_thread(sys.exc_info())
def analysis_loop(self):
logger = logging.getLogger('lyse.FileBox.analysis_loop')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
while True:
try:
self.analysis_pending.wait()
self.analysis_pending.clear()
at_least_one_shot_analysed = False
while True:
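# Keep analysing until there are no unanalysed shots left or analysis is
# paused; once all pending singleshot analysis is done (or multishot
# analysis was explicitly requested), run the multishot routines.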
if not self.analysis_paused:
# Find the first shot that has not finished being analysed:
filepath = self.shots_model.get_first_incomplete()
if filepath is not None:
logger.info('analysing: %s'%filepath)
self.do_singleshot_analysis(filepath)
at_least_one_shot_analysed = True
if filepath is None and at_least_one_shot_analysed:
self.multishot_required = True
if filepath is None:
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
else:
logger.info('analysis is paused')
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
except Exception:
etype, value, tb = sys.exc_info()
orig_exception = ''.join(traceback.format_exception_only(etype, value))
message = ('Analysis loop encountered unexpected exception. ' +
'This is a bug and should be reported. The analysis ' +
'loop is continuing, but lyse may be in an inconsistent state. '
'Restart lyse, or continue at your own risk. '
'Original exception was:\n\n' + orig_exception)
# Raise the exception in a thread so we can keep running
zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
self.pause_analysis()
@inmain_decorator()
def pause_analysis(self):
# This automatically triggers the slot that sets self.analysis_paused
self.ui.pushButton_analysis_running.setChecked(True)
def do_singleshot_analysis(self, filepath):
# Check the shot file exists before sending it to the singleshot
# routinebox. This does not guarantee it won't have been deleted by
# the time the routinebox starts running analysis on it, but by
# detecting it now we can usually avoid the user code raising exceptions
# because the file does not exist. That wouldn't be a problem either, but
# this way we avoid polluting the output box with more errors than
# necessary.
if not os.path.exists(filepath):
self.shots_model.mark_as_deleted_off_disk(filepath)
return
self.to_singleshot.put(filepath)
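# The routinebox replies with a stream of ('progress', percent, updated_data)
# messages, ending with ('done', ...) or ('error', ...). Apply any updated
# data to the relevant rows as it arrives: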
while True:
signal, status_percent, updated_data = self.from_singleshot.get()
for file in updated_data:
# Update the data for all the rows with new data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
# Update the status percent for the row on which analysis is actually running:
self.shots_model.update_row(filepath, status_percent=status_percent, dataframe_already_updated=True)
if signal == 'done':
return
if signal == 'error':
if not os.path.exists(filepath):
# Do not pause if the file has been deleted. An error is
# no surprise there:
self.shots_model.mark_as_deleted_off_disk(filepath)
else:
self.pause_analysis()
return
if signal == 'progress':
continue
raise ValueError('invalid signal %s' % str(signal))
def do_multishot_analysis(self):
self.to_multishot.put(None)
while True:
signal, _, updated_data = self.from_multishot.get()
for file in updated_data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
if signal == 'done':
self.multishot_required = False
return
elif signal == 'error':
self.pause_analysis()
return
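# A minimal illustration (not part of lyse's public API; the tuple layout is
# inferred purely from how do_singleshot_analysis() and do_multishot_analysis()
# read the queues above, and the producer side lives in the RoutineBox workers):
#
#     to_singleshot.put('/path/to/shot.h5')
#     from_singleshot.put(('progress', 45.0, {}))                  # still running
#     from_singleshot.put(('done', 100.0, {filepath: row_data}))   # finished; rows to update
#     from_singleshot.put(('error', None, {}))                     # analysis failed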
class Lyse(object):
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'main.ui'), LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionSave_dataframe_as.triggered.connect(lambda: self.on_save_dataframe_triggered(True))
self.ui.actionSave_dataframe.triggered.connect(lambda: self.on_save_dataframe_triggered(False))
self.ui.actionLoad_dataframe.triggered.connect(self.on_load_dataframe_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file, restore_window_geometry)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Load the window geometry now, but then defer the other loading until 50ms
# after the window has shown, so that the GUI pops up faster in the meantime.
try:
self.load_window_geometry_configuration(autoload_config_file)
except Exception:
# ignore error for now and let it be raised again in the call to load_configuration:
restore_window_geometry = True
else:
# Success - skip loading window geometry in load_configuration:
restore_window_geometry = False
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def terminate_all_workers(self):
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.end_child()
def workers_terminated(self):
terminated = {}
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.worker.poll()
terminated[routine.filepath] = routine.worker.returncode is not None
return terminated
def are_you_sure(self):
message = ('Current configuration (which scripts are loaded and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
return True
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
if self.only_window_geometry_is_different(save_data, self.last_save_data):
self.save_configuration(self.last_save_config_file)
self.terminate_all_workers()
return True
elif not self.are_you_sure():
return False
self.terminate_all_workers()
return True
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
try:
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
except LabConfig.NoOptionError:
self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
if not os.path.exists(default_path):
os.makedirs(default_path)
default = os.path.join(default_path, 'lyse.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current lyse configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
ui_keys = ['window_size', 'window_pos', 'splitter', 'splitter_vertical', 'splitter_horizontal']
compare = [current_data[key] == old_data[key] for key in current_data.keys() if key not in ui_keys]
return all(compare)
def get_save_data(self):
save_data = {}
box = self.singleshot_routinebox
save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
box = self.multishot_routinebox
save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
save_data['analysis_paused'] = self.filebox.analysis_paused
window_size = self.ui.size()
save_data['window_size'] = (window_size.width(), window_size.height())
window_pos = self.ui.pos()
save_data['window_pos'] = (window_pos.x(), window_pos.y())
save_data['screen_geometry'] = get_screen_geometry()
save_data['splitter'] = self.ui.splitter.sizes()
save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
return save_data
def save_configuration(self, save_file):
lyse_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
lyse_config.set('lyse_state', key, pprint.pformat(value))
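    # The resulting config file has a single [lyse_state] section whose values
    # are pprint-formatted Python literals, later read back with
    # ast.literal_eval() in load_configuration(). Illustrative contents only
    # (actual values depend on the GUI state):
    #
    #     [lyse_state]
    #     analysis_paused = False
    #     window_size = (1600, 900)
    #     LastFileBoxFolder = '/path/to/shots'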
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select lyse configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename, restore_window_geometry=True):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
lyse_config = LabConfig(filename)
try:
self.singleshot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'SingleShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.singleshot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastSingleShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'MultiShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastMultiShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.filebox.last_opened_shots_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastFileBoxFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
if ast.literal_eval(lyse_config.get('lyse_state', 'analysis_paused')):
self.filebox.pause_analysis()
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
if restore_window_geometry:
self.load_window_geometry_configuration(filename)
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def load_window_geometry_configuration(self, filename):
"""Load only the window geometry from the config file. It's useful to have this
separate from the rest of load_configuration so that it can be called before the
window is shown."""
lyse_config = LabConfig(filename)
try:
screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
else:
# Only restore the window size and position, and splitter
# positions if the screen is the same size/same number of monitors
# etc. This prevents the window moving off the screen if say, the
# position was saved when 2 monitors were plugged in but there is
# only one now, and the splitters may not make sense in light of a
# different window size, so better to fall back to defaults:
current_screen_geometry = get_screen_geometry()
if current_screen_geometry == screen_geometry:
try:
self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
"hdf5_viewer",
"hdf5_viewer_arguments"],
"paths": ["shared_drive",
"experiment_shot_storage",
"analysislib"],
"ports": ["lyse"]
}
self.exp_config = LabConfig(required_params=required_config_params)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
# Keyboard shortcuts:
QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def on_save_dataframe_triggered(self, choose_folder=True):
df = self.filebox.shots_model.dataframe.copy()
if len(df) > 0:
default = self.exp_config.get('paths', 'experiment_shot_storage')
if choose_folder:
save_path = QtWidgets.QFileDialog.getExistingDirectory(self.ui, 'Select a Folder for the Dataframes', default)
if type(save_path) is tuple:
save_path, _ = save_path
if not save_path:
# User cancelled
return
sequences = df.sequence.unique()
for sequence in sequences:
sequence_df = pandas.DataFrame(df[df['sequence'] == sequence], columns=df.columns).dropna(axis=1, how='all')
labscript = sequence_df['labscript'].iloc[0]
filename = "dataframe_{}_{}.msg".format(sequence.to_pydatetime().strftime("%Y%m%dT%H%M%S"),labscript[:-3])
if not choose_folder:
save_path = os.path.dirname(sequence_df['filepath'].iloc[0])
                sequence_df = sequence_df.infer_objects()
                for col in sequence_df.columns:
if sequence_df[col].dtype == object:
sequence_df[col] = pandas.to_numeric(sequence_df[col], errors='ignore')
sequence_df.to_msgpack(os.path.join(save_path, filename))
else:
error_dialog('Dataframe is empty')
def on_load_dataframe_triggered(self):
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'dataframe.msg')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select dataframe file to load',
default,
"dataframe files (*.msg)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
df = pandas.read_msgpack(file).sort_values("run time").reset_index()
# Check for changes in the shot files since the dataframe was exported
def changed_since(filepath, time):
if os.path.isfile(filepath):
return os.path.getmtime(filepath) > time
else:
return False
filepaths = df["filepath"].tolist()
changetime_cache = os.path.getmtime(file)
        need_updating = np.where([changed_since(x, changetime_cache) for x in filepaths])[0]
need_updating = np.sort(need_updating)[::-1] # sort in descending order to not remove the wrong items with pop
        # Reload the files where changes were made since exporting
for index in need_updating:
filepath = filepaths.pop(index)
self.filebox.incoming_queue.put(filepath)
df = df.drop(need_updating)
self.filebox.shots_model.add_files(filepaths, df, done=True)
def delete_items(self, confirm):
"""Delete items from whichever box has focus, with optional confirmation
dialog"""
if self.filebox.ui.tableView.hasFocus():
self.filebox.shots_model.remove_selection(confirm)
if self.singleshot_routinebox.ui.treeView.hasFocus():
self.singleshot_routinebox.remove_selection(confirm)
if self.multishot_routinebox.ui.treeView.hasFocus():
self.multishot_routinebox.remove_selection(confirm)
if __name__ == "__main__":
logger = setup_logging('lyse')
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
qapplication = QtWidgets.QApplication(sys.argv)
qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
app = Lyse()
# Start the web server:
splash.update_text('starting analysis server')
server = WebServer(app.port)
splash.update_text('done')
# Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
timer = QtCore.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None) # Let the interpreter run each 500 ms.
# Upon seeing a ctrl-c interrupt, quit the event loop
signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
splash.hide()
qapplication.exec_()
server.shutdown()
|
py | 1a5568bf968bf99f2631e8351eb15102a9269fe5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
import os
from pyccel.codegen.printing.fcode import FCodePrinter
from pyccel.codegen.printing.ccode import CCodePrinter
from pyccel.codegen.printing.pycode import PythonCodePrinter
from pyccel.ast.core import FunctionDef, Interface, ModuleHeader
from pyccel.errors.errors import Errors
from pyccel.utilities.stage import PyccelStage
_extension_registry = {'fortran': 'f90', 'c':'c', 'python':'py'}
_header_extension_registry = {'fortran': None, 'c':'h', 'python':None}
printer_registry = {
'fortran':FCodePrinter,
'c':CCodePrinter,
'python':PythonCodePrinter
}
pyccel_stage = PyccelStage()
class Codegen(object):
"""Abstract class for code generator."""
def __init__(self, parser, name):
"""Constructor for Codegen.
parser: pyccel parser
name: str
name of the generated module or program.
"""
pyccel_stage.set_stage('codegen')
self._parser = parser
self._ast = parser.ast
self._name = name
self._printer = None
self._language = None
        # TODO: verify that the module name differs from any function name;
        #       a name clash generates a compilation error
self._stmts = {}
_structs = [
'imports',
'body',
'routines',
'classes',
'modules',
'variables',
'interfaces',
]
for key in _structs:
self._stmts[key] = []
self._collect_statements()
self._is_program = self.ast.program is not None
@property
def parser(self):
return self._parser
@property
def name(self):
"""Returns the name associated to the source code"""
return self._name
@property
def imports(self):
"""Returns the imports of the source code."""
return self._stmts['imports']
@property
def variables(self):
"""Returns the variables of the source code."""
return self._stmts['variables']
@property
def body(self):
"""Returns the body of the source code, if it is a Program or Module."""
return self._stmts['body']
@property
def routines(self):
"""Returns functions/subroutines."""
return self._stmts['routines']
@property
def classes(self):
"""Returns the classes if Module."""
return self._stmts['classes']
@property
def interfaces(self):
"""Returns the interfaces."""
return self._stmts['interfaces']
@property
def modules(self):
"""Returns the modules if Program."""
return self._stmts['modules']
@property
def is_program(self):
"""Returns True if a Program."""
return self._is_program
@property
def ast(self):
"""Returns the AST."""
return self._ast
@property
def language(self):
"""Returns the used language"""
return self._language
def set_printer(self, **settings):
""" Set the current codeprinter instance"""
        # Get the target language (default is fortran)
language = settings.pop('language', 'fortran')
# Set language
        if language not in ['fortran', 'c', 'python']:
raise ValueError('{} language is not available'.format(language))
self._language = language
# instantiate codePrinter
code_printer = printer_registry[language]
errors = Errors()
errors.set_parser_stage('codegen')
# set the code printer
self._printer = code_printer(self.parser.filename, **settings)
def get_printer_imports(self):
"""return the imports of the current codeprinter"""
return self._printer.get_additional_imports()
def _collect_statements(self):
"""Collects statements and split them into routines, classes, etc."""
scope = self.parser.scope
funcs = []
interfaces = []
for i in scope.functions.values():
if isinstance(i, FunctionDef) and not i.is_header:
funcs.append(i)
elif isinstance(i, Interface):
interfaces.append(i)
self._stmts['imports' ] = list(scope.imports['imports'].values())
self._stmts['variables' ] = list(self.parser.get_variables(scope))
self._stmts['routines' ] = funcs
self._stmts['classes' ] = list(scope.classes.values())
self._stmts['interfaces'] = interfaces
self._stmts['body'] = self.ast
def doprint(self, **settings):
"""Prints the code in the target language."""
if not self._printer:
self.set_printer(**settings)
return self._printer.doprint(self.ast)
def export(self, filename=None, **settings):
"""Export code in filename"""
self.set_printer(**settings)
ext = _extension_registry[self._language]
header_ext = _header_extension_registry[self._language]
if filename is None: filename = self.name
header_filename = '{name}.{ext}'.format(name=filename, ext=header_ext)
filename = '{name}.{ext}'.format(name=filename, ext=ext)
# print module header
if header_ext is not None:
code = self._printer.doprint(ModuleHeader(self.ast))
with open(header_filename, 'w') as f:
for line in code:
f.write(line)
# print module
code = self._printer.doprint(self.ast)
with open(filename, 'w') as f:
for line in code:
f.write(line)
# print program
prog_filename = None
if self.is_program and self.language != 'python':
folder = os.path.dirname(filename)
fname = os.path.basename(filename)
prog_filename = os.path.join(folder,"prog_"+fname)
code = self._printer.doprint(self.ast.program)
with open(prog_filename, 'w') as f:
for line in code:
f.write(line)
return filename, prog_filename
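# A minimal usage sketch (illustrative only; assumes `parser` is a pyccel
# parser object that has already completed the semantic stage, which is not
# shown here):
#
#     codegen = Codegen(parser, 'my_module')
#     code = codegen.doprint(language='fortran')         # generated code as a string
#     fname, prog_fname = codegen.export(language='c')   # writes my_module.c and my_module.h
#
# export() returns prog_fname as None unless the AST contains a program and the
# target language is not python.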
|
py | 1a556af0a5aa3ab086497c9ccc06e425c6a320b5 | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **config)
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **config)
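# A minimal usage sketch (hypothetical; this mixin is normally reached through
# DocumentArray's storage dispatch rather than instantiated directly, and the
# exact entry point is an assumption rather than something defined in this
# module):
#
#     from docarray import DocumentArray
#     da = DocumentArray(storage='annlite', config={'n_dim': 128, 'metric': 'cosine'})
#
# Leaving `data_path` unset makes the store non-persistent: _init_storage()
# then points AnnLite at a temporary directory.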
|
py | 1a556c6062c6302042ef4fa01dee1334d50c1742 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Editor Widget"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import logging
import os
import os.path as osp
import sys
import functools
import unicodedata
# Third party imports
import qstylizer
from qtpy.compat import getsavefilename
from qtpy.QtCore import (QByteArray, QFileInfo, QPoint, QSize, Qt, QTimer,
Signal, Slot)
from qtpy.QtGui import QFont
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QHBoxLayout,
QLabel, QMainWindow, QMessageBox, QMenu,
QSplitter, QVBoxLayout, QWidget, QListWidget,
QListWidgetItem, QSizePolicy, QToolBar)
# Local imports
from spyder.api.panel import Panel
from spyder.config.base import _, running_under_pytest
from spyder.config.manager import CONF
from spyder.config.utils import (get_edit_filetypes, get_edit_filters,
get_filter, is_kde_desktop, is_anaconda)
from spyder.plugins.editor.utils.autosave import AutosaveForStack
from spyder.plugins.editor.utils.editor import get_file_language
from spyder.plugins.editor.utils.switcher import EditorSwitcherManager
from spyder.plugins.editor.widgets import codeeditor
from spyder.plugins.editor.widgets.editorstack_helpers import (
ThreadManager, FileInfo, StackHistory)
from spyder.plugins.editor.widgets.status import (CursorPositionStatus,
EncodingStatus, EOLStatus,
ReadWriteStatus, VCSStatus)
from spyder.plugins.explorer.widgets.explorer import (
show_in_external_file_explorer)
from spyder.plugins.outlineexplorer.widgets import OutlineExplorerWidget
from spyder.plugins.outlineexplorer.editor import OutlineExplorerProxyEditor
from spyder.plugins.outlineexplorer.api import cell_name
from spyder.py3compat import qbytearray_to_str, to_text_string
from spyder.utils import encoding, sourcecode, syntaxhighlighters
from spyder.utils.icon_manager import ima
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (add_actions, create_action,
create_toolbutton, MENU_SEPARATOR,
mimedata2url, set_menu_icons,
create_waitspinner)
from spyder.utils.stylesheet import (
APP_STYLESHEET, APP_TOOLBAR_STYLESHEET, PANES_TABBAR_STYLESHEET)
from spyder.widgets.findreplace import FindReplace
from spyder.widgets.tabs import BaseTabs
logger = logging.getLogger(__name__)
class TabSwitcherWidget(QListWidget):
"""Show tabs in mru order and change between them."""
def __init__(self, parent, stack_history, tabs):
QListWidget.__init__(self, parent)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog)
self.editor = parent
self.stack_history = stack_history
self.tabs = tabs
self.setSelectionMode(QListWidget.SingleSelection)
self.itemActivated.connect(self.item_selected)
self.id_list = []
self.load_data()
size = CONF.get('main', 'completion/size')
self.resize(*size)
self.set_dialog_position()
self.setCurrentRow(0)
CONF.config_shortcut(lambda: self.select_row(-1), context='Editor',
name='Go to previous file', parent=self)
CONF.config_shortcut(lambda: self.select_row(1), context='Editor',
name='Go to next file', parent=self)
def load_data(self):
"""Fill ListWidget with the tabs texts.
Add elements in inverse order of stack_history.
"""
for index in reversed(self.stack_history):
text = self.tabs.tabText(index)
text = text.replace('&', '')
item = QListWidgetItem(ima.icon('TextFileIcon'), text)
self.addItem(item)
def item_selected(self, item=None):
"""Change to the selected document and hide this widget."""
if item is None:
item = self.currentItem()
# stack history is in inverse order
try:
index = self.stack_history[-(self.currentRow()+1)]
except IndexError:
pass
else:
self.editor.set_stack_index(index)
self.editor.current_changed(index)
self.hide()
def select_row(self, steps):
"""Move selected row a number of steps.
Iterates in a cyclic behaviour.
"""
row = (self.currentRow() + steps) % self.count()
self.setCurrentRow(row)
def set_dialog_position(self):
"""Positions the tab switcher in the top-center of the editor."""
left = self.editor.geometry().width()/2 - self.width()/2
top = (self.editor.tabs.tabBar().geometry().height() +
self.editor.fname_label.geometry().height())
self.move(self.editor.mapToGlobal(QPoint(left, top)))
def keyReleaseEvent(self, event):
"""Reimplement Qt method.
Handle "most recent used" tab behavior,
When ctrl is released and tab_switcher is visible, tab will be changed.
"""
if self.isVisible():
qsc = CONF.get_shortcut(context='Editor', name='Go to next file')
for key in qsc.split('+'):
key = key.lower()
if ((key == 'ctrl' and event.key() == Qt.Key_Control) or
(key == 'alt' and event.key() == Qt.Key_Alt)):
self.item_selected()
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method to allow cyclic behavior."""
if event.key() == Qt.Key_Down:
self.select_row(1)
elif event.key() == Qt.Key_Up:
self.select_row(-1)
def focusOutEvent(self, event):
"""Reimplement Qt method to close the widget when loosing focus."""
event.ignore()
if sys.platform == "darwin":
if event.reason() != Qt.ActiveWindowFocusReason:
self.close()
else:
self.close()
class EditorStack(QWidget):
reset_statusbar = Signal()
readonly_changed = Signal(bool)
encoding_changed = Signal(str)
sig_editor_cursor_position_changed = Signal(int, int)
sig_refresh_eol_chars = Signal(str)
sig_refresh_formatting = Signal(bool)
starting_long_process = Signal(str)
ending_long_process = Signal(str)
redirect_stdio = Signal(bool)
exec_in_extconsole = Signal(str, bool)
run_cell_in_ipyclient = Signal(str, object, str, bool)
debug_cell_in_ipyclient = Signal(str, object, str, bool)
update_plugin_title = Signal()
editor_focus_changed = Signal()
zoom_in = Signal()
zoom_out = Signal()
zoom_reset = Signal()
sig_open_file = Signal(dict)
sig_close_file = Signal(str, str)
file_saved = Signal(str, str, str)
file_renamed_in_data = Signal(str, str, str)
opened_files_list_changed = Signal()
active_languages_stats = Signal(set)
todo_results_changed = Signal()
update_code_analysis_actions = Signal()
refresh_file_dependent_actions = Signal()
refresh_save_all_action = Signal()
sig_breakpoints_saved = Signal()
text_changed_at = Signal(str, int)
current_file_changed = Signal(str, int, int, int)
plugin_load = Signal((str,), ())
edit_goto = Signal(str, int, str)
sig_split_vertically = Signal()
sig_split_horizontally = Signal()
sig_new_file = Signal((str,), ())
sig_save_as = Signal()
sig_prev_edit_pos = Signal()
sig_prev_cursor = Signal()
sig_next_cursor = Signal()
sig_prev_warning = Signal()
sig_next_warning = Signal()
sig_go_to_definition = Signal(str, int, int)
sig_perform_completion_request = Signal(str, str, dict)
sig_option_changed = Signal(str, object) # config option needs changing
sig_save_bookmark = Signal(int)
sig_load_bookmark = Signal(int)
sig_save_bookmarks = Signal(str, str)
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Dictionary required by the Help pane to render a docstring.
Examples
--------
>>> help_data = {
'obj_text': str,
'name': str,
'argspec': str,
'note': str,
'docstring': str,
'force_refresh': bool,
'path': str,
}
See Also
--------
:py:meth:spyder.plugins.editor.widgets.editor.EditorStack.send_to_help
"""
def __init__(self, parent, actions):
QWidget.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.threadmanager = ThreadManager(self)
self.new_window = False
self.horsplit_action = None
self.versplit_action = None
self.close_action = None
self.__get_split_actions()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.menu = None
self.switcher_dlg = None
self.switcher_manager = None
self.tabs = None
self.tabs_switcher = None
self.stack_history = StackHistory(self)
# External panels
self.external_panels = []
self.setup_editorstack(parent, layout)
self.find_widget = None
self.data = []
switcher_action = create_action(
self,
_("File switcher..."),
icon=ima.icon('filelist'),
triggered=self.open_switcher_dlg)
symbolfinder_action = create_action(
self,
_("Find symbols in file..."),
icon=ima.icon('symbol_find'),
triggered=self.open_symbolfinder_dlg)
copy_to_cb_action = create_action(self, _("Copy path to clipboard"),
icon=ima.icon('editcopy'),
triggered=lambda:
QApplication.clipboard().setText(self.get_current_filename()))
close_right = create_action(self, _("Close all to the right"),
triggered=self.close_all_right)
close_all_but_this = create_action(self, _("Close all but this"),
triggered=self.close_all_but_this)
sort_tabs = create_action(self, _("Sort tabs alphabetically"),
triggered=self.sort_file_tabs_alphabetically)
if sys.platform == 'darwin':
text = _("Show in Finder")
else:
text = _("Show in external file explorer")
external_fileexp_action = create_action(
self, text,
triggered=self.show_in_external_file_explorer,
shortcut=CONF.get_shortcut(context="Editor",
name="show in external file explorer"),
context=Qt.WidgetShortcut)
self.menu_actions = actions + [external_fileexp_action,
None, switcher_action,
symbolfinder_action,
copy_to_cb_action, None, close_right,
close_all_but_this, sort_tabs]
self.outlineexplorer = None
self.is_closable = False
self.new_action = None
self.open_action = None
self.save_action = None
self.revert_action = None
self.tempfile_path = None
self.title = _("Editor")
self.todolist_enabled = True
self.is_analysis_done = False
self.linenumbers_enabled = True
self.blanks_enabled = False
self.scrollpastend_enabled = False
self.edgeline_enabled = True
self.edgeline_columns = (79,)
self.close_parentheses_enabled = True
self.close_quotes_enabled = True
self.add_colons_enabled = True
self.auto_unindent_enabled = True
self.indent_chars = " "*4
self.tab_stop_width_spaces = 4
self.show_class_func_dropdown = False
self.help_enabled = False
self.default_font = None
self.wrap_enabled = False
self.tabmode_enabled = False
self.stripmode_enabled = False
self.intelligent_backspace_enabled = True
self.automatic_completions_enabled = True
self.automatic_completion_chars = 3
self.automatic_completion_ms = 300
self.completions_hint_enabled = True
self.completions_hint_after_ms = 500
self.hover_hints_enabled = True
self.format_on_save = False
self.code_snippets_enabled = True
self.code_folding_enabled = True
self.underline_errors_enabled = False
self.highlight_current_line_enabled = False
self.highlight_current_cell_enabled = False
self.occurrence_highlighting_enabled = True
self.occurrence_highlighting_timeout = 1500
self.checkeolchars_enabled = True
self.always_remove_trailing_spaces = False
self.add_newline = False
self.remove_trailing_newlines = False
self.convert_eol_on_save = False
self.convert_eol_on_save_to = 'LF'
self.focus_to_editor = True
self.run_cell_copy = False
self.create_new_file_if_empty = True
self.indent_guides = False
ccs = 'spyder/dark'
if ccs not in syntaxhighlighters.COLOR_SCHEME_NAMES:
ccs = syntaxhighlighters.COLOR_SCHEME_NAMES[0]
self.color_scheme = ccs
self.__file_status_flag = False
# Real-time code analysis
self.analysis_timer = QTimer(self)
self.analysis_timer.setSingleShot(True)
self.analysis_timer.setInterval(1000)
self.analysis_timer.timeout.connect(self.analyze_script)
# Update filename label
self.editor_focus_changed.connect(self.update_fname_label)
# Accepting drops
self.setAcceptDrops(True)
# Local shortcuts
self.shortcuts = self.create_shortcuts()
# For opening last closed tabs
self.last_closed_files = []
# Reference to save msgbox and avoid memory to be freed.
self.msgbox = None
# File types and filters used by the Save As dialog
self.edit_filetypes = None
self.edit_filters = None
# For testing
self.save_dialog_on_tests = not running_under_pytest()
        # Autosave component
self.autosave = AutosaveForStack(self)
self.last_cell_call = None
@Slot()
def show_in_external_file_explorer(self, fnames=None):
"""Show file in external file explorer"""
if fnames is None:
fnames = self.get_current_filename()
try:
show_in_external_file_explorer(fnames)
except FileNotFoundError as error:
file = str(error).split("'")[1]
if "xdg-open" in file:
msg_title = _("Warning")
msg = _("Spyder can't show this file in the external file "
"explorer because the <tt>xdg-utils</tt> package is "
"not available on your system.")
QMessageBox.information(self, msg_title, msg,
QMessageBox.Ok)
def create_shortcuts(self):
"""Create local shortcuts"""
# --- Configurable shortcuts
inspect = CONF.config_shortcut(
self.inspect_current_object,
context='Editor',
name='Inspect current object',
parent=self)
set_breakpoint = CONF.config_shortcut(
self.set_or_clear_breakpoint,
context='Editor',
name='Breakpoint',
parent=self)
set_cond_breakpoint = CONF.config_shortcut(
self.set_or_edit_conditional_breakpoint,
context='Editor',
name='Conditional breakpoint',
parent=self)
gotoline = CONF.config_shortcut(
self.go_to_line,
context='Editor',
name='Go to line',
parent=self)
tab = CONF.config_shortcut(
lambda: self.tab_navigation_mru(forward=False),
context='Editor',
name='Go to previous file',
parent=self)
tabshift = CONF.config_shortcut(
self.tab_navigation_mru,
context='Editor',
name='Go to next file',
parent=self)
prevtab = CONF.config_shortcut(
lambda: self.tabs.tab_navigate(-1),
context='Editor',
name='Cycle to previous file',
parent=self)
nexttab = CONF.config_shortcut(
lambda: self.tabs.tab_navigate(1),
context='Editor',
name='Cycle to next file',
parent=self)
run_selection = CONF.config_shortcut(
self.run_selection,
context='Editor',
name='Run selection',
parent=self)
new_file = CONF.config_shortcut(
lambda: self.sig_new_file[()].emit(),
context='Editor',
name='New file',
parent=self)
open_file = CONF.config_shortcut(
lambda: self.plugin_load[()].emit(),
context='Editor',
name='Open file',
parent=self)
save_file = CONF.config_shortcut(
self.save,
context='Editor',
name='Save file',
parent=self)
save_all = CONF.config_shortcut(
self.save_all,
context='Editor',
name='Save all',
parent=self)
save_as = CONF.config_shortcut(
lambda: self.sig_save_as.emit(),
context='Editor',
name='Save As',
parent=self)
close_all = CONF.config_shortcut(
self.close_all_files,
context='Editor',
name='Close all',
parent=self)
prev_edit_pos = CONF.config_shortcut(
lambda: self.sig_prev_edit_pos.emit(),
context="Editor",
name="Last edit location",
parent=self)
prev_cursor = CONF.config_shortcut(
lambda: self.sig_prev_cursor.emit(),
context="Editor",
name="Previous cursor position",
parent=self)
next_cursor = CONF.config_shortcut(
lambda: self.sig_next_cursor.emit(),
context="Editor",
name="Next cursor position",
parent=self)
zoom_in_1 = CONF.config_shortcut(
lambda: self.zoom_in.emit(),
context="Editor",
name="zoom in 1",
parent=self)
zoom_in_2 = CONF.config_shortcut(
lambda: self.zoom_in.emit(),
context="Editor",
name="zoom in 2",
parent=self)
zoom_out = CONF.config_shortcut(
lambda: self.zoom_out.emit(),
context="Editor",
name="zoom out",
parent=self)
zoom_reset = CONF.config_shortcut(
lambda: self.zoom_reset.emit(),
context="Editor",
name="zoom reset",
parent=self)
close_file_1 = CONF.config_shortcut(
self.close_file,
context="Editor",
name="close file 1",
parent=self)
close_file_2 = CONF.config_shortcut(
self.close_file,
context="Editor",
name="close file 2",
parent=self)
run_cell = CONF.config_shortcut(
self.run_cell,
context="Editor",
name="run cell",
parent=self)
debug_cell = CONF.config_shortcut(
self.debug_cell,
context="Editor",
name="debug cell",
parent=self)
run_cell_and_advance = CONF.config_shortcut(
self.run_cell_and_advance,
context="Editor",
name="run cell and advance",
parent=self)
go_to_next_cell = CONF.config_shortcut(
self.advance_cell,
context="Editor",
name="go to next cell",
parent=self)
go_to_previous_cell = CONF.config_shortcut(
lambda: self.advance_cell(reverse=True),
context="Editor",
name="go to previous cell",
parent=self)
re_run_last_cell = CONF.config_shortcut(
self.re_run_last_cell,
context="Editor",
name="re-run last cell",
parent=self)
prev_warning = CONF.config_shortcut(
lambda: self.sig_prev_warning.emit(),
context="Editor",
name="Previous warning",
parent=self)
next_warning = CONF.config_shortcut(
lambda: self.sig_next_warning.emit(),
context="Editor",
name="Next warning",
parent=self)
split_vertically = CONF.config_shortcut(
lambda: self.sig_split_vertically.emit(),
context="Editor",
name="split vertically",
parent=self)
split_horizontally = CONF.config_shortcut(
lambda: self.sig_split_horizontally.emit(),
context="Editor",
name="split horizontally",
parent=self)
close_split = CONF.config_shortcut(
self.close_split,
context="Editor",
name="close split panel",
parent=self)
external_fileexp = CONF.config_shortcut(
self.show_in_external_file_explorer,
context="Editor",
name="show in external file explorer",
parent=self)
# Return configurable ones
return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab,
tabshift, run_selection, new_file, open_file, save_file,
save_all, save_as, close_all, prev_edit_pos, prev_cursor,
next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset,
close_file_1, close_file_2, run_cell, debug_cell,
run_cell_and_advance,
go_to_next_cell, go_to_previous_cell, re_run_last_cell,
prev_warning, next_warning, split_vertically,
split_horizontally, close_split,
prevtab, nexttab, external_fileexp]
def get_shortcut_data(self):
"""
Returns shortcut data, a list of tuples (shortcut, text, default)
shortcut (QShortcut or QAction instance)
text (string): action/shortcut description
default (string): default key sequence
"""
return [sc.data for sc in self.shortcuts]
def setup_editorstack(self, parent, layout):
"""Setup editorstack's layout"""
layout.setSpacing(0)
# Create filename label, spinner and the toolbar that contains them
self.create_top_widgets()
# Add top toolbar
layout.addWidget(self.top_toolbar)
# Tabbar
menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
tip=_('Options'))
menu_btn.setStyleSheet(str(PANES_TABBAR_STYLESHEET))
self.menu = QMenu(self)
menu_btn.setMenu(self.menu)
menu_btn.setPopupMode(menu_btn.InstantPopup)
self.menu.aboutToShow.connect(self.__setup_menu)
corner_widgets = {Qt.TopRightCorner: [menu_btn]}
self.tabs = BaseTabs(self, menu=self.menu, menu_use_tooltips=True,
corner_widgets=corner_widgets)
self.tabs.set_close_function(self.close_file)
self.tabs.tabBar().tabMoved.connect(self.move_editorstack_data)
self.tabs.setMovable(True)
self.stack_history.refresh()
if hasattr(self.tabs, 'setDocumentMode') \
and not sys.platform == 'darwin':
# Don't set document mode to true on OSX because it generates
# a crash when the editor is detached from the main window
# Fixes spyder-ide/spyder#561.
self.tabs.setDocumentMode(True)
self.tabs.currentChanged.connect(self.current_changed)
tab_container = QWidget()
tab_container.setObjectName('tab-container')
tab_layout = QHBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabs)
layout.addWidget(tab_container)
# Show/hide icons in plugin menus for Mac
if sys.platform == 'darwin':
self.menu.aboutToHide.connect(
lambda menu=self.menu:
set_menu_icons(menu, False))
def create_top_widgets(self):
# Filename label
self.fname_label = QLabel()
# Spacer
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
# Spinner
self.spinner = create_waitspinner(size=16, parent=self.fname_label)
# Add widgets to toolbar
self.top_toolbar = QToolBar(self)
self.top_toolbar.addWidget(self.fname_label)
self.top_toolbar.addWidget(spacer)
self.top_toolbar.addWidget(self.spinner)
# Set toolbar style
css = qstylizer.style.StyleSheet()
css.QToolBar.setValues(
margin='0px',
padding='4px',
borderBottom=f'1px solid {QStylePalette.COLOR_BACKGROUND_4}'
)
self.top_toolbar.setStyleSheet(css.toString())
def hide_tooltip(self):
"""Hide any open tooltips."""
for finfo in self.data:
finfo.editor.hide_tooltip()
@Slot()
def update_fname_label(self):
"""Update file name label."""
filename = to_text_string(self.get_current_filename())
if len(filename) > 100:
shorten_filename = u'...' + filename[-100:]
else:
shorten_filename = filename
self.fname_label.setText(shorten_filename)
def add_corner_widgets_to_tabbar(self, widgets):
self.tabs.add_corner_widgets(widgets)
@Slot()
def close_split(self):
"""Closes the editorstack if it is not the last one opened."""
if self.is_closable:
self.close()
def closeEvent(self, event):
"""Overrides QWidget closeEvent()."""
self.threadmanager.close_all_threads()
self.analysis_timer.timeout.disconnect(self.analyze_script)
# Remove editor references from the outline explorer settings
if self.outlineexplorer is not None:
for finfo in self.data:
self.outlineexplorer.remove_editor(finfo.editor.oe_proxy)
for finfo in self.data:
if not finfo.editor.is_cloned:
finfo.editor.notify_close()
QWidget.closeEvent(self, event)
def clone_editor_from(self, other_finfo, set_current):
fname = other_finfo.filename
enc = other_finfo.encoding
new = other_finfo.newly_created
finfo = self.create_new_editor(fname, enc, "",
set_current=set_current, new=new,
cloned_from=other_finfo.editor)
finfo.set_todo_results(other_finfo.todo_results)
return finfo.editor
def clone_from(self, other):
"""Clone EditorStack from other instance"""
for other_finfo in other.data:
self.clone_editor_from(other_finfo, set_current=True)
self.set_stack_index(other.get_stack_index())
@Slot()
@Slot(str)
def open_switcher_dlg(self, initial_text=''):
"""Open file list management dialog box"""
if not self.tabs.count():
return
if self.switcher_dlg is not None and self.switcher_dlg.isVisible():
self.switcher_dlg.hide()
self.switcher_dlg.clear()
return
if self.switcher_dlg is None:
from spyder.widgets.switcher import Switcher
self.switcher_dlg = Switcher(self)
self.switcher_manager = EditorSwitcherManager(
self.get_plugin(),
self.switcher_dlg,
lambda: self.get_current_editor(),
lambda: self,
section=self.get_plugin_title())
self.switcher_dlg.set_search_text(initial_text)
self.switcher_dlg.setup()
self.switcher_dlg.show()
# Note: the +1 pixel on the top makes it look better
delta_top = (self.tabs.tabBar().geometry().height() +
self.fname_label.geometry().height() + 1)
self.switcher_dlg.set_position(delta_top)
@Slot()
def open_symbolfinder_dlg(self):
self.open_switcher_dlg(initial_text='@')
def get_plugin(self):
"""Get the plugin of the parent widget."""
# Needed for the editor stack to use its own switcher instance.
# See spyder-ide/spyder#10684.
return self.parent().plugin
def get_plugin_title(self):
"""Get the plugin title of the parent widget."""
# Needed for the editor stack to use its own switcher instance.
# See spyder-ide/spyder#9469.
return self.get_plugin().get_plugin_title()
def go_to_line(self, line=None):
"""Go to line dialog"""
if line is not None:
            # When this method is called from the file switcher, a line
# number is specified, so there is no need for the dialog.
self.get_current_editor().go_to_line(line)
else:
if self.data:
self.get_current_editor().exec_gotolinedialog()
def set_or_clear_breakpoint(self):
"""Set/clear breakpoint"""
if self.data:
editor = self.get_current_editor()
editor.debugger.toogle_breakpoint()
def set_or_edit_conditional_breakpoint(self):
"""Set conditional breakpoint"""
if self.data:
editor = self.get_current_editor()
editor.debugger.toogle_breakpoint(edit_condition=True)
def set_bookmark(self, slot_num):
"""Bookmark current position to given slot."""
if self.data:
editor = self.get_current_editor()
editor.add_bookmark(slot_num)
def inspect_current_object(self, pos=None):
"""Inspect current object in the Help plugin"""
editor = self.get_current_editor()
editor.sig_display_object_info.connect(self.display_help)
cursor = None
offset = editor.get_position('cursor')
if pos:
cursor = editor.get_last_hover_cursor()
if cursor:
offset = cursor.position()
else:
return
line, col = editor.get_cursor_line_column(cursor)
editor.request_hover(line, col, offset,
show_hint=False, clicked=bool(pos))
@Slot(str, bool)
def display_help(self, help_text, clicked):
editor = self.get_current_editor()
if clicked:
name = editor.get_last_hover_word()
else:
name = editor.get_current_word(help_req=True)
try:
editor.sig_display_object_info.disconnect(self.display_help)
except TypeError:
# Needed to prevent an error after some time in idle.
# See spyder-ide/spyder#11228
pass
self.send_to_help(name, help_text, force=True)
# ------ Editor Widget Settings
def set_closable(self, state):
"""Parent widget must handle the closable state"""
self.is_closable = state
def set_io_actions(self, new_action, open_action,
save_action, revert_action):
self.new_action = new_action
self.open_action = open_action
self.save_action = save_action
self.revert_action = revert_action
def set_find_widget(self, find_widget):
self.find_widget = find_widget
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
def add_outlineexplorer_button(self, editor_plugin):
oe_btn = create_toolbutton(editor_plugin)
oe_btn.setDefaultAction(self.outlineexplorer.visibility_action)
self.add_corner_widgets_to_tabbar([5, oe_btn])
def set_tempfile_path(self, path):
self.tempfile_path = path
def set_title(self, text):
self.title = text
def set_classfunc_dropdown_visible(self, state):
self.show_class_func_dropdown = state
if self.data:
for finfo in self.data:
if finfo.editor.is_python_like():
finfo.editor.classfuncdropdown.setVisible(state)
def __update_editor_margins(self, editor):
editor.linenumberarea.setup_margins(
linenumbers=self.linenumbers_enabled, markers=self.has_markers())
def has_markers(self):
"""Return True if this editorstack has a marker margin for TODOs or
code analysis"""
return self.todolist_enabled
def set_todolist_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'todo_list')
self.todolist_enabled = state
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
finfo.cleanup_todo_results()
if state and current_finfo is not None:
if current_finfo is not finfo:
finfo.run_todo_finder()
def set_linenumbers_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'line_numbers')
self.linenumbers_enabled = state
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
def set_blanks_enabled(self, state):
self.blanks_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_blanks_enabled(state)
def set_scrollpastend_enabled(self, state):
self.scrollpastend_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_scrollpastend_enabled(state)
def set_edgeline_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'edge_line')
self.edgeline_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.edge_line.set_enabled(state)
def set_edgeline_columns(self, columns):
# CONF.get(self.CONF_SECTION, 'edge_line_column')
self.edgeline_columns = columns
if self.data:
for finfo in self.data:
finfo.editor.edge_line.set_columns(columns)
def set_indent_guides(self, state):
self.indent_guides = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_identation_guides(state)
def set_close_parentheses_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_parentheses')
self.close_parentheses_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_parentheses_enabled(state)
def set_close_quotes_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_quotes')
self.close_quotes_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_quotes_enabled(state)
def set_add_colons_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'add_colons')
self.add_colons_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_add_colons_enabled(state)
def set_auto_unindent_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'auto_unindent')
self.auto_unindent_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_auto_unindent_enabled(state)
def set_indent_chars(self, indent_chars):
# CONF.get(self.CONF_SECTION, 'indent_chars')
indent_chars = indent_chars[1:-1] # removing the leading/ending '*'
self.indent_chars = indent_chars
if self.data:
for finfo in self.data:
finfo.editor.set_indent_chars(indent_chars)
def set_tab_stop_width_spaces(self, tab_stop_width_spaces):
# CONF.get(self.CONF_SECTION, 'tab_stop_width')
self.tab_stop_width_spaces = tab_stop_width_spaces
if self.data:
for finfo in self.data:
finfo.editor.tab_stop_width_spaces = tab_stop_width_spaces
finfo.editor.update_tab_stop_width_spaces()
def set_help_enabled(self, state):
self.help_enabled = state
def set_default_font(self, font, color_scheme=None):
self.default_font = font
if color_scheme is not None:
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_font(font, color_scheme)
def set_color_scheme(self, color_scheme):
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_color_scheme(color_scheme)
def set_wrap_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'wrap')
self.wrap_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_wrap_mode(state)
def set_tabmode_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'tab_always_indent')
self.tabmode_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_tab_mode(state)
def set_stripmode_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'strip_trailing_spaces_on_modify')
self.stripmode_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_strip_mode(state)
def set_intelligent_backspace_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'intelligent_backspace')
self.intelligent_backspace_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_intelligent_backspace(state)
def set_code_snippets_enabled(self, state):
self.code_snippets_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_code_snippets(state)
def set_code_folding_enabled(self, state):
self.code_folding_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_code_folding(state)
def set_automatic_completions_enabled(self, state):
self.automatic_completions_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_automatic_completions(state)
def set_automatic_completions_after_chars(self, chars):
self.automatic_completion_chars = chars
if self.data:
for finfo in self.data:
finfo.editor.set_automatic_completions_after_chars(chars)
def set_automatic_completions_after_ms(self, ms):
self.automatic_completion_ms = ms
if self.data:
for finfo in self.data:
finfo.editor.set_automatic_completions_after_ms(ms)
def set_completions_hint_enabled(self, state):
self.completions_hint_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_completions_hint(state)
def set_completions_hint_after_ms(self, ms):
self.completions_hint_after_ms = ms
if self.data:
for finfo in self.data:
finfo.editor.set_completions_hint_after_ms(ms)
def set_hover_hints_enabled(self, state):
self.hover_hints_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_hover_hints(state)
def set_format_on_save(self, state):
self.format_on_save = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_format_on_save(state)
def set_occurrence_highlighting_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'occurrence_highlighting')
self.occurrence_highlighting_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_occurrence_highlighting(state)
def set_occurrence_highlighting_timeout(self, timeout):
# CONF.get(self.CONF_SECTION, 'occurrence_highlighting/timeout')
self.occurrence_highlighting_timeout = timeout
if self.data:
for finfo in self.data:
finfo.editor.set_occurrence_timeout(timeout)
def set_underline_errors_enabled(self, state):
self.underline_errors_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_underline_errors_enabled(state)
def set_highlight_current_line_enabled(self, state):
self.highlight_current_line_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_line(state)
def set_highlight_current_cell_enabled(self, state):
self.highlight_current_cell_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_cell(state)
def set_checkeolchars_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'check_eol_chars')
self.checkeolchars_enabled = state
def set_always_remove_trailing_spaces(self, state):
# CONF.get(self.CONF_SECTION, 'always_remove_trailing_spaces')
self.always_remove_trailing_spaces = state
if self.data:
for finfo in self.data:
finfo.editor.set_remove_trailing_spaces(state)
def set_add_newline(self, state):
self.add_newline = state
if self.data:
for finfo in self.data:
finfo.editor.set_add_newline(state)
def set_remove_trailing_newlines(self, state):
self.remove_trailing_newlines = state
if self.data:
for finfo in self.data:
finfo.editor.set_remove_trailing_newlines(state)
def set_convert_eol_on_save(self, state):
"""If `state` is `True`, saving files will convert line endings."""
# CONF.get(self.CONF_SECTION, 'convert_eol_on_save')
self.convert_eol_on_save = state
def set_convert_eol_on_save_to(self, state):
"""`state` can be one of ('LF', 'CRLF', 'CR')"""
# CONF.get(self.CONF_SECTION, 'convert_eol_on_save_to')
self.convert_eol_on_save_to = state
def set_focus_to_editor(self, state):
self.focus_to_editor = state
def set_run_cell_copy(self, state):
"""If `state` is ``True``, code cells will be copied to the console."""
self.run_cell_copy = state
def set_current_project_path(self, root_path=None):
"""
Set the current active project root path.
Parameters
----------
root_path: str or None, optional
Path to current project root path. Default is None.
"""
for finfo in self.data:
finfo.editor.set_current_project_path(root_path)
# ------ Stacked widget management
def get_stack_index(self):
return self.tabs.currentIndex()
def get_current_finfo(self):
if self.data:
return self.data[self.get_stack_index()]
def get_current_editor(self):
return self.tabs.currentWidget()
def get_stack_count(self):
return self.tabs.count()
def set_stack_index(self, index, instance=None):
        if instance == self or instance is None:
self.tabs.setCurrentIndex(index)
def set_tabbar_visible(self, state):
self.tabs.tabBar().setVisible(state)
def remove_from_data(self, index):
self.tabs.blockSignals(True)
self.tabs.removeTab(index)
self.data.pop(index)
self.tabs.blockSignals(False)
self.update_actions()
def __modified_readonly_title(self, title, is_modified, is_readonly):
if is_modified is not None and is_modified:
title += "*"
if is_readonly is not None and is_readonly:
title = "(%s)" % title
return title
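    # Illustrative sketch (not part of the original source; the title is
    # hypothetical): the decorations compose as
    #     self.__modified_readonly_title("script.py", True, False)  # "script.py*"
    #     self.__modified_readonly_title("script.py", True, True)   # "(script.py*)"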
def get_tab_text(self, index, is_modified=None, is_readonly=None):
"""Return tab title."""
files_path_list = [finfo.filename for finfo in self.data]
fname = self.data[index].filename
fname = sourcecode.disambiguate_fname(files_path_list, fname)
return self.__modified_readonly_title(fname,
is_modified, is_readonly)
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
"""Return tab menu title"""
text = u"%s — %s"
text = self.__modified_readonly_title(text,
is_modified, is_readonly)
if self.tempfile_path is not None\
and filename == encoding.to_unicode_from_fs(self.tempfile_path):
temp_file_str = to_text_string(_("Temporary file"))
return text % (temp_file_str, self.tempfile_path)
else:
return text % (osp.basename(filename), osp.dirname(filename))
def add_to_data(self, finfo, set_current, add_where='end'):
finfo.editor.oe_proxy = None
index = 0 if add_where == 'start' else len(self.data)
self.data.insert(index, finfo)
index = self.data.index(finfo)
editor = finfo.editor
self.tabs.insertTab(index, editor, self.get_tab_text(index))
self.set_stack_title(index, False)
if set_current:
self.set_stack_index(index)
self.current_changed(index)
self.update_actions()
def __repopulate_stack(self):
self.tabs.blockSignals(True)
self.tabs.clear()
for finfo in self.data:
if finfo.newly_created:
is_modified = True
else:
is_modified = None
index = self.data.index(finfo)
tab_text = self.get_tab_text(index, is_modified)
tab_tip = self.get_tab_tip(finfo.filename)
index = self.tabs.addTab(finfo.editor, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
self.tabs.blockSignals(False)
def rename_in_data(self, original_filename, new_filename):
index = self.has_filename(original_filename)
if index is None:
return
finfo = self.data[index]
# Send close request to LSP
finfo.editor.notify_close()
# Set new filename
finfo.filename = new_filename
finfo.editor.filename = new_filename
# File type has changed!
original_ext = osp.splitext(original_filename)[1]
new_ext = osp.splitext(new_filename)[1]
if original_ext != new_ext:
# Set file language and re-run highlighter
txt = to_text_string(finfo.editor.get_text_with_eol())
language = get_file_language(new_filename, txt)
finfo.editor.set_language(language, new_filename)
finfo.editor.run_pygments_highlighter()
# If the user renamed the file to a different language, we
# need to emit sig_open_file to see if we can start a
# language server for it.
options = {
'language': language,
'filename': new_filename,
'codeeditor': finfo.editor
}
self.sig_open_file.emit(options)
# Update panels
finfo.editor.set_debug_panel(
show_debug_panel=True, language=language)
finfo.editor.cleanup_code_analysis()
finfo.editor.cleanup_folding()
else:
# If there's no language change, we simply need to request a
# document_did_open for the new file.
finfo.editor.document_did_open()
set_new_index = index == self.get_stack_index()
current_fname = self.get_current_filename()
finfo.editor.filename = new_filename
new_index = self.data.index(finfo)
self.__repopulate_stack()
if set_new_index:
self.set_stack_index(new_index)
else:
# Fixes spyder-ide/spyder#1287.
self.set_current_filename(current_fname)
if self.outlineexplorer is not None:
self.outlineexplorer.file_renamed(
finfo.editor.oe_proxy, finfo.filename)
return new_index
def set_stack_title(self, index, is_modified):
finfo = self.data[index]
fname = finfo.filename
is_modified = (is_modified or finfo.newly_created) and not finfo.default
is_readonly = finfo.editor.isReadOnly()
tab_text = self.get_tab_text(index, is_modified, is_readonly)
tab_tip = self.get_tab_tip(fname, is_modified, is_readonly)
        # Only update the tab text if it has changed; otherwise unwanted
        # scrolling will happen when changing tabs. See spyder-ide/spyder#1170.
if tab_text != self.tabs.tabText(index):
self.tabs.setTabText(index, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
# ------ Context menu
def __setup_menu(self):
"""Setup tab context menu before showing it"""
self.menu.clear()
if self.data:
actions = self.menu_actions
else:
actions = (self.new_action, self.open_action)
self.setFocus() # --> Editor.__get_focus_editortabwidget
add_actions(self.menu, list(actions) + self.__get_split_actions())
self.close_action.setEnabled(self.is_closable)
if sys.platform == 'darwin':
set_menu_icons(self.menu, True)
# ------ Hor/Ver splitting
def __get_split_actions(self):
if self.parent() is not None:
plugin = self.parent().plugin
else:
plugin = None
# New window
if plugin is not None:
self.new_window_action = create_action(
self, _("New window"),
icon=ima.icon('newwindow'),
tip=_("Create a new editor window"),
triggered=plugin.create_new_window)
# Splitting
self.versplit_action = create_action(
self,
_("Split vertically"),
icon=ima.icon('versplit'),
tip=_("Split vertically this editor window"),
triggered=lambda: self.sig_split_vertically.emit(),
shortcut=CONF.get_shortcut(context='Editor',
name='split vertically'),
context=Qt.WidgetShortcut)
self.horsplit_action = create_action(
self,
_("Split horizontally"),
icon=ima.icon('horsplit'),
tip=_("Split horizontally this editor window"),
triggered=lambda: self.sig_split_horizontally.emit(),
shortcut=CONF.get_shortcut(context='Editor',
name='split horizontally'),
context=Qt.WidgetShortcut)
self.close_action = create_action(
self,
_("Close this panel"),
icon=ima.icon('close_panel'),
triggered=self.close_split,
shortcut=CONF.get_shortcut(context='Editor',
name='close split panel'),
context=Qt.WidgetShortcut)
# Regular actions
actions = [MENU_SEPARATOR, self.versplit_action,
self.horsplit_action, self.close_action]
if self.new_window:
window = self.window()
close_window_action = create_action(
self, _("Close window"),
icon=ima.icon('close_pane'),
triggered=window.close)
actions += [MENU_SEPARATOR, self.new_window_action,
close_window_action]
elif plugin is not None:
if plugin._undocked_window is not None:
actions += [MENU_SEPARATOR, plugin._dock_action]
else:
actions += [MENU_SEPARATOR, self.new_window_action,
plugin._undock_action,
plugin._close_plugin_action]
return actions
def reset_orientation(self):
self.horsplit_action.setEnabled(True)
self.versplit_action.setEnabled(True)
def set_orientation(self, orientation):
self.horsplit_action.setEnabled(orientation == Qt.Horizontal)
self.versplit_action.setEnabled(orientation == Qt.Vertical)
def update_actions(self):
state = self.get_stack_count() > 0
self.horsplit_action.setEnabled(state)
self.versplit_action.setEnabled(state)
# ------ Accessors
def get_current_filename(self):
if self.data:
return self.data[self.get_stack_index()].filename
def get_current_language(self):
if self.data:
return self.data[self.get_stack_index()].editor.language
def get_filenames(self):
"""
Return a list with the names of all the files currently opened in
the editorstack.
"""
return [finfo.filename for finfo in self.data]
def has_filename(self, filename):
"""Return the self.data index position for the filename.
Args:
filename: Name of the file to search for in self.data.
Returns:
The self.data index for the filename. Returns None
if the filename is not found in self.data.
"""
fixpath = lambda path: osp.normcase(osp.realpath(path))
for index, finfo in enumerate(self.data):
if fixpath(filename) == fixpath(finfo.filename):
return index
return None
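    # Usage sketch (paths are hypothetical): has_filename() compares
    # normalized, case-folded real paths, so on a case-insensitive filesystem
    #     self.has_filename('C:/project/Module.py')
    #     self.has_filename('c:/project/module.py')
    # would resolve to the same self.data index, or None if the file is not open.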
def set_current_filename(self, filename, focus=True):
"""Set current filename and return the associated editor instance."""
index = self.has_filename(filename)
if index is not None:
if focus:
self.set_stack_index(index)
editor = self.data[index].editor
if focus:
editor.setFocus()
else:
self.stack_history.remove_and_append(index)
return editor
def is_file_opened(self, filename=None):
"""Return if filename is in the editor stack.
Args:
filename: Name of the file to search for. If filename is None,
then checks if any file is open.
Returns:
True: If filename is None and a file is open.
False: If filename is None and no files are open.
None: If filename is not None and the file isn't found.
integer: Index of file name in editor stack.
"""
if filename is None:
# Is there any file opened?
return len(self.data) > 0
else:
return self.has_filename(filename)
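    # Minimal sketch of the return contract documented above (the filename is
    # hypothetical):
    #     self.is_file_opened()             # True/False: is any file open?
    #     self.is_file_opened('/tmp/a.py')  # index of the file, or None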
def get_index_from_filename(self, filename):
"""
Return the position index of a file in the tab bar of the editorstack
from its name.
"""
filenames = [d.filename for d in self.data]
return filenames.index(filename)
@Slot(int, int)
def move_editorstack_data(self, start, end):
"""Reorder editorstack.data so it is synchronized with the tab bar when
tabs are moved."""
if start < 0 or end < 0:
return
else:
steps = abs(end - start)
direction = (end-start) // steps # +1 for right, -1 for left
data = self.data
self.blockSignals(True)
for i in range(start, end, direction):
data[i], data[i+direction] = data[i+direction], data[i]
self.blockSignals(False)
self.refresh()
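    # Worked example (assumed data, for illustration only): moving the tab at
    # position 2 to position 0 gives steps=2, direction=-1, so the loop swaps
    # (2, 1) and then (1, 0):
    #     before: data == [a, b, c, d]
    #     after move_editorstack_data(2, 0): data == [c, a, b, d]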
# ------ Close file, tabwidget...
def close_file(self, index=None, force=False):
"""Close file (index=None -> close current file)
Keep current file index unchanged (if current file
that is being closed)"""
current_index = self.get_stack_index()
count = self.get_stack_count()
if index is None:
if count > 0:
index = current_index
else:
self.find_widget.set_editor(None)
return
new_index = None
if count > 1:
if current_index == index:
new_index = self._get_previous_file_index()
else:
new_index = current_index
can_close_file = self.parent().plugin.can_close_file(
self.data[index].filename) if self.parent() else True
is_ok = (force or self.save_if_changed(cancelable=True, index=index)
and can_close_file)
if is_ok:
finfo = self.data[index]
self.threadmanager.close_threads(finfo)
# Removing editor reference from outline explorer settings:
if self.outlineexplorer is not None:
self.outlineexplorer.remove_editor(finfo.editor.oe_proxy)
filename = self.data[index].filename
self.remove_from_data(index)
finfo.editor.notify_close()
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms.
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
self.sig_close_file.emit(str(id(self)), filename)
self.opened_files_list_changed.emit()
self.update_code_analysis_actions.emit()
self.refresh_file_dependent_actions.emit()
self.update_plugin_title.emit()
editor = self.get_current_editor()
if editor:
editor.setFocus()
if new_index is not None:
if index < new_index:
new_index -= 1
self.set_stack_index(new_index)
self.add_last_closed_file(finfo.filename)
if finfo.filename in self.autosave.file_hashes:
del self.autosave.file_hashes[finfo.filename]
if self.get_stack_count() == 0 and self.create_new_file_if_empty:
self.sig_new_file[()].emit()
return False
self.__modify_stack_title()
return is_ok
def register_completion_capabilities(self, capabilities, language):
"""
Register completion server capabilities across all editors.
Parameters
----------
capabilities: dict
Capabilities supported by a language server.
language: str
            Programming language for the language server (it has to be
            in lowercase).
"""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.register_completion_capabilities(capabilities)
def start_completion_services(self, language):
"""Notify language server availability to code editors."""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.start_completion_services()
def stop_completion_services(self, language):
"""Notify language server unavailability to code editors."""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.stop_completion_services()
def close_all_files(self):
"""Close all opened scripts"""
while self.close_file():
pass
def close_all_right(self):
""" Close all files opened to the right """
num = self.get_stack_index()
n = self.get_stack_count()
for __ in range(num, n-1):
self.close_file(num+1)
def close_all_but_this(self):
"""Close all files but the current one"""
self.close_all_right()
for __ in range(0, self.get_stack_count() - 1):
self.close_file(0)
def sort_file_tabs_alphabetically(self):
"""Sort open tabs alphabetically."""
while self.sorted() is False:
            for i in range(0, self.tabs.tabBar().count() - 1):
                if (self.tabs.tabBar().tabText(i) >
                        self.tabs.tabBar().tabText(i + 1)):
                    self.tabs.tabBar().moveTab(i, i + 1)
def sorted(self):
"""Utility function for sort_file_tabs_alphabetically()."""
for i in range(0, self.tabs.tabBar().count() - 1):
if (self.tabs.tabBar().tabText(i) >
self.tabs.tabBar().tabText(i + 1)):
return False
return True
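    # Note (editorial): sort_file_tabs_alphabetically() is effectively a
    # bubble sort over the tab bar, with sorted() as its termination check.
    # A hypothetical tab order ["b.py", "a.py", "c.py"] is fixed by a single
    # moveTab(0, 1), after which sorted() returns True and the loop stops.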
def add_last_closed_file(self, fname):
"""Add to last closed file list."""
if fname in self.last_closed_files:
self.last_closed_files.remove(fname)
self.last_closed_files.insert(0, fname)
if len(self.last_closed_files) > 10:
self.last_closed_files.pop(-1)
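    # Behaviour sketch (hypothetical names): the list is an MRU of at most 10
    # entries, with duplicates promoted to the front:
    #     add_last_closed_file('a.py'); add_last_closed_file('b.py')
    #     add_last_closed_file('a.py')
    #     # self.last_closed_files == ['a.py', 'b.py']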
def get_last_closed_files(self):
return self.last_closed_files
def set_last_closed_files(self, fnames):
self.last_closed_files = fnames
# ------ Save
def save_if_changed(self, cancelable=False, index=None):
"""Ask user to save file if modified.
Args:
cancelable: Show Cancel button.
index: File to check for modification.
Returns:
False when save() fails or is cancelled.
True when save() is successful, there are no modifications,
or user selects No or NoToAll.
This function controls the message box prompt for saving
changed files. The actual save is performed in save() for
each index processed. This function also removes autosave files
corresponding to files the user chooses not to save.
"""
if index is None:
indexes = list(range(self.get_stack_count()))
else:
indexes = [index]
buttons = QMessageBox.Yes | QMessageBox.No
if cancelable:
buttons |= QMessageBox.Cancel
unsaved_nb = 0
for index in indexes:
if self.data[index].editor.document().isModified():
unsaved_nb += 1
if not unsaved_nb:
# No file to save
return True
if unsaved_nb > 1:
buttons |= int(QMessageBox.YesToAll | QMessageBox.NoToAll)
yes_all = no_all = False
for index in indexes:
self.set_stack_index(index)
finfo = self.data[index]
if finfo.filename == self.tempfile_path or yes_all:
if not self.save(index):
return False
elif no_all:
self.autosave.remove_autosave_file(finfo)
elif (finfo.editor.document().isModified() and
self.save_dialog_on_tests):
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified."
"<br>Do you want to save changes?"
) % osp.basename(finfo.filename),
buttons,
parent=self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
if not self.save(index):
return False
elif answer == QMessageBox.No:
self.autosave.remove_autosave_file(finfo.filename)
elif answer == QMessageBox.YesToAll:
if not self.save(index):
return False
yes_all = True
elif answer == QMessageBox.NoToAll:
self.autosave.remove_autosave_file(finfo.filename)
no_all = True
elif answer == QMessageBox.Cancel:
return False
return True
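    # Decision sketch for the prompt above (summarising the code, not adding
    # behaviour): Yes/YesToAll -> save(index); No/NoToAll -> remove the
    # autosave file and keep the buffer untouched; Cancel -> return False so
    # callers such as close_file() abort.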
def compute_hash(self, fileinfo):
"""Compute hash of contents of editor.
Args:
fileinfo: FileInfo object associated to editor whose hash needs
to be computed.
Returns:
int: computed hash.
"""
txt = fileinfo.editor.get_text_with_eol()
return hash(txt)
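    # Usage sketch (mirrors _save_file() below): the autosave component keeps
    # a {filename: hash} map so it can tell when its on-disk copy is stale:
    #     self.autosave.file_hashes[finfo.filename] = self.compute_hash(finfo)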
def _write_to_file(self, fileinfo, filename):
"""Low-level function for writing text of editor to file.
Args:
fileinfo: FileInfo object associated to editor to be saved
filename: str with filename to save to
This is a low-level function that only saves the text to file in the
correct encoding without doing any error handling.
"""
txt = fileinfo.editor.get_text_with_eol()
fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding)
def save(self, index=None, force=False, save_new_files=True):
"""Write text of editor to a file.
Args:
index: self.data index to save. If None, defaults to
currentIndex().
force: Force save regardless of file state.
Returns:
True upon successful save or when file doesn't need to be saved.
False if save failed.
If the text isn't modified and it's not newly created, then the save
is aborted. If the file hasn't been saved before, then save_as()
is invoked. Otherwise, the file is written using the file name
currently in self.data. This function doesn't change the file name.
"""
if index is None:
# Save the currently edited file
if not self.get_stack_count():
return
index = self.get_stack_index()
finfo = self.data[index]
if not (finfo.editor.document().isModified() or
finfo.newly_created) and not force:
return True
if not osp.isfile(finfo.filename) and not force:
# File has not been saved yet
if save_new_files:
return self.save_as(index=index)
# The file doesn't need to be saved
return True
if self.always_remove_trailing_spaces:
self.remove_trailing_spaces(index)
if self.remove_trailing_newlines:
self.trim_trailing_newlines(index)
if self.add_newline:
self.add_newline_to_file(index)
if self.convert_eol_on_save:
# hack to account for the fact that the config file saves
# CR/LF/CRLF while set_os_eol_chars wants the os.name value.
osname_lookup = {'LF': 'posix', 'CRLF': 'nt', 'CR': 'mac'}
osname = osname_lookup[self.convert_eol_on_save_to]
self.set_os_eol_chars(osname=osname)
try:
if self.format_on_save and finfo.editor.formatting_enabled:
# Autoformat document and then save
finfo.editor.sig_stop_operation_in_progress.connect(
functools.partial(self._save_file, finfo, index))
finfo.editor.format_document()
else:
self._save_file(finfo, index)
return True
except EnvironmentError as error:
self.msgbox = QMessageBox(
QMessageBox.Critical,
_("Save Error"),
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(finfo.filename),
str(error)),
parent=self)
self.msgbox.exec_()
return False
def _save_file(self, finfo, index):
self._write_to_file(finfo, finfo.filename)
file_hash = self.compute_hash(finfo)
self.autosave.file_hashes[finfo.filename] = file_hash
self.autosave.remove_autosave_file(finfo.filename)
finfo.newly_created = False
self.encoding_changed.emit(finfo.encoding)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms.
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
# The filename is passed instead of an index in case the tabs
# have been rearranged. See spyder-ide/spyder#5703.
self.file_saved.emit(str(id(self)),
finfo.filename, finfo.filename)
finfo.editor.document().setModified(False)
self.modification_changed(index=index)
self.analyze_script(index)
finfo.editor.notify_save()
def file_saved_in_other_editorstack(self, original_filename, filename):
"""
File was just saved in another editorstack, let's synchronize!
This avoids file being automatically reloaded.
The original filename is passed instead of an index in case the tabs
on the editor stacks were moved and are now in a different order - see
spyder-ide/spyder#5703.
Filename is passed in case file was just saved as another name.
"""
index = self.has_filename(original_filename)
if index is None:
return
finfo = self.data[index]
finfo.newly_created = False
finfo.filename = to_text_string(filename)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
def select_savename(self, original_filename):
"""Select a name to save a file.
Args:
original_filename: Used in the dialog to display the current file
path and name.
Returns:
Normalized path for the selected file name or None if no name was
selected.
"""
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
        # Don't use filters on KDE to avoid making the dialog incredibly
        # slow
# Fixes spyder-ide/spyder#4156.
if is_kde_desktop() and not is_anaconda():
filters = ''
selectedfilter = ''
else:
filters = self.edit_filters
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(original_filename)[1])
self.redirect_stdio.emit(False)
filename, _selfilter = getsavefilename(self, _("Save file"),
original_filename,
filters=filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails)
self.redirect_stdio.emit(True)
if filename:
return osp.normpath(filename)
return None
def save_as(self, index=None):
"""Save file as...
Args:
index: self.data index for the file to save.
Returns:
False if no file name was selected or if save() was unsuccessful.
            True if save() was successful.
Gets the new file name from select_savename(). If no name is chosen,
then the save_as() aborts. Otherwise, the current stack is checked
to see if the selected name already exists and, if so, then the tab
with that name is closed.
The current stack (self.data) and current tabs are updated with the
new name and other file info. The text is written with the new
name using save() and the name change is propagated to the other stacks
via the file_renamed_in_data signal.
"""
if index is None:
# Save the currently edited file
index = self.get_stack_index()
finfo = self.data[index]
original_newly_created = finfo.newly_created
# The next line is necessary to avoid checking if the file exists
        # while running __check_file_status.
# See spyder-ide/spyder#3678 and spyder-ide/spyder#3026.
finfo.newly_created = True
original_filename = finfo.filename
filename = self.select_savename(original_filename)
if filename:
ao_index = self.has_filename(filename)
# Note: ao_index == index --> saving an untitled file
if ao_index is not None and ao_index != index:
if not self.close_file(ao_index):
return
if ao_index < index:
index -= 1
new_index = self.rename_in_data(original_filename,
new_filename=filename)
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
self.file_renamed_in_data.emit(str(id(self)),
original_filename, filename)
ok = self.save(index=new_index, force=True)
self.refresh(new_index)
self.set_stack_index(new_index)
return ok
else:
finfo.newly_created = original_newly_created
return False
def save_copy_as(self, index=None):
"""Save copy of file as...
Args:
index: self.data index for the file to save.
Returns:
False if no file name was selected or if save() was unsuccessful.
            True if save() was successful.
Gets the new file name from select_savename(). If no name is chosen,
then the save_copy_as() aborts. Otherwise, the current stack is
checked to see if the selected name already exists and, if so, then the
tab with that name is closed.
Unlike save_as(), this calls write() directly instead of using save().
The current file and tab aren't changed at all. The copied file is
opened in a new tab.
"""
if index is None:
# Save the currently edited file
index = self.get_stack_index()
finfo = self.data[index]
original_filename = finfo.filename
filename = self.select_savename(original_filename)
if filename:
ao_index = self.has_filename(filename)
# Note: ao_index == index --> saving an untitled file
if ao_index is not None and ao_index != index:
if not self.close_file(ao_index):
return
if ao_index < index:
index -= 1
try:
self._write_to_file(finfo, filename)
# open created copy file
self.plugin_load.emit(filename)
return True
except EnvironmentError as error:
self.msgbox = QMessageBox(
QMessageBox.Critical,
_("Save Error"),
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(finfo.filename),
str(error)),
parent=self)
self.msgbox.exec_()
else:
return False
def save_all(self, save_new_files=True):
"""Save all opened files.
Iterate through self.data and call save() on any modified files.
"""
all_saved = True
for index in range(self.get_stack_count()):
if self.data[index].editor.document().isModified():
all_saved &= self.save(index, save_new_files=save_new_files)
return all_saved
#------ Update UI
def start_stop_analysis_timer(self):
self.is_analysis_done = False
self.analysis_timer.stop()
self.analysis_timer.start()
def analyze_script(self, index=None):
"""Analyze current script for TODOs."""
if self.is_analysis_done:
return
if index is None:
index = self.get_stack_index()
if self.data and len(self.data) > index:
finfo = self.data[index]
if self.todolist_enabled:
finfo.run_todo_finder()
self.is_analysis_done = True
def set_todo_results(self, filename, todo_results):
"""Synchronize todo results between editorstacks"""
index = self.has_filename(filename)
if index is None:
return
self.data[index].set_todo_results(todo_results)
def get_todo_results(self):
if self.data:
return self.data[self.get_stack_index()].todo_results
def current_changed(self, index):
"""Stack index has changed"""
editor = self.get_current_editor()
if index != -1:
editor.setFocus()
logger.debug("Set focus to: %s" % editor.filename)
else:
self.reset_statusbar.emit()
self.opened_files_list_changed.emit()
self.stack_history.refresh()
self.stack_history.remove_and_append(index)
# Needed to avoid an error generated after moving/renaming
# files outside Spyder while in debug mode.
# See spyder-ide/spyder#8749.
try:
logger.debug("Current changed: %d - %s" %
(index, self.data[index].editor.filename))
except IndexError:
pass
self.update_plugin_title.emit()
# Make sure that any replace happens in the editor on top
# See spyder-ide/spyder#9688.
self.find_widget.set_editor(editor, refresh=False)
if editor is not None:
# Needed in order to handle the close of files open in a directory
# that has been renamed. See spyder-ide/spyder#5157.
try:
line, col = editor.get_cursor_line_column()
self.current_file_changed.emit(self.data[index].filename,
editor.get_position('cursor'),
line, col)
except IndexError:
pass
def _get_previous_file_index(self):
"""Return the penultimate element of the stack history."""
try:
return self.stack_history[-2]
except IndexError:
return None
def tab_navigation_mru(self, forward=True):
"""
Tab navigation with "most recently used" behaviour.
It's fired when pressing 'go to previous file' or 'go to next file'
shortcuts.
forward:
True: move to next file
False: move to previous file
"""
self.tabs_switcher = TabSwitcherWidget(self, self.stack_history,
self.tabs)
self.tabs_switcher.show()
self.tabs_switcher.select_row(1 if forward else -1)
self.tabs_switcher.setFocus()
def focus_changed(self):
"""Editor focus has changed"""
fwidget = QApplication.focusWidget()
for finfo in self.data:
if fwidget is finfo.editor:
if finfo.editor.operation_in_progress:
self.spinner.start()
else:
self.spinner.stop()
self.refresh()
self.editor_focus_changed.emit()
def _refresh_outlineexplorer(self, index=None, update=True, clear=False):
"""Refresh outline explorer panel"""
oe = self.outlineexplorer
if oe is None:
return
if index is None:
index = self.get_stack_index()
if self.data and len(self.data) > index:
finfo = self.data[index]
oe.setEnabled(True)
oe.set_current_editor(finfo.editor.oe_proxy,
update=update, clear=clear)
if index != self.get_stack_index():
# The last file added to the outline explorer is not the
# currently focused one in the editor stack. Therefore,
# we need to force a refresh of the outline explorer to set
# the current editor to the currently focused one in the
# editor stack. See spyder-ide/spyder#8015.
self._refresh_outlineexplorer(update=False)
return
self._sync_outlineexplorer_file_order()
def _sync_outlineexplorer_file_order(self):
"""
Order the root file items of the outline explorer as in the tabbar
of the current EditorStack.
"""
if self.outlineexplorer is not None:
self.outlineexplorer.treewidget.set_editor_ids_order(
[finfo.editor.get_document_id() for finfo in self.data])
def __refresh_statusbar(self, index):
"""Refreshing statusbar widgets"""
if self.data and len(self.data) > index:
finfo = self.data[index]
self.encoding_changed.emit(finfo.encoding)
# Refresh cursor position status:
line, index = finfo.editor.get_cursor_line_column()
self.sig_editor_cursor_position_changed.emit(line, index)
def __refresh_readonly(self, index):
if self.data and len(self.data) > index:
finfo = self.data[index]
read_only = not QFileInfo(finfo.filename).isWritable()
if not osp.isfile(finfo.filename):
# This is an 'untitledX.py' file (newly created)
read_only = False
elif os.name == 'nt':
try:
# Try to open the file to see if its permissions allow
# to write on it
# Fixes spyder-ide/spyder#10657
fd = os.open(finfo.filename, os.O_RDWR)
os.close(fd)
except (IOError, OSError):
read_only = True
finfo.editor.setReadOnly(read_only)
self.readonly_changed.emit(read_only)
def __check_file_status(self, index):
"""Check if file has been changed in any way outside Spyder:
1. removed, moved or renamed outside Spyder
2. modified outside Spyder"""
if self.__file_status_flag:
# Avoid infinite loop: when the QMessageBox.question pops, it
            # gets focus and then gives it back to the CodeEditor instance,
# triggering a refresh cycle which calls this method
return
self.__file_status_flag = True
if len(self.data) <= index:
index = self.get_stack_index()
finfo = self.data[index]
name = osp.basename(finfo.filename)
if finfo.newly_created:
# File was just created (not yet saved): do nothing
# (do not return because of the clean-up at the end of the method)
pass
elif not osp.isfile(finfo.filename):
# File doesn't exist (removed, moved or offline):
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> is unavailable "
"(this file may have been removed, moved "
"or renamed outside Spyder)."
"<br>Do you want to close it?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.close_file(index)
else:
finfo.newly_created = True
finfo.editor.document().setModified(True)
self.modification_changed(index=index)
else:
# Else, testing if it has been modified elsewhere:
lastm = QFileInfo(finfo.filename).lastModified()
if to_text_string(lastm.toString()) \
!= to_text_string(finfo.lastmodified.toString()):
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified outside Spyder."
"<br>Do you want to reload it and lose all "
"your changes?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.reload(index)
else:
finfo.lastmodified = lastm
else:
self.reload(index)
# Finally, resetting temporary flag:
self.__file_status_flag = False
def __modify_stack_title(self):
for index, finfo in enumerate(self.data):
state = finfo.editor.document().isModified()
self.set_stack_title(index, state)
def refresh(self, index=None):
"""Refresh tabwidget"""
if index is None:
index = self.get_stack_index()
# Set current editor
if self.get_stack_count():
index = self.get_stack_index()
finfo = self.data[index]
editor = finfo.editor
editor.setFocus()
self._refresh_outlineexplorer(index, update=False)
self.update_code_analysis_actions.emit()
self.__refresh_statusbar(index)
self.__refresh_readonly(index)
self.__check_file_status(index)
self.__modify_stack_title()
self.update_plugin_title.emit()
else:
editor = None
# Update the modification-state-dependent parameters
self.modification_changed()
# Update FindReplace binding
self.find_widget.set_editor(editor, refresh=False)
def modification_changed(self, state=None, index=None, editor_id=None):
"""
Current editor's modification state has changed
--> change tab title depending on new modification state
--> enable/disable save/save all actions
"""
if editor_id is not None:
for index, _finfo in enumerate(self.data):
if id(_finfo.editor) == editor_id:
break
# This must be done before refreshing save/save all actions:
# (otherwise Save/Save all actions will always be enabled)
self.opened_files_list_changed.emit()
# --
if index is None:
index = self.get_stack_index()
if index == -1:
return
finfo = self.data[index]
if state is None:
state = finfo.editor.document().isModified() or finfo.newly_created
self.set_stack_title(index, state)
# Toggle save/save all actions state
self.save_action.setEnabled(state)
self.refresh_save_all_action.emit()
# Refreshing eol mode
eol_chars = finfo.editor.get_line_separator()
self.refresh_eol_chars(eol_chars)
self.stack_history.refresh()
def refresh_eol_chars(self, eol_chars):
os_name = sourcecode.get_os_name_from_eol_chars(eol_chars)
self.sig_refresh_eol_chars.emit(os_name)
# ------ Load, reload
def reload(self, index):
"""Reload file from disk."""
finfo = self.data[index]
logger.debug("Reloading {}".format(finfo.filename))
txt, finfo.encoding = encoding.read(finfo.filename)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
position = finfo.editor.get_position('cursor')
finfo.editor.set_text(txt)
finfo.editor.document().setModified(False)
self.autosave.file_hashes[finfo.filename] = hash(txt)
finfo.editor.set_cursor_position(position)
        # XXX CodeEditor-only: re-scan the whole text to rebuild outline
        # explorer data from scratch (this could be optimized because
        # rehighlighting the text means searching for all syntax coloring
        # patterns instead of only the class/def patterns, which would be
        # sufficient for outline explorer data).
finfo.editor.rehighlight()
def revert(self):
"""Revert file from disk."""
index = self.get_stack_index()
finfo = self.data[index]
logger.debug("Reverting {}".format(finfo.filename))
filename = finfo.filename
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("All changes to <b>%s</b> will be lost."
"<br>Do you want to revert file from disk?"
) % osp.basename(filename),
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer != QMessageBox.Yes:
return
self.reload(index)
def create_new_editor(self, fname, enc, txt, set_current, new=False,
cloned_from=None, add_where='end'):
"""
Create a new editor instance
Returns finfo object (instead of editor as in previous releases)
"""
editor = codeeditor.CodeEditor(self)
editor.go_to_definition.connect(
lambda fname, line, column: self.sig_go_to_definition.emit(
fname, line, column))
finfo = FileInfo(fname, enc, editor, new, self.threadmanager)
self.add_to_data(finfo, set_current, add_where)
finfo.sig_send_to_help.connect(self.send_to_help)
finfo.sig_show_object_info.connect(self.inspect_current_object)
finfo.todo_results_changed.connect(
lambda: self.todo_results_changed.emit())
finfo.edit_goto.connect(lambda fname, lineno, name:
self.edit_goto.emit(fname, lineno, name))
finfo.sig_save_bookmarks.connect(lambda s1, s2:
self.sig_save_bookmarks.emit(s1, s2))
editor.sig_run_selection.connect(self.run_selection)
editor.sig_run_cell.connect(self.run_cell)
editor.sig_debug_cell.connect(self.debug_cell)
editor.sig_run_cell_and_advance.connect(self.run_cell_and_advance)
editor.sig_re_run_last_cell.connect(self.re_run_last_cell)
editor.sig_new_file.connect(self.sig_new_file.emit)
editor.sig_breakpoints_saved.connect(self.sig_breakpoints_saved)
editor.sig_process_code_analysis.connect(
lambda: self.update_code_analysis_actions.emit())
editor.sig_refresh_formatting.connect(self.sig_refresh_formatting)
language = get_file_language(fname, txt)
editor.setup_editor(
linenumbers=self.linenumbers_enabled,
show_blanks=self.blanks_enabled,
underline_errors=self.underline_errors_enabled,
scroll_past_end=self.scrollpastend_enabled,
edge_line=self.edgeline_enabled,
edge_line_columns=self.edgeline_columns,
language=language,
markers=self.has_markers(),
font=self.default_font,
color_scheme=self.color_scheme,
wrap=self.wrap_enabled,
tab_mode=self.tabmode_enabled,
strip_mode=self.stripmode_enabled,
intelligent_backspace=self.intelligent_backspace_enabled,
automatic_completions=self.automatic_completions_enabled,
automatic_completions_after_chars=self.automatic_completion_chars,
automatic_completions_after_ms=self.automatic_completion_ms,
code_snippets=self.code_snippets_enabled,
completions_hint=self.completions_hint_enabled,
completions_hint_after_ms=self.completions_hint_after_ms,
hover_hints=self.hover_hints_enabled,
highlight_current_line=self.highlight_current_line_enabled,
highlight_current_cell=self.highlight_current_cell_enabled,
occurrence_highlighting=self.occurrence_highlighting_enabled,
occurrence_timeout=self.occurrence_highlighting_timeout,
close_parentheses=self.close_parentheses_enabled,
close_quotes=self.close_quotes_enabled,
add_colons=self.add_colons_enabled,
auto_unindent=self.auto_unindent_enabled,
indent_chars=self.indent_chars,
tab_stop_width_spaces=self.tab_stop_width_spaces,
cloned_from=cloned_from,
filename=fname,
show_class_func_dropdown=self.show_class_func_dropdown,
indent_guides=self.indent_guides,
folding=self.code_folding_enabled,
remove_trailing_spaces=self.always_remove_trailing_spaces,
remove_trailing_newlines=self.remove_trailing_newlines,
add_newline=self.add_newline,
format_on_save=self.format_on_save
)
if cloned_from is None:
editor.set_text(txt)
editor.document().setModified(False)
finfo.text_changed_at.connect(
lambda fname, position:
self.text_changed_at.emit(fname, position))
editor.sig_cursor_position_changed.connect(
self.editor_cursor_position_changed)
editor.textChanged.connect(self.start_stop_analysis_timer)
# Register external panels
for panel_class, args, kwargs, position in self.external_panels:
self.register_panel(
panel_class, *args, position=position, **kwargs)
def perform_completion_request(lang, method, params):
self.sig_perform_completion_request.emit(lang, method, params)
editor.sig_perform_completion_request.connect(
perform_completion_request)
editor.sig_start_operation_in_progress.connect(self.spinner.start)
editor.sig_stop_operation_in_progress.connect(self.spinner.stop)
editor.modificationChanged.connect(
lambda state: self.modification_changed(
state, editor_id=id(editor)))
editor.focus_in.connect(self.focus_changed)
editor.zoom_in.connect(lambda: self.zoom_in.emit())
editor.zoom_out.connect(lambda: self.zoom_out.emit())
editor.zoom_reset.connect(lambda: self.zoom_reset.emit())
editor.sig_eol_chars_changed.connect(
lambda eol_chars: self.refresh_eol_chars(eol_chars))
self.find_widget.set_editor(editor)
self.refresh_file_dependent_actions.emit()
self.modification_changed(index=self.data.index(finfo))
# To update the outline explorer.
editor.oe_proxy = OutlineExplorerProxyEditor(editor, editor.filename)
if self.outlineexplorer is not None:
self.outlineexplorer.register_editor(editor.oe_proxy)
# Needs to reset the highlighting on startup in case the PygmentsSH
# is in use
editor.run_pygments_highlighter()
options = {
'language': editor.language,
'filename': editor.filename,
'codeeditor': editor
}
self.sig_open_file.emit(options)
if self.get_stack_index() == 0:
self.current_changed(0)
return finfo
def editor_cursor_position_changed(self, line, index):
"""Cursor position of one of the editor in the stack has changed"""
self.sig_editor_cursor_position_changed.emit(line, index)
@Slot(str, str, bool)
def send_to_help(self, name, signature, force=False):
"""qstr1: obj_text, qstr2: argpspec, qstr3: note, qstr4: doc_text"""
if not force and not self.help_enabled:
return
editor = self.get_current_editor()
language = editor.language.lower()
signature = to_text_string(signature)
signature = unicodedata.normalize("NFKD", signature)
parts = signature.split('\n\n')
definition = parts[0]
documentation = '\n\n'.join(parts[1:])
args = ''
if '(' in definition and language == 'python':
args = definition[definition.find('('):]
else:
documentation = signature
doc = {
'obj_text': '',
'name': name,
'argspec': args,
'note': '',
'docstring': documentation,
'force_refresh': force,
'path': editor.filename
}
self.sig_help_requested.emit(doc)
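    # Parsing sketch (hypothetical signature, for illustration): for a Python
    # buffer, "foo(x, y=1)\n\nAdd x and y." splits at the first blank line,
    # giving definition == "foo(x, y=1)", args == "(x, y=1)" and
    # documentation == "Add x and y." in the dict emitted to Help.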
def new(self, filename, encoding, text, default_content=False,
empty=False):
"""
Create new filename with *encoding* and *text*
"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
if not empty:
finfo.editor.insert_text(os.linesep)
if default_content:
finfo.default = True
finfo.editor.document().setModified(False)
return finfo
def load(self, filename, set_current=True, add_where='end',
processevents=True):
"""
Load filename, create an editor instance and return it
This also sets the hash of the loaded file in the autosave component.
        *Warning* This loads the file and creates the editor, but it does not
        run the source code analysis -- the analysis must be done by the
        editor plugin (in case multiple editorstack instances are handled).
"""
filename = osp.abspath(to_text_string(filename))
if processevents:
self.starting_long_process.emit(_("Loading %s...") % filename)
text, enc = encoding.read(filename)
self.autosave.file_hashes[filename] = hash(text)
finfo = self.create_new_editor(filename, enc, text, set_current,
add_where=add_where)
index = self.data.index(finfo)
if processevents:
self.ending_long_process.emit("")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok,
self)
self.msgbox.exec_()
self.set_os_eol_chars(index)
self.is_analysis_done = False
self.analyze_script(index)
return finfo
def set_os_eol_chars(self, index=None, osname=None):
"""Sets the EOL character(s) based on the operating system.
If `osname` is None, then the default line endings for the current
operating system (`os.name` value) will be used.
`osname` can be one of:
            ('posix', 'nt', 'mac')
"""
if osname is None:
osname = os.name
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
eol_chars = sourcecode.get_eol_chars_from_os_name(osname)
finfo.editor.set_eol_chars(eol_chars)
finfo.editor.document().setModified(True)
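    # Mapping sketch (assumed behaviour of get_eol_chars_from_os_name):
    # 'posix' -> "\n", 'nt' -> "\r\n", 'mac' -> "\r". Combined with the
    # osname_lookup in save(), convert_eol_on_save_to == 'CRLF' therefore
    # writes Windows line endings regardless of the host OS.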
def remove_trailing_spaces(self, index=None):
"""Remove trailing spaces"""
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.trim_trailing_spaces()
def trim_trailing_newlines(self, index=None):
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.trim_trailing_newlines()
def add_newline_to_file(self, index=None):
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.add_newline_to_file()
def fix_indentation(self, index=None):
"""Replace tab characters by spaces"""
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.fix_indentation()
def format_document_or_selection(self, index=None):
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.format_document_or_range()
# ------ Run
def run_selection(self):
"""
Run selected text or current line in console.
If some text is selected, then execute that text in console.
If no text is selected, then execute current line, unless current line
is empty. Then, advance cursor to next line. If cursor is on last line
and that line is not empty, then add a new blank line and move the
cursor there. If cursor is on last line and that line is empty, then do
not move cursor.
"""
text = self.get_current_editor().get_selection_as_executable_code()
if text:
self.exec_in_extconsole.emit(text.rstrip(), self.focus_to_editor)
return
editor = self.get_current_editor()
line = editor.get_current_line()
text = line.lstrip()
if text:
self.exec_in_extconsole.emit(text, self.focus_to_editor)
if editor.is_cursor_on_last_line() and text:
editor.append(editor.get_line_separator())
editor.move_cursor_to_next('line', 'down')
def run_cell(self, debug=False):
"""Run current cell."""
text, block = self.get_current_editor().get_cell_as_executable_code()
finfo = self.get_current_finfo()
editor = self.get_current_editor()
name = cell_name(block)
filename = finfo.filename
self._run_cell_text(text, editor, (filename, name), debug)
def debug_cell(self):
"""Debug current cell."""
self.run_cell(debug=True)
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
self.run_cell()
self.advance_cell()
def advance_cell(self, reverse=False):
"""Advance to the next cell.
reverse = True --> go to previous cell.
"""
if not reverse:
move_func = self.get_current_editor().go_to_next_cell
else:
move_func = self.get_current_editor().go_to_previous_cell
if self.focus_to_editor:
move_func()
else:
term = QApplication.focusWidget()
move_func()
term.setFocus()
def re_run_last_cell(self):
"""Run the previous cell again."""
if self.last_cell_call is None:
return
filename, cell_name = self.last_cell_call
index = self.has_filename(filename)
if index is None:
return
editor = self.data[index].editor
try:
text = editor.get_cell_code(cell_name)
except RuntimeError:
return
self._run_cell_text(text, editor, (filename, cell_name))
def _run_cell_text(self, text, editor, cell_id, debug=False):
"""Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
        cell_id : tuple
            A (filename, cell_name) pair identifying the cell.
        debug : bool
            Whether the cell is sent for debugging instead of a normal run.
"""
(filename, cell_name) = cell_id
if editor.is_python_or_ipython():
args = (text, cell_name, filename, self.run_cell_copy)
if debug:
self.debug_cell_in_ipyclient.emit(*args)
else:
self.run_cell_in_ipyclient.emit(*args)
if self.focus_to_editor:
editor.setFocus()
else:
console = QApplication.focusWidget()
console.setFocus()
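    # Signal sketch (argument order only, as built above): for Python/IPython
    # buffers the cell is forwarded as
    #     run_cell_in_ipyclient.emit(text, cell_name, filename, run_cell_copy)
    # or debug_cell_in_ipyclient.emit(...) when debug=True; other languages
    # are ignored by this method.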
# ------ Drag and drop
def dragEnterEvent(self, event):
"""
Reimplemented Qt method.
Inform Qt about the types of data that the widget accepts.
"""
logger.debug("dragEnterEvent was received")
source = event.mimeData()
# The second check is necessary on Windows, where source.hasUrls()
# can return True but source.urls() is []
# The third check is needed since a file could be dropped from
# compressed files. In Windows mimedata2url(source) returns None
# Fixes spyder-ide/spyder#5218.
has_urls = source.hasUrls()
has_text = source.hasText()
urls = source.urls()
all_urls = mimedata2url(source)
logger.debug("Drag event source has_urls: {}".format(has_urls))
logger.debug("Drag event source urls: {}".format(urls))
logger.debug("Drag event source all_urls: {}".format(all_urls))
logger.debug("Drag event source has_text: {}".format(has_text))
if has_urls and urls and all_urls:
text = [encoding.is_text_file(url) for url in all_urls]
logger.debug("Accept proposed action?: {}".format(any(text)))
if any(text):
event.acceptProposedAction()
else:
event.ignore()
elif source.hasText():
event.acceptProposedAction()
elif os.name == 'nt':
# This covers cases like dragging from compressed files,
# which can be opened by the Editor if they are plain
            # text, but don't come with url info.
# Fixes spyder-ide/spyder#2032.
logger.debug("Accept proposed action on Windows")
event.acceptProposedAction()
else:
logger.debug("Ignore drag event")
event.ignore()
def dropEvent(self, event):
"""
Reimplement Qt method.
Unpack dropped data and handle it.
"""
logger.debug("dropEvent was received")
source = event.mimeData()
# The second check is necessary when mimedata2url(source)
# returns None.
# Fixes spyder-ide/spyder#7742.
if source.hasUrls() and mimedata2url(source):
files = mimedata2url(source)
files = [f for f in files if encoding.is_text_file(f)]
files = set(files or [])
for fname in files:
self.plugin_load.emit(fname)
elif source.hasText():
editor = self.get_current_editor()
if editor is not None:
editor.insert_text(source.text())
else:
event.ignore()
event.acceptProposedAction()
def register_panel(self, panel_class, *args,
position=Panel.Position.LEFT, **kwargs):
"""Register a panel in all codeeditors."""
if (panel_class, args, kwargs, position) not in self.external_panels:
self.external_panels.append((panel_class, args, kwargs, position))
for finfo in self.data:
cur_panel = finfo.editor.panels.register(
panel_class(*args, **kwargs), position=position)
if not cur_panel.isVisible():
cur_panel.setVisible(True)
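    # Usage sketch (MyPanel is hypothetical): plugins can attach an extra
    # panel to every open editor, and to editors created afterwards:
    #     editorstack.register_panel(MyPanel, position=Panel.Position.RIGHT)
    # The (class, args, kwargs, position) tuple is stored in
    # self.external_panels so create_new_editor() re-registers it.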
class EditorSplitter(QSplitter):
"""QSplitter for editor windows."""
def __init__(self, parent, plugin, menu_actions, first=False,
register_editorstack_cb=None, unregister_editorstack_cb=None):
"""Create a splitter for dividing an editor window into panels.
Adds a new EditorStack instance to this splitter. If it's not
the first splitter, clones the current EditorStack from the plugin.
Args:
parent: Parent widget.
plugin: Plugin this widget belongs to.
menu_actions: QActions to include from the parent.
first: Boolean if this is the first splitter in the editor.
register_editorstack_cb: Callback to register the EditorStack.
Defaults to plugin.register_editorstack() to
register the EditorStack with the Editor plugin.
unregister_editorstack_cb: Callback to unregister the EditorStack.
Defaults to plugin.unregister_editorstack() to
unregister the EditorStack with the Editor plugin.
"""
QSplitter.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setChildrenCollapsible(False)
self.toolbar_list = None
self.menu_list = None
self.plugin = plugin
if register_editorstack_cb is None:
register_editorstack_cb = self.plugin.register_editorstack
self.register_editorstack_cb = register_editorstack_cb
if unregister_editorstack_cb is None:
unregister_editorstack_cb = self.plugin.unregister_editorstack
self.unregister_editorstack_cb = unregister_editorstack_cb
self.menu_actions = menu_actions
self.editorstack = EditorStack(self, menu_actions)
self.register_editorstack_cb(self.editorstack)
if not first:
self.plugin.clone_editorstack(editorstack=self.editorstack)
self.editorstack.destroyed.connect(lambda: self.editorstack_closed())
self.editorstack.sig_split_vertically.connect(
lambda: self.split(orientation=Qt.Vertical))
self.editorstack.sig_split_horizontally.connect(
lambda: self.split(orientation=Qt.Horizontal))
self.addWidget(self.editorstack)
if not running_under_pytest():
self.editorstack.set_color_scheme(plugin.get_color_scheme())
self.setStyleSheet(self._stylesheet)
def closeEvent(self, event):
"""Override QWidget closeEvent().
This event handler is called with the given event when Qt
receives a window close request from a top-level widget.
"""
QSplitter.closeEvent(self, event)
def __give_focus_to_remaining_editor(self):
focus_widget = self.plugin.get_focus_widget()
if focus_widget is not None:
focus_widget.setFocus()
def editorstack_closed(self):
logger.debug("method 'editorstack_closed':")
logger.debug(" self : %r" % self)
try:
self.unregister_editorstack_cb(self.editorstack)
self.editorstack = None
close_splitter = self.count() == 1
except (RuntimeError, AttributeError):
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
            # The editorstack that just closed was the last widget in this QSplitter
self.close()
return
self.__give_focus_to_remaining_editor()
def editorsplitter_closed(self):
logger.debug("method 'editorsplitter_closed':")
logger.debug(" self : %r" % self)
try:
close_splitter = self.count() == 1 and self.editorstack is None
except RuntimeError:
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
            # The editorsplitter that just closed was the last widget in this QSplitter
self.close()
return
elif self.count() == 2 and self.editorstack:
# back to the initial state: a single editorstack instance,
# as a single widget in this QSplitter: orientation may be changed
self.editorstack.reset_orientation()
self.__give_focus_to_remaining_editor()
def split(self, orientation=Qt.Vertical):
"""Create and attach a new EditorSplitter to the current EditorSplitter.
The new EditorSplitter widget will contain an EditorStack that
is a clone of the current EditorStack.
A single EditorSplitter instance can be split multiple times, but the
orientation will be the same for all the direct splits. If one of
the child splits is split, then that split can have a different
orientation.
"""
self.setOrientation(orientation)
self.editorstack.set_orientation(orientation)
editorsplitter = EditorSplitter(self.parent(), self.plugin,
self.menu_actions,
register_editorstack_cb=self.register_editorstack_cb,
unregister_editorstack_cb=self.unregister_editorstack_cb)
self.addWidget(editorsplitter)
editorsplitter.destroyed.connect(self.editorsplitter_closed)
current_editor = editorsplitter.editorstack.get_current_editor()
if current_editor is not None:
current_editor.setFocus()
def iter_editorstacks(self):
"""Return the editor stacks for this splitter and every first child.
Note: If a splitter contains more than one splitter as a direct
child, only the first child's editor stack is included.
Returns:
List of tuples containing (EditorStack instance, orientation).
"""
editorstacks = [(self.widget(0), self.orientation())]
if self.count() > 1:
editorsplitter = self.widget(1)
editorstacks += editorsplitter.iter_editorstacks()
return editorstacks
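    # Shape sketch (assumed two-way vertical split): for a splitter holding
    # one stack plus one nested EditorSplitter, this returns something like
    #     [(<EditorStack #1>, Qt.Vertical), (<EditorStack #2>, Qt.Vertical)]
    # i.e. only the first widget at each nesting level is an EditorStack.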
def get_layout_settings(self):
"""Return the layout state for this splitter and its children.
Record the current state, including file names and current line
numbers, of the splitter panels.
Returns:
A dictionary containing keys {hexstate, sizes, splitsettings}.
hexstate: String of saveState() for self.
sizes: List for size() for self.
splitsettings: List of tuples of the form
(orientation, cfname, clines) for each EditorSplitter
and its EditorStack.
orientation: orientation() for the editor
splitter (which may be a child of self).
cfname: EditorStack current file name.
clines: Current line number for each file in the
EditorStack.
"""
splitsettings = []
for editorstack, orientation in self.iter_editorstacks():
clines = []
cfname = ''
# XXX - this overrides value from the loop to always be False?
orientation = False
if hasattr(editorstack, 'data'):
clines = [finfo.editor.get_cursor_line_number()
for finfo in editorstack.data]
cfname = editorstack.get_current_filename()
splitsettings.append((orientation == Qt.Vertical, cfname, clines))
return dict(hexstate=qbytearray_to_str(self.saveState()),
sizes=self.sizes(), splitsettings=splitsettings)
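    # Example of the returned dict (values are illustrative only):
    #     {'hexstate': '000000ff...', 'sizes': [720, 480],
    #      'splitsettings': [(False, '/tmp/a.py', [12, 1])]}
    # Note the XXX above: the first element of each splitsettings tuple is
    # currently always False because `orientation` is overwritten in the loop.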
def set_layout_settings(self, settings, dont_goto=None):
"""Restore layout state for the splitter panels.
Apply the settings to restore a saved layout within the editor. If
the splitsettings key doesn't exist, then return without restoring
any settings.
The current EditorSplitter (self) calls split() for each element
in split_settings, thus recreating the splitter panels from the saved
state. split() also clones the editorstack, which is then
iterated over to restore the saved line numbers on each file.
The size and positioning of each splitter panel is restored from
hexstate.
Args:
            settings: A dictionary with keys {hexstate, sizes, splitsettings}
                that define the layout for the EditorSplitter panels.
            dont_goto: Defaults to None, in which case the cursor is moved to
                the saved line number in each editor. If a value is given,
                the go-to-line step is skipped.
"""
splitsettings = settings.get('splitsettings')
if splitsettings is None:
return
splitter = self
editor = None
for i, (is_vertical, cfname, clines) in enumerate(splitsettings):
if i > 0:
splitter.split(Qt.Vertical if is_vertical else Qt.Horizontal)
splitter = splitter.widget(1)
editorstack = splitter.widget(0)
for j, finfo in enumerate(editorstack.data):
editor = finfo.editor
                # TODO: go_to_line is not working properly (the line it jumps
                # to does not correspond to that file). This will be fixed
# in a future PR (which will fix spyder-ide/spyder#3857).
if dont_goto is not None:
                    # Skip go to line for the first file because it is already there.
pass
else:
try:
editor.go_to_line(clines[j])
except IndexError:
pass
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
sizes = settings.get('sizes')
if sizes is not None:
self.setSizes(sizes)
if editor is not None:
editor.clearFocus()
editor.setFocus()
@property
def _stylesheet(self):
css = qstylizer.style.StyleSheet()
css.QSplitter.setValues(
background=QStylePalette.COLOR_BACKGROUND_1
)
return css.toString()
class EditorWidget(QSplitter):
CONF_SECTION = 'editor'
def __init__(self, parent, plugin, menu_actions):
QSplitter.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
statusbar = parent.statusBar() # Create a status bar
self.vcs_status = VCSStatus(self)
self.cursorpos_status = CursorPositionStatus(self)
self.encoding_status = EncodingStatus(self)
self.eol_status = EOLStatus(self)
self.readwrite_status = ReadWriteStatus(self)
statusbar.insertPermanentWidget(0, self.readwrite_status)
statusbar.insertPermanentWidget(0, self.eol_status)
statusbar.insertPermanentWidget(0, self.encoding_status)
statusbar.insertPermanentWidget(0, self.cursorpos_status)
statusbar.insertPermanentWidget(0, self.vcs_status)
self.editorstacks = []
self.plugin = plugin
self.find_widget = FindReplace(self, enable_replace=True)
self.plugin.register_widget_shortcuts(self.find_widget)
self.find_widget.hide()
# TODO: Check this initialization once the editor is migrated to the
# new API
self.outlineexplorer = OutlineExplorerWidget(
'outline_explorer',
plugin,
self,
context=f'editor_window_{str(id(self))}'
)
self.outlineexplorer.edit_goto.connect(
lambda filenames, goto, word:
plugin.load(filenames=filenames, goto=goto, word=word,
editorwindow=self.parent()))
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editorsplitter = EditorSplitter(self, plugin, menu_actions,
register_editorstack_cb=self.register_editorstack,
unregister_editorstack_cb=self.unregister_editorstack)
self.editorsplitter = editorsplitter
editor_layout.addWidget(editorsplitter)
editor_layout.addWidget(self.find_widget)
splitter = QSplitter(self)
splitter.setContentsMargins(0, 0, 0, 0)
splitter.addWidget(editor_widgets)
splitter.addWidget(self.outlineexplorer)
splitter.setStretchFactor(0, 5)
splitter.setStretchFactor(1, 1)
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
logger.debug("EditorWidget.register_editorstack: %r" % editorstack)
self.__print_editorstacks()
self.plugin.last_focused_editorstack[self.parent()] = editorstack
editorstack.set_closable(len(self.editorstacks) > 1)
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.update_readonly)
editorstack.encoding_changed.connect(
self.encoding_status.update_encoding)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.update_cursor_position)
editorstack.sig_refresh_eol_chars.connect(self.eol_status.update_eol)
self.plugin.register_editorstack(editorstack)
def __print_editorstacks(self):
logger.debug("%d editorstack(s) in editorwidget:" %
len(self.editorstacks))
for edst in self.editorstacks:
logger.debug(" %r" % edst)
def unregister_editorstack(self, editorstack):
logger.debug("EditorWidget.unregister_editorstack: %r" % editorstack)
self.plugin.unregister_editorstack(editorstack)
self.editorstacks.pop(self.editorstacks.index(editorstack))
self.__print_editorstacks()
class EditorMainWindow(QMainWindow):
def __init__(self, plugin, menu_actions, toolbar_list, menu_list):
QMainWindow.__init__(self)
self.setAttribute(Qt.WA_DeleteOnClose)
self.plugin = plugin
self.window_size = None
self.editorwidget = EditorWidget(self, plugin, menu_actions)
self.setCentralWidget(self.editorwidget)
# Setting interface theme
self.setStyleSheet(str(APP_STYLESHEET))
# Give focus to current editor to update/show all status bar widgets
editorstack = self.editorwidget.editorsplitter.editorstack
editor = editorstack.get_current_editor()
if editor is not None:
editor.setFocus()
self.setWindowTitle("Spyder - %s" % plugin.windowTitle())
self.setWindowIcon(plugin.windowIcon())
if toolbar_list:
self.toolbars = []
for title, object_name, actions in toolbar_list:
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setStyleSheet(str(APP_TOOLBAR_STYLESHEET))
toolbar.setMovable(False)
add_actions(toolbar, actions)
self.toolbars.append(toolbar)
if menu_list:
quit_action = create_action(self, _("Close window"),
icon=ima.icon("close_pane"),
tip=_("Close this window"),
triggered=self.close)
self.menus = []
for index, (title, actions) in enumerate(menu_list):
menu = self.menuBar().addMenu(title)
if index == 0:
# File menu
add_actions(menu, actions+[None, quit_action])
else:
add_actions(menu, actions)
self.menus.append(menu)
def get_toolbars(self):
"""Get the toolbars."""
return self.toolbars
def add_toolbars_to_menu(self, menu_title, actions):
"""Add toolbars to a menu."""
        # Index 6 is the position of the View menu in the menus list
        # built in plugins/editor.py (see setup_other_windows).
view_menu = self.menus[6]
view_menu.setObjectName('checkbox-padding')
if actions == self.toolbars and view_menu:
toolbars = []
for toolbar in self.toolbars:
action = toolbar.toggleViewAction()
toolbars.append(action)
add_actions(view_menu, toolbars)
def load_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbars:
dic[toolbar.objectName()] = toolbar
toolbar.toggleViewAction().setChecked(False)
toolbar.setVisible(False)
for name in toolbars_names:
if name in dic:
dic[name].toggleViewAction().setChecked(True)
dic[name].setVisible(True)
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.isFullScreen():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
def closeEvent(self, event):
"""Reimplement Qt method"""
if self.plugin._undocked_window is not None:
self.plugin.dockwidget.setWidget(self.plugin)
self.plugin.dockwidget.setVisible(True)
self.plugin.switch_to_plugin()
QMainWindow.closeEvent(self, event)
if self.plugin._undocked_window is not None:
self.plugin._undocked_window = None
def get_layout_settings(self):
"""Return layout state"""
splitsettings = self.editorwidget.editorsplitter.get_layout_settings()
return dict(size=(self.window_size.width(), self.window_size.height()),
pos=(self.pos().x(), self.pos().y()),
is_maximized=self.isMaximized(),
is_fullscreen=self.isFullScreen(),
hexstate=qbytearray_to_str(self.saveState()),
splitsettings=splitsettings)
def set_layout_settings(self, settings):
"""Restore layout state"""
size = settings.get('size')
if size is not None:
self.resize( QSize(*size) )
self.window_size = self.size()
pos = settings.get('pos')
if pos is not None:
self.move( QPoint(*pos) )
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
if settings.get('is_maximized'):
self.setWindowState(Qt.WindowMaximized)
if settings.get('is_fullscreen'):
self.setWindowState(Qt.WindowFullScreen)
splitsettings = settings.get('splitsettings')
if splitsettings is not None:
self.editorwidget.editorsplitter.set_layout_settings(splitsettings)
class EditorPluginExample(QSplitter):
def __init__(self):
QSplitter.__init__(self)
self._dock_action = None
self._undock_action = None
self._close_plugin_action = None
self._undocked_window = None
menu_actions = []
self.editorstacks = []
self.editorwindows = []
self.last_focused_editorstack = {} # fake
self.find_widget = FindReplace(self, enable_replace=True)
self.outlineexplorer = OutlineExplorerWidget(None, self, self)
self.outlineexplorer.edit_goto.connect(self.go_to_file)
self.editor_splitter = EditorSplitter(self, self, menu_actions,
first=True)
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editor_layout.addWidget(self.editor_splitter)
editor_layout.addWidget(self.find_widget)
self.setContentsMargins(0, 0, 0, 0)
self.addWidget(editor_widgets)
self.addWidget(self.outlineexplorer)
self.setStretchFactor(0, 5)
self.setStretchFactor(1, 1)
self.menu_actions = menu_actions
self.toolbar_list = None
self.menu_list = None
self.setup_window([], [])
def go_to_file(self, fname, lineno, text='', start_column=None):
editorstack = self.editorstacks[0]
editorstack.set_current_filename(to_text_string(fname))
editor = editorstack.get_current_editor()
editor.go_to_line(lineno, word=text, start_column=start_column)
def closeEvent(self, event):
for win in self.editorwindows[:]:
win.close()
logger.debug("%d: %r" % (len(self.editorwindows), self.editorwindows))
logger.debug("%d: %r" % (len(self.editorstacks), self.editorstacks))
event.accept()
def load(self, fname):
QApplication.processEvents()
editorstack = self.editorstacks[0]
editorstack.load(fname)
editorstack.analyze_script()
def register_editorstack(self, editorstack):
logger.debug("FakePlugin.register_editorstack: %r" % editorstack)
self.editorstacks.append(editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
editorstack.set_closable(len(self.editorstacks) > 1)
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
oe_btn = create_toolbutton(self)
editorstack.add_corner_widgets_to_tabbar([5, oe_btn])
action = QAction(self)
editorstack.set_io_actions(action, action, action, action)
font = QFont("Courier New")
font.setPointSize(10)
editorstack.set_default_font(font, color_scheme='Spyder')
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.plugin_load.connect(self.load)
def unregister_editorstack(self, editorstack):
logger.debug("FakePlugin.unregister_editorstack: %r" % editorstack)
self.editorstacks.pop(self.editorstacks.index(editorstack))
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
def setup_window(self, toolbar_list, menu_list):
self.toolbar_list = toolbar_list
self.menu_list = menu_list
def create_new_window(self):
        window = EditorMainWindow(self, self.menu_actions,
                                  self.toolbar_list, self.menu_list)
window.resize(self.size())
window.show()
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
def register_editorwindow(self, window):
logger.debug("register_editorwindowQObject*: %r" % window)
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
logger.debug("unregister_editorwindow: %r" % window)
self.editorwindows.pop(self.editorwindows.index(window))
def get_focus_widget(self):
pass
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
# This method is never called in this plugin example. It's here only
# to show how to use the file_saved signal (see above).
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
    # This method is never called in this plugin example. It's here only
    # to show how to use the file_renamed_in_data signal (see above).
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
def register_widget_shortcuts(self, widget):
"""Fake!"""
pass
def get_color_scheme(self):
pass
def test():
from spyder.utils.qthelpers import qapplication
from spyder.config.base import get_module_path
spyder_dir = get_module_path('spyder')
app = qapplication(test_time=8)
test = EditorPluginExample()
test.resize(900, 700)
test.show()
import time
t0 = time.time()
test.load(osp.join(spyder_dir, "widgets", "collectionseditor.py"))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"editor.py"))
test.load(osp.join(spyder_dir, "plugins", "explorer", "widgets",
'explorer.py'))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"codeeditor.py"))
print("Elapsed time: %.3f s" % (time.time()-t0)) # spyder: test-skip
sys.exit(app.exec_())
if __name__ == "__main__":
test()
|
py | 1a556c699ec5d8bea1b452edb6dc56e5f07b597a | from typing import Any
from pandas_profiling.report.presentation.abstract.item_renderer import ItemRenderer
class Table(ItemRenderer):
def __init__(self, rows, name=None, **kwargs):
super().__init__("table", {"rows": rows, "name": name}, **kwargs)
def render(self) -> Any:
raise NotImplementedError()
|
py | 1a556ccf366f7b1ef6fd14f97703797cee86d8be | import collections
import cv2
import face_recognition.detect_face as detect_face
import face_recognition.facenet as facenet
import math
import numpy as np
import os
import pickle
import sys
import tensorflow as tf
import time
import urllib.request as ur
from datetime import datetime
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from timeit import default_timer as timer
from sklearn.svm import SVC
# path to the user setting file
SETTING_PATH = 'setting.txt'
# read user settings from the setting text file
setting_file = open(SETTING_PATH, 'r')
# define the IP webcam to be used as the input
setting_file.readline()
URL = str(setting_file.readline())
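# The settings file is read as label/value pairs: each descriptive label line
# is skipped and the value is taken from the following line. Illustrative
# sketch of setting.txt (the label wording and URL are assumptions):
#   IP webcam URL:
#   http://192.168.0.10:8080/shot.jpg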
# program starting time
start_time = datetime.now()
# path to the object detection log file, making sure there are no invalid characters in the file name
OBJECT_DETECTION_LOG_PATH = 'object_detection/object-' + str(start_time.date()) + '-' + str(start_time.time()).replace(':', '-') + '.txt'
# path to the face recognition log file, making sure there are no invalid characters in the file name
FACE_RECOGNITION_LOG_PATH = 'face_recognition/face-' + str(start_time.date()) + '-' + str(start_time.time()).replace(':', '-') + '.txt'
# variables for calculating fps
fps_count_started = False
fps_count_start_time = 0.0
fps_count_end_time = 0.0
fps_count_num_of_frames = 0
"""
Object detection.
Variables.
"""
object_detection_initialised = False
object_detection_on = False
# path to the user setting file for object detection
OBJECT_DETECTION_SETTING_PATH = 'object_detection/object_detection_setting.txt'
# path to object detection models
OBJECT_DETECTION_MODEL_PATH = 'models/object_detection/'
# user setting
# read user settings from the setting text file
object_detection_setting_file = open(OBJECT_DETECTION_SETTING_PATH, 'r')
# define the object detection model to be used
object_detection_setting_file.readline()
object_detection_model_name = object_detection_setting_file.readline()
# get rid of the line break at the end of the line just read
object_detection_model_name = object_detection_model_name.rstrip('\n')
# path to the frozen detection graph, which is the actual model used to perform object detection
OBJECT_DETECTION_CKPT_PATH = OBJECT_DETECTION_MODEL_PATH + object_detection_model_name + '/frozen_inference_graph.pb'
# path to the label map consisting of labels to be added to corresponding detection boxes
OBJECT_DETECTION_LABELS_PATH = OBJECT_DETECTION_MODEL_PATH + object_detection_model_name + '/oid_v5_label_map_customised.pbtxt'
# define the max number of classes of objects to be detected
object_detection_setting_file.readline()
max_num_classes_object = int(object_detection_setting_file.readline())
# define which classes of objects to be detected
selected_classes_object = []
object_detection_setting_file.readline()
for i in range(max_num_classes_object):
object_detection_setting_file.readline()
class_setting = int(object_detection_setting_file.readline())
if class_setting == 1:
selected_classes_object.append(i+1)
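# Illustrative sketch of object_detection_setting.txt, following the same
# label/value pattern (the labels, model name and flags below are assumptions):
#   Model name:
#   ssd_mobilenet_v2
#   Max number of classes:
#   2
#   Selected classes (1 = detect, 0 = ignore):
#   Class 1:
#   1
#   Class 2:
#   0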
label_map_object = label_map_util.load_labelmap(OBJECT_DETECTION_LABELS_PATH)
categories_object = label_map_util.convert_label_map_to_categories(label_map_object, max_num_classes=max_num_classes_object, use_display_name=True)
category_index_object = label_map_util.create_category_index(categories_object)
# load the object detection model into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(OBJECT_DETECTION_CKPT_PATH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess_object = tf.Session(graph=detection_graph)
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
"""
Face recognition.
Variables.
"""
face_recognition_on = False
# path to the user setting file for face recognition
FACE_RECOGNITION_SETTING_PATH = 'face_recognition/face_recognition_setting.txt'
# path to face recognition models.
FACE_RECOGNITION_MODEL_PATH = 'models/face_recognition/'
# path to the model used to perform face detection.
FACE_RECOGNITION_CKPT_PATH = FACE_RECOGNITION_MODEL_PATH + '20180402-114759.pb'
# path to the model used to perform face recognition.
FACE_RECOGNITION_CLASSIFIER_PATH = FACE_RECOGNITION_MODEL_PATH + 'my_classifier.pkl'
# path to the label map consisting of labels to be added to corresponding detection boxes.
FACE_RECOGNITION_LABELS_PATH = FACE_RECOGNITION_MODEL_PATH + 'facenet_label_map.pbtxt'
# user setting
# read user settings from the setting text file
face_recognition_setting_file = open(FACE_RECOGNITION_SETTING_PATH, 'r')
# define the max number of classes of faces to be detected
face_recognition_setting_file.readline()
max_num_classes_face = int(face_recognition_setting_file.readline())
# define the size of the input to be resized to
face_recognition_setting_file.readline()
input_image_size_face = int(face_recognition_setting_file.readline())
# define the minimum face size to be detected
face_recognition_setting_file.readline()
minsize_face = int(face_recognition_setting_file.readline())
# define the three steps face detection threshold
threshold_detection_face = [0.0, 0.0, 0.0]
face_recognition_setting_file.readline()
for i in range(3):
threshold_detection_face[i] = float(face_recognition_setting_file.readline())
# define the factor used to create a scaling pyramid of face sizes to detect in the image
face_recognition_setting_file.readline()
factor_face = float(face_recognition_setting_file.readline())
# define the face recognition threshold
face_recognition_setting_file.readline()
threshold_recognition_face = float(face_recognition_setting_file.readline())
label_map_face = label_map_util.load_labelmap(FACE_RECOGNITION_LABELS_PATH)
categories_face = label_map_util.convert_label_map_to_categories(label_map_face, max_num_classes=max_num_classes_face, use_display_name=True)
category_index_face = label_map_util.create_category_index(categories_face)
# load The Custom Classifier
with open(FACE_RECOGNITION_CLASSIFIER_PATH, 'rb') as file:
model, class_names = pickle.load(file)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess_face = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
# load the model
facenet.load_model(FACE_RECOGNITION_CKPT_PATH)
# get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
pnet, rnet, onet = detect_face.create_mtcnn(sess_face, "./face_recognition")
while(True):
image = ur.urlopen(URL)
image_array = np.array(bytearray(image.read()),dtype=np.uint8)
frame = cv2.imdecode(image_array,-1)
# dimension of the input image
frame_shape = frame.shape
"""
Object detection.
Runtime.
"""
if object_detection_initialised == False or object_detection_on == True:
frame_expanded = np.expand_dims(frame, axis=0)
(boxes_object, scores_object, classes_object, num_object) = sess_object.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
if object_detection_initialised == True:
num_of_objects_detected = int(num_object[0])
for i in range(0, num_of_objects_detected):
# only draw objects of selected classes
if int(classes_object[0][i]) not in selected_classes_object:
boxes_object[0][i] = [float(0), float(0), float(0), float(0)]
scores_object[0][i] = float(0)
classes_object[0][i] = float(1)
num_object[0] = num_object[0] - 1
# report objects of selected classes once detected
else:
with open(OBJECT_DETECTION_LOG_PATH, 'a') as log_file:
log_file.write('Time: ' + str(datetime.now()) + '\tCategory: ' + str(int(classes_object[0][i])) + '\tScore: ' + str(scores_object[0][i]) + '\n')
# visualise the detection results.
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes_object),
np.squeeze(classes_object).astype(np.int32),
np.squeeze(scores_object),
category_index_object,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# only run initialisation once.
if object_detection_initialised == False:
object_detection_initialised = True
"""
Face recognition.
Runtime.
"""
if face_recognition_on == True:
bounding_boxes_face, _ = detect_face.detect_face(frame, minsize_face, pnet, rnet, onet, threshold_detection_face, factor_face)
faces_found = bounding_boxes_face.shape[0]
boxes_face = [[[float(0),float(0),float(0),float(0)]] * (faces_found+1)]
scores_face = [[float(0)] * (faces_found+1)]
classes_face = [[float(0)] * (faces_found+1)]
try:
if faces_found > 0:
det_face = bounding_boxes_face[:, 0:4]
bb_face = np.zeros((faces_found, 4), dtype=np.int32)
for i in range(faces_found):
bb_face[i][0] = det_face[i][0]
bb_face[i][1] = det_face[i][1]
bb_face[i][2] = det_face[i][2]
bb_face[i][3] = det_face[i][3]
cropped_face = frame[bb_face[i][1]:bb_face[i][3], bb_face[i][0]:bb_face[i][2], :]
scaled_face = cv2.resize(cropped_face, (input_image_size_face, input_image_size_face), interpolation=cv2.INTER_CUBIC)
scaled_face = facenet.prewhiten(scaled_face)
reshaped_face = scaled_face.reshape(-1, input_image_size_face, input_image_size_face, 3)
embed_array_face = sess_face.run(embeddings, feed_dict={images_placeholder: reshaped_face, phase_train_placeholder: False})
predictions_face = model.predict_proba(embed_array_face)
best_class_indices_face = np.argmax(predictions_face, axis=1)
best_class_score_face = predictions_face[np.arange(len(best_class_indices_face)), best_class_indices_face]
best_name_face = class_names[best_class_indices_face[0]]
# get relative coordinates of detection boxes
boxes_face[0][i] = [float(bb_face[i][1])/frame_shape[0], float(bb_face[i][0])/frame_shape[1], float(bb_face[i][3])/frame_shape[0], float(bb_face[i][2])/frame_shape[1]]
# the confidence score of a face is the one of its best match
scores_face[0][i] = float(best_class_score_face)
# a face is considered being recognised as someone when the best match has a score higher than the threshold
if best_class_score_face > threshold_recognition_face:
classes_face[0][i] = float(best_class_indices_face[0] + 2)
# otherwise the face detected is considered unknown
else:
classes_face[0][i] = float(1)
# report unknown faces once detected
with open(FACE_RECOGNITION_LOG_PATH, 'a') as log_file:
log_file.write('Time: ' + str(datetime.now()) + '\tScore: ' + str(scores_face[0][i]) + '\n')
# visualise the detection and recognition results.
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes_face),
np.squeeze(classes_face).astype(np.int32),
np.squeeze(scores_face),
category_index_face,
use_normalized_coordinates=True,
line_thickness=8)
        except Exception:
            # Ignore per-frame face recognition errors and keep streaming.
            pass
# display the result image
cv2.imshow('Smart Surveillance Camera', frame)
# increment number of frames being processed by one for calculating FPS
fps_count_num_of_frames = fps_count_num_of_frames + 1
# handle user input
key = cv2.waitKey(1)
# press 'q' to exit
if key == ord('q'):
break
# press 'o' to switch object detection on and off
elif key == ord('o'):
object_detection_on = not object_detection_on
# press 'f' to switch face recognition on and off
elif key == ord('f'):
face_recognition_on = not face_recognition_on
# press 'p' to switch fps calculation on and off
elif key == ord('p'):
# initialise and start the fps calculation if it's not already started
if fps_count_started == False:
fps_count_num_of_frames = 0
fps_count_start_time = timer()
fps_count_started = True
# stop, calculate and display the fps if it's already started
else:
fps_count_started = False
fps_count_end_time = timer()
fps = fps_count_num_of_frames / (fps_count_end_time - fps_count_start_time)
print('FPS:' + str(fps))
cv2.destroyAllWindows() |
py | 1a556d36db5162742a0ede6b85e3de44d8533164 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#####################################
### CIS SLOT FILLING SYSTEM ####
### 2014-2015 ####
### Author: Heike Adel ####
#####################################
from __future__ import unicode_literals
import codecs, sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
import re
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
logger.setLevel(logging.INFO)
def getMatch(candidate, patterns):
candidate = candidate.lower()
maxMatch = 0.0
for p in patterns:
if p.match(candidate):
logger.debug("found matching candidate: " + candidate + "; reg ex: " + p.pattern)
maxMatch = 1.0
return maxMatch
return maxMatch
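# Illustrative example (the pattern and candidate strings are assumptions, not
# taken from the original pattern files): candidates contain the placeholder
# tokens <name> and <filler> and are matched against pre-compiled regexes, e.g.
#   patterns = [re.compile(ur'.* <name> was born in <filler> .*')]
#   getMatch(' <name> was born in <filler> ', patterns)  # -> 1.0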
def match(slot2candidates, patternsPerSlot, slot2inverse):
slot2candidatesAndFillersAndConfidence = {}
for slot in slot2candidates:
logger.info("Pattern matching: " + slot)
patterns = []
if slot in slot2inverse:
slot_eval = slot2inverse[slot]
logger.info("using patterns of slot: " + slot_eval)
else:
slot_eval = slot
if slot_eval in patternsPerSlot:
patterns = patternsPerSlot[slot_eval]
candidateAndFillerAndOffsetList = slot2candidates[slot]
if len(candidateAndFillerAndOffsetList) == 0:
continue
for cf in candidateAndFillerAndOffsetList:
c = cf[1]
if slot in slot2inverse:
# reverse name and filler
                c_tmp = re.sub(ur' \<name\> ', ' NAME ', ' ' + c + ' ', flags=re.UNICODE)
                c_tmp = re.sub(ur' \<filler\> ', ' <name> ', c_tmp, flags=re.UNICODE)
                c_tmp = re.sub(ur' NAME ', ' <filler> ', c_tmp, flags=re.UNICODE)
c = c_tmp.strip()
# match c against patterns for slot
matchResult = getMatch(c, patterns)
if not slot in slot2candidatesAndFillersAndConfidence:
slot2candidatesAndFillersAndConfidence[slot] = []
slot2candidatesAndFillersAndConfidence[slot].append([cf[0], cf[1], matchResult, cf[2], cf[3], cf[4], cf[5]])
return slot2candidatesAndFillersAndConfidence
|
py | 1a556e434d8b423f47764d4c23df4ff90e34c91c | from .utils import methods
DEBUG_MODE = False
class Router():
def __init__(self):
self.routes = {}
self.num_middleware = 0
for method in methods:
self._generate_add_route_method(method)
# helper method for adding routes.
# if middleware is provided, mount the middleware at the path.
# if not, return a decorator that mounts the function as middleware at path.
def _add_route(self, path, middleware=None, method='*'):
if path not in self.routes:
self.routes[path] = {}
method = method.upper()
if method not in self.routes[path]:
self.routes[path][method] = []
# the actual method that mounts the middleware to the route and method.
# may be returned as a decorator.
def add_route(f):
f.index = self.num_middleware
self.num_middleware += 1
self.routes[path][method].append(f)
if DEBUG_MODE:
print('mounting middleware %s at path %s' % (middleware, path))
if middleware:
for m in middleware:
add_route(m)
else:
return add_route
# generates an add_route method for EVERY HTTP method.
# this is for app.get, app.post, app.trace, app.mkactivity, etc.
def _generate_add_route_method(self, method):
add_route = self._add_route
if DEBUG_MODE:
print('registering app.%s' % (method))
def add_route_method(path='*', *middleware):
if not isinstance(path, str):
middleware = [path] + [m for m in middleware]
path = '*'
return add_route(path, middleware, method)
        # temporary solution: attach the generated method directly to the instance
setattr(self, method, add_route_method)
return getattr(self, method)
# mount the middleware for all requests to the path.
def use(self, path='*', *middleware):
# path can be a piece of middleware
# if so, append it to middleware
if not isinstance(path, str):
middleware = [path] + [m for m in middleware]
path = '*'
return self._add_route(path, middleware)
def route(self, path='*'):
pass
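# Minimal usage sketch (illustrative; the handler signature and the middleware
# function are assumptions, not defined in this module):
#   app = Router()
#   @app.get('/hello')
#   def hello(req, res):
#       res.send('hello world')
#   app.use(log_requests)  # mounted at '*' for every HTTP method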
|
py | 1a556e4c15534bf7bf47df7cd9daf05f3c8f1f76 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.tf_sequence_example_decoder."""
import numpy as np
import tensorflow as tf
from lstm_object_detection.inputs import tf_sequence_example_decoder
from object_detection.core import standard_fields as fields
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
class TFSequenceExampleDecoderTest(tf.test.TestCase):
"""Tests for sequence example decoder."""
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_decoded
def testDecodeJpegImageAndBoundingBox(self):
"""Test if the decoder can correctly decode the image and bounding box.
    A random image tensor is first encoded as a JPEG and decoded back to form
    the groundtruth image. The encoded image is also passed through a sequence
    example and decoded from it. The groundtruth image and the decoded image
    are expected to be equal. Similar tests are applied to labels such as the
    bounding box.
"""
image_tensor = np.random.randint(256, size=(256, 256, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
decoded_jpeg = self._DecodeImage(encoded_jpeg)
sequence_example = example_pb2.SequenceExample(
feature_lists=feature_pb2.FeatureLists(
feature_list={
'image/encoded':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[encoded_jpeg])),
]),
'bbox/xmin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/xmax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
'bbox/ymin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/ymax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
})).SerializeToString()
example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(sequence_example))
# Test tensor dict image dimension.
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.image].get_shape().as_list()),
[None, None, None, 3])
with self.test_session() as sess:
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image])
tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze(
tensor_dict[fields.InputDataFields.groundtruth_boxes])
tensor_dict = sess.run(tensor_dict)
# Test decoded image.
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
# Test decoded bounding box.
self.assertAllEqual([0.0, 0.0, 1.0, 1.0],
tensor_dict[fields.InputDataFields.groundtruth_boxes])
if __name__ == '__main__':
tf.test.main()
|
py | 1a556ecdc18c4bd59fb77f4419a2a5a880d9defc | """Module for intervention access strategy functions
Determining whether or not to provide access to a given intervention
for a user is occasionally tricky business. By way of the access_strategies
property on all interventions, one can add additional criteria by defining a
function here (or elsewhere) and adding it to the desired intervention.
function signature: takes named parameters (intervention, user) and returns
a boolean - True grants access (and short circuits further access tests),
False does not.
NB - several functions are closures returning access_strategy functions with
the parameters given to the closures.
"""
from datetime import datetime
import json
import sys
from flask import current_app, url_for
from flask_babel import gettext as _
from sqlalchemy import UniqueConstraint, and_
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from ..database import db
from ..date_tools import localize_datetime
from ..system_uri import DECISION_SUPPORT_GROUP, TRUENTH_CLINICAL_CODE_SYSTEM
from .clinical_constants import CC
from .codeable_concept import CodeableConcept
from .coding import Coding
from .identifier import Identifier
from .intervention import INTERVENTION, Intervention, UserIntervention
from .organization import Organization, OrganizationIdentifier, OrgTree
from .overall_status import OverallStatus
from .procedure_codes import known_treatment_started
from .role import Role
# ##
# # functions implementing the 'access_strategy' API
# ##
__log_strats = None
def _log(**kwargs):
"""Wrapper to log all the access lookup results within"""
# get config value if haven't yet
global __log_strats
if __log_strats is None:
__log_strats = current_app.config.get("LOG_DEBUG_STRATS", False)
if __log_strats:
msg = kwargs.get('message', '') # optional
current_app.logger.debug(
"{func_name} returning {result} for {user} on intervention "
"{intervention}".format(**kwargs) + msg)
def limit_by_clinic_w_id(
identifier_value, identifier_system=DECISION_SUPPORT_GROUP,
combinator='any', include_children=True):
"""Requires user is associated with {any,all} clinics with identifier
    :param identifier_value: value string for identifier associated with org(s)
:param identifier_system: system string for identifier, defaults to
DECISION_SUPPORT_GROUP
:param combinator: determines if the user must be in 'any' (default) or
'all' of the clinics in the given list. NB combining 'all' with
    include_children=True would mean all orgs in the list AND all children of
all orgs in list must be associated with the user for a true result.
:param include_children: include children in the organization tree if
set (default), otherwise, only include the organizations in the list
"""
try:
identifier = Identifier.query.filter_by(
_value=identifier_value, system=identifier_system).one()
except NoResultFound:
raise ValueError(
"strategy names non-existing Identifier({}, {})".format(
identifier_value, identifier_system))
orgs = Organization.query.join(OrganizationIdentifier).filter(and_(
Organization.id == OrganizationIdentifier.organization_id,
OrganizationIdentifier.identifier_id == identifier.id)).all()
if include_children:
ot = OrgTree()
required = {o for og in orgs for o in ot.here_and_below_id(og.id)}
else:
required = set((o.id for o in orgs))
if combinator not in ('any', 'all'):
raise ValueError("unknown value {} for combinator, must be any or all")
def user_registered_with_all_clinics(intervention, user):
has = set((o.id for o in user.organizations))
if required.intersection(has) == required:
_log(result=True, func_name='limit_by_clinic_list', user=user,
intervention=intervention.name)
return True
def user_registered_with_any_clinics(intervention, user):
has = set((o.id for o in user.organizations))
if not required.isdisjoint(has):
_log(result=True, func_name='limit_by_clinic_list', user=user,
intervention=intervention.name)
return True
return (
user_registered_with_all_clinics if combinator == 'all'
else user_registered_with_any_clinics)
def not_in_clinic_w_id(
identifier_value, identifier_system=DECISION_SUPPORT_GROUP,
include_children=True):
"""Requires user isn't associated with any clinic in the list
    :param identifier_value: value string for identifier associated with org(s)
:param identifier_system: system string for identifier, defaults to
DECISION_SUPPORT_GROUP
:param include_children: include children in the organization tree if
set (default), otherwise, only include the organizations directly
associated with the identifier
"""
try:
identifier = Identifier.query.filter_by(
_value=identifier_value, system=identifier_system).one()
except NoResultFound:
raise ValueError(
"strategy names non-existing Identifier({}, {})".format(
identifier_value, identifier_system))
orgs = Organization.query.join(OrganizationIdentifier).filter(and_(
Organization.id == OrganizationIdentifier.organization_id,
OrganizationIdentifier.identifier_id == identifier.id)).all()
if include_children:
ot = OrgTree()
dont_want = {o for og in orgs for o in ot.here_and_below_id(og.id)}
else:
dont_want = set((o.id for o in orgs))
def user_not_registered_with_clinics(intervention, user):
has = set((o.id for o in user.organizations))
if has.isdisjoint(dont_want):
_log(result=True, func_name='not_in_clinic_list', user=user,
intervention=intervention.name)
return True
return user_not_registered_with_clinics
def in_role_list(role_list):
"""Requires user is associated with any role in the list"""
roles = []
for role in role_list:
try:
role = Role.query.filter_by(
name=role).one()
roles.append(role)
except NoResultFound:
raise ValueError("role '{}' not found".format(role))
except MultipleResultsFound:
raise ValueError("more than one role named '{}'"
"found".format(role))
required = set(roles)
def user_has_given_role(intervention, user):
has = set(user.roles)
if has.intersection(required):
_log(result=True, func_name='in_role_list', user=user,
intervention=intervention.name)
return True
return user_has_given_role
def not_in_role_list(role_list):
"""Requires user isn't associated with any role in the list"""
roles = []
for role in role_list:
try:
role = Role.query.filter_by(
name=role).one()
roles.append(role)
except NoResultFound:
raise ValueError("role '{}' not found".format(role))
except MultipleResultsFound:
raise ValueError("more than one role named '{}'"
"found".format(role))
dont_want = set(roles)
def user_not_given_role(intervention, user):
has = set(user.roles)
if has.isdisjoint(dont_want):
_log(result=True, func_name='not_in_role_list', user=user,
intervention=intervention.name)
return True
return user_not_given_role
def allow_if_not_in_intervention(intervention_name):
"""Strategy API checks user does not belong to named intervention"""
exclusive_intervention = getattr(INTERVENTION, intervention_name)
def user_not_in_intervention(intervention, user):
if not exclusive_intervention.quick_access_check(user):
_log(result=True, func_name='user_not_in_intervention', user=user,
intervention=intervention.name)
return True
return user_not_in_intervention
def tx_begun(boolean_value):
"""Returns strategy function testing if user is known to have started Tx
:param boolean_value: true for known treatment started (i.e. procedure
indicating tx has begun), false to confirm a user doesn't have
a procedure indicating tx has begun
"""
if boolean_value == 'true':
check_func = known_treatment_started
elif boolean_value == 'false':
def check_func(u):
return not known_treatment_started(u)
else:
raise ValueError("expected 'true' or 'false' for boolean_value")
def user_has_desired_tx(intervention, user):
return check_func(user)
return user_has_desired_tx
def observation_check(display, boolean_value, invert_logic=False):
"""Returns strategy function for a particular observation and logic value
:param display: observation coding.display from
TRUENTH_CLINICAL_CODE_SYSTEM
:param boolean_value: ValueQuantity boolean true or false expected
:param invert_logic: Effective binary ``not`` to apply to test. If set,
will return True only if given observation with boolean_value is NOT
defined for user
NB a history of observations is maintained, with the most recent taking
precedence.
"""
try:
coding = Coding.query.filter_by(
system=TRUENTH_CLINICAL_CODE_SYSTEM, display=display).one()
except NoResultFound:
raise ValueError("coding.display '{}' not found".format(display))
try:
cc = CodeableConcept.query.filter(
CodeableConcept.codings.contains(coding)).one()
except NoResultFound:
raise ValueError("codeable_concept'{}' not found".format(coding))
if boolean_value == 'true':
vq = CC.TRUE_VALUE
elif boolean_value == 'false':
vq = CC.FALSE_VALUE
else:
raise ValueError("boolean_value must be 'true' or 'false'")
def user_has_matching_observation(intervention, user):
value, status = user.fetch_value_status_for_concept(
codeable_concept=cc)
if value == vq:
_log(result=True, func_name='observation_check', user=user,
intervention=intervention.name,
message='{}:{}'.format(coding.display, vq.value))
return True if not invert_logic else False
return False if not invert_logic else True
return user_has_matching_observation
def combine_strategies(**kwargs):
"""Make multiple strategies into a single statement
The nature of the access lookup returns True for the first
success in the list of strategies for an intervention. Use
this method to chain multiple strategies together into a logical **and**
fashion rather than the built in locical **or**.
NB - kwargs must have keys such as 'strategy_n', 'strategy_n_kwargs'
for every 'n' strategies being combined, starting at 1. Set arbitrary
limit of 6 strategies for time being.
Nested strategies may actually want a logical 'OR'. Optional kwarg
`combinator` takes values {'any', 'all'} - default 'all' means all
strategies must evaluate true. 'any' means just one must eval true for a
positive result.
"""
strats = []
arbitrary_limit = 7
if 'strategy_{}'.format(arbitrary_limit) in kwargs:
raise ValueError(
"only supporting %d combined strategies", arbitrary_limit - 1)
for i in range(1, arbitrary_limit):
if 'strategy_{}'.format(i) not in kwargs:
break
func_name = kwargs['strategy_{}'.format(i)]
func_kwargs = {}
for argset in kwargs['strategy_{}_kwargs'.format(i)]:
func_kwargs[argset['name']] = argset['value']
func = getattr(sys.modules[__name__], func_name)
strats.append(func(**func_kwargs))
def call_all_combined(intervention, user):
"""Returns True if ALL of the combined strategies return True"""
for strategy in strats:
if not strategy(intervention, user):
_log(
result=False, func_name='combine_strategies', user=user,
intervention=intervention.name)
return
# still here? effective AND passed as all returned true
_log(
result=True, func_name='combine_strategies', user=user,
intervention=intervention.name)
return True
def call_any_combined(intervention, user):
"""Returns True if ANY of the combined strategies return True"""
for strategy in strats:
if strategy(intervention, user):
_log(
result=True, func_name='combine_strategies', user=user,
intervention=intervention.name)
return True
# still here? effective ANY failed as none returned true
_log(
result=False, func_name='combine_strategies', user=user,
intervention=intervention.name)
return
combinator = kwargs.get('combinator', 'all')
if combinator == 'any':
return call_any_combined
elif combinator == 'all':
return call_all_combined
else:
raise ValueError("unrecognized value {} for `combinator`, "
"limited to {'any', 'all'}").format(combinator)
class AccessStrategy(db.Model):
"""ORM to persist access strategies on an intervention
The function_details field contains JSON defining which strategy to
use and how it should be instantiated by one of the closures implementing
the access_strategy interface. Said closures must be defined in this
module (a security measure to keep unsanitized code out).
"""
__tablename__ = 'access_strategies'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False)
description = db.Column(db.Text)
intervention_id = db.Column(
db.ForeignKey('interventions.id'), nullable=False)
rank = db.Column(db.Integer)
function_details = db.Column(JSONB, nullable=False)
__table_args__ = (UniqueConstraint('intervention_id', 'rank',
name='rank_per_intervention'),)
def __str__(self):
"""Log friendly string format"""
return (
"AccessStrategy: {0.name} {0.description} {0.rank}"
"{0.function_details}").format(self)
@classmethod
def from_json(cls, data):
strat = cls()
return strat.update_from_json(data)
def update_from_json(self, data):
try:
self.name = data['name']
if 'id' in data:
self.id = data['id']
if 'intervention_name' in data:
intervention = Intervention.query.filter_by(
name=data['intervention_name']).first()
if not intervention:
raise ValueError(
'Intervention not found {}. (NB: new interventions '
'require `seed -i` to import)'.format(
data['intervention_name']))
self.intervention_id = intervention.id
if 'description' in data:
self.description = data['description']
if 'rank' in data:
self.rank = data['rank']
self.function_details = json.dumps(data['function_details'])
# validate the given details by attempting to instantiate
self.instantiate()
except Exception as e:
raise ValueError("AccessStrategy instantiation error: {}".format(
e))
return self
def as_json(self):
"""Return self in JSON friendly dictionary"""
d = {
"name": self.name,
"resourceType": 'AccessStrategy'
}
d["function_details"] = (
json.loads(self.function_details) if self.function_details
else None)
d['intervention_name'] = (
Intervention.query.get(self.intervention_id).name
if self.intervention_id else None)
if self.id:
d['id'] = self.id
if self.rank:
d['rank'] = self.rank
if self.description:
d['description'] = self.description
return d
def instantiate(self):
"""Bring the serialized access strategy function to life
Using the JSON in self.function_details, instantiate the
function and return it ready to use.
"""
details = json.loads(self.function_details)
if 'function' not in details:
raise ValueError("'function' not found in function_details")
if 'kwargs' not in details:
raise ValueError("'kwargs' not found in function_details")
func_name = details['function']
# limit to this module
func = getattr(sys.modules[__name__], func_name)
kwargs = {}
for argset in details['kwargs']:
kwargs[argset['name']] = argset['value']
return func(**kwargs)
|
py | 1a556eda6f3177d9241ee9d325d299078bf7fee8 | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import os
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.fingerprint_strategy import DefaultFingerprintHashingMixin, FingerprintStrategy
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir_for
class PythonInterpreterFingerprintStrategy(DefaultFingerprintHashingMixin, FingerprintStrategy):
def __init__(self, python_setup):
self.python_setup = python_setup
def compute_fingerprint(self, python_target):
# Consider the target's compatibility requirements, and if those are missing then fall back
# to the global interpreter constraints. Only these two values can affect the selected interpreter.
hash_elements_for_target = sorted(
self.python_setup.compatibility_or_constraints(python_target.compatibility)
)
if not hash_elements_for_target:
return None
hasher = hashlib.sha1()
for element in hash_elements_for_target:
hasher.update(element.encode())
return hasher.hexdigest()
class SelectInterpreter(Task):
"""Select an Python interpreter that matches the constraints of all targets in the working
set."""
@classmethod
def implementation_version(cls):
# TODO(John Sirois): Fixup this task to use VTS results_dirs. Right now version bumps aren't
# effective in dealing with workdir data format changes.
return super().implementation_version() + [("SelectInterpreter", 4)]
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (PythonInterpreterCache,)
@classmethod
def product_types(cls):
return [PythonInterpreter]
@property
def _interpreter_cache(self):
return PythonInterpreterCache.global_instance()
def execute(self):
# NB: Downstream product consumers may need the selected interpreter for use with
# any type of importable Python target, including `PythonRequirementLibrary` targets
# (for use with the `repl` goal, for instance). For interpreter selection,
# we only care about targets with compatibility constraints.
python_tgts_and_reqs = self.context.targets(
lambda tgt: isinstance(tgt, (PythonTarget, PythonRequirementLibrary))
)
if not python_tgts_and_reqs:
return
python_tgts = [tgt for tgt in python_tgts_and_reqs if isinstance(tgt, PythonTarget)]
fs = PythonInterpreterFingerprintStrategy(python_setup=self._interpreter_cache.python_setup)
with self.invalidated(python_tgts, fingerprint_strategy=fs) as invalidation_check:
# If there are no constraints, meaning no global constraints nor compatibility requirements on
# the targets, we still go through the motions of selecting an interpreter, to prevent
# downstream tasks from having to check for this special case.
target_set_id = (
"no_constraints"
if not invalidation_check.all_vts
else VersionedTargetSet.from_versioned_targets(
invalidation_check.all_vts
).cache_key.hash
)
interpreter_path_file = self._interpreter_path_file(target_set_id)
interpreter = self._get_interpreter(interpreter_path_file, python_tgts)
self.context.products.register_data(PythonInterpreter, interpreter)
def _select_interpreter(self, interpreter_path_file, targets):
interpreter = self._interpreter_cache.select_interpreter_for_targets(targets)
safe_mkdir_for(interpreter_path_file)
with open(interpreter_path_file, "w") as outfile:
outfile.write(f"{interpreter.binary}\n")
return interpreter
def _interpreter_path_file(self, target_set_id):
# NB: The file name must be changed when its format changes. See the TODO in
# `implementation_version` above for more.
#
# The historical names to avoid:
# - interpreter.path
# - interpreter.info
return os.path.join(self.workdir, target_set_id, "interpreter.binary")
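    # The interpreter.binary file referenced above holds a single line with the
    # selected interpreter's path, e.g. /usr/bin/python3.6 (illustrative path).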
def _get_interpreter(self, interpreter_path_file, targets):
if os.path.exists(interpreter_path_file):
with open(interpreter_path_file, "r") as infile:
binary = infile.read().strip()
try:
return PythonInterpreter.from_binary(binary)
except PythonInterpreter.Error:
self.context.log.info(
"Stale interpreter reference detected: {}, removing reference and "
"selecting a new interpreter.".format(binary)
)
os.remove(interpreter_path_file)
return self._select_interpreter(interpreter_path_file, targets)
|
py | 1a55717e55ea2350eb0198479457e257de26d60e | from game import Game
def test_game():
game = Game('test_game.txt')
game.setStart()
assert [game.player.row, game.player.col] == [0, 2], 'Method setStart failed'
print('test1 passed: Method setStart')
assert game.row == 4, 'game.row failed'
print('test2 passed: game.row passed')
assert game.column == 6, 'game.column failed'
print('test3 passed: game.column passed')
"""Positive test cases"""
# step into air
game.coordinateUpdate('s', game)
new = game.showGrid()
assert game.moves == ['s'], 'game.moves failed'
assert game.movesNum == 1, 'game.movesNum failed'
print('test4 passed: game attributes are correct')
assert new == '**X***\n' \
'*1A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Air failed'
print('test5 passed: Step into Air')
# step into teleport
game.coordinateUpdate('a', game)
new = game.showGrid()
assert game.moves == ['s', 'a'], 'game.moves failed'
assert game.movesNum == 2, 'game.movesNum failed'
print('test6 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*W FA*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Teleport failed'
print('test7 passed: Step into Teleport')
# step into water
for move in ['w', 'a', 'a', 's', 'a']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'a', 'w', 'a', 'a', 's', 'a'], 'game.moves failed'
assert game.movesNum == 7, 'game.movesNum failed'
print('test8 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*A F1*\n' \
'* Y**\n\n' \
'You have 1 water bucket.', 'Step into Water failed'
print('test9 passed: Step into Water')
# step into fire with water
for move in ['d', 'd']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'a', 'w', 'a', 'a', 's', 'a', 'd', 'd'], 'game.moves failed'
assert game.movesNum == 9, 'game.movesNum failed'
print('test10 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'* A1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Fire with water failed'
print('test11 passed: Step into Fire with water passed')
# reach the end successfully
game.coordinateUpdate('s', game)
new = game.showGrid()
assert game.moves == ['s', 'a', 'w', 'a', 'a', 's', 'a', 'd', 'd', 's'], 'game.moves failed'
assert game.movesNum == 10, 'game.movesNum failed'
print('test12 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'* 1*\n' \
'* A**\n\n' \
'You have 0 water buckets.', 'Step into End failed'
print('test13 passed: Step into End')
# step into fire without water
game2 = Game('test_game.txt')
game2.setStart()
for move in ['s', 'd', 's']:
game2.coordinateUpdate(move, game2)
new = game2.showGrid()
assert game2.moves == ['s', 'd', 's'], 'game.moves failed'
assert game2.movesNum == 3, 'game.movesNum failed'
print('test14 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*W A1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Fire without water failed'
print('test15 passed: Step into Fire without water')
# test command 'e'
game = Game('test_game.txt')
game.setStart()
for move in ['s', 'e']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'e'], 'game.moves failed'
assert game.movesNum == 2, 'game.movesNum failed'
print('test16 passed: game attributes are correct')
assert new == '**X***\n' \
'*1A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "Command 'e' failed"
print("test17 passed: Command 'e' passed")
# test command 'q'
game.coordinateUpdate('q', game)
new = game.showGrid()
assert game.moves == ['s', 'e'], 'game.moves failed'
assert game.movesNum == 2, 'game.movesNum failed'
print('test18 passed: game attributes are correct')
assert new == '**X***\n' \
'*1A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "Command 'q' failed"
print("test19 passed: Command 'q' passed")
# test uppercase
    # the recorded moves should still be uppercase, because they are only turned into lowercase inside cell.step()
for move in ['D']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'e', 'D'], 'game.moves failed'
assert game.movesNum == 3, 'game.movesNum failed'
print('test20 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "uppercase failed"
print("test21 passed: uppercase passed")
print('positive tests all passed')
"""Negative cases"""
# invalid input
game.coordinateUpdate('asc', game)
new = game.showGrid()
assert game.moves == ['s', 'e', 'D'], 'game.moves failed'
assert game.movesNum == 3, 'game.movesNum failed'
print('test22 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "invalid input failed"
print("test23 passed: invalid input passed")
print('negative tests all passed')
"""Edge case"""
# go beyond the map
for move in ['a', 's', 's', 's']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'e', 'D', 'a', 's', 's'], 'game.moves failed'
assert game.movesNum == 6, 'game.movesNum failed'
print('test24 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*W F1*\n' \
'* AY**\n\n' \
'You have 0 water buckets.', "go beyond the map failed"
print("test25 passed: go beyond the map passed")
print('edge tests all passed')
def run_tests():
test_game()
print('test_game all passed \n')
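# Minimal entry-point sketch (an assumption, not in the original file): lets the
# module be executed directly, e.g. `python test_game.py`.
if __name__ == '__main__':
    run_tests()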
|
py | 1a55721dbf8db9288e0b351756319de30f634bcb | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('delete/<str:pk>', views.delete, name='delete')
] |
py | 1a5572ca36aa764a3211fe4960b6a2861227b284 | import logging
import os
class Config:
receive_timeout = int(os.environ.get("SESSION_TIMEOUT",60))
base_url = os.environ.get('BASE_URL', "wss://api.bale.ai/v1/bots/")
request_timeout = int(os.environ.get('REQUEST_TIMEOUT', 5))
    # 0: print to output, 1: use graylog, 2: both
use_graylog = os.environ.get('USE_GRAYLOG', "0")
source = os.environ.get('LOG_SOURCE', "bot_source")
graylog_host = os.environ.get('GRAYLOG_HOST', "172.30.41.67")
graylog_port = int(os.environ.get('GRAYLOG_PORT', 12201))
log_level = int(os.environ.get('LOG_LEVEL', logging.DEBUG))
log_facility_name = os.environ.get('LOG_FACILITY_NAME', "python_bale_bot")
monitoring_hash = os.environ.get('MONITORING_HASH', "SADJSDSDas4d2asf41f2a2faasd45sas")
real_time_fetch_updates = os.environ.get('REAL_TIME_FETCH_UPDATES', True)
continue_last_processed_seq = os.environ.get('CONTINUE_LAST_PROCESSED_SEQ', False)
    timeInterval = int(os.environ.get('TIME_INTERVAL', 1)) # the time interval is in seconds
updates_number = int(os.environ.get('UPDATES_NUMBER', 3))
heartbeat = int(os.environ.get("HEARTBEAT", 30))
state_holder = int(os.getenv('STATE_HOLDER', 0))
redis_host = os.getenv('REDIS_HOST', "localhost")
redis_port = int(os.getenv('REDIS_PORT', 6379))
    redis_db = int(os.getenv('REDIS_DB', 0))
group_shield = int(os.getenv('GROUP_SHIELD', 1))
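# Illustrative sketch (not part of the original module): one way a caller might
# consume these settings. `build_logger` is a hypothetical helper; the graylog
# handler is omitted because it would require an external library.
def build_logger():
    logger = logging.getLogger(Config.log_facility_name)
    logger.setLevel(Config.log_level)
    if Config.use_graylog in ("0", "2"):
        logger.addHandler(logging.StreamHandler())
    return logger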
|
py | 1a5572e56275541f7bc07b622b7b7900ac0019bf | from pathlib import Path
# Gather the unique lines ("keys") found across every .txt file in the search
# root, then write each key out exactly once.
new_keys = set()
for file in Path('/').glob('*.txt'):
with file.open('r') as f:
for line in f.readlines():
new_keys.add(line.strip('\n'))
with open('condenced.txt', 'w') as f:
for key in new_keys:
f.write(key + '\n')
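# Equivalent one-pass alternative (illustrative sketch, with the same
# assumption about the '/' search root as above):
# new_keys = {line.strip('\n') for path in Path('/').glob('*.txt')
#             for line in path.open()}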
|
py | 1a55736ace1023cb9fa38030c1abcad1b4128d96 | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHAFileForPythia8 = cms.string('Configuration/Generator/data/GMSB/GMSB_Lambda100TeV_CTau1200cm.slha'),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
py | 1a557370752bb937f101ed493dc34546cde0d02d | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test a node with the -disablewallet option.
- Test that validateaddress RPC works when running with -disablewallet
- Test that it is not possible to mine to an invalid address.
"""
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import *
class DisableWalletTest (SarielsazTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-disablewallet"]]
def run_test (self):
# Make sure wallet is really disabled
assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo)
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet. Generating to a valid address should succeed
# but generating to an invalid address will fail.
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
if __name__ == '__main__':
DisableWalletTest ().main ()
|
py | 1a5574724cb2b5fe170420583aa3cf6e86cc0241 |
from cachelib import SimpleCache
from slickqaweb.api.project import get_project, get_release, get_build
cache = SimpleCache()
def get_project_release_build_ids(project_name, release_name, build_name):
    """Resolve a [project id, release id, build id] triple, caching each lookup.
    Any name that is None, or that cannot be resolved, yields None in the
    corresponding position of the returned list.
    """
retval = []
project = None
release = None
build = None
if project_name is None:
retval.append(None)
else:
cache_key = "project-{}".format(project_name)
if cache.has(cache_key):
retval.append(cache.get(cache_key))
else:
project = get_project(project_name)
if project is not None:
retval.append(project.id)
cache.set(cache_key, project.id)
else:
retval.append(None)
if release_name is None:
retval.append(None)
else:
cache_key = "release-{}".format(release_name)
if cache.has(cache_key):
retval.append(cache.get(cache_key))
else:
if project is not None:
release = get_release(project, release_name)
if release is not None:
retval.append(release.id)
cache.set(cache_key, release.id)
else:
retval.append(None)
if build_name is None:
retval.append(None)
else:
cache_key = "build-{}".format(build_name)
if cache.has(cache_key):
retval.append(cache.get(cache_key))
else:
if release is not None:
build = get_build(release, build_name)
if build is not None:
retval.append(build.id)
cache.set(cache_key, build.id)
else:
retval.append(None)
return retval
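# Illustrative usage sketch (not part of the original module; assumes a
# configured slickqaweb application context). The literal names below are
# hypothetical placeholders.
def _example_lookup():
    project_id, release_id, build_id = get_project_release_build_ids(
        "myproject", "1.0", "build-42")
    return project_id, release_id, build_id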
|
py | 1a557519c08b242a4542ed28e22ec68ee7d9a690 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of minimum set of
methods that needs to be implemented by a v2 Neutron Plug-in.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class NeutronPluginBaseV2(object):
@abc.abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
def create_subnetpool(self, context, subnetpool):
"""Create a subnet pool.
:param context: neutron api request context
:param subnetpool: Dictionary representing the subnetpool to create.
"""
raise NotImplementedError()
def update_subnetpool(self, context, id, subnetpool):
"""Update a subnet pool.
:param context: neutron api request context
:param subnetpool: Dictionary representing the subnetpool attributes
to update.
"""
raise NotImplementedError()
def get_subnetpool(self, context, id, fields=None):
"""Show a subnet pool.
:param context: neutron api request context
:param id: The UUID of the subnetpool to show.
"""
raise NotImplementedError()
def get_subnetpools(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieve list of subnet pools."""
raise NotImplementedError()
def delete_subnetpool(self, context, id):
"""Delete a subnet pool.
:param context: neutron api request context
:param id: The UUID of the subnet pool to delete.
"""
raise NotImplementedError()
@abc.abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
                        this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
        .. note:: this method is optional, as it was not part of the originally
            defined plugin API.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abc.abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abc.abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
The contents of the list depends on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
                        in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listeners(self):
"""Start the RPC listeners.
Most plugins start RPC listeners implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
def start_rpc_state_reports_listener(self):
"""Start the RPC listeners consuming state reports queue.
This optional method creates rpc consumer for REPORTS queue only.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError()
def rpc_workers_supported(self):
"""Return whether the plugin supports multiple RPC workers.
A plugin that supports multiple RPC workers should override the
start_rpc_listeners method to ensure that this method returns True and
that start_rpc_listeners is called at the appropriate time.
Alternately, a plugin can override this method to customize detection
of support for multiple rpc workers
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
return (self.__class__.start_rpc_listeners !=
NeutronPluginBaseV2.start_rpc_listeners)
def rpc_state_report_workers_supported(self):
"""Return whether the plugin supports state report RPC workers.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
return (self.__class__.start_rpc_state_reports_listener !=
NeutronPluginBaseV2.start_rpc_state_reports_listener)
def get_workers(self):
"""Returns a collection NeutronWorker instances
If a plugin needs to define worker processes outside of API/RPC workers
then it will override this and return a collection of NeutronWorker
instances
"""
return ()
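# Illustrative sketch (not part of Neutron): the override-detection idiom used
# by rpc_workers_supported() above, reduced to a toy base/plugin pair. A class
# that overrides start_rpc_listeners() is reported as supporting RPC workers.
class _ExampleBase(object):
    def start_rpc_listeners(self):
        raise NotImplementedError()
    def rpc_workers_supported(self):
        return (self.__class__.start_rpc_listeners !=
                _ExampleBase.start_rpc_listeners)
class _ExamplePlugin(_ExampleBase):
    def start_rpc_listeners(self):
        return []
# _ExamplePlugin().rpc_workers_supported() is True; a subclass that keeps the
# inherited start_rpc_listeners() would report False.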
|
py | 1a5575a0f65b51137796a1eef561d02f99c5566c | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
version = versions.Version('twisted', 15, 2, 1)
|
py | 1a5575bbc8e230a643b489bdcaf174ec70900a0b | """
Miscellaneous generic functions
A collection of functions implementing generic algorithms in arbitrary
groups, including additive and multiplicative groups.
In all cases the group operation is specified by a parameter
'operation', which is a string either one of the set of
multiplication_names or addition_names specified below, or 'other'.
In the latter case, the caller must provide an identity, inverse() and
op() functions.
::
multiplication_names = ( 'multiplication', 'times', 'product', '*')
addition_names = ( 'addition', 'plus', 'sum', '+')
Also included are a generic function for computing multiples (or
powers), and an iterator for general multiples and powers.
EXAMPLES:
Some examples in the multiplicative group of a finite field:
- Discrete logs::
sage: K = GF(3^6,'b')
sage: b = K.gen()
sage: a = b^210
sage: discrete_log(a, b, K.order()-1)
210
- Linear relation finder::
sage: F.<a>=GF(3^6,'a')
sage: a.multiplicative_order().factor()
2^3 * 7 * 13
sage: b=a^7
sage: c=a^13
sage: linear_relation(b,c,'*')
(13, 7)
sage: b^13==c^7
True
- Orders of elements::
sage: k.<a> = GF(5^5)
sage: b = a^4
sage: order_from_multiple(b,5^5-1,operation='*')
781
sage: order_from_bounds(b,(5^4,5^5),operation='*')
781
Some examples in the group of points of an elliptic curve over a finite field:
- Discrete logs::
sage: F=GF(37^2,'a')
sage: E=EllipticCurve(F,[1,1])
sage: F.<a>=GF(37^2,'a')
sage: E=EllipticCurve(F,[1,1])
sage: P=E(25*a + 16 , 15*a + 7 )
sage: P.order()
672
sage: Q=39*P; Q
(36*a + 32 : 5*a + 12 : 1)
sage: discrete_log(Q,P,P.order(),operation='+')
39
- Linear relation finder::
sage: F.<a>=GF(3^6,'a')
sage: E=EllipticCurve([a^5 + 2*a^3 + 2*a^2 + 2*a, a^4 + a^3 + 2*a + 1])
sage: P=E(a^5 + a^4 + a^3 + a^2 + a + 2 , 0)
sage: Q=E(2*a^3 + 2*a^2 + 2*a , a^3 + 2*a^2 + 1)
sage: linear_relation(P,Q,'+')
(1, 2)
sage: P == 2*Q
True
- Orders of elements::
sage: k.<a> = GF(5^5)
sage: E = EllipticCurve(k,[2,4])
sage: P = E(3*a^4 + 3*a , 2*a + 1 )
sage: M = E.cardinality(); M
3227
sage: plist = M.prime_factors()
sage: order_from_multiple(P, M, plist, operation='+')
3227
sage: Q = E(0,2)
sage: order_from_multiple(Q, M, plist, operation='+')
7
sage: order_from_bounds(Q, Hasse_bounds(5^5), operation='+')
7
"""
###########################################################################
# Copyright (C) 2008 William Stein <[email protected]>
# John Cremona <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
from copy import copy
import sage.misc.misc as misc
import sage.rings.integer_ring as integer_ring
import sage.rings.integer
#
# Lists of names (as strings) which the user may use to identify one
# of the standard operations:
#
multiplication_names = ( 'multiplication', 'times', 'product', '*')
addition_names = ( 'addition', 'plus', 'sum', '+')
from sage.structure.element import generic_power as power
def multiple(a, n, operation='*', identity=None, inverse=None, op=None):
r"""
Returns either `na` or `a^n`, where `n` is any integer and `a` is
a Python object on which a group operation such as addition or
multiplication is defined. Uses the standard binary algorithm.
    INPUT: See the documentation for ``discrete_log()``.
EXAMPLES::
sage: multiple(2,5)
32
sage: multiple(RealField()('2.5'),4)
39.0625000000000
sage: multiple(2,-3)
1/8
sage: multiple(2,100,'+') == 100*2
True
sage: multiple(2,100) == 2**100
True
sage: multiple(2,-100,) == 2**-100
True
sage: R.<x>=ZZ[]
sage: multiple(x,100)
x^100
sage: multiple(x,100,'+')
100*x
sage: multiple(x,-10)
1/x^10
Idempotence is detected, making the following fast::
sage: multiple(1,10^1000)
1
sage: E=EllipticCurve('389a1')
sage: P=E(-1,1)
sage: multiple(P,10,'+')
(645656132358737542773209599489/22817025904944891235367494656 : 525532176124281192881231818644174845702936831/3446581505217248068297884384990762467229696 : 1)
sage: multiple(P,-10,'+')
(645656132358737542773209599489/22817025904944891235367494656 : -528978757629498440949529703029165608170166527/3446581505217248068297884384990762467229696 : 1)
"""
from operator import inv, mul, neg, add
if operation in multiplication_names:
identity = a.parent()(1)
inverse = inv
op = mul
elif operation in addition_names:
identity = a.parent()(0)
inverse = neg
op = add
else:
if identity==None or inverse==None or op==None:
raise ValueError, "identity, inverse and operation must all be specified"
if n == 0:
return identity
if n < 0:
n = -n
a = inverse(a)
if n == 1:
return a
# check for idempotence, and store the result otherwise
aa = op(a,a)
if aa == a:
return a
if n == 2:
return aa
if n == 3:
return op(aa,a)
if n == 4:
return op(aa,aa)
# since we've computed a^2, let's start squaring there
# so, let's keep the least-significant bit around, just
# in case.
m = n & 1
n = n >> 1
# One multiplication can be saved by starting with
# the second-smallest power needed rather than with 1
# we've already squared a, so let's start there.
apow = aa
while n&1 == 0:
apow = op(apow,apow)
n = n >> 1
power = apow
n = n >> 1
# now multiply that least-significant bit in...
if m:
power = op(power,a)
# and this is straight from the book.
while n != 0:
apow = op(apow,apow)
if n&1 != 0:
power = op(power,apow)
n = n >> 1
return power
#
# Generic iterator for looping through multiples or powers
#
class multiples:
r"""
Return an iterator which runs through ``P0+i*P`` for ``i`` in ``range(n)``.
``P`` and ``P0`` must be Sage objects in some group; if the operation is
multiplication then the returned values are instead ``P0*P**i``.
EXAMPLES::
sage: list(multiples(1,10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
sage: list(multiples(1,10,100))
[100, 101, 102, 103, 104, 105, 106, 107, 108, 109]
sage: E=EllipticCurve('389a1')
sage: P=E(-1,1)
sage: for Q in multiples(P,5): print Q, Q.height()/P.height()
(0 : 1 : 0) 0.000000000000000
(-1 : 1 : 1) 1.00000000000000
(10/9 : -35/27 : 1) 4.00000000000000
(26/361 : -5720/6859 : 1) 9.00000000000000
(47503/16641 : 9862190/2146689 : 1) 16.0000000000000
sage: R.<x>=ZZ[]
sage: list(multiples(x,5))
[0, x, 2*x, 3*x, 4*x]
sage: list(multiples(x,5,operation='*'))
[1, x, x^2, x^3, x^4]
sage: list(multiples(x,5,indexed=True))
[(0, 0), (1, x), (2, 2*x), (3, 3*x), (4, 4*x)]
sage: list(multiples(x,5,indexed=True,operation='*'))
[(0, 1), (1, x), (2, x^2), (3, x^3), (4, x^4)]
sage: for i,y in multiples(x,5,indexed=True): print "%s times %s = %s"%(i,x,y)
0 times x = 0
1 times x = x
2 times x = 2*x
3 times x = 3*x
4 times x = 4*x
sage: for i,n in multiples(3,5,indexed=True,operation='*'): print "3 to the power %s = %s"%(i,n)
3 to the power 0 = 1
3 to the power 1 = 3
3 to the power 2 = 9
3 to the power 3 = 27
3 to the power 4 = 81
"""
def __init__(self,P,n,P0=None,indexed=False, operation='+', op=None):
"""
Create a multiples iterator
INPUT:
- ``P`` - step value: any Sage object on which a binary
operation is defined
- ``n`` - number of multiples: non-negative integer
- ``P0`` - offset (default 0): Sage object which can be 'added' to P
- ``indexed`` - boolean (default False)
If ``indexed==False`` then the iterator delivers ``P0+i*P``
(if ``operation=='+'``) or ``P0*P**i`` (if
``operation=='*'``), for ``i`` in ``range(n)``.
If ``indexed==True`` then the iterator delivers tuples
``(i,P0+i*P)`` or ``(i,P0*P**i)``.
- ``operation`` - string: '+' (default ) or '*' or `other`.
If `other`, a function ``op()`` must be supplied (a function
of 2 arguments) defining the group binary operation; also
``P0`` must be supplied.
"""
if n<0:
raise ValueError, 'n cannot be negative in multiples'
from operator import mul, add
if operation in multiplication_names:
if P0==None: P0 = P.parent()(1)
self.op = mul
elif operation in addition_names:
if P0==None: P0 = P.parent()(0)
self.op = add
else:
self.op = op
if P0==None:
raise ValueError, "P0 must be supplied when operation is neither addition nor multiplication"
if op==None:
raise ValueError, "op() must both be supplied when operation is neither addition nor multiplication"
self.P=copy(P)
self.Q=copy(P0)
assert self.P is not None and self.Q is not None
self.i = 0
self.bound = n
self.indexed = indexed
def next(self):
"""
Returns the next item in this multiples iterator.
"""
if self.i >= self.bound:
raise StopIteration
i = self.i
val = self.Q
self.i +=1
self.Q=self.op(self.Q,self.P)
if self.indexed:
return (i,val)
else:
return val
def __iter__(self):
"""
Standard member function making this class an iterator.
"""
return self
def bsgs(a, b, bounds, operation='*', identity=None, inverse=None, op=None):
r"""
Totally generic discrete baby-step giant-step function.
Solves `na=b` (or `a^n=b`) with `lb\le n\le ub` where ``bounds==(lb,ub)``,
raising an error if no such `n` exists.
`a` and `b` must be elements of some group with given identity,
inverse of ``x`` given by ``inverse(x)``, and group operation on
``x``, ``y`` by ``op(x,y)``.
If operation is '*' or '+' then the other
arguments are provided automatically; otherwise they must be
provided by the caller.
INPUT:
- ``a`` - group element
- ``b`` - group element
- ``bounds`` - a 2-tuple of integers ``(lower,upper)`` with ``0<=lower<=upper``
- ``operation`` - string: '*', '+', 'other'
- ``identity`` - the identity element of the group
- ``inverse()`` - function of 1 argument ``x`` returning inverse of ``x``
- ``op()`` - function of 2 arguments ``x``, ``y`` returning ``x*y`` in group
OUTPUT:
An integer `n` such that `a^n = b` (or `na = b`). If no
such `n` exists, this function raises a ValueError exception.
NOTE: This is a generalization of discrete logarithm. One
situation where this version is useful is to find the order of
an element in a group where we only have bounds on the group
order (see the elliptic curve example below).
ALGORITHM: Baby step giant step. Time and space are soft
`O(\sqrt{n})` where `n` is the difference between upper and lower
bounds.
EXAMPLES::
sage: b = Mod(2,37); a = b^20
sage: bsgs(b, a, (0,36))
20
sage: p=next_prime(10^20)
sage: a=Mod(2,p); b=a^(10^25)
sage: bsgs(a, b, (10^25-10^6,10^25+10^6)) == 10^25
True
sage: K = GF(3^6,'b')
sage: a = K.gen()
sage: b = a^210
sage: bsgs(a, b, (0,K.order()-1))
210
sage: K.<z>=CyclotomicField(230)
sage: w=z^500
sage: bsgs(z,w,(0,229))
40
An additive example in an elliptic curve group::
sage: F.<a>=GF(37^5,'a')
sage: E=EllipticCurve(F,[1,1])
sage: P=E.lift_x(a); P
(a : 9*a^4 + 22*a^3 + 23*a^2 + 30 : 1)
    This will return a multiple of the order of P::
sage: bsgs(P,P.parent()(0),Hasse_bounds(F.order()),operation='+')
69327408
AUTHOR:
- John Cremona (2008-03-15)
"""
Z = integer_ring.ZZ
from operator import inv, mul, neg, add
if operation in multiplication_names:
identity = a.parent()(1)
inverse = inv
op = mul
elif operation in addition_names:
identity = a.parent()(0)
inverse = neg
op = add
else:
if identity==None or inverse==None or op==None:
raise ValueError, "identity, inverse and operation must be given"
lb, ub = bounds
if lb<0 or ub<lb:
raise ValueError, "bsgs() requires 0<=lb<=ub"
if a.is_zero() and not b.is_zero():
raise ValueError, "No solution in bsgs()"
ran = 1 + ub - lb # the length of the interval
c = op(inverse(b),multiple(a,lb,operation=operation))
if ran < 30: # use simple search for small ranges
i = lb
d = c
# for i,d in multiples(a,ran,c,indexed=True,operation=operation):
for i0 in range(ran):
i = lb + i0
if identity == d: # identity == b^(-1)*a^i, so return i
return Z(i)
d = op(a,d)
raise ValueError, "No solution in bsgs()"
m = ran.isqrt()+1 # we need sqrt(ran) rounded up
table = dict() # will hold pairs (a^(lb+i),lb+i) for i in range(m)
d=c
for i0 in misc.srange(m):
i = lb + i0
if identity==d: # identity == b^(-1)*a^i, so return i
return Z(i)
table[d] = i
d=op(d,a)
c = op(c,inverse(d)) # this is now a**(-m)
d=identity
for i in misc.srange(m):
j = table.get(d)
if not j==None: # then d == b*a**(-i*m) == a**j
return Z(i*m + j)
d=op(c,d)
raise ValueError, "Log of %s to the base %s does not exist in %s."%(b,a,bounds)
def discrete_log_rho(a, base, ord=None, operation='*', hash_function=hash):
"""
Pollard Rho algorithm for computing discrete logarithm in cyclic
group of prime order.
If the group order is very small it falls back to the baby step giant step
algorithm.
INPUT:
- a - a group element
- base - a group element
    - ord - the order of base, or None, in which case we try to compute it
    - operation - a string (default: '*') indicating whether we are in an
additive group or a multiplicative one
- hash_function - having an efficient hash function is critical
for this algorithm (see examples)
OUTPUT: return an integer $n$ such that `a=base^n` (or `a=n*base`)
ALGORITHM: Pollard rho for discrete logarithm, adapted from the article of Edlyn Teske,
'A space efficient algorithm for group structure computation'
EXAMPLES::
sage: F.<a> = GF(2^13)
sage: g = F.gen()
sage: discrete_log_rho(g^1234, g)
1234
sage: F.<a> = GF(37^5, 'a')
sage: E = EllipticCurve(F, [1,1])
sage: G = 3*31*2^4*E.lift_x(a)
sage: discrete_log_rho(12345*G, G, ord=46591, operation='+')
12345
It also works with matrices::
sage: A = matrix(GF(50021),[[10577,23999,28893],[14601,41019,30188],[3081,736,27092]])
sage: discrete_log_rho(A^1234567, A)
1234567
Beware, the order must be prime::
sage: I = IntegerModRing(171980)
sage: discrete_log_rho(I(2), I(3))
Traceback (most recent call last):
...
ValueError: for Pollard rho algorithm the order of the group must be prime
If it fails to find a suitable logarithm, it raises a ``ValueError``::
sage: I = IntegerModRing(171980)
sage: discrete_log_rho(I(31002),I(15501))
Traceback (most recent call last):
...
ValueError: Pollard rho algorithm failed to find a logarithm
The main limitation on the hash function is that we don't want to have
`hash(x*y) = hash(x)+hash(y)`::
sage: I = IntegerModRing(next_prime(2^23))
sage: def test():
... try:
... discrete_log_rho(I(123456),I(1),operation='+')
... except StandardError:
... print "FAILURE"
sage: test() # random failure
FAILURE
If this happens, we can provide a better hash function::
sage: discrete_log_rho(I(123456),I(1),operation='+', hash_function=lambda x: hash(x*x))
123456
AUTHOR:
- Yann Laigle-Chapuy (2009-09-05)
"""
from sage.rings.integer import Integer
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing
from operator import mul, add, pow
# should be reasonable choices
partition_size=20
memory_size=4
if operation in addition_names:
mult=add
power=mul
if ord==None:
ord=base.additive_order()
elif operation in multiplication_names:
mult=mul
power=pow
if ord==None:
ord=base.multiplicative_order()
else:
raise(ValueError, "unknown operation")
ord = Integer(ord)
if not ord.is_prime():
raise ValueError,"for Pollard rho algorithm the order of the group must be prime"
# check if we need to set immutable before hashing
mut = hasattr(base,'set_immutable')
isqrtord=ord.isqrt()
    if isqrtord < partition_size: # setup too costly, use bsgs
return bsgs(base,a, bounds=(0,ord), operation=operation)
reset_bound = 8*isqrtord # we take some margin
I=IntegerModRing(ord)
for s in xrange(10): # to avoid infinite loops
# random walk function setup
m=[I.random_element() for i in xrange(partition_size)]
n=[I.random_element() for i in xrange(partition_size)]
M=[mult(power(base,Integer(m[i])),power(a,Integer(n[i]))) for i in xrange(partition_size)]
ax = I.random_element()
x = power(base,Integer(ax))
if mut:
x.set_immutable()
bx = I(0)
sigma=[(0,None)]*memory_size
H={} # memory
i0=0
nextsigma = 0
for i in xrange(reset_bound):
#random walk, we need an efficient hash
s=hash_function(x) % partition_size
(x,ax,bx) = (mult(M[s],x), ax+m[s], bx+n[s])
if mut:
x.set_immutable()
# look for collisions
if x in H:
ay,by=H[x]
if bx == by:
break
else:
res = sage.rings.integer.Integer((ay-ax)/(bx-by))
if power(base,res) == a:
return res
else:
break
# should we remember this value?
elif i >= nextsigma:
if sigma[i0][1] is not None:
H.pop(sigma[i0][1])
sigma[i0]=(i,x)
i0 = (i0+1) % memory_size
nextsigma = 3*sigma[i0][0] #3 seems a good choice
H[x]=(ax,bx)
raise ValueError, "Pollard rho algorithm failed to find a logarithm"
def discrete_log(a, base, ord=None, bounds=None, operation='*', identity=None, inverse=None, op=None):
r"""
Totally generic discrete log function.
INPUT:
- ``a`` - group element
- ``base`` - group element (the base)
- ``ord`` - integer (multiple of order of base, or ``None``)
- ``bounds`` - a priori bounds on the log
- ``operation`` - string: '*', '+', 'other'
- ``identity`` - the group's identity
- ``inverse()`` - function of 1 argument ``x`` returning inverse of ``x``
- ``op()`` - function of 2 arguments ``x``, ``y`` returning ``x*y`` in group
``a`` and ``base`` must be elements of some group with identity
given by identity, inverse of ``x`` by ``inverse(x)``, and group
operation on ``x``, ``y`` by ``op(x,y)``.
If operation is '*' or '+' then the other
arguments are provided automatically; otherwise they must be
provided by the caller.
OUTPUT: Returns an integer `n` such that `b^n = a` (or `nb = a`),
assuming that ``ord`` is a multiple of the order of the base `b`.
If ``ord`` is not specified, an attempt is made to compute it.
If no such `n` exists, this function raises a ValueError exception.
.. warning::
If ``x`` has a log method, it is likely to be vastly faster
than using this function. E.g., if ``x`` is an integer modulo
`n`, use its log method instead!
ALGORITHM: Pohlig-Hellman and Baby step giant step.
EXAMPLES::
sage: b = Mod(2,37); a = b^20
sage: discrete_log(a, b)
20
sage: b = Mod(2,997); a = b^20
sage: discrete_log(a, b)
20
sage: K = GF(3^6,'b')
sage: b = K.gen()
sage: a = b^210
sage: discrete_log(a, b, K.order()-1)
210
sage: b = Mod(1,37); x = Mod(2,37)
sage: discrete_log(x, b)
Traceback (most recent call last):
...
ValueError: No discrete log of 2 found to base 1
sage: b = Mod(1,997); x = Mod(2,997)
sage: discrete_log(x, b)
Traceback (most recent call last):
...
ValueError: No discrete log of 2 found to base 1
    See trac\#2356::
sage: F.<w> = GF(121)
sage: v = w^120
sage: v.log(w)
0
sage: K.<z>=CyclotomicField(230)
sage: w=z^50
sage: discrete_log(w,z)
50
An example where the order is infinite: note that we must give
an upper bound here::
sage: K.<a> = QuadraticField(23)
sage: eps = 5*a-24 # a fundamental unit
sage: eps.multiplicative_order()
+Infinity
sage: eta = eps^100
sage: discrete_log(eta,eps,bounds=(0,1000))
100
In this case we cannot detect negative powers::
sage: eta = eps^(-3)
sage: discrete_log(eta,eps,bounds=(0,100))
Traceback (most recent call last):
...
ValueError: No discrete log of -11515*a - 55224 found to base 5*a - 24
But we can invert the base (and negate the result) instead::
sage: - discrete_log(eta^-1,eps,bounds=(0,100))
-3
An additive example: elliptic curve DLOG::
sage: F=GF(37^2,'a')
sage: E=EllipticCurve(F,[1,1])
sage: F.<a>=GF(37^2,'a')
sage: E=EllipticCurve(F,[1,1])
sage: P=E(25*a + 16 , 15*a + 7 )
sage: P.order()
672
sage: Q=39*P; Q
(36*a + 32 : 5*a + 12 : 1)
sage: discrete_log(Q,P,P.order(),operation='+')
39
An example of big smooth group::
sage: F.<a>=GF(2^63)
sage: g=F.gen()
sage: u=g**123456789
sage: discrete_log(u,g)
123456789
AUTHORS:
- William Stein and David Joyner (2005-01-05)
- John Cremona (2008-02-29) rewrite using ``dict()`` and make generic
"""
if ord == None:
if operation in multiplication_names:
try:
ord = base.multiplicative_order()
except StandardError:
ord = base.order()
elif operation in addition_names:
try:
ord = base.additive_order()
except StandardError:
ord = base.order()
else:
try:
ord = base.order()
except StandardError:
raise ValueError, "ord must be specified"
try:
from sage.rings.infinity import Infinity
if ord==+Infinity:
return bsgs(base,a,bounds, operation=operation)
if ord==1 and a!=base:
raise ValueError
f=ord.factor()
l=[0]*len(f)
for i,(pi,ri) in enumerate(f):
for j in range(ri):
if operation in multiplication_names:
c=bsgs(base**(ord//pi),(a/base**l[i])**(ord//pi**(j+1)),(0,pi),operation=operation)
l[i] += c*(pi**j)
elif operation in addition_names:
c=bsgs(base*(ord//pi),(a-base*l[i])*(ord//pi**(j+1)),(0,pi),operation=operation)
l[i] += c*(pi**j)
from sage.rings.arith import CRT_list
return CRT_list(l,[pi**ri for pi,ri in f])
except ValueError:
raise ValueError, "No discrete log of %s found to base %s"%(a,base)
def discrete_log_generic(a, base, ord=None, bounds=None, operation='*', identity=None, inverse=None, op=None):
"""
Alias for ``discrete_log``.
"""
    return discrete_log(a, base, ord=ord, bounds=bounds, operation=operation, identity=identity, inverse=inverse, op=op)
def discrete_log_lambda(a, base, bounds, operation='*', hash_function=hash):
"""
Pollard Lambda algorithm for computing discrete logarithms. It uses
only a logarithmic amount of memory. It's useful if you have
bounds on the logarithm. If you are computing logarithms in a
whole finite group, you should use Pollard Rho algorithm.
INPUT:
- a - a group element
- base - a group element
    - bounds - a pair (lb,ub) representing the range where we look for a logarithm
- operation - string: '+', '*' or 'other'
- hash_function -- having an efficient hash function is critical for this algorithm
OUTPUT: Returns an integer `n` such that `a=base^n` (or `a=n*base`)
    ALGORITHM: Pollard Lambda. If the bounds are (lb,ub), it has time complexity
        O(sqrt(ub-lb)) and space complexity O(log(ub-lb))
    EXAMPLES::
sage: F.<a> = GF(2^63)
sage: discrete_log_lambda(a^1234567, a, (1200000,1250000))
1234567
sage: F.<a> = GF(37^5, 'a')
sage: E = EllipticCurve(F, [1,1])
sage: P=E.lift_x(a); P
(a : 9*a^4 + 22*a^3 + 23*a^2 + 30 : 1)
This will return a multiple of the order of P::
sage: discrete_log_lambda(P.parent()(0), P, Hasse_bounds(F.order()), operation='+')
69327408
sage: K.<a> = GF(89**5)
sage: hs = lambda x: hash(x) + 15
sage: discrete_log_lambda(a**(89**3 - 3), a, (89**2, 89**4), operation = '*', hash_function = hs) # long time (10s on sage.math, 2011)
704966
AUTHOR:
-- Yann Laigle-Chapuy (2009-01-25)
"""
from sage.rings.integer import Integer
from operator import mul, add, pow
if operation in addition_names:
mult=add
power=mul
elif operation in multiplication_names:
mult=mul
power=pow
else:
raise(ValueError, "unknown operation")
lb,ub = bounds
if lb<0 or ub<lb:
raise ValueError, "discrete_log_lambda() requires 0<=lb<=ub"
# check for mutability
mut = hasattr(base,'set_immutable')
width = Integer(ub-lb)
N = width.isqrt()+1
M = dict()
for s in xrange(10): #to avoid infinite loops
#random walk function setup
k = 0
while (2**k<N):
r = sage.misc.prandom.randrange(1,N)
M[k] = (r , power(base,r))
k += 1
#first random walk
H = power(base,ub)
c = ub
for i in xrange(N):
if mut: H.set_immutable()
r,e = M[hash_function(H)%k]
H = mult(H,e)
c += r
if mut: H.set_immutable()
mem=set([H])
#second random walk
H = a
d=0
while c-d >= lb:
if mut: H.set_immutable()
if ub > c-d and H in mem:
return c-d
r,e = M[hash_function(H)%k]
H = mult(H,e)
d += r
raise ValueError, "Pollard Lambda failed to find a log"
################################################################
#
# Older version of discrete_log. Works fine but has been
# superseded by the version which simply calls the more general
# bsgs() function.
#
################################################################
# def old_discrete_log(a, base, ord=None, operation='*',
# identity=None, inverse=None, op=None):
# r"""
# Totally generic discrete log function.
# a and base must be elements of some group with identity given by
# identity, inverse of x by inverse(x), and group operation on x,y
# by op(x,y).
# If operation is '*' or '+' then the other
# arguments are provided automatically; otherwise they must be
# provided by the caller.
# WARNING: If x has a log method, it is likely to be vastly faster
# than using this function. E.g., if x is an integer modulo n, use
# its log method instead!
# INPUT:
# a -- group element
# base -- group element (the base)
# ord -- integer (multiple of order of base, or None)
# operation -- string: '*', '+', 'other'
# identity -- the group's identity
# inverse() -- function of 1 argument x returning inverse of x
# op() -- function of 2 arguments x,y returning x*y in group
# OUTPUT:
# Returns an integer $n$ such that $b^n = a$ (or $n*b = a$),
# assuming that ord is a multiple of the order of the base $b$.
# If ord is not specified an attempt is made to compute it.
# If no such $n$ exists, this function raises a ValueError exception.
# ALGORITHM: Baby step giant step.
# EXAMPLES:
# sage: b = Mod(2,37); a = b^20
# sage: old_discrete_log(a, b)
# 20
# sage: b = Mod(2,997); a = b^20
# sage: old_discrete_log(a, b)
# 20
# sage: K = GF(3^6,'b')
# sage: b = K.gen()
# sage: a = b^210
# sage: old_discrete_log(a, b, K.order()-1)
# 210
# sage: b = Mod(1,37); x = Mod(2,37)
# sage: old_discrete_log(x, b)
# Traceback (most recent call last):
# ...
# ValueError: Log of 2 to the base 1 does not exist.
# sage: b = Mod(1,997); x = Mod(2,997)
# sage: old_discrete_log(x, b)
# Traceback (most recent call last):
# ...
# ValueError: Log of 2 to the base 1 does not exist.
# See trac\#2356:
# sage: F.<w> = GF(121)
# sage: v = w^120
# sage: v.log(w)
# 0
# sage: K.<z>=CyclotomicField(230)
# sage: w=z^50
# sage: old_discrete_log(w,z)
# 50
# An example where the order is infinite: note that we must give
# an upper bound here:
# sage: K.<a> = QuadraticField(23)
# sage: eps = 5*a-24 # a fundamental unit
# sage: eps.multiplicative_order()
# +Infinity
# sage: eta = eps^100
# sage: old_discrete_log(eta,eps,1000)
# 100
# In this case we cannot detect negative powers:
# sage: eta = eps^(-3)
# sage: old_discrete_log(eta,eps,100)
# Traceback (most recent call last):
# ...
# ValueError: Log of -11515*a - 55224 to the base 5*a - 24 does not exist.
# But we can invert the base (and negate the result) instead:
# sage: - old_discrete_log(eta^-1,eps,100)
# -3
# An additive example: elliptic curve DLOG:
# sage: F=GF(37^2,'a')
# sage: E=EllipticCurve(F,[1,1])
# sage: F.<a>=GF(37^2,'a')
# sage: E=EllipticCurve(F,[1,1])
# sage: P=E(25*a + 16 , 15*a + 7 )
# sage: P.order()
# 672
# sage: Q=39*P; Q
# (36*a + 32 : 5*a + 12 : 1)
# sage: old_discrete_log(Q,P,P.order(),'+')
# 39
# AUTHOR:
# -- William Stein and David Joyner (2005-01-05)
# -- John Cremona (2008-02-29) rewrite using dict() and make generic
# """
# Z = integer_ring.ZZ
# b = base
# from operator import inv, mul, neg, add
# if operation in multiplication_names:
# identity = b.parent()(1)
# inverse = inv
# op = mul
# if ord==None:
# ord = b.multiplicative_order()
# elif operation in addition_names:
# identity = b.parent()(0)
# inverse = neg
# op = add
# if ord==None:
# ord = b.order()
# else:
# if ord==None or identity==None or inverse==None or op==None:
# raise ValueError, "order, identity, inverse and operation must all be specified"
# ord = Z(ord)
# if ord < 100:
# c = identity
# for i in range(ord):
# if c == a: # is b^i
# return Z(i)
# c = op(c,b)
# raise ValueError, "Log of %s to the base %s does not exist."%(a,b)
# m = ord.isqrt()+1 # we need sqrt(ord) rounded up
# table = dict() # will hold pairs (b^j,j) for j in range(m)
# g = identity # will run through b**j
# for j in range(m):
# if a==g:
# return Z(j)
# table[g] = j
# g = op(g,b)
# g = inverse(g) # this is now b**(-m)
# h = op(a,g) # will run through a*g**i = a*b**(-i*m)
# for i in range(1,m):
# j = table.get(h)
# if not j==None: # then a*b**(-i*m) == b**j
# return Z(i*m + j)
# if i < m-1:
# h = op(h,g)
# raise ValueError, "Log of %s to the base %s does not exist."%(a,b)
################################################################
#
# Generic linear relation finder
#
################################################################
def linear_relation(P, Q, operation='+', identity=None, inverse=None, op=None):
r"""
Function which solves the equation ``a*P=m*Q`` or ``P^a=Q^m``.
Additive version: returns `(a,m)` with minimal `m>0` such that
`aP=mQ`. Special case: if `\left<P\right>` and `\left<Q\right>`
intersect only in `\{0\}` then `(a,m)=(0,n)` where `n` is
``Q.additive_order()``.
Multiplicative version: returns `(a,m)` with minimal `m>0` such
that `P^a=Q^m`. Special case: if `\left<P\right>` and
`\left<Q\right>` intersect only in `\{1\}` then `(a,m)=(0,n)`
where `n` is ``Q.multiplicative_order()``.
ALGORITHM:
Uses the generic ``bsgs()`` function, and so works in general
finite abelian groups.
EXAMPLES:
An additive example (in an elliptic curve group)::
sage: F.<a>=GF(3^6,'a')
sage: E=EllipticCurve([a^5 + 2*a^3 + 2*a^2 + 2*a, a^4 + a^3 + 2*a + 1])
sage: P=E(a^5 + a^4 + a^3 + a^2 + a + 2 , 0)
sage: Q=E(2*a^3 + 2*a^2 + 2*a , a^3 + 2*a^2 + 1)
sage: linear_relation(P,Q,'+')
(1, 2)
sage: P == 2*Q
True
A multiplicative example (in a finite field's multiplicative group)::
sage: F.<a>=GF(3^6,'a')
sage: a.multiplicative_order().factor()
2^3 * 7 * 13
sage: b=a^7
sage: c=a^13
sage: linear_relation(b,c,'*')
(13, 7)
sage: b^13==c^7
True
"""
from operator import mul, add
Z = integer_ring.ZZ
if operation in multiplication_names:
op = mul
try:
n = P.multiplicative_order()
m = Q.multiplicative_order()
except StandardError:
n = P.order()
m = Q.order()
elif operation in addition_names:
op = add
try:
n = P.additive_order()
m = Q.additive_order()
except StandardError:
n = P.order()
m = Q.order()
else:
if op==None:
raise ValueError, "operation must be specified"
n = P.order()
m = Q.order()
g = sage.rings.arith.gcd(n,m)
if g==1: return (m,Z(0))
n1 = n//g
m1 = m//g
P1 = multiple(P,n1,operation=operation) # has exact order g
Q1 = multiple(Q,m1,operation=operation) # has exact order g
# now see if Q1 is a multiple of P1; the only multiples we
# need check are h*Q1 where h divides g
for h in g.divisors(): # positive divisors!
try:
Q2 = multiple(Q1,h,operation=operation)
return (n1 * bsgs(P1,Q2,(0,g-1),operation=operation),
m1 * h)
except ValueError:
pass # to next h
raise ValueError, "No solution found in linear_relation!"
################################################################
#
# Generic functions to find orders of elements
#
# 1. order_from_multiple: finds the order given a multiple of the order
#
# 2. order_from_bounds: finds the order given an interval containing a
# multiple of the order
#
################################################################
def order_from_multiple(P, m, plist=None, factorization=None, check=True,
operation='+'):
r"""
Generic function to find order of a group element given a multiple
of its order.
INPUT:
- ``P`` - a Sage object which is a group element;
- ``m`` - a Sage integer which is a multiple of the order of ``P``,
i.e. we require that ``m*P=0`` (or ``P**m=1``);
- ``check`` - a Boolean (default:True), indicating whether we check if ``m``
really is a multiple of the order;
- ``factorization`` - the factorization of ``m``, or ``None`` in which
case this function will need to factor ``m``;
- ``plist`` - a list of the prime factors of ``m``, or ``None`` - kept for compatibility only,
prefer the use of ``factorization``;
- ``operation`` - string: '+' (default) or '*'.
.. note::
It is more efficient for the caller to factor ``m`` and cache
the factors for subsequent calls.
EXAMPLES::
sage: k.<a> = GF(5^5)
sage: b = a^4
sage: order_from_multiple(b,5^5-1,operation='*')
781
sage: E = EllipticCurve(k,[2,4])
sage: P = E(3*a^4 + 3*a , 2*a + 1 )
sage: M = E.cardinality(); M
3227
sage: F = M.factor()
sage: order_from_multiple(P, M, factorization=F, operation='+')
3227
sage: Q = E(0,2)
sage: order_from_multiple(Q, M, factorization=F, operation='+')
7
sage: K.<z>=CyclotomicField(230)
sage: w=z^50
sage: order_from_multiple(w,230,operation='*')
23
sage: F=GF(2^1279,'a')
sage: n=F.cardinality()-1 # Mersenne prime
sage: order_from_multiple(F.random_element(),n,factorization=[(n,1)],operation='*')==n
True
sage: K.<a> = GF(3^60)
sage: order_from_multiple(a, 3^60-1, operation='*', check=False)
42391158275216203514294433200
"""
from operator import mul, add
Z = integer_ring.ZZ
if operation in multiplication_names:
identity = P.parent()(1)
elif operation in addition_names:
identity = P.parent()(0)
else:
raise ValueError, "unknown group operation"
if P == identity:
return Z(1)
M=Z(m)
if check:
assert multiple(P,M,operation=operation) == identity
if factorization:
F = factorization
elif plist:
F = [(p,M.valuation(p)) for p in plist]
else:
F = M.factor()
if len(F) == 1 and list(F) == [(M,1)]:
return M
# Efficiency improvement (2009-10-27, implemented by Yann Laigle-Chapuy):
# we use an internal recursive function to avoid unnecessary computations.
def _order_from_multiple_helper(Q, L, S):
"""
internal use, to minimize the number of group operations.
"""
l = len(L)
if l == 1:
# we determine the power of p dividing the order,
# Efficiency improvement (2009-04-01, suggested by Ryan Hinton,
# implemented by John Cremona): avoid the last multiplication by p.
# For example, if M itself is prime the code used to compute M*P
# twice (unless P=0), now it does it once.
p,e = L[0]
e0 = 0
while (Q != identity) and (e0<e-1):
Q = multiple(Q,p,operation=operation)
e0 += 1
if (Q != identity):
e0 += 1
return p**e0
else:
# try to split the list wisely
sum_left = 0
i = 0
for k in range(l):
p,e = L[k]
                # multiplying by p**e requires roughly 'e log_2(p) / 2' additions
v = e * sage.functions.log.log(float(p))
if abs(sum_left + v - (S / 2)) > abs(sum_left - (S / 2)):
break
sum_left += v
L1 = L[:k]
L2 = L[k:]
# recursive calls
o1 = _order_from_multiple_helper(
multiple(Q, sage.misc.misc.prod([p**e for p,e in L2]), operation),
L1,
sum_left)
o2 = _order_from_multiple_helper(
multiple(Q, o1 , operation),
L2,
S-sum_left)
return o1*o2
return _order_from_multiple_helper(P, F, sage.functions.log.log(float(M)) )
def order_from_bounds(P, bounds, d=None, operation='+',
identity=None, inverse=None, op=None):
r"""
Generic function to find order of a group element, given only
upper and lower bounds for a multiple of the order (e.g. bounds on
the order of the group of which it is an element)
INPUT:
- ``P`` - a Sage object which is a group element
- ``bounds`` - a 2-tuple ``(lb,ub)`` such that ``m*P=0`` (or
``P**m=1``) for some ``m`` with ``lb<=m<=ub``.
- ``d`` - (optional) a positive integer; only ``m`` which are
multiples of this will be considered.
- ``operation`` - string: '+' (default ) or '*' or other.
If other, the following must be supplied:
- ``identity``: the identity element for the group;
- ``inverse()``: a function of one argument giving the inverse
of a group element;
- ``op()``: a function of 2 arguments defining the group binary
operation.
.. note::
Typically ``lb`` and ``ub`` will be bounds on the group order,
and from previous calculation we know that the group order is
divisible by ``d``.
EXAMPLES::
sage: k.<a> = GF(5^5)
sage: b = a^4
sage: order_from_bounds(b,(5^4,5^5),operation='*')
781
sage: E = EllipticCurve(k,[2,4])
sage: P = E(3*a^4 + 3*a , 2*a + 1 )
sage: bounds = Hasse_bounds(5^5)
sage: Q = E(0,2)
sage: order_from_bounds(Q, bounds, operation='+')
7
sage: order_from_bounds(P, bounds, 7, operation='+')
3227
sage: K.<z>=CyclotomicField(230)
sage: w=z^50
sage: order_from_bounds(w,(200,250),operation='*')
23
"""
from operator import mul, add
Z = integer_ring.ZZ
if operation in multiplication_names:
op = mul
identity = P.parent()(1)
elif operation in addition_names:
op = add
identity = P.parent()(0)
else:
if op==None:
raise ValueError, "operation and identity must be specified"
Q = P
if d == None: d = 1
if d > 1:
Q = multiple(P,d,operation=operation)
lb, ub = bounds
bounds = ( sage.rings.arith.integer_ceil(lb/d),
sage.rings.arith.integer_floor(ub/d) )
# Use generic bsgs to find n=d*m with lb<=n<=ub and n*P=0
m = d * bsgs(Q, identity, bounds, operation=operation)
# Now use the order_from_multiple() function to finish the job:
return order_from_multiple(P, m, operation=operation, check=False)
def merge_points(P1,P2, operation='+',
identity=None, inverse=None, op=None, check=True):
r"""
Returns a group element whose order is the lcm of the given elements.
INPUT:
- ``P1`` -- a pair `(g_1,n_1)` where `g_1` is a group element of order `n_1`
- ``P2`` -- a pair `(g_2,n_2)` where `g_2` is a group element of order `n_2`
- ``operation`` -- string: '+' (default ) or '*' or other. If
other, the following must be supplied:
- ``identity``: the identity element for the group;
- ``inverse()``: a function of one argument giving the inverse
of a group element;
- ``op()``: a function of 2 arguments defining the group
binary operation.
OUTPUT:
A pair `(g_3,n_3)` where `g_3` has order `n_3=\hbox{lcm}(n_1,n_2)`.
EXAMPLES::
sage: F.<a>=GF(3^6,'a')
sage: b = a^7
sage: c = a^13
sage: ob = (3^6-1)//7
sage: oc = (3^6-1)//13
sage: merge_points((b,ob),(c,oc),operation='*')
(a^4 + 2*a^3 + 2*a^2, 728)
sage: d,od = merge_points((b,ob),(c,oc),operation='*')
sage: od == d.multiplicative_order()
True
sage: od == lcm(ob,oc)
True
sage: E=EllipticCurve([a^5 + 2*a^3 + 2*a^2 + 2*a, a^4 + a^3 + 2*a + 1])
sage: P=E(2*a^5 + 2*a^4 + a^3 + 2 , a^4 + a^3 + a^2 + 2*a + 2)
sage: P.order()
7
sage: Q=E(2*a^5 + 2*a^4 + 1 , a^5 + 2*a^3 + 2*a + 2 )
sage: Q.order()
4
sage: R,m = merge_points((P,7),(Q,4), operation='+')
sage: R.order() == m
True
sage: m == lcm(7,4)
True
"""
from operator import mul, add
Z = integer_ring.ZZ
g1, n1 = P1
g2, n2 = P2
if operation in multiplication_names:
op = mul
identity = g1.parent()(1)
elif operation in addition_names:
op = add
identity = g1.parent()(0)
else:
if op==None:
raise ValueError, "operation and identity must be specified"
if check:
assert multiple(g1,n1,operation=operation) == identity
assert multiple(g2,n2,operation=operation) == identity
# trivial cases
if n1.divides(n2):
return (g2,n2)
if n2.divides(n1):
return (g1,n1)
m,k1,k2 = sage.rings.arith.xlcm(n1,n2);
m1 = n1//k1
m2 = n2//k2
g1 = multiple(g1,m1,operation=operation)
g2 = multiple(g2,m2,operation=operation)
return (op(g1,g2), m)
|
py | 1a5576ec5a475b0b592822c084a5c73a8cdc60a1 | # -*- coding: utf-8 -*-
"""Scattering Matrix Units
This contains useful units and conversions for scattering matrix calculations
Because all the scattering matrix code is written with m.g.s SI units, constants
have been included that can be used as multipliers to improve the readability of
the code being written that uses this library. This will also hopefully cut down
on unit errors.
Imported External Modules: numpy, scipy.constants
Classes:
Units: contains the various distance, frequency, time, and power multipliers
Functions:
    convert_photon_unit(string,string,numeric): converts from one unit of photon
        measurement (such as eV) to another (such as Hz).
convert_index_unit(string,string,numeric): converts between complex nk values
and complex relative permittivity.
Notes:
Throughout the documentation, you will find type "numeric" indicated. Most
functions are designed to work with floats, ints, complex, and numpy arrays.
All of these are grouped under "numeric".
"""
import numpy as np
from scipy import constants
class Units:
"""Constants and units useful for optical calculations
Attributes:
KILOMETERS, km
METERS, m
CENTIMETERS, cm
MILLIMETERS, mm
MICROMETERS, um
NANOMETERS, nm
PICOMETERS, pm
FEMTOMETERS, fm
HERTZ, Hz
KILOHERTZ, kHz
MEGAHERTZ, MHz
GIGAHERTZ, GHz
TERAHERTZ, THz
PETAHERTZ, PHz
SECONDS, s
MILLISECONDS, ms
MICROSECONDS, us
NANOSECONDS, ns
PICOSECONDS, ps
FEMTOSECONDS, fs
WATTS, W
"""
#Distance Units
KILOMETERS = km = 10.0**3
METERS = m = 1.0
CENTIMETERS = cm = 10.0**-2
MILLIMETERS = mm = 10.0**-3
MICROMETERS = um = 10.0**-6
NANOMETERS = nm = 10.0**-9
PICOMETERS = pm = 10.0**-12
FEMTOMETERS = fm = 10.0**-15
#Frequency Units
HERTZ = Hz = 1.0
KILOHERTZ = kHz = 10.0**3
MEGAHERTZ = MHz = 10.0**6
GIGAHERTZ = GHz = 10.0**9
TERAHERTZ = THz = 10.0**12
PETAHERTZ = PHz = 10.0**15
#Time Units
SECONDS = s = 1.0
MILLISECONDS = ms = 10.0**-3
MICROSECONDS = us = 10.0**-6
NANOSECONDS = ns = 10.0**-9
PICOSECONDS = ps = 10.0**-12
FEMTOSECONDS = fs = 10.0**-15
#Power Units
WATTS = W = 1.0
def convert_photon_unit(initial_unit, new_unit, initial_value):
"""converts between one unit of photon measurement to another
Args:
initial_unit (string): Units of the current value. Must be one
of 'eV', 'wl', or 'freq'.
new_unit (string): Units to convert to. Must be one of 'eV',
'wl', or 'freq'.
initial_value (numeric): Initial value in initial_unit.
Returns:
numeric: value in terms of new_unit
"""
convert = {}
convert["eV"]={"wl": lambda x: (constants.h*constants.c)/(x*constants.e),
"freq": lambda x: (x*constants.e)/(constants.h)}
convert["wl"]={"eV": lambda x: (constants.h*constants.c)/(x*constants.e),
"freq": lambda x: constants.c/x}
convert["freq"]={"wl": lambda x: constants.c/x,
"eV": lambda x: (constants.h*x)/(constants.e)}
return convert[initial_unit][new_unit](initial_value)
def convert_index_unit(initial_unit, new_unit, initial_value):
"""converts between complex nk values and complex relative permittivity.
Args:
initial_unit (string): Units of the current value. Must be one
of 'nk', or 'er_ei'.
new_unit (string): Units to convert to. Must be one of 'nk', or 'er_ei'.
initial_value (numeric): Initial value in initial_unit.
Returns:
numeric: value in terms of new_unit
"""
convert = {}
convert["nk"]={"er_ei": lambda x: x**2.0}
convert["er_ei"]={"nk": lambda x: np.sqrt(x)}
return convert[initial_unit][new_unit](initial_value)
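# Brief usage sketch (an addition, not part of the module above): shows the
# intended use of Units and the two converters; only runs when executed directly.
if __name__ == "__main__":
    wavelength = 500 * Units.nm  # 500 nm expressed in meters
    print("500 nm photon energy: %.3f eV" % convert_photon_unit("wl", "eV", wavelength))
    print("n = 1.5+0.01j as permittivity:", convert_index_unit("nk", "er_ei", 1.5 + 0.01j))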
|
py | 1a5576fa9a363613e36756f11127904a4a8dbe4a | # coding=utf-8
from test.utils import get_modules
from challenges import user
__author__ = "Gareth Coles"
failures = 0
total_tests = 0
total_points = 0
max_points = 0
if __name__ == "__main__":
if user.__learner__ == "123456789012345678":
print("=> Default user ID found - not running tests")
exit()
for mod in get_modules("test/tests"):
try:
case = mod.Case()
except Exception as e:
print("!> Failed to load test case from {}".format(mod.__name__))
print(e)
continue
print("=== {} ({}) ===".format(case.title, mod.__author__))
try:
case.do_test()
except Exception as e:
print("!> Error running test: {}".format(e))
try:
case.do_flake8()
except Exception as e:
print("!> Error running PEP compliance: {}".format(e))
max_points += case.max_points
total_tests += 1
if case.passed:
print("=> Challenge passed with {}/{} points.".format(case.points, case.max_points))
total_points += case.points
print()
print("-- Bonus breakdown --")
for key in case.breakdown.keys():
print("=> {}: {}".format(key, "Passed" if case.breakdown[key] else "Failed"))
else:
print()
print("!> Challenge failed. Please try again!")
failures += 1
if not case.flake8:
print("!> PEP8 compliance check failed. Please see the following messages for more information.")
print()
for line in case.flake8_messages:
if line:
print("flake8 => " + line)
print()
print("=== Results! ===")
print("Test passed: {} / {}".format(total_tests - failures, total_tests))
print("Points earned: {} / {}".format(total_points, max_points))
if failures:
exit(1)
|
py | 1a55771b4c0777f36f8a45ff62703e4819aada78 | from monitoring.mock_uss import webapp
@webapp.route('/riddp/status')
def riddp_status():
return 'Mock RID Display Provider ok'
from . import routes_observation
|
py | 1a5577a41b0aa9c67de55c184a8596b80ae9cb8e | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from user_handler import UserHandler
from cache_handler import CacheHandler
from status_handler import StatusHandler
|
py | 1a55782de053b04fdb5ef01d30ebd5b1e87c9f00 | import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import time
import re
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch.utils.data as data
from torchvision import transforms
import torch
import pdb
import argparse
from src import Networks
from sklearn.metrics import confusion_matrix
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
backends = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface']
from deepface import DeepFace
from deepface.extendedmodels import Age
from deepface.commons import functions, realtime, distance as dst
from deepface.detectors import OpenCvWrapper
input_shape = (224, 224)
input_shape_x = input_shape[0]
input_shape_y = input_shape[1]
text_color = (255, 255, 255)
frame_threshold = 1
time_threshold = 0.1
tic = time.time()
data_transforms_test = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
#emotion_model = DeepFace.build_model('Emotion')
emotion_model = Networks.ResNet18_ARM___RAF()
print("Loading pretrained weights...models/RAF-DB/epoch59_acc0.9205.pth")
checkpoint = torch.load('./models/RAF-DB/epoch59_acc0.9205.pth')
emotion_model.load_state_dict(checkpoint["model_state_dict"], strict=False)
emotion_model = emotion_model.cuda()
print("Emotion model loaded")
toc = time.time()
print("Facial attibute analysis models loaded in ", toc - tic, " seconds")
pivot_img_size = 112 # face recognition result image
# -----------------------
opencv_path = OpenCvWrapper.get_opencv_path()
face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
face_cascade = cv2.CascadeClassifier(face_detector_path)
# -----------------------
freeze = False
face_detected = False
face_included_frames = 0 # freeze screen if a face is detected sequentially for frame_threshold frames
freezed_frame = 0
tic = time.time()
cap = cv2.VideoCapture(0) # webcam
_cnt_frame = 0
emotion_model.eval()
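# Main capture loop (added summary of the steps below): grab a frame from the
# webcam, detect faces with the Haar cascade and DeepFace's SSD backend, crop
# each detection, normalise it with data_transforms_test, run the ResNet18-ARM
# model, and draw per-class confidence bars next to the face before displaying.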
while (True):
_start = time.time()
ret, img = cap.read()
_cnt_frame += 1
if img is None:
break
raw_img = img.copy()
resolution = img.shape
resolution_x = img.shape[1];
resolution_y = img.shape[0]
if freeze == False:
faces = face_cascade.detectMultiScale(img, 1.3, 5)
fc_img, faces = DeepFace.detectFace(img, detector_backend = backends[1])
if len(faces) == 0:
face_included_frames = 0
else:
faces = []
detected_faces = []
face_index = 0
if len(faces)==0:
faces = faces
else:
faces = [faces]
for (x, y, w, h) in faces:
if w > 130: # discard small detected faces
face_detected = True
if face_index == 0:
face_included_frames = face_included_frames + 1 # increase frame for a single face
cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67), 1) # draw rectangle to main image
cv2.putText(img, str(frame_threshold - face_included_frames), (int(x + w / 4), int(y + h / 1.5)),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)
detected_face = img[int(y):int(y + h), int(x):int(x + w)] # crop detected face
# -------------------------------------
detected_faces.append((x, y, w, h))
face_index = face_index + 1
# -------------------------------------
if face_detected == True and face_included_frames == frame_threshold and freeze == False:
freeze = True
# base_img = img.copy()
base_img = raw_img.copy()
detected_faces_final = detected_faces.copy()
tic = time.time()
if freeze == True:
toc = time.time()
if (toc - tic) < time_threshold:
#
# if freezed_frame == 0:
freeze_img = base_img.copy()
# freeze_img = np.zeros(resolution, np.uint8) #here, np.uint8 handles showing white area issue
emotion_predictions = np.zeros((7), dtype=float)
for detected_face in detected_faces_final:
x = detected_face[0];
y = detected_face[1]
w = detected_face[2];
h = detected_face[3]
cv2.rectangle(freeze_img, (x, y), (x + w, y + h), (67, 67, 67), 1) # draw rectangle to main image
# -------------------------------
# apply deep learning for custom_face
custom_face = base_img[y:y + h, x:x + w]
# -------------------------------
# facial attribute analysis
gray_img = torch.unsqueeze(data_transforms_test(custom_face),0)
# emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] #Original
emotion_labels = ['Surprise', 'Fear', 'Disgust', 'Happy', 'Sad','Angry','Neutral']
outputs, _ = emotion_model(gray_img.cuda())
_emotion_predictions = torch.softmax(outputs,1)
# _emotion_predictions = emotion_model.predict(gray_img)[0, :]
emotion_predictions = torch.squeeze(_emotion_predictions).detach().cpu().numpy()
sum_of_predictions = emotion_predictions.sum()
mood_items = []
print('===================================================================================')
print('%d of frames' % (_cnt_frame))
for i in range(0, len(emotion_labels)):
mood_item = []
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
mood_item.append(emotion_label)
mood_item.append(emotion_prediction)
mood_items.append(mood_item)
print('Emotion: %s - Confidence: %f' % (emotion_labels[i], emotion_prediction))
print('===================================================================================')
emotion_df = pd.DataFrame(mood_items, columns=["emotion", "score"]) # pd Dataset emotion dataset.
emotion_df = emotion_df.sort_values(by=["score"], ascending=False).reset_index(
drop=True) # pd Dataset emotion dataset.
'''
'emotion_df' contains emotion labels and the scores of each emotion class.
'''
overlay = freeze_img.copy()
opacity = 0.4
if x + w + pivot_img_size < resolution_x:
# right
cv2.rectangle(freeze_img
# , (x+w,y+20)
, (x + w, y)
, (x + w + pivot_img_size, y + h)
, (64, 64, 64), cv2.FILLED)
cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)
elif x - pivot_img_size > 0:
# left
cv2.rectangle(freeze_img
# , (x-pivot_img_size,y+20)
, (x - pivot_img_size, y)
, (x, y + h)
, (64, 64, 64), cv2.FILLED)
cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)
assert isinstance(emotion_df.iterrows, object)
for index, instance in emotion_df.iterrows():
emotion_label = "%s " % (instance['emotion'])
emotion_score = instance['score'] / 100
bar_x = 35 # this is the size if an emotion is 100%
bar_x = int(bar_x * emotion_score)
if x + w + pivot_img_size < resolution_x:
text_location_y = y + 20 + (index + 1) * 20
text_location_x = x + w
if text_location_y < y + h:
cv2.putText(freeze_img, emotion_label, (text_location_x, text_location_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.rectangle(freeze_img
, (x + w + 70, y + 13 + (index + 1) * 20)
, (x + w + 70 + bar_x, y + 13 + (index + 1) * 20 + 5)
, (255, 255, 255), cv2.FILLED)
elif x - pivot_img_size > 0:
text_location_y = y + 20 + (index + 1) * 20
text_location_x = x - pivot_img_size
if text_location_y <= y + h:
cv2.putText(freeze_img, emotion_label, (text_location_x, text_location_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.rectangle(freeze_img
, (x - pivot_img_size + 70, y + 13 + (index + 1) * 20)
, (x - pivot_img_size + 70 + bar_x, y + 13 + (index + 1) * 20 + 5)
, (255, 255, 255), cv2.FILLED)
# -------------------------------
# face_224 = functions.preprocess_face(img = custom_face, target_size = (224, 224), grayscale = False, enforce_detection = False)
tic = time.time() # in this way, freezed image can show 5 seconds
cv2.imshow('img', freeze_img)
freezed_frame = freezed_frame + 1
face_detected = False
face_included_frames = 0
freeze = False
freezed_frame = 0
else:
cv2.imshow('img', img)
print('Execution speed: %f sec' % (time.time() - _start))
if cv2.waitKey(1) & 0xFF == ord('q'): # press q to quit
break
# kill open cv things
cap.release()
cv2.destroyAllWindows()
|
py | 1a55789a023cdb2844bcba306066245ad2fb9984 |
from itertools import product
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from seaborn._core.moves import Dodge, Jitter, Shift, Stack
from seaborn._core.rules import categorical_order
from seaborn._core.groupby import GroupBy
import pytest
class MoveFixtures:
@pytest.fixture
def df(self, rng):
n = 50
data = {
"x": rng.choice([0., 1., 2., 3.], n),
"y": rng.normal(0, 1, n),
"grp2": rng.choice(["a", "b"], n),
"grp3": rng.choice(["x", "y", "z"], n),
"width": 0.8,
"baseline": 0,
}
return pd.DataFrame(data)
@pytest.fixture
def toy_df(self):
data = {
"x": [0, 0, 1],
"y": [1, 2, 3],
"grp": ["a", "b", "b"],
"width": .8,
"baseline": 0,
}
return pd.DataFrame(data)
@pytest.fixture
def toy_df_widths(self, toy_df):
toy_df["width"] = [.8, .2, .4]
return toy_df
@pytest.fixture
def toy_df_facets(self):
data = {
"x": [0, 0, 1, 0, 1, 2],
"y": [1, 2, 3, 1, 2, 3],
"grp": ["a", "b", "a", "b", "a", "b"],
"col": ["x", "x", "x", "y", "y", "y"],
"width": .8,
"baseline": 0,
}
return pd.DataFrame(data)
class TestJitter(MoveFixtures):
def get_groupby(self, data, orient):
other = {"x": "y", "y": "x"}[orient]
variables = [v for v in data if v not in [other, "width"]]
return GroupBy(variables)
def check_same(self, res, df, *cols):
for col in cols:
assert_series_equal(res[col], df[col])
def check_pos(self, res, df, var, limit):
assert (res[var] != df[var]).all()
assert (res[var] < df[var] + limit / 2).all()
assert (res[var] > df[var] - limit / 2).all()
def test_width(self, df):
width = .4
orient = "x"
groupby = self.get_groupby(df, orient)
res = Jitter(width=width)(df, groupby, orient)
self.check_same(res, df, "y", "grp2", "width")
self.check_pos(res, df, "x", width * df["width"])
def test_x(self, df):
val = .2
orient = "x"
groupby = self.get_groupby(df, orient)
res = Jitter(x=val)(df, groupby, orient)
self.check_same(res, df, "y", "grp2", "width")
self.check_pos(res, df, "x", val)
def test_y(self, df):
val = .2
orient = "x"
groupby = self.get_groupby(df, orient)
res = Jitter(y=val)(df, groupby, orient)
self.check_same(res, df, "x", "grp2", "width")
self.check_pos(res, df, "y", val)
def test_seed(self, df):
kws = dict(width=.2, y=.1, seed=0)
orient = "x"
groupby = self.get_groupby(df, orient)
res1 = Jitter(**kws)(df, groupby, orient)
res2 = Jitter(**kws)(df, groupby, orient)
for var in "xy":
assert_series_equal(res1[var], res2[var])
class TestDodge(MoveFixtures):
# First some very simple toy examples
def test_default(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge()(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3]),
assert_array_almost_equal(res["x"], [-.2, .2, 1.2])
assert_array_almost_equal(res["width"], [.4, .4, .4])
def test_fill(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge(empty="fill")(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3]),
assert_array_almost_equal(res["x"], [-.2, .2, 1])
assert_array_almost_equal(res["width"], [.4, .4, .8])
def test_drop(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge("drop")(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1])
assert_array_almost_equal(res["width"], [.4, .4, .4])
def test_gap(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge(gap=.25)(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1.2])
assert_array_almost_equal(res["width"], [.3, .3, .3])
def test_widths_default(self, toy_df_widths):
groupby = GroupBy(["x", "grp"])
res = Dodge()(toy_df_widths, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.08, .32, 1.1])
assert_array_almost_equal(res["width"], [.64, .16, .2])
def test_widths_fill(self, toy_df_widths):
groupby = GroupBy(["x", "grp"])
res = Dodge(empty="fill")(toy_df_widths, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.08, .32, 1])
assert_array_almost_equal(res["width"], [.64, .16, .4])
def test_widths_drop(self, toy_df_widths):
groupby = GroupBy(["x", "grp"])
res = Dodge(empty="drop")(toy_df_widths, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.08, .32, 1])
assert_array_almost_equal(res["width"], [.64, .16, .2])
def test_faceted_default(self, toy_df_facets):
groupby = GroupBy(["x", "grp", "col"])
res = Dodge()(toy_df_facets, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, .8, .2, .8, 2.2])
assert_array_almost_equal(res["width"], [.4] * 6)
def test_faceted_fill(self, toy_df_facets):
groupby = GroupBy(["x", "grp", "col"])
res = Dodge(empty="fill")(toy_df_facets, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1, 0, 1, 2])
assert_array_almost_equal(res["width"], [.4, .4, .8, .8, .8, .8])
def test_faceted_drop(self, toy_df_facets):
groupby = GroupBy(["x", "grp", "col"])
res = Dodge(empty="drop")(toy_df_facets, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1, 0, 1, 2])
assert_array_almost_equal(res["width"], [.4] * 6)
def test_orient(self, toy_df):
df = toy_df.assign(x=toy_df["y"], y=toy_df["x"])
groupby = GroupBy(["y", "grp"])
res = Dodge("drop")(df, groupby, "y")
assert_array_equal(res["x"], [1, 2, 3])
assert_array_almost_equal(res["y"], [-.2, .2, 1])
assert_array_almost_equal(res["width"], [.4, .4, .4])
# Now tests with slightly more complicated data
@pytest.mark.parametrize("grp", ["grp2", "grp3"])
def test_single_semantic(self, df, grp):
groupby = GroupBy(["x", grp])
res = Dodge()(df, groupby, "x")
levels = categorical_order(df[grp])
w, n = 0.8, len(levels)
shifts = np.linspace(0, w - w / n, n)
shifts -= shifts.mean()
assert_series_equal(res["y"], df["y"])
assert_series_equal(res["width"], df["width"] / n)
for val, shift in zip(levels, shifts):
rows = df[grp] == val
assert_series_equal(res.loc[rows, "x"], df.loc[rows, "x"] + shift)
def test_two_semantics(self, df):
groupby = GroupBy(["x", "grp2", "grp3"])
res = Dodge()(df, groupby, "x")
levels = categorical_order(df["grp2"]), categorical_order(df["grp3"])
w, n = 0.8, len(levels[0]) * len(levels[1])
shifts = np.linspace(0, w - w / n, n)
shifts -= shifts.mean()
assert_series_equal(res["y"], df["y"])
assert_series_equal(res["width"], df["width"] / n)
for (v2, v3), shift in zip(product(*levels), shifts):
rows = (df["grp2"] == v2) & (df["grp3"] == v3)
assert_series_equal(res.loc[rows, "x"], df.loc[rows, "x"] + shift)
class TestStack(MoveFixtures):
def test_basic(self, toy_df):
groupby = GroupBy(["color", "group"])
res = Stack()(toy_df, groupby, "x")
assert_array_equal(res["x"], [0, 0, 1])
assert_array_equal(res["y"], [1, 3, 3])
assert_array_equal(res["baseline"], [0, 1, 0])
def test_faceted(self, toy_df_facets):
groupby = GroupBy(["color", "group"])
res = Stack()(toy_df_facets, groupby, "x")
assert_array_equal(res["x"], [0, 0, 1, 0, 1, 2])
assert_array_equal(res["y"], [1, 3, 3, 1, 2, 3])
assert_array_equal(res["baseline"], [0, 1, 0, 0, 0, 0])
def test_misssing_data(self, toy_df):
df = pd.DataFrame({
"x": [0, 0, 0],
"y": [2, np.nan, 1],
"baseline": [0, 0, 0],
})
res = Stack()(df, None, "x")
assert_array_equal(res["y"], [2, np.nan, 3])
assert_array_equal(res["baseline"], [0, np.nan, 2])
def test_baseline_homogeneity_check(self, toy_df):
toy_df["baseline"] = [0, 1, 2]
groupby = GroupBy(["color", "group"])
move = Stack()
err = "Stack move cannot be used when baselines"
with pytest.raises(RuntimeError, match=err):
move(toy_df, groupby, "x")
class TestShift(MoveFixtures):
def test_default(self, toy_df):
gb = GroupBy(["color", "group"])
res = Shift()(toy_df, gb, "x")
for col in toy_df:
assert_series_equal(toy_df[col], res[col])
@pytest.mark.parametrize("x,y", [(.3, 0), (0, .2), (.1, .3)])
def test_moves(self, toy_df, x, y):
gb = GroupBy(["color", "group"])
res = Shift(x=x, y=y)(toy_df, gb, "x")
assert_array_equal(res["x"], toy_df["x"] + x)
assert_array_equal(res["y"], toy_df["y"] + y)
|
py | 1a55793e5089f89d8e094d3d19b917c7ca92caad | from app.nanoleaf.model import AuroraObject
class PanelLayout(AuroraObject):
def __init__(self, requester, rhythm):
super().__init__(requester)
self.rhythm = rhythm
@property
def orientation(self):
"""Returns the orientation of the device (0-360)"""
return self._requester.request(method="GET", endpoint="panelLayout/globalOrientation/value")
@property
def orientation_min(self):
"""Returns the minimum orientation possible. (This always returns 0)"""
return self._requester.request(method="GET", endpoint="panelLayout/globalOrientation/min")
@property
def orientation_max(self):
"""Returns the maximum orientation possible. (This always returns 360)"""
return self._requester.request(method="GET", endpoint="panelLayout/globalOrientation/max")
@property
def panel_count(self):
"""Returns the number of panels connected to the device"""
count = int(self._requester.request(method="GET", endpoint="panelLayout/layout/numPanels"))
if self.rhythm.rhythm_connected:
count -= 1
return count
@property
def panel_length(self):
"""Returns the length of a single panel. (This always returns 150)"""
return self._requester.request(method="GET", endpoint="panelLayout/layout/sideLength")
@property
def panel_positions(self):
"""Returns a list of all panels with their attributes represented in a dict.
panelId - Unique identifier for this panel
x - X-coordinate
y - Y-coordinate
o - Rotational orientation
"""
return self._requester.request(method="GET", endpoint="panelLayout/layout/positionData") |
py | 1a557947acab519da8af03d8871ff4a27a4e8d38 | """
Plotting model residuals
========================
"""
import numpy as np
import seaborn as sns
sns.set(style="whitegrid")
# Make an example dataset with y ~ x
rs = np.random.RandomState(7)
x = rs.normal(2, 1, 75)
y = 2 + 1.5 * x + rs.normal(0, 2, 75)
# Plot the residuals after fitting a linear model
sns.residplot(x, y, lowess=True, color="g")
|
py | 1a5579dd670ed1e69a4c24bffa015ce366cc4af9 | import logging
from moneywagon import (
get_unspent_outputs, CurrentPrice, get_optimal_fee, PushTx,
get_onchain_exchange_rates,
get_current_price)
from moneywagon.core import get_optimal_services, get_magic_bytes
from bitcoin import mktx, sign, pubtoaddr, privtopub
from .crypto_data import crypto_data
from .currency_support import CurrencySupport
class Transaction(object):
def __init__(self, crypto, hex=None, verbose=False):
c = CurrencySupport()
if crypto not in c.supported_currencies('moneywagon', 'transaction'):
form = crypto_data[crypto]['transaction_form']
raise NotImplementedError("%s not yet supported (tx form: %s)" % (
crypto.upper(), form
))
self.change_address = None
self.crypto = crypto
self.fee_satoshi = None
self.outs = []
self.ins = []
self.onchain_rate = None
self.verbose = verbose
if hex:
self.hex = hex
def from_unit_to_satoshi(self, value, unit='satoshi'):
"""
Convert a value to satoshis. units can be any fiat currency.
By default the unit is satoshi.
"""
logging.info("from_unit_to_satoshi : Value: {}, unit: {}".format(value, unit))
if not unit or unit == 'satoshi':
return value
if unit == 'bitcoin' or unit == 'btc':
return value * 1e8
# assume fiat currency that we can convert
convert = get_current_price(self.crypto, unit)
if isinstance(convert, tuple):
convert = convert[0]
logging.info("from_unit_to_satoshi : Convert: {}".format(convert))
return int(value / convert * 1e8)
def add_raw_inputs(self, inputs, private_key=None):
"""
        Add a set of utxo's to this transaction. This method is better to use if you
        want finer control over which inputs get added to a transaction.
        `inputs` is a list of "unspent outputs" (they were 'outputs' to previous transactions,
        and 'inputs' to subsequent transactions).
`private_key` - All inputs will be signed by the passed in private key.
"""
for i in inputs:
self.ins.append(dict(input=i, private_key=private_key))
self.change_address = i['address']
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
)
def private_key_to_address(self, pk):
"""
Convert a private key (in hex format) into an address.
"""
pub = privtopub(pk)
pub_byte, priv_byte = get_magic_bytes(self.crypto)
if priv_byte >= 128:
priv_byte -= 128 #pybitcointools bug
return pubtoaddr(pub, pub_byte)
def add_inputs(self, private_key=None, address=None, amount='all', max_ins=None, password=None, services=None, **modes):
"""
        Make a call to an external service to get inputs from an address and/or private_key.
`amount` is the amount of [currency] worth of inputs (in satoshis) to add from
this address. Pass in 'all' (the default) to use *all* inputs found for this address.
Returned is the number of units (in satoshis) that were added as inputs to this tx.
"""
if private_key:
if private_key.startswith('6P'):
if not password:
raise Exception("Password required for BIP38 encoded private keys")
from .bip38 import Bip38EncryptedPrivateKey
private_key = Bip38EncryptedPrivateKey(self.crypto, private_key).decrypt(password)
address_from_priv = self.private_key_to_address(private_key)
if address and address != address_from_priv:
raise Exception("Invalid Private key")
address = address_from_priv
self.change_address = address
if not services:
services = get_optimal_services(self.crypto, 'unspent_outputs')
total_added_satoshi = 0
ins = 0
for utxo in self._get_utxos(address, services, **modes):
if max_ins and ins >= max_ins:
break
if (amount == 'all' or total_added_satoshi < amount):
ins += 1
self.ins.append(
dict(input=utxo, private_key=private_key)
)
total_added_satoshi += utxo['amount']
return total_added_satoshi, ins
def total_input_satoshis(self):
"""
Add up all the satoshis coming from all input tx's.
"""
just_inputs = [x['input'] for x in self.ins]
return sum([x['amount'] for x in just_inputs])
def select_inputs(self, amount):
        '''Maximize transaction priority. Select the oldest inputs that are
        sufficient to cover the spent amount. Then remove any unneeded inputs,
        starting with the smallest in value.
        Returns the sum of the amounts of the inputs selected.'''
sorted_txin = sorted(self.ins, key=lambda x:-x['input']['confirmations'])
total_amount = 0
for (idx, tx_in) in enumerate(sorted_txin):
total_amount += tx_in['input']['amount']
if (total_amount >= amount):
break
sorted_txin = sorted(sorted_txin[:idx+1], key=lambda x:x['input']['amount'])
for (idx, tx_in) in enumerate(sorted_txin):
value = tx_in['input']['amount']
if (total_amount - value < amount):
break
else:
total_amount -= value
self.ins = sorted_txin[idx:]
return total_amount
def add_output(self, address, value, unit='satoshi'):
"""
Add an output (a person who will receive funds via this tx).
If no unit is specified, satoshi is implied.
"""
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f)" % (
value_satoshi, (value_satoshi / 1e8)
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def onchain_exchange(self, withdraw_crypto, withdraw_address, value, unit='satoshi'):
"""
        This method is like `add_output`, but the value is routed through an
        on-chain exchange service and paid out in `withdraw_crypto` to `withdraw_address`.
"""
self.onchain_rate = get_onchain_exchange_rates(
self.crypto, withdraw_crypto, best=True, verbose=self.verbose
)
exchange_rate = float(self.onchain_rate['rate'])
result = self.onchain_rate['service'].get_onchain_exchange_address(
self.crypto, withdraw_crypto, withdraw_address
)
address = result['deposit']
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f) via onchain exchange, converting to %s %s" % (
value_satoshi, (value_satoshi / 1e8),
exchange_rate * value_satoshi / 1e8, withdraw_crypto.upper()
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def fee(self, value=None, unit='satoshi'):
"""
        Set the miner fee; if unit is not set, the value is assumed to be in satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
            # no fee was specified; fall back to a flat 2000 satoshi default (the $0.02 conversion below is disabled).
# convert = get_current_price(self.crypto, "usd")
# self.fee_satoshi = int(0.02 / convert * 1e8)
self.fee_satoshi = int(2000)
verbose = "Using default fee of:"
elif value == 'optimal':
# self.fee_satoshi = get_optimal_fee(
# self.crypto, self.estimate_size(), verbose=self.verbose
# )
self.fee_satoshi = int(2000)
verbose = "Using optimal fee of:"
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = "Using manually set fee of:"
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, "usd")
fee_dollar = convert * self.fee_satoshi / 1e8
print(verbose + " %s satoshis ($%.2f)" % (self.fee_satoshi, fee_dollar))
def estimate_size(self):
"""
        Estimate how many bytes this transaction will be by counting inputs
and outputs.
Formula taken from: http://bitcoin.stackexchange.com/a/3011/18150
"""
# if there are no outs use 1 (because the change will be an out)
outs = len(self.outs) or 1
return outs * 34 + 148 * len(self.ins) + 10
def get_hex(self, signed=True):
"""
Given all the data the user has given so far, make the hex using pybitcointools
"""
logging.info('2')
total_ins_satoshi = self.total_input_satoshis()
logging.info('3')
if total_ins_satoshi == 0:
raise ValueError("Can't make transaction, there are zero inputs")
logging.info('4')
# Note: there can be zero outs (sweep or coalesc transactions)
total_outs_satoshi = sum([x['value'] for x in self.outs])
logging.info('5')
if not self.fee_satoshi:
            self.fee() # use the flat default fee
logging.info('6')
change_satoshi = total_ins_satoshi - (total_outs_satoshi + self.fee_satoshi)
logging.info('7')
if change_satoshi < 0:
raise ValueError(
"Input amount (%s) must be more than all output amounts (%s) plus fees (%s). You need more %s."
% (total_ins_satoshi, total_outs_satoshi, self.fee_satoshi, self.crypto.upper())
)
logging.info('8')
ins = [x['input'] for x in self.ins]
logging.info('9')
if change_satoshi > 0:
if self.verbose:
print("Adding change address of %s satoshis to %s" % (change_satoshi, self.change_address))
change = [{'value': change_satoshi, 'address': self.change_address}]
logging.info('10')
else:
change = [] # no change ?!
if self.verbose: print("Inputs == Outputs, no change address needed.")
logging.info('11')
tx = mktx(ins, self.outs + change)
logging.info('12')
if signed:
for i, input_data in enumerate(self.ins):
logging.info('13')
if not input_data['private_key']:
raise Exception("Can't sign transaction, missing private key for input %s" % i)
tx = sign(tx, i, input_data['private_key'])
logging.info('14')
return tx
def push(self, services=None, redundancy=1):
if not services:
services = get_optimal_services(self.crypto, "push_tx")
self.pushers = []
pusher = PushTx(services=services, verbose=self.verbose)
results = [pusher.action(self.crypto, self.get_hex())]
try:
for service in services[1:redundancy-1]:
pusher = PushTx(services=[service], verbose=self.verbose)
results.append(self.pusher.action(self.crypto, self.get_hex()))
self.pushers.append(pusher)
except:
raise Exception("Partial push. Some services returned success, some failed.")
return results
|
py | 1a557b3ffbb9e29a75a229c097d50b712646a33e | # Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import hashlib
import os.path
import platform
import re
import shutil
from flask_babel import lazy_gettext as _
def get_tree_size(start_path):
"""
return size (in bytes) of filesystem tree
"""
if not os.path.exists(start_path):
raise ValueError(_("Incorrect path: %(start_path)s", start_path=start_path))
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def get_python_file_dst(dirname, basename):
basename = os.path.basename(basename)
(root, ext) = os.path.splitext(basename)
if ext != '.py' and ext != '.pyc':
        raise ValueError(_('Python file, %(basename)s, needs .py or .pyc extension.', basename=basename))
filename = os.path.join(dirname, 'digits_python_layers' + ext)
if os.path.isfile(filename):
        raise ValueError(_('Python file, %(filename)s, already exists.', filename=filename))
return filename
def copy_python_layer_file(from_client, job_dir, client_file, server_file):
if from_client and client_file:
filename = get_python_file_dst(job_dir, client_file.filename)
client_file.save(filename)
elif server_file and len(server_file) > 0:
filename = get_python_file_dst(job_dir, server_file)
shutil.copy(server_file, filename)
def tail(file, n=40):
"""
Returns last n lines of text file (or all lines if the file has fewer lines)
Arguments:
file -- full path of that file, calling side must ensure its existence
n -- the number of tailing lines to return
"""
if platform.system() in ['Linux', 'Darwin']:
import subprocess
output = subprocess.check_output(['tail', '-n{}'.format(n), file])
else:
from collections import deque
tailing_lines = deque()
with open(file) as f:
for line in f:
tailing_lines.append(line)
if len(tailing_lines) > n:
tailing_lines.popleft()
output = ''.join(tailing_lines)
return output
def dir_hash(dir_name):
"""
Return a hash for the files in a directory tree, excluding hidden
    files and directories. If any files are renamed, added, removed, or
modified the hash will change.
"""
if not os.path.isdir(dir_name):
raise TypeError(_('%(dir_name)s is not a directory.', dir_name=dir_name))
md5 = hashlib.md5()
for root, dirs, files in os.walk(dir_name, topdown=True):
# Skip if the root has a hidden directory in its path
if not re.search(r'/\.', root):
for f in files:
# Skip if the file is hidden
if not f.startswith('.') and not re.search(r'/\.', f):
# Change the hash if the file name changes
file_name = os.path.join(root, f).encode('utf-8')
md5.update(hashlib.md5(file_name).hexdigest().encode('utf-8'))
# Change the hash if the file content changes
data = open(file_name, 'rb').read()
md5.update(hashlib.md5(data).hexdigest().encode('utf-8'))
return md5.hexdigest()
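# Small self-check sketch (an addition): exercises the helpers above on the
# current working directory; only runs when the module is executed directly.
if __name__ == '__main__':
    print('tree size of cwd: %d bytes' % get_tree_size('.'))
    print('hash of cwd tree: %s' % dir_hash('.'))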
|
py | 1a557b65d7f999887c93f0b135efe333eddb6644 | #!/usr/bin/env python3
# Write a Shannon entropy calculator: H = -sum(pi * log(pi))
# The values should come from the command line
# E.g. python3 entropy.py 0.4 0.3 0.2 0.1
# Put the probabilities into a new list
# Don't forget to convert them to numbers
import math
import sys
numbers = []
for item in sys.argv[1:]:
numbers.append(float(item))
print(numbers)
H = 0
H += numbers[0] * math.log2(numbers[0])
H += numbers[1] * math.log2(numbers[1])
H += numbers[2] * math.log2(numbers[2])
H += numbers[3] * math.log2(numbers[3])
print(-H)
H = 0
for i in range(len(numbers)):
H += numbers[i] * math.log2(numbers[i])
print(-H)
H = 0
for p in numbers:
H += p * math.log2(p)
print(-H)
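# A further refinement (added illustration, not part of the original exercise):
# the same sum wrapped in a reusable function.
def entropy(probs):
    return -sum(p * math.log2(p) for p in probs)
print(entropy(numbers))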
"""
python3 31entropy.py 0.1 0.2 0.3 0.4
1.846
"""
|
py | 1a557bef55e0ccd4d357604e2fcc62c4f94d6d72 | # coding: utf-8
"""Miscellaneous utility functions and classes.
"""
from tornado.util import Configurable
if type('') is not type(b''):
def u(s):
return s
bytes_type = bytes
unicode_type = str
basestring_type = str
else:
def u(s):
return s.decode('unicode_escape')
bytes_type = str
unicode_type = unicode
basestring_type = basestring
class Cache(Configurable):
""" 缓存部分数据的缓存. 例如用来维护登录的 Session, 避免一次登录请求
可以实现使用内存的缓存(MemCache), 基于Redis的缓存(TODO)
"""
@classmethod
def configurable_base(cls):
return Cache
@classmethod
def configurable_default(cls):
return MemCache
def initialize(self):
pass
def get(self, key):
""" 依据 key 获取内容
"""
raise NotImplementedError()
def set(self, key, value):
""" 设置 key 对应值为value
"""
raise NotImplementedError()
def remove(self, key):
""" 使 key 失效
"""
raise NotImplementedError()
class MemCache(Cache):
def __init__(self):
self._dict = dict()
def get(self, key):
return self._dict.get(key, None)
def set(self, key, value):
self._dict[key] = value
return self
def remove(self, key):
if key in self._dict:
del self._dict[key]
|
py | 1a557cb51aeb53ed25c185f93e7cbf156182a953 | import numpy as np
import tensorflow as tf
def _to_int32(a):
return np.int32(np.ceil(a))
def extract_patches(detector: tf.keras.models.Model,
img: tf.TensorArray,
min_score: float = 0.4,
max_boxes: int = 10):
shape = tf.shape(img)
im_height, im_width = shape[0].numpy(), shape[1].numpy()
result = detector(img[tf.newaxis, ...])
result = {key: value.numpy() for key, value in result.items()}
boxes = result["detection_boxes"][0]
# entities = result["detection_class_entities"]
scores = result["detection_scores"][0]
examples = []
for i in range(min(len(boxes), max_boxes)):
if scores[i] >= min_score:
example = {}
ymin, xmin, ymax, xmax = tuple(boxes[i])
# class_name = entities[i].decode("ascii")
xmin, xmax, ymin, ymax = _to_int32(xmin * im_width), _to_int32(xmax * im_width), _to_int32(
ymin * im_height), _to_int32(ymax * im_height)
tmp = tf.image.crop_to_bounding_box(img, ymin, xmin, ymax - ymin, xmax - xmin)
# example["class_name"] = class_name
example["arr"] = tmp.numpy()
example["score"] = scores[i]
example["bounding_box"] = (xmin, xmax, ymin, ymax)
examples.append(example)
return {
"results": examples,
"height": im_height,
"width": im_width
}
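# Hedged usage sketch (an addition; defined but not called).  It assumes
# `detector` is a TF-Hub-style detection model returning "detection_boxes" and
# "detection_scores" (e.g. loaded elsewhere via tensorflow_hub), and the image
# path is a placeholder.
def _example_extract(detector, image_path="example.jpg"):
    img = tf.image.decode_jpeg(tf.io.read_file(image_path), channels=3)
    out = extract_patches(detector, img, min_score=0.5, max_boxes=5)
    for patch in out["results"]:
        print(patch["score"], patch["bounding_box"], patch["arr"].shape)
    return out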
|
py | 1a557d032098ef696f61c521aceb4f8fc4d26139 | """
## This script is for run tesing and test NYU dataset
"""
# %matplotlib inline
"""
## This script is for run tesing and test MSRA dataset
"""
# %matplotlib inline
""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import argparse
import os
from lib.solver import train_epoch, val_epoch, test_epoch
from lib.sampler import ChunkSampler
from src.v2v_model import V2VModel
from src.v2v_util import V2VVoxelization
from datasets.nyu_hand import NYUDataset_train, NYUDataset
#######################################################################################
# # Some helpers
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch Hand Keypoints Estimation Training')
#parser.add_argument('--resume', 'r', action='store_true', help='resume from checkpoint')
parser.add_argument('--resume', '-r', default=-1, type=int, help='resume after epoch')
args = parser.parse_args()
return args
#######################################################################################
# # Configurations
print('Warning: disable cudnn for batchnorm first, or just use only cuda instead!')
# When we need to resume training, enable randomness to avoid seeing the determinstic
# (agumented) samples many times.
# np.random.seed(1)
# torch.manual_seed(1)
# torch.cuda.manual_seed(1)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
dtype = torch.float
#
args = parse_args()
resume_train = args.resume >= 0
resume_after_epoch = args.resume
save_checkpoint = True
checkpoint_per_epochs = 1
checkpoint_dir = r'./checkpoint_nyu'
start_epoch = 0
epochs_num = 15
batch_size = 6
###############################################################################
# ls '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/joint_data.mat'
###############################################################################
# ls '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/nyu_center/center_train_refined.txt'
#######################################################################################
# # Data, transform, dataset and loader
# Data
print('==> Preparing data ..')
data_dir = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/'
center_dir = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/nyu_center/'
keypoints_num = 21
test_subject_id = 3
cubic_size = 250
# Transform
voxelization_train = V2VVoxelization(cubic_size=250, augmentation=True)
voxelization_val = V2VVoxelization(cubic_size=250, augmentation=False)
def transform_test(sample):
points, refpoint = sample['points'], sample['refpoint']
input = voxelize_input(points, refpoint)
return torch.from_numpy(input), torch.from_numpy(refpoint.reshape((1, -1)))
def transform_train(sample):
points, keypoints, refpoint = sample['points'], sample['joints'], sample['refpoint']
assert(keypoints.shape[0] == keypoints_num)
input, heatmap = voxelization_train({'points': points, 'keypoints': keypoints, 'refpoint': refpoint})
return (torch.from_numpy(input), torch.from_numpy(heatmap))
def transform_val(sample):
points, keypoints, refpoint = sample['points'], sample['joints'], sample['refpoint']
assert(keypoints.shape[0] == keypoints_num)
input, heatmap = voxelization_val({'points': points, 'keypoints': keypoints, 'refpoint': refpoint})
return (torch.from_numpy(input), torch.from_numpy(heatmap))
# Dataset and loader
train_set = NYUDataset_train(data_dir, center_dir ,transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=3)
#train_num = 1
#train_loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False, num_workers=6,sampler=ChunkSampler(train_num, 0))
# No separate validation dataset, just use test dataset instead
root = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/test_bin'
val_set = NYUDataset(root, center_dir, transform_test)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=6)
#######################################################################################
# # Model, criterion and optimizer
print('==> Constructing model ..')
net = V2VModel(input_channels=1, output_channels=keypoints_num)
net = net.to(device, dtype)
if device == torch.device('cuda'):
torch.backends.cudnn.enabled = True
cudnn.benchmark = True
print('cudnn.enabled: ', torch.backends.cudnn.enabled)
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters())
#optimizer = optim.RMSprop(net.parameters(), lr=2.5e-4)
#######################################################################################
# # Resume
if resume_train:
# Load checkpoint
epoch = resume_after_epoch
checkpoint_file = os.path.join(checkpoint_dir, 'epoch'+str(epoch)+'.pth')
print('==> Resuming from checkpoint after epoch {} ..'.format(epoch))
assert os.path.isdir(checkpoint_dir), 'Error: no checkpoint directory found!'
assert os.path.isfile(checkpoint_file), 'Error: no checkpoint file of epoch {}'.format(epoch)
checkpoint = torch.load(os.path.join(checkpoint_dir, 'epoch'+str(epoch)+'.pth'))
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1
#######################################################################################
# # Train and Validate
print('==> Training ..')
for epoch in range(start_epoch, start_epoch + epochs_num):
print('Epoch: {}'.format(epoch))
train_epoch(net, criterion, optimizer, train_loader, device=device, dtype=dtype)
val_epoch(net, criterion, val_loader, device=device, dtype=dtype)
if save_checkpoint and epoch % checkpoint_per_epochs == 0:
if not os.path.exists(checkpoint_dir): os.mkdir(checkpoint_dir)
checkpoint_file = os.path.join(checkpoint_dir, 'epoch'+str(epoch)+'.pth')
checkpoint = {
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch
}
torch.save(checkpoint, checkpoint_file)
""
# # Test
# print('==> Testing ..')
# voxelize_input = voxelization_train.voxelize
# evaluate_keypoints = voxelization_train.evaluate
""
# def transform_test(sample):
# points, refpoint = sample['points'], sample['refpoint']
# input = voxelize_input(points, refpoint)
# return torch.from_numpy(input), torch.from_numpy(refpoint.reshape((1, -1)))
# def transform_output(heatmaps, refpoints):
# keypoints = evaluate_keypoints(heatmaps, refpoints)
# return keypoints
# class BatchResultCollector():
# def __init__(self, samples_num, transform_output):
# self.samples_num = samples_num
# self.transform_output = transform_output
# self.keypoints = None
# self.idx = 0
# def __call__(self, data_batch):
# inputs_batch, outputs_batch, extra_batch = data_batch
# outputs_batch = outputs_batch.cpu().numpy()
# refpoints_batch = extra_batch.cpu().numpy()
# keypoints_batch = self.transform_output(outputs_batch, refpoints_batch)
# if self.keypoints is None:
# # Initialize keypoints until dimensions available now
# self.keypoints = np.zeros((self.samples_num, *keypoints_batch.shape[1:]))
# batch_size = keypoints_batch.shape[0]
# self.keypoints[self.idx:self.idx+batch_size] = keypoints_batch
# self.idx += batch_size
# def get_result(self):
# return self.keypoints
# print('Test on test dataset ..')
# def save_keypoints(filename, keypoints):
# # Reshape one sample keypoints into one line
# keypoints = keypoints.reshape(keypoints.shape[0], -1)
# np.savetxt(filename, keypoints, fmt='%0.4f')
# test_set = MARAHandDataset(data_dir, center_dir, 'test', test_subject_id, transform_test)
# test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=6)
# test_res_collector = BatchResultCollector(len(test_set), transform_output)
# test_epoch(net, test_loader, test_res_collector, device, dtype)
# keypoints_test = test_res_collector.get_result()
# save_keypoints('./test_res.txt', keypoints_test)
# print('Fit on train dataset ..')
# fit_set = MARAHandDataset(data_dir, center_dir, 'train', test_subject_id, transform_test)
# fit_loader = torch.utils.data.DataLoader(fit_set, batch_size=batch_size, shuffle=False, num_workers=6)
# fit_res_collector = BatchResultCollector(len(fit_set), transform_output)
# test_epoch(net, fit_loader, fit_res_collector, device, dtype)
# keypoints_fit = fit_res_collector.get_result()
# save_keypoints('./fit_res.txt', keypoints_fit)
# print('All done ..')
###############################################################################
# import os
# import numpy as np
# import sys
# import struct
# from torch.utils.data import Dataset
# import scipy.io as scio
###############################################################################
# num_samples = 72757
# world_dim = 3
# joint_num = 21
###############################################################################
# joints_world = np.zeros((num_samples, joint_num, world_dim))
###############################################################################
# = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/joint_data.mat'
###############################################################################
# root = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/'
###############################################################################
# keypointsXYZ_test = scio.loadmat(keypoint_file)["joint_xyz"].astype(np.float32)[0]
###############################################################################
# EVAL_JOINTS = np.array([
# 0, 6, 12, 18, 24,
# 1, 7, 13, 19, 25,
# 4, 10, 15, 21, 26,
# 5, 11, 17, 23, 28,
# 29 ])
###############################################################################
# keypointsXYZ_test = keypointsXYZ_test[::][:,EVAL_JOINTS,:]
###############################################################################
# np.shape(keypointsXYZ_test)
###############################################################################
# np.shape(joints_world)
###############################################################################
# for fileID in range(0, 72757):
# joints_world[fileID] = keypointsXYZ_test[fileID]
# print(os.path.join(root, 'depth_1_{:0>7d}.bin'.format(fileID+1)))
###############################################################################
# ls /V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/depth_1_0000001.bin
""
|
py | 1a557d766d24e02adce4ba65d36199f347a2e68c | import os
import signal
import time
from dataclasses import dataclass, field
from typing import Any, List, Optional
import gevent
import gevent.util
import structlog
from gevent._tracer import GreenletTracer
from gevent.hub import Hub
from raiden.exceptions import RaidenUnrecoverableError
LIBEV_LOW_PRIORITY = -2
LIBEV_HIGH_PRIORITY = 2
log = structlog.get_logger(__name__)
def enable_gevent_monitoring_signal() -> None:
"""Install a signal handler for SIGUSR1 that executes gevent.util.print_run_info().
This can help evaluating the gevent greenlet tree.
See http://www.gevent.org/monitoring.html for more information.
Usage:
pytest [...]
# while test is running (or stopped in a pdb session):
kill -SIGUSR1 $(pidof -x pytest)
"""
def on_signal(signalnum: Any, stack_frame: Any) -> None: # pylint: disable=unused-argument
gevent.util.print_run_info()
if os.name == "nt":
# SIGUSR1 not supported on Windows
return
signal.signal(signal.SIGUSR1, on_signal)
def limit_thread_cpu_usage_by_time() -> None:
"""This will enable Gevent's monitoring thread, and if a Greenlet uses the
CPU for longer than `max_blocking_time` it will be killed.
This will result in the whole process being killed, since exceptions are
    propagated to the top level. The goal here is to detect slow functions that
have to be optimized.
"""
gevent.config.monitor_thread = True
gevent.config.max_blocking_time = 10.0
# The monitoring thread will use the trace api just like the TraceSampler
# and the SwitchMonitoring. Sadly there is no API to uninstall the thread,
# but this should not be a problem.
monitor_thread = gevent.get_hub().start_periodic_monitoring_thread()
# This code must not use the tracer from the monitor_thread because calls
# to `did_block_hub` will reset its internal state. If two threads use the
    # same underlying tracer, false positives will happen, because the switch
    # counter will be artificially reset.
greenlet_tracer = GreenletTracer()
def kill_offender(hub: Hub) -> None:
if greenlet_tracer.did_block_hub(hub):
active_greenlet = greenlet_tracer.active_greenlet
msg = ""
if monitor_thread._tracer.active_greenlet != active_greenlet:
msg = (
f"Mismatch values for the active_greenlet among the "
f"monitor_thread and deubgging tracer, this either means "
f"there is a bug in the trace chain (the wrong values are "
f"forwarded), or that one of the trace functions was wrongly "
f"uninstalled. Active greenlets "
f"monitor_thread={monitor_thread._tracer.active_greenlet} "
f"debug_tracer={active_greenlet}."
)
hub.loop.run_callback(
lambda: active_greenlet.throw(
RaidenUnrecoverableError(
f"A greenlet used the CPU for longer than "
f"{gevent.config.max_blocking_time} seconds, killing it.{msg}"
)
)
)
monitor_thread.add_monitoring_function(kill_offender, gevent.config.max_blocking_time)
@dataclass
class IdleMeasurement:
before_poll: float
after_poll: float
@dataclass
class Idle:
""" Measures how much time the thread waited on the libev backend. """
measurement_interval: float
before_poll: Optional[float] = None
last_print: float = field(init=False, default_factory=time.time)
measurements: List[IdleMeasurement] = field(init=False, default_factory=list)
def prepare_handler(self) -> None:
"""The prepare handler executed before the call to the polling backend
(e.g. select/epoll).
Note:
- Gevent uses a prepare handler to execute deferred callbacks. This
means there will be some work done on with this type of handler that
must not added to the idle time. To avoid counting the time spent on
the deferred callbacks the prepare_handler must be installed with a
low priority, so that it executes after the gevent's callbacks.
"""
self.before_poll = time.time()
def check_handler(self) -> None:
"""Check handler executed after the poll backend returns.
Note:
- For each of the watchers in the ready state there will be a callback,
which will do work related to the watcher (e.g. read from a socket).
This time must not be accounted for in the Idle timeout, therefore
this handler must have a high priority.
"""
curr_time = time.time()
# It is possible for the check_handler to be executed before the
# prepare_handler, this happens when the watchers are installed by a
# greenlet that was switched onto because of IO (IOW, Idle.enable is
# called while the event loop is executing watchers, after the `poll`)
if self.before_poll is not None:
self.measurements.append( # pylint: disable=no-member
IdleMeasurement(self.before_poll, curr_time)
)
# keep at least one measurement, this will tell if the code is
# blocking for an extended period of time.
while len(self.measurements) > 1 and self.running_interval > self.measurement_interval:
self.measurements.pop() # pylint: disable=no-member
if curr_time - self.last_print >= self.measurement_interval:
self.log()
self.last_print = curr_time
def enable(self) -> None:
loop = gevent.get_hub().loop
loop.prepare(priority=LIBEV_LOW_PRIORITY).start(self.prepare_handler)
loop.check(priority=LIBEV_HIGH_PRIORITY).start(self.check_handler)
@property
def measurements_start(self) -> float:
return self.measurements[0].before_poll
@property
def measurements_end(self) -> float:
return self.measurements[-1].after_poll
@property
def running_interval(self) -> float:
"""The number of seconds idled by this thread.
This will take into account the measurements frequency. Ideally the
measurements would happen exactly every `measurement_interval` seconds,
however that dependends on the existing load for the given thread, if
the event loop doesn't run often enough the running_interval will be
larger than the target `measurement_interval`.
"""
return self.measurements_end - self.measurements_start
@property
def idled(self) -> float:
""" The amount of seconds the thread idled. """
return sum(interval.after_poll - interval.before_poll for interval in self.measurements)
@property
def idled_pct(self) -> float:
""" The percentage of time the thread idled, waiting on the event loop. """
return self.idled / self.running_interval
@property
def context_switches(self) -> int:
""" The number of context switches done for the past `measurement_interval`. """
return len(IDLE.measurements)
def log(self) -> None:
if not self.measurements:
log.debug("No idle data", context_switches=self.context_switches)
return
is_blocking = (
len(self.measurements) == 1 and self.running_interval > self.measurement_interval
)
if is_blocking:
msg = "Blocking function, there is not a lot of idle time"
else:
msg = "Idle"
log.debug(
msg,
start=self.measurements_start,
context_switches=self.context_switches,
idled=self.idled,
interval=self.running_interval,
idle_pct=self.idled_pct,
)
def __bool__(self) -> bool:
return bool(self.measurements)
def __str__(self) -> str:
if not self.measurements:
return ""
return (
f"The thread had {self.context_switches} context_switches, and "
f"idled {self.idled_pct}% of the time."
)
IDLE = Idle(10)
|
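A minimal usage sketch for the Idle monitor above, assuming the file is importable as a module named idle_monitor (a hypothetical name) and that its module-level helpers (log, LIBEV_LOW_PRIORITY, LIBEV_HIGH_PRIORITY) are defined earlier in that file; the worker workload below is purely illustrative.

import gevent

from idle_monitor import IDLE  # hypothetical module name for the file above

def worker(iterations):
    # gevent.sleep yields to the hub, so most of this time is counted as idle.
    for _ in range(iterations):
        gevent.sleep(0.1)

if __name__ == "__main__":
    # Install the prepare/check watchers on the current hub's event loop.
    IDLE.enable()

    # Run a few cooperating greenlets so the monitor can collect measurements.
    gevent.joinall([gevent.spawn(worker, 20) for _ in range(5)])

    # Idle.__str__ summarizes the context switches and the idle percentage.
    print(str(IDLE))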
py | 1a557e0b2dd54be2c7ab9c0c60054bfc65ea7186 | def say_hello(name=None):
    # Default to None so the generic greeting is used when no name is given.
    if name:
        return f"Hello! {name}"
    return "Hello! World"
|
py | 1a557ed70fdca22d05ae7d047a4b495e756bb03f | # -*- coding: utf-8 -*-
"""
MoinMoin - OpenID server action
This is the UI and provider for OpenID.
@copyright: 2006, 2007, 2008 Johannes Berg <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.support.python_compatibility import rsplit
from MoinMoin.util.moinoid import MoinOpenIDStore, strbase64
from MoinMoin import wikiutil
from openid.consumer.discover import OPENID_1_0_TYPE, \
OPENID_1_1_TYPE, OPENID_2_0_TYPE, OPENID_IDP_2_0_TYPE
from openid import sreg
from openid.cryptutil import randomString
from openid.server import server
from openid.message import IDENTIFIER_SELECT
from MoinMoin.widget import html
from MoinMoin.web.request import MoinMoinFinish
def execute(pagename, request):
return MoinOpenIDServer(pagename, request).handle()
class MoinOpenIDServer:
def __init__(self, pagename, request):
self.request = request
self._ = request.getText
self.cfg = request.cfg
def serveYadisEP(self, endpoint_url):
request = self.request
request.content_type = 'application/xrds+xml'
user_url = request.getQualifiedURL(request.page.url(request))
self.request.write("""\
<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS
xmlns:xrds="xri://$xrds"
xmlns="xri://$xrd*($v*2.0)">
<XRD>
<Service priority="0">
<Type>%(type10)s</Type>
<URI>%(uri)s</URI>
<LocalID>%(id)s</LocalID>
</Service>
<Service priority="0">
<Type>%(type11)s</Type>
<URI>%(uri)s</URI>
<LocalID>%(id)s</LocalID>
</Service>
<!-- older version of the spec draft -->
<Service priority="0">
<Type>http://openid.net/signon/2.0</Type>
<URI>%(uri)s</URI>
<LocalID>%(id)s</LocalID>
</Service>
<Service priority="0">
<Type>%(type20)s</Type>
<URI>%(uri)s</URI>
<LocalID>%(id)s</LocalID>
</Service>
</XRD>
</xrds:XRDS>
""" % {
'type10': OPENID_1_0_TYPE,
'type11': OPENID_1_1_TYPE,
'type20': OPENID_2_0_TYPE,
'uri': endpoint_url,
'id': user_url
})
def serveYadisIDP(self, endpoint_url):
request = self.request
request.content_type = 'application/xrds+xml'
user_url = request.getQualifiedURL(request.page.url(request))
self.request.write("""\
<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS
xmlns:xrds="xri://$xrds"
xmlns="xri://$xrd*($v*2.0)">
<XRD>
<Service priority="0">
<Type>%(typeidp)s</Type>
<URI>%(uri)s</URI>
<LocalID>%(id)s</LocalID>
</Service>
</XRD>
</xrds:XRDS>
""" % {
'typeidp': OPENID_IDP_2_0_TYPE,
'uri': endpoint_url,
'id': user_url
})
def _verify_endpoint_identity(self, identity):
"""
Verify that the given identity matches the current endpoint.
We always serve out /UserName?action=... for the UserName
OpenID and this is pure paranoia to make sure it is that way
on incoming data.
Also verify that the given identity is allowed to have an OpenID.
"""
request = self.request
cfg = request.cfg
# we can very well split on the last slash since usernames
# must not contain slashes
base, received_name = rsplit(identity, '/', 1)
check_name = received_name
if received_name == '':
pg = wikiutil.getFrontPage(request)
if pg:
received_name = pg.page_name
check_name = received_name
if 'openid.user' in pg.pi:
received_name = pg.pi['openid.user']
# some sanity checking
# even if someone goes to http://johannes.sipsolutions.net/
# we'll serve out http://johannes.sipsolutions.net/JohannesBerg?action=serveopenid
# (if JohannesBerg is set as page_front_page)
# For the #OpenIDUser PI, we need to allow the page that includes the PI,
# hence use check_name here (see above for how it is assigned)
fullidentity = '/'.join([base, check_name])
thisurl = request.getQualifiedURL(request.page.url(request))
if not thisurl == fullidentity:
return False
# again, we never put an openid.server link on this page...
# why are they here?
openid_group_name = cfg.openid_server_restricted_users_group
if openid_group_name and received_name not in request.groups.get(openid_group_name, []):
return False
return True
def handleCheckIDRequest(self, identity, username, openidreq, server_url):
if self.user_trusts_url(openidreq.trust_root):
return self.approved(identity, openidreq, server_url=server_url)
if openidreq.immediate:
return openidreq.answer(False, identity=identity, server_url=server_url)
self.request.session['openidserver.request'] = openidreq
self.show_decide_page(identity, username, openidreq)
return None
def _make_identity(self):
page = wikiutil.getHomePage(self.request)
if page:
server_url = self.request.getQualifiedURL(
page.url(self.request, querystr={'action': 'serveopenid'}))
identity = self.request.getQualifiedURL(page.url(self.request))
return identity, server_url
return None, None
def handle(self):
_ = self._
request = self.request
form = request.values
username = request.page.page_name
if 'openid.user' in request.page.pi:
username = request.page.pi['openid.user']
if not request.cfg.openid_server_enabled:
# since we didn't put any openid.server into
# the page to start with, this is someone trying
# to abuse us. No need to give a nice error
request.makeForbidden(403, '')
return
server_url = request.getQualifiedURL(
request.page.url(request, querystr={'action': 'serveopenid'}))
yadis_type = form.get('yadis')
if yadis_type == 'ep':
return self.serveYadisEP(server_url)
elif yadis_type == 'idp':
return self.serveYadisIDP(server_url)
# if the identity is set it must match the server URL
# sort of arbitrary, but we have to have some restriction
identity = form.get('openid.identity')
if identity == IDENTIFIER_SELECT:
identity, server_url = self._make_identity()
if not identity:
return self._sorry_no_identity()
username = request.user.name
elif identity is not None:
if not self._verify_endpoint_identity(identity):
request.makeForbidden(403, 'verification failed')
return
if 'openid.user' in request.page.pi:
username = request.page.pi['openid.user']
store = MoinOpenIDStore(request)
openidsrv = server.Server(store, op_endpoint=server_url)
answer = None
if 'dontapprove' in form:
answer = self.handle_response(False, username, identity)
if answer is None:
return
        elif 'approve' in form:
answer = self.handle_response(True, username, identity)
if answer is None:
return
else:
query = {}
for key in form:
query[key] = form[key]
try:
openidreq = openidsrv.decodeRequest(query)
except Exception, e:
request.makeForbidden(403, 'OpenID decode error: %r' % e)
return
if openidreq is None:
request.makeForbidden(403, 'no request')
return
if request.user.valid and username != request.user.name:
answer = openidreq.answer(False, identity=identity, server_url=server_url)
elif openidreq.mode in ["checkid_immediate", "checkid_setup"]:
answer = self.handleCheckIDRequest(identity, username, openidreq, server_url)
if answer is None:
return
else:
answer = openidsrv.handleRequest(openidreq)
webanswer = openidsrv.encodeResponse(answer)
request.status = '%d OpenID status' % webanswer.code
for hdr in webanswer.headers:
request.headers.add(hdr, webanswer.headers[hdr])
request.write(webanswer.body)
raise MoinMoinFinish
def handle_response(self, positive, username, identity):
request = self.request
form = request.values
# check form submission nonce, use None for stored value default
# since it cannot be sent from the user
session_nonce = self.request.session.get('openidserver.nonce')
if session_nonce is not None:
del self.request.session['openidserver.nonce']
# use empty string if nothing was sent
form_nonce = form.get('nonce', '')
if session_nonce != form_nonce:
self.request.makeForbidden(403, 'invalid nonce')
return None
openidreq = request.session.get('openidserver.request')
if not openidreq:
request.makeForbidden(403, 'no response request')
return None
del request.session['openidserver.request']
if (not positive or
not request.user.valid or
request.user.name != username):
return openidreq.answer(False)
if form.get('remember', 'no') == 'yes':
if not hasattr(request.user, 'openid_trusted_roots'):
request.user.openid_trusted_roots = []
request.user.openid_trusted_roots.append(strbase64(openidreq.trust_root))
request.user.save()
dummyidentity, server_url = self._make_identity()
return self.approved(identity, openidreq, server_url=server_url)
def approved(self, identity, openidreq, server_url=None):
# TODO: If simple registration is implemented, this needs
# to do something like the following:
#
# sreg_data = { fill this dict with real values }
# sreq_req = sreg.SRegRequest.fromOpenIDRequest(openidreq.message)
# # do something with the request to see what values are required?
# sreg_resp = sreg.SRegResponse.extractResponse(openidreq, sreg_data)
# sreg_resp.addToOpenIDResponse(reply.fields)
reply = openidreq.answer(True, identity=identity, server_url=server_url)
return reply
def user_trusts_url(self, trustroot):
user = self.request.user
if hasattr(user, 'openid_trusted_roots'):
return strbase64(trustroot) in user.openid_trusted_roots
return False
def show_decide_page(self, identity, username, openidreq):
request = self.request
_ = self._
if not request.user.valid or username != request.user.name:
request.makeForbidden(403, _('''You need to manually go to your OpenID provider wiki
and log in before you can use your OpenID. MoinMoin will
never allow you to enter your password here.
Once you have logged in, simply reload this page.'''))
return
request.theme.send_title(_("OpenID Trust verification"), pagename=request.page.page_name)
# Start content (important for RTL support)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.paragraph(1))
request.write(_('The site %s has asked for your identity.') % openidreq.trust_root)
request.write(request.formatter.paragraph(0))
request.write(request.formatter.paragraph(1))
request.write(_('''
If you approve, the site represented by the trust root below will be
told that you control the identity URL %s. (If you are using a delegated
identity, the site will take care of reversing the
delegation on its own.)''') % openidreq.identity)
request.write(request.formatter.paragraph(0))
form = html.FORM(method='POST', action=request.page.url(request))
form.append(html.INPUT(type='hidden', name='action', value='serveopenid'))
form.append(html.INPUT(type='hidden', name='openid.identity', value=openidreq.identity))
form.append(html.INPUT(type='hidden', name='openid.return_to', value=openidreq.return_to))
form.append(html.INPUT(type='hidden', name='openid.trust_root', value=openidreq.trust_root))
form.append(html.INPUT(type='hidden', name='openid.mode', value=openidreq.mode))
form.append(html.INPUT(type='hidden', name='name', value=username))
nonce = randomString(32, 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
form.append(html.INPUT(type='hidden', name='nonce', value=nonce))
request.session['openidserver.nonce'] = nonce
table = html.TABLE()
form.append(table)
tr = html.TR()
table.append(tr)
tr.append(html.TD().append(html.STRONG().append(html.Text(_('Trust root')))))
tr.append(html.TD().append(html.Text(openidreq.trust_root)))
tr = html.TR()
table.append(tr)
tr.append(html.TD().append(html.STRONG().append(html.Text(_('Identity URL')))))
tr.append(html.TD().append(html.Text(identity)))
tr = html.TR()
table.append(tr)
tr.append(html.TD().append(html.STRONG().append(html.Text(_('Name')))))
tr.append(html.TD().append(html.Text(username)))
tr = html.TR()
table.append(tr)
tr.append(html.TD().append(html.STRONG().append(html.Text(_('Remember decision')))))
td = html.TD()
tr.append(td)
td.append(html.INPUT(type='checkbox', name='remember', value='yes'))
td.append(html.Text(_('Remember this trust decision and don\'t ask again')))
tr = html.TR()
table.append(tr)
tr.append(html.TD())
td = html.TD()
tr.append(td)
td.append(html.INPUT(type='submit', name='approve', value=_("Approve")))
td.append(html.INPUT(type='submit', name='dontapprove', value=_("Don't approve")))
request.write(unicode(form))
request.write(request.formatter.endContent())
request.theme.send_footer(request.page.page_name)
request.theme.send_closing_html()
def _sorry_no_identity(self):
request = self.request
_ = self._
request.theme.send_title(_("OpenID not served"), pagename=request.page.page_name)
# Start content (important for RTL support)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.paragraph(1))
request.write(_('''
Unfortunately you have not created your homepage yet. Therefore,
we cannot serve an OpenID for you. Please create your homepage first
and then reload this page or click the button below to cancel this
verification.'''))
request.write(request.formatter.paragraph(0))
form = html.FORM(method='POST', action=request.page.url(request))
form.append(html.INPUT(type='hidden', name='action', value='serveopenid'))
nonce = randomString(32, 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
form.append(html.INPUT(type='hidden', name='nonce', value=nonce))
request.session['openidserver.nonce'] = nonce
form.append(html.INPUT(type='submit', name='dontapprove', value=_("Cancel")))
request.write(unicode(form))
request.write(request.formatter.endContent())
request.theme.send_footer(request.page.page_name)
request.theme.send_closing_html()
|
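The handle() method above follows the standard python-openid server cycle: decodeRequest turns the raw query parameters into a request object, the request is answered (directly or via the library's handleRequest), and encodeResponse converts the answer back into an HTTP status, headers, and body. The sketch below shows that cycle stripped of the wiki-specific parts; the endpoint URL is a placeholder, the library's in-memory MemoryStore stands in for MoinOpenIDStore, and checkid requests are simply refused instead of going through the trust-root/decide-page logic.

from openid.server import server
from openid.store.memstore import MemoryStore

# Placeholder endpoint; the wiki uses the ?action=serveopenid URL of the user's page.
OP_ENDPOINT = "https://wiki.example.org/UserName?action=serveopenid"

def process_openid_query(query):
    """Run one python-openid server cycle for a dict of request parameters."""
    openidsrv = server.Server(MemoryStore(), op_endpoint=OP_ENDPOINT)

    openidreq = openidsrv.decodeRequest(query)
    if openidreq is None:
        return 403, {}, 'no request'

    if openidreq.mode in ('checkid_immediate', 'checkid_setup'):
        # The wiki decides this via stored trust roots and the decide page;
        # refusing is the safe default for this sketch.
        answer = openidreq.answer(False)
    else:
        # associate/check_authentication requests are handled by the library.
        answer = openidsrv.handleRequest(openidreq)

    webanswer = openidsrv.encodeResponse(answer)
    return webanswer.code, dict(webanswer.headers), webanswer.body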
py | 1a557f5c51470b2ad0b91b830280b646fc3ad119 | class CategoryInvalidConfig(Exception): pass
class CategoryInvalidConfigProperty(Exception): pass
class Category(object):
def __init__(self, *args, **kwargs):
self.items = {}
self.items.update(**kwargs)
def get(self):
ret = {}
for k, v in self.items.items():
ret[k] = v.value
return ret
def __repr__(self):
return "{}".format(self.get())
@classmethod
def create(cls, config_map):
return cls(**config_map.properties)
|
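A short usage sketch for Category.create above, assuming the Category class is in scope. create only needs an object exposing a properties dict whose values have a .value attribute, so the config_map and property objects below are hypothetical stand-ins built with SimpleNamespace.

from types import SimpleNamespace

# Hypothetical property objects: anything with a .value attribute works.
properties = {
    'host': SimpleNamespace(value='localhost'),
    'port': SimpleNamespace(value=5432),
}

# Hypothetical config_map: anything with a .properties dict works.
config_map = SimpleNamespace(properties=properties)

category = Category.create(config_map)
print(category.get())  # {'host': 'localhost', 'port': 5432}
print(category)        # printing falls back to __repr__, which delegates to get()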
py | 1a557fa410fe55deba84fb0bbfb1fa2f571cdf93 | from random import randint
computador = randint(1, 11)
while True:
    jogador = int(input('Which number am I thinking of? '))
    if jogador > computador:
        print('lower')
    elif jogador < computador:
        print('higher')
    elif jogador == computador:
        print(f'Congratulations, you got it! I was thinking of {computador}')
        break
|
py | 1a557fe97980a8c8460d2ee73bfbae9e551981c8 | import battlecode as bc
import behaviour_tree as bt
import random
import units
class Knight(units.Unit):
"""The container for the knight unit."""
def __init__(self, unit, gc):
super().__init__(unit, gc)
self._targeted_enemy = None
def generate_tree(self):
"""Generates the tree for the knight."""
tree = bt.FallBack()
# Attack or chase enemies
enemy_handling = bt.Sequence()
enemy_visible = self.EnemyVisible(self)
enemy_fallback = bt.FallBack()
enemy_attack = bt.Sequence()
enemy_adjacent = self.EnemyAdjacent(self)
attack = self.Attack(self)
enemy_attack.add_child(enemy_adjacent)
enemy_attack.add_child(attack)
enemy_javelin = bt.Sequence()
can_javelin = self.CanJavelin(self)
javelin = self.Javelin(self)
move_towards_enemy = self.MoveTowardsEnemy(self)
enemy_javelin.add_child(can_javelin)
enemy_javelin.add_child(javelin)
enemy_javelin.add_child(move_towards_enemy)
enemy_chase = bt.Sequence()
enemy_chase.add_child(move_towards_enemy)
enemy_chase.add_child(enemy_attack)
enemy_fallback.add_child(enemy_attack)
enemy_fallback.add_child(enemy_javelin)
enemy_fallback.add_child(enemy_chase)
enemy_handling.add_child(enemy_visible)
enemy_handling.add_child(enemy_fallback)
tree.add_child(enemy_handling)
# Random movement
move_randomly = self.MoveRandomly(self)
tree.add_child(move_randomly)
return tree
##################
# ENEMY HANDLING #
##################
class EnemyVisible(bt.Condition):
"""Check if there is an enemy close to the knight."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def condition(self):
knight = self.__outer.unit()
            vision_range = knight.vision_range
            location = knight.location.map_location()
            team = knight.team
            enemy_team = bc.Team.Red if team == bc.Team.Blue else bc.Team.Blue
            nearby_units = self.__outer._gc.sense_nearby_units_by_team(location, vision_range, enemy_team)
# No enemy visible
if not nearby_units:
return False
# Look for the enemy closest to the knight with lowest health
best_enemy = nearby_units[0]
best_enemy_distance = location.distance_squared_to(best_enemy.location.map_location())
for unit in nearby_units:
enemy_distance = location.distance_squared_to(unit.location.map_location())
if enemy_distance < best_enemy_distance:
best_enemy = unit
best_enemy_distance = enemy_distance
elif enemy_distance == best_enemy_distance:
if unit.health < best_enemy.health:
best_enemy = unit
best_enemy_distance = enemy_distance
self.__outer._targeted_enemy = best_enemy.id
return True
class EnemyAdjacent(bt.Condition):
"""Check if there is an enemy adjacent to the knight."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def condition(self):
location = self.__outer.unit().location
enemy_location = self.__outer.get_enemy_unit(self.__outer._targeted_enemy).location
return location.is_adjacent_to(enemy_location)
class Attack(bt.Action):
"""Attacks the adjacent enemy."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
knight = self.__outer.unit()
if not enemy:
self._status = bt.Status.FAIL
else:
if self.__outer._gc.is_attack_ready(knight.id) and self.__outer._gc.can_attack(knight.id, enemy.id):
self.__outer._gc.attack(knight.id, enemy.id)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.RUNNING
class CanJavelin(bt.Condition):
"""Check if the knight can perform a javelin attack."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def condition(self):
knight = self.__outer.unit()
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
if knight.research_level < 3:
return False
if not enemy:
return False
distance = knight.location.map_location().distance_squared_to(enemy.location.map_location())
return distance <= knight.ability_range()
class Javelin(bt.Action):
"""Perform the javelin attack."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
knight = self.__outer.unit()
if not enemy:
self._status = bt.Status.FAIL
else:
if self.__outer._gc.is_javelin_ready(knight.id) and self.__outer._gc.can_javelin(knight.id, enemy.id):
self.__outer._gc.javelin(knight.id, enemy.id)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.RUNNING
class MoveTowardsEnemy(bt.Action):
"""Moves in the direction of the visible enemy."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
knight = self.__outer.unit()
if not enemy:
self._status = bt.Status.FAIL
else:
enemy_direction = knight.location.map_location().direction_to(enemy.location.map_location())
if self.__outer._gc.is_move_ready(knight.id) and self.__outer._gc.can_move(knight.id, enemy_direction):
self.__outer._gc.move_robot(knight.id, enemy_direction)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.FAIL
#################
# MOVE RANDOMLY #
#################
class MoveRandomly(bt.Action):
"""Move in some random direction."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
random_dir = random.choice(list(bc.Direction))
knight = self.__outer.unit()
if self.__outer._gc.is_move_ready(knight.id) and self.__outer._gc.can_move(knight.id, random_dir):
self.__outer._gc.move_robot(knight.id, random_dir)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.FAIL
|
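generate_tree above composes the usual fallback/sequence pattern: a fallback returns the first child result that is not a failure, while a sequence stops at the first child that does not succeed. The behaviour_tree module is not part of this file, so the sketch below is only an illustrative approximation of those two composites and of a three-valued tick status; the run method name and the Status enum are assumptions, not the project's actual bt API.

from enum import Enum

class Status(Enum):
    SUCCESS = 1
    FAIL = 2
    RUNNING = 3

class FallBack:
    """Tick children in order and return the first result that is not FAIL."""
    def __init__(self):
        self.children = []

    def add_child(self, child):
        self.children.append(child)

    def run(self):
        for child in self.children:
            status = child.run()
            if status != Status.FAIL:
                return status
        return Status.FAIL

class Sequence:
    """Tick children in order and stop at the first result that is not SUCCESS."""
    def __init__(self):
        self.children = []

    def add_child(self, child):
        self.children.append(child)

    def run(self):
        for child in self.children:
            status = child.run()
            if status != Status.SUCCESS:
                return status
        return Status.SUCCESS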