filename | text
---|---|
the-stack_0_27711
|
from fixtures import * # noqa: F401,F403
import os
with open('config.vars') as configfile:
config = dict([(line.rstrip().split('=', 1)) for line in configfile])
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
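# Illustrative note (assumed file contents): config.vars is expected to hold one
# KEY=VALUE pair per line, e.g. "DEVELOPER=1"; the comprehension above splits each
# line on the first '=' only, so values may themselves contain '='.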
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
l1.daemon.wait_for_log("Forgetting remote peer .*")
l2.daemon.wait_for_log("Forgetting remote peer .*")
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("hand_back_peer .*: now local again")
l2.fund_channel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
l1.daemon.wait_for_log("Forgetting remote peer .*")
l2.daemon.wait_for_log("Forgetting remote peer .*")
|
the-stack_0_27712
|
import numpy as np
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.links as L
import chainer.functions as F
from lib.utils import *
from lib.functions import *
class YOLOv2(Chain):
"""
YOLOv2
- It takes a (416, 416, 3)-sized image as input
"""
def __init__(self, n_classes, n_boxes):
super(YOLOv2, self).__init__(
##### common layers for both pretrained layers and yolov2 #####
conv1 = L.Convolution2D(3, 32, ksize=3, stride=1, pad=1, nobias=True),
bn1 = L.BatchNormalization(32, use_beta=False, eps=2e-5),
bias1 = L.Bias(shape=(32,)),
conv2 = L.Convolution2D(32, 64, ksize=3, stride=1, pad=1, nobias=True),
bn2 = L.BatchNormalization(64, use_beta=False, eps=2e-5),
bias2 = L.Bias(shape=(64,)),
conv3 = L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True),
bn3 = L.BatchNormalization(128, use_beta=False, eps=2e-5),
bias3 = L.Bias(shape=(128,)),
conv4 = L.Convolution2D(128, 64, ksize=1, stride=1, pad=0, nobias=True),
bn4 = L.BatchNormalization(64, use_beta=False, eps=2e-5),
bias4 = L.Bias(shape=(64,)),
conv5 = L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True),
bn5 = L.BatchNormalization(128, use_beta=False, eps=2e-5),
bias5 = L.Bias(shape=(128,)),
conv6 = L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True),
bn6 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias6 = L.Bias(shape=(256,)),
conv7 = L.Convolution2D(256, 128, ksize=1, stride=1, pad=0, nobias=True),
bn7 = L.BatchNormalization(128, use_beta=False, eps=2e-5),
bias7 = L.Bias(shape=(128,)),
conv8 = L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True),
bn8 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias8 = L.Bias(shape=(256,)),
conv9 = L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
bn9 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias9 = L.Bias(shape=(512,)),
conv10 = L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True),
bn10 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias10 = L.Bias(shape=(256,)),
conv11 = L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
bn11 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias11 = L.Bias(shape=(512,)),
conv12 = L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True),
bn12 = L.BatchNormalization(256, use_beta=False, eps=2e-5),
bias12 = L.Bias(shape=(256,)),
conv13 = L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
bn13 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias13 = L.Bias(shape=(512,)),
conv14 = L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn14 = L.BatchNormalization(1024, use_beta=False, eps=2e-5),
bias14 = L.Bias(shape=(1024,)),
conv15 = L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True),
bn15 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias15 = L.Bias(shape=(512,)),
conv16 = L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn16 = L.BatchNormalization(1024, use_beta=False, eps=2e-5),
bias16 = L.Bias(shape=(1024,)),
conv17 = L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True),
bn17 = L.BatchNormalization(512, use_beta=False, eps=2e-5),
bias17 = L.Bias(shape=(512,)),
conv18 = L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn18 = L.BatchNormalization(1024, use_beta=False, eps=2e-5),
bias18 = L.Bias(shape=(1024,)),
###### new layer
conv19 = L.Convolution2D(1024, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn19 = L.BatchNormalization(1024, use_beta=False),
bias19 = L.Bias(shape=(1024,)),
conv20 = L.Convolution2D(1024, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn20 = L.BatchNormalization(1024, use_beta=False),
bias20 = L.Bias(shape=(1024,)),
conv21 = L.Convolution2D(3072, 1024, ksize=3, stride=1, pad=1, nobias=True),
bn21 = L.BatchNormalization(1024, use_beta=False),
bias21 = L.Bias(shape=(1024,)),
conv22 = L.Convolution2D(1024, n_boxes * (5 + n_classes), ksize=1, stride=1, pad=0, nobias=True),
bias22 = L.Bias(shape=(n_boxes * (5 + n_classes),)),
)
self.train = False
self.finetune = False
self.n_boxes = n_boxes
self.n_classes = n_classes
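# Note: conv22 emits n_boxes * (5 + n_classes) channels, i.e. for every anchor box
# an (x, y, w, h, objectness) tuple followed by n_classes class scores; this is the
# layout that F.reshape/F.split_axis unpack in YOLOv2Predictor below.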
def __call__(self, x):
##### common layer
h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), test=not self.train, finetune=self.finetune)), slope=0.1)
high_resolution_feature = reorg(h) # downscale the high-resolution feature map with reorg and keep it for later concatenation
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), test=not self.train, finetune=self.finetune)), slope=0.1)
###### new layer
h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = F.concat((high_resolution_feature, h), axis=1) # output concatenation
h = F.leaky_relu(self.bias21(self.bn21(self.conv21(h), test=not self.train, finetune=self.finetune)), slope=0.1)
h = self.bias22(self.conv22(h))
return h
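# Minimal forward-pass sketch (a hedged example, not from the original source; it
# assumes the Chainer 1.x API used above and that lib.utils provides reorg):
#
#   model = YOLOv2(n_classes=20, n_boxes=5)
#   x = np.zeros((1, 3, 416, 416), dtype=np.float32)
#   out = model(x)
#   # out.data.shape == (1, n_boxes * (5 + n_classes), 13, 13) after five 2x poolings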
class YOLOv2Predictor(Chain):
def __init__(self, predictor):
super(YOLOv2Predictor, self).__init__(predictor=predictor)
self.anchors = [[5.375, 5.03125], [5.40625, 4.6875], [2.96875, 2.53125], [2.59375, 2.78125], [1.9375, 3.25]]
self.thresh = 0.6
self.seen = 0
self.unstable_seen = 5000
def __call__(self, input_x, t):
output = self.predictor(input_x)
batch_size, _, grid_h, grid_w = output.shape
self.seen += batch_size
x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
x = F.sigmoid(x) # activation for x
y = F.sigmoid(y) # activation for y
conf = F.sigmoid(conf) # activation for conf
prob = F.transpose(prob, (0, 2, 1, 3, 4))
prob = F.softmax(prob) # activation for the class probabilities
# prepare the training targets
tw = np.zeros(w.shape, dtype=np.float32) # train w and h toward 0 (e^w and e^h approach 1, i.e. the responsible bbox keeps the anchor scale)
th = np.zeros(h.shape, dtype=np.float32)
tx = np.tile(0.5, x.shape).astype(np.float32) # train the activated x and y toward 0.5 (the grid-cell centre)
ty = np.tile(0.5, y.shape).astype(np.float32)
if self.seen < self.unstable_seen: # for boxes without an object centre, the loss scale defaults to 0.1
box_learning_scale = np.tile(0.1, x.shape).astype(np.float32)
else:
box_learning_scale = np.tile(0, x.shape).astype(np.float32)
tconf = np.zeros(conf.shape, dtype=np.float32) # confidence target is 0 by default; anchors whose IoU exceeds thresh are left untrained, and only the best box of a grid cell containing an object is pushed toward the true IoU
conf_learning_scale = np.tile(0.1, conf.shape).astype(np.float32)
tprob = prob.data.copy() # anchors other than the best one are not trained (squared error against themselves = 0)
# compute the IoU between every predicted bbox and the truth boxes (per batch element)
x_shift = Variable(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape[1:]))
y_shift = Variable(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape[1:]))
w_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape[1:]))
h_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape[1:]))
x_shift.to_gpu(), y_shift.to_gpu(), w_anchor.to_gpu(), h_anchor.to_gpu()
best_ious = []
for batch in range(batch_size):
n_truth_boxes = len(t[batch])
box_x = (x[batch] + x_shift) / grid_w
box_y = (y[batch] + y_shift) / grid_h
box_w = F.exp(w[batch]) * w_anchor / grid_w
box_h = F.exp(h[batch]) * h_anchor / grid_h
ious = []
for truth_index in range(n_truth_boxes):
truth_box_x = Variable(np.broadcast_to(np.array(t[batch][truth_index]["x"], dtype=np.float32), box_x.shape))
truth_box_y = Variable(np.broadcast_to(np.array(t[batch][truth_index]["y"], dtype=np.float32), box_y.shape))
truth_box_w = Variable(np.broadcast_to(np.array(t[batch][truth_index]["w"], dtype=np.float32), box_w.shape))
truth_box_h = Variable(np.broadcast_to(np.array(t[batch][truth_index]["h"], dtype=np.float32), box_h.shape))
truth_box_x.to_gpu(), truth_box_y.to_gpu(), truth_box_w.to_gpu(), truth_box_h.to_gpu()
ious.append(multi_box_iou(Box(box_x, box_y, box_w, box_h), Box(truth_box_x, truth_box_y, truth_box_w, truth_box_h)).data.get())
ious = np.array(ious)
best_ious.append(np.max(ious, axis=0))
best_ious = np.array(best_ious)
# anchors with an IoU above the threshold are not pushed toward conf = 0 (grid cells around the truth keep their current conf).
tconf[best_ious > self.thresh] = conf.data.get()[best_ious > self.thresh]
conf_learning_scale[best_ious > self.thresh] = 0
# only anchor boxes containing an object get x, y, w, h, conf and prob corrected individually
abs_anchors = self.anchors / np.array([grid_w, grid_h])
for batch in range(batch_size):
for truth_box in t[batch]:
truth_w = int(float(truth_box["x"]) * grid_w)
truth_h = int(float(truth_box["y"]) * grid_h)
truth_n = 0
best_iou = 0.0
for anchor_index, abs_anchor in enumerate(abs_anchors):
iou = box_iou(Box(0, 0, float(truth_box["w"]), float(truth_box["h"])), Box(0, 0, abs_anchor[0], abs_anchor[1]))
if best_iou < iou:
best_iou = iou
truth_n = anchor_index
# for the anchor responsible for an object, push the centre toward the true coordinates instead of 0.5, push the scale toward the true scale instead of 1, and set the learning scale to 1.
box_learning_scale[batch, truth_n, :, truth_h, truth_w] = 1.0
tx[batch, truth_n, :, truth_h, truth_w] = float(truth_box["x"]) * grid_w - truth_w
ty[batch, truth_n, :, truth_h, truth_w] = float(truth_box["y"]) * grid_h - truth_h
tw[batch, truth_n, :, truth_h, truth_w] = np.log(float(truth_box["w"]) / abs_anchors[truth_n][0])
th[batch, truth_n, :, truth_h, truth_w] = np.log(float(truth_box["h"]) / abs_anchors[truth_n][1])
tprob[batch, :, truth_n, truth_h, truth_w] = 0
tprob[batch, int(truth_box["label"]), truth_n, truth_h, truth_w] = 1
# measure the IoU
full_truth_box = Box(float(truth_box["x"]), float(truth_box["y"]), float(truth_box["w"]), float(truth_box["h"]))
predicted_box = Box(
(x[batch][truth_n][0][truth_h][truth_w].data.get() + truth_w) / grid_w,
(y[batch][truth_n][0][truth_h][truth_w].data.get() + truth_h) / grid_h,
np.exp(w[batch][truth_n][0][truth_h][truth_w].data.get()) * abs_anchors[truth_n][0],
np.exp(h[batch][truth_n][0][truth_h][truth_w].data.get()) * abs_anchors[truth_n][1]
)
predicted_iou = box_iou(full_truth_box, predicted_box)
tconf[batch, truth_n, :, truth_h, truth_w] = predicted_iou
conf_learning_scale[batch, truth_n, :, truth_h, truth_w] = 10.0
# debug prints
maps = F.transpose(prob[batch], (2, 3, 1, 0)).data
print("best confidences and best conditional probability and predicted class of each grid:")
for i in range(grid_h):
for j in range(grid_w):
print("%2d" % (int(conf[batch, :, :, i, j].data.max() * 100)), end=" ")
print(" ", end="")
for j in range(grid_w):
print("%2d" % (maps[i][j][int(maps[i][j].max(axis=1).argmax())].argmax()), end=" ")
print(" ", end="")
for j in range(grid_w):
print("%2d" % (maps[i][j][int(maps[i][j].max(axis=1).argmax())].max()*100), end=" ")
print()
print("best default iou: %.2f predicted iou: %.2f confidence: %.2f class: %s" % (best_iou, predicted_iou, conf[batch][truth_n][0][truth_h][truth_w].data, t[batch][0]["label"]))
print("-------------------------------")
print("seen = %d" % self.seen)
# compute the losses
tx, ty, tw, th, tconf, tprob = Variable(tx), Variable(ty), Variable(tw), Variable(th), Variable(tconf), Variable(tprob)
box_learning_scale, conf_learning_scale = Variable(box_learning_scale), Variable(conf_learning_scale)
tx.to_gpu(), ty.to_gpu(), tw.to_gpu(), th.to_gpu(), tconf.to_gpu(), tprob.to_gpu()
box_learning_scale.to_gpu()
conf_learning_scale.to_gpu()
x_loss = F.sum((tx - x) ** 2 * box_learning_scale) / 2
y_loss = F.sum((ty - y) ** 2 * box_learning_scale) / 2
w_loss = F.sum((tw - w) ** 2 * box_learning_scale) / 2
h_loss = F.sum((th - h) ** 2 * box_learning_scale) / 2
c_loss = F.sum((tconf - conf) ** 2 * conf_learning_scale) / 2
p_loss = F.sum((tprob - prob) ** 2) / 2
print("x_loss: %f y_loss: %f w_loss: %f h_loss: %f c_loss: %f p_loss: %f" %
(F.sum(x_loss).data, F.sum(y_loss).data, F.sum(w_loss).data, F.sum(h_loss).data, F.sum(c_loss).data, F.sum(p_loss).data)
)
loss = x_loss + y_loss + w_loss + h_loss + c_loss + p_loss
return loss
def init_anchor(self, anchors):
self.anchors = anchors
def predict(self, input_x):
output = self.predictor(input_x)
batch_size, input_channel, input_h, input_w = input_x.shape
batch_size, _, grid_h, grid_w = output.shape
x, y, w, h, conf, prob = F.split_axis(F.reshape(output, (batch_size, self.predictor.n_boxes, self.predictor.n_classes+5, grid_h, grid_w)), (1, 2, 3, 4, 5), axis=2)
x = F.sigmoid(x) # activation for x
y = F.sigmoid(y) # activation for y
conf = F.sigmoid(conf) # activation for conf
prob = F.transpose(prob, (0, 2, 1, 3, 4))
prob = F.softmax(prob) # activation for the class probabilities
prob = F.transpose(prob, (0, 2, 1, 3, 4))
# convert x, y, w, h to absolute (normalized) coordinates
x_shift = Variable(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape))
y_shift = Variable(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape))
w_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 0], (self.predictor.n_boxes, 1, 1, 1)), w.shape))
h_anchor = Variable(np.broadcast_to(np.reshape(np.array(self.anchors, dtype=np.float32)[:, 1], (self.predictor.n_boxes, 1, 1, 1)), h.shape))
#x_shift.to_gpu(), y_shift.to_gpu(), w_anchor.to_gpu(), h_anchor.to_gpu()
box_x = (x + x_shift) / grid_w
box_y = (y + y_shift) / grid_h
box_w = F.exp(w) * w_anchor / grid_w
box_h = F.exp(h) * h_anchor / grid_h
return box_x, box_y, box_w, box_h, conf, prob
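# Minimal inference sketch (hedged example; assumes a CPU run, so the to_gpu calls
# stay commented out as above, and thresholding/NMS happen elsewhere):
#
#   predictor = YOLOv2Predictor(YOLOv2(n_classes=20, n_boxes=5))
#   box_x, box_y, box_w, box_h, conf, prob = predictor.predict(x)
#   # box_* are normalized to [0, 1]; multiply by the input width/height
#   # to recover pixel coordinates.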
|
the-stack_0_27715
|
from ..testing import DatabaseTest
from ..facets import (
FacetConstants as Facets,
FacetConfig,
)
class TestFacetConfig(DatabaseTest):
def test_from_library(self):
library = self._default_library
order_by = Facets.ORDER_FACET_GROUP_NAME
# When you create a FacetConfig from a Library it implements
# enabled_facets() and default_facet() the same as the Library
# does.
config = FacetConfig.from_library(library)
assert Facets.ORDER_RANDOM not in config.enabled_facets(order_by)
for group in Facets.DEFAULT_FACET.keys():
assert (config.enabled_facets(group) ==
library.enabled_facets(group))
assert (config.default_facet(group) ==
library.default_facet(group))
# If you then modify the FacetConfig, it deviates from what
# the Library would do.
config.set_default_facet(order_by, Facets.ORDER_RANDOM)
assert Facets.ORDER_RANDOM == config.default_facet(order_by)
assert library.default_facet(order_by) != Facets.ORDER_RANDOM
assert Facets.ORDER_RANDOM in config.enabled_facets(order_by)
def test_enable_facet(self):
# You can enable a facet without making it the default for its
# facet group.
order_by = Facets.ORDER_FACET_GROUP_NAME
config = FacetConfig.from_library(self._default_library)
config.enable_facet(order_by, Facets.ORDER_RANDOM)
assert Facets.ORDER_RANDOM in config.enabled_facets(order_by)
assert config.default_facet(order_by) != Facets.ORDER_RANDOM
|
the-stack_0_27716
|
from tkinter import *
from tkinter import ttk, messagebox, filedialog
import tkinter as tk
import TestList, EditFrame
import os, sys
class Tetra:
def __init__(self, root) -> None:
root.title("Tetra")
windowWidth = 320
windowHeight = 300
root.protocol("WM_DELETE_WINDOW", self.exitMenu)
root.geometry(str(windowWidth) + "x" + str(windowHeight))
# disable menu tear-off
root.option_add("*tearOff", FALSE)
# create the test list
self.test = TestList.TestList()
# show the menu bar
self.construct_menu(root)
# build the screen
self.mainframe = ttk.Frame(root)
self.mainframe.pack(fill=tk.BOTH)
self.construct_frame()
# other screens are constructed later
self.edit_frame = None
## build the menu
def construct_menu(self, root):
self.menuBar = Menu()
self.fileMenu = Menu()
self.helpMenu = Menu()
self.editMenu = Menu()
## File menu
self.menuBar.add_cascade(menu=self.fileMenu, label="ファイル(F)", underline=5)
self.fileMenu.add_command(label="テスト読み込み(R)", underline=8, command=self.load)
## Edit menu
self.menuBar.add_cascade(menu=self.editMenu, label="編集(E)", underline=3)
self.editMenu.add_command(label="新規テスト作成(N)", underline=8, command=self.create_new_test)
## Help menu
self.menuBar.add_cascade(menu=self.helpMenu, label="ヘルプ(H)", underline=4)
self.helpMenu.add_command(label="バージョン情報(V)", underline=8, command=self.show_version)
self.helpMenu.add_command(label="終了(X)", underline=3, command=self.exitMenu)
root["menu"] = self.menuBar
## build the main screen
def construct_frame(self):
# configure the tree view
tree = ttk.Treeview(self.mainframe)
tree["columns"] = list(range(0, 2))
tree["show"] = "headings"
tree.column(0, width=10)
tree.column(1, width=50)
# table headers
tree.heading(0, text="No.")
tree.heading(1, text="テスト名")
# place it on the screen
self.tree = tree
self.tree.grid(row=0)
# register an event listener that reacts when an item is clicked
self.tree.bind("<<TreeviewSelect>>", self.onSelectTest)
self.displayTreeView()
frame = tk.Frame(self.mainframe)
frame.grid(row=1)
new_create_button = tk.Button(frame, text="新規テスト作成", command=self.create_new_test)
new_create_button.grid(row=0, column=0)
edit_label = tk.Label(frame, text="No.")
edit_label.grid(row=0, column=1)
self.edit_string = StringVar()
self.edit_entry = tk.Entry(frame, textvariable=self.edit_string, width=5)
self.edit_entry.grid(row=0, column=2)
edit_button = tk.Button(frame, text="編集", command=self.edit_test)
edit_button.grid(row=0, column=3)
## listener invoked when a table item is clicked
def onSelectTest(self, event):
# reset any value that is already entered
self.edit_entry.delete(0, tk.END)
# apply only the topmost selected item
for id in self.tree.selection():
item = self.tree.set(id)
self.edit_entry.insert(tk.END, item["0"])
break
## display the process definitions
def displayTreeView(self):
# remove all existing rows first
for i in self.tree.get_children():
self.tree.delete(i)
# re-insert the values
record = self.test.get_test_table()
for rec in record:
self.tree.insert("", "end", values=rec)
## load tests
def load(self):
pass
## create a new test
def create_new_test(self):
self.create_dialog = tk.Toplevel(self.mainframe)
self.create_dialog.title("Create Test")
self.create_dialog.geometry("400x100")
self.create_dialog.protocol("WM_DELETE_WINDOW", self.close_new_test_dialog)
# input fields for the new test
blank_label = tk.Label(self.create_dialog, text="")
blank_label.grid(row=0, column=0)
pipe_no_label = tk.Label(self.create_dialog, text="Pipeline No")
pipe_no_label.grid(row=1, column=0)
self.pipe_no_string = tk.StringVar()
pipe_no_entry = tk.Entry(self.create_dialog, textvariable=self.pipe_no_string, width=3)
pipe_no_entry.grid(row=1,column=1)
test_name_label = tk.Label(self.create_dialog, text="テスト名")
test_name_label.grid(row=1, column=2)
self.test_name_string = StringVar()
test_name_entry = tk.Entry(self.create_dialog, textvariable=self.test_name_string, width=30)
test_name_entry.grid(row=1, column=3)
dialog_confirm_button = tk.Button(self.create_dialog, text="決定", command=self.close_new_test_dialog)
dialog_confirm_button.grid(row=1, column=4)
self.create_dialog.grab_set()
self.create_dialog.focus_set()
self.create_dialog.transient(self.mainframe)
self.mainframe.wait_window()
## post-processing when the new-test dialog is closed
def close_new_test_dialog(self):
pipe_no = 0
input_result = True
try:
pipe_no = int(self.pipe_no_string.get())
except:
pipe_no = -1
messagebox.showwarning("warning", "数字を入力してください")
input_result = False
if pipe_no == 0:
messagebox.showwarning("warning", "数字を入力してください")
input_result = False
if len(self.test_name_string.get()) == 0:
messagebox.showwarning("warning", "名称を入力してください")
input_result = False
# add the test
if input_result == True:
if self.test.create_new_test(pipe_no, self.test_name_string.get()) == False:
errormessage = "新規テスト生成に失敗しました\nTest No: " + pipe_no.__str__()
messagebox.showwarning("warning", errormessage)
self.create_dialog.destroy()
self.displayTreeView()
# proceed to the edit screen
if input_result == True:
self.edit_entry.insert(tk.END, pipe_no)
self.edit_test()
def edit_test(self):
# convert the scenario number to an int and fetch the target object
pipe_no = 0
input_result = True
try:
pipe_no = int(self.edit_entry.get())
except:
pipe_no = -1
messagebox.showwarning("warning", "数字を入力してください")
input_result = False
target = self.test.find(pipe_no)
if target == None:
messagebox.showwarning("warning", "テストがありません。新規作成をしてください。")
input_result = False
if input_result == True:
self.edit_dialog = tk.Toplevel(self.mainframe)
# create the edit screen
self.edit_frame = EditFrame.EditFrame(self.edit_dialog, target)
def delete_test(self):
pass
def executeTest(self):
pass
def viewTestResult(self):
pass
## show version information
def show_version(self):
ver = "Tetra"
ver += " Version (2021/11/20)\n"
ver += " ©Yuya.Eguchi All rights reserved.\n"
ver += " Python ver. " + sys.version
messagebox.showinfo("Tetra", ver)
## exit
def exitMenu(self):
langueage = "EN"
# if R.langType == R.LangType.JP:
# langueage = "JP"
## write the settings to the ini file before exiting
# cp = configparser.ConfigParser()
# cp["main"] = {
# "dirpath": self.directoryName,
# "language": langueage,
# "width": root.winfo_width(),
# "height": root.winfo_height()
# }
# with open(CONFIGURATION_FILE, "w") as f:
# cp.write(f)
root.destroy()
root = tk.Tk()
tetra = Tetra(root)
root.mainloop()
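# Note: running this module directly requires the sibling TestList and EditFrame
# modules imported above; closing the main window triggers exitMenu(), which
# destroys the module-level root created just above.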
|
the-stack_0_27717
|
import re
from turtle import TNavigator, TPen
ANCHOR_NAMES = dict(left='start',
center='middle',
right='end')
class SvgTurtle(TNavigator, TPen):
""" Helper class to include turtle graphics within a PDF document. """
class _Screen(object):
def __init__(self, drawing, width, height):
self.cv = drawing
self._window_width = width
self._window_height = height
def window_width(self):
return self._window_width
def window_height(self):
return self._window_height
def __init__(self, drawing, width=None, height=None):
if width is None:
width = _parse_int(drawing['width'])
if height is None:
height = _parse_int(drawing['height'])
self._path = None
self._lines_to_draw = None
self.screen = None
TNavigator.__init__(self)
TPen.__init__(self)
self.screen = SvgTurtle._Screen(drawing, width, height)
self.__xoff = self.window_width()/2
self.__yoff = -self.window_height()/2
def _convert_position(self, position):
return (position[0] + self.__xoff, -position[1] - self.__yoff)
def _goto(self, end):
if self.screen:
x1, y1 = self._convert_position(self._position)
x2, y2 = self._convert_position(end)
if self._drawing:
pencolor = self._pencolor or 0
pensize = self._pensize or 0
# May draw line twice when filling, but it makes sure that we
# still draw line when caller doesn't call end_fill().
self._draw_line(x1, y1, x2, y2, pencolor, pensize)
else:
pencolor = None
pensize = None
if self._lines_to_draw is not None:
self._lines_to_draw.append((x1,
y1,
x2,
y2,
pencolor,
pensize))
self._position = end
def _draw_line(self, x1, y1, x2, y2, pencolor, pensize):
self.screen.cv.add(self.screen.cv.line((x1, y1),
(x2, y2),
stroke=pencolor,
stroke_width=pensize,
stroke_linecap='round'))
def begin_fill(self):
self.fill(True)
def end_fill(self):
self.fill(False)
def _flush_lines(self):
if self._lines_to_draw:
for x1, y1, x2, y2, pencolor, pensize in self._lines_to_draw:
if pencolor is not None:
self._draw_line(x1, y1, x2, y2, pencolor, pensize)
def fill(self, flag=None):
if flag is None:
return self._path is not None
if self._lines_to_draw: # TODO: and len(self._path) > 2:
points = [line[:2] for line in self._lines_to_draw]
points.append(self._lines_to_draw[-1][2:4])
self.screen.cv.add(self.screen.cv.polygon(points=points,
fill=self._fillcolor,
fill_rule='evenodd'))
self._flush_lines()
if not flag:
self._path = None
self._lines_to_draw = None
else:
self._lines_to_draw = []
def window_width(self):
return self.screen.window_width()
def window_height(self):
return self.screen.window_height()
def write(self,
arg,
move=False,
align="left",
font=("Helvetica", 8, "normal")):
if move:
raise ValueError('move', 'Parameter is not supported.')
font_name, font_size, font_style = font
font_size *= 1.65
style = 'font-family: {}; font-size: {}; font-style: {};'.format(
font_name,
font_size,
font_style)
x, y = self._convert_position(self._position)
y -= font[1] * 0.45
self.screen.cv.add(self.screen.cv.text(arg,
insert=(x, y),
text_anchor=ANCHOR_NAMES[align],
style=style,
fill=self._pencolor))
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
just uses black.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
return color_map.get(color.lower(), color)
try:
r, g, b = color
except:
return '#000000'
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
return '#000000'
return "#%02x%02x%02x" % (r, g, b)
def _parse_int(s):
""" Parse an integer from the start of a string, ignore anything else. """
match = re.match(r'\d+', s)
if match is None:
raise ValueError('String does not start with digits: {!r}'.format(s))
return int(match.group(0))
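# Minimal usage sketch (hedged example; it assumes the svgwrite package, whose
# Drawing/line/text/polygon API matches the calls made above):
#
#   import svgwrite
#   drawing = svgwrite.Drawing(size=('400px', '300px'))
#   t = SvgTurtle(drawing, width=400, height=300)
#   t.forward(100)
#   t.write('hello', align='center')
#   drawing.saveas('out.svg')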
# Normally, Tkinter will look up these colour names for you, but we don't
# actually launch Tkinter when we're analysing code.
color_map = {
'alice blue': '#f0f8ff',
'aliceblue': '#f0f8ff',
'antique white': '#faebd7',
'antiquewhite': '#faebd7',
'antiquewhite1': '#ffefdb',
'antiquewhite2': '#eedfcc',
'antiquewhite3': '#cdc0b0',
'antiquewhite4': '#8b8378',
'aquamarine': '#7fffd4',
'aquamarine1': '#7fffd4',
'aquamarine2': '#76eec6',
'aquamarine3': '#66cdaa',
'aquamarine4': '#458b74',
'azure': '#f0ffff',
'azure1': '#f0ffff',
'azure2': '#e0eeee',
'azure3': '#c1cdcd',
'azure4': '#838b8b',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'bisque1': '#ffe4c4',
'bisque2': '#eed5b7',
'bisque3': '#cdb79e',
'bisque4': '#8b7d6b',
'black': '#000000',
'blanched almond': '#ffebcd',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blue violet': '#8a2be2',
'blue1': '#0000ff',
'blue2': '#0000ee',
'blue3': '#0000cd',
'blue4': '#00008b',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'brown1': '#ff4040',
'brown2': '#ee3b3b',
'brown3': '#cd3333',
'brown4': '#8b2323',
'burlywood': '#deb887',
'burlywood1': '#ffd39b',
'burlywood2': '#eec591',
'burlywood3': '#cdaa7d',
'burlywood4': '#8b7355',
'cadet blue': '#5f9ea0',
'cadetblue': '#5f9ea0',
'cadetblue1': '#98f5ff',
'cadetblue2': '#8ee5ee',
'cadetblue3': '#7ac5cd',
'cadetblue4': '#53868b',
'chartreuse': '#7fff00',
'chartreuse1': '#7fff00',
'chartreuse2': '#76ee00',
'chartreuse3': '#66cd00',
'chartreuse4': '#458b00',
'chocolate': '#d2691e',
'chocolate1': '#ff7f24',
'chocolate2': '#ee7621',
'chocolate3': '#cd661d',
'chocolate4': '#8b4513',
'coral': '#ff7f50',
'coral1': '#ff7256',
'coral2': '#ee6a50',
'coral3': '#cd5b45',
'coral4': '#8b3e2f',
'cornflower blue': '#6495ed',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'cornsilk1': '#fff8dc',
'cornsilk2': '#eee8cd',
'cornsilk3': '#cdc8b1',
'cornsilk4': '#8b8878',
'cyan': '#00ffff',
'cyan1': '#00ffff',
'cyan2': '#00eeee',
'cyan3': '#00cdcd',
'cyan4': '#008b8b',
'dark blue': '#00008b',
'dark cyan': '#008b8b',
'dark goldenrod': '#b8860b',
'dark gray': '#a9a9a9',
'dark green': '#006400',
'dark grey': '#a9a9a9',
'dark khaki': '#bdb76b',
'dark magenta': '#8b008b',
'dark olive green': '#556b2f',
'dark orange': '#ff8c00',
'dark orchid': '#9932cc',
'dark red': '#8b0000',
'dark salmon': '#e9967a',
'dark sea green': '#8fbc8f',
'dark slate blue': '#483d8b',
'dark slate gray': '#2f4f4f',
'dark slate grey': '#2f4f4f',
'dark turquoise': '#00ced1',
'dark violet': '#9400d3',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgoldenrod1': '#ffb90f',
'darkgoldenrod2': '#eead0e',
'darkgoldenrod3': '#cd950c',
'darkgoldenrod4': '#8b6508',
'darkgray': '#a9a9a9',
'darkgreen': '#006400',
'darkgrey': '#a9a9a9',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkolivegreen1': '#caff70',
'darkolivegreen2': '#bcee68',
'darkolivegreen3': '#a2cd5a',
'darkolivegreen4': '#6e8b3d',
'darkorange': '#ff8c00',
'darkorange1': '#ff7f00',
'darkorange2': '#ee7600',
'darkorange3': '#cd6600',
'darkorange4': '#8b4500',
'darkorchid': '#9932cc',
'darkorchid1': '#bf3eff',
'darkorchid2': '#b23aee',
'darkorchid3': '#9a32cd',
'darkorchid4': '#68228b',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkseagreen1': '#c1ffc1',
'darkseagreen2': '#b4eeb4',
'darkseagreen3': '#9bcd9b',
'darkseagreen4': '#698b69',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategray1': '#97ffff',
'darkslategray2': '#8deeee',
'darkslategray3': '#79cdcd',
'darkslategray4': '#528b8b',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deep pink': '#ff1493',
'deep sky blue': '#00bfff',
'deeppink': '#ff1493',
'deeppink1': '#ff1493',
'deeppink2': '#ee1289',
'deeppink3': '#cd1076',
'deeppink4': '#8b0a50',
'deepskyblue': '#00bfff',
'deepskyblue1': '#00bfff',
'deepskyblue2': '#00b2ee',
'deepskyblue3': '#009acd',
'deepskyblue4': '#00688b',
'dim gray': '#696969',
'dim grey': '#696969',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodger blue': '#1e90ff',
'dodgerblue': '#1e90ff',
'dodgerblue1': '#1e90ff',
'dodgerblue2': '#1c86ee',
'dodgerblue3': '#1874cd',
'dodgerblue4': '#104e8b',
'firebrick': '#b22222',
'firebrick1': '#ff3030',
'firebrick2': '#ee2c2c',
'firebrick3': '#cd2626',
'firebrick4': '#8b1a1a',
'floral white': '#fffaf0',
'floralwhite': '#fffaf0',
'forest green': '#228b22',
'forestgreen': '#228b22',
'gainsboro': '#dcdcdc',
'ghost white': '#f8f8ff',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'gold1': '#ffd700',
'gold2': '#eec900',
'gold3': '#cdad00',
'gold4': '#8b7500',
'goldenrod': '#daa520',
'goldenrod1': '#ffc125',
'goldenrod2': '#eeb422',
'goldenrod3': '#cd9b1d',
'goldenrod4': '#8b6914',
'gray': '#bebebe',
'gray0': '#000000',
'gray1': '#030303',
'gray2': '#050505',
'gray3': '#080808',
'gray4': '#0a0a0a',
'gray5': '#0d0d0d',
'gray6': '#0f0f0f',
'gray7': '#121212',
'gray8': '#141414',
'gray9': '#171717',
'gray10': '#1a1a1a',
'gray11': '#1c1c1c',
'gray12': '#1f1f1f',
'gray13': '#212121',
'gray14': '#242424',
'gray15': '#262626',
'gray16': '#292929',
'gray17': '#2b2b2b',
'gray18': '#2e2e2e',
'gray19': '#303030',
'gray20': '#333333',
'gray21': '#363636',
'gray22': '#383838',
'gray23': '#3b3b3b',
'gray24': '#3d3d3d',
'gray25': '#404040',
'gray26': '#424242',
'gray27': '#454545',
'gray28': '#474747',
'gray29': '#4a4a4a',
'gray30': '#4d4d4d',
'gray31': '#4f4f4f',
'gray32': '#525252',
'gray33': '#545454',
'gray34': '#575757',
'gray35': '#595959',
'gray36': '#5c5c5c',
'gray37': '#5e5e5e',
'gray38': '#616161',
'gray39': '#636363',
'gray40': '#666666',
'gray41': '#696969',
'gray42': '#6b6b6b',
'gray43': '#6e6e6e',
'gray44': '#707070',
'gray45': '#737373',
'gray46': '#757575',
'gray47': '#787878',
'gray48': '#7a7a7a',
'gray49': '#7d7d7d',
'gray50': '#7f7f7f',
'gray51': '#828282',
'gray52': '#858585',
'gray53': '#878787',
'gray54': '#8a8a8a',
'gray55': '#8c8c8c',
'gray56': '#8f8f8f',
'gray57': '#919191',
'gray58': '#949494',
'gray59': '#969696',
'gray60': '#999999',
'gray61': '#9c9c9c',
'gray62': '#9e9e9e',
'gray63': '#a1a1a1',
'gray64': '#a3a3a3',
'gray65': '#a6a6a6',
'gray66': '#a8a8a8',
'gray67': '#ababab',
'gray68': '#adadad',
'gray69': '#b0b0b0',
'gray70': '#b3b3b3',
'gray71': '#b5b5b5',
'gray72': '#b8b8b8',
'gray73': '#bababa',
'gray74': '#bdbdbd',
'gray75': '#bfbfbf',
'gray76': '#c2c2c2',
'gray77': '#c4c4c4',
'gray78': '#c7c7c7',
'gray79': '#c9c9c9',
'gray80': '#cccccc',
'gray81': '#cfcfcf',
'gray82': '#d1d1d1',
'gray83': '#d4d4d4',
'gray84': '#d6d6d6',
'gray85': '#d9d9d9',
'gray86': '#dbdbdb',
'gray87': '#dedede',
'gray88': '#e0e0e0',
'gray89': '#e3e3e3',
'gray90': '#e5e5e5',
'gray91': '#e8e8e8',
'gray92': '#ebebeb',
'gray93': '#ededed',
'gray94': '#f0f0f0',
'gray95': '#f2f2f2',
'gray96': '#f5f5f5',
'gray97': '#f7f7f7',
'gray98': '#fafafa',
'gray99': '#fcfcfc',
'gray100': '#ffffff',
'green': '#00ff00',
'green yellow': '#adff2f',
'green1': '#00ff00',
'green2': '#00ee00',
'green3': '#00cd00',
'green4': '#008b00',
'greenyellow': '#adff2f',
'grey': '#bebebe',
'grey0': '#000000',
'grey1': '#030303',
'grey2': '#050505',
'grey3': '#080808',
'grey4': '#0a0a0a',
'grey5': '#0d0d0d',
'grey6': '#0f0f0f',
'grey7': '#121212',
'grey8': '#141414',
'grey9': '#171717',
'grey10': '#1a1a1a',
'grey11': '#1c1c1c',
'grey12': '#1f1f1f',
'grey13': '#212121',
'grey14': '#242424',
'grey15': '#262626',
'grey16': '#292929',
'grey17': '#2b2b2b',
'grey18': '#2e2e2e',
'grey19': '#303030',
'grey20': '#333333',
'grey21': '#363636',
'grey22': '#383838',
'grey23': '#3b3b3b',
'grey24': '#3d3d3d',
'grey25': '#404040',
'grey26': '#424242',
'grey27': '#454545',
'grey28': '#474747',
'grey29': '#4a4a4a',
'grey30': '#4d4d4d',
'grey31': '#4f4f4f',
'grey32': '#525252',
'grey33': '#545454',
'grey34': '#575757',
'grey35': '#595959',
'grey36': '#5c5c5c',
'grey37': '#5e5e5e',
'grey38': '#616161',
'grey39': '#636363',
'grey40': '#666666',
'grey41': '#696969',
'grey42': '#6b6b6b',
'grey43': '#6e6e6e',
'grey44': '#707070',
'grey45': '#737373',
'grey46': '#757575',
'grey47': '#787878',
'grey48': '#7a7a7a',
'grey49': '#7d7d7d',
'grey50': '#7f7f7f',
'grey51': '#828282',
'grey52': '#858585',
'grey53': '#878787',
'grey54': '#8a8a8a',
'grey55': '#8c8c8c',
'grey56': '#8f8f8f',
'grey57': '#919191',
'grey58': '#949494',
'grey59': '#969696',
'grey60': '#999999',
'grey61': '#9c9c9c',
'grey62': '#9e9e9e',
'grey63': '#a1a1a1',
'grey64': '#a3a3a3',
'grey65': '#a6a6a6',
'grey66': '#a8a8a8',
'grey67': '#ababab',
'grey68': '#adadad',
'grey69': '#b0b0b0',
'grey70': '#b3b3b3',
'grey71': '#b5b5b5',
'grey72': '#b8b8b8',
'grey73': '#bababa',
'grey74': '#bdbdbd',
'grey75': '#bfbfbf',
'grey76': '#c2c2c2',
'grey77': '#c4c4c4',
'grey78': '#c7c7c7',
'grey79': '#c9c9c9',
'grey80': '#cccccc',
'grey81': '#cfcfcf',
'grey82': '#d1d1d1',
'grey83': '#d4d4d4',
'grey84': '#d6d6d6',
'grey85': '#d9d9d9',
'grey86': '#dbdbdb',
'grey87': '#dedede',
'grey88': '#e0e0e0',
'grey89': '#e3e3e3',
'grey90': '#e5e5e5',
'grey91': '#e8e8e8',
'grey92': '#ebebeb',
'grey93': '#ededed',
'grey94': '#f0f0f0',
'grey95': '#f2f2f2',
'grey96': '#f5f5f5',
'grey97': '#f7f7f7',
'grey98': '#fafafa',
'grey99': '#fcfcfc',
'grey100': '#ffffff',
'honeydew': '#f0fff0',
'honeydew1': '#f0fff0',
'honeydew2': '#e0eee0',
'honeydew3': '#c1cdc1',
'honeydew4': '#838b83',
'hot pink': '#ff69b4',
'hotpink': '#ff69b4',
'hotpink1': '#ff6eb4',
'hotpink2': '#ee6aa7',
'hotpink3': '#cd6090',
'hotpink4': '#8b3a62',
'indian red': '#cd5c5c',
'indianred': '#cd5c5c',
'indianred1': '#ff6a6a',
'indianred2': '#ee6363',
'indianred3': '#cd5555',
'indianred4': '#8b3a3a',
'ivory': '#fffff0',
'ivory1': '#fffff0',
'ivory2': '#eeeee0',
'ivory3': '#cdcdc1',
'ivory4': '#8b8b83',
'khaki': '#f0e68c',
'khaki1': '#fff68f',
'khaki2': '#eee685',
'khaki3': '#cdc673',
'khaki4': '#8b864e',
'lavender': '#e6e6fa',
'lavender blush': '#fff0f5',
'lavenderblush': '#fff0f5',
'lavenderblush1': '#fff0f5',
'lavenderblush2': '#eee0e5',
'lavenderblush3': '#cdc1c5',
'lavenderblush4': '#8b8386',
'lawn green': '#7cfc00',
'lawngreen': '#7cfc00',
'lemon chiffon': '#fffacd',
'lemonchiffon': '#fffacd',
'lemonchiffon1': '#fffacd',
'lemonchiffon2': '#eee9bf',
'lemonchiffon3': '#cdc9a5',
'lemonchiffon4': '#8b8970',
'light blue': '#add8e6',
'light coral': '#f08080',
'light cyan': '#e0ffff',
'light goldenrod': '#eedd82',
'light goldenrod yellow': '#fafad2',
'light gray': '#d3d3d3',
'light green': '#90ee90',
'light grey': '#d3d3d3',
'light pink': '#ffb6c1',
'light salmon': '#ffa07a',
'light sea green': '#20b2aa',
'light sky blue': '#87cefa',
'light slate blue': '#8470ff',
'light slate gray': '#778899',
'light slate grey': '#778899',
'light steel blue': '#b0c4de',
'light yellow': '#ffffe0',
'lightblue': '#add8e6',
'lightblue1': '#bfefff',
'lightblue2': '#b2dfee',
'lightblue3': '#9ac0cd',
'lightblue4': '#68838b',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightcyan1': '#e0ffff',
'lightcyan2': '#d1eeee',
'lightcyan3': '#b4cdcd',
'lightcyan4': '#7a8b8b',
'lightgoldenrod': '#eedd82',
'lightgoldenrod1': '#ffec8b',
'lightgoldenrod2': '#eedc82',
'lightgoldenrod3': '#cdbe70',
'lightgoldenrod4': '#8b814c',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgreen': '#90ee90',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightpink1': '#ffaeb9',
'lightpink2': '#eea2ad',
'lightpink3': '#cd8c95',
'lightpink4': '#8b5f65',
'lightsalmon': '#ffa07a',
'lightsalmon1': '#ffa07a',
'lightsalmon2': '#ee9572',
'lightsalmon3': '#cd8162',
'lightsalmon4': '#8b5742',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightskyblue1': '#b0e2ff',
'lightskyblue2': '#a4d3ee',
'lightskyblue3': '#8db6cd',
'lightskyblue4': '#607b8b',
'lightslateblue': '#8470ff',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightsteelblue1': '#cae1ff',
'lightsteelblue2': '#bcd2ee',
'lightsteelblue3': '#a2b5cd',
'lightsteelblue4': '#6e7b8b',
'lightyellow': '#ffffe0',
'lightyellow1': '#ffffe0',
'lightyellow2': '#eeeed1',
'lightyellow3': '#cdcdb4',
'lightyellow4': '#8b8b7a',
'lime green': '#32cd32',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'magenta1': '#ff00ff',
'magenta2': '#ee00ee',
'magenta3': '#cd00cd',
'magenta4': '#8b008b',
'maroon': '#b03060',
'maroon1': '#ff34b3',
'maroon2': '#ee30a7',
'maroon3': '#cd2990',
'maroon4': '#8b1c62',
'medium aquamarine': '#66cdaa',
'medium blue': '#0000cd',
'medium orchid': '#ba55d3',
'medium purple': '#9370db',
'medium sea green': '#3cb371',
'medium slate blue': '#7b68ee',
'medium spring green': '#00fa9a',
'medium turquoise': '#48d1cc',
'medium violet red': '#c71585',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumorchid1': '#e066ff',
'mediumorchid2': '#d15fee',
'mediumorchid3': '#b452cd',
'mediumorchid4': '#7a378b',
'mediumpurple': '#9370db',
'mediumpurple1': '#ab82ff',
'mediumpurple2': '#9f79ee',
'mediumpurple3': '#8968cd',
'mediumpurple4': '#5d478b',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnight blue': '#191970',
'midnightblue': '#191970',
'mint cream': '#f5fffa',
'mintcream': '#f5fffa',
'misty rose': '#ffe4e1',
'mistyrose': '#ffe4e1',
'mistyrose1': '#ffe4e1',
'mistyrose2': '#eed5d2',
'mistyrose3': '#cdb7b5',
'mistyrose4': '#8b7d7b',
'moccasin': '#ffe4b5',
'navajo white': '#ffdead',
'navajowhite': '#ffdead',
'navajowhite1': '#ffdead',
'navajowhite2': '#eecfa1',
'navajowhite3': '#cdb38b',
'navajowhite4': '#8b795e',
'navy': '#000080',
'navy blue': '#000080',
'navyblue': '#000080',
'old lace': '#fdf5e6',
'oldlace': '#fdf5e6',
'olive drab': '#6b8e23',
'olivedrab': '#6b8e23',
'olivedrab1': '#c0ff3e',
'olivedrab2': '#b3ee3a',
'olivedrab3': '#9acd32',
'olivedrab4': '#698b22',
'orange': '#ffa500',
'orange red': '#ff4500',
'orange1': '#ffa500',
'orange2': '#ee9a00',
'orange3': '#cd8500',
'orange4': '#8b5a00',
'orangered': '#ff4500',
'orangered1': '#ff4500',
'orangered2': '#ee4000',
'orangered3': '#cd3700',
'orangered4': '#8b2500',
'orchid': '#da70d6',
'orchid1': '#ff83fa',
'orchid2': '#ee7ae9',
'orchid3': '#cd69c9',
'orchid4': '#8b4789',
'pale goldenrod': '#eee8aa',
'pale green': '#98fb98',
'pale turquoise': '#afeeee',
'pale violet red': '#db7093',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'palegreen1': '#9aff9a',
'palegreen2': '#90ee90',
'palegreen3': '#7ccd7c',
'palegreen4': '#548b54',
'paleturquoise': '#afeeee',
'paleturquoise1': '#bbffff',
'paleturquoise2': '#aeeeee',
'paleturquoise3': '#96cdcd',
'paleturquoise4': '#668b8b',
'palevioletred': '#db7093',
'palevioletred1': '#ff82ab',
'palevioletred2': '#ee799f',
'palevioletred3': '#cd687f',
'palevioletred4': '#8b475d',
'papaya whip': '#ffefd5',
'papayawhip': '#ffefd5',
'peach puff': '#ffdab9',
'peachpuff': '#ffdab9',
'peachpuff1': '#ffdab9',
'peachpuff2': '#eecbad',
'peachpuff3': '#cdaf95',
'peachpuff4': '#8b7765',
'peru': '#cd853f',
'pink': '#ffc0cb',
'pink1': '#ffb5c5',
'pink2': '#eea9b8',
'pink3': '#cd919e',
'pink4': '#8b636c',
'plum': '#dda0dd',
'plum1': '#ffbbff',
'plum2': '#eeaeee',
'plum3': '#cd96cd',
'plum4': '#8b668b',
'powder blue': '#b0e0e6',
'powderblue': '#b0e0e6',
'purple': '#a020f0',
'purple1': '#9b30ff',
'purple2': '#912cee',
'purple3': '#7d26cd',
'purple4': '#551a8b',
'red': '#ff0000',
'red1': '#ff0000',
'red2': '#ee0000',
'red3': '#cd0000',
'red4': '#8b0000',
'rosy brown': '#bc8f8f',
'rosybrown': '#bc8f8f',
'rosybrown1': '#ffc1c1',
'rosybrown2': '#eeb4b4',
'rosybrown3': '#cd9b9b',
'rosybrown4': '#8b6969',
'royal blue': '#4169e1',
'royalblue': '#4169e1',
'royalblue1': '#4876ff',
'royalblue2': '#436eee',
'royalblue3': '#3a5fcd',
'royalblue4': '#27408b',
'saddle brown': '#8b4513',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'salmon1': '#ff8c69',
'salmon2': '#ee8262',
'salmon3': '#cd7054',
'salmon4': '#8b4c39',
'sandy brown': '#f4a460',
'sandybrown': '#f4a460',
'sea green': '#2e8b57',
'seagreen': '#2e8b57',
'seagreen1': '#54ff9f',
'seagreen2': '#4eee94',
'seagreen3': '#43cd80',
'seagreen4': '#2e8b57',
'seashell': '#fff5ee',
'seashell1': '#fff5ee',
'seashell2': '#eee5de',
'seashell3': '#cdc5bf',
'seashell4': '#8b8682',
'sienna': '#a0522d',
'sienna1': '#ff8247',
'sienna2': '#ee7942',
'sienna3': '#cd6839',
'sienna4': '#8b4726',
'sky blue': '#87ceeb',
'skyblue': '#87ceeb',
'skyblue1': '#87ceff',
'skyblue2': '#7ec0ee',
'skyblue3': '#6ca6cd',
'skyblue4': '#4a708b',
'slate blue': '#6a5acd',
'slate gray': '#708090',
'slate grey': '#708090',
'slateblue': '#6a5acd',
'slateblue1': '#836fff',
'slateblue2': '#7a67ee',
'slateblue3': '#6959cd',
'slateblue4': '#473c8b',
'slategray': '#708090',
'slategray1': '#c6e2ff',
'slategray2': '#b9d3ee',
'slategray3': '#9fb6cd',
'slategray4': '#6c7b8b',
'slategrey': '#708090',
'snow': '#fffafa',
'snow1': '#fffafa',
'snow2': '#eee9e9',
'snow3': '#cdc9c9',
'snow4': '#8b8989',
'spring green': '#00ff7f',
'springgreen': '#00ff7f',
'springgreen1': '#00ff7f',
'springgreen2': '#00ee76',
'springgreen3': '#00cd66',
'springgreen4': '#008b45',
'steel blue': '#4682b4',
'steelblue': '#4682b4',
'steelblue1': '#63b8ff',
'steelblue2': '#5cacee',
'steelblue3': '#4f94cd',
'steelblue4': '#36648b',
'tan': '#d2b48c',
'tan1': '#ffa54f',
'tan2': '#ee9a49',
'tan3': '#cd853f',
'tan4': '#8b5a2b',
'thistle': '#d8bfd8',
'thistle1': '#ffe1ff',
'thistle2': '#eed2ee',
'thistle3': '#cdb5cd',
'thistle4': '#8b7b8b',
'tomato': '#ff6347',
'tomato1': '#ff6347',
'tomato2': '#ee5c42',
'tomato3': '#cd4f39',
'tomato4': '#8b3626',
'turquoise': '#40e0d0',
'turquoise1': '#00f5ff',
'turquoise2': '#00e5ee',
'turquoise3': '#00c5cd',
'turquoise4': '#00868b',
'violet': '#ee82ee',
'violet red': '#d02090',
'violetred': '#d02090',
'violetred1': '#ff3e96',
'violetred2': '#ee3a8c',
'violetred3': '#cd3278',
'violetred4': '#8b2252',
'wheat': '#f5deb3',
'wheat1': '#ffe7ba',
'wheat2': '#eed8ae',
'wheat3': '#cdba96',
'wheat4': '#8b7e66',
'white': '#ffffff',
'white smoke': '#f5f5f5',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellow green': '#9acd32',
'yellow1': '#ffff00',
'yellow2': '#eeee00',
'yellow3': '#cdcd00',
'yellow4': '#8b8b00',
'yellowgreen': '#9acd32',
}
|
the-stack_0_27718
|
from django.conf import settings
from django.db import transaction
from django.db.models import (
F, IntegerField, Max, Min, OuterRef, Prefetch, Subquery, Sum,
)
from django.db.models.functions import Coalesce, Greatest
from django.http import JsonResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views import View
from django.views.generic import ListView
from formtools.wizard.views import SessionWizardView
from i18nfield.strings import LazyI18nString
from pretix.base.i18n import language
from pretix.base.models import Event, Organizer, Quota, Team
from pretix.control.forms.event import (
EventWizardBasicsForm, EventWizardCopyForm, EventWizardFoundationForm,
)
from pretix.control.forms.filter import EventFilterForm
from pretix.control.permissions import OrganizerPermissionRequiredMixin
from pretix.control.views import PaginationMixin
class EventList(PaginationMixin, ListView):
model = Event
context_object_name = 'events'
template_name = 'pretixcontrol/events/index.html'
def get_queryset(self):
qs = self.request.user.get_events_with_any_permission(self.request).select_related('organizer').prefetch_related(
'_settings_objects', 'organizer___settings_objects'
).order_by('-date_from')
qs = qs.annotate(
min_from=Min('subevents__date_from'),
max_from=Max('subevents__date_from'),
max_to=Max('subevents__date_to'),
max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from'))
).annotate(
order_from=Coalesce('min_from', 'date_from'),
order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),
)
sum_tickets_paid = Quota.objects.filter(
event=OuterRef('pk'), subevent__isnull=True
).order_by().values('event').annotate(
s=Sum('cached_availability_paid_orders')
).values(
's'
)
qs = qs.annotate(
sum_tickets_paid=Subquery(sum_tickets_paid, output_field=IntegerField())
).prefetch_related(
Prefetch('quotas',
queryset=Quota.objects.filter(subevent__isnull=True).annotate(s=Coalesce(F('size'), 0)).order_by('-s'),
to_attr='first_quotas')
)
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs
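# Notes on the annotations above: order_from/order_to fall back from the
# earliest/latest subevent dates to the event's own dates, so event series sort
# sensibly alongside single events, and sum_tickets_paid aggregates the cached
# paid-order availability of the event-level quotas via a subquery.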
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
orga_c = Organizer.objects.filter(
pk__in=self.request.user.teams.values_list('organizer', flat=True)
).count()
ctx['hide_orga'] = orga_c <= 1
for s in ctx['events']:
s.first_quotas = s.first_quotas[:4]
for q in s.first_quotas:
q.cached_avail = (
(q.cached_availability_state, q.cached_availability_number)
if q.cached_availability_time is not None
else q.availability(allow_cache=True)
)
if q.size is not None:
q.percent_paid = min(
100,
round(q.cached_availability_paid_orders / q.size * 100) if q.size > 0 else 100
)
return ctx
@cached_property
def filter_form(self):
return EventFilterForm(data=self.request.GET, request=self.request)
def condition_copy(wizard):
return EventWizardCopyForm.copy_from_queryset(wizard.request.user).exists()
class EventWizard(SessionWizardView):
form_list = [
('foundation', EventWizardFoundationForm),
('basics', EventWizardBasicsForm),
('copy', EventWizardCopyForm),
]
templates = {
'foundation': 'pretixcontrol/events/create_foundation.html',
'basics': 'pretixcontrol/events/create_basics.html',
'copy': 'pretixcontrol/events/create_copy.html',
}
condition_dict = {
'copy': condition_copy
}
def get_context_data(self, form, **kwargs):
ctx = super().get_context_data(form, **kwargs)
ctx['has_organizer'] = self.request.user.teams.filter(can_create_events=True).exists()
if self.steps.current == 'basics':
ctx['organizer'] = self.get_cleaned_data_for_step('foundation').get('organizer')
return ctx
def render(self, form=None, **kwargs):
if self.steps.current != 'foundation':
fdata = self.get_cleaned_data_for_step('foundation')
if fdata is None:
return self.render_goto_step('foundation')
return super().render(form, **kwargs)
def get_form_kwargs(self, step=None):
kwargs = {
'user': self.request.user
}
if step != 'foundation':
fdata = self.get_cleaned_data_for_step('foundation')
kwargs.update(fdata)
return kwargs
def get_template_names(self):
return [self.templates[self.steps.current]]
def done(self, form_list, form_dict, **kwargs):
foundation_data = self.get_cleaned_data_for_step('foundation')
basics_data = self.get_cleaned_data_for_step('basics')
copy_data = self.get_cleaned_data_for_step('copy')
with transaction.atomic(), language(basics_data['locale']):
event = form_dict['basics'].instance
event.organizer = foundation_data['organizer']
event.plugins = settings.PRETIX_PLUGINS_DEFAULT
event.has_subevents = foundation_data['has_subevents']
form_dict['basics'].save()
has_control_rights = self.request.user.teams.filter(
organizer=event.organizer, all_events=True, can_change_event_settings=True, can_change_items=True,
can_change_orders=True, can_change_vouchers=True
).exists()
if not has_control_rights:
t = Team.objects.create(
organizer=event.organizer, name=_('Team {event}').format(event=event.name),
can_change_event_settings=True, can_change_items=True,
can_view_orders=True, can_change_orders=True, can_view_vouchers=True,
can_change_vouchers=True
)
t.members.add(self.request.user)
t.limit_events.add(event)
if event.has_subevents:
se = event.subevents.create(
name=event.name,
date_from=event.date_from,
date_to=event.date_to,
presale_start=event.presale_start,
presale_end=event.presale_end,
location=event.location,
active=True
)
if basics_data['tax_rate']:
event.settings.tax_rate_default = event.tax_rules.create(
name=LazyI18nString.from_gettext(ugettext('VAT')),
rate=basics_data['tax_rate']
)
logdata = {}
for f in form_list:
logdata.update({
k: v for k, v in f.cleaned_data.items()
})
event.log_action('pretix.event.settings', user=self.request.user, data=logdata)
if copy_data and copy_data['copy_from_event']:
from_event = copy_data['copy_from_event']
event.copy_data_from(from_event)
elif event.has_subevents:
event.checkin_lists.create(
name=str(se),
all_products=True,
subevent=se
)
else:
event.checkin_lists.create(
name=_('Default'),
all_products=True
)
event.settings.set('timezone', basics_data['timezone'])
event.settings.set('locale', basics_data['locale'])
event.settings.set('locales', foundation_data['locales'])
if (copy_data and copy_data['copy_from_event']) or event.has_subevents:
return redirect(reverse('control:event.settings', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
}) + '?congratulations=1')
else:
return redirect(reverse('control:event.quick', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
}) + '?congratulations=1')
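# Summary of done(): the event is created inside one transaction, a dedicated team
# is added when the user lacks organizer-wide control rights, a first subevent, a
# default tax rule and a check-in list are set up as needed (or data is copied from
# an existing event), and the user is redirected to the settings or quick-setup page.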
class SlugRNG(OrganizerPermissionRequiredMixin, View):
def get(self, request, *args, **kwargs):
# See Order.assign_code
charset = list('abcdefghjklmnpqrstuvwxyz3789')
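# The charset drops easily confused characters (no i or o, and only the digits
# 3, 7, 8, 9); the loop below retries up to 100 times until the random slug does
# not collide with an existing event of this organizer.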
for i in range(100):
val = get_random_string(length=settings.ENTROPY['order_code'], allowed_chars=charset)
if not self.request.organizer.events.filter(slug__iexact=val).exists():
break
return JsonResponse({'slug': val})
|
the-stack_0_27721
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2012-2014 Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Incremental JSON parser.
"""
from __future__ import unicode_literals
try:
from builtins import chr as _chr
except ImportError:
from __builtin__ import unichr as _chr
from itertools import groupby
from string import digits, whitespace
__all__ = ["JSONStream", "assembled", "grouped"]
class AwaitingData(BaseException):
""" Raised when data is temporarily unavailable.
"""
def __init__(self, *args, **kwargs):
super(AwaitingData, self).__init__(*args, **kwargs)
class EndOfStream(BaseException):
""" Raised when stream is exhausted.
"""
def __init__(self, *args, **kwargs):
super(EndOfStream, self).__init__(*args, **kwargs)
class UnexpectedCharacter(ValueError):
""" Raised when a unexpected character is encountered.
"""
def __init__(self, *args, **kwargs):
super(UnexpectedCharacter, self).__init__(*args, **kwargs)
class TextStream:
def __init__(self):
self.__data = []
self.__current_line = 0
self.__current_char = 0
self.__writable = True
self.__marked_line = 0
self.__marked_char = 0
def close(self):
self.__writable = False
def peek(self):
if self.__current_line < len(self.__data):
line = self.__data[self.__current_line]
if self.__current_char < len(line):
return line[self.__current_char]
else:
# no more characters on this line, jump to the next
self.__current_line += 1
self.__current_char = 0
if self.__current_line < len(self.__data):
return self.__data[self.__current_line][self.__current_char]
if self.__writable:
raise AwaitingData()
else:
raise EndOfStream()
def read(self):
if self.__current_line < len(self.__data):
line = self.__data[self.__current_line]
if self.__current_char < len(line):
ch = line[self.__current_char]
self.__current_char += 1
return ch
else:
self.__current_line += 1
if self.__current_line < len(self.__data):
self.__current_char = 1
return self.__data[self.__current_line][0]
else:
self.__current_char = 0
if self.__writable:
raise AwaitingData()
else:
raise EndOfStream()
def read_any(self, allowed):
out = []
start = self.__current_char
while True:
if self.__current_line < len(self.__data):
line = self.__data[self.__current_line]
if self.__current_char < len(line):
ch = self.__data[self.__current_line][self.__current_char]
if ch in allowed:
# move forward
self.__current_char += 1
else:
# return everything between start and here
out.append(line[start:self.__current_char])
return "".join(out)
else:
# no more characters on this line
out.append(line[start:])
self.__current_line += 1
self.__current_char = 0
start = 0
elif self.__writable:
raise AwaitingData()
else:
return "".join(out)
#def read_until(self, marker):
# out = []
# line = self.__current_line
# start = self.__current_char
# while True:
# try:
# end = self.__data[line].index(marker, start)
# except IndexError: # no more lines
# if self.__writable:
# raise AwaitingData()
# else:
# raise EndOfStream()
# except ValueError: # not found
# out.append(self.__data[line][start:])
# line += 1
# start = 0
# else:
# # found
# self.__current_line = line
# self.__current_char = end + 1
# out.append(self.__data[line][start:self.__current_char])
# return "".join(out)
def read_until_any(self, markers):
out = []
line = self.__current_line
start = self.__current_char
while True:
if line < len(self.__data):
try:
end = min(pos
for pos in [self.__data[line].find(marker, start)
for marker in markers]
if pos >= 0)
except ValueError: # not found
out.append(self.__data[line][start:])
line += 1
start = 0
else:
# found
self.__current_line = line
self.__current_char = end + 1
out.append(self.__data[line][start:self.__current_char])
return "".join(out)
elif self.__writable:
raise AwaitingData()
else:
raise EndOfStream()
def peek_after_any(self, markers):
"""
Skips any characters in the marker set and returns a peek at the next character.
"""
while True:
if self.__current_line < len(self.__data):
line = self.__data[self.__current_line]
if self.__current_char < len(line):
ch = self.__data[self.__current_line][self.__current_char]
if ch in markers:
# skip
self.__current_char += 1
else:
# peek
return ch
else:
# no more characters on this line
self.__current_line += 1
self.__current_char = 0
elif self.__writable:
raise AwaitingData()
else:
raise EndOfStream()
def write(self, data):
if not self.__writable:
raise IOError("Stream is not writable")
if data:
# so we can guarantee no line is empty
self.__data.append(data)
def mark(self):
self.__marked_line = self.__current_line
self.__marked_char = self.__current_char
def undo(self):
self.__current_line = self.__marked_line
self.__current_char = self.__marked_char
class Tokeniser(object):
def __init__(self):
self.__text = TextStream()
def close(self):
self.__text.close()
def write(self, data):
"""Write raw JSON data to the decoder stream.
"""
self.__text.write(data)
def _read_literal(self, literal):
self.__text.mark()
try:
for expected in literal:
actual = self.__text.read()
if actual != expected:
raise UnexpectedCharacter(actual)
except AwaitingData:
self.__text.undo()
raise
return literal
def _read_string(self):
self.__text.mark()
try:
src, value = [self._read_literal('"')], []
while True:
chunk = self.__text.read_until_any(('"', '\\'))
src.append(chunk)
value.append(chunk[:-1])
if chunk.endswith('\\'):
ch = self.__text.read()
src.append(ch)
if ch in '"/\\':
value.append(ch)
elif ch == 'b':
value.append('\b')
elif ch == 'f':
value.append('\f')
elif ch == 'n':
value.append('\n')
elif ch == 'r':
value.append('\r')
elif ch == 't':
value.append('\t')
elif ch == 'u':
n = 0
for i in range(4):
ch = self.__text.read()
src.append(ch)
n = 16 * n + int(ch, 16)
value.append(_chr(n))
else:
raise UnexpectedCharacter(ch)
else:
return "".join(src), "".join(value)
except AwaitingData:
self.__text.undo()
raise
def _read_number(self):
src = []
has_fractional_part = False
has_exponent = False
self.__text.mark()
try:
# check for sign
ch = self.__text.read_any("-")
if ch:
src.append(ch)
# read integer part
src.append(self.__text.read_any(digits))
# read fractional part
ch = self.__text.read_any(".")
if ch:
has_fractional_part = True
src.append(ch)
src.append(self.__text.read_any(digits))
# read exponent
ch = self.__text.read_any('Ee')
if ch:
has_exponent = True
src.append(ch)
ch = self.__text.read_any('+-')
if ch:
src.append(ch)
src.append(self.__text.read_any(digits))
except AwaitingData:
# number potentially incomplete: need to wait for
# further data or end of stream
self.__text.undo()
raise
str_src = "".join(src)
if has_fractional_part or has_exponent:
return str_src, float(str_src)
else:
return str_src, int(str_src)
    def read_token(self):
        """ Read a single token from the stream, returning a 2-tuple of its
        source text and decoded value (``None`` for punctuation tokens).
"""
ch = self.__text.peek_after_any(whitespace)
if ch in ',:[]{}':
return self.__text.read(), None
if ch == '"':
return self._read_string()
if ch in '0123456789+-':
return self._read_number()
if ch == 't':
return self._read_literal("true"), True
if ch == 'f':
return self._read_literal("false"), False
if ch == 'n':
return self._read_literal("null"), None
raise UnexpectedCharacter(ch)
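def _example_tokeniser():
    # Illustrative sketch only (not part of the original API): feed a complete
    # JSON snippet to a Tokeniser and drain it token by token. Each token is a
    # (source, value) pair; punctuation tokens carry a value of None.
    tokeniser = Tokeniser()
    tokeniser.write('{"answer": 42}')
    tokeniser.close()
    tokens = []
    while True:
        try:
            tokens.append(tokeniser.read_token())
        except EndOfStream:
            break
    # tokens == [('{', None), ('"answer"', 'answer'), (':', None), ('42', 42), ('}', None)]
    return tokens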
# Token constants used for expectation management
VALUE = 0x01
OPEN_BRACKET = 0x02
CLOSE_BRACKET = 0x04
OPEN_BRACE = 0x08
CLOSE_BRACE = 0x10
COMMA = 0x20
COLON = 0x40
VALUE_OR_OPEN = VALUE | OPEN_BRACKET | OPEN_BRACE
VALUE_BRACKET_OR_OPEN_BRACE = VALUE | OPEN_BRACKET | CLOSE_BRACKET | OPEN_BRACE
COMMA_OR_CLOSE_BRACKET = COMMA | CLOSE_BRACKET
COMMA_OR_CLOSE_BRACE = COMMA | CLOSE_BRACE
VALUE_OR_CLOSE_BRACE = VALUE | CLOSE_BRACE
class JSONStream(object):
    """ Streaming JSON decoder. This class expects Unicode input and produces
    Unicode output, yielding (path, value) pairs as tokens are consumed.
"""
def __init__(self, source):
self.tokeniser = Tokeniser()
self.source = iter(source)
self.path = []
self._expectation = VALUE_OR_OPEN
def _in_array(self):
return self.path and isinstance(self.path[-1], int)
def _in_object(self):
return self.path and not isinstance(self.path[-1], int)
def __iter__(self):
while True:
try:
try:
self.tokeniser.write(next(self.source))
except StopIteration:
self.tokeniser.close()
while True:
try:
src, value = self.tokeniser.read_token()
if src == ',':
if not self._expectation & COMMA:
raise UnexpectedCharacter(",")
self._expectation = VALUE_OR_OPEN
elif src == ':':
if not self._expectation & COLON:
raise UnexpectedCharacter(":")
self._expectation = VALUE_OR_OPEN
elif src == '[':
yield tuple(self.path), []
if not self._expectation & OPEN_BRACKET:
raise UnexpectedCharacter("[")
self.path.append(0)
self._expectation = VALUE_BRACKET_OR_OPEN_BRACE
elif src == ']':
if not self._expectation & CLOSE_BRACKET:
raise UnexpectedCharacter("]")
self.path.pop()
if self._in_array():
self.path[-1] += 1
self._expectation = COMMA_OR_CLOSE_BRACKET
elif self._in_object():
self.path[-1] = None
self._expectation = COMMA_OR_CLOSE_BRACE
else:
self._expectation = VALUE_OR_OPEN
elif src == '{':
yield tuple(self.path), {}
if not self._expectation & OPEN_BRACE:
raise UnexpectedCharacter("{")
self.path.append(None)
self._expectation = VALUE_OR_CLOSE_BRACE
elif src == '}':
if not self._expectation & CLOSE_BRACE:
raise UnexpectedCharacter("}")
self.path.pop()
if self._in_array():
self.path[-1] += 1
self._expectation = COMMA_OR_CLOSE_BRACKET
elif self._in_object():
self.path[-1] = None
self._expectation = COMMA_OR_CLOSE_BRACE
else:
self._expectation = VALUE_OR_OPEN
else:
if not self._expectation & VALUE:
raise UnexpectedCharacter(src)
if self._in_array():
# array value
yield tuple(self.path), value
self.path[-1] += 1
self._expectation = COMMA_OR_CLOSE_BRACKET
elif self._in_object():
if self.path[-1] is None:
# object key (no yield)
self.path[-1] = value
self._expectation = COLON
else:
# object value
yield tuple(self.path), value
self.path[-1] = None
self._expectation = COMMA_OR_CLOSE_BRACE
else:
# simple value
yield tuple(self.path), value
except AwaitingData:
break
except EndOfStream:
break
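def _example_json_stream():
    # Illustrative sketch only: JSONStream consumes an iterable of text chunks
    # (two arbitrary fragments here) and yields (path, value) pairs as soon as
    # enough data has arrived, without waiting for the full document.
    events = list(JSONStream(['[10, 2', '0, 30]']))
    # events == [((), []), ((0,), 10), ((1,), 20), ((2,), 30)]
    return events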
def _merged(obj, key, value):
""" Returns object with value merged at a position described by iterable
key. The key describes a navigable path through the object hierarchy with
integer items describing list indexes and other types of items describing
dictionary keys.
>>> obj = None
>>> obj = _merged(obj, ("drink",), "lemonade")
>>> obj
{'drink': 'lemonade'}
>>> obj = _merged(obj, ("cutlery", 0), "knife")
>>> obj = _merged(obj, ("cutlery", 1), "fork")
>>> obj = _merged(obj, ("cutlery", 2), "spoon")
>>> obj
{'cutlery': ['knife', 'fork', 'spoon'], 'drink': 'lemonade'}
"""
if key:
k = key[0]
if isinstance(k, int):
if isinstance(obj, list):
obj = list(obj)
else:
obj = []
while len(obj) <= k:
obj.append(None)
else:
if isinstance(obj, dict):
obj = dict(obj)
else:
obj = {}
obj.setdefault(k, None)
obj[k] = _merged(obj[k], key[1:], value)
return obj
else:
return value
def assembled(iterable):
""" Returns a JSON-derived value from a set of key-value pairs as produced
by the JSONStream process. This operates in a similar way to the built-in
    `dict` function. Internally, this uses the `_merged` function on each pair
to build the return value.
>>> data = [
... (("drink",), "lemonade"),
... (("cutlery", 0), "knife"),
... (("cutlery", 1), "fork"),
... (("cutlery", 2), "spoon"),
... ]
>>> assembled(data)
{'cutlery': ['knife', 'fork', 'spoon'], 'drink': 'lemonade'}
:param iterable: key-value pairs to be merged into assembled value
"""
obj = None
for key, value in iterable:
obj = _merged(obj, key, value)
return obj
def _group(iterable, level):
for key, value in iterable:
yield key[level:], value
def grouped(iterable, level=1):
def _group_key(item):
key, value = item
if len(key) >= level:
return key[0:level]
else:
return None
for key, value in groupby(iterable, _group_key):
if key is not None:
yield key, _group(value, level)
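def _example_grouped():
    # Illustrative sketch only: stream a JSON array of objects and assemble
    # each top-level element as soon as all of its events have been seen.
    elements = [(prefix, assembled(element))
                for prefix, element in grouped(JSONStream(['[{"a": 1}, ', '{"b": 2}]']))]
    # elements == [((0,), {'a': 1}), ((1,), {'b': 2})]
    return elements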
|
the-stack_0_27722
|
"""
Sequential
by James Paterson.
Displaying a sequence of images creates the illusion of motion.
Twelve images are loaded and each is displayed individually in a loop.
"""
numFrames = 12 # The number of frames in the animation
currentFrame = 0
images = [None] * numFrames  # placeholders for the loaded frames
def setup():
size(640, 360)
frameRate(24)
images[0] = loadImage("PT_anim0000.gif")
images[1] = loadImage("PT_anim0001.gif")
images[2] = loadImage("PT_anim0002.gif")
images[3] = loadImage("PT_anim0003.gif")
images[4] = loadImage("PT_anim0004.gif")
images[5] = loadImage("PT_anim0005.gif")
images[6] = loadImage("PT_anim0006.gif")
images[7] = loadImage("PT_anim0007.gif")
images[8] = loadImage("PT_anim0008.gif")
images[9] = loadImage("PT_anim0009.gif")
images[10] = loadImage("PT_anim0010.gif")
images[11] = loadImage("PT_anim0011.gif")
# If you don't want to load each image separately
# and you know how many frames you have, you
# can create the filenames as the program runs.
# The nf() command does number formatting, which will
# ensure that the number is (in this case) 4 digits.
# for i in range(numFrames):
# imageName = "PT_anim" + nf(i, 4) + ".gif"
# images[i] = loadImage(imageName)
#
def draw():
    global currentFrame
    background(0)
    # Use % to cycle through frames
    currentFrame = (currentFrame + 1) % numFrames
offset = 0
for x in range(-100, width, images[0].width):
image(images[(currentFrame + offset) % numFrames], x, -20)
offset += 2
image(images[(currentFrame + offset) % numFrames], x, height / 2)
offset += 2
|
the-stack_0_27723
|
# ---------------------------------------------------------------------
# Summary: Create an ODM2 SQLite database from the ODM2_for_SQLite.sql
# DDL SQL script
# Created by: Jeff Horsburgh
# Created on: 11-13-2014
#
# Requirements:
# 1. Expects ODM2_for_SQLite.sql in the same directory as
# the script file
#
# Outputs:
# 1. Creates a SQLite database called ODM2.sqlite in the same
# directory as the script file
# ---------------------------------------------------------------------
import sqlite3
# Create a new SQLite database and get a cursor
conn = sqlite3.connect('ODM2.sqlite')
c = conn.cursor()
# Open the DDL SQL file and read it
sqlString = open('ODM2_for_SQLite.sql', 'r').read()
# Execute the DDL SQL script on the blank SQLite database
c.executescript(sqlString)
# Commit the changes and close the connection to the database
conn.commit()
conn.close()
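# Optional sanity check (a hedged sketch; table names depend on the DDL script):
# conn = sqlite3.connect('ODM2.sqlite')
# print(conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
# conn.close()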
|
the-stack_0_27728
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helpers for sphinx documentation.
Can be used by armi docs or docs of anything else that
can import armi.
"""
import sys
import inspect
import datetime
import os
import subprocess
import shutil
from docutils.parsers.rst import Directive, directives
from docutils import nodes, statemachine
APIDOC_DIR = ".apidocs"
def create_figure(path, caption=None, align=None, alt=None, width=None):
"""
This method is available within ``.. exec::``. It allows someone to create a figure with a
caption.
"""
rst = [".. figure:: {}".format(path)]
if align:
rst += [" :align: {}".format(align)]
if alt:
rst += [" :alt: {}".format(alt)]
if width:
rst += [" :width: {}".format(width)]
if caption:
rst += [""]
if caption:
rst += [" {}".format(caption)]
return rst
def create_table(rst_table, caption=None, align=None, widths=None, width=None):
"""
This method is available within ``.. exec::``. It allows someone to create a table with a
caption.
    The ``rst_table`` argument should already be a valid reStructuredText table;
    it is simply indented underneath the ``.. table::`` directive.
    """
rst = [".. table:: {}".format(caption or "")]
if align:
rst += [" :align: {}".format(align)]
if width:
rst += [" :width: {}".format(width)]
if widths:
rst += [" :widths: {}".format(widths)]
rst += [""]
rst += [" " + line for line in rst_table.split("\n")]
return "\n".join(rst)
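# A hedged usage sketch: inside an ``.. exec::`` block in an .rst file the body
# must return reStructuredText, so one might write (the import path is an
# assumption and may differ in your project):
#
# .. exec::
#     from dochelpers import create_table
#     return create_table("===  ===\n a    b\n===  ===\n 1    2\n===  ===",
#                         caption="Example table", align="center")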
class ExecDirective(Directive):
"""
Execute the specified python code and insert the output into the document.
The code is used as the body of a method, and must return a ``str``. The string result is
interpreted as reStructuredText.
Error handling informed by https://docutils.sourceforge.io/docs/howto/rst-directives.html#error-handling
The self.error function should both inform the documentation builder of the error and also
insert an error into the built documentation.
.. warning:: This only works on a single node in the doctree, so the rendered code
may not contain any new section names or labels. They will result in
``WARNING: Unexpected section title`` warnings.
"""
has_content = True
def run(self):
try:
code = inspect.cleandoc(
"""
def usermethod():
{}
"""
).format("\n ".join(self.content))
exec(code)
result = locals()["usermethod"]()
if result is None:
raise self.error(
"Return value needed! The body of your `.. exec::` is used as a "
"function call that must return a value."
)
para = nodes.container()
# tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
lines = statemachine.StringList(result.split("\n"))
self.state.nested_parse(lines, self.content_offset, para)
return [para]
except Exception as e:
docname = self.state.document.settings.env.docname
raise self.error(
"Unable to execute embedded doc code at {}:{} ... {}\n{}".format(
docname, self.lineno, datetime.datetime.now(), str(e)
)
)
class PyReverse(Directive):
"""Runs pyreverse to generate UML for specified module name and options.
The directive accepts the same arguments as pyreverse, except you should not specify
``--project`` or ``-o`` (output format). These are automatically specified.
If you pass ``-c`` to this, the figure generated is forced to be the className.png
like ``BurnMatrix.png``. For .gitignore purposes, this is a pain. Thus, we
    auto-prefix ALL images generated by this directive with ``pyr_``.
"""
has_content = True
required_arguments = 1
optional_arguments = 50
option_spec = {
"alt": directives.unchanged,
"height": directives.length_or_percentage_or_unitless,
"width": directives.length_or_percentage_or_unitless,
"align": lambda arg: directives.choice(arg, ("left", "right", "center")),
"filename": directives.unchanged,
}
def run(self):
try:
args = list(self.arguments)
args.append("--project")
args.append(f"{args[0]}")
args.append("-opng")
# cannot use "pylint.pyreverse.main.Run" because it calls `sys.exit`. why?
fig_name = self.options.get("filename", "classes_{}.png".format(args[0]))
command = [sys.executable, "-m", "pylint.pyreverse.main"]
print("Running {}".format(command + args))
env = dict(os.environ)
# apply any runtime path mods to the pythonpath env variable (e.g. sys.path
# mods made during doc confs)
env["PYTHONPATH"] = os.pathsep.join(sys.path)
subprocess.check_call(command + args, env=env)
try:
os.remove(os.path.join(APIDOC_DIR, fig_name))
            except OSError:
                pass
shutil.move(fig_name, APIDOC_DIR)
# add .gitignore helper prefix
shutil.move(
os.path.join(APIDOC_DIR, fig_name),
os.path.join(APIDOC_DIR, f"pyr_{fig_name}"),
)
new_content = [f".. figure:: /{APIDOC_DIR}/pyr_{fig_name}"]
# assume we don't need the packages_, and delete.
try:
os.remove("packages_{}.png".format(args[0]))
            except OSError:
                pass
# pass the other args through (figure args like align)
for opt, val in self.options.items():
if opt in ("filename",):
continue
new_content.append(" :{}: {}\n".format(opt, val))
new_content.append("\n")
for line in self.content:
new_content.append(" " + line)
para = nodes.container()
# tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
lines = statemachine.StringList(new_content)
self.state.nested_parse(lines, self.content_offset, para)
return [para]
except Exception as e:
docname = self.state.document.settings.env.docname
# add the error message directly to the built documentation and also tell the
# builder
raise self.error(
"Unable to execute embedded doc code at {}:{} ... {}\n{}".format(
docname, self.lineno, datetime.datetime.now(), str(e)
)
)
def generateParamTable(klass, fwParams, app=None):
"""
Return a string containing one or more restructured text list tables containing
parameter descriptions for the passed ArmiObject class.
Parameters
----------
klass : ArmiObject subclass
The Class for which parameter tables should be generated
fwParams : ParameterDefinitionCollection
A parameter definition collection containing the parameters that are always
defined for the passed ``klass``. The rest of the parameters come from the
plugins registered with the passed ``app``
app : App, optional
The ARMI-based application to draw plugins from.
Notes
-----
It would be nice to have better section labels between the different sources
    but this cannot be done within an ``exec`` directive in Sphinx so we settle
for just putting in anchors for hyperlinking to.
"""
from armi import apps
if app is None:
app = apps.App()
    defs = {None: fwParams}
for plugin in app.pluginManager.get_plugins():
plugParams = plugin.defineParameters()
if plugParams is not None:
pDefs = plugParams.get(klass, None)
if pDefs is not None:
defs[plugin] = pDefs
headerContent = """
.. list-table:: {} Parameters from {{}}
:header-rows: 1
:widths: 30 40 30
* - Name
- Description
- Units
""".format(
klass.__name__
)
content = []
for plugin, pdefs in defs.items():
srcName = plugin.__name__ if plugin is not None else "Framework"
content.append(f".. _{srcName}-{klass.__name__}-param-table:")
pluginContent = headerContent.format(srcName)
for pd in pdefs:
pluginContent += f""" * - {pd.name}
- {pd.description}
- {pd.units}
"""
content.append(pluginContent + "\n")
return "\n".join(content)
|
the-stack_0_27729
|
class Cargo:
    """Simple data holder describing a cargo shipment and who handles it."""
    def __init__(self, origin, destination, volume, type, ownerId, driverId=None):
        self.origin = origin            # where the cargo is picked up
        self.destination = destination  # where the cargo is delivered
        self.volume = volume            # shipment volume
        self.type = type                # kind of cargo
        self.ownerId = ownerId          # id of the owner who posted the cargo
        self.driverId = driverId        # id of the assigned driver, if any
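# Hedged usage sketch (all field values below are illustrative):
# shipment = Cargo(origin="Rotterdam", destination="Oslo", volume=24,
#                  type="pallets", ownerId=7)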
|
the-stack_0_27730
|
# Logic Circuit
import pygame
from os.path import dirname, abspath
# import local code
from colors import *
from logic import *
from constants import *
from classes import (
Basic,
Button,
Switch,
Wire,
Led,
BasicGate,
)
# Set name, logo, font
pygame.display.set_caption(WIN_NAME)
pygame.font.init()
fps_font = pygame.font.SysFont("Arial", 15)
font = pygame.font.SysFont("adobegothicstdkalin", 20)
img = pygame.image.load(f"{dirname(abspath(__file__))}/logo.png")
pygame.display.set_icon(img)
# constants (depending on pygame)
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# Creating circle for when object is pressed
circle = pygame.Surface((20, 20), pygame.SRCALPHA)
pygame.draw.circle(circle, (123, 123, 123, 255), (10, 10), 10)
# Defining temporary variables
selected = None
first_gate = None
temp_points = []
# Defining lists for each object
gates = []
leds = []
wires = []
switches = []
# Defining panels
BUTTON_PANEL = Basic(
(0, HEIGHT - int(HEIGHT * 0.1)), (WIDTH, int(HEIGHT * 0.1)), PANEL_COLOR, "", font
)
SWITCH_PANEL = Basic(
(0, HEIGHT / 6),
(int(WIDTH * 0.0277 * 2), int(HEIGHT * 0.06) * 12),
PANEL_COLOR,
"",
font,
)
LED_PANEL = Basic(
(WIDTH - int(WIDTH * 0.0277 * 2), HEIGHT / 6),
(int(WIDTH * 0.0277 * 2), int(HEIGHT * 0.06) * 12),
PANEL_COLOR,
"",
font,
)
# Switch add/remove buttons
SWITCH_ADD_BUTTON = Button(
(0, -(int((HEIGHT * 0.15) / 2) - int(WIDTH * 0.01385) + BASIC_SIZE[1] + 2)),
BASIC_SIZE,
GREEN,
"+Switch",
font,
panel=SWITCH_PANEL,
)
SWITCH_REMOVE_BUTTON = Button(
(0, -(int((HEIGHT * 0.15) / 2) - int(WIDTH * 0.01385))),
BASIC_SIZE,
RED,
"-Switch",
font,
panel=SWITCH_PANEL,
)
# Led add/remove buttons
LED_ADD_BUTTON = Button(
(0, -(int((HEIGHT * 0.15) / 2) - int(WIDTH * 0.01385) + BASIC_SIZE[1] + 2)),
BASIC_SIZE,
GREEN,
"+LED",
font,
panel=LED_PANEL,
)
LED_REMOVE_BUTTON = Button(
(0, -(int((HEIGHT * 0.15) / 2) - int(WIDTH * 0.01385))),
BASIC_SIZE,
RED,
"-LED",
font,
panel=LED_PANEL,
)
# Logic gate buttons
NOT_GATE_BUTTON = Button(
(int(WIDTH * 0.1108), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
BLUE,
"Not",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=1,
logic=not_gate_logic,
)
AND_GATE_BUTTON = Button(
(int(WIDTH * 0.1108 * 2), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
LIGHT_GREEN,
"And",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=2,
logic=and_gate_logic,
)
OR_GATE_BUTTON = Button(
(int(WIDTH * 0.1108 * 3), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
LIGHT_YELLOW,
"Or",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=2,
logic=or_gate_logic,
)
NOR_GATE_BUTTON = Button(
(int(WIDTH * 0.1108 * 4), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
LIGHT_ORANGE,
"Nor",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=2,
logic=nor_gate_logic,
)
XOR_GATE_BUTTON = Button(
(int(WIDTH * 0.1108 * 5), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
LIGHT_PURPLE,
"Xor",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=2,
logic=xor_gate_logic,
)
NAND_GATE_BUTTON = Button(
(int(WIDTH * 0.1108 * 6), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
LIGHT_PINK,
"Nand",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=2,
logic=nand_gate_logic,
)
XNOR_GATE_BUTTON = Button(
(int(WIDTH * 0.1108 * 7), int((HEIGHT * 0.1) / 2) - int(WIDTH * 0.01385)),
BASIC_SIZE,
TEA_GREEN,
"Xnor",
font,
panel=BUTTON_PANEL,
gate_list=gates,
total_inp=2,
logic=xnor_gate_logic,
)
buttons = [
NOT_GATE_BUTTON,
AND_GATE_BUTTON,
OR_GATE_BUTTON,
NOR_GATE_BUTTON,
XOR_GATE_BUTTON,
NAND_GATE_BUTTON,
XNOR_GATE_BUTTON,
]
# Update logic
def update():
# update gates
for gate in gates:
for inp in gate.inp:
inp.value = False
for wire in inp.connected_wires:
if wire.value is True:
inp.value = True
break
# update leds
for led in leds:
led.update()
# update wires
for wire in wires:
wire.update()
# Handling led creation/deletion
def led_handler(pos, event):
# LEFT CLICK
if event.button == 1:
if LED_ADD_BUTTON.click(pos) and len(leds) < 12:
# add new led
leds.append(Led((0, int(HEIGHT * 0.06) * len(leds)), BASIC_SIZE, LED_PANEL))
elif LED_REMOVE_BUTTON.click(pos) and leds:
# delete last led in list
# Delete connected wires
for wire in leds[len(leds) - 1].inp.connected_wires:
wires.remove(wire)
# Remove led
leds.pop()
# Handling switch creation/deletion
def switch_handler(pos, event):
global selected
global first_gate
# LEFT CLICK
if event.button == 1:
# Creating/removing switches
if SWITCH_ADD_BUTTON.click(pos) and len(switches) < 12:
# add new switch
switches.append(
Switch(
(0, int(HEIGHT * 0.06) * len(switches)), BASIC_SIZE, SWITCH_PANEL
)
)
elif SWITCH_REMOVE_BUTTON.click(pos) and switches:
# delete last switch in list
# Unselecting all gates when deleting
selected = None
first_gate = None
# copy of switches.out
temp_array = switches[len(switches) - 1].out[:]
for switch_wire in temp_array:
for gate in gates: # Parse through gates
for inp in gate.inp:
if switch_wire in inp.connected_wires:
switches[len(switches) - 1].out.remove(switch_wire)
wires.remove(switch_wire)
inp.connected_wires.remove(switch_wire)
for led in leds: # Parse through leds
if switch_wire in led.inp.connected_wires:
led.inp.connected_wires.remove(switch_wire)
wires.remove(switch_wire)
# Remove switch
switches.pop()
# MIDDLE CLICK
elif event.button == 2:
# Flipping switch value
for switch in switches:
if switch.click(pos):
switch.value = not switch.value
# Handling gate creation/deletion
def gates_handler(pos, event):
global selected
global first_gate
global temp_points
# LEFT CLICK
if event.button == 1:
# Creating gates
if selected and not BUTTON_PANEL.click(pos):
temp = BasicGate(
(pos[0] - 15, pos[1] - 15),
BASIC_SIZE,
selected.color,
selected.text,
font,
logic=selected.logic,
total_inp=selected.total_inp,
)
gates.append(temp)
selected = None
for button in buttons:
# select gate from bottom options
if button.click(pos):
selected = button
break
# RIGHT CLICK
elif event.button == 3:
for gate in gates:
if gate.click(pos):
# Unselecting all gates when deleting
selected = None
first_gate = None
# Delete connected wires & remove temp_points
temp_wires = []
temp_points = []
# Rounding up all output wires of gate
for wire in wires:
if wire.inp is gate:
temp_wires.append(wire)
# Deleting gate's output wires from inputs
# Only if gate's output wire goes to another gate
for out_gate in gates:
for inp in out_gate.inp:
for wire in inp.connected_wires:
if wire in temp_wires:
temp_wires.remove(wire)
wires.remove(wire)
inp.connected_wires.remove(wire)
# Clearing from memory
# ...going to implement later
# del wire
# gc.collect()
# Only if gate's output wire goes to an led
for led in leds:
for wire in led.inp.connected_wires:
if wire in temp_wires:
temp_wires.remove(wire)
wires.remove(wire)
led.inp.connected_wires.remove(wire)
# Deleting gate's input wires
for inp in gate.inp:
for wire in inp.connected_wires:
wires.remove(wire)
# remove gate
gates.remove(gate)
# Handling wire creation and attachment to other objects
def wire_handler(pos, event):
global selected
global first_gate
global temp_points
# LEFT CLICK
if event.button == 1 and selected is None:
# Gates
for gate in gates:
# Output Gate
if gate.click(pos) and not first_gate:
# creating wire if wire input is gate
# Assigning first_gate to currently clicked gate
temp_points = []
first_gate = gate
temp_points.append(
(
first_gate.out_rect[0] + first_gate.out_rect[3],
first_gate.out_rect[1],
)
)
break
elif first_gate:
# Input Gate
for inp in gate.inp:
if inp.click(pos):
temp_points.append(inp.pos)
wire = Wire(
first_gate,
temp_points,
)
if first_gate in switches:
first_gate.out.append(wire)
# Adding wire to wires array
wires.append(wire)
# Adding wire to inp(wire connecting to out)
inp.connected_wires.append(wire)
                        # Resetting first_gate and temp_points
first_gate = None
else:
temp_points.append(pos)
# Switches
if not first_gate:
for switch in switches:
if switch.click(pos):
# creating wire if wire input is switch
temp_points = []
first_gate = switch
temp_points.append(
(first_gate.pos[0] + first_gate.size[0], first_gate.pos[1])
)
break
# Leds
if first_gate:
for led in leds:
if led.inp.click(pos):
temp_points.append(led.inp.pos)
# Creating wire
wire = Wire(
first_gate,
temp_points,
)
# Adding wire to wires array
wires.append(wire)
if first_gate in switches:
first_gate.out.append(wire)
# Setting led input to wire
led.inp.connected_wires.append(wire)
first_gate = None
temp_points = []
break
# Handling window drawing
def draw_window(fps: int):
global temp_points
global first_gate
x, y = pygame.mouse.get_pos()
# draw bg and panels
WIN.fill(BG_COLOR)
BUTTON_PANEL.draw(WIN)
SWITCH_PANEL.draw(WIN)
LED_PANEL.draw(WIN)
# draw objects
for gate in gates:
gate.draw(WIN)
for wire in wires:
wire.draw(WIN)
for switch in switches:
switch.draw(WIN)
for led in leds:
led.draw(WIN)
# draw logic gate buttons
for button in buttons:
button.draw(WIN)
# draw switch & led buttons
SWITCH_ADD_BUTTON.draw(WIN)
SWITCH_REMOVE_BUTTON.draw(WIN)
LED_ADD_BUTTON.draw(WIN)
LED_REMOVE_BUTTON.draw(WIN)
# fps counter
fps_counter = fps_font.render(str(fps), False, (0, 204, 34))
WIN.blit(fps_counter, (9, 9))
# draw wire creation
if first_gate and temp_points:
for i in range(len(temp_points) - 1):
pygame.draw.line(
WIN,
(255, 255, 255),
(int(temp_points[i][0]), int(temp_points[i][1])),
(int(temp_points[i + 1][0]), int(temp_points[i + 1][1])),
5,
)
pygame.draw.line(
WIN,
(255, 255, 255),
(
int(temp_points[len(temp_points) - 1][0]),
int(temp_points[len(temp_points) - 1][1]),
),
(x, y),
5,
)
# draw circle when an item is selected
if selected:
WIN.blit(circle, (x - 10, y - 10))
# update screen
pygame.display.update()
# Handling clicks
def click(pos, event):
led_handler(pos, event)
switch_handler(pos, event)
wire_handler(pos, event)
gates_handler(pos, event)
# Main function
def main():
    # defining variables
global selected
global first_gate
clock = pygame.time.Clock()
run = True
while run:
# while window is running
clock.tick(FPS)
update()
# looping through pygame events
for event in pygame.event.get():
if event.type == pygame.QUIT:
# if signal to shutoff program
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
# if mouse click
click(pygame.mouse.get_pos(), event)
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                # clear selected and first_gate when escape is pressed
selected = None
first_gate = None
# draw window
draw_window(int(clock.get_fps()))
# when program is turned off kill pygame window
pygame.quit()
|
the-stack_0_27731
|
"""Built-in Tasks.
The built-in tasks are always available in all app instances.
"""
from celery._state import connect_on_app_finalize
from celery.utils.log import get_logger
__all__ = ()
logger = get_logger(__name__)
@connect_on_app_finalize
def add_backend_cleanup_task(app):
"""Task used to clean up expired results.
If the configured backend requires periodic cleanup this task is also
automatically configured to run every day at 4am (requires
:program:`celery beat` to be running).
"""
@app.task(name='celery.backend_cleanup', shared=False, lazy=False)
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
@connect_on_app_finalize
def add_accumulate_task(app):
"""Task used by Task.replace when replacing task with group."""
@app.task(bind=True, name='celery.accumulate', shared=False, lazy=False)
def accumulate(self, *args, **kwargs):
index = kwargs.get('index')
return args[index] if index is not None else args
return accumulate
@connect_on_app_finalize
def add_unlock_chord_task(app):
"""Task used by result backends without native chord support.
    Joins the chord by creating a polling task that retries until the
    header group has completed.
"""
from celery.canvas import maybe_signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple
@app.task(name='celery.chord_unlock', max_retries=None, shared=False,
default_retry_delay=app.conf.result_chord_retry_interval, ignore_result=True, lazy=False, bind=True)
def unlock_chord(self, group_id, callback, interval=None,
max_retries=None, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple, **kwargs):
if interval is None:
interval = self.default_retry_delay
# check if the task group is ready, and if so apply the callback.
callback = maybe_signature(callback, app)
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join
try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries,
)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)
callback = maybe_signature(callback, app=app)
try:
with allow_join_result():
ret = j(
timeout=app.conf.result_chord_join_timeout,
propagate=True,
)
except Exception as exc: # pylint: disable=broad-except
try:
culprit = next(deps._failed_join_report())
reason = f'Dependency {culprit.id} raised {exc!r}'
except StopIteration:
reason = repr(exc)
logger.exception('Chord %r raised: %r', group_id, exc)
app.backend.chord_error_from_stack(callback, ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc: # pylint: disable=broad-except
logger.exception('Chord %r raised: %r', group_id, exc)
app.backend.chord_error_from_stack(
callback,
exc=ChordError(f'Callback error: {exc!r}'),
)
return unlock_chord
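# Hedged usage sketch: application code never calls chord_unlock directly; it
# is scheduled automatically when the result backend lacks native chord
# support. The task names below are illustrative:
#
#     from celery import chord, group
#     chord(group(add.s(i, i) for i in range(10)))(tsum.s())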
@connect_on_app_finalize
def add_map_task(app):
from celery.canvas import signature
@app.task(name='celery.map', shared=False, lazy=False)
def xmap(task, it):
task = signature(task, app=app).type
return [task(item) for item in it]
return xmap
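# Hedged usage sketch: ``celery.map`` backs the ``Task.map`` shortcut, applying
# one signature to every item in a single worker call (``square`` is an
# illustrative single-argument task):
#
#     square.map([1, 2, 3]).delay()  # one worker call computing [1, 4, 9]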
@connect_on_app_finalize
def add_starmap_task(app):
from celery.canvas import signature
@app.task(name='celery.starmap', shared=False, lazy=False)
def xstarmap(task, it):
task = signature(task, app=app).type
return [task(*item) for item in it]
return xstarmap
@connect_on_app_finalize
def add_chunk_task(app):
from celery.canvas import chunks as _chunks
@app.task(name='celery.chunks', shared=False, lazy=False)
def chunks(task, it, n):
return _chunks.apply_chunks(task, it, n)
return chunks
@connect_on_app_finalize
def add_group_task(app):
"""No longer used, but here for backwards compatibility."""
from celery.canvas import maybe_signature
from celery.result import result_from_tuple
@app.task(name='celery.group', bind=True, shared=False, lazy=False)
def group(self, tasks, result, group_id, partial_args, add_to_parent=True):
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
taskit = (maybe_signature(task, app=app).clone(partial_args)
for i, task in enumerate(tasks))
with app.producer_or_acquire() as producer:
[stask.apply_async(group_id=group_id, producer=producer,
add_to_parent=False) for stask in taskit]
parent = app.current_worker_task
if add_to_parent and parent:
parent.add_trail(result)
return result
return group
@connect_on_app_finalize
def add_chain_task(app):
"""No longer used, but here for backwards compatibility."""
@app.task(name='celery.chain', shared=False, lazy=False)
def chain(*args, **kwargs):
raise NotImplementedError('chain is not a real task')
return chain
@connect_on_app_finalize
def add_chord_task(app):
"""No longer used, but here for backwards compatibility."""
from celery import chord as _chord
from celery import group
from celery.canvas import maybe_signature
@app.task(name='celery.chord', bind=True, ignore_result=False,
shared=False, lazy=False)
def chord(self, header, body, partial_args=(), interval=None,
countdown=1, max_retries=None, eager=False, **kwargs):
app = self.app
# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([
maybe_signature(s, app=app) for s in tasks
], app=self.app)
body = maybe_signature(body, app=app)
ch = _chord(header, body)
return ch.run(header, body, partial_args, app, interval,
countdown, max_retries, **kwargs)
return chord
|
the-stack_0_27732
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import requests
from .request_params import request_settings
from .utils import get_data_in_format
def get_experiment_info() -> json:
"""Get basic information of experiment.
Returns:
json: Basic information of current experiment.
"""
    get_exp_name_params = {
        "query": "select name from pending_experiments order by time desc limit 1",
    }
    exp_name = requests.get(
        url=request_settings.request_url.value,
        headers=request_settings.request_header.value,
        params=get_exp_name_params
    ).json()
    exp_name = exp_name["dataset"][0][0]
params = {
"query": f"select * from maro.experiments where name='{exp_name}'",
"count": "true"
}
requests.DEFAULT_RETRIES = 5
s = requests.session()
s.keep_alive = False
experiment_info = requests.get(
url=request_settings.request_url.value,
headers=request_settings.request_header.value,
params=params
).json()
data_in_format = get_data_in_format(experiment_info)
experiment_name = data_in_format["name"][0]
episode_params = {
"query": f"select episode, tick from {experiment_name}.port_details order by timestamp asc limit 1",
"count": "true"
}
min_episode = requests.get(
url=request_settings.request_url.value,
headers=request_settings.request_header.value,
params=episode_params
).json()
start_episode_num = int(min_episode["dataset"][0][0])
start_snapshot_num = int(min_episode["dataset"][0][1])
data_in_format["start_episode"] = start_episode_num
data_in_format['start_snapshot'] = start_snapshot_num
total_params = {
"query": f"select count(episode), count(tick) from {experiment_name}.port_details",
"count": "true"
}
total_episode = requests.get(
url=request_settings.request_url.value,
headers=request_settings.request_header.value,
params=total_params
).json()
data_in_format["total_episodes"] = int(total_episode["dataset"][0][0])
data_in_format['durations'] = int(total_episode["dataset"][0][1])
port_number_params = {
"query": f"select count(*) from {experiment_name}.port_details"
f" where episode='{start_episode_num}' and tick='{start_snapshot_num}'",
"count": "true"
}
port_number = requests.get(
url=request_settings.request_url.value,
headers=request_settings.request_header.value,
params=port_number_params
).json()
end_epoch_num = start_episode_num + int(data_in_format["total_episodes"]) - 1
end_tick_num = start_snapshot_num + int(total_episode["dataset"][0][1]) - 1
display_type_params = {
"query": f"select * from {experiment_name}.port_details"
f" where episode='{end_epoch_num}' and tick='{end_tick_num}'",
"count": "true"
}
display_type_response = requests.get(
url=request_settings.request_url.value,
headers=request_settings.request_header.value,
params=display_type_params
).json()
if display_type_response["dataset"] != []:
data_in_format["display_type"] = "local"
else:
data_in_format["display_type"] = "real_time"
data_in_format["port_number"] = int(port_number["dataset"][0][0])
exp_data = data_in_format.to_json(orient='records')
return exp_data
|
the-stack_0_27735
|
from django.conf.urls import patterns, include, url
from tastypie.api import Api
from .api import (FloorResource,
LocationResource,
AccessPointResource,
UserDeviceResource)
from .locate_me_api import LocateMeResource
v1_api = Api(api_name='v1')
v1_api.register(FloorResource())
v1_api.register(LocationResource())
v1_api.register(AccessPointResource())
v1_api.register(LocateMeResource())
v1_api.register(UserDeviceResource())
# url(r'^register-by-token/(?P<backend>[^/]+)/$',
# 'register_by_access_token')
urlpatterns = patterns('marauder.views',
url(r'^api/', include(v1_api.urls)),
url(r'^register-by-token/(?P<backend>[^/]+)/$', 'register_by_access_token'),
)
|
the-stack_0_27736
|
"""Candle Theme API handler."""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib'))
import json
import time
from time import sleep
import requests
import threading
try:
from gateway_addon import APIHandler, APIResponse, Database
#print("succesfully loaded APIHandler and APIResponse from gateway_addon")
except:
print("Import APIHandler and APIResponse from gateway_addon failed. Use at least WebThings Gateway version 0.10")
sys.exit(1)
_TIMEOUT = 3
_CONFIG_PATHS = [
os.path.join(os.path.expanduser('~'), '.webthings', 'config'),
]
if 'WEBTHINGS_HOME' in os.environ:
_CONFIG_PATHS.insert(0, os.path.join(os.environ['WEBTHINGS_HOME'], 'config'))
class CandleThemeAPIHandler(APIHandler):
"""Candle API handler."""
def __init__(self, verbose=False):
"""Initialize the object."""
#print("INSIDE API HANDLER INIT")
self.addon_name = 'candle-theme'
self.running = True
self.server = 'http://127.0.0.1:8080'
self.DEBUG = False
self.persistent_data = {'background_color':"",'hide_floorplan':False, 'zoom':'100%', 'collections':{}}
# Paths
# Get persistent data
try:
self.persistence_file_dir = os.path.join(self.user_profile['dataDir'], self.addon_name)
self.persistence_file_path = os.path.join(self.user_profile['dataDir'], self.addon_name, 'persistence.json')
if not os.path.isdir(self.persistence_file_dir):
os.mkdir(self.persistence_file_dir)
except:
try:
if self.DEBUG:
print("setting persistence file path failed, will try older method.")
self.persistence_file_path = os.path.join(os.path.expanduser('~'), '.webthings', 'data', self.addon_name,'persistence.json')
except:
if self.DEBUG:
print("Double error making persistence file path")
self.persistence_file_path = "/home/pi/.webthings/data/" + self.addon_name + "/persistence.json"
if self.DEBUG:
print("Current working directory: " + str(os.getcwd()))
first_run = False
try:
with open(self.persistence_file_path) as f:
self.persistent_data = json.load(f)
if self.DEBUG:
                    print("Persistence data was loaded successfully.")
except:
first_run = True
print("Could not load persistent data (if you just installed the add-on then this is normal)")
self.persistent_data = {'background_color':"",'hide_floorplan':False, 'collections':{}}
self.save_persistent_data()
if not 'collections' in self.persistent_data:
self.persistent_data['collections'] = {}
if not 'zoom' in self.persistent_data:
self.persistent_data['zoom'] = '100%'
if not 'zoom_everywhere' in self.persistent_data:
self.persistent_data['zoom_everywhere'] = False
if not 'developer' in self.persistent_data:
self.persistent_data['developer'] = False
if not 'allow_pinch_to_zoom' in self.persistent_data:
self.persistent_data['allow_pinch_to_zoom'] = False
if not 'hide_virtual_keyboard' in self.persistent_data:
self.persistent_data['hide_virtual_keyboard'] = False
# LOAD CONFIG
try:
self.add_from_config()
self.save_persistent_data()
except Exception as ex:
print("Error loading config: " + str(ex))
if self.DEBUG:
print("self.persistent_data is now: " + str(self.persistent_data))
# Is there user profile data?
#try:
# print(str(self.user_profile))
#except:
# print("no user profile data")
# Intiate extension addon API handler
try:
manifest_fname = os.path.join(
os.path.dirname(__file__),
'..',
'manifest.json'
)
with open(manifest_fname, 'rt') as f:
manifest = json.load(f)
APIHandler.__init__(self, manifest['id'])
self.manager_proxy.add_api_handler(self)
if self.DEBUG:
#print("self.manager_proxy = " + str(self.manager_proxy))
print("Created new API HANDLER: " + str(manifest['id']))
except Exception as e:
print("Failed to init UX extension API handler: " + str(e))
# Read the settings from the add-on settings page
def add_from_config(self):
"""Attempt to read config data."""
try:
database = Database(self.addon_name)
if not database.open():
print("Could not open settings database")
return
config = database.load_config()
database.close()
except Exception as ex:
print("Error! Failed to open settings database: " + str(ex))
if not config:
print("Error loading config from database")
return
if 'Debugging' in config:
self.DEBUG = bool(config['Debugging'])
if self.DEBUG:
print("-Debugging preference was in config: " + str(self.DEBUG))
if 'Hide floorplan' in config:
self.persistent_data['hide_floorplan'] = bool(config['Hide floorplan'])
if self.DEBUG:
print("-Hide floorplan preference was in config: " + str(self.persistent_data['hide_floorplan']))
if 'Zoom' in config:
self.persistent_data['zoom'] = str(config['Zoom'])
if self.DEBUG:
print("-Zoom preference was in config: " + str(self.persistent_data['zoom']))
if 'Use zoom everywhere' in config:
self.persistent_data['zoom_everywhere'] = bool(config['Use zoom everywhere'])
if self.DEBUG:
print("-Use zoom everywhere preference was in config: " + str(self.persistent_data['zoom_everywhere']))
if 'Allow pinch-to-zoom' in config:
self.persistent_data['allow_pinch_to_zoom'] = bool(config['Allow pinch-to-zoom'])
if self.DEBUG:
print("-Pinch to zoom preference was in config: " + str(self.persistent_data['allow_pinch_to_zoom']))
if 'Hide virtual keyboard' in config:
self.persistent_data['hide_virtual_keyboard'] = bool(config['Hide virtual keyboard'])
if self.DEBUG:
print("-Hide virtual keyboard preference was in config: " + str(self.persistent_data['hide_virtual_keyboard']))
if 'Show developer options' in config:
self.persistent_data['developer'] = bool(config['Show developer options'])
if self.DEBUG:
print("-Show developer options preference was in config: " + str(self.persistent_data['developer']))
# Background color
try:
if 'Background color' in config:
self.persistent_data['background_color'] = str(config['Background color'])
if self.DEBUG:
print("-Background color is present in the config data.")
else:
self.persistent_data['background_color'] = ""
except Exception as ex:
print("Error loading background color preference from settings: " + str(ex))
if self.DEBUG:
print("config: " + str(config))
# Api token
#try:
# if 'Authorization token' in config:
# self.token = str(config['Authorization token'])
# print("-Authorization token is present in the config data.")
#except:
# print("Error loading api token from settings")
#
# HANDLE REQUEST
#
def handle_request(self, request):
"""
Handle a new API request for this handler.
request -- APIRequest object
"""
try:
if request.method != 'POST':
return APIResponse(status=404)
if request.path == '/ajax':
try:
action = str(request.body['action'])
if action == 'init':
if self.DEBUG:
print("in init")
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'debug': self.DEBUG, 'background_color':self.persistent_data['background_color'], 'hide_floorplan':self.persistent_data['hide_floorplan'], 'zoom':self.persistent_data['zoom'],'zoom_everywhere':self.persistent_data['zoom_everywhere'], 'allow_pinch_to_zoom':self.persistent_data['allow_pinch_to_zoom'], 'hide_virtual_keyboard':self.persistent_data['hide_virtual_keyboard'],'developer':self.persistent_data['developer']}),
)
elif action == 'get_collections':
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'state' : 'ok', 'collections': self.persistent_data['collections']}),
)
elif action == 'save_collections':
state = 'ok'
try:
self.persistent_data['collections'] = request.body['collections']
self.save_persistent_data()
except Exception as ex:
print("Error saving collections: " + str(ex))
state = 'error'
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'state' : state, 'collections': self.persistent_data['collections']}),
)
else:
return APIResponse( status=404 )
except Exception as ex:
if self.DEBUG:
print("Error while handling request: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("Error in API handler"),
)
else:
return APIResponse(status=404)
except Exception as e:
if self.DEBUG:
print("Failed to handle UX extension API request: " + str(e))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("API Error"),
)
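    # Hedged usage sketch: the add-on's browser-side extension would typically
    # call this handler with something like (path and payload are assumptions):
    #
    #     window.API.postJson('/extensions/candle-theme/api/ajax', {'action': 'init'})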
def unload(self):
self.running = False
if self.DEBUG:
print("Candle theme shutting down")
def cancel_pairing(self):
"""Cancel the pairing process."""
#print("END OF PAIRING -----------------------------")
#
# SAVE TO PERSISTENCE
#
def save_persistent_data(self):
#if self.DEBUG:
#print("Saving to persistence data store at path: " + str(self.persistence_file_path))
try:
if not os.path.isfile(self.persistence_file_path):
open(self.persistence_file_path, 'a').close()
if self.DEBUG:
print("Created an empty persistence file")
#else:
# if self.DEBUG:
# print("Persistence file existed. Will try to save to it.")
with open(self.persistence_file_path) as f:
if self.DEBUG:
print("saving persistent data: " + str(self.persistent_data))
json.dump( self.persistent_data, open( self.persistence_file_path, 'w+' ) )
return True
except Exception as ex:
print("Error: could not store data in persistent store: " + str(ex) )
return False
|
the-stack_0_27737
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
The info command gets the information on a Sheep or XP and can be used
to retrieve the job status, logs etc.
"""
from functools import partial
import json
import os
import shutil
import sys
from .main import DecoratedMain
from .shep import Shepherd
from .log import simple_log, fatal
log = partial(simple_log, "Info:")
def info_action(args, main: DecoratedMain):
shepherd = Shepherd(main)
if args.job_id is not None:
if len(args.argv) > 0:
fatal("If a job id is provided, you shouldn't pass argv.")
sheep = shepherd.get_sheep_from_job_id(args.job_id)
if sheep is None:
fatal("Could not find any matching sheep")
else:
sheep = shepherd.get_sheep_from_argv(args.argv)
log("Found sheep", sheep)
log("Folder is", sheep.xp.folder)
if sheep.log:
log("Main log is", sheep.log)
if args.metrics:
metrics = main.get_xp_history(sheep.xp)
out = f"Metrics[{len(metrics)}]: "
if metrics:
out += json.dumps(metrics[-1])
log(out)
if args.cancel:
if sheep.job is None:
log("Could not cancel non existing job")
elif sheep.is_done():
log("Job is not running")
else:
sheep.job.cancel()
if args.log:
if sheep.log is None:
fatal("No log, sheep hasn't been scheduled yet.")
if not sheep.log.exists():
fatal(f"Log {sheep.log} does not exist")
shutil.copyfileobj(open(sheep.log, "r"), sys.stdout, 4096)
if args.tail:
if not sheep.log.exists():
fatal(f"Log {sheep.log} does not exist")
os.execvp("tail", ["tail", "-n", "200", "-f", sheep.log])
|
the-stack_0_27738
|
# -*- coding: utf-8 -*-
import datetime
import json
import gettext
class Sites(object):
__tablename__ = 'sites'
id = 0
title = ""
website = ""
active = 1
def __init__(self, id, website):
self.id = id
self.website = website
def todata(self):
result = {
'id': self.id,
'website': self.website
}
return result
@staticmethod
def fieldnames():
result = ['id', 'website']
return result
def __repr__(self):
return '<Site %r>' % self.title
class Rating(object):
overall = -1
overall_count = 1
overall_review = ''
integrity_and_security = -1
integrity_and_security_count = 1
integrity_and_security_review = ''
performance = -1
performance_count = 1
performance_review = ''
standards = -1
standards_count = 1
standards_review = ''
a11y = -1
a11y_count = 1
a11y_review = ''
_ = False
is_set = False
def __init__(self, _=None):
        # don't know anything we want to do yet
self.overall = -1
self._ = _
def set_overall(self, points, review=''):
if(points < 1.0):
self.overall = 1.0
elif(points > 5.0):
self.overall = 5.0
else:
self.overall = points
self.overall_review = review
self.is_set = True
def get_overall(self):
return self.transform_value(self.overall / self.overall_count)
def set_integrity_and_security(self, points, review=''):
if(points < 1.0):
self.integrity_and_security = 1.0
elif(points > 5.0):
self.integrity_and_security = 5.0
else:
self.integrity_and_security = points
self.integrity_and_security_review = review
self.is_set = True
def get_integrity_and_security(self):
return self.transform_value(self.integrity_and_security / self.integrity_and_security_count)
def set_performance(self, points, review=''):
if(points < 1.0):
self.performance = 1.0
elif(points > 5.0):
self.performance = 5.0
else:
self.performance = points
self.performance_review = review
self.is_set = True
def get_performance(self):
return self.transform_value(self.performance / self.performance_count)
def set_standards(self, points, review=''):
if(points < 1.0):
self.standards = 1.0
elif(points > 5.0):
self.standards = 5.0
else:
self.standards = points
self.standards_review = review
self.is_set = True
def get_standards(self):
return self.transform_value(self.standards / self.standards_count)
def set_a11y(self, points, review=''):
if(points < 1.0):
self.a11y = 1.0
elif(points > 5.0):
self.a11y = 5.0
else:
self.a11y = points
self.a11y_review = review
self.is_set = True
def get_a11y(self):
return self.transform_value(self.a11y / self.a11y_count)
def isused(self):
return self.is_set
def transform_value(self, value):
return float("{0:.2f}".format(value))
def get_reviews(self):
text = self._('TEXT_TEST_REVIEW_OVERVIEW').format(self.overall_review)
if (self.get_integrity_and_security() != -1 and self.integrity_and_security_review != ''):
text += self._('TEXT_TEST_REVIEW_INTEGRITY_SECURITY').format(
self.integrity_and_security_review)
if (self.get_performance() != -1 and self.performance_review != ''):
text += self._('TEXT_TEST_REVIEW_PERFORMANCE').format(
self.performance_review)
if (self.get_a11y() != -1 and self.a11y_review != ''):
text += self._('TEXT_TEST_REVIEW_ALLY').format(self.a11y_review)
if (self.get_standards() != -1 and self.standards_review != ''):
text += self._('TEXT_TEST_REVIEW_STANDARDS').format(
self.standards_review)
return text
def todata(self):
result = {
'rating_overall': self.get_overall(),
'rating_security': self.get_integrity_and_security(),
'rating_performance': self.get_performance(),
'rating_standards': self.get_standards(),
'rating_a11y': self.get_a11y()
}
return result
@staticmethod
def fieldnames():
result = ['rating_overall', 'rating_integrity_and_security',
'rating_performance', 'rating_standards', 'rating_a11y']
return result
def __add__(self, other):
if (not isinstance(other, Rating)):
raise TypeError
else:
if self._ != None:
tmp = Rating(self._)
else:
tmp = Rating(other._)
tmp_value = tmp.get_combined_value(
self.overall, self.overall_count, other.overall, other.overall_count)
if (tmp_value[0] != -1):
tmp.is_set = True
tmp.overall = tmp_value[0]
tmp.overall_count = tmp_value[1]
tmp.overall_review = self.overall_review + \
other.overall_review
tmp_value = tmp.get_combined_value(
self.integrity_and_security, self.integrity_and_security_count, other.integrity_and_security, other.integrity_and_security_count)
if (tmp_value[0] != -1):
tmp.is_set = True
tmp.integrity_and_security = tmp_value[0]
tmp.integrity_and_security_count = tmp_value[1]
tmp.integrity_and_security_review = self.integrity_and_security_review + \
other.integrity_and_security_review
tmp_value = tmp.get_combined_value(
self.performance, self.performance_count, other.performance, other.performance_count)
if (tmp_value[0] != -1):
tmp.is_set = True
tmp.performance = tmp_value[0]
tmp.performance_count = tmp_value[1]
tmp.performance_review = self.performance_review + other.performance_review
tmp_value = tmp.get_combined_value(
self.standards, self.standards_count, other.standards, other.standards_count)
if (tmp_value[0] != -1):
tmp.is_set = True
tmp.standards = tmp_value[0]
tmp.standards_count = tmp_value[1]
tmp.standards_review = self.standards_review + other.standards_review
tmp_value = tmp.get_combined_value(
self.a11y, self.a11y_count, other.a11y, other.a11y_count)
if (tmp_value[0] != -1):
tmp.is_set = True
tmp.a11y = tmp_value[0]
tmp.a11y_count = tmp_value[1]
tmp.a11y_review = self.a11y_review + other.a11y_review
return tmp
def get_combined_value(self, val1, val1_count, val2, val2_count):
val1_has_value = val1 != -1
val2_has_value = val2 != -1
if (val1_has_value and val2_has_value):
return (val1 + val2, val1_count + val2_count)
elif (not val1_has_value and not val2_has_value):
return (-1, 1)
elif(val1_has_value):
return (val1, val1_count)
else:
return (val2, val2_count)
def __repr__(self):
text = self._('TEXT_TEST_RATING_OVERVIEW').format(self.get_overall())
if (self.get_integrity_and_security() != -1):
text += self._('TEXT_TEST_RATING_INTEGRITY_SECURITY').format(
self.get_integrity_and_security())
if (self.get_performance() != -1):
text += self._('TEXT_TEST_RATING_PERFORMANCE').format(self.get_performance())
if (self.get_a11y() != -1):
text += self._('TEXT_TEST_RATING_ALLY').format(self.get_a11y())
if (self.get_standards() != -1):
text += self._('TEXT_TEST_RATING_STANDARDS').format(
self.get_standards())
return text
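def _example_combine_ratings():
    # Illustrative sketch only: Rating objects can be combined with ``+``; the
    # identity lambda stands in for a real gettext translation function.
    _ = lambda text: text
    first = Rating(_)
    first.set_overall(4.0, 'first review\n')
    second = Rating(_)
    second.set_overall(2.0, 'second review\n')
    combined = first + second
    # get_overall() averages the accumulated points: (4.0 + 2.0) / 2 == 3.0
    return combined.get_overall()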
class SiteTests(object):
__tablename__ = 'sitetests'
site_id = 0
id = 0
test_date = datetime.datetime.now()
type_of_test = 0
check_report = ""
check_report_sec = ""
check_report_perf = ""
check_report_a11y = ""
check_report_stand = ""
json_check_data = ""
most_recent = 1
rating = -1 # rating from 1-5 on how good the results were
rating_sec = -1 # rating from 1-5 on how good the results were
rating_perf = -1 # rating from 1-5 on how good the results were
rating_a11y = -1 # rating from 1-5 on how good the results were
rating_stand = -1 # rating from 1-5 on how good the results were
def __init__(self, site_id, type_of_test, rating, test_date, json_check_data):
self.site_id = site_id
self.type_of_test = type_of_test
self.check_report = self.encode_review(rating.overall_review)
self.check_report_sec = self.encode_review(
rating.integrity_and_security_review)
self.check_report_perf = self.encode_review(rating.performance_review)
self.check_report_a11y = self.encode_review(rating.a11y_review)
self.check_report_stand = self.encode_review(rating.standards_review)
self.rating = rating.get_overall()
self.rating_sec = rating.get_integrity_and_security()
self.rating_perf = rating.get_performance()
self.rating_a11y = rating.get_a11y()
self.rating_stand = rating.get_standards()
self.test_date = test_date
self.json_check_data = json_check_data
def encode_review(self, review):
review_encoded = str(review).encode(
            'utf-8')  # encode as UTF-8 to avoid encoding problems
return review_encoded
def todata(self):
result = [{
'site_id': self.site_id,
'type_of_test': self.type_of_test,
'rating': self.rating,
'rating_sec': self.rating_sec,
'rating_perf': self.rating_perf,
'rating_a11y': self.rating_a11y,
'rating_stand': self.rating_stand,
'date': self.test_date.isoformat(),
'report': self.check_report.decode('utf-8'),
'report_sec': self.check_report_sec.decode('utf-8'),
'report_perf': self.check_report_perf.decode('utf-8'),
'report_a11y': self.check_report_a11y.decode('utf-8'),
'report_stand': self.check_report_stand.decode('utf-8'),
'data': self.json_check_data.decode('utf-8')
}]
return result
@staticmethod
def fieldnames():
result = ['site_id', 'type_of_test',
'rating', 'rating_sec', 'rating_perf',
'rating_a11y', 'rating_stand',
'date', 'report', 'report_sec', 'report_perf', 'report_a11y', 'report_stand', 'data']
return result
def __repr__(self):
return '<SiteTest %r>' % self.test_date
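# A minimal export sketch (assumptions: `sitetest` is an already-constructed SiteTests
# instance like the ones built above, and CSV is just one possible consumer of the
# fieldnames()/todata() pair; the output path is a placeholder):
def example_export_sitetest_to_csv(sitetest, output_path='sitetest_example.csv'):
    import csv
    with open(output_path, 'w', newline='', encoding='utf-8') as handle:
        writer = csv.DictWriter(handle, fieldnames=SiteTests.fieldnames())
        writer.writeheader()
        for row in sitetest.todata():
            writer.writerow(row)
    return output_path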
|
the-stack_0_27739
|
from __future__ import print_function, unicode_literals
import requests
import json
import frappe
from six import iteritems, string_types
'''
FrappeClient is a library that helps you connect with other frappe systems
'''
class AuthError(Exception):
pass
class SiteExpiredError(Exception):
pass
class FrappeException(Exception):
pass
class FrappeClient(object):
def __init__(self, url, username=None, password=None, verify=True):
self.headers = dict(Accept='application/json')
self.verify = verify
self.session = requests.session()
self.url = url
# login if username/password provided
if username and password:
self._login(username, password)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.logout()
def _login(self, username, password):
		'''Login/start a session. Called internally on init'''
r = self.session.post(self.url, data={
'cmd': 'login',
'usr': username,
'pwd': password
}, verify=self.verify, headers=self.headers)
if r.status_code==200 and r.json().get('message') in ("Logged In", "No App"):
return r.json()
else:
if json.loads(r.text).get('exc_type') == "SiteExpiredError":
raise SiteExpiredError
raise AuthError
def logout(self):
'''Logout session'''
self.session.get(self.url, params={
'cmd': 'logout',
}, verify=self.verify, headers=self.headers)
def get_list(self, doctype, fields='"*"', filters=None, limit_start=0, limit_page_length=0):
"""Returns list of records of a particular type"""
if not isinstance(fields, string_types):
fields = json.dumps(fields)
params = {
"fields": fields,
}
if filters:
params["filters"] = json.dumps(filters)
if limit_page_length:
params["limit_start"] = limit_start
params["limit_page_length"] = limit_page_length
res = self.session.get(self.url + "/api/resource/" + doctype, params=params, verify=self.verify, headers=self.headers)
return self.post_process(res)
def insert(self, doc):
'''Insert a document to the remote server
:param doc: A dict or Document object to be inserted remotely'''
res = self.session.post(self.url + "/api/resource/" + doc.get("doctype"),
data={"data":frappe.as_json(doc)}, verify=self.verify, headers=self.headers)
return self.post_process(res)
def insert_many(self, docs):
'''Insert multiple documents to the remote server
:param docs: List of dict or Document objects to be inserted in one request'''
return self.post_request({
"cmd": "frappe.client.insert_many",
"docs": frappe.as_json(docs)
})
def update(self, doc):
'''Update a remote document
:param doc: dict or Document object to be updated remotely. `name` is mandatory for this'''
url = self.url + "/api/resource/" + doc.get("doctype") + "/" + doc.get("name")
res = self.session.put(url, data={"data":frappe.as_json(doc)}, verify=self.verify, headers=self.headers)
return self.post_process(res)
def bulk_update(self, docs):
'''Bulk update documents remotely
:param docs: List of dict or Document objects to be updated remotely (by `name`)'''
return self.post_request({
"cmd": "frappe.client.bulk_update",
"docs": frappe.as_json(docs)
})
def delete(self, doctype, name):
'''Delete remote document by name
:param doctype: `doctype` to be deleted
:param name: `name` of document to be deleted'''
return self.post_request({
"cmd": "frappe.client.delete",
"doctype": doctype,
"name": name
})
def submit(self, doc):
'''Submit remote document
:param doc: dict or Document object to be submitted remotely'''
return self.post_request({
"cmd": "frappe.client.submit",
"doc": frappe.as_json(doc)
})
def get_value(self, doctype, fieldname=None, filters=None):
		'''Returns a value from a document
:param doctype: DocType to be queried
:param fieldname: Field to be returned (default `name`)
:param filters: dict or string for identifying the record'''
return self.get_request({
"cmd": "frappe.client.get_value",
"doctype": doctype,
"fieldname": fieldname or "name",
"filters": frappe.as_json(filters)
})
def set_value(self, doctype, docname, fieldname, value):
'''Set a value in a remote document
:param doctype: DocType of the document to be updated
:param docname: name of the document to be updated
:param fieldname: fieldname of the document to be updated
:param value: value to be updated'''
return self.post_request({
"cmd": "frappe.client.set_value",
"doctype": doctype,
"name": docname,
"fieldname": fieldname,
"value": value
})
def cancel(self, doctype, name):
'''Cancel a remote document
:param doctype: DocType of the document to be cancelled
:param name: name of the document to be cancelled'''
return self.post_request({
"cmd": "frappe.client.cancel",
"doctype": doctype,
"name": name
})
def get_doc(self, doctype, name="", filters=None, fields=None):
'''Returns a single remote document
:param doctype: DocType of the document to be returned
:param name: (optional) `name` of the document to be returned
:param filters: (optional) Filter by this dict if name is not set
		:param fields: (optional) Fields to be returned, will return everything if not set'''
params = {}
if filters:
params["filters"] = json.dumps(filters)
if fields:
params["fields"] = json.dumps(fields)
res = self.session.get(self.url + "/api/resource/" + doctype + "/" + name,
params=params, verify=self.verify, headers=self.headers)
return self.post_process(res)
def rename_doc(self, doctype, old_name, new_name):
'''Rename remote document
:param doctype: DocType of the document to be renamed
:param old_name: Current `name` of the document to be renamed
:param new_name: New `name` to be set'''
params = {
"cmd": "frappe.client.rename_doc",
"doctype": doctype,
"old_name": old_name,
"new_name": new_name
}
return self.post_request(params)
def migrate_doctype(self, doctype, filters=None, update=None, verbose=1, exclude=None, preprocess=None):
"""Migrate records from another doctype"""
meta = frappe.get_meta(doctype)
tables = {}
for df in meta.get_table_fields():
if verbose: print("getting " + df.options)
tables[df.fieldname] = self.get_list(df.options, limit_page_length=999999)
# get links
if verbose: print("getting " + doctype)
docs = self.get_list(doctype, limit_page_length=999999, filters=filters)
# build - attach children to parents
if tables:
docs = [frappe._dict(doc) for doc in docs]
docs_map = dict((doc.name, doc) for doc in docs)
for fieldname in tables:
for child in tables[fieldname]:
child = frappe._dict(child)
if child.parent in docs_map:
docs_map[child.parent].setdefault(fieldname, []).append(child)
if verbose: print("inserting " + doctype)
for doc in docs:
if exclude and doc["name"] in exclude:
continue
if preprocess:
preprocess(doc)
if not doc.get("owner"):
doc["owner"] = "Administrator"
if doctype != "User" and not frappe.db.exists("User", doc.get("owner")):
frappe.get_doc({"doctype": "User", "email": doc.get("owner"),
"first_name": doc.get("owner").split("@")[0] }).insert()
if update:
doc.update(update)
doc["doctype"] = doctype
new_doc = frappe.get_doc(doc)
new_doc.insert()
if not meta.istable:
if doctype != "Communication":
self.migrate_doctype("Communication", {"reference_doctype": doctype, "reference_name": doc["name"]},
update={"reference_name": new_doc.name}, verbose=0)
if doctype != "File":
self.migrate_doctype("File", {"attached_to_doctype": doctype,
"attached_to_name": doc["name"]}, update={"attached_to_name": new_doc.name}, verbose=0)
def migrate_single(self, doctype):
doc = self.get_doc(doctype, doctype)
doc = frappe.get_doc(doc)
# change modified so that there is no error
doc.modified = frappe.db.get_single_value(doctype, "modified")
frappe.get_doc(doc).insert()
def get_api(self, method, params={}):
res = self.session.get(self.url + "/api/method/" + method + "/",
params=params, verify=self.verify, headers=self.headers)
return self.post_process(res)
def post_api(self, method, params={}):
res = self.session.post(self.url + "/api/method/" + method + "/",
params=params, verify=self.verify, headers=self.headers)
return self.post_process(res)
def get_request(self, params):
res = self.session.get(self.url, params=self.preprocess(params), verify=self.verify, headers=self.headers)
res = self.post_process(res)
return res
def post_request(self, data):
res = self.session.post(self.url, data=self.preprocess(data), verify=self.verify, headers=self.headers)
res = self.post_process(res)
return res
def preprocess(self, params):
"""convert dicts, lists to json"""
for key, value in iteritems(params):
if isinstance(value, (dict, list)):
params[key] = json.dumps(value)
return params
def post_process(self, response):
try:
rjson = response.json()
except ValueError:
print(response.text)
raise
if rjson and ("exc" in rjson) and rjson["exc"]:
try:
exc = json.loads(rjson["exc"])[0]
exc = 'FrappeClient Request Failed\n\n' + exc
except Exception:
exc = rjson["exc"]
raise FrappeException(exc)
if 'message' in rjson:
return rjson['message']
elif 'data' in rjson:
return rjson['data']
else:
return None
class FrappeOAuth2Client(FrappeClient):
def __init__(self, url, access_token, verify=True):
self.access_token = access_token
self.headers = {
"Authorization": "Bearer " + access_token,
"content-type": "application/x-www-form-urlencoded"
}
self.verify = verify
self.session = OAuth2Session(self.headers)
self.url = url
def get_request(self, params):
res = requests.get(self.url, params=self.preprocess(params), headers=self.headers, verify=self.verify)
res = self.post_process(res)
return res
def post_request(self, data):
res = requests.post(self.url, data=self.preprocess(data), headers=self.headers, verify=self.verify)
res = self.post_process(res)
return res
class OAuth2Session():
def __init__(self, headers):
self.headers = headers
def get(self, url, params, verify):
res = requests.get(url, params=params, headers=self.headers, verify=verify)
return res
def post(self, url, data, verify):
res = requests.post(url, data=data, headers=self.headers, verify=verify)
return res
def put(self, url, data, verify):
res = requests.put(url, data=data, headers=self.headers, verify=verify)
return res
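# A minimal usage sketch (assumptions: the URL, credentials and the "ToDo" doctype below are
# hypothetical placeholders; running it requires a reachable Frappe site):
def example_fetch_todos(url="https://frappe.example.com", username="user@example.com", password="secret"):
	'''Fetch up to 20 ToDo records using the context-manager form of FrappeClient.'''
	with FrappeClient(url, username, password) as client:
		todos = client.get_list("ToDo", fields=["name", "description"], limit_page_length=20)
	return todos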
|
the-stack_0_27741
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import time
from novaclient import client as novaclient
from novaclient import exceptions as nova_exections
def nova_client(session):
return novaclient.Client('2.1', session=session)
def get_keypair(nova, name_or_id):
try:
keypair = nova.keypairs.get(name_or_id)
return keypair
except nova_exections.NotFound:
return nova.keypairs.find(name=name_or_id)
def create_keypair(nova, name, pub_key):
nova.keypairs.create(name, pub_key)
def delete_keypair(nova, name):
try:
keypair = get_keypair(nova, name)
nova.keypairs.delete(keypair)
except nova_exections.NotFound:
pass
def get_flavor(nova, name_or_id):
try:
flavor = nova.flavors.get(name_or_id)
return flavor
except nova_exections.NotFound:
return nova.flavors.find(name=name_or_id)
def get_server(nova, name_or_id):
try:
server = nova.servers.get(name_or_id)
return server
except nova_exections.NotFound:
return nova.servers.find(name=name_or_id)
def wait_instance(
nova,
instance,
timeout=300,
target_states=('active', 'shutoff'),
    transition_states=('build',),
):
_timeout = 0
status = instance.status.lower()
while status not in target_states:
if status not in transition_states:
raise RuntimeError(
                'Failed to reach state in %s for server %s (%s)' % (
target_states,
instance.name,
instance.status
)
)
sys.stderr.write(
            'Waiting for server to reach %s: %s (%s)\n' % (
target_states,
instance.name,
instance.status)
)
time.sleep(5)
_timeout += 5
if _timeout > timeout:
raise RuntimeError("Timeout!")
instance = nova.servers.get(instance.id)
status = instance.status.lower()
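# A minimal composition sketch (assumptions: `session` is an authenticated keystoneauth1
# session and the server name below is a hypothetical placeholder):
def example_wait_for_server(session, server_name='demo-server'):
    nova = nova_client(session)
    server = get_server(nova, server_name)
    wait_instance(nova, server, timeout=600)
    return nova.servers.get(server.id)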
|
the-stack_0_27743
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4), 'axis': 0, 'y_shape': (2, 3, 4)},
{'shape': (3, 4), 'axis': 1, 'y_shape': (3, 2, 4)},
{'shape': (3, 4), 'axis': 2, 'y_shape': (3, 4, 2)},
{'shape': (3, 4), 'axis': -1, 'y_shape': (3, 4, 2)},
{'shape': (3, 4), 'axis': -2, 'y_shape': (3, 2, 4)},
{'shape': (3, 4), 'axis': -3, 'y_shape': (2, 3, 4)},
{'shape': (), 'axis': 0, 'y_shape': (2,)},
{'shape': (), 'axis': -1, 'y_shape': (2,)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
class TestStack(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
]
self.g = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
def check_forward(self, xs_data):
xs = [chainer.Variable(x) for x in xs_data]
y = functions.stack(xs, axis=self.axis)
if hasattr(numpy, 'stack'):
# run test only with numpy>=1.10
expect = numpy.stack(self.xs, axis=self.axis)
testing.assert_allclose(y.data, expect)
y_data = cuda.to_cpu(y.data)
self.assertEqual(y_data.shape[self.axis], 2)
numpy.testing.assert_array_equal(
y_data.take(0, axis=self.axis), self.xs[0])
numpy.testing.assert_array_equal(
y_data.take(1, axis=self.axis), self.xs[1])
def test_forward_cpu(self):
self.check_forward(self.xs)
@attr.gpu
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(x) for x in self.xs])
def check_backward(self, xs_data, g_data):
def func(*xs):
return functions.stack(xs, self.axis)
gradient_check.check_backward(
func, xs_data, g_data, eps=2.0 ** -2, dtype='d')
def test_backward_cpu(self):
self.check_backward(self.xs, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(x) for x in self.xs], cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
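# A minimal standalone sketch of the forward behaviour exercised above (the (3, 4) shape and
# axis=1 mirror one of the parameterized cases; not part of the test suite itself):
def example_stack_forward():
    xs = [numpy.zeros((3, 4), dtype=numpy.float32),
          numpy.ones((3, 4), dtype=numpy.float32)]
    y = functions.stack(xs, axis=1)
    assert y.shape == (3, 2, 4)
    return y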
|
the-stack_0_27745
|
# -*- coding: utf-8 -*-
from scout.commands import cli
def test_load_research(mock_app, case_obj):
"""Testing the load research cli command"""
runner = mock_app.test_cli_runner()
assert runner
# Test command without case_id:
result = runner.invoke(cli, ["load", "research"])
assert result.exit_code == 0
assert "Get cases with query {'research_requested': True}" in result.output
# Test command providing a case_id:
result = runner.invoke(cli, ["load", "research", "-c", case_obj["_id"]])
assert result.exit_code == 0
# Test command providing case_id, institute and force flag:
result = runner.invoke(
cli, ["load", "research", "-c", case_obj["_id"], "-i", case_obj["owner"], "-f"]
)
assert result.exit_code == 0
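# A minimal direct-invocation sketch (assumptions: scout's CLI is built on click, so
# click.testing.CliRunner can drive it; "internal_id" is a placeholder case id, and outside
# the mock_app fixture this only succeeds against a configured scout database):
def example_invoke_load_research(case_id="internal_id"):
    from click.testing import CliRunner
    runner = CliRunner()
    return runner.invoke(cli, ["load", "research", "-c", case_id])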
|
the-stack_0_27746
|
"""A lightweight, decoupled wrapper for dynamic class assignment."""
import functools
from types import FunctionType
"""These are tied to the operation of this module (along with __class__) - try not to step on them!"""
_ACTIVE_CLASS = "_active_class"
_PROTECTED_SELF = "_protected_self"
_SHOULD_DECORATE_FLAG = "_protect_self_reference"
QUEEN = "queen"
DRONE = "drone"
"""These are just default settings for the wrapper. It might help to read these, but you're fine without them."""
DEFAULT_NO_DECORATES_ON_ANY_IN_INHERITANCE_TREE = { # These attrs are never wrapped to inject self.queen for self.
"__new__", "__init__", "__del__", "__dict__", # However, if called on "self" instead of "self.drone",
"__getattr__", "__delattr__", "__getattribute__", "__setattr__", # and the queen is designed not to forward it
QUEEN, DRONE, # then the queen's method will be called and self will be queen regardless of self.queen injection.
"__eq__", "__hash__", "__ne__", # (see MAGIC_NO_REDIRECT_METHODS for methods which queen will not forward)
"__str__", "__repr__", "__format__",
"__set__", "__delete__", "__get__",
"__prepare__", "__init_subclass__",
"__traceback__", "__wrapped__ ",
"__qualname__", "__self__",
"__defaults__", "__kwdefaults__",
"__globals__", "__closure__",
"__subclasshook__",
}
DEFAULT_FORCED_DECORATES_ON_DECORATED_CLASS_ONLY = { # These methods are always affected in an @assimilated class.
"__new__", "__init__", "__setattr__" # new and init and both called as controlled by queen,
} # and setattr will always check if attr should be wrapped. However, you can still access them through self.drone.
"""If you plan on using magic methods, this section of constants is for you!"""
REDIRECT_METHODS = { # These all auto-redirect if bound to an instance which implements them.
"__isabstractmethod__",
"__objclass__", "__set_name__",
"__mro_entries__", "__classcell__", "__class_getitem__",
"__func__", "__annotations__",
"__file__", "__module__",
"__copy__", "__deepcopy__",
"__set__", "__delete__", "__get__",
"__delitem__", "__setitem__", "__missing__", "__getitem__",
"__contains__", "__reversed__", "__iter__",
"__anext__", "__next__", "__aiter__",
"__length_hint__", "__len__",
"__getinitargs__", "__getnewargs__", "__getstate__", "__setstate__", "__reduce__", "__reduce_ex__",
"__enter__", "__exit__", "__aenter__", "__aexit__",
"__call__", "__await__",
"__float__", "__int__", "__index__",
"__complex__", "__invert__",
"__ceil__", "__floor__", "__trunc__", "__round__",
"__abs__", "__pos__", "__neg__",
"__lt__", "__le__", "__gt__", "__ge__",
}
REDIRECT_I_R_ABLE_METHODS = { # These all auto-redirect if bound to an instance which implements them, along with
"__add__", "__sub__", # their in-place- and right- versions. (For example: (__add__, __iadd__, __radd__))
"__mul__", "__matmul__",
"__truediv__", "__floordiv__",
"__mod__", "__divmod__",
"__pow__",
"__lshift__", "__rshift__",
"__and__", "__xor__"
}
MAGIC_CONTROLLED_REDIRECT_METHODS = { # These end up redirecting either through wrapper implementation or __class__.
"__class__", "__dict__", "__doc__",
"__repr__", "__str__", "__format__"
"__bool__", "__sizeof__", # bool evaluates to False for unbound BorgPods instances, redirects otherwise.
"__name__", "__dir__",
"__mro__", "__bases__",
"__instancecheck__", "__subclasscheck__",
}
MAGIC_NO_REDIRECT_METHODS = { # These do not redirect either because they cannot for consistency / "should not".
"__prepare__", "__init_subclass__", # I believe these will be handled by __class__ - untested.
"__traceback__", "__wrapped__ ",
"__qualname__", "__self__",
"__defaults__", "__kwdefaults__", # I don't have a great reason for not forwarding these attrs other than utility.
"__globals__", "__closure__",
"__code__", "__del__", "__slots__", # Will not support __slots__ as is.
"__eq__", "__ne__", "__hash__", # Hash consistency with equality requirement, and binding != for no surprises.
"__subclasshook__", "__getattribute__",
"__setattr__", "__delattr__", "__getattr__"
"__weakref__", "__init__", "__new__", # Init and new are still called in any wrapped class,
} # you just can't access either post-init from self.__init__() as expected.
# deprecated: __unicode__ __nonzero__ __div__ __coerce__ __cmp__
# (TODO Customize?): "__copy__", "__deepcopy__",
# (TODO Customize?): "__getinitargs__", "__getnewargs__", "__getstate__", "__setstate__", "__reduce__", "__reduce_ex__"
def resist(this_function): # Note: This is the first of 3 module attributes you should know about!
"""
Use this @wrapper to prevent a self.queen reference being passed as self in this_function for any @assimilate class.
:Parameters:
:param Function this_function: A method which utilizes an object instance as the first implicit pos argument.
:rtype: Function
:return Function this_function: The same method, but with a flag attribute to prevent self.queen injection wrapper.
"""
this_function._protect_self_reference = False
return this_function
def _should_protect_self_access(attr, value):
"""
Determines if value method tied to class/instance attribute should be wrapped to protect access to self by
substituting self.queen. True if:
attr is not hardcoded to be ignored, and
value is an instance of FunctionType (as are methods utilizing self), and
value has not been flagged with _SHOULD_DECORATE_FLAG ("_protect_self_reference") as False.
:Parameters:
:param str attr: The attribute key to be associated with value in the relevant Class.
:param value: Value attr is being set to. Modified if value's key, type, and attributes meet requirements
listed at top of docstring.
:rtype: bool
:return: True if value and attr key meet requirements at top of docstring, else False.
"""
return (
attr not in DEFAULT_NO_DECORATES_ON_ANY_IN_INHERITANCE_TREE and isinstance(value, FunctionType)
and getattr(value, _SHOULD_DECORATE_FLAG, True)
)
def _safe_self_access_decorator(wrapped_method):
"""
Replaces method "self" arguments with "self.queen" implicitly due to the overwhelmingly more common use intentions.
:param Function wrapped_method: The method which utilizes self to be modified.
:rtype: Function
:return: The wrapped method.
"""
@functools.wraps(wrapped_method)
def method_wrapper(self, *args, **kwargs):
if hasattr(self, QUEEN):
return wrapped_method(self.queen, *args, **kwargs)
return wrapped_method(self, *args, **kwargs)
return method_wrapper
def _modify_methods_for_self_reference(this_class):
"""
Modifies relevant methods in this_class to protect references to self per method if _should_protect_self_access
returns True when passed *(attribute_key, method_value).
:param Class this_class: @assimilate decorated class or parent class with methods to be protected if self.queen
exists in the instance.
:rtype: None
:return: None
"""
for c_attribute, c_method in this_class.__dict__.copy().items():
if _should_protect_self_access(c_attribute, c_method):
setattr(this_class, c_attribute, _safe_self_access_decorator(c_method))
c_method._protect_self_reference = False
def _borg_pod_set_with_safe_self_access(wrapped_method):
"""
Wrapper for __setattr__ methods in @assimilate decorated classes to apply self.queen injection wrapper on any
relevant instance methods set during runtime.
:param Function wrapped_method: A @assimilate decorated class's __setattr__ method.
:rtype: Function
:return: The decorated __setattr__ method.
"""
@functools.wraps(wrapped_method)
def setter_wrapper(self, attribute, value):
if _should_protect_self_access(attribute, value):
value = _safe_self_access_decorator(value)
wrapped_method(self, attribute, value)
return setter_wrapper
def assimilate(_wrapped_class=None, *, default_class=None): # Note: This is the main module attribute you should know!
"""
Wraps a class such that its instances can be converted to another @assimilate'd class while preserving its
attributes and ID, and performing that conversion across all references to that instance. Unlike the singleton
pattern, there can be multiple unique IDs per any @assimilate class. Unlike the borg idiom, those sharing a set
of attributes also share an ID. This gives objects a dynamic (over time, but not over shared states) class
decoupled from each potential class's implementation.
:Parameters:
:param Class _wrapped_class: The class with instances to be incorporated into individual borg pods.
        :param Class default_class: The class of the proxy object to be searched for if _base_class is not provided at
init (instance class conversion) time, as well as the class of the proxy object to be created if no
instance is found.
:rtype: Class
:return: wrapped_class with the proper state-setting and self-reference-preserving wrappers around instance methods.
:What:
A new design pattern / idiom / anti-pattern / affront to the light of Heaven, depending on your view, wrapped up
in an easy to deploy decorator! It's a:
Borg (shared state, unique instances)
Singleton (shared state, shared ID / memory location)
Bridge (object interacted with does not contain the implementation logic)
It's also:
not a Borg (ID/memory location is consistent between shared states) and
not a Singleton (can create multiple instances) and
not a Bridge. (object's implementation and abstraction are bound, and at the same time completely free to
change due to borg-like state-sharing and the strengths of __new__ for controlling return and init())
:What^2:
You might ask, isn't this a State? A strategy? Take a look at the implementation. In terms of developer
strain, working with a state machine is... not ideal. It's a hassle to implement, and in most cases it's
preferable to treat everything as if it were the same object and not need to worry about what is being
accessed from where. Why can't I put a decorator above the class and be done with it? Why can't I change
a class on the fly? Now you can. The Borg Singleton State Facade. THE BORG POD.
:How to Use:
Take a class you wish to use in a borg pod, and put @assimilate above the class definition - that's the gist. If
you want to create a new borg pod (a new shared state and shared ID set) you may just call:
YourAssimilatedClass(); when you wish to convert an instance of another borg pod class to that class:
YourAssimilatedClass(other_assimilated_instance); you may optionally use:
YourAssimilatedClass(some, args, queen=other_assimilated_instance, key=word, arguments=here); or:
YourAssimilatedClass(some, args, other_assimilated_instance, other, args, kw=args); as,
if queen is not provided, the wrapper will pick out and remove the first _base_class instance (if
_base_class provided as kwarg) or default_class (if not provided) instance from args. You may also just
call YourAssimilatedClass(), and it will instance a new base_class() and use it as the new borg pod. The
borg pod instance, not the YourAssimilatedClass() instance, is returned from __new__() after __init__() is
called to maintain reference consistency.
You need not modify your __init__() arguments for the wrapper, as the queen will be removed if provided, the
first instance of class _base_class if not (if provided as kwarg; wrapper's default_class if not), or it
will silently create a new _base_class (or default_class) if neither a queen was explicitly provided nor was
a queen instance found. Nothing associated with the wrapper will get through to your init, and nothing
about the instance will be changed* by the wrapper once it reaches one of your __init__ calls.
*__setattr__() will modify self-methods set in a class / instance to inject self.queen if the new
self-method is not pre-wrapped with @resist.
:Best Practices:
1. Any of the attributes set by this wrapper (self.queen, self.drone, _protected_self, _active_class, __doc__,
__class__ property, and _protect_self_reference [the last is on methods]) should not be changed except
through the interface (@assimilate, @resist) offered by this module - and any method or class should only be
explicitly decorated a single time. (You may subclass and decorate from both unwrapped and wrapped classes -
just don't put @assimilate\n@assimilate\nclass YourClass() and be surprised when you have a bad time.)
2. Because of how the self.queen injection works, the best course of action is letting the interpreter fill in
super() arguments as in super().parent_method() instead of explicitly calling
super(self.__class__, self).parent_method(). This will automatically select the parent class method based on
the owning class of the method being called from rather than the class of self.
3. Definitely needs new-style classes, almost certainly needs Python >= 3, needs testing for <3.6.
:How it Works:
The 'borg pod' is a bridge or state-machine-like object, which stores a protected reference to both itself and
the current acting class instance. It too shares an internal state with the @assimilate'd instance, and will
search that instance for methods not found in its class. You may subclass or change the proxy class, and set
per-wrapper the default proxy class to use if no class is provided at __new__ time by setting the optional
default_class decorator argument.
For consistency, all references to self passed into a method are converted to the proxy's self. This allows for
chaining calls and setting references in other objects without the overhead of explicitly calling
self.queen, as the object should always be accessed through self.queen to behave as expected. You may use
the @resist wrapper on any method to prevent this from implicitly happening. self.drone will explicitly
access the current de facto class, and self.queen will access the proxy object. As the need for an implicit
conversion mechanism implies, it is suggested that self references be converted to self.queen unless there
        is a significant reason not to. Although the object will still have a self.queen reference to recover its
'borg pod' object ID, this could be a frustrating source of identity bugs!
@assimilate decorated classes are subclass, __magic__ and inspection-friendly. The "queen" class instance will
adopt the __class__*, __doc__, __dir__, methods, and even magic methods** of the drone class instance.
*if inspected - they don't really set their __class__! I'm actually not sure if it is even accessible
through normal methods [as in overwriting inherited __slots__ names] - but super(), etc still work.
**if not found in MAGIC_NO_REDIRECT_METHODS at the top of the module. Most notable among them are
__eq__() and __hash__(). For reasoning behind these choices, please read the Python 3 documentation
(https://docs.python.org/3/reference/datamodel.html) on customizing __hash__ and __eq__ - keeping
in the mind the context of maintaining a consistent identity - for why.
:Reasoning for the Madness:
I had an image full of objects of ambiguous and potentially changing classes depending on the state, values,
and classes of other objects. While the options of {computing the context per object until their states
settled, holding off on initiating what would be an @assimilate'd class instance until the object's class
was no longer ambiguous, depth-first classifying per object the correct static class in __new__, or
creating some sort of stateful monstrous mega-class-conglomerate} were all available, it was useful to be
able to treat an object under the assumption that it was a certain class to bootstrap the context
process and not need to worry about where I was accessing what attributes. It's the same object, why can't I
use self? There is a clear need for the ability to change the effective class and methods of an object
without going through the trouble of fixing all pointers to the new Class instance and without needing to
explicitly transfer states and couple the potential classes. The solution? The Borg Pod.
:Credits:
borg_pod module by Andrew M. Hogan. (borg_pod Copyright 2018 Hogan Consulting Group)
"""
if default_class is None:
default_class = BorgPod
def borg_pod_decorator(wrapped_class):
"""Modify methods and attributes of wrapped_class to support the borg pod interface."""
def _setup_pod_in_new(wrapped_new):
"""Modifies the __new__ method to return an instance of the borg pod object rather than wrapped_class."""
@functools.wraps(wrapped_new)
def new_wrapper(cls, *args, queen=None, _base_class=None, **kwargs):
if _should_be_self_class_unless_called_from_child_class != cls:
# This was called from a subclass; better just return the new object without anything crazy.
return wrapped_new(cls)
# Is this the False queen?
if queen is None:
# Just be glad this isn't a spit() function.
if _base_class is None:
_base_class = default_class
for ids, arg in enumerate(args):
if isinstance(arg, _base_class):
# We have found the queen which evaluates to True. Remove from args so __init__ is okay.
queen = arg
args = args[:ids] + args[ids + 1:]
break
else:
# ...Then we shall forge our own queen.
queen = _base_class({})
new_object = wrapped_new(cls)
new_object.__init__(*args, queen=queen, **kwargs)
return queen
return new_wrapper
def _assimilate_in_init(wrapped_init):
"""Modifies the __init__ method to ensure that the instance dict is converted if not called by subclass."""
@functools.wraps(wrapped_init)
def init_wrapper(self, *args, queen=None, **kwargs):
if queen is None:
# Prevents recursive loop in wrapped Parent classes.
return wrapped_init(self, *args, **kwargs)
queen.__doc__ = self.__doc__
self.__dict__ = queen.__dict__
self._active_class = self
self.queen = self._protected_self.queen
self.drone = self._protected_self.drone
return wrapped_init(self, *args, **kwargs)
return init_wrapper
# These ancestor lists will always be the ones available when the class method is called - pretty handy!
_all_ancestors = wrapped_class.mro()
_should_be_self_class_unless_called_from_child_class, ancestors = _all_ancestors[0], _all_ancestors[1:]
# Instance method self-reference protector
_modify_methods_for_self_reference(wrapped_class)
# Some special magic methods that make everything sweeter with a little forced decoration.
setattr(wrapped_class, '__new__', _setup_pod_in_new(wrapped_class.__new__))
setattr(wrapped_class, '__init__', _assimilate_in_init(wrapped_class.__init__))
# setattr(wrapped_class, '__hash__', lambda x: hash(x.queen))
# setattr(wrapped_class, '__eq__', lambda x, y: x.queen is y.queen if hasattr(y, QUEEN) else False)
setattr(wrapped_class, '__setattr__', _borg_pod_set_with_safe_self_access(wrapped_class.__setattr__))
for this_method in DEFAULT_FORCED_DECORATES_ON_DECORATED_CLASS_ONLY:
getattr(wrapped_class, this_method)._protect_self_reference = False
for ancestor in ancestors:
if ancestor is not object: # TODO: Check against all builtin types? (~Submit a pull request~)
_modify_methods_for_self_reference(ancestor)
return wrapped_class
if _wrapped_class is None:
# An optional keyword argument was provided!
return borg_pod_decorator
return borg_pod_decorator(_wrapped_class)
def _set_magic_methods(wrapped_class, names):
"""Betcha can't have just one!"""
for name in names:
_set_magic_method(wrapped_class, name)
def _set_magic_method(wrapped_class, name):
"""Aw..."""
setattr(wrapped_class, name, _magic_dictate(lambda self: self._active_class, name))
def _magic_dictate(wrapped_method, name):
"""It's Wing-gar-dium Levi-o-sa, make the 'gar' nice and long."""
@functools.wraps(wrapped_method)
def magic_wrapper(self, *args, **kwargs):
try:
return getattr(wrapped_method(self), name)(*args, **kwargs)
except RecursionError:
_unbound_access_error(self, name)
return magic_wrapper
def _unbound_access_error(this_instance, this_method_name):
"""Where my assimilates at?"""
raise AttributeError(
"Instances of Class {} cannot call method {} without being bound to another object with that method.".format(
this_instance.__class__, this_method_name
)
)
def _redirect_magic_methods(wrapped_class):
"""You really expected a newly-created, implementation-detail, private wrapper to have documentation?"""
for name in REDIRECT_I_R_ABLE_METHODS:
r_name = "__r" + name[2:]
i_name = "__i" + name[2:]
_set_magic_methods(wrapped_class, (name, r_name, i_name))
for name in REDIRECT_METHODS:
_set_magic_method(wrapped_class, name)
return wrapped_class
@_redirect_magic_methods
class BorgPod(object): # Note: This is the last module attribute you should know! It's all test material from here.
"""
This is a hidden access point to instances of classes bound by @assimilate. It is a state machine, facade, proxy,
bridge, and none of the above at the same time! Unless a method is decorated with @resist, all implicitly
passed references of self are converted to an instance of this class (or the provided _base_class or
default_class). It will still access your instances methods and attributes! It's just a way of simplifying
development overhead for implementing design patterns like strategy which should frankly be built into the
language. The entire group of objects in a state machine system refer to the same thing. Why do I have to change
the way I access attributes? Why do I even have to set up that division in the first place?
"""
# _base_borgs = set() # If you were to instance BorgPods off of existing objects, I'd use a hash lookup in __new__.
def __init__(self, _shared_state=None):
self.__dict__ = _shared_state if _shared_state is not None else {}
self._active_class = self
self._protected_self = self
super().__init__()
@property
def queen(self):
"""The queen is like a decoupled proxy object for accessing attributes and methods of the borg pod."""
return self._protected_self
@property
def drone(self):
"""The drone controls both the attributes and methods of the borg pod during its lifetime as _active_class."""
return self._active_class
@property
def __class__(self):
"""This isn't where the magic happens, but it does make things much more inspection-friendly."""
if self._active_class is not self._protected_self:
return self._active_class.__class__
return BorgPod
def __bool__(self):
"""Returns False if not bound to an object."""
if self._active_class is not self._protected_self:
return bool(self._active_class)
return False
def __getattr__(self, name):
"""__getattr__ is called if 'name' was not found in this class. Magic methods use another route due to magic."""
try:
return getattr(self._active_class, name)
except RecursionError:
_unbound_access_error(self, name)
def __hash__(self):
"""Identical borg pod objects are always sent to the same location in a hash table."""
return hash((BorgPod.__name__, id(self)))
def __eq__(self, other):
"""Identical borg pod objects always evaluate as the same object."""
return hash(self) == hash(other)
def __str__(self):
"""Calls drone's __str__ or defaults small description with class and ID."""
if self._protected_self is not self._active_class:
return self._active_class.__str__()
return "<Unbound {} object #{}>".format(self._protected_self.__class__.__name__, id(self._protected_self))
def __repr__(self):
"""Calls drone's __repr__ or defaults str."""
if self._protected_self is not self._active_class:
return self._active_class.__repr__()
return self.__str__()
def __format__(self, format_spec):
"""Calls drone's __format__ or defaults super()."""
if self._protected_self is not self._active_class:
return self._active_class.__format__(format_spec)
return super().__format__(format_spec)
def __sizeof__(self):
"""Calls drone's __sizeof__ or defaults super()."""
if self._active_class is not self._protected_self:
return self._active_class.__sizeof__()
return super().__sizeof__()
class _PerfectGreekInfluencedChalkDrawingOfFace(object): # Abandon all hope of helpful docs/names, ye' who enter here.
"""The first step towards drawing any circle."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shape_type = "pre-circle"
def self_method(self):
return self
@assimilate(default_class=BorgPod)
class _Circle(_PerfectGreekInfluencedChalkDrawingOfFace):
"""
The Borg Pod does not care about unique inheritance. Your biological distinctiveness has been added to the
collective.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shape_type = "circle"
@staticmethod
def info():
print("I AM CIRCLE.")
# def self_method(self): # Oh no! But inheritance still works.
# return self
def __str__(self):
if hasattr(self, DRONE):
return "<{} object #{} bound to same address as Queen id #{}>".format(
self.drone.__class__.__name__, id(self.drone), id(self.queen)
)
return "<Unassimilated {} object #{}>".format(self.__class__.__name__, id(self))
__repr__ = __str__
@assimilate
class _Ellipse(_Circle):
"""
The Borg Pod does not care about unique inheritance. Your biological distinctiveness has been added to the
collective.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shape_type = "ellipse"
@assimilate
class _AlphaNumeric(object):
"""
The Borg Pod does not care about unique signatures. Your technological distinctiveness has been added to the
collective.
"""
def __init__(self):
self.shape_type = "character"
@staticmethod
def info():
print("I AM CHARACTER.")
def self_method(self):
self.info()
return self
def __add__(self, other):
print("IN _ALPHANUMERIC __add__(): {} + {}".format(self, other))
return 1
def __str__(self):
if hasattr(self, DRONE):
return "<{} object #{} bound to same address as Queen id #{}>".format(
self.drone.__class__.__name__, id(self.drone), id(self.queen)
)
return "<Unassimilated {} object #{}>".format(self.__class__.__name__, id(self))
__repr__ = __str__
@assimilate
class _Punctuation(object):
"""The Borg Pod does care about resistance due to the current political climate, but stresses its futility."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shape_type = "punctuation"
@staticmethod
def info():
print("I AM PUNCTUATION.")
@resist
def self_method(self):
return self
def __str__(self):
if hasattr(self, DRONE):
return "<{} object #{} bound to same address as Queen id #{}>".format(
self.drone.__class__.__name__, id(self.drone), id(self.queen)
)
return "<Unassimilated {} object #{}>".format(self.__class__.__name__, id(self))
__repr__ = __str__
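# A compact quick-start mirroring the "How to Use" notes in the assimilate docstring
# (the _QuickStart* names are illustrative only; any pair of @assimilate classes would do):
@assimilate
class _QuickStartDraft(object):
    def describe(self):
        return "{} #{}".format(type(self.drone).__name__, id(self))
@assimilate
class _QuickStartFinal(object):
    def describe(self):
        return "{} #{}".format(type(self.drone).__name__, id(self))
def example_quick_start():
    """Create a pod as one class, convert it to another, and keep the same identity."""
    pod = _QuickStartDraft()        # new borg pod wrapped in a fresh BorgPod proxy
    first_id = id(pod)
    pod = _QuickStartFinal(pod)     # same pod object, new behaviour
    assert id(pod) == first_id      # every external reference to the pod stays valid
    return pod.describe()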
def _compare_seq(sequence, sequence_2=None):
"""
Print whether items in the iterable are/are not the same as the next element in the same list,
or the matching-index elements in sequence_2 (if not None).
"""
if sequence_2 is None:
for ob_a, ob_b in zip(sequence, sequence[1::] + [sequence[0]]):
print("{}: {} is {}".format(ob_a is ob_b, ob_a, ob_b))
else:
for ob_a, ob_b in zip(sequence, sequence_2):
print("{}: {} is {}".format(ob_a is ob_b, ob_a, ob_b))
def _assert_seq(sequence, sequence_2=None, *, assert_val=True):
"""
Assert that items in the iterable are/are not the same as the next element in the same list,
or the matching-index element in sequence_2 (if not None).
"""
if sequence_2 is None:
for ob_a, ob_b in zip(sequence, sequence[1::] + [sequence[0]]):
assert (ob_a is ob_b) == assert_val, "Assertion that {} is {} did not match provided value of {}.".format(
ob_a, ob_b, assert_val
)
else:
for ob_a, ob_b in zip(sequence, sequence_2):
assert (ob_a is ob_b) == assert_val, "Assertion that {} is {} did not match provided value of {}.".format(
ob_a, ob_b, assert_val
)
def _convert_seq(sequence, new_class):
"""Convert the iterable to a new class!"""
return [new_class(obj) for obj in sequence]
def _identity_crisis_test(num_objects):
"""Test creation, identity, and inheritance control flows."""
test_objects_original = [BorgPod() for _ in range(num_objects)]
print("\n____\nBEGIN @ASSIMILATE IDENTITY TESTS\n")
print("Are they unique objects?")
_assert_seq(test_objects_original, assert_val=False)
print("\nConvert To Circles->")
test_objects_circle = _convert_seq(test_objects_original, _Circle)
print("Are they unique objects?")
_assert_seq(test_objects_circle, assert_val=False)
print("Is equal to original list?")
_assert_seq(test_objects_circle, test_objects_original)
print("what if we return self?")
self_list_ambiguous = [obj.self_method() for obj in test_objects_circle]
print("Are they equal to the old version?")
_assert_seq(self_list_ambiguous, test_objects_circle)
print("Can we still use instances of an undecorated parent class if a subclass is decorated with @assimilate?")
test_objects_undecorated_parent_class = [_PerfectGreekInfluencedChalkDrawingOfFace() for _ in range(num_objects)]
self_list_face = [obj.self_method() for obj in test_objects_undecorated_parent_class]
for face_return, original_face in zip(self_list_face, test_objects_undecorated_parent_class):
assert not isinstance(face_return, BorgPod)
assert isinstance(face_return, _PerfectGreekInfluencedChalkDrawingOfFace)
assert not isinstance(original_face, BorgPod)
assert isinstance(original_face, _PerfectGreekInfluencedChalkDrawingOfFace)
assert face_return is original_face
print("What if we use a child class decorated with @assimilate when a parent class is also decorated?")
test_objects_decorated_subclass = [_Ellipse() for _ in range(num_objects)]
print("Is the self-return converted properly?")
self_list_sub = [obj.self_method() for obj in test_objects_decorated_subclass]
_assert_seq(self_list_sub, test_objects_decorated_subclass)
print("Can the sub-class still be converted?")
print("\nConvert To Characters->")
test_objects_subclass_converted = _convert_seq(test_objects_decorated_subclass, _AlphaNumeric)
_assert_seq(test_objects_subclass_converted, test_objects_decorated_subclass)
print("Nice! Let's test some more attributes on the original circle objects.")
return test_objects_original, test_objects_circle
def _magic_test(test_objects_circle, test_objects_original):
"""Test magic method binding, class, and inspection attributes."""
print("\n____\nBEGIN MAGIC TESTS")
print("First we'll convert to a class with magic methods, and assert a couple common sense identity attrs again.")
print("\nConvert To Characters->")
test_objects_characters = _convert_seq(test_objects_circle, _AlphaNumeric)
print("Are they still unique objects with their previous IDs?")
_assert_seq(test_objects_characters, assert_val=False)
_assert_seq(test_objects_characters, test_objects_circle)
_assert_seq(test_objects_characters, test_objects_original)
print("Does using magic methods implemented in an @assimilated class still work? Let's try an object + 1.")
_ = test_objects_characters[0] + 1
print("Nice! What happens if we call a magic method on an unbound Borg Pod?")
test_object = BorgPod()
try:
test_object *= 1
except AttributeError as e:
print("That was close! Here is our error: {}".format(e))
else:
raise AssertionError("The imul method should have error'd while unbound. Fortunately, I have a spare!")
print("Does the unbound instance preserve its docstring?")
assert test_object.__doc__ is BorgPod.__doc__
print("Does a bound instance properly inherit the bound docstring?")
assert test_objects_characters[0].__doc__ is _AlphaNumeric.__doc__
assert test_object.__doc__ is not test_objects_characters[0]
assert test_object.__doc__ is not test_objects_characters[0].__doc__
print("Are their available dirs in line with their classes?")
assert ([attr for attr in dir(test_objects_characters[0]) if
attr not in {"queen", "drone", "_active_class", "_protected_self", "shape_type"}]
== dir(_AlphaNumeric)), "Did you add more instance methods?"
assert ([attr for attr in dir(test_object)
if attr not in {"_active_class", "_protected_self", "shape_type"}]
== dir(BorgPod)), "Did you add more instance methods?"
print("And that means separately bound instance dirs are not the same nor equal, correct?")
assert dir(test_objects_characters[0]) is not dir(test_object)
assert dir(test_objects_characters[0]) != dir(test_object)
print("And that means their respective bound classes evaluate as the same as the module-level class, correct?")
assert test_objects_characters[0].__class__ is _AlphaNumeric
assert test_object.__class__ is BorgPod
return test_objects_characters
def _the_resistance_test(test_objects_characters):
"""Test the @resist decorator."""
print("\n____\nBEGIN @RESIST DECORATOR TESTS")
print("Let's try converting to a class with a @resist decorated method returning 'self'.")
print("\nConvert To Punctuations->")
test_objects_punctuation = _convert_seq(test_objects_characters, _Punctuation)
print("First, let's assert a couple common sense identity attrs again.")
print("Are they unique objects?")
_assert_seq(test_objects_punctuation, assert_val=False)
print("Is equal to character list?")
_assert_seq(test_objects_punctuation, test_objects_characters)
print("What if we return self from a method decorated with @resist?")
self_list_unprotected = [obj.self_method() for obj in test_objects_punctuation]
print("(A method decorated with @resist will not convert 'self' to the queen reference. [not suggested])")
print("Are they unique objects?")
_assert_seq(self_list_unprotected, assert_val=False)
print("Do they no longer evaluate as the same object from the previous punctuation list?")
_assert_seq(self_list_unprotected, test_objects_punctuation, assert_val=False)
print("What if we retrieve the drones from the previous characters list?")
drone_list_characters = [obj.drone for obj in test_objects_characters]
print("Does it evaluate as the same to the @resist self list?")
_assert_seq(drone_list_characters, self_list_unprotected)
print("What if we retrieve the queen from the @resist self list?")
self_list_restored = [obj.queen for obj in self_list_unprotected]
print("Does it evaluate as the same to the original characters list?")
_assert_seq(self_list_restored, test_objects_characters)
def main(num_objects=6):
"""
Run some assertion tests and prints to demonstrate that you too can have easy, dynamic classes in existing
infrastructure!
:Parameters:
:param int num_objects: The length of the list of test objects to be created.
:rtype: None
:return: None
"""
print("\n____\nBEGIN TESTS\nLet's run some assertion tests and print some examples.")
_the_resistance_test(_magic_test(*_identity_crisis_test(num_objects)))
print("\nTests Complete\n____")
if __name__ == "__main__":
main()
|
the-stack_0_27748
|
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
"""Chroma services may subscribe to named queues using this module. The `ServiceQueue` class is a wrapper
around an AMQP queue."""
import threading
from chroma_core.services import _amqp_connection
from chroma_core.services.log import log_register
log = log_register("queue")
class ServiceQueue(object):
"""Simple FIFO queue, multiple senders, single receiver. Payloads
must be JSON-serializable.
Subclass this for each named queue, setting the `name` class attribute.
Example declaring a queue:
::
class AcmeQueue(ServiceQueue):
name = 'acme'
Example sending to a queue:
::
AcmeQueue().put({'foo': 'bar'})
"""
name = None
def put(self, body):
with _amqp_connection() as conn:
q = conn.SimpleQueue(
self.name, serializer="json", exchange_opts={"durable": False}, queue_opts={"durable": False}
)
q.put(body)
def purge(self):
with _amqp_connection() as conn:
purged = conn.SimpleQueue(
self.name, exchange_opts={"durable": False}, queue_opts={"durable": False}
).consumer.purge()
log.info("Purged %s messages from '%s' queue" % (purged, self.name))
def __init__(self):
self._stopping = threading.Event()
def stop(self):
log.info("Stopping ServiceQueue %s" % self.name)
self._stopping.set()
def serve(self, callback):
from Queue import Empty as QueueEmpty
with _amqp_connection() as conn:
q = conn.SimpleQueue(
self.name, serializer="json", exchange_opts={"durable": False}, queue_opts={"durable": False}
)
# FIXME: it would be preferable to avoid waking up so often: really what is wanted
# here is to sleep on messages or a stop event.
while not self._stopping.is_set():
try:
message = q.get(timeout=1)
message.ack()
message = message.decode()
callback(message)
except QueueEmpty:
pass
class AgentRxQueue(ServiceQueue):
def __route_message(self, message):
if message["type"] == "DATA" and self.__data_callback:
self.__data_callback(message["fqdn"], message["body"])
elif self.__session_callback:
self.__session_callback(message)
else:
# Not a data message, and no session callback, drop.
pass
def __init__(self, plugin):
"""Specialization of ServiceQueue for receiving messages from agents:
the callback invoked depends on the message_type. Instead of
setting the queue name, set the plugin name."""
super(AgentRxQueue, self).__init__()
self.name = "agent_%s_rx" % plugin
def serve(self, data_callback=None, session_callback=None):
        '''Data callback will receive only DATA messages, being passed the fqdn and the body (i.e.
the object returned by a device plugin). Session callback will receive all messages,
including the outer envelope.
Simple consumer services should just set data_callback. Session-aware services should
set session_callback.
"""
if data_callback is None and session_callback is None:
raise AssertionError("Set at least one callback")
self.__data_callback = data_callback
self.__session_callback = session_callback
return ServiceQueue.serve(self, self.__route_message)
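# A minimal serving sketch (assumptions: an AMQP broker reachable through _amqp_connection(),
# and "demo" as a stand-in plugin name; serve() blocks until stop() is called from another thread):
def example_serve_demo_plugin():
    queue = AgentRxQueue("demo")
    def on_data(fqdn, body):
        log.info("demo data from %s: %r" % (fqdn, body))
    try:
        queue.serve(data_callback=on_data)
    finally:
        queue.stop()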
|
the-stack_0_27749
|
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
import sys
sys.dont_write_bytecode = True
import pygame
import pygame.gfxdraw
from .src.players import Knight, Archer
from .src.zombie import Zombie
from .src.weapons import Arrow, Sword
from .manual_control import manual_control
import numpy as np
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector
from gym.spaces import Box, Discrete
from gym.utils import seeding
from pettingzoo.utils import wrappers
from gym.utils import EzPickle
from pettingzoo.utils.conversions import parallel_wrapper_fn
from .src.img import get_image
def env(**kwargs):
env = raw_env(**kwargs)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
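# A minimal random-rollout sketch (assumptions: the standard PettingZoo AEC loop of this era,
# where last() returns (observation, reward, done, info); actions are sampled at random purely
# to exercise the environment):
def example_random_rollout(max_steps=200):
    kaz = env(spawn_rate=20, num_archers=2, num_knights=2)
    kaz.reset()
    for agent in kaz.agent_iter(max_iter=max_steps):
        observation, reward, done, info = kaz.last()
        action = None if done else kaz.action_spaces[agent].sample()
        kaz.step(action)
    kaz.close()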
class raw_env(AECEnv, EzPickle):
metadata = {'render.modes': ['human', "rgb_array"], 'name': "knights_archers_zombies_v7"}
def __init__(self, spawn_rate=20, num_archers=2, num_knights=2, killable_knights=True, killable_archers=True, pad_observation=True, line_death=False, max_cycles=900):
EzPickle.__init__(self, spawn_rate, num_archers, num_knights, killable_knights, killable_archers, pad_observation, line_death, max_cycles)
# Game Constants
self.ZOMBIE_SPAWN = spawn_rate
self.WIDTH = 1280
self.HEIGHT = 720
self.max_cycles = max_cycles
self.frames = 0
self.pad_observation = pad_observation
self.killable_knights = killable_knights
self.killable_archers = killable_archers
self.line_death = line_death
self.has_reset = False
self.seed()
# Dictionaries for holding new players and their weapons
self.archer_dict = {}
self.knight_dict = {}
self.arrow_dict = {}
self.sword_dict = {}
# Game Variables
self.score = 0
self.run = True
self.arrow_spawn_rate = self.sword_spawn_rate = self.zombie_spawn_rate = 0
self.knight_player_num = self.archer_player_num = 0
self.archer_killed = False
self.knight_killed = False
self.sword_killed = False
self.closed = False
# Creating Sprite Groups
self.all_sprites = pygame.sprite.Group()
self.zombie_list = pygame.sprite.Group()
self.arrow_list = pygame.sprite.Group()
self.sword_list = pygame.sprite.Group()
self.archer_list = pygame.sprite.Group()
self.knight_list = pygame.sprite.Group()
self.num_archers = num_archers
self.num_knights = num_knights
# Represents agents to remove at end of cycle
self.kill_list = []
# Initializing Pygame
self.render_on = False
pygame.init()
# self.WINDOW = pygame.display.set_mode([self.WIDTH, self.HEIGHT])
self.WINDOW = pygame.Surface((self.WIDTH, self.HEIGHT))
pygame.display.set_caption("Knights, Archers, Zombies")
self.left_wall = get_image(os.path.join('img', 'left_wall.png'))
self.right_wall = get_image(os.path.join('img', 'right_wall.png'))
self.right_wall_rect = self.right_wall.get_rect()
self.right_wall_rect.left = self.WIDTH - self.right_wall_rect.width
self.floor_patch1 = get_image(os.path.join('img', 'patch1.png'))
self.floor_patch2 = get_image(os.path.join('img', 'patch2.png'))
self.floor_patch3 = get_image(os.path.join('img', 'patch3.png'))
self.floor_patch4 = get_image(os.path.join('img', 'patch4.png'))
self.agent_list = []
self.agents = []
for i in range(num_archers):
name = "archer_" + str(i)
self.archer_dict["archer{0}".format(self.archer_player_num)] = Archer(agent_name=name)
self.archer_dict["archer{0}".format(self.archer_player_num)].offset(i * 50, 0)
self.archer_list.add(self.archer_dict["archer{0}".format(self.archer_player_num)])
self.all_sprites.add(self.archer_dict["archer{0}".format(self.archer_player_num)])
self.agent_list.append(self.archer_dict["archer{0}".format(self.archer_player_num)])
if i != num_archers - 1:
self.archer_player_num += 1
for i in range(num_knights):
name = "knight_" + str(i)
self.knight_dict["knight{0}".format(self.knight_player_num)] = Knight(agent_name=name)
self.knight_dict["knight{0}".format(self.knight_player_num)].offset(i * 50, 0)
self.knight_list.add(self.knight_dict["knight{0}".format(self.knight_player_num)])
self.all_sprites.add(self.knight_dict["knight{0}".format(self.knight_player_num)])
self.agent_list.append(self.knight_dict["knight{0}".format(self.knight_player_num)])
if i != num_knights - 1:
self.knight_player_num += 1
self.agent_name_mapping = {}
a_count = 0
for i in range(num_archers):
a_name = "archer_" + str(i)
self.agents.append(a_name)
self.agent_name_mapping[a_name] = a_count
a_count += 1
for i in range(num_knights):
k_name = "knight_" + str(i)
self.agents.append(k_name)
self.agent_name_mapping[k_name] = a_count
a_count += 1
self.observation_spaces = dict(zip(self.agents, [Box(low=0, high=255, shape=(512, 512, 3), dtype=np.uint8) for _ in enumerate(self.agents)]))
self.action_spaces = dict(zip(self.agents, [Discrete(6) for _ in enumerate(self.agents)]))
self.state_space = Box(low=0, high=255, shape=(self.HEIGHT, self.WIDTH, 3), dtype=np.uint8)
self.possible_agents = self.agents[:]
self._agent_selector = agent_selector(self.agents)
self.reinit()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
# Controls the Spawn Rate of Weapons
def check_weapon_spawn(self, sword_spawn_rate, arrow_spawn_rate):
if sword_spawn_rate > 0:
sword_spawn_rate += 1
if sword_spawn_rate > 3:
sword_spawn_rate = 0
if arrow_spawn_rate > 0:
arrow_spawn_rate += 1
if arrow_spawn_rate > 3:
arrow_spawn_rate = 0
return sword_spawn_rate, arrow_spawn_rate
# Spawn New Players
class spawnPlayers(pygame.sprite.Sprite):
def __init__(self, event, knight_player_num, archer_player_num, knight_list, archer_list, all_sprites, knight_dict, archer_dict):
super().__init__()
self.event = event
self.knight_player_num = knight_player_num
self.archer_player_num = archer_player_num
self.knight_dict = knight_dict
self.archer_dict = archer_dict
self.knight_list = knight_list
self.archer_list = archer_list
self.all_sprites = all_sprites
# Spawn New Knight
def spawnKnight(self):
# if self.event.key == pygame.K_m:
# self.knight_player_num += 1
# self.knight_dict['knight{0}'.format(self.knight_player_num)] = Knight()
# self.knight_list.add(self.knight_dict['knight{0}'.format(self.knight_player_num)])
# self.all_sprites.add(self.knight_dict['knight{0}'.format(self.knight_player_num)])
return self.knight_player_num, self.knight_list, self.all_sprites, self.knight_dict
# Spawn New Archer
def spawnArcher(self):
# if self.event.key == pygame.K_x:
# self.archer_player_num += 1
# self.archer_dict['archer{0}'.format(self.archer_player_num)] = Archer()
# self.archer_list.add(self.archer_dict['archer{0}'.format(self.archer_player_num)])
# self.all_sprites.add(self.archer_dict['archer{0}'.format(self.archer_player_num)])
return self.archer_player_num, self.archer_list, self.all_sprites, self.archer_dict
# Spawn New Weapons
class spawnWeapons(pygame.sprite.Sprite):
def __init__(self, action, agent_index, agent_list, sword_spawn_rate, arrow_spawn_rate, knight_killed, archer_killed,
knight_dict, archer_dict, knight_list, archer_list, knight_player_num, archer_player_num,
all_sprites, sword_dict, arrow_dict, sword_list, arrow_list):
super().__init__()
self.action = action
self.sword_spawn_rate = sword_spawn_rate
self.arrow_spawn_rate = arrow_spawn_rate
self.knight_killed = knight_killed
self.archer_killed = archer_killed
self.knight_dict = knight_dict
self.archer_dict = archer_dict
self.knight_list = knight_list
self.archer_list = archer_list
self.knight_player_num = knight_player_num
self.archer_player_num = archer_player_num
self.all_sprites = all_sprites
self.sword_dict = sword_dict
self.arrow_dict = arrow_dict
self.sword_list = sword_list
self.arrow_list = arrow_list
self.agent_index = agent_index
self.agent_list = agent_list
# Spawning Swords for Players
def spawnSword(self):
if (self.action == 5 and self.sword_spawn_rate == 0 and self.agent_list[self.agent_index].is_knight):
if not self.sword_list: # Sword List is Empty
if not self.knight_killed:
for i in range(0, self.knight_player_num + 1):
self.sword_dict['sword{0}'.format(i)] = Sword((self.knight_dict['knight{0}'.format(i)]))
self.sword_list.add(self.sword_dict[('sword{0}'.format(i))])
self.all_sprites.add(self.sword_dict[('sword{0}'.format(i))])
self.sword_spawn_rate = 1
self.knight_killed = False
else:
for knight in self.knight_list:
temp = Sword(knight)
self.sword_list.add(temp)
self.all_sprites.add(temp)
self.sword_spawn_rate = 1
return self.sword_spawn_rate, self.knight_killed, self.knight_dict, self.knight_list, self.knight_player_num, self.all_sprites, self.sword_dict, self.sword_list
# Spawning Arrows for Players
def spawnArrow(self):
if (self.action == 5 and self.arrow_spawn_rate == 0 and self.agent_list[self.agent_index].is_archer):
if not self.archer_killed:
for i in range(0, self.archer_player_num + 1):
if i == self.agent_index:
self.arrow_dict[('arrow{0}'.format(i))] = Arrow(self.archer_dict[('archer{0}'.format(i))])
self.arrow_list.add(self.arrow_dict[('arrow{0}'.format(i))])
self.all_sprites.add(self.arrow_dict[('arrow{0}'.format(i))])
self.arrow_spawn_rate = 1
self.archer_killed = False
else:
for archer in self.archer_list:
temp = Arrow(archer)
self.arrow_list.add(temp)
self.all_sprites.add(temp)
self.arrow_spawn_rate = 1
return self.arrow_spawn_rate, self.archer_killed, self.archer_dict, self.archer_list, self.archer_player_num, self.all_sprites, self.arrow_dict, self.arrow_list
# Stab the Sword
def sword_stab(self, sword_list, all_sprites):
for sword in sword_list:
sword_active = sword.update()
if not sword_active: # remove the sprite
sword_list.remove(sword)
all_sprites.remove(sword)
return sword_list, all_sprites
# Spawning Zombies at Random Location at every 100 iterations
def spawn_zombie(self, zombie_spawn_rate, zombie_list, all_sprites):
zombie_spawn_rate += 1
zombie = Zombie(self.np_random)
if zombie_spawn_rate >= self.ZOMBIE_SPAWN:
zombie.rect.x = self.np_random.randint(0, self.WIDTH)
zombie.rect.y = 5
zombie_list.add(zombie)
all_sprites.add(zombie)
zombie_spawn_rate = 0
return zombie_spawn_rate, zombie_list, all_sprites
# Zombie Kills the Knight
def zombie_knight(self, zombie_list, knight_list, all_sprites, knight_killed, sword_list, sword_killed):
for zombie in zombie_list:
zombie_knight_list = pygame.sprite.spritecollide(zombie, knight_list, True)
for knight in zombie_knight_list:
knight.alive = False
knight_list.remove(knight)
all_sprites.remove(knight)
sword_killed = True
knight_killed = True
if knight.agent_name not in self.kill_list:
self.kill_list.append(knight.agent_name)
return zombie_list, knight_list, all_sprites, knight_killed, sword_list, sword_killed
# Kill the Sword when Knight dies
def kill_sword(self, sword_killed, sword_list, all_sprites):
for sword in sword_list:
if sword_killed:
sword_list.remove(sword)
all_sprites.remove(sword)
sword_killed = False
return sword_killed, sword_list, all_sprites
# Zombie Kills the Archer
def zombie_archer(self, zombie_list, archer_list, all_sprites, archer_killed):
for zombie in zombie_list:
zombie_archer_list = pygame.sprite.spritecollide(zombie, archer_list, True)
for archer in zombie_archer_list:
archer.alive = False
archer_list.remove(archer)
all_sprites.remove(archer)
archer_killed = True
if archer.agent_name not in self.kill_list:
self.kill_list.append(archer.agent_name)
return zombie_list, archer_list, all_sprites, archer_killed
# Zombie Kills the Sword
def zombie_sword(self, zombie_list, sword_list, all_sprites, score):
for sword in sword_list:
zombie_sword_list = pygame.sprite.spritecollide(sword, zombie_list, True)
# For each zombie hit, remove the sword and add to the score
for zombie in zombie_sword_list:
sword_list.remove(sword)
all_sprites.remove(sword)
zombie_list.remove(zombie)
all_sprites.remove(zombie)
sword.knight.score += 1
return zombie_list, sword_list, all_sprites, score
# Zombie Kills the Arrow
def zombie_arrow(self, zombie_list, arrow_list, all_sprites, score):
for arrow in arrow_list:
zombie_arrow_list = pygame.sprite.spritecollide(arrow, zombie_list, True)
# For each zombie hit, remove the arrow, zombie and add to the score
for zombie in zombie_arrow_list:
arrow_list.remove(arrow)
all_sprites.remove(arrow)
zombie_list.remove(zombie)
all_sprites.remove(zombie)
# score += 1
arrow.archer.score += 1
# Remove the arrow if it flies up off the screen
if arrow.rect.y < 0:
arrow_list.remove(arrow)
all_sprites.remove(arrow)
return zombie_list, arrow_list, all_sprites, score
# Zombie reaches the End of the Screen
def zombie_endscreen(self, run, zombie_list):
for zombie in zombie_list:
if zombie.rect.y > 690:
run = False
return run
# Zombie Kills all Players
def zombie_all_players(self, knight_list, archer_list, run):
if not knight_list and not archer_list:
run = False
return run
def observe(self, agent):
screen = pygame.surfarray.pixels3d(self.WINDOW)
i = self.agent_name_mapping[agent]
agent_obj = self.agent_list[i]
agent_position = (agent_obj.rect.x, agent_obj.rect.y)
if not agent_obj.alive:
cropped = np.zeros((512, 512, 3), dtype=np.uint8)
else:
min_x = agent_position[0] - 256
max_x = agent_position[0] + 256
min_y = agent_position[1] - 256
max_y = agent_position[1] + 256
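            # The observation is a 512x512 window centred on the agent's (rect.x, rect.y);
            # the bounds below clamp that window to the screen, and the start/end offsets
            # place the visible part inside a zero-padded 512x512 canvas.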
lower_y_bound = max(min_y, 0)
upper_y_bound = min(max_y, self.HEIGHT)
lower_x_bound = max(min_x, 0)
upper_x_bound = min(max_x, self.WIDTH)
startx = lower_x_bound - min_x
starty = lower_y_bound - min_y
endx = 512 + upper_x_bound - max_x
endy = 512 + upper_y_bound - max_y
cropped = np.zeros_like(self.observation_spaces[agent].low)
cropped[startx:endx, starty:endy, :] = screen[lower_x_bound:upper_x_bound, lower_y_bound:upper_y_bound, :]
return np.swapaxes(cropped, 1, 0)
def state(self):
'''
Returns an observation of the global environment
'''
state = pygame.surfarray.pixels3d(self.WINDOW).copy()
state = np.rot90(state, k=3)
state = np.fliplr(state)
return state
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
agent = self.agent_selection
if self._agent_selector.is_last():
# Controls the Spawn Rate of Weapons
self.sword_spawn_rate, self.arrow_spawn_rate = self.check_weapon_spawn(self.sword_spawn_rate, self.arrow_spawn_rate)
agent_name = self.agent_list[self.agent_name_mapping[agent]]
action = action + 1
out_of_bounds = agent_name.update(action)
if self.line_death and out_of_bounds:
agent_name.alive = False
if agent_name in self.archer_list:
self.archer_list.remove(agent_name)
else:
self.knight_list.remove(agent_name)
self.all_sprites.remove(agent_name)
self.kill_list.append(agent_name.agent_name)
sp = self.spawnPlayers(action, self.knight_player_num, self.archer_player_num, self.knight_list, self.archer_list, self.all_sprites, self.knight_dict, self.archer_dict)
# Knight
self.knight_player_num, self.knight_list, self.all_sprites, self.knight_dict = sp.spawnKnight()
# Archer
self.archer_player_num, self.archer_list, self.all_sprites, self.archer_dict = sp.spawnArcher()
# Spawn Weapons
sw = self.spawnWeapons(action, self.agent_name_mapping[agent], self.agent_list, self.sword_spawn_rate, self.arrow_spawn_rate, self.knight_killed, self.archer_killed, self.knight_dict, self.archer_dict, self.knight_list, self.archer_list, self.knight_player_num, self.archer_player_num, self.all_sprites, self.sword_dict, self.arrow_dict, self.sword_list, self.arrow_list)
# Sword
self.sword_spawn_rate, self.knight_killed, self.knight_dict, self.knight_list, self.knight_player_num, self.all_sprites, self.sword_dict, self.sword_list = sw.spawnSword()
# Arrow
self.arrow_spawn_rate, self.archer_killed, self.archer_dict, self.archer_list, self.archer_player_num, self.all_sprites, self.arrow_dict, self.arrow_list = sw.spawnArrow()
if self._agent_selector.is_last():
# Spawning Zombies at Random Location at every 100 iterations
self.zombie_spawn_rate, self.zombie_list, self.all_sprites = self.spawn_zombie(self.zombie_spawn_rate, self.zombie_list, self.all_sprites)
# Stab the Sword
self.sword_list, self.all_sprites = self.sword_stab(self.sword_list, self.all_sprites)
# Zombie Kills the Arrow
self.zombie_list, self.arrow_list, self.all_sprites, self.score = self.zombie_arrow(self.zombie_list, self.arrow_list, self.all_sprites, self.score)
# Zombie Kills the Sword
self.zombie_list, self.sword_list, self.all_sprites, self.score = self.zombie_sword(self.zombie_list, self.sword_list, self.all_sprites, self.score)
# Zombie Kills the Archer
if self.killable_archers:
                self.zombie_list, self.archer_list, self.all_sprites, self.archer_killed = self.zombie_archer(self.zombie_list, self.archer_list, self.all_sprites, self.archer_killed)
# Zombie Kills the Knight
if self.killable_knights:
self.zombie_list, self.knight_list, self.all_sprites, self.knight_killed, self.sword_list, self.sword_killed = self.zombie_knight(self.zombie_list, self.knight_list, self.all_sprites, self.knight_killed, self.sword_list, self.sword_killed)
# Kill the Sword when Knight dies
self.sword_killed, self.sword_list, self.all_sprites = self.kill_sword(self.sword_killed, self.sword_list, self.all_sprites)
for zombie in self.zombie_list:
zombie.update()
arrows_to_delete = []
for arrow in self.arrow_list:
arrow.update()
if not arrow.is_active():
arrows_to_delete.append(arrow)
for arrow in arrows_to_delete:
self.arrow_list.remove(arrow)
self.all_sprites.remove(arrow)
self.draw()
self.check_game_end()
self.frames += 1
self._clear_rewards()
self.rewards[agent] = agent_name.score
agent_name.score = 0
done = not self.run or self.frames >= self.max_cycles
self.dones = {a: done for a in self.agents}
if self._agent_selector.is_last():
_live_agents = self.agents[:]
for k in self.kill_list:
self.dones[k] = True
_live_agents.remove(k)
# reset the kill list
self.kill_list = []
self._agent_selector.reinit(_live_agents)
if len(self._agent_selector.agent_order):
self.agent_selection = self._agent_selector.next()
self._cumulative_rewards[agent] = 0
self._accumulate_rewards()
self._dones_step_first()
def enable_render(self):
self.WINDOW = pygame.display.set_mode([self.WIDTH, self.HEIGHT])
# self.WINDOW = pygame.Surface((self.WIDTH, self.HEIGHT))
self.render_on = True
self.draw()
def draw(self):
self.WINDOW.fill((66, 40, 53))
self.WINDOW.blit(self.left_wall, self.left_wall.get_rect())
self.WINDOW.blit(self.right_wall, self.right_wall_rect)
self.WINDOW.blit(self.floor_patch1, (500, 500))
self.WINDOW.blit(self.floor_patch2, (900, 30))
self.WINDOW.blit(self.floor_patch3, (150, 430))
self.WINDOW.blit(self.floor_patch4, (300, 50))
self.WINDOW.blit(self.floor_patch1, (1000, 250))
self.all_sprites.draw(self.WINDOW) # Draw all the sprites
def render(self, mode="human"):
if not self.render_on and mode == "human":
# sets self.render_on to true and initializes display
self.enable_render()
observation = np.array(pygame.surfarray.pixels3d(self.WINDOW))
if mode == "human":
pygame.display.flip()
return np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None
def close(self):
if not self.closed:
self.closed = True
if self.render_on:
# self.WINDOW = pygame.display.set_mode([self.WIDTH, self.HEIGHT])
self.WINDOW = pygame.Surface((self.WIDTH, self.HEIGHT))
self.render_on = False
pygame.event.pump()
pygame.display.quit()
def check_game_end(self):
# Zombie reaches the End of the Screen
self.run = self.zombie_endscreen(self.run, self.zombie_list)
# Zombie Kills all Players
self.run = self.zombie_all_players(self.knight_list, self.archer_list, self.run)
def reinit(self):
# Dictionaries for holding new players and their weapons
self.archer_dict = {}
self.knight_dict = {}
self.arrow_dict = {}
self.sword_dict = {}
# Game Variables
self.score = 0
self.run = True
self.arrow_spawn_rate = self.sword_spawn_rate = self.zombie_spawn_rate = 0
self.knight_player_num = self.archer_player_num = 0
self.archer_killed = False
self.knight_killed = False
self.sword_killed = False
# Creating Sprite Groups
self.all_sprites = pygame.sprite.Group()
self.zombie_list = pygame.sprite.Group()
self.arrow_list = pygame.sprite.Group()
self.sword_list = pygame.sprite.Group()
self.archer_list = pygame.sprite.Group()
self.knight_list = pygame.sprite.Group()
self.agent_list = []
self.agents = []
for i in range(self.num_archers):
name = "archer_" + str(i)
self.archer_dict["archer{0}".format(self.archer_player_num)] = Archer(agent_name=name)
self.archer_dict["archer{0}".format(self.archer_player_num)].offset(i * 50, 0)
self.archer_list.add(self.archer_dict["archer{0}".format(self.archer_player_num)])
self.all_sprites.add(self.archer_dict["archer{0}".format(self.archer_player_num)])
self.agent_list.append(self.archer_dict["archer{0}".format(self.archer_player_num)])
if i != self.num_archers - 1:
self.archer_player_num += 1
for i in range(self.num_knights):
name = "knight_" + str(i)
self.knight_dict["knight{0}".format(self.knight_player_num)] = Knight(agent_name=name)
self.knight_dict["knight{0}".format(self.knight_player_num)].offset(i * 50, 0)
self.knight_list.add(self.knight_dict["knight{0}".format(self.knight_player_num)])
self.all_sprites.add(self.knight_dict["knight{0}".format(self.knight_player_num)])
self.agent_list.append(self.knight_dict["knight{0}".format(self.knight_player_num)])
if i != self.num_knights - 1:
self.knight_player_num += 1
self.agent_name_mapping = {}
a_count = 0
for i in range(self.num_archers):
a_name = "archer_" + str(i)
self.agents.append(a_name)
self.agent_name_mapping[a_name] = a_count
a_count += 1
for i in range(self.num_knights):
k_name = "knight_" + str(i)
self.agents.append(k_name)
self.agent_name_mapping[k_name] = a_count
a_count += 1
self.draw()
self.frames = 0
def reset(self):
self.has_reset = True
self.agents = self.possible_agents[:]
self._agent_selector.reinit(self.agents)
self.agent_selection = self._agent_selector.next()
self.rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self._cumulative_rewards = {a: 0 for a in self.agents}
self.dones = dict(zip(self.agents, [False for _ in self.agents]))
self.infos = dict(zip(self.agents, [{} for _ in self.agents]))
self.reinit()
# The original code for this game, that was added by Justin Terry, was
# created by Dipam Patel in a different repository (hence the git history)
# Game art purchased from https://finalbossblues.itch.io/time-fantasy-monsters
# and https://finalbossblues.itch.io/icons
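# A minimal interaction sketch (assumptions: this class is exposed through the usual
# PettingZoo AEC wrappers elsewhere in the package, and `env` below is an instance of
# that wrapped environment; the names are illustrative only, not part of this file):
#
#   env.reset()
#   for agent in env.agent_iter():
#       obs, reward, done, info = env.last()
#       env.step(None if done else env.action_spaces[agent].sample())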
|
the-stack_0_27750
|
# Author: True Price <[email protected]>
import sys
from collections import defaultdict
WEIGHT_THRESHOLD = 0.2
##
WEIGHT_THRESHOLD = 1 - WEIGHT_THRESHOLD
# dictionary type that returns zero for missing values
# used here in 'edges' dictionary
class zerodict(dict):
def __missing__(self, k):
return 0
def mcode(filename):
graph = defaultdict(set) # node id => neighboring node ids
edges = defaultdict(zerodict)
# read in graph
print('loading graph...', file=sys.stderr)
with open(filename, 'r') as f:
for line in f:
a,b,w = line.split()[:3]
w = float(w)
graph[a].add(b)
graph[b].add(a)
edges[a][b], edges[b][a] = w, w
# Stage 1: Vertex Weighting
print('vertex weighting...', file=sys.stderr)
weights = dict(
(v,sum(edges[v].values()) / len(edges[v])**2) for v in graph)
for i,v in enumerate(graph):
if i > 0 and i % 1000 == 0:
print(i, '/', len(graph), file=sys.stderr)
neighborhood = set((v,)) | graph[v]
# if node has only one neighbor, we know everything we need to know
if len(neighborhood) <= 2: continue
# valid k-core with the highest weight
k = 2 # already covered k = 1
while True:
invalid_nodes = True
while invalid_nodes and neighborhood:
invalid_nodes = set(
n for n in neighborhood if len(graph[n] & neighborhood) < k)
neighborhood -= invalid_nodes
if not neighborhood: break
# vertex weight = k-core number * density of k-core
weights[v] = max(weights[v],
k * sum(edges[v][n] for n in neighborhood) / len(neighborhood)**2)
k += 1
# Stage 2: Molecular Complex Prediction
print('molecular complex prediction...', file=sys.stderr)
unvisited = set(graph)
num_clusters = 0
for seed in sorted(weights, key=weights.get, reverse=True):
if seed not in unvisited: continue
cluster, frontier = set((seed,)), set((seed,))
w = weights[seed] * WEIGHT_THRESHOLD
while frontier:
cluster.update(frontier)
unvisited -= frontier
frontier = set(
n for n in set.union(*(graph[n] for n in frontier)) & unvisited
if weights[n] > w)
# haircut: only keep 2-core complexes
invalid_nodes = True
while invalid_nodes and cluster:
invalid_nodes = set(n for n in cluster if len(graph[n] & cluster) < 2)
cluster -= invalid_nodes
if cluster:
print(' '.join(cluster))
num_clusters += 1
print(num_clusters, len(cluster), seed, file=sys.stderr)
if not unvisited: break # quit if all nodes visited
if __name__ == '__main__':
mcode(sys.argv[1])
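# Expected input format (an assumption based on the parsing above): a plain-text
# weighted edge list with one "node_a node_b weight" triple per line, e.g.
#
#   A B 0.9
#   B C 0.8
#   A C 0.7
#
# Predicted complexes are printed one per line to stdout; progress messages go to stderr.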
|
the-stack_0_27754
|
#!/usr/bin/env python
# Lint as: python3
# -*- encoding: utf-8 -*-
"""Test the hunt_view interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import traceback
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server import hunt
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr.test_lib import test_lib
class TestHuntView(gui_test_lib.GRRSeleniumHuntTest):
"""Test the Cron view GUI."""
reason = "Felt like it!"
def SetupTestHuntView(self, client_limit=0, client_count=10):
# Create some clients and a hunt to view.
hunt_id = self.CreateSampleHunt(
client_limit=client_limit, client_count=client_count)
self.RunHunt(failrate=2)
self.AddLogToHunt(hunt_id, self.client_ids[0], "TestLogLine")
# Log an error just with some random traceback.
self.AddErrorToHunt(hunt_id, self.client_ids[1], "Client Error 1",
traceback.format_exc())
hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
if client_limit == 0:
self.assertEqual(hunt_counters.num_clients, client_count)
else:
self.assertEqual(hunt_counters.num_clients, min(client_count,
client_limit))
return hunt_id
def testPageTitleReflectsSelectedHunt(self):
hunt_id = self.CreateSampleHunt(stopped=True)
self.Open("/#/hunts")
self.WaitUntilEqual("GRR | Hunts", self.GetPageTitle)
self.Click("css=td:contains('%s')" % hunt_id)
self.WaitUntilEqual("GRR | " + hunt_id, self.GetPageTitle)
def testHuntView(self):
"""Test that we can see all the hunt data."""
hunt_id = self.SetupTestHuntView()
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, hunt_id)
# Select a Hunt.
self.Click("css=td:contains('%s')" % hunt_id)
# Check we can now see the details.
self.WaitUntil(self.IsElementPresent, "css=dl.dl-hunt")
self.WaitUntil(self.IsTextPresent, "Clients Scheduled")
self.WaitUntil(self.IsTextPresent, "Hunt ID")
# Click the Log Tab.
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsTextPresent, "TestLogLine")
# Click the Error Tab.
self.Click("css=li[heading=Errors]")
self.WaitUntil(self.IsTextPresent, "Client Error 1")
def SetupHuntDetailView(self, failrate=2):
"""Create some clients and a hunt to view."""
hunt_id = self.CreateSampleHunt()
self.RunHunt(client_ids=self.client_ids, failrate=failrate)
self.AddErrorToHunt(hunt_id, self.client_ids[1], "Client Error 1",
traceback.format_exc())
return hunt_id
def testHuntClientsView(self):
"""Test the detailed client view works."""
hunt_id = self._CreateHuntWithDownloadedFile()
# Open up and click on View Hunts then the first Hunt.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, hunt_id)
self.Click("css=td:contains('%s')" % hunt_id)
# Click the Overview Tab then the Details Link.
self.Click("css=li[heading=Overview]")
self.WaitUntil(self.IsTextPresent, "Hunt ID")
# Check the Hunt Clients tab.
self.Click("css=li[heading=Clients]")
client_id = self.client_ids[0]
self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s')" % client_id)
self.RequestAndGrantClientApproval(client_id)
# TODO(user): move the code below outside of if as soon as hunt's
# subflows are properly reported in the REL_DB implementation.
self.Click("css=tr:contains('%s') td:nth-of-type(2) a" % client_id)
self.WaitUntil(self.IsTextPresent, "Flow Information")
self.WaitUntil(self.IsTextPresent, self.base_path)
def testHuntOverviewShowsStats(self):
"""Test the detailed client view works."""
hunt_id = self.CreateSampleHunt()
client_id = self.SetupClient(0)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow.RandomFlowId(),
parent_hunt_id=hunt_id,
create_time=rdfvalue.RDFDatetime.Now())
rdf_flow.cpu_time_used.user_cpu_time = 5000
rdf_flow.network_bytes_sent = 1000000
data_store.REL_DB.WriteFlowObject(rdf_flow)
# Open up and click on View Hunts then the first Hunt.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, hunt_id)
self.Click("css=td:contains('%s')" % hunt_id)
# Click the Overview Tab and check that the stats are present.
self.Click("css=li[heading=Overview]")
self.WaitUntil(self.IsTextPresent, "1h 23m 20s")
self.WaitUntil(self.IsTextPresent, "976.6KiB")
def testHuntOverviewGetsUpdatedWhenHuntChanges(self):
hunt_id = self.CreateSampleHunt()
client_id = self.SetupClient(0)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow.RandomFlowId(),
parent_hunt_id=hunt_id,
create_time=rdfvalue.RDFDatetime.Now())
rdf_flow.cpu_time_used.user_cpu_time = 5000
rdf_flow.network_bytes_sent = 1000000
data_store.REL_DB.WriteFlowObject(rdf_flow)
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntOverviewDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.WaitUntil(self.IsTextPresent, "1h 23m 20s")
self.WaitUntil(self.IsTextPresent, "976.6KiB")
client_id = self.SetupClient(1)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow.RandomFlowId(),
parent_hunt_id=hunt_id,
create_time=rdfvalue.RDFDatetime.Now())
rdf_flow.cpu_time_used.user_cpu_time = 1000
rdf_flow.network_bytes_sent = 10000000
data_store.REL_DB.WriteFlowObject(rdf_flow)
self.WaitUntil(self.IsTextPresent, "1h 40m")
self.WaitUntil(self.IsTextPresent, "10.5MiB")
def testHuntOverviewShowsStartAndExpirationTime(self):
duration = rdfvalue.Duration.From(3, rdfvalue.DAYS)
init_start_time = rdfvalue.RDFDatetime.FromHumanReadable("1973-01-01 08:34")
last_start_time = rdfvalue.RDFDatetime.FromHumanReadable("1981-03-04 12:52")
expiration_time = init_start_time + duration
hunt_id = self.CreateHunt(duration=duration)
# Navigate to the hunt view.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, hunt_id)
# Select the hunt.
self.Click("css=td:contains('{}')".format(hunt_id))
self.RequestAndGrantHuntApproval(hunt_id)
self.assertFalse(self.IsTextPresent(str(init_start_time)))
self.assertFalse(self.IsTextPresent(str(expiration_time)))
self.assertFalse(self.IsTextPresent(str(last_start_time)))
with test_lib.FakeTime(init_start_time):
hunt.StartHunt(hunt_id)
self.Refresh()
self.WaitUntil(self.IsTextPresent, str(init_start_time))
self.WaitUntil(self.IsTextPresent, str(expiration_time))
self.assertFalse(self.IsTextPresent(str(last_start_time)))
with test_lib.FakeTime(last_start_time):
hunt.PauseHunt(hunt_id)
hunt.StartHunt(hunt_id)
self.Refresh()
self.WaitUntil(self.IsTextPresent, str(init_start_time))
self.WaitUntil(self.IsTextPresent, str(expiration_time))
self.WaitUntil(self.IsTextPresent, str(last_start_time))
def testHuntListShowsStartAndExpirationTime(self):
hunt_1_start_time = rdfvalue.RDFDatetime.FromHumanReadable("1992-11-11")
hunt_2_start_time = rdfvalue.RDFDatetime.FromHumanReadable("2001-05-03")
hunt_1_duration = rdfvalue.Duration.From(3, rdfvalue.DAYS)
hunt_2_duration = rdfvalue.Duration.From(5, rdfvalue.HOURS)
hunt_1_expiration_time = hunt_1_start_time + hunt_1_duration
hunt_2_expiration_time = hunt_2_start_time + hunt_2_duration
hunt_1_id = self.CreateHunt(duration=hunt_1_duration)
hunt_2_id = self.CreateHunt(duration=hunt_2_duration)
# Navigate to the hunt list.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, hunt_1_id)
self.WaitUntil(self.IsTextPresent, hunt_2_id)
self.assertFalse(self.IsTextPresent(str(hunt_1_start_time)))
self.assertFalse(self.IsTextPresent(str(hunt_1_expiration_time)))
self.assertFalse(self.IsTextPresent(str(hunt_2_start_time)))
self.assertFalse(self.IsTextPresent(str(hunt_2_expiration_time)))
with test_lib.FakeTime(hunt_1_start_time):
hunt.StartHunt(hunt_1_id)
self.Refresh()
self.WaitUntil(self.IsTextPresent, str(hunt_1_start_time))
self.WaitUntil(self.IsTextPresent, str(hunt_1_expiration_time))
self.assertFalse(self.IsTextPresent(str(hunt_2_start_time)))
self.assertFalse(self.IsTextPresent(str(hunt_2_expiration_time)))
with test_lib.FakeTime(hunt_2_start_time):
hunt.StartHunt(hunt_2_id)
self.Refresh()
self.WaitUntil(self.IsTextPresent, str(hunt_1_start_time))
self.WaitUntil(self.IsTextPresent, str(hunt_1_expiration_time))
self.WaitUntil(self.IsTextPresent, str(hunt_2_start_time))
self.WaitUntil(self.IsTextPresent, str(hunt_2_expiration_time))
def testHuntStatsView(self):
hunt_id = self.SetupTestHuntView()
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, hunt_id)
self.Click("css=td:contains('%s')" % hunt_id)
# Click the Stats tab.
self.Click("css=li[heading=Stats]")
self.WaitUntil(self.IsTextPresent, "Total number of clients")
self.WaitUntil(self.IsTextPresent, "10")
self.WaitUntil(self.IsTextPresent, "User CPU mean")
self.WaitUntil(self.IsTextPresent, "5.5")
self.WaitUntil(self.IsTextPresent, "User CPU stddev")
self.WaitUntil(self.IsTextPresent, "2.9")
self.WaitUntil(self.IsTextPresent, "System CPU mean")
self.WaitUntil(self.IsTextPresent, "11")
self.WaitUntil(self.IsTextPresent, "System CPU stddev")
self.WaitUntil(self.IsTextPresent, "5.7")
self.WaitUntil(self.IsTextPresent, "Network bytes sent mean")
self.WaitUntil(self.IsTextPresent, "16.5")
self.WaitUntil(self.IsTextPresent, "Network bytes sent stddev")
self.WaitUntil(self.IsTextPresent, "8.6")
def testHuntNotificationIsShownAndClickable(self):
hunt_id = self.CreateSampleHunt(
path=os.path.join(self.base_path, "test.plist"))
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=#notification_button")
self.Click("css=a:contains('has granted you access')")
self.WaitUntil(self.IsElementPresent,
"css=tr.row-selected td:contains('%s')" % hunt_id)
self.WaitUntil(self.IsTextPresent, hunt_id)
def testLogsTabShowsLogsFromAllClients(self):
hunt_id = self.SetupHuntDetailView(failrate=-1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Log]")
for client_id in self.client_ids:
self.WaitUntil(self.IsTextPresent, client_id)
# TODO(amoser): Get rid of the aff4 prefix here.
self.WaitUntil(
self.IsTextPresent, "File aff4:/%s/%s transferred successfully." %
(client_id, "fs/os/tmp/evil.txt"))
def testLogsTabGetsAutoRefreshed(self):
hunt_id = self.CreateSampleHunt()
self.AddLogToHunt(hunt_id, self.client_ids[0], "foo-log")
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntLogDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-log td:contains('foo-log')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-hunt-log td:contains('bar-log')")
self.AddLogToHunt(hunt_id, self.client_ids[1], "bar-log")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-log td:contains('bar-log')")
def testLogsTabFiltersLogsByString(self):
hunt_id = self.SetupHuntDetailView(failrate=-1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Log]")
self.Type("css=grr-hunt-log input.search-query", self.client_ids[-1])
self.Click("css=grr-hunt-log button:contains('Filter')")
self.WaitUntil(self.IsTextPresent, self.client_ids[-1])
# TODO(amoser): Get rid of the aff4 prefix here.
self.WaitUntil(
self.IsTextPresent, "File aff4:/%s/%s transferred successfully." %
(self.client_ids[-1], "fs/os/tmp/evil.txt"))
for client_id in self.client_ids[:-1]:
self.WaitUntilNot(self.IsTextPresent, client_id)
self.WaitUntilNot(
self.IsTextPresent, "File %s/%s transferred successfully." %
(client_id, "fs/os/tmp/evil.txt"))
def testLogsTabShowsDatesInUTC(self):
hunt_id = self.CreateSampleHunt()
with test_lib.FakeTime(42):
self.AddLogToHunt(hunt_id, self.client_ids[0], "I do log.")
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")
def testErrorsTabShowsErrorsFromAllClients(self):
hunt_id = self.SetupHuntDetailView(failrate=1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Errors]")
for client_id in self.client_ids:
self.WaitUntil(self.IsTextPresent, client_id)
def testErrorsTabGetsAutoRefreshed(self):
hunt_id = self.CreateSampleHunt()
self.AddErrorToHunt(hunt_id, self.client_ids[0], "foo-error",
traceback.format_exc())
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntErrorsDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Errors]")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-errors td:contains('foo-error')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-hunt-errors td:contains('bar-error')")
self.AddErrorToHunt(hunt_id, self.client_ids[0], "bar-error",
traceback.format_exc())
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-errors td:contains('bar-error')")
def testErrorsTabShowsDatesInUTC(self):
hunt_id = self.CreateSampleHunt()
with test_lib.FakeTime(42):
self.AddErrorToHunt(hunt_id, self.client_ids[0], "Client Error 1",
traceback.format_exc())
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Errors]")
self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")
def testErrorsTabFiltersErrorsByString(self):
hunt_id = self.SetupHuntDetailView(failrate=1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Errors]")
self.Type("css=grr-hunt-errors input.search-query", self.client_ids[-1])
self.Click("css=grr-hunt-errors button:contains('Filter')")
self.WaitUntil(self.IsTextPresent, self.client_ids[-1])
for client_id in self.client_ids[:-1]:
self.WaitUntilNot(self.IsTextPresent, client_id)
def testCrashesTabShowsNoErrorWhenCrashesAreMissing(self):
hunt_id = self.SetupHuntDetailView()
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Crashes]")
self.WaitUntilNot(self.IsTextPresent, "Loading...")
self.WaitUntilNot(self.IsVisible, "css=button#show_backtrace")
def testCrashesTabGetsAutoRefreshed(self):
client_ids = self.SetupClients(2)
hunt_id = self.StartHunt()
self.RunHuntWithClientCrashes([client_ids[0]])
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntCrashesDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Crashes]")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-crashes td:contains('%s')" % client_ids[0])
self.WaitUntilNot(self.IsElementPresent,
"css=grr-hunt-crashes td:contains('%s')" % client_ids[1])
self.RunHuntWithClientCrashes([client_ids[1]])
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-crashes td:contains('%s')" % client_ids[1])
def testShowsResultsTabForIndividualFlowsOnClients(self):
# Create and run the hunt.
self.CreateSampleHunt(stopped=False)
self.RunHunt(client_ids=self.client_ids, failrate=-1)
self.RequestAndGrantClientApproval(self.client_ids[0])
self.Open("/#c=" + self.client_ids[0])
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=grr-client-flows-list tr:contains('GetFile')")
self.Click("css=li[heading=Results]")
# This is to check that no exceptions happened when we tried to display
# results.
self.WaitUntilNot(self.IsTextPresent, "Loading...")
def testClientsTabShowsCompletedAndOutstandingClients(self):
# Create some clients and a hunt to view.
hunt_id = self.CreateSampleHunt()
# Run the hunt on half the clients.
finished_client_ids = self.client_ids[5:]
outstanding_client_ids = self.client_ids[:5]
self.AssignTasksToClients(client_ids=outstanding_client_ids)
self.RunHunt(failrate=2, client_ids=finished_client_ids)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Clients]")
self.Click("css=label[name=ShowCompletedClients]")
for client_id in finished_client_ids:
self.WaitUntilContains(client_id, self.GetText, "css=.tab-content")
self.Click("css=label[name=ShowOutstandingClients]")
for client_id in outstanding_client_ids:
self.WaitUntilContains(client_id, self.GetText, "css=.tab-content")
def testContextTabShowsHuntContext(self):
# Create some clients and a hunt to view.
hunt_id = self.CreateSampleHunt()
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading='Context Details']")
# Check for different context properties.
self.WaitUntilContains(
hunt_id, self.GetText,
"css=table > tbody td.proto_key:contains(\"Session id\") "
"~ td.proto_value")
self.WaitUntilContains(
self.test_username, self.GetText,
"css=table > tbody td.proto_key:contains(\"Creator\") "
"~ td.proto_value")
def testHuntCreatorIsNotifiedWhenHuntIsStoppedDueToCrashes(self):
hunt_id = self.StartHunt(crash_limit=3, creator=self.test_username)
# Run the hunt on 3 clients, one by one. Crash detection check happens
# when client is scheduled, so it's important to schedule the clients
# one by one in the test.
for client_id in self.SetupClients(3):
self.RunHuntWithClientCrashes([client_id])
self.Open("/")
# Wait until the notification is there and show the notifications list.
self.WaitUntilEqual("1", self.GetText, "css=button[id=notification_button]")
self.Click("css=button[id=notification_button]")
# Click on the "hunt [id] reached the crashes limit" notification.
self.Click("css=td:contains(Hunt %s reached the crashes limit)" % hunt_id)
# Clicking on notification should shown the hunt's overview page.
self.WaitUntil(self.IsTextPresent, "/tmp/evil.txt")
# TODO(user): display hunt.hunt_state_comment in the UI.
if __name__ == "__main__":
app.run(test_lib.main)
|
the-stack_0_27755
|
""" tornado handler for managing and communicating with language servers
"""
from typing import Optional, Text
from notebook.base.handlers import IPythonHandler
from notebook.base.zmqhandlers import WebSocketHandler, WebSocketMixin
from notebook.utils import url_path_join as ujoin
from .manager import LanguageServerManager
from .schema import SERVERS_RESPONSE
class BaseHandler(IPythonHandler):
manager = None # type: LanguageServerManager
def initialize(self, manager: LanguageServerManager):
self.manager = manager
class LanguageServerWebSocketHandler(WebSocketMixin, WebSocketHandler, BaseHandler):
""" Setup tornado websocket to route to language server sessions
"""
language_server = None # type: Optional[Text]
def open(self, language_server):
self.language_server = language_server
self.manager.subscribe(self)
self.log.debug("[{}] Opened a handler".format(self.language_server))
async def on_message(self, message):
self.log.debug("[{}] Handling a message".format(self.language_server))
await self.manager.on_client_message(message, self)
def on_close(self):
self.manager.unsubscribe(self)
self.log.debug("[{}] Closed a handler".format(self.language_server))
class LanguageServersHandler(BaseHandler):
""" Reports the status of all current servers
Response should conform to schema in schema/servers.schema.json
"""
validator = SERVERS_RESPONSE
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
def get(self):
""" finish with the JSON representations of the sessions
"""
response = {
"version": 2,
"sessions": {
language_server: session.to_json()
for language_server, session in self.manager.sessions.items()
},
}
errors = list(self.validator.iter_errors(response))
if errors: # pragma: no cover
            self.log.warning("%s validation errors: %s", len(errors), errors)
self.finish(response)
def add_handlers(nbapp):
""" Add Language Server routes to the notebook server web application
"""
lsp_url = ujoin(nbapp.base_url, "lsp")
re_langservers = "(?P<language_server>.*)"
opts = {"manager": nbapp.language_server_manager}
nbapp.web_app.add_handlers(
".*",
[
(ujoin(lsp_url, "status"), LanguageServersHandler, opts),
(
ujoin(lsp_url, "ws", re_langservers),
LanguageServerWebSocketHandler,
opts,
),
],
)
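    # With the defaults above (and assuming base_url == "/") the routes end up as:
    #   /lsp/status               -> LanguageServersHandler (JSON status of all sessions)
    #   /lsp/ws/<language_server> -> LanguageServerWebSocketHandler (LSP websocket traffic)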
|
the-stack_0_27756
|
"""Golden tests cases for testing liquid's built-in `size` filter."""
from liquid.golden.case import Case
cases = [
Case(
description="size of an array",
template=r"{{ a | size }}",
globals={"a": ["a", "b", "c"]},
expect="3",
),
Case(
description="size of a string",
template=r"{{ a | size }}",
globals={"a": "abc"},
expect="3",
),
Case(
description="size of an empty array",
template=r"{{ a | size }}",
globals={"a": []},
expect="0",
),
Case(
description="size of a hash",
template=r"{{ a | size }}",
globals={"a": {"a": 1, "b": 2}},
expect="2",
),
Case(
description="unexpected argument",
template=r"{{ a | size: 'foo' }}",
globals={"a": [1, 2, 3]},
expect="",
error=True,
),
Case(
description="undefined left value",
template=r"{{ nosuchthing | size }}",
expect="0",
),
]
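# A minimal, self-contained harness sketch for these cases (assumptions: the `liquid`
# package's Environment / from_string / render API is available, and the default
# environment matches the behaviour these golden cases were written against).
if __name__ == "__main__":  # pragma: no cover
    from liquid import Environment

    env = Environment()
    for case in cases:
        try:
            rendered = env.from_string(case.template).render(**(case.globals or {}))
            status = "ok" if rendered == case.expect else "mismatch: %r" % rendered
        except Exception as err:
            status = "ok (raised)" if case.error else "unexpected error: %r" % err
        print("%s -> %s" % (case.description, status))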
|
the-stack_0_27758
|
import cv2
import numpy as np
# import sys
# sys.path.append("..")
# FUNCTIONS
# ---------------------------------------------------------------------
drawing = False # true if mouse is pressed
mode = True # if true, draw rectangle
refPt1 = np.array([0, 0])
refPt2 = np.array([0, 0])
refPt3 = np.array([0, 0])
def draw_rectangle(event, x, y, flags, param):
global refPt1, refPt2, refPt3, rgb_ave, drawing, mode
# if the left mouse button was clicked, record the starting
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
refPt1[0] = x
refPt1[1] = y
print('x = %d, y = %d press' % (x, y))
# https://stackoverflow.com/questions/50234485/drawing-rectangle-in-opencv-python/50235566
elif event == cv2.EVENT_MOUSEMOVE:
if drawing is True:
refPt2[0] = x
refPt2[1] = y
roimouse = np.array(img1[refPt1[0]:refPt2[0], refPt1[1]:refPt2[1]])
print("ROI mouse: ", roimouse)
print("ROI size", roimouse.shape)
            # incorporate empty-case protection!!
            # note: OpenCV images are BGR, so channel 0/1/2 are blue/green/red
            blue = roimouse[0:refPt2[0]-refPt1[0], 0:refPt2[1]-refPt1[1], 0].mean()
            green = roimouse[0:refPt2[0]-refPt1[0], 0:refPt2[1]-refPt1[1], 1].mean()
            red = roimouse[0:refPt2[0]-refPt1[0], 0:refPt2[1]-refPt1[1], 2].mean()
            rgb_ave = np.array([blue, green, red])  # kept in BGR channel order for cv2 drawing calls
# incorporate reverse drag protection
# if x1 > x2 then one way
cv2.rectangle(img1, (refPt1[0], refPt1[1]), (x, y), (255, 255, 255), 3)
if refPt2[0] != x | refPt2[1] != y:
cv2.rectangle(img1, (refPt1[0], refPt1[1]), (refPt2[0], refPt2[1]), (rgb_ave), -1)
# else:
# cv2.circle(img1, (x, y), 5, (0, 0, 0), -1)
if event == cv2.EVENT_LBUTTONUP:
if mode is True:
refPt3[0] = x
refPt3[1] = y
roimouse = np.array(img1[refPt1[0]:refPt2[0], refPt1[1]:refPt2[1]])
print("ROI mouse: ", roimouse)
print("ROI size", roimouse.shape)
            blue = roimouse[0:refPt3[0]-refPt1[0], 0:refPt3[1]-refPt1[1], 0].mean()
            green = roimouse[0:refPt3[0]-refPt1[0], 0:refPt3[1]-refPt1[1], 1].mean()
            red = roimouse[0:refPt3[0]-refPt1[0], 0:refPt3[1]-refPt1[1], 2].mean()
            rgb_ave = np.array([blue, green, red])  # BGR channel order
            print("Ave BGR: ", rgb_ave)
cv2.rectangle(img1, (refPt1[0], refPt1[1]), (refPt3[0], refPt3[1]), (rgb_ave), -1)
# else:
# cv2.circle(img1, (x, y), 5, (0, 0, 255), -1)
cv2.imshow('Toulouse Brick', img1)
drawing = False
print('x = %d, y = %d release' % (x, y))
# xstart = refPt1[0]
# ystart = refPt1[1]
#
# xfinish = refPt2[0]
# yfinish = refPt2[1]
# ---------------------------------------------------------------------
# MAIN
# ---------------------------------------------------------------------
# read image from file
# dims = 436 × 1026
img1 = cv2.imread(r"/Users/thomaslloyd/Desktop/toulousebrick.png",
cv2.IMREAD_COLOR)
# ^^ add specific file type for ideal analysis
img2 = np.array(cv2.imread(r"/Users/thomaslloyd/Desktop/toulousebrick.png",
cv2.IMREAD_COLOR))
# parameterise image to ideal color space
rows, cols, channels = img2.shape
print("rows:", rows)
print("columns:", cols)
print("channels:", channels)
print(img1)
# create rectangle of color palette
# present image on screen with roi selected
cv2.namedWindow('Toulouse Brick', cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback('Toulouse Brick', draw_rectangle)
# cv2.moveWindow('Toulouse Brick', 300, -300)
# draw rectangle around pixels desired
# cv2.rectangle(img1, (x_start+2, y_start+2), (x_finish-2, y_finish-2), (rgb_ave), -1)
# draw rectangle around pixels desired
# cv2.rectangle(img1, (x_start, y_start), (x_finish, y_finish), (255, 0, 0), 2)
while(1):
cv2.imshow('Toulouse Brick', img1)
cv2.waitKey(0)
break
cv2.destroyAllWindows()
|
the-stack_0_27759
|
"""
The state is central in torchbearer, storing all of the relevant intermediate values that may be changed or replaced
during model fitting. This module defines classes for interacting with state and all of the built in state keys used
throughout torchbearer. The :func:`state_key` function can be used to create custom state keys for use in callbacks or
metrics.
Example: ::
from torchbearer import state_key
MY_KEY = state_key('my_test_key')
"""
from torchbearer import Metric
import warnings
__keys__ = []
def state_key(key):
"""Computes and returns a non-conflicting key for the state dictionary when given a seed key
Args:
key (str): The seed key - basis for new state key
Returns:
:class:`.StateKey`: New state key
"""
return StateKey(key)
class StateKey(Metric):
""" StateKey class that is a unique state key based on the input string key. State keys are also metrics which
retrieve themselves from state.
Args:
key (str): Base key
"""
def __init__(self, key):
self.key = self._gen_key_(key)
super(StateKey, self).__init__(self.key)
def process(self, state):
return {self.name: state[self]}
def process_final(self, state):
return {self.name: state[self]}
def _gen_key_(self, key):
if key in __keys__:
count = 1
my_key = key + '_' + str(count)
while my_key in __keys__:
count += 1
my_key = key + '_' + str(count)
key = my_key
__keys__.append(key)
return key
def __repr__(self):
return self.key
def __str__(self):
return self.key
def __eq__(self, other):
return self.key == str(other)
def __hash__(self):
return self.key.__hash__()
class State(dict):
"""
State dictionary that behaves like a python dict but accepts StateKeys
"""
def __init__(self):
super(State, self).__init__()
def get_key(self, statekey):
if isinstance(statekey, str):
warnings.warn("State was accessed with a string: {}, generate keys with StateKey(str).".format(statekey), stacklevel=3)
return statekey
def __getitem__(self, key):
return super(State, self).__getitem__(self.get_key(key))
def __setitem__(self, key, val):
super(State, self).__setitem__(self.get_key(key), val)
def __delitem__(self, val):
super(State, self).__delitem__(val)
def __contains__(self, o):
return super(State, self).__contains__(self.get_key(o))
def update(self, d):
new_dict = {}
for key in d:
new_dict[self.get_key(key)] = d[key]
super(State, self).update(new_dict)
#: The torchbearer version
VERSION = state_key('torchbearer_version')
#: The PyTorch module / model that will be trained
MODEL = state_key('model')
#: The criterion to use when model fitting
CRITERION = state_key('criterion')
#: The optimizer to use when model fitting
OPTIMIZER = state_key('optimizer')
#: The device currently in use by the :class:`.Trial` and PyTorch model
DEVICE = state_key('device')
#: The data type of tensors in use by the model, match this to avoid type issues
DATA_TYPE = state_key('dtype')
#: The list of metrics in use by the :class:`.Trial`
METRIC_LIST = state_key('metric_list')
#: The metric dict from the current batch of data
METRICS = state_key('metrics')
#: A self reference to the Trial object for persistence etc.
SELF = state_key('self')
#: The current epoch number
EPOCH = state_key('epoch')
#: The total number of epochs to run for
MAX_EPOCHS = state_key('max_epochs')
#: The string name of the current data
DATA = state_key('data')
#: The current data generator (DataLoader)
GENERATOR = state_key('generator')
#: The current iterator
ITERATOR = state_key('iterator')
#: The current number of steps per epoch
STEPS = state_key('steps')
#: The train data generator in the Trial object
TRAIN_GENERATOR = state_key('train_generator')
#: The number of train steps to take
TRAIN_STEPS = state_key('train_steps')
#: The flag representing train data
TRAIN_DATA = state_key('train_data')
#: The validation data generator in the Trial object
VALIDATION_GENERATOR = state_key('validation_generator')
#: The number of validation steps to take
VALIDATION_STEPS = state_key('validation_steps')
#: The flag representing validation data
VALIDATION_DATA = state_key('validation_data')
#: The test data generator in the Trial object
TEST_GENERATOR = state_key('test_generator')
#: The number of test steps to take
TEST_STEPS = state_key('test_steps')
#: The flag representing test data
TEST_DATA = state_key('test_data')
#: A flag that can be set to true to stop the current fit call
STOP_TRAINING = state_key('stop_training')
#: The current batch of ground truth data
TARGET = Y_TRUE = state_key('y_true')
#: The current batch of predictions
PREDICTION = Y_PRED = state_key('y_pred')
#: The current batch of inputs
INPUT = X = state_key('x')
#: The sampler which loads data from the generator onto the correct device
SAMPLER = state_key('sampler')
#: The current value for the loss
LOSS = state_key('loss')
#: The key which maps to the predictions over the dataset when calling predict
FINAL_PREDICTIONS = state_key('final_predictions')
#: The current batch number
BATCH = state_key('t')
#: The timings keys used by the timer callback
TIMINGS = state_key('timings')
#: The :class:`.CallbackList` object which is called by the Trial
CALLBACK_LIST = state_key('callback_list')
#: The history list of the Trial instance
HISTORY = state_key('history')
#: The optional arguments which should be passed to the backward call
BACKWARD_ARGS = state_key('backward_args')
# Legacy
VALIDATION_ITERATOR = 'validation_iterator'
TRAIN_ITERATOR = 'train_iterator'
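# A small usage sketch (assumption: this module is importable in a normal torchbearer
# install; the demo only exercises the key/state machinery defined above).
if __name__ == "__main__":  # pragma: no cover
    MY_KEY = state_key('my_demo_key')
    state = State()
    state[MY_KEY] = 42
    assert MY_KEY in state and state[MY_KEY] == 42
    # StateKeys double as metrics that read themselves back out of state
    assert MY_KEY.process(state) == {str(MY_KEY): 42}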
|
the-stack_0_27760
|
import wamptest
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.wamp.exception import ApplicationError
class ExampleTestCase1(wamptest.TestCase):
def __init__(self, *args, **kwargs):
super(ExampleTestCase1, self).__init__(*args, **kwargs)
self.update = None
@inlineCallbacks
def test_1(self):
result = yield self.call("test.add", 1, 2)
self.assertEqual(3, result)
def receive_update(self, update=None):
self.update = update
@inlineCallbacks
def test_2(self):
self.subscribe(self.receive_update, topic="test.trigger.update")
yield self.call("test.trigger")
yield sleep(2)
self.assertEqual("test", self.update)
class ExampleTestCase2(wamptest.TestCase):
def __init__(self, *args, **kwargs):
super(ExampleTestCase2, self).__init__(*args, **kwargs)
self.update = None
@inlineCallbacks
def test_1(self):
result = yield self.call("test.add", 1, 2)
self.assertEqual(3, result)
@inlineCallbacks
def test_2(self):
with self.assertRaises(ApplicationError) as context:
yield self.call("test.trigger.error")
if __name__ == '__main__':
errors = wamptest.wamptest.main(
test_cases=[ExampleTestCase1, ExampleTestCase2],
url=u"ws://router:8080/ws",
realm=u"realm1"
)
exit(errors)
|
the-stack_0_27761
|
import requests
import time
import progressbar
import json
import os
import sys
from retrying import retry
from planet.api.utils import read_planet_json
from planet.api.auth import find_api_key
sz=[]
fname=[]
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def humansize(nbytes):
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
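# e.g. humansize(1048576) -> '1 MB' (each step divides by 1024, so these are binary multiples)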
#Get Planet API and Authenticate SESSION
try:
PL_API_KEY = find_api_key()
except:
print('Failed to get Planet Key')
sys.exit()
SESSION = requests.Session()
SESSION.auth = (PL_API_KEY, '')
#Get the redirects and ordersize
@retry(
wait_exponential_multiplier=1000,
wait_exponential_max=10000)
def parsesize(url):
mfsize=[]
mfname=[]
result=SESSION.get(url)
if result.status_code==200:
r=result.content
inp=json.loads(r)
for things in inp['files']:
mfname.append(things['path'])
mfsize.append(things['size'])
return (len(mfname),sum(mfsize))
#time.sleep(0.3)
#print('Total of '+str(len(fname)/3)+' items has filesize of '+str(humansize(sum(sz))))
elif result.status_code == 429:
raise Exception("rate limit error")
    elif result.status_code not in (429, 200):
        return result.status_code
#
def ordersize(url):
response=SESSION.get(url).json()
print("Polling ...")
while response['state']=='queued' or response['state']=='running' or response['state']=='starting':
bar = progressbar.ProgressBar()
for z in bar(range(60)):
time.sleep(1)
response=SESSION.get(url).json()
if response['state']=='success':
for items in response['_links']['results']:
if items['name'].endswith('manifest.json'):
url=(items['location'])
#print(url)
try:
name,size=parsesize(url)
sz.append(size)
fname.append(name)
except:
error_code=parsesize(url)
print('Order has expired or exited with error '+str(error_code))
sys.exit()
print('Total of '+str(len(fname))+' download objects with download size of '+str(humansize(sum(sz))))
else:
print('Order Failed with state: '+str(response['state']))
#ordersize(url='https://api.planet.com/compute/ops/orders/v2/6433d78f-c695-4763-b68b-01f2ef2ccb9c')
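# A minimal command-line entry point sketch (assumption: the caller passes a full
# orders API URL like the commented example above):
#
# if __name__ == '__main__':
#     ordersize(url=sys.argv[1])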
|
the-stack_0_27763
|
#
# PySNMP MIB module OLD-CISCO-XNS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/OLD-CISCO-XNS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:32:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
temporary, = mibBuilder.importSymbols("CISCO-SMI", "temporary")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Counter64, NotificationType, Gauge32, iso, Integer32, IpAddress, ModuleIdentity, Unsigned32, ObjectIdentity, Bits, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter64", "NotificationType", "Gauge32", "iso", "Integer32", "IpAddress", "ModuleIdentity", "Unsigned32", "ObjectIdentity", "Bits", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tmpxns = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 3, 2))
xnsInput = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsInput.setStatus('mandatory')
if mibBuilder.loadTexts: xnsInput.setDescription('Total input count of number of XNS packets.')
xnsLocal = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsLocal.setStatus('mandatory')
if mibBuilder.loadTexts: xnsLocal.setDescription('Total count of XNS input packets for this host.')
xnsBcastin = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsBcastin.setStatus('mandatory')
if mibBuilder.loadTexts: xnsBcastin.setDescription('Total count of number of XNS input broadcast packets.')
xnsForward = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsForward.setStatus('mandatory')
if mibBuilder.loadTexts: xnsForward.setDescription('Total count of number of XNS packets forwarded.')
xnsBcastout = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsBcastout.setStatus('mandatory')
if mibBuilder.loadTexts: xnsBcastout.setDescription('Total count of number of XNS output broadcast packets.')
xnsErrin = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsErrin.setStatus('mandatory')
if mibBuilder.loadTexts: xnsErrin.setDescription('Total count of number of XNS Error input packets.')
xnsErrout = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsErrout.setStatus('mandatory')
if mibBuilder.loadTexts: xnsErrout.setDescription('Total count of number of XNS Error output packets.')
xnsFormerr = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsFormerr.setStatus('mandatory')
if mibBuilder.loadTexts: xnsFormerr.setDescription('Total count of number of XNS input packets with header errors.')
xnsChksum = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsChksum.setStatus('mandatory')
if mibBuilder.loadTexts: xnsChksum.setDescription('Total count of number of XNS input packets with checksum errors.')
xnsNotgate = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsNotgate.setStatus('mandatory')
if mibBuilder.loadTexts: xnsNotgate.setDescription('Total count of number of XNS input packets received while not routing.')
xnsHopcnt = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsHopcnt.setStatus('mandatory')
if mibBuilder.loadTexts: xnsHopcnt.setDescription('Total count of number of XNS input packets that have exceeded the maximum hop count.')
xnsNoroute = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsNoroute.setStatus('mandatory')
if mibBuilder.loadTexts: xnsNoroute.setDescription('Total count of number of XNS packets dropped due to no route.')
xnsNoencap = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsNoencap.setStatus('mandatory')
if mibBuilder.loadTexts: xnsNoencap.setDescription('Total count of number of XNS packets dropped due to output encapsulation failure.')
xnsOutput = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsOutput.setStatus('mandatory')
if mibBuilder.loadTexts: xnsOutput.setDescription('Total count of number of XNS output packets.')
xnsInmult = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsInmult.setStatus('mandatory')
if mibBuilder.loadTexts: xnsInmult.setDescription('Total count of number of XNS input multicast packets.')
xnsUnknown = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsUnknown.setStatus('mandatory')
if mibBuilder.loadTexts: xnsUnknown.setDescription('Total count of number of unknown XNS input packets.')
xnsFwdbrd = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsFwdbrd.setStatus('mandatory')
if mibBuilder.loadTexts: xnsFwdbrd.setDescription('Total count of number of XNS broadcast packets forwarded.')
xnsEchoreqin = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsEchoreqin.setStatus('mandatory')
if mibBuilder.loadTexts: xnsEchoreqin.setDescription('Total count of number of XNS Echo request packets received.')
xnsEchoreqout = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsEchoreqout.setStatus('mandatory')
if mibBuilder.loadTexts: xnsEchoreqout.setDescription('Total count of number of XNS Echo request packets sent.')
xnsEchorepin = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsEchorepin.setStatus('mandatory')
if mibBuilder.loadTexts: xnsEchorepin.setDescription('Total count of number of XNS Echo reply packets received.')
xnsEchorepout = MibScalar((1, 3, 6, 1, 4, 1, 9, 3, 2, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xnsEchorepout.setStatus('mandatory')
if mibBuilder.loadTexts: xnsEchorepout.setDescription('Total count of number of XNS Echo reply packets sent.')
mibBuilder.exportSymbols("OLD-CISCO-XNS-MIB", xnsInput=xnsInput, xnsEchorepin=xnsEchorepin, xnsEchorepout=xnsEchorepout, xnsNoencap=xnsNoencap, xnsLocal=xnsLocal, xnsNotgate=xnsNotgate, xnsChksum=xnsChksum, xnsEchoreqin=xnsEchoreqin, xnsEchoreqout=xnsEchoreqout, xnsFormerr=xnsFormerr, xnsFwdbrd=xnsFwdbrd, xnsHopcnt=xnsHopcnt, xnsBcastout=xnsBcastout, xnsInmult=xnsInmult, tmpxns=tmpxns, xnsOutput=xnsOutput, xnsErrin=xnsErrin, xnsErrout=xnsErrout, xnsNoroute=xnsNoroute, xnsUnknown=xnsUnknown, xnsBcastin=xnsBcastin, xnsForward=xnsForward)
|
the-stack_0_27764
|
import json
from app.utilities.json import json_loads
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.questionnaire import SUBMIT_URL_PATH
class TestQuestionnaireEndpoints(IntegrationTestCase):
BASE_URL = "/questionnaire"
def test_invalid_section_id_raises_404(self):
# Given
self.launchSurvey("test_hub_and_spoke")
# When I navigate to the url for a section that does not exist
self.get(f"{self.BASE_URL}/sections/invalid-section/")
# Then I am shown a 404 page
self.assertStatusNotFound()
def test_get_invalid_questionnaire_location_raises_404(self):
# Given
self.launchSurvey("test_introduction")
# When
self.get(f"{self.BASE_URL}/test")
# Then
self.assertStatusNotFound()
def test_post_invalid_questionnaire_location_raises_404(self):
# Given
self.launchSurvey("test_introduction")
# When
self.post(url=f"{self.BASE_URL}/test")
# Then I am shown a 404 page
self.assertStatusNotFound()
def test_post_on_questionnaire_route_without_hub_redirects_to_first_incomplete_location(
self,
):
# Given
self.launchSurvey("test_textfield")
# When
        self.post(url=f"{self.BASE_URL}/")
# Then
self.assertInUrl("name-block")
def test_get_thank_you_data_not_deleted_when_questionnaire_is_not_complete(self):
# Given we start a survey
self.launchSurvey("test_percentage", roles=["dumper"])
self.post({"answer": "99"})
# When we request the thank you page (without submitting the survey)
self.get("submitted/thank-you")
# Then the answers are not deleted
self.get("/dump/debug")
answers = json_loads(self.getResponseData())
self.assertEqual(1, len(answers["ANSWERS"]))
def test_get_thank_you_raises_404_when_questionnaire_is_not_complete(self):
# Given we start a survey
self.launchSurvey("test_percentage", roles=["dumper"])
# When we request the thank you page (without submitting the survey)
self.get("submitted/thank-you")
# Then we are shown a 404 page
self.assertStatusNotFound()
def test_data_is_deleted_on_submission(self):
# Given we submit a survey
self.launchSurvey("test_percentage", roles=["dumper"])
self.post({"answer": "99"})
self.post()
# When we start the survey again
self.launchSurvey("test_percentage", roles=["dumper"])
# Then no answers should have persisted
self.get("/dump/debug")
answers = json_loads(self.getResponseData())
self.assertEqual(0, len(answers["ANSWERS"]))
def test_when_on_thank_you_get_thank_you_returns_thank_you(self):
# Given we complete the test_percentage survey and are on the thank you page
self.launchSurvey("test_percentage", roles=["dumper"])
self.post({"answer": "99"})
self.post()
# When we try to get the thank-you page
self.get("submitted/thank-you")
# Then we get the thank-you page
self.assertInUrl("submitted/thank-you")
def test_questionnaire_not_accessible_once_submitted(self):
# Given we have submitted the test_percentage survey
self.launchSurvey("test_percentage", roles=["dumper"])
self.post({"answer": "99"})
self.post()
# When we try to access the submitted questionnaire
self.get(url=SUBMIT_URL_PATH)
# Then we get the unauthorised page
self.assertStatusUnauthorised()
def test_when_no_session_thank_you_returns_unauthorised(self):
# When we try to request the thank-you page with no session
self.get(url="submitted/thank-you")
# Then we get the unauthorised page
self.assertStatusUnauthorised()
|
the-stack_0_27765
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import hardcheck
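# This example builds a top-level 'blinkled' module that copies the ports and parameters
# of 'sub_blinkled', adds an inverted copy of its LED output (INV_LED) and instantiates the
# sub-module; the resulting design is then passed through hardcheck.convert() and emitted
# as Verilog in the __main__ block below.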
def mkLed():
m = Module('blinkled')
subled = mkSubLed()
ports = m.copy_ports(subled)
params = m.copy_params(subled)
clk = ports['CLK']
rst = ports['RST']
led = ports['LED']
inv_led = m.OutputRegLike(led, name='INV_LED')
m.Always(Posedge(clk))(
If(rst)(
inv_led(0)
).Else(
inv_led(Unot(led))
)
)
m.Instance(subled, 'inst_subled',
params=m.connect_params(subled),
ports=m.connect_ports(subled))
return m
def mkSubLed():
m = Module('sub_blinkled')
width = m.Parameter('WIDTH', 8)
clk = m.Input('CLK')
rst = m.Input('RST')
led = m.OutputReg('LED', width)
count = m.Reg('count', 32)
m.Always(Posedge(clk))(
If(rst)(
count(0)
).Else(
If(count == 1023)(
count(0)
).Else(
count(count + 1)
)
))
m.Always(Posedge(clk))(
If(rst)(
led(0)
).Else(
If(count == 1024 - 1)(
led(led + 1)
)
))
return m
if __name__ == '__main__':
orig = mkLed()
#orig_verilog = orig.to_verilog()
#print(orig_verilog)
conv = hardcheck.convert(orig)
conv_verilog = conv.to_verilog()
print(conv_verilog)
|
the-stack_0_27768
|
import os
import cv2
import glob
import numpy as np
from libs.utils.data_utils import load_labels
def save_single_video(video, csv, out_path, gestures):
# get labels
name = video.split("/")[-1].split(".")[0]
csv = os.path.join(csv, name + ".csv")
labels = load_labels(csv)
# get video properties
cap = cv2.VideoCapture(video)
fps = int(cap.get(cv2.CAP_PROP_FPS))
frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
subtitle_width = 840
assert frames == len(labels), \
"length of video and label must be equal, but get {} != {}".format(frames, len(labels))
out_path = os.path.join(out_path, name + ".mp4")
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
new_video = cv2.VideoWriter(out_path, fourcc, fps, (width + subtitle_width, height))
frame_count = 0
while cap.isOpened():
rec, frame = cap.read()
if rec:
label = labels[frame_count]
gesture = gestures[label] # english name of gesture
bar = np.zeros((height, subtitle_width, 3), dtype=np.uint8)
cv2.putText(bar, gesture, (50, 400), cv2.FONT_HERSHEY_SIMPLEX,
2, (255, 255, 255), thickness=2)
new_frame = np.concatenate((frame, bar), axis=1)
new_video.write(new_frame)
frame_count += 1
print("The {}th frame saved.".format(frame_count))
else:
break
cap.release()
new_video.release()
print("New video has been saved at {}.".format(out_path))
def save_all_videos(video_dir, csv_dir, out_path, gestures, v_type):
videos = glob.glob(os.path.join(video_dir, "*.{}".format(v_type)))
for video in videos:
save_single_video(video, csv_dir, out_path, gestures)
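# Example call (a sketch; the directory paths, gesture mapping and video extension below
# are illustrative assumptions, not values shipped with this module):
#   gestures = {0: "no_gesture", 1: "swipe_left", 2: "swipe_right"}
#   save_all_videos("videos/", "labels/", "annotated/", gestures, v_type="avi")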
|
the-stack_0_27770
|
########################################################################
#
# Functions for downloading the CIFAR-10 data-set from the internet
# and loading it into memory.
#
# Implemented in Python 3.5
#
# Usage:
# 1) Set the variable data_path with the desired storage path.
# 2) Call maybe_download_and_extract() to download the data-set
# if it is not already located in the given data_path.
# 3) Call load_class_names() to get an array of the class-names.
# 4) Call load_training_data() and load_test_data() to get
# the images, class-numbers and one-hot encoded class-labels
# for the training-set and test-set.
# 5) Use the returned data in your own program.
#
# Format:
# The images for the training- and test-sets are returned as 4-dim numpy
# arrays each with the shape: [image_number, height, width, channel]
# where the individual pixels are floats between 0.0 and 1.0.
#
########################################################################
#
# This file is part of the TensorFlow Tutorials available at:
#
# https://github.com/Hvass-Labs/TensorFlow-Tutorials
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2016 by Magnus Erik Hvass Pedersen
#
########################################################################
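#
# Example of the usage described above (a sketch; assumes this module is importable
# as `cifar10` and that the companion `download` and `dataset` modules are on the path):
#
#   import cifar10
#   cifar10.maybe_download_and_extract()
#   class_names = cifar10.load_class_names()
#   images_train, cls_train, labels_train = cifar10.load_training_data()
#   images_test, cls_test, labels_test = cifar10.load_test_data()
#
########################################################################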
import numpy as np
import pickle
import os
import download
from dataset import one_hot_encoded
########################################################################
# Directory where you want to download and save the data-set.
# Set this before you start calling any of the functions below.
data_path = "data/CIFAR-10/"
# URL for the data-set on the internet.
data_url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
########################################################################
# Various constants for the size of the images.
# Use these constants in your own program.
# Width and height of each image.
img_size = 32
# Number of channels in each image, 3 channels: Red, Green, Blue.
num_channels = 3
# Length of an image when flattened to a 1-dim array.
img_size_flat = img_size * img_size * num_channels
# Number of classes.
num_classes = 10
########################################################################
# Various constants used to allocate arrays of the correct size.
# Number of files for the training-set.
_num_files_train = 5
# Number of images for each batch-file in the training-set.
_images_per_file = 10000
# Total number of images in the training-set.
# This is used to pre-allocate arrays for efficiency.
_num_images_train = _num_files_train * _images_per_file
########################################################################
# Private functions for downloading, unpacking and loading data-files.
def _get_file_path(filename=""):
"""
Return the full path of a data-file for the data-set.
If filename=="" then return the directory of the files.
"""
return os.path.join(data_path, "cifar-10-batches-py/", filename)
def _unpickle(filename):
"""
Unpickle the given file and return the data.
Note that the appropriate dir-name is prepended the filename.
"""
# Create full path for the file.
file_path = _get_file_path(filename)
print("Loading data: " + file_path)
with open(file_path, mode='rb') as file:
# In Python 3.X it is important to set the encoding,
# otherwise an exception is raised here.
        data = pickle.load(file, encoding='bytes')
return data
def _convert_images(raw):
"""
Convert images from the CIFAR-10 format and
return a 4-dim array with shape: [image_number, height, width, channel]
where the pixels are floats between 0.0 and 1.0.
"""
# Convert the raw images from the data-files to floating-points.
raw_float = np.array(raw, dtype=float) / 255.0
# Reshape the array to 4-dimensions.
images = raw_float.reshape([-1, num_channels, img_size, img_size])
# Reorder the indices of the array.
images = images.transpose([0, 2, 3, 1])
return images
def _load_data(filename):
"""
Load a pickled data-file from the CIFAR-10 data-set
and return the converted images (see above) and the class-number
for each image.
"""
# Load the pickled data-file.
data = _unpickle(filename)
# Get the raw images.
raw_images = data[b'data']
# Get the class-numbers for each image. Convert to numpy-array.
cls = np.array(data[b'labels'])
# Convert the images.
images = _convert_images(raw_images)
return images, cls
########################################################################
# Public functions that you may call to download the data-set from
# the internet and load the data into memory.
def maybe_download_and_extract():
"""
Download and extract the CIFAR-10 data-set if it doesn't already exist
in data_path (set this variable first to the desired path).
"""
download.maybe_download_and_extract(url=data_url, download_dir=data_path)
def load_class_names():
"""
Load the names for the classes in the CIFAR-10 data-set.
Returns a list with the names. Example: names[3] is the name
associated with class-number 3.
"""
# Load the class-names from the pickled file.
raw = _unpickle(filename="batches.meta")[b'label_names']
# Convert from binary strings.
names = [x.decode('utf-8') for x in raw]
return names
def load_training_data():
"""
Load all the training-data for the CIFAR-10 data-set.
The data-set is split into 5 data-files which are merged here.
Returns the images, class-numbers and one-hot encoded class-labels.
"""
# Pre-allocate the arrays for the images and class-numbers for efficiency.
images = np.zeros(shape=[_num_images_train, img_size, img_size, num_channels], dtype=float)
cls = np.zeros(shape=[_num_images_train], dtype=int)
# Begin-index for the current batch.
begin = 0
# For each data-file.
for i in range(_num_files_train):
# Load the images and class-numbers from the data-file.
images_batch, cls_batch = _load_data(filename="data_batch_" + str(i + 1))
# Number of images in this batch.
num_images = len(images_batch)
# End-index for the current batch.
end = begin + num_images
# Store the images into the array.
images[begin:end, :] = images_batch
# Store the class-numbers into the array.
cls[begin:end] = cls_batch
# The begin-index for the next batch is the current end-index.
begin = end
return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
def load_test_data():
"""
Load all the test-data for the CIFAR-10 data-set.
Returns the images, class-numbers and one-hot encoded class-labels.
"""
images, cls = _load_data(filename="test_batch")
return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
########################################################################
|
the-stack_0_27772
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 12:33:27 2021
@author: tcunn
"""
import matplotlib.pyplot
import agentframework
import csv
import random
import sys
import distutils.util
# Create empty list for environment raster data
environment = []
# Read in CSV raster data
with open('in.txt', newline='') as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist = []
for value in row:
rowlist.append(value)
environment.append(rowlist)
# Convert string input for whether to have visual output to boolean
try:
visual_output = bool(distutils.util.strtobool(sys.argv[4]))
except (IndexError, ValueError):
visual_output = True
# Plot environment data
if visual_output:
matplotlib.pyplot.imshow(environment)
matplotlib.pyplot.show()
# Set number of agents, number of random movements and size of neighbourhood
try:
num_of_agents = int(sys.argv[1])
num_of_moves = int(sys.argv[2])
neighbourhood = int(sys.argv[3])
except IndexError:
num_of_agents = 10
num_of_moves = 100
neighbourhood = 20
print("Inputs not all provided. " +
"Defaulting to 10 agents, 100 moves, 20 neighbourhood.")
except ValueError:
num_of_agents = 10
num_of_moves = 100
neighbourhood = 20
print("Inputs not valid. " +
"Defaulting to 10 agents, 100 moves, 20 neighbourhood.")
# Create empty list for agents
agents = []
# Create agents
for i in range(num_of_agents):
agents.append(agentframework.Agent(environment, agents))
# Move agents.
for j in range(num_of_moves):
random.shuffle(agents)
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
# Plot the agents and environment
if visual_output:
matplotlib.pyplot.imshow(environment)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
matplotlib.pyplot.show()
# Write new environment
with open("out.txt", "w", newline = "") as f2:
writer = csv.writer(f2, delimiter = ",")
for row in environment:
writer.writerow(row)
# Get list of agents current stores
agent_stores = []
for agent in agents:
agent_stores.append(agent.store)
# Write agents current stores to a file
with open("agent stores.txt", "a", newline = "") as f3:
writer = csv.writer(f3, delimiter = ",")
writer.writerow(agent_stores)
print("agents=" + str(num_of_agents) +
", total_store=" + str(sum(agent_stores)))
# Check agent creation is including other agents
# print(agents[0].agents[1].x, agents[1].x, agents[0].x)
|
the-stack_0_27773
|
"""
test_rnd_path.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.url.HTTPRequest import HTTPRequest
from w3af.plugins.evasion.rnd_path import rnd_path
class TestEvasion(unittest.TestCase):
def test_add_path_to_base_url(self):
rp = rnd_path()
u = URL('http://www.w3af.com/')
r = HTTPRequest( u )
url_string = rp.modify_request( r ).url_object.url_string
        self.assertRegexpMatches(url_string, r'http://www.w3af.com/\w*/../')
def test_add_path_to_path_url(self):
rp = rnd_path()
u = URL('http://www.w3af.com/abc/')
r = HTTPRequest( u )
url_string = rp.modify_request( r ).url_object.url_string
        self.assertRegexpMatches(url_string, r'http://www.w3af.com/\w*/../abc/')
def test_add_with_filename(self):
rp = rnd_path()
u = URL('http://www.w3af.com/abc/def.htm')
r = HTTPRequest( u )
url_string = rp.modify_request( r ).url_object.url_string
        self.assertRegexpMatches(url_string, r'http://www.w3af.com/\w*/../abc/def.htm')
def test_add_with_qs(self):
rp = rnd_path()
u = URL('http://www.w3af.com/abc/def.htm?id=1')
r = HTTPRequest( u )
url_string = rp.modify_request( r ).url_object.url_string
        self.assertRegexpMatches(url_string, r'http://www.w3af.com/\w*/../abc/def.htm\?id=1')
|
the-stack_0_27774
|
import time
from math import sqrt
import torch
from torch.utils.checkpoint import checkpoint
import config as cfg
from ipeps.ipeps_c4v import IPEPS_C4V
from ctm.one_site_c4v.env_c4v import *
from ctm.one_site_c4v.ctm_components_c4v import *
from ctm.one_site_c4v.fpcm_c4v import fpcm_MOVE_sl
from linalg.custom_svd import *
from linalg.custom_eig import *
import logging
log = logging.getLogger(__name__)
def run(state, env, conv_check=None, ctm_args=cfg.ctm_args, global_args=cfg.global_args):
r"""
:param state: wavefunction
:param env: initial C4v symmetric environment
:param conv_check: function which determines the convergence of CTM algorithm. If ``None``,
the algorithm performs ``ctm_args.ctm_max_iter`` iterations.
:param ctm_args: CTM algorithm configuration
:param global_args: global configuration
:type state: IPEPS_C4V
:type env: ENV_C4V
:type conv_check: function(IPEPS,ENV_C4V,Object,CTMARGS)->bool
:type ctm_args: CTMARGS
:type global_args: GLOBALARGS
    Executes specialized CTM algorithm for 1-site C4v symmetric iPEPS starting from the initial
environment ``env``. To establish the convergence of CTM before the maximal number of iterations
is reached a ``conv_check`` function is invoked. Its expected signature is
    ``conv_check(IPEPS,ENV_C4V,Object,CTMARGS)`` where ``Object`` is an arbitrary argument. For
example it can be a list or dict used for storing CTM data from previous steps to
check convergence.
If desired, CTM can be accelerated by fixed-point corner-matrix algorithm (FPCM) controlled
by settings in :py:class:`CTMARGS <config.CTMARGS>`.
.. note::
Currently, FPCM does not support reverse-mode differentiation.
"""
if ctm_args.projector_svd_method=='DEFAULT' or ctm_args.projector_svd_method=='SYMEIG':
def truncated_eig(M, chi):
return truncated_eig_sym(M, chi, keep_multiplets=True,\
verbosity=ctm_args.verbosity_projectors)
elif ctm_args.projector_svd_method == 'SYMARP':
def truncated_eig(M, chi):
return truncated_eig_symarnoldi(M, chi, keep_multiplets=True, \
verbosity=ctm_args.verbosity_projectors)
# elif ctm_args.projector_svd_method == 'GESDD':
# def truncated_eig(M, chi):
# return truncated_svd_gesdd(M, chi, verbosity=ctm_args.verbosity_projectors)
# elif cfg.ctm_args.projector_svd_method == 'RSVD':
# truncated_svd= truncated_svd_rsvd
else:
raise Exception(f"Projector eig/svd method \"{cfg.ctm_args.projector_svd_method}\" not implemented")
a= next(iter(state.sites.values()))
# 1) perform CTMRG
t_obs=t_ctm=t_fpcm=0.
history=None
past_steps_data=dict() # possibly store some data throughout the execution of CTM
for i in range(ctm_args.ctm_max_iter):
# FPCM acceleration
if i>=ctm_args.fpcm_init_iter and ctm_args.fpcm_freq>0 and i%ctm_args.fpcm_freq==0:
t0_fpcm= time.perf_counter()
fpcm_MOVE_sl(a, env, ctm_args=ctm_args, global_args=global_args,
past_steps_data=past_steps_data)
t1_fpcm= time.perf_counter()
t_fpcm+= t1_fpcm-t0_fpcm
log.info(f"fpcm_MOVE_sl DONE t_fpcm {t1_fpcm-t0_fpcm} [s]")
t0_ctm= time.perf_counter()
# ctm_MOVE_dl(a, env, truncated_eig, ctm_args=ctm_args, global_args=global_args)
ctm_MOVE_sl(a, env, truncated_eig, ctm_args=ctm_args, global_args=global_args,\
past_steps_data=past_steps_data)
t1_ctm= time.perf_counter()
t0_obs= time.perf_counter()
if conv_check is not None:
# evaluate convergence of the CTMRG procedure
converged, history= conv_check(state, env, history, ctm_args=ctm_args)
if converged:
if ctm_args.verbosity_ctm_convergence>0:
print(f"CTMRG converged at iter= {i}")
break
t1_obs= time.perf_counter()
t_ctm+= t1_ctm-t0_ctm
t_obs+= t1_obs-t0_obs
return env, history, t_ctm, t_obs
# performs CTM move
def ctm_MOVE_dl(a, env, f_c2x2_decomp, ctm_args=cfg.ctm_args, global_args=cfg.global_args):
# 0) extract raw tensors as tuple
dimsa = a.size()
A = torch.einsum('sefgh,sabcd->eafbgchd',a,a).contiguous()\
.view(dimsa[1]**2, dimsa[2]**2, dimsa[3]**2, dimsa[4]**2)
tensors= tuple([A,env.C[env.keyC],env.T[env.keyT]])
# function wrapping up the core of the CTM MOVE segment of CTM algorithm
def ctm_MOVE_dl_c(*tensors):
A, C, T= tensors
if global_args.device=='cpu' and ctm_args.step_core_gpu:
#loc_gpu= torch.device(global_args.gpu)
A= A.cuda()
C= C.cuda()
T= T.cuda()
# 1) build enlarged corner upper left corner
C2X2= c2x2_dl(A, C, T, verbosity=ctm_args.verbosity_projectors)
# 2) build projector
P, S, V = f_c2x2_decomp(C2X2, env.chi) # M = PSV^{T}
# 3) absorb and truncate
#
# C2X2--1 0--P--1
# 0
# 0
# P^t
# 1->0
C2X2= P.t() @ C2X2 @ P
# C2X2= torch.diag(S)
P= P.view(env.chi,T.size()[2],env.chi)
# 2->1
# __P__
# 0 1->0
# 0
# T--2->3
# 1->2
nT = torch.tensordot(P, T,([0],[0]))
# 1->0
# __P____
# | 0
# | 0
# T--3 1--A--3
# 2->1 2
nT = torch.tensordot(nT, A,([0,3],[0,1]))
# 0
# __P____
# | |
# | |
# T-------A--3->1
# 1 2
# 0 1
# |___P___|
# 2
nT = torch.tensordot(nT, P,([1,2],[0,1]))
nT = nT.permute(0,2,1).contiguous()
# 4) symmetrize, normalize and assign new C,T
C2X2= 0.5*(C2X2 + C2X2.t())
nT= 0.5*(nT + nT.permute(1,0,2))
C2X2= C2X2/torch.max(torch.abs(C2X2))
nT= nT/torch.max(torch.abs(nT))
if global_args.device=='cpu' and ctm_args.step_core_gpu:
C2X2= C2X2.cpu()
nT= nT.cpu()
return C2X2, nT
# Call the core function, allowing for checkpointing
if ctm_args.fwd_checkpoint_move:
new_tensors= checkpoint(ctm_MOVE_dl_c,*tensors)
else:
new_tensors= ctm_MOVE_dl_c(*tensors)
env.C[env.keyC]= new_tensors[0]
env.T[env.keyT]= new_tensors[1]
# performs CTM move
def ctm_MOVE_sl(a, env, f_c2x2_decomp, ctm_args=cfg.ctm_args, global_args=cfg.global_args,
past_steps_data=None):
r"""
:param a: on-site C4v symmetric tensor
:param env: C4v symmetric environment
:param f_c2x2_decomp: function performing the truncated spectral decomposition (eigenvalue/svd)
of enlarged corner. The ``f_c2x2_decomp`` returns a tuple composed of
leading chi spectral values and projector on leading chi spectral values.
:param ctm_args: CTM algorithm configuration
:param global_args: global configuration
:param past_steps_data: dictionary used for recording diagnostic information during CTM
:type a: torch.Tensor
:type env: ENV_C4V
:type f_c2x2_decomp: function(torch.Tensor, int)->torch.Tensor, torch.Tensor
:type ctm_args: CTMARGS
:type global_args: GLOBALARGS
    :type past_steps_data: dict
Executes a single step of C4v symmetric CTM algorithm for 1-site C4v symmetric iPEPS.
This variant of CTM step does not explicitly build double-layer on-site tensor.
"""
# 0) extract raw tensors as tuple
tensors= tuple([a,env.C[env.keyC],env.T[env.keyT]])
# function wrapping up the core of the CTM MOVE segment of CTM algorithm
def ctm_MOVE_sl_c(*tensors):
a, C, T= tensors
if global_args.device=='cpu' and ctm_args.step_core_gpu:
#loc_gpu= torch.device(global_args.gpu)
a= a.cuda()
C= C.cuda()
T= T.cuda()
# 1) build enlarged corner upper left corner
C2X2= c2x2_sl(a, C, T, verbosity=ctm_args.verbosity_projectors)
# 2) build projector
# P, S, V = f_c2x2_decomp(C2X2, env.chi) # M = PSV^T
D, P= f_c2x2_decomp(C2X2, env.chi) # M = UDU^T
# 3) absorb and truncate
#
# C2X2--1 0--P--1
# 0
# 0
# P^t
# 1->0
# C2X2= P.t() @ C2X2 @ P
C2X2= torch.diag(D)
P= P.view(env.chi,T.size()[2],env.chi)
# 2->1
# __P__
# 0 1->0
# 0
# T--2->3
# 1->2
nT= torch.tensordot(P, T,([0],[0]))
# 4) double-layer tensor contraction - layer by layer
# 4i) untangle the fused D^2 indices
# 1->2
# __P__
# | 0->0,1
# |
# T--3->4,5
# 2->3
nT= nT.view(a.size()[1],a.size()[1],nT.size()[1],nT.size()[2],\
a.size()[2],a.size()[2])
# 4ii) first layer "bra" (in principle conjugate)
# 2->1
# __P___________
# | 0 1->0
# | 1 /0->4
# T----4 2--a--4->6
# | | 3->5
# | --5->3
# 3->2
nT= torch.tensordot(nT, a,([0,4],[1,2]))
# 4iii) second layer "ket"
# 1->0
# __P__________
# | | 0
# | |/4 0\ |
# T----a---------6->3
# | | | \1
# | -----3 2--a--4->5
# | | 3->4
# | |
# 2->1 5->2
nT= torch.tensordot(nT, a,([0,3,4],[1,2,0]))
# 4iv) fuse pairs of aux indices
# 0
# __P_
# | |
# T----a----3\
# | | |\ ->3
# | ----a--5/
# | | |
# | | |
# 1 (2 4)->2
nT= nT.permute(0,1,2,4,3,5).contiguous().view(nT.size()[0],nT.size()[1],\
a.size()[3]**2,a.size()[4]**2)
# 0
# __P____
# | |
# | |
# T------aa--3->1
# 1 2
# 0 1
# |___P___|
# 2
nT = torch.tensordot(nT,P,([1,2],[0,1]))
nT = nT.permute(0,2,1).contiguous()
# 4) symmetrize, normalize and assign new C,T
C2X2= 0.5*(C2X2 + C2X2.t())
nT= 0.5*(nT + nT.permute(1,0,2))
C2X2= C2X2/torch.max(torch.abs(C2X2))
# C2X2= C2X2/torch.sum(torch.abs(D))
nT= nT/torch.max(torch.abs(nT))
# nT= ((nT.size()[0]*nT.size()[1]*nT.size()[2])/nT.norm())*nT
# print(f"{nT.norm()}")
if global_args.device=='cpu' and ctm_args.step_core_gpu:
C2X2= C2X2.cpu()
nT= nT.cpu()
return C2X2, nT
# Call the core function, allowing for checkpointing
if ctm_args.fwd_checkpoint_move:
new_tensors= checkpoint(ctm_MOVE_sl_c,*tensors)
else:
new_tensors= ctm_MOVE_sl_c(*tensors)
env.C[env.keyC]= new_tensors[0]
env.T[env.keyT]= new_tensors[1]
|
the-stack_0_27775
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.urls import path
from django.views.generic import RedirectView
from .views import *
urlpatterns = [
path('', login_required(assets), name='home'),
path('assets/', login_required(assets), name='assets'),
# Data Model views
path('moex-portfolio/', login_required(MoexPortfolioView.as_view()), name='moex-portfolio'),
path('transfers/', login_required(TransfersView.as_view()), name='transfers'),
path('deals/', login_required(DealsView.as_view()), name='deals'),
path('portfolio/', login_required(ReportPortfolioView.as_view()), name='portfolio'),
path('coupons-dividends/', login_required(CouponsDividendsView.as_view()), name='coupons-dividends'),
# Moex operations
path('update-bounds/', login_required(update_bounds), name='update-bounds'),
path('corp-bounds/', login_required(CorpBounView.as_view()), name='corp-bounds'),
# other
path('google-callback/', google_callback, name='google-callback'),
path('sentry-debug/', trigger_error),
]
|
the-stack_0_27776
|
import tensorflow as tf
from .generative import GenerativeTrainer
class GenerativeAdversarialNetworkTrainer(GenerativeTrainer):
def __init__(self, config, discriminator=None, generator=None, discriminator_steps=1,
discriminator_scale_factor=None, generator_scale_factor=None,
alternative_generator_loss=False, fixed_evaluate_noise=False,
**kwargs):
# get basic params
self.discriminator_definition = discriminator
self.discriminator_steps = discriminator_steps
self.discriminator_scale_factor = discriminator_scale_factor
self.generator_definition = generator
self.generator_scale_factor = generator_scale_factor
self.alternative_generator_loss = alternative_generator_loss
self.discriminator_networks = []
super().__init__(config, **kwargs)
def build_discriminator(self, x, real):
# build network
definition = self.discriminator_definition
reuse = len(self.discriminator_networks) > 0
network = self.build_network("discriminator", definition, x, reuse=reuse)
# set labels and logits
network.logits = network.get_output_layer().references["Z"]
network.real = real
network.scale_factor = self.discriminator_scale_factor if real else None
# append network
self.discriminator_networks.append(network)
return network.outputs
def build_generator(self):
definition = self.generator_definition
self.generator_network = self.build_generator_network("generator", definition)
return self.generator_network.outputs
def build_discriminator_optimize(self, loss_name="discriminator_loss"):
with self.variable_scope("discriminator_optimize"):
# sum losses from all discriminator networks
loss = 0
for network in self.discriminator_networks:
# prepare labels
if network.real:
labels = tf.ones_like(network.logits)
else:
labels = tf.zeros_like(network.logits)
if network.scale_factor is not None:
labels -= network.scale_factor
# cross entropy loss
network.loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=network.logits,
labels=labels)
loss += tf.reduce_mean(network.loss)
# loss summary
if loss_name:
self.add_metric(loss_name, loss)
# optimize loss for networks
self.discriminator_networks[0].optimize_loss(loss, name="discriminator_optimize")
return loss
def build_generator_optimize(self, loss_name="generator_loss"):
with self.variable_scope("generator_optimize"):
# sum losses from all fake discriminator networks
loss = 0
for network in self.discriminator_networks:
if network.real:
continue
if self.alternative_generator_loss:
                    # alternative: use the negative discriminator loss (FIXME)
network_loss = -network.loss
else:
# prepare negative labels
labels = tf.ones_like(network.logits)
if self.generator_scale_factor is not None:
labels -= self.generator_scale_factor
# cross entropy loss of negative labels
network_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=network.logits,
labels=labels)
loss += tf.reduce_mean(network_loss)
# loss summary
if loss_name:
self.add_metric(loss_name, loss)
# optimize loss for network
self.generator_network.optimize_loss(loss, name="generator_optimize")
return loss
def build_trainer(self):
super().build_trainer()
with self.variable_scope("GAN"):
# build generator-network
generated = self.build_generator()
# normalize real input
with self.variable_scope("normalize_images"):
x = self.get_feed("X")
x = x * 2 - 1 # normalize (-1, 1)
# build discriminator-networks
self.build_discriminator(x, True)
self.build_discriminator(generated, False)
# optimize discriminator loss
self.build_discriminator_optimize()
# optimize generator loss
self.build_generator_optimize()
# summary images
self.build_summary_images("generated", generated)
def optimize(self, batch):
feed_map = batch.get_feeds()
# optimize discriminator-network
for i in range(self.discriminator_steps):
self.run("discriminator_optimize", feed_map)
# optimize generator-network
results = self.run("generator_optimize", feed_map)
return results
|
the-stack_0_27777
|
"""
external_query.py
Run steep.py (compute similarities) and sip.py (compute connectivities) on an
external gct file. The required inputs are a path to a gct of external profiles
(probes x samples), a path to a gct of internal profiles, and a path to a gct
of the pre-computed similarity matrix of the internal profiles against
themselves.
N.B. The internal_gcts and bg_gcts should each contain only WITHIN-cell
calculations. In other words, this script needs to be run 6 times to get
results for each cell line in the corpus.
"""
import logging
import sys
import argparse
import broadinstitute_psp.utils.setup_logger as setup_logger
import broadinstitute_psp.steep.steep as steep
import broadinstitute_psp.sip.sip as sip
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.write_gct as wg
__author__ = "Lev Litichevskiy"
__email__ = "[email protected]"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
SIMILARITY_METRIC_FIELD = "similarity_metric"
CONNECTIVITY_METRIC_FIELD = "connectivity_metric"
QUERY_FIELD_NAME = "query_field"
TARGET_FIELD_NAME = "target_field"
SEPARATOR = ":"
def build_parser():
"""Build argument parser."""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required args
parser.add_argument("--external_gct_path", "-e", required=True,
help="path to gct file of external profiles")
parser.add_argument("--internal_gct_path", "-i", required=True,
help="path to gct file of internal profiles")
parser.add_argument("--bg_gct_path", "-b", required=True,
help="path to background similarity gct file")
# Optional args
parser.add_argument("--out_steep_name", "-ost", default="steep_output.gct",
help="what to name the output similarity file")
parser.add_argument("--out_sip_name", "-osi", default="sip_output.gct",
help="what to name the output connectivity file")
parser.add_argument("--similarity_metric", "-s", default="spearman",
choices=["spearman", "pearson"],
help="metric to use for comparing sample profiles")
parser.add_argument("--connectivity_metric", "-c", default="ks_test",
choices=["ks_test", "percentile_score"],
help="metric to use for computing connectivity")
parser.add_argument("--fields_to_aggregate_for_external_profiles", "-fae",
nargs="+", default=["pert_id", "cell_id", "pert_time"],
help="list of metadata fields to use in aggregating replicates in external profiles")
parser.add_argument("--fields_to_aggregate_for_internal_profiles", "-fai",
nargs="+", default=["pert_id", "cell_id", "pert_time"],
help="list of metadata fields to use in aggregating replicates in internal profiles")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="whether to increase the # of messages reported")
return parser
def main(args):
# Parse input gcts
external_gct = parse.parse(args.external_gct_path)
internal_gct = parse.parse(args.internal_gct_path)
bg_gct = parse.parse(args.bg_gct_path)
# Meat of the script
(sim_gct, conn_gct) = do_steep_and_sip(
external_gct, internal_gct, bg_gct, args.similarity_metric,
args.connectivity_metric,
args.fields_to_aggregate_for_external_profiles,
args.fields_to_aggregate_for_internal_profiles)
# Write output gcts
wg.write(sim_gct, args.out_steep_name, data_null="NaN", metadata_null="NaN", filler_null="NaN")
wg.write(conn_gct, args.out_sip_name, data_null="NaN", filler_null="NaN", metadata_null="NaN")
def do_steep_and_sip(external_gct, internal_gct, bg_gct, similarity_metric,
connectivity_metric,
fields_to_aggregate_for_external_profiles,
fields_to_aggregate_for_internal_profiles):
#----------STEEP----------#
# Compute similarity between external and internal profiles
sim_df = steep.compute_similarity_bw_two_dfs(internal_gct.data_df,
external_gct.data_df,
similarity_metric)
# Row metadata is from gct1, column metadata is from gct2
row_metadata_for_sim_df = internal_gct.col_metadata_df
col_metadata_for_sim_df = external_gct.col_metadata_df
# Append column to both metadata_dfs indicating which similarity_metric was used
row_metadata_for_sim_df[SIMILARITY_METRIC_FIELD] = similarity_metric
col_metadata_for_sim_df[SIMILARITY_METRIC_FIELD] = similarity_metric
# Assemble similarity gct
sim_gct = GCToo.GCToo(sim_df, row_metadata_for_sim_df, col_metadata_for_sim_df)
#----------SIP----------#
# Check symmetry
(is_test_df_sym, is_bg_df_sym) = sip.check_symmetry(sim_gct.data_df, bg_gct.data_df)
# Create an aggregated metadata field for index and columns of both gcts
# and sort by that field
(test_gct, bg_gct) = sip.create_aggregated_fields_in_GCTs(
sim_gct, bg_gct,
fields_to_aggregate_for_external_profiles,
fields_to_aggregate_for_internal_profiles,
fields_to_aggregate_for_internal_profiles,
QUERY_FIELD_NAME, TARGET_FIELD_NAME, SEPARATOR)
# Compute connectivity
(_, signed_conn_gct) = sip.compute_connectivities(
test_gct, bg_gct, QUERY_FIELD_NAME, TARGET_FIELD_NAME, TARGET_FIELD_NAME,
connectivity_metric, is_test_df_sym, SEPARATOR)
# Append to queries a new column saying what connectivity metric was used
sip.add_connectivity_metric_to_metadata(signed_conn_gct.col_metadata_df, connectivity_metric, CONNECTIVITY_METRIC_FIELD)
sip.add_connectivity_metric_to_metadata(signed_conn_gct.row_metadata_df, connectivity_metric, CONNECTIVITY_METRIC_FIELD)
return sim_gct, signed_conn_gct
if __name__ == "__main__":
args = build_parser().parse_args(sys.argv[1:])
setup_logger.setup(verbose=args.verbose)
main(args)
|
the-stack_0_27779
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep, time
import pandas as pd
import pytest
import v3io_frames as v3f
from conftest import has_go
from conftest import test_backends
try:
import cudf
has_cudf = True
except ImportError:
has_cudf = False
@pytest.mark.skipif(not has_cudf, reason='cudf not found')
@pytest.mark.skipif(not has_go, reason='Go SDK not found')
def test_cudf(framesd, session):
df = cudf.DataFrame({
'a': [1, 2, 3],
'b': [1.1, 2.2, 3.3],
})
c = v3f.Client(framesd.grpc_addr, frame_factory=cudf.DataFrame)
backend = 'csv'
table = 'cudf-{}'.format(int(time()))
print('table = {}'.format(table))
c.write(backend, table, [df])
sleep(1) # Let db flush
rdf = c.read(backend, table=table)
assert isinstance(rdf, cudf.DataFrame), 'not a cudf.DataFrame'
assert len(rdf) == len(df), 'wrong frame size'
assert set(rdf.columns) == set(df.columns), 'columns mismatch'
@pytest.mark.skipif(not has_cudf, reason='cudf not found')
def test_concat_categorical():
df1 = cudf.DataFrame({'a': range(10, 13), 'b': range(50, 53)})
df1['c'] = pd.Series(['a'] * 3, dtype='category')
df2 = cudf.DataFrame({'a': range(20, 23), 'b': range(60, 63)})
df2['c'] = pd.Series(['b'] * 3, dtype='category')
for backend in test_backends:
df = v3f.pdutils.concat_dfs([df1, df2], backend, cudf.DataFrame, cudf.concat, False)
assert len(df) == len(df1) + len(df2), 'bad concat size'
dtype = df['c'].dtype
assert v3f.pdutils.is_categorical_dtype(dtype), 'result not categorical'
@pytest.mark.skipif(not has_cudf, reason='cudf not found')
def test_concat_categorical_with_multi_index():
df1 = cudf.DataFrame({'a': range(10, 13), 'b': range(50, 53)})
df1['c'] = pd.Series(['a'] * 3, dtype='category')
df2 = cudf.DataFrame({'a': range(20, 23), 'b': range(60, 63)})
df2['c'] = pd.Series(['b'] * 3, dtype='category')
for backend in test_backends:
df = v3f.pdutils.concat_dfs([df1, df2], backend, cudf.DataFrame, cudf.concat, True)
assert len(df) == len(df1) + len(df2), 'bad concat size'
dtype = df['c'].dtype
assert v3f.pdutils.is_categorical_dtype(dtype), 'result not categorical'
|
the-stack_0_27781
|
from random import randint
from random import choice
def montyhall(playerchoice):
prize = randint(1,3)
if (prize == 1):
noluck1 = randint(2,3)
if (noluck1 == 2):
noluck2 = 3
else:
noluck2 = 2
if (prize == 2):
noluck1 = choice([1,3])
if (noluck1 == 1):
noluck2 = 3
else:
noluck2 = 1
if (prize == 3):
noluck1 = randint(1,2)
if (noluck1 == 1):
noluck2 = 2
else:
noluck2 = 1
"out of the two remaining doors, pick the one that does not have\
prize behind"
if (playerchoice == prize):
openeddoor = choice([noluck1, noluck2])
if (playerchoice == noluck1):
openeddoor = noluck2
else:
openeddoor = noluck1
newplayerchoice = [i for i in [1,2,3] if (i != playerchoice and
i != openeddoor)][0]
win = (newplayerchoice == prize)
return win
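# Note: with the switching strategy simulated above the player wins exactly when the
# initial pick was not the prize door (probability 2/3), so the measured win rate
# should approach ~66.7% as the number of trials grows.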
def test(num):
wincount = 0
newnum = num
for i in range(1,num+1):
pchoice = randint(1,3)
print("Trial #" + str(i))
win = montyhall(pchoice)
if (win == True):
wincount += 1
print("Win!")
if (win == False):
print("Lose.")
print("-----------")
print("By swapping, we won " + str(wincount) + " times in " + str(newnum)\
+ " trials.")
print("The possibility of Winning = " + "%.2f" % (wincount/num*100) + "%.\n")
repeat = input("Please enter:\n" + "'y' to try again\n" +
"'c' to change the number of trials\n" +
"'n' to stop\n")
while (repeat != "n" and repeat != "N"):
if (repeat == "y" or repeat == "Y"):
test(num)
repeat = "n"
if (repeat == "c" or repeat == "C"):
newnum = int(input("Number of trials = "))
test(newnum)
repeat = "n"
else:
repeat = input("Please enter the correct value. (y/n/c)")
return
def init():
num = int(input("Please enter the number of trials you want to take: "))
return test(num)
init()
|
the-stack_0_27784
|
import copy
import os
import time
from argparse import Namespace
from typing import Optional, Dict, Union, Set, List, Iterable
import jina
from .k8slib import kubernetes_deployment, kubernetes_client
from ..networking import K8sGrpcConnectionPool
from ..pods import BasePod
from ... import __default_executor__
from ...enums import PeaRoleType
from ...logging.logger import JinaLogger
from ...excepts import RuntimeFailToStart
class K8sPod(BasePod):
"""The K8sPod (KubernetesPod) is used for deployments on Kubernetes."""
class _K8sDeployment:
def __init__(
self,
name: str,
head_port_in: int,
version: str,
pea_type: str,
jina_pod_name: str,
shard_id: Optional[int],
common_args: Union['Namespace', Dict],
deployment_args: Union['Namespace', Dict],
):
self.name = name
self.dns_name = kubernetes_deployment.to_dns_name(name)
self.head_port_in = head_port_in
self.version = version
self.pea_type = pea_type
self.jina_pod_name = jina_pod_name
self.shard_id = shard_id
self.common_args = common_args
self.deployment_args = deployment_args
self.k8s_namespace = self.common_args.k8s_namespace
self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
self.cluster_address = None
def _deploy_gateway(self):
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-standard'
)
self.cluster_address = kubernetes_deployment.deploy_service(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
container_cmd='["jina"]',
container_args=f'["gateway", '
f'{kubernetes_deployment.get_cli_params(arguments=self.common_args, skip_list=("pod_role",))}]',
logger=JinaLogger(f'deploy_{self.name}'),
replicas=1,
pull_policy='IfNotPresent',
jina_pod_name='gateway',
pea_type='gateway',
port_expose=self.common_args.port_expose,
)
@staticmethod
def _construct_runtime_container_args(
deployment_args, uses, uses_metas, uses_with_string, pea_type, port_in
):
container_args = (
f'["executor", '
f'"--native", '
f'"--uses", "{uses}", '
f'"--runtime-cls", {"WorkerRuntime" if pea_type.lower() == "worker" else "HeadRuntime"}, '
f'"--uses-metas", "{uses_metas}", '
+ uses_with_string
+ f'{kubernetes_deployment.get_cli_params(arguments=deployment_args, port_in=port_in)}]'
)
return container_args
def _get_image_name(self, uses: str):
image_name = kubernetes_deployment.get_image_name(uses)
if image_name == __default_executor__:
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-perf'
)
return image_name
def _get_init_container_args(self):
return kubernetes_deployment.get_init_container_args(self.common_args)
def _get_container_args(self, uses, pea_type=None, port_in=None):
uses_metas = kubernetes_deployment.dictionary_to_cli_param(
{'pea_id': self.shard_id}
)
uses_with = kubernetes_deployment.dictionary_to_cli_param(
self.deployment_args.uses_with
)
uses_with_string = f'"--uses-with", "{uses_with}", ' if uses_with else ''
if uses != __default_executor__:
uses = 'config.yml'
return self._construct_runtime_container_args(
self.deployment_args,
uses,
uses_metas,
uses_with_string,
pea_type if pea_type else self.pea_type,
port_in,
)
def _deploy_runtime(
self,
replace=False,
):
image_name = self._get_image_name(self.deployment_args.uses)
image_name_uses_before = (
self._get_image_name(self.deployment_args.uses_before)
if hasattr(self.deployment_args, 'uses_before')
and self.deployment_args.uses_before
else None
)
image_name_uses_after = (
self._get_image_name(self.deployment_args.uses_after)
if hasattr(self.deployment_args, 'uses_after')
and self.deployment_args.uses_after
else None
)
init_container_args = self._get_init_container_args()
container_args = self._get_container_args(self.deployment_args.uses)
container_args_uses_before = (
self._get_container_args(
self.deployment_args.uses_before,
'worker',
port_in=K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE,
)
if hasattr(self.deployment_args, 'uses_before')
and self.deployment_args.uses_before
else None
)
container_args_uses_after = (
self._get_container_args(
self.deployment_args.uses_after,
'worker',
port_in=K8sGrpcConnectionPool.K8S_PORT_USES_AFTER,
)
if hasattr(self.deployment_args, 'uses_after')
and self.deployment_args.uses_after
else None
)
self.cluster_address = kubernetes_deployment.deploy_service(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
image_name_uses_after=image_name_uses_after,
image_name_uses_before=image_name_uses_before,
container_cmd='["jina"]',
container_cmd_uses_before='["jina"]',
container_cmd_uses_after='["jina"]',
container_args=container_args,
container_args_uses_before=container_args_uses_before,
container_args_uses_after=container_args_uses_after,
logger=JinaLogger(f'deploy_{self.name}'),
replicas=self.num_replicas,
pull_policy='IfNotPresent',
jina_pod_name=self.jina_pod_name,
pea_type=self.pea_type,
shard_id=self.shard_id,
init_container=init_container_args,
env=self.deployment_args.env,
gpus=self.deployment_args.gpus
if hasattr(self.deployment_args, 'gpus')
else None,
custom_resource_dir=getattr(
self.common_args, 'k8s_custom_resource_dir', None
),
replace_deployment=replace,
)
def _restart_runtime(self):
self._deploy_runtime(replace=True)
def wait_start_success(self):
_timeout = self.common_args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
from kubernetes import client
with JinaLogger(f'waiting_for_{self.name}') as logger:
logger.debug(
f'🏝️\n\t\tWaiting for "{self.name}" to be ready, with {self.num_replicas} replicas'
)
timeout_ns = 1000000000 * _timeout if _timeout else None
now = time.time_ns()
exception_to_raise = None
while timeout_ns is None or time.time_ns() - now < timeout_ns:
try:
api_response = self._read_namespaced_deployment()
if (
api_response.status.ready_replicas is not None
and api_response.status.ready_replicas == self.num_replicas
):
logger.success(f' {self.name} has all its replicas ready!!')
return
else:
ready_replicas = api_response.status.ready_replicas or 0
logger.debug(
f'\nNumber of ready replicas {ready_replicas}, waiting for {self.num_replicas - ready_replicas} replicas to be available for {self.name}'
)
time.sleep(1.0)
except client.ApiException as ex:
exception_to_raise = ex
break
fail_msg = f' Deployment {self.name} did not start with a timeout of {self.common_args.timeout_ready}'
if exception_to_raise:
fail_msg += f': {repr(exception_to_raise)}'
raise RuntimeFailToStart(fail_msg)
async def wait_restart_success(self, previous_uids: Iterable[str] = None):
_timeout = self.common_args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
if previous_uids is None:
previous_uids = []
from kubernetes import client
import asyncio
k8s_client = kubernetes_client.K8sClients().apps_v1
with JinaLogger(f'waiting_restart_for_{self.name}') as logger:
logger.info(
f'🏝️\n\t\tWaiting for "{self.name}" to be restarted, with {self.num_replicas} replicas'
)
timeout_ns = 1000000000 * _timeout if _timeout else None
now = time.time_ns()
exception_to_raise = None
while timeout_ns is None or time.time_ns() - now < timeout_ns:
try:
api_response = k8s_client.read_namespaced_deployment(
name=self.dns_name, namespace=self.k8s_namespace
)
logger.debug(
f'\n\t\t Updated Replicas: {api_response.status.updated_replicas}.'
f' Replicas: {api_response.status.replicas}.'
f' Expected Replicas {self.num_replicas}'
)
has_pod_with_uid = self._has_pod_with_uid(previous_uids)
if (
api_response.status.updated_replicas is not None
and api_response.status.updated_replicas
== self.num_replicas
and api_response.status.replicas == self.num_replicas
and not has_pod_with_uid
):
logger.success(
f' {self.name} has all its replicas updated!!'
)
return
else:
updated_replicas = api_response.status.updated_replicas or 0
alive_replicas = api_response.status.replicas or 0
if updated_replicas < self.num_replicas:
logger.debug(
f'\nNumber of updated replicas {updated_replicas}, waiting for {self.num_replicas - updated_replicas} replicas to be updated'
)
elif has_pod_with_uid:
logger.debug(
f'\nWaiting for old replicas to be terminated'
)
else:
logger.debug(
f'\nNumber of alive replicas {alive_replicas}, waiting for {alive_replicas - self.num_replicas} old replicas to be terminated'
)
await asyncio.sleep(1.0)
except client.ApiException as ex:
exception_to_raise = ex
break
fail_msg = f' Deployment {self.name} did not restart with a timeout of {self.common_args.timeout_ready}'
if exception_to_raise:
fail_msg += f': {repr(exception_to_raise)}'
raise RuntimeFailToStart(fail_msg)
async def wait_scale_success(self, replicas: int):
scale_to = replicas
_timeout = self.common_args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
import asyncio
from kubernetes import client
with JinaLogger(f'waiting_scale_for_{self.name}') as logger:
logger.info(
f'🏝️\n\t\tWaiting for "{self.name}" to be scaled, with {self.num_replicas} replicas,'
                f' scale to {scale_to}.'
)
timeout_ns = 1000000000 * _timeout if _timeout else None
now = time.time_ns()
exception_to_raise = None
while timeout_ns is None or time.time_ns() - now < timeout_ns:
try:
api_response = kubernetes_client.K8sClients().apps_v1.read_namespaced_deployment(
name=self.dns_name, namespace=self.k8s_namespace
)
logger.debug(
f'\n\t\t Scaled replicas: {api_response.status.ready_replicas}.'
f' Replicas: {api_response.status.replicas}.'
f' Expected Replicas {scale_to}'
)
if (
api_response.status.ready_replicas is not None
and api_response.status.ready_replicas == scale_to
):
logger.success(
f' {self.name} has all its replicas updated!!'
)
return
else:
scaled_replicas = api_response.status.ready_replicas or 0
if scaled_replicas < scale_to:
logger.debug(
f'\nNumber of replicas {scaled_replicas}, waiting for {scale_to - scaled_replicas} replicas to be scaled up.'
)
else:
logger.debug(
f'\nNumber of replicas {scaled_replicas}, waiting for {scaled_replicas - scale_to} replicas to be scaled down.'
)
await asyncio.sleep(1.0)
except client.ApiException as ex:
exception_to_raise = ex
break
fail_msg = f' Deployment {self.name} did not restart with a timeout of {self.common_args.timeout_ready}'
if exception_to_raise:
fail_msg += f': {repr(exception_to_raise)}'
raise RuntimeFailToStart(fail_msg)
def rolling_update(
self, dump_path: Optional[str] = None, *, uses_with: Optional[Dict] = None
):
assert (
self.name != 'gateway'
), 'Rolling update on the gateway is not supported'
if dump_path is not None:
if uses_with is not None:
uses_with['dump_path'] = dump_path
else:
uses_with = {'dump_path': dump_path}
self.deployment_args.uses_with = uses_with
self.deployment_args.dump_path = dump_path
self._restart_runtime()
def scale(self, replicas: int):
"""
Scale the amount of replicas of a given Executor.
:param replicas: The number of replicas to scale to
"""
self._patch_namespaced_deployment_scale(replicas)
def start(self):
with JinaLogger(f'start_{self.name}') as logger:
logger.debug(f'\t\tDeploying "{self.name}"')
if self.name == 'gateway':
self._deploy_gateway()
else:
self._deploy_runtime()
if not self.common_args.noblock_on_start:
self.wait_start_success()
return self
def close(self):
from kubernetes import client
with JinaLogger(f'close_{self.name}') as logger:
try:
resp = self._delete_namespaced_deployment()
if resp.status == 'Success':
logger.success(
f' Successful deletion of deployment {self.name}'
)
else:
logger.error(
f' Deletion of deployment {self.name} unsuccessful with status {resp.status}'
)
except client.ApiException as exc:
logger.error(
f' Error deleting deployment {self.name}: {exc.reason} '
)
def _delete_namespaced_deployment(self):
return kubernetes_client.K8sClients().apps_v1.delete_namespaced_deployment(
name=self.dns_name, namespace=self.k8s_namespace
)
def _read_namespaced_deployment(self):
return kubernetes_client.K8sClients().apps_v1.read_namespaced_deployment(
name=self.dns_name, namespace=self.k8s_namespace
)
def _patch_namespaced_deployment_scale(self, replicas: int):
kubernetes_client.K8sClients().apps_v1.patch_namespaced_deployment_scale(
self.dns_name,
namespace=self.k8s_namespace,
body={'spec': {'replicas': replicas}},
)
def get_pod_uids(self) -> List[str]:
"""Get the UIDs for all Pods in this deployment
:return: list of uids as strings for all pods in the deployment
"""
pods = kubernetes_client.K8sClients().core_v1.list_namespaced_pod(
namespace=self.k8s_namespace, label_selector=f'app={self.dns_name}'
)
return [item.metadata.uid for item in pods.items]
def _has_pod_with_uid(self, uids: Iterable[str]) -> bool:
"""Check if this deployment has any Pod with a UID contained in uids
:param uids: list of UIDs to check
:return: True if any Pod has a UID in uids
"""
current_pods_uids = self.get_pod_uids()
return any(uid in current_pods_uids for uid in uids)
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def to_node(self):
return {
'name': self.dns_name,
'head_host': f'{self.dns_name}.{self.k8s_namespace}.svc',
'head_port_in': self.head_port_in,
}
def __init__(
self, args: Union['Namespace', Dict], needs: Optional[Set[str]] = None
):
super().__init__()
self.args = args
self.k8s_namespace = self.args.k8s_namespace
self.needs = needs or set()
self.deployment_args = self._parse_args(args)
self.version = self._get_base_executor_version()
self.k8s_head_deployment = None
self.k8s_connection_pool = getattr(args, 'k8s_connection_pool', True)
if self.deployment_args['head_deployment'] is not None:
name = f'{self.name}-head'
self.k8s_head_deployment = self._K8sDeployment(
name=name,
head_port_in=K8sGrpcConnectionPool.K8S_PORT_IN,
version=self.version,
shard_id=None,
jina_pod_name=self.name,
common_args=self.args,
deployment_args=self.deployment_args['head_deployment'],
pea_type='head',
)
self.k8s_deployments = []
for i, args in enumerate(self.deployment_args['deployments']):
name = (
f'{self.name}-{i}'
if len(self.deployment_args['deployments']) > 1
else f'{self.name}'
)
self.k8s_deployments.append(
self._K8sDeployment(
name=name,
head_port_in=K8sGrpcConnectionPool.K8S_PORT_IN,
version=self.version,
shard_id=i,
common_args=self.args,
deployment_args=args,
pea_type='worker',
jina_pod_name=self.name,
)
)
def _parse_args(
self, args: Namespace
) -> Dict[str, Optional[Union[List[Namespace], Namespace]]]:
return self._parse_deployment_args(args)
def _parse_deployment_args(self, args):
parsed_args = {
'head_deployment': None,
'deployments': [],
}
shards = getattr(args, 'shards', 1)
uses_before = getattr(args, 'uses_before', None)
uses_after = getattr(args, 'uses_after', None)
if args.name != 'gateway':
parsed_args['head_deployment'] = copy.copy(args)
parsed_args['head_deployment'].replicas = 1
parsed_args['head_deployment'].runtime_cls = 'HeadRuntime'
parsed_args['head_deployment'].pea_role = PeaRoleType.HEAD
parsed_args['head_deployment'].port_in = K8sGrpcConnectionPool.K8S_PORT_IN
# if the k8s connection pool is disabled, the connection pool is managed manually
if not args.k8s_connection_pool:
connection_list = '{'
for i in range(shards):
name = f'{self.name}-{i}' if shards > 1 else f'{self.name}'
connection_list += f'"{str(i)}": "{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT_IN}",'
connection_list = connection_list[:-1]
connection_list += '}'
parsed_args['head_deployment'].connection_list = connection_list
if uses_before:
parsed_args[
'head_deployment'
].uses_before_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
)
if uses_after:
parsed_args[
'head_deployment'
].uses_after_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
)
for i in range(shards):
cargs = copy.deepcopy(args)
cargs.shard_id = i
cargs.uses_before = None
cargs.uses_after = None
cargs.port_in = K8sGrpcConnectionPool.K8S_PORT_IN
if args.name == 'gateway':
cargs.pea_role = PeaRoleType.GATEWAY
parsed_args['deployments'].append(cargs)
return parsed_args
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
super().__exit__(exc_type, exc_val, exc_tb)
self.join()
@property
def port_expose(self) -> int:
"""Not implemented"""
raise NotImplementedError
@property
def host(self) -> str:
"""Currently, when deploying on Kubernetes, Jina does not expose a public host.
Instead Jina sends requests via port-forward and runs the requests against localhost.
:return: localhost
"""
return 'localhost'
async def rolling_update(
self, dump_path: Optional[str] = None, *, uses_with: Optional[Dict] = None
):
"""Reload all Deployments of this K8s Pod.
        :param dump_path: **backwards compatibility** dump_path used to be the only argument this function accepted to override
:param uses_with: a Dictionary of arguments to restart the executor with
"""
old_uids = {}
for deployment in self.k8s_deployments:
old_uids[deployment.dns_name] = deployment.get_pod_uids()
deployment.rolling_update(dump_path=dump_path, uses_with=uses_with)
for deployment in self.k8s_deployments:
await deployment.wait_restart_success(old_uids[deployment.dns_name])
async def scale(self, replicas: int):
"""
        Scale the number of replicas of a given Executor.
:param replicas: The number of replicas to scale to
"""
for deployment in self.k8s_deployments:
deployment.scale(replicas=replicas)
for deployment in self.k8s_deployments:
await deployment.wait_scale_success(replicas=replicas)
deployment.num_replicas = replicas
def start(self) -> 'K8sPod':
"""Deploy the kubernetes pods via k8s Deployment and k8s Service.
:return: self
"""
with JinaLogger(f'start_{self.name}') as logger:
logger.debug(f'🏝️\tCreate deployments for "{self.name}"')
if self.k8s_head_deployment is not None:
self.enter_context(self.k8s_head_deployment)
for k8s_deployment in self.k8s_deployments:
self.enter_context(k8s_deployment)
return self
def wait_start_success(self):
"""Not implemented. It should wait until the deployment is up and running"""
if not self.args.noblock_on_start:
raise ValueError(
f'{self.wait_start_success!r} should only be called when `noblock_on_start` is set to True'
)
try:
if self.k8s_head_deployment is not None:
self.k8s_head_deployment.wait_start_success()
for p in self.k8s_deployments:
p.wait_start_success()
except:
self.close()
raise
def join(self):
"""Not needed. The context managers will manage the proper deletion"""
pass
def update_pea_args(self):
"""
Regenerate deployment args
"""
self.deployment_args = self._parse_args(self.args)
@property
def head_args(self) -> Namespace:
"""Head args of the pod.
:return: namespace
"""
return self.args
@property
def num_peas(self) -> int:
"""Number of peas. Currently unused.
:return: number of peas
"""
return sum(
[
self.k8s_head_deployment.num_replicas
if self.k8s_head_deployment is not None
else 0
]
+ [k8s_deployment.num_replicas for k8s_deployment in self.k8s_deployments]
)
@property
def deployments(self) -> List[Dict]:
"""Deployment information which describes the interface of the pod.
:return: list of dictionaries defining the attributes used by the routing table
"""
res = []
if self.args.name == 'gateway':
res.append(self.k8s_deployments[0].to_node())
else:
if self.k8s_head_deployment:
res.append(self.k8s_head_deployment.to_node())
res.extend([_.to_node() for _ in self.k8s_deployments])
return res
def _get_base_executor_version(self):
import requests
url = 'https://registry.hub.docker.com/v1/repositories/jinaai/jina/tags'
tags = requests.get(url).json()
name_set = {tag['name'] for tag in tags}
if jina.__version__ in name_set:
return jina.__version__
else:
return 'master'
@property
def _mermaid_str(self) -> List[str]:
"""String that will be used to represent the Pod graphically when `Flow.plot()` is invoked
.. # noqa: DAR201
"""
mermaid_graph = []
if self.name != 'gateway':
mermaid_graph = [f'subgraph {self.name};\n', f'direction LR;\n']
num_replicas = getattr(self.args, 'replicas', 1)
num_shards = getattr(self.args, 'shards', 1)
uses = self.args.uses
head_name = f'{self.name}/head'
tail_name = f'{self.name}/tail'
uses_before = (
self.args.uses_before if self.args.uses_before is not None else None
)
uses_after = (
self.args.uses_after if self.args.uses_after is not None else None
)
if num_shards > 1:
shard_names = [
f'{args.name}/shard-{i}'
for i, args in enumerate(self.deployment_args['deployments'])
]
for shard_name in shard_names:
shard_mermaid_graph = [
f'subgraph {shard_name}\n',
f'direction TB;\n',
]
for replica_id in range(num_replicas):
shard_mermaid_graph.append(
f'{shard_name}/replica-{replica_id}[{uses}]\n'
)
shard_mermaid_graph.append(f'end\n')
mermaid_graph.extend(shard_mermaid_graph)
if uses_before:
for shard_name in shard_names:
mermaid_graph.append(
f'{head_name}[{uses_before}]:::HEADTAIL --> {shard_name}[{uses}];'
)
if uses_after:
for shard_name in shard_names:
mermaid_graph.append(
f'{shard_name}[{uses}] --> {tail_name}[{uses_after}]:::HEADTAIL;'
)
else:
if uses_before is None and uses_after is None:
for replica_id in range(num_replicas):
mermaid_graph.append(
f'{self.name}/replica-{replica_id}[{uses}];'
)
else:
if uses_before is not None:
for replica_id in range(num_replicas):
mermaid_graph.append(
f'{head_name}[{uses_before}]:::HEADTAIL --> {self.name}/replica-{replica_id}[{uses}]:::PEA;'
)
if uses_after is not None:
for replica_id in range(num_replicas):
mermaid_graph.append(
f'{self.name}/replica-{replica_id}[{uses}]:::PEA --> {tail_name}[{uses_after}]:::HEADTAIL;'
)
mermaid_graph.append(f'end;')
return mermaid_graph
|
the-stack_0_27787
|
#!/usr/bin/env python
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
from util.utils import build_targets
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
"""
Function:
Constructor for EmptyLayer class
"""
super(EmptyLayer, self).__init__()
class DetectionLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, num_classes, img_dim):
"""
Function:
Constructor for DetectionLayer class
Arguments:
            anchors -- list of anchor box dimensions
            num_classes -- number of classes the model will classify
img_dim -- dimension of input images
"""
super(DetectionLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.bbox_attrs = 5 + num_classes
self.image_dim = img_dim
self.ignore_thres = 0.5
self.lambda_coord = 1
self.mse_loss = nn.MSELoss(size_average=True) # Coordinate loss
self.bce_loss = nn.BCELoss(size_average=True) # Confidence loss
self.ce_loss = nn.CrossEntropyLoss() # Class loss
def forward(self, x, targets=None):
"""
Function:
Feedforward propagation for prediction
Arguments:
x -- input tensor
            targets -- tensor of ground-truth values used during training
        Returns:
            output -- tensor of model outputs, or a tuple of losses and metrics when targets are given
"""
nA = self.num_anchors
nB = x.size(0)
nG = x.size(2)
stride = self.image_dim / nG
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
prediction = x.view(nB, nA, self.bbox_attrs, nG, nG).permute(0, 1, 3, 4, 2).contiguous()
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# Calculate offsets for each grid
grid_x = torch.arange(nG).repeat(nG, 1).view([1, 1, nG, nG]).type(FloatTensor)
grid_y = torch.arange(nG).repeat(nG, 1).t().view([1, 1, nG, nG]).type(FloatTensor)
scaled_anchors = FloatTensor([(a_w / stride, a_h / stride) for a_w, a_h in self.anchors])
anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))
anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + grid_x
pred_boxes[..., 1] = y.data + grid_y
pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
# Training
if targets is not None:
if x.is_cuda:
self.mse_loss = self.mse_loss.cuda()
self.bce_loss = self.bce_loss.cuda()
self.ce_loss = self.ce_loss.cuda()
nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls = build_targets(
pred_boxes=pred_boxes.cpu().data,
pred_conf=pred_conf.cpu().data,
pred_cls=pred_cls.cpu().data,
target=targets.cpu().data,
anchors=scaled_anchors.cpu().data,
num_anchors=nA,
num_classes=self.num_classes,
grid_size=nG,
ignore_thres=self.ignore_thres,
img_dim=self.image_dim,
)
nProposals = int((pred_conf > 0.5).sum().item())
recall = float(nCorrect / nGT) if nGT else 1
            precision = float(nCorrect / nProposals) if nProposals else 0
# Handle masks
mask = Variable(mask.type(ByteTensor))
conf_mask = Variable(conf_mask.type(ByteTensor))
# Handle target variables
tx = Variable(tx.type(FloatTensor), requires_grad=False)
ty = Variable(ty.type(FloatTensor), requires_grad=False)
tw = Variable(tw.type(FloatTensor), requires_grad=False)
th = Variable(th.type(FloatTensor), requires_grad=False)
tconf = Variable(tconf.type(FloatTensor), requires_grad=False)
tcls = Variable(tcls.type(LongTensor), requires_grad=False)
# Get conf mask where gt and where there is no gt
conf_mask_true = mask
conf_mask_false = conf_mask - mask
# Mask outputs to ignore non-existing objects
loss_x = self.mse_loss(x[mask], tx[mask])
loss_y = self.mse_loss(y[mask], ty[mask])
loss_w = self.mse_loss(w[mask], tw[mask])
loss_h = self.mse_loss(h[mask], th[mask])
loss_conf = self.bce_loss(pred_conf[conf_mask_false], tconf[conf_mask_false]) + self.bce_loss(
pred_conf[conf_mask_true], tconf[conf_mask_true]
)
loss_cls = (1 / nB) * self.ce_loss(pred_cls[mask], torch.argmax(tcls[mask], 1))
loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
return (
loss,
loss_x.item(),
loss_y.item(),
loss_w.item(),
loss_h.item(),
loss_conf.item(),
loss_cls.item(),
recall,
precision,
)
else:
# If not in training phase return predictions
output = torch.cat(
(
pred_boxes.view(nB, -1, 4) * stride,
pred_conf.view(nB, -1, 1),
pred_cls.view(nB, -1, self.num_classes),
),
-1,
)
return output
def modules_creator(blocks):
"""
Function:
Constructs module list of layer blocks from module configuration in blocks
Arguments:
        blocks -- list of dictionaries, each describing one block's configuration
    Returns:
        hyperparams -- dictionary containing info about the model
        modules_list -- list of pytorch modules
        num_classes -- number of classes read from the yolo block
"""
hyperparams = blocks.pop(0)
output_filters = [int(hyperparams["channels"])]
modules_list = nn.ModuleList()
for i, block in enumerate(blocks):
modules = nn.Sequential()
if block["type"] == "convolutional":
try:
bn = int(block["batch_normalize"])
            except (KeyError, ValueError):
bn = 0
filters = int(block["filters"])
kernel_size = int(block["size"])
pad = (kernel_size - 1) // 2 if int(block["pad"]) else 0
modules.add_module(
"conv_%d" % i,
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(block["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module("batch_norm_%d" % i, nn.BatchNorm2d(filters))
if block["activation"] == "leaky":
modules.add_module("leaky_%d" % i, nn.LeakyReLU(0.1))
elif block["type"] == "maxpool":
kernel_size = int(block["size"])
stride = int(block["stride"])
if kernel_size == 2 and stride == 1:
padding = nn.ZeroPad2d((0, 1, 0, 1))
modules.add_module("_debug_padding_%d" % i, padding)
maxpool = nn.MaxPool2d(
kernel_size=int(block["size"]),
stride=int(block["stride"]),
padding=int((kernel_size - 1) // 2),
)
modules.add_module("maxpool_%d" % i, maxpool)
elif block["type"] == "upsample":
upsample = nn.Upsample(scale_factor=int(block["stride"]), mode="nearest")
modules.add_module("upsample_%d" % i, upsample)
elif block["type"] == "route":
layers = [int(x) for x in block["layers"].split(",")]
filters = sum([output_filters[layer_i] for layer_i in layers])
modules.add_module("route_%d" % i, EmptyLayer())
elif block["type"] == "shortcut":
filters = output_filters[int(block["from"])]
modules.add_module("shortcut_%d" % i, EmptyLayer())
elif block["type"] == "yolo":
anchor_idxs = [int(x) for x in block["mask"].split(",")]
# Extract anchors
anchors = [int(x) for x in block["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(block["classes"])
img_height = int(hyperparams["height"])
# Define detection layer
yolo_layer = DetectionLayer(anchors, num_classes, img_height)
modules.add_module("yolo_%d" % i, yolo_layer)
# Register module list and number of output filters
modules_list.append(modules)
output_filters.append(filters)
return hyperparams, modules_list, num_classes
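# Usage sketch (not part of the original file): modules_creator expects the list of
# block dicts produced by a Darknet cfg parser; "parse_model_config" below is a
# hypothetical helper name used only for illustration.
#
#     blocks = parse_model_config("config/yolov3.cfg")
#     hyperparams, module_list, num_classes = modules_creator(blocks)
#     print(hyperparams["height"], len(module_list), num_classes)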
|
the-stack_0_27788
|
import six
import logging
from elasticsearch.helpers import bulk
from aleph.core import es, es_index
from aleph.util import is_list, unique_list
log = logging.getLogger(__name__)
def bulk_op(iter, chunk_size=500):
bulk(es, iter, stats_only=True, chunk_size=chunk_size,
request_timeout=200.0)
def query_delete(query, doc_type=None, wait=True):
"Delete all documents matching the given query inside the doc_type(s)."
if doc_type is None:
doc_type = '*'
es.delete_by_query(index=six.text_type(es_index), body={'query': query},
doc_type=doc_type, refresh=True, conflicts='proceed',
wait_for_completion=wait)
def merge_docs(old, new):
"""Exend the values of the new doc with extra values from the old."""
old = remove_nulls(old)
new = dict(remove_nulls(new))
for k, v in old.items():
if k in new:
if is_list(v):
v = new[k] + v
new[k] = unique_list(v)
elif isinstance(v, dict):
new[k] = merge_docs(v, new[k])
else:
new[k] = v
return new
def remove_nulls(data):
"""Remove None-valued keys from a dictionary, recursively."""
    if isinstance(data, dict):
        for k, v in list(data.items()):
            if v is None:
                data.pop(k)
            else:
                data[k] = remove_nulls(v)
elif is_list(data):
data = [remove_nulls(d) for d in data if d is not None]
return data
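# Illustrative example (values are made up, not from the original module): merge_docs
# keeps the new document's scalars, merges lists, and recurses into nested dicts
# (assuming unique_list preserves order):
#
#     old = {'names': ['Alice'], 'country': None, 'meta': {'source': 'a'}}
#     new = {'names': ['Bob'], 'meta': {'source': 'b'}}
#     merge_docs(old, new)
#     # -> {'names': ['Bob', 'Alice'], 'meta': {'source': 'b'}}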
|
the-stack_0_27790
|
"""Utilities to convert JSGF sentences to directed graphs."""
import base64
import gzip
import io
import math
import typing
from dataclasses import dataclass
from pathlib import Path
import networkx as nx
from .const import IntentsType, ReplacementsType, SentencesType
from .ini_jsgf import get_intent_counts, split_rules
from .jsgf import (
Expression,
RuleReference,
Sentence,
Sequence,
SequenceType,
SlotReference,
Substitutable,
Taggable,
Word,
)
from .slots import split_slot_args
# -----------------------------------------------------------------------------
def expression_to_graph(
expression: Expression,
graph: nx.DiGraph,
source_state: int,
replacements: typing.Optional[ReplacementsType] = None,
empty_substitution: int = 0,
grammar_name: typing.Optional[str] = None,
count_dict: typing.Optional[typing.Dict[Expression, int]] = None,
rule_grammar: str = "",
expand_slots: bool = True,
) -> int:
"""Insert JSGF expression into a graph. Return final state."""
replacements = replacements or {}
# Handle sequence substitution
if isinstance(expression, Substitutable) and (expression.substitution is not None):
# Ensure everything downstream outputs nothing
empty_substitution += 1
# Handle tag begin
if isinstance(expression, Taggable) and expression.tag:
# Begin tag
next_state = len(graph)
tag = expression.tag.tag_text
olabel = f"__begin__{tag}"
label = f":{olabel}"
graph.add_edge(
source_state, next_state, ilabel="", olabel=maybe_pack(olabel), label=label
)
source_state = next_state
if expression.tag.substitution is not None:
# Ensure everything downstream outputs nothing
empty_substitution += 1
# Handle converters begin
begin_converters: typing.List[str] = []
if isinstance(expression, Taggable) and expression.tag:
begin_converters.extend(reversed(expression.tag.converters))
if isinstance(expression, Substitutable) and expression.converters:
begin_converters.extend(reversed(expression.converters))
# Create begin transitions for each converter (in reverse order)
for converter_name in begin_converters:
next_state = len(graph)
olabel = f"__convert__{converter_name}"
label = f"!{olabel}"
graph.add_edge(
source_state, next_state, ilabel="", olabel=maybe_pack(olabel), label=label
)
source_state = next_state
if isinstance(expression, Sequence):
# Group, optional, or alternative
seq: Sequence = expression
if seq.type == SequenceType.ALTERNATIVE:
# Optional or alternative
final_states = []
for item in seq.items:
# Branch alternatives from source state
next_state = expression_to_graph(
item,
graph,
source_state,
replacements=replacements,
empty_substitution=empty_substitution,
grammar_name=grammar_name,
count_dict=count_dict,
rule_grammar=rule_grammar,
expand_slots=expand_slots,
)
final_states.append(next_state)
# Connect all paths to final state
next_state = len(graph)
for final_state in final_states:
graph.add_edge(final_state, next_state, ilabel="", olabel="", label="")
source_state = next_state
else:
# Group
next_state = source_state
for item in seq.items:
# Create sequence of states
next_state = expression_to_graph(
item,
graph,
next_state,
replacements=replacements,
empty_substitution=empty_substitution,
grammar_name=grammar_name,
count_dict=count_dict,
rule_grammar=rule_grammar,
expand_slots=expand_slots,
)
source_state = next_state
elif isinstance(expression, Word):
# State for single word
word: Word = expression
next_state = len(graph)
graph.add_node(next_state, word=word.text)
if (word.substitution is None) and (empty_substitution <= 0):
# Single word input/output
graph.add_edge(
source_state,
next_state,
ilabel=word.text,
olabel=word.text,
label=word.text,
)
source_state = next_state
else:
# Loading edge
graph.add_edge(
source_state,
next_state,
ilabel=word.text,
olabel="",
label=f"{word.text}:",
)
source_state = next_state
# Add word output(s)
olabels = [word.text] if (word.substitution is None) else word.substitution
if empty_substitution <= 0:
source_state = add_substitution(graph, olabels, source_state)
elif isinstance(expression, RuleReference):
# Reference to a local or remote rule
rule_ref: RuleReference = expression
if rule_ref.grammar_name:
# Fully resolved rule name
rule_name = f"{rule_ref.grammar_name}.{rule_ref.rule_name}"
rule_grammar = rule_ref.grammar_name
elif rule_grammar:
# Nested rule
rule_name = f"{rule_grammar}.{rule_ref.rule_name}"
elif grammar_name:
# Local rule
rule_name = f"{grammar_name}.{rule_ref.rule_name}"
rule_grammar = grammar_name
else:
# Unresolved rule name
rule_name = rule_ref.rule_name
# Surround with <>
rule_name_brackets = f"<{rule_name}>"
rule_replacements = replacements.get(rule_name_brackets)
assert rule_replacements, f"Missing rule {rule_name}"
rule_body = next(iter(rule_replacements))
assert isinstance(rule_body, Sentence), f"Invalid rule {rule_name}: {rule_body}"
source_state = expression_to_graph(
rule_body,
graph,
source_state,
replacements=replacements,
empty_substitution=empty_substitution,
grammar_name=grammar_name,
count_dict=count_dict,
rule_grammar=rule_grammar,
expand_slots=expand_slots,
)
elif isinstance(expression, SlotReference):
# Reference to slot values
slot_ref: SlotReference = expression
# Prefix with $
slot_name = "$" + slot_ref.slot_name
if expand_slots:
slot_values = replacements.get(slot_name)
assert slot_values, f"Missing slot {slot_name}"
# Interpret as alternative
slot_seq = Sequence(type=SequenceType.ALTERNATIVE, items=list(slot_values))
source_state = expression_to_graph(
slot_seq,
graph,
source_state,
replacements=replacements,
empty_substitution=(
empty_substitution + (1 if slot_ref.substitution else 0)
),
grammar_name=grammar_name,
count_dict=count_dict,
rule_grammar=rule_grammar,
expand_slots=expand_slots,
)
# Emit __source__ with slot name (no arguments)
slot_name_noargs = split_slot_args(slot_ref.slot_name)[0]
next_state = len(graph)
olabel = f"__source__{slot_name_noargs}"
graph.add_edge(
source_state, next_state, ilabel="", olabel=olabel, label=maybe_pack(olabel)
)
source_state = next_state
# Handle sequence substitution
if isinstance(expression, Substitutable) and (expression.substitution is not None):
# Output substituted word(s)
empty_substitution -= 1
if empty_substitution <= 0:
source_state = add_substitution(
graph, expression.substitution, source_state
)
# Handle converters end
end_converters: typing.List[str] = []
if isinstance(expression, Substitutable) and expression.converters:
end_converters.extend(expression.converters)
if isinstance(expression, Taggable) and expression.tag:
end_converters.extend(expression.tag.converters)
# Handle tag end
if isinstance(expression, Taggable) and expression.tag:
# Handle tag substitution
if expression.tag.substitution is not None:
# Output substituted word(s)
source_state = add_substitution(
graph, expression.tag.substitution, source_state
)
# Create end transitions for each converter
for converter_name in end_converters:
next_state = len(graph)
olabel = f"__converted__{converter_name}"
label = f"!{olabel}"
graph.add_edge(
source_state,
next_state,
ilabel="",
olabel=maybe_pack(olabel),
label=label,
)
source_state = next_state
# End tag
next_state = len(graph)
tag = expression.tag.tag_text
olabel = f"__end__{tag}"
label = f":{olabel}"
graph.add_edge(
source_state, next_state, ilabel="", olabel=maybe_pack(olabel), label=label
)
source_state = next_state
else:
# Create end transitions for each converter
for converter_name in end_converters:
next_state = len(graph)
olabel = f"__converted__{converter_name}"
label = f"!{olabel}"
graph.add_edge(
source_state,
next_state,
ilabel="",
olabel=maybe_pack(olabel),
label=label,
)
source_state = next_state
return source_state
def add_substitution(
graph: nx.DiGraph,
substitution: typing.Union[str, typing.List[str]],
source_state: int,
) -> int:
"""Add substitution token sequence to graph."""
if isinstance(substitution, str):
substitution = [substitution]
for olabel in substitution:
next_state = len(graph)
graph.add_edge(
source_state,
next_state,
ilabel="",
olabel=maybe_pack(olabel),
label=f":{olabel}",
)
source_state = next_state
return source_state
def maybe_pack(olabel: str) -> str:
"""Pack output label as base64 if it contains whitespace."""
if " " in olabel:
return "__unpack__" + base64.encodebytes(olabel.encode()).decode().strip()
return olabel
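# Example (illustrative only): labels without whitespace pass through unchanged,
# while multi-word output labels are base64-encoded behind the "__unpack__" prefix.
#
#     maybe_pack("turn_on")        # -> "turn_on"
#     maybe_pack("turn on light")  # -> "__unpack__dHVybiBvbiBsaWdodA=="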
# -----------------------------------------------------------------------------
def intents_to_graph(
intents: IntentsType,
replacements: typing.Optional[ReplacementsType] = None,
add_intent_weights: bool = True,
exclude_slots_from_counts: bool = True,
) -> nx.DiGraph:
"""Convert sentences/rules grouped by intent into a directed graph."""
sentences, replacements = split_rules(intents, replacements)
return sentences_to_graph(
sentences,
replacements=replacements,
add_intent_weights=add_intent_weights,
exclude_slots_from_counts=exclude_slots_from_counts,
)
def sentences_to_graph(
sentences: SentencesType,
replacements: typing.Optional[ReplacementsType] = None,
add_intent_weights: bool = True,
exclude_slots_from_counts: bool = True,
expand_slots: bool = True,
) -> nx.DiGraph:
"""Convert sentences grouped by intent into a directed graph."""
num_intents = len(sentences)
intent_weights: typing.Dict[str, float] = {}
count_dict: typing.Optional[typing.Dict[Expression, int]] = None
if add_intent_weights:
        # Count number of possible sentences per intent
intent_counts = get_intent_counts(
sentences,
replacements,
exclude_slots=exclude_slots_from_counts,
count_dict=count_dict,
)
# Fix zero counts
for intent_name in intent_counts:
intent_counts[intent_name] = max(intent_counts[intent_name], 1)
num_sentences_lcm = lcm(*intent_counts.values())
intent_weights = {
intent_name: (
num_sentences_lcm // max(intent_counts.get(intent_name, 1), 1)
)
for intent_name in sentences
}
# Normalize
weight_sum = max(sum(intent_weights.values()), 1)
for intent_name in intent_weights:
intent_weights[intent_name] /= weight_sum
else:
intent_counts = {}
# Create initial graph
graph: nx.DiGraph = nx.DiGraph()
root_state: int = 0
graph.add_node(root_state, start=True)
final_states: typing.List[int] = []
for intent_name, intent_sentences in sentences.items():
# Branch off for each intent from start state
intent_state = len(graph)
olabel = f"__label__{intent_name}"
label = f":{olabel}"
edge_kwargs: typing.Dict[str, typing.Any] = {}
if add_intent_weights and (num_intents > 1):
edge_kwargs["sentence_count"] = intent_counts.get(intent_name, 1)
edge_kwargs["weight"] = intent_weights.get(intent_name, 0)
graph.add_edge(
root_state,
intent_state,
ilabel="",
olabel=olabel,
label=label,
**edge_kwargs,
)
for sentence in intent_sentences:
# Insert all sentences for this intent
next_state = expression_to_graph( # type: ignore
sentence,
graph,
intent_state,
replacements=replacements,
grammar_name=intent_name,
count_dict=count_dict,
expand_slots=expand_slots,
)
final_states.append(next_state)
# Create final state and join all sentences to it
final_state = len(graph)
graph.add_node(final_state, final=True)
for next_state in final_states:
graph.add_edge(next_state, final_state, ilabel="", olabel="", label="")
return graph
# -----------------------------------------------------------------------------
def graph_to_json(graph: nx.DiGraph) -> typing.Dict[str, typing.Any]:
"""Convert to dict suitable for JSON serialization."""
return nx.readwrite.json_graph.node_link_data(graph)
def json_to_graph(json_dict: typing.Dict[str, typing.Any]) -> nx.DiGraph:
"""Convert from deserialized JSON dict to graph."""
return nx.readwrite.json_graph.node_link_graph(json_dict)
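# Round-trip sketch (illustrative; Sentence.parse is assumed to exist in the jsgf
# module and may differ in this codebase):
#
#     graph = sentences_to_graph({"GetTime": [Sentence.parse("what time is it")]})
#     graph2 = json_to_graph(graph_to_json(graph))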
def graph_to_gzip_pickle(graph: nx.DiGraph, out_file: typing.BinaryIO, filename=None):
"""Convert to binary gzip pickle format."""
with gzip.GzipFile(fileobj=out_file, filename=filename, mode="wb") as graph_gzip:
nx.readwrite.gpickle.write_gpickle(graph, graph_gzip)
def gzip_pickle_to_graph(in_file: typing.BinaryIO) -> nx.DiGraph:
"""Convert from binary gzip pickle format."""
with gzip.GzipFile(fileobj=in_file, mode="rb") as graph_gzip:
return nx.readwrite.gpickle.read_gpickle(graph_gzip)
# -----------------------------------------------------------------------------
@dataclass
class GraphFsts:
"""Result from graph_to_fsts."""
intent_fsts: typing.Dict[str, str]
symbols: typing.Dict[str, int]
input_symbols: typing.Dict[str, int]
output_symbols: typing.Dict[str, int]
def graph_to_fsts(
graph: nx.DiGraph,
eps="<eps>",
weight_key="weight",
default_weight=0,
intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
) -> GraphFsts:
"""Convert graph to OpenFST text format, one per intent."""
intent_fsts: typing.Dict[str, str] = {}
symbols: typing.Dict[str, int] = {eps: 0}
input_symbols: typing.Dict[str, int] = {}
output_symbols: typing.Dict[str, int] = {}
n_data = graph.nodes(data=True)
# start state
start_node: int = next(n for n, data in n_data if data.get("start"))
for _, intent_node, edge_data in graph.edges(start_node, data=True):
intent_name: str = edge_data["olabel"][9:]
# Filter intents by name
if intent_filter and not intent_filter(intent_name):
continue
final_states: typing.Set[int] = set()
state_map: typing.Dict[int, int] = {}
with io.StringIO() as intent_file:
# Transitions
for edge in nx.edge_bfs(graph, intent_node):
edge_data = graph.edges[edge]
from_node, to_node = edge
# Map states starting from 0
from_state = state_map.get(from_node, len(state_map))
state_map[from_node] = from_state
to_state = state_map.get(to_node, len(state_map))
state_map[to_node] = to_state
# Get input/output labels.
# Empty string indicates epsilon transition (eps)
ilabel = edge_data.get("ilabel", "") or eps
olabel = edge_data.get("olabel", "") or eps
# Map labels (symbols) to integers
isymbol = symbols.get(ilabel, len(symbols))
symbols[ilabel] = isymbol
input_symbols[ilabel] = isymbol
osymbol = symbols.get(olabel, len(symbols))
symbols[olabel] = osymbol
output_symbols[olabel] = osymbol
if weight_key:
weight = edge_data.get(weight_key, default_weight)
print(
f"{from_state} {to_state} {ilabel} {olabel} {weight}",
file=intent_file,
)
else:
# No weight
print(
f"{from_state} {to_state} {ilabel} {olabel}", file=intent_file
)
# Check if final state
if n_data[from_node].get("final", False):
final_states.add(from_state)
if n_data[to_node].get("final", False):
final_states.add(to_state)
# Record final states
for final_state in final_states:
print(final_state, file=intent_file)
intent_fsts[intent_name] = intent_file.getvalue()
return GraphFsts(
intent_fsts=intent_fsts,
symbols=symbols,
input_symbols=input_symbols,
output_symbols=output_symbols,
)
# -----------------------------------------------------------------------------
@dataclass
class GraphFst:
"""Result from graph_to_fst."""
intent_fst: str
symbols: typing.Dict[str, int]
input_symbols: typing.Dict[str, int]
output_symbols: typing.Dict[str, int]
def write_fst(
self,
fst_text_path: typing.Union[str, Path],
isymbols_path: typing.Union[str, Path],
osymbols_path: typing.Union[str, Path],
):
"""Write FST text and symbol files."""
# Write FST
Path(fst_text_path).write_text(self.intent_fst)
# Write input symbols
with open(isymbols_path, "w") as isymbols_file:
# pylint: disable=E1101
for symbol, num in self.input_symbols.items():
print(symbol, num, file=isymbols_file)
# Write output symbols
with open(osymbols_path, "w") as osymbols_file:
# pylint: disable=E1101
for symbol, num in self.output_symbols.items():
print(symbol, num, file=osymbols_file)
def graph_to_fst(
graph: nx.DiGraph,
eps="<eps>",
weight_key="weight",
default_weight=0,
intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
) -> GraphFst:
"""Convert graph to OpenFST text format."""
symbols: typing.Dict[str, int] = {eps: 0}
input_symbols: typing.Dict[str, int] = {}
output_symbols: typing.Dict[str, int] = {}
n_data = graph.nodes(data=True)
# start state
start_node: int = next(n for n, data in n_data if data.get("start"))
# Generate FST text
with io.StringIO() as fst_file:
final_states: typing.Set[int] = set()
state_map: typing.Dict[int, int] = {}
# Transitions
for _, intent_node, intent_edge_data in graph.edges(start_node, data=True):
intent_olabel: str = intent_edge_data["olabel"]
intent_name: str = intent_olabel[9:]
# Filter intents by name
if intent_filter and not intent_filter(intent_name):
continue
assert (
" " not in intent_olabel
), f"Output symbol cannot contain whitespace: {intent_olabel}"
# Map states starting from 0
from_state = state_map.get(start_node, len(state_map))
state_map[start_node] = from_state
to_state = state_map.get(intent_node, len(state_map))
state_map[intent_node] = to_state
# Map labels (symbols) to integers
isymbol = symbols.get(eps, len(symbols))
symbols[eps] = isymbol
input_symbols[eps] = isymbol
osymbol = symbols.get(intent_olabel, len(symbols))
symbols[intent_olabel] = osymbol
output_symbols[intent_olabel] = osymbol
if weight_key:
weight = intent_edge_data.get(weight_key, default_weight)
print(
f"{from_state} {to_state} {eps} {intent_olabel} {weight}",
file=fst_file,
)
else:
# No weight
print(f"{from_state} {to_state} {eps} {intent_olabel}", file=fst_file)
# Add intent sub-graphs
for edge in nx.edge_bfs(graph, intent_node):
edge_data = graph.edges[edge]
from_node, to_node = edge
# Get input/output labels.
# Empty string indicates epsilon transition (eps)
ilabel = edge_data.get("ilabel", "") or eps
olabel = edge_data.get("olabel", "") or eps
# Check for whitespace
assert (
" " not in ilabel
), f"Input symbol cannot contain whitespace: {ilabel}"
assert (
" " not in olabel
), f"Output symbol cannot contain whitespace: {olabel}"
# Map states starting from 0
from_state = state_map.get(from_node, len(state_map))
state_map[from_node] = from_state
to_state = state_map.get(to_node, len(state_map))
state_map[to_node] = to_state
# Map labels (symbols) to integers
isymbol = symbols.get(ilabel, len(symbols))
symbols[ilabel] = isymbol
input_symbols[ilabel] = isymbol
osymbol = symbols.get(olabel, len(symbols))
symbols[olabel] = osymbol
output_symbols[olabel] = osymbol
if weight_key:
weight = edge_data.get(weight_key, default_weight)
print(
f"{from_state} {to_state} {ilabel} {olabel} {weight}",
file=fst_file,
)
else:
# No weight
print(f"{from_state} {to_state} {ilabel} {olabel}", file=fst_file)
# Check if final state
if n_data[from_node].get("final", False):
final_states.add(from_state)
if n_data[to_node].get("final", False):
final_states.add(to_state)
# Record final states
for final_state in final_states:
print(final_state, file=fst_file)
return GraphFst(
intent_fst=fst_file.getvalue(),
symbols=symbols,
input_symbols=input_symbols,
output_symbols=output_symbols,
)
# -----------------------------------------------------------------------------
def lcm(*nums: int) -> int:
"""Returns the least common multiple of the given integers"""
if nums:
nums_lcm = nums[0]
for n in nums[1:]:
nums_lcm = (nums_lcm * n) // math.gcd(nums_lcm, n)
return nums_lcm
return 1
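# Example (illustrative): lcm(4, 6, 10) == 60 and lcm() == 1. sentences_to_graph
# uses this so each intent's weight becomes num_sentences_lcm // sentence_count,
# which keeps intents with few sentences from being drowned out before normalization.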
# -----------------------------------------------------------------------------
def get_start_end_nodes(
graph: nx.DiGraph,
) -> typing.Tuple[typing.Optional[int], typing.Optional[int]]:
"""Return start/end nodes in graph"""
n_data = graph.nodes(data=True)
start_node = None
end_node = None
for node, data in n_data:
if data.get("start", False):
start_node = node
elif data.get("final", False):
end_node = node
if (start_node is not None) and (end_node is not None):
break
return (start_node, end_node)
|
the-stack_0_27793
|
"""
Unit tests for the hcluster module
"""
import hcluster
from hcluster import HashableSet as hset
import math
from unittest import TestCase
class TestHCluster(TestCase):
def test_base_call(self):
""" Basic call - assumes Euclidian Distance """
data = [(5, 5), (5, 6), (1, 1), (0, 0)]
c = hcluster.cluster(data)
# expected structure:
# |-------------|
# |-------| |
# | | |-------|
# (0, 0) (1, 1) (5, 5) (5, 6)
expected = hset(hset((0, 0), (1, 1)), hset((5, 5), (5, 6)))
assert expected == c.setrepr()
def test_cust_func(self):
""" Call passing a custom distance function """
data = [(5, 0), (0, 0), (1, 2)]
def yDist(a, b):
return abs(a[1] - b[1])
c = hcluster.cluster(data, yDist)
expected = hset(hset((5, 0), (0, 0)), (1, 2))
assert expected == c.setrepr()
def test_distance_matrix(self):
data = [(5, 5), (5, 6), (4, 4)]
m = hcluster.distance_matrix(data, hcluster.euclidian)
assert m[((5, 5), (5, 6))] == 1
assert m[((4, 4), (5, 6))] == math.sqrt(5)
assert m[((5, 5), (4, 4))] == math.sqrt(2)
|
the-stack_0_27795
|
# coding: utf-8
"""
Singularity sandbox implementation.
"""
__all__ = ["SingularitySandbox"]
import os
import subprocess
import luigi
import six
from law.config import Config
from law.sandbox.base import Sandbox
from law.target.local import LocalDirectoryTarget
from law.cli.software import deps as law_deps
from law.util import make_list, interruptable_popen, quote_cmd, flatten, law_src_path
class SingularitySandbox(Sandbox):
sandbox_type = "singularity"
# env cache per image
_envs = {}
@property
def image(self):
return self.name
def _singularity_exec_cmd(self):
cmd = ["singularity", "exec"]
        # task-specific arguments
if self.task:
# add args configured on the task
args_getter = getattr(self.task, "singularity_args", None)
if callable(args_getter):
cmd.extend(make_list(args_getter()))
return cmd
@property
def env(self):
# strategy: unlike docker, singularity might not allow binding of paths that do not exist
# in the container, so create a tmp directory on the host system and bind it as /tmp, let
# python dump its full env into a file, and read the file again on the host system
if self.image not in self._envs:
tmp_dir = LocalDirectoryTarget(is_tmp=True)
tmp_dir.touch()
tmp = tmp_dir.child("env", type="f")
tmp.touch()
# determine whether volume binding is allowed
allow_binds_cb = getattr(self.task, "singularity_allow_binds", None)
if callable(allow_binds_cb):
allow_binds = allow_binds_cb()
else:
cfg = Config.instance()
allow_binds = cfg.get_expanded(self.get_config_section(), "allow_binds")
# arguments to configure the environment
args = ["-e"]
if allow_binds:
args.extend(["-B", "{}:/tmp".format(tmp_dir.path)])
env_file = "/tmp/{}".format(tmp.basename)
else:
env_file = tmp.path
# get the singularity exec command
singularity_exec_cmd = self._singularity_exec_cmd() + args
# build commands to setup the environment
setup_cmds = self._build_setup_cmds(self._get_env())
# build the python command that dumps the environment
py_cmd = "import os,pickle;" \
+ "pickle.dump(dict(os.environ),open('{}','wb'),protocol=2)".format(env_file)
# build the full command
cmd = quote_cmd(singularity_exec_cmd + [self.image, "bash", "-l", "-c",
"; ".join(flatten(setup_cmds, quote_cmd(["python", "-c", py_cmd]))),
])
# run it
code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if code != 0:
raise Exception("singularity sandbox env loading failed:\n{}".format(out))
# load the environment from the tmp file
env = tmp.load(formatter="pickle")
# cache
self._envs[self.image] = env
return self._envs[self.image]
def cmd(self, proxy_cmd):
# singularity exec command arguments
# -e clears the environment
args = ["-e"]
# helper to build forwarded paths
cfg = Config.instance()
cfg_section = self.get_config_section()
forward_dir = cfg.get_expanded(cfg_section, "forward_dir")
python_dir = cfg.get_expanded(cfg_section, "python_dir")
bin_dir = cfg.get_expanded(cfg_section, "bin_dir")
stagein_dir_name = cfg.get_expanded(cfg_section, "stagein_dir_name")
stageout_dir_name = cfg.get_expanded(cfg_section, "stageout_dir_name")
def dst(*args):
return os.path.join(forward_dir, *(str(arg) for arg in args))
# helper for mounting a volume
volume_srcs = []
def mount(*vol):
src = vol[0]
# make sure, the same source directory is not mounted twice
if src in volume_srcs:
return
volume_srcs.append(src)
# ensure that source directories exist
if not os.path.isfile(src) and not os.path.exists(src):
os.makedirs(src)
# store the mount point
args.extend(["-B", ":".join(vol)])
# determine whether volume binding is allowed
allow_binds_cb = getattr(self.task, "singularity_allow_binds", None)
if callable(allow_binds_cb):
allow_binds = allow_binds_cb()
else:
allow_binds = cfg.get_expanded(cfg_section, "allow_binds")
# determine whether law software forwarding is allowed
forward_law_cb = getattr(self.task, "singularity_forward_law", None)
if callable(forward_law_cb):
forward_law = forward_law_cb()
else:
forward_law = cfg.get_expanded(cfg_section, "forward_law")
# environment variables to set
env = self._get_env()
# prevent python from writing byte code files
env["PYTHONDONTWRITEBYTECODE"] = "1"
if forward_law:
# adjust path variables
if allow_binds:
env["PATH"] = os.pathsep.join([dst("bin"), "$PATH"])
env["PYTHONPATH"] = os.pathsep.join([dst(python_dir), "$PYTHONPATH"])
else:
env["PATH"] = "$PATH"
env["PYTHONPATH"] = "$PYTHONPATH"
# forward python directories of law and dependencies
for mod in law_deps:
path = os.path.dirname(mod.__file__)
name, ext = os.path.splitext(os.path.basename(mod.__file__))
if name == "__init__":
vsrc = path
vdst = dst(python_dir, os.path.basename(path))
else:
vsrc = os.path.join(path, name + ".py")
vdst = dst(python_dir, name + ".py")
if allow_binds:
mount(vsrc, vdst)
else:
dep_path = os.path.dirname(vsrc)
if dep_path not in env["PYTHONPATH"].split(os.pathsep):
env["PYTHONPATH"] = os.pathsep.join([dep_path, env["PYTHONPATH"]])
# forward the law cli dir to bin as it contains a law executable
if allow_binds:
env["PATH"] = os.pathsep.join([dst(python_dir, "law", "cli"), env["PATH"]])
else:
env["PATH"] = os.pathsep.join([law_src_path("cli"), env["PATH"]])
# forward the law config file
if cfg.config_file:
if allow_binds:
mount(cfg.config_file, dst("law.cfg"))
env["LAW_CONFIG_FILE"] = dst("law.cfg")
else:
env["LAW_CONFIG_FILE"] = cfg.config_file
# forward the luigi config file
for p in luigi.configuration.LuigiConfigParser._config_paths[::-1]:
if os.path.exists(p):
if allow_binds:
mount(p, dst("luigi.cfg"))
env["LUIGI_CONFIG_PATH"] = dst("luigi.cfg")
else:
env["LUIGI_CONFIG_PATH"] = p
break
# add staging directories
if (self.stagein_info or self.stageout_info) and not allow_binds:
raise Exception("cannot use stage-in or -out if binds are not allowed")
if self.stagein_info:
env["LAW_SANDBOX_STAGEIN_DIR"] = dst(stagein_dir_name)
mount(self.stagein_info.stage_dir.path, dst(stagein_dir_name))
if self.stageout_info:
env["LAW_SANDBOX_STAGEOUT_DIR"] = dst(stageout_dir_name)
mount(self.stageout_info.stage_dir.path, dst(stageout_dir_name))
# forward volumes defined in the config and by the task
vols = self._get_volumes()
if vols and not allow_binds:
raise Exception("cannot forward volumes to sandbox if binds are not allowed")
for hdir, cdir in six.iteritems(vols):
if not cdir:
mount(hdir)
else:
cdir = self._expand_volume(cdir, bin_dir=dst(bin_dir), python_dir=dst(python_dir))
mount(hdir, cdir)
# handle scheduling within the container
ls_flag = "--local-scheduler"
if self.force_local_scheduler() and ls_flag not in proxy_cmd:
proxy_cmd.extend([ls_flag, "True"])
# get the singularity exec command, add arguments from above
singularity_exec_cmd = self._singularity_exec_cmd() + args
# build commands to set up environment
setup_cmds = self._build_setup_cmds(env)
# build the final command
cmd = quote_cmd(singularity_exec_cmd + [self.image, "bash", "-l", "-c",
"; ".join(flatten(setup_cmds, quote_cmd(proxy_cmd)))
])
return cmd
|
the-stack_0_27796
|
from twisted.python.components import registerAdapter
from axiom.attributes import reference
from axiom.item import Item
from nevow.page import Element
from xmantissa.ixmantissa import INavigableElement, INavigableFragment
from xmantissa.webnav import Tab
from zope.interface import implements, Interface
class ISuspender(Interface):
"""
Marker interface for suspended powerup facades.
"""
class SuspendedNavigableElement(Item):
implements(INavigableElement, ISuspender)
powerupInterfaces = (INavigableElement, ISuspender)
originalNE = reference()
def getTabs(self):
origTabs = self.originalNE.getTabs()
def proxyTabs(tabs):
for tab in tabs:
yield Tab(tab.name, self.storeID, tab.priority,
proxyTabs(tab.children),
authoritative=tab.authoritative,
linkURL=tab.linkURL)
return proxyTabs(origTabs)
class SuspendedFragment(Element):
"""
Temporary account-suspended fragment.
"""
fragmentName = 'suspend'
live = False
implements(INavigableFragment)
def head(self):
pass
registerAdapter(SuspendedFragment, SuspendedNavigableElement, INavigableFragment)
def suspendJustTabProviders(installation):
"""
Replace INavigableElements with facades that indicate their suspension.
"""
if installation.suspended:
raise RuntimeError("Installation already suspended")
powerups = list(installation.allPowerups)
for p in powerups:
if INavigableElement.providedBy(p):
p.store.powerDown(p, INavigableElement)
sne = SuspendedNavigableElement(store=p.store, originalNE=p)
p.store.powerUp(sne, INavigableElement)
p.store.powerUp(sne, ISuspender)
installation.suspended = True
def unsuspendTabProviders(installation):
"""
Remove suspension facades and replace them with their originals.
"""
if not installation.suspended:
raise RuntimeError("Installation not suspended")
powerups = list(installation.allPowerups)
allSNEs = list(powerups[0].store.powerupsFor(ISuspender))
for p in powerups:
for sne in allSNEs:
if sne.originalNE is p:
p.store.powerDown(sne, INavigableElement)
p.store.powerDown(sne, ISuspender)
p.store.powerUp(p, INavigableElement)
sne.deleteFromStore()
installation.suspended = False
|
the-stack_0_27797
|
#!/usr/bin/python
# coding:utf-8
import datetime
import os
import sys
import commands
"""
author:jimw
date:2020-01-14
Supported file name formats:
1. newWebSite.log.2019-05-14
2. catalina.2019-01-03.out
python zipFile.py /home/webApp/logs
Archives log files grouped by month,
then deletes the original historical files.
zip logging
"""
# Return the collection of files that need to be archived
def loggingFile(filepath):
    # Walk all files under filepath, including subdirectories (to be improved in a later version)
files = os.listdir(filepath)
    # Sorting is required here, otherwise the files cannot be grouped correctly
files.sort()
now = datetime.datetime.now()
    # Build the current year-month string, e.g. '2019-05'; the month is
    # zero-padded explicitly (this also avoids the earlier int-vs-str month bug)
    today_month = '{}-{:02d}'.format(now.year, now.month)
print('today_month:', today_month)
###
data_list_todays = []
data_list_files = []
remove_data_list_files = []
dictName = dict()
flagName = ''
os.chdir(filepath)
for fi in files:
fi_d = os.path.join(filepath, fi)
if not os.path.isdir(fi_d):
varName = os.path.join(filepath, fi_d)
varName = varName.split('/')[-1]
            # Derive the base file name (everything before the date part)
if (varName.split('.')[1] == 'log'):
varFileName = varName.split('.')[0] + '.' + varName.split('.')[1]
else:
varFileName = varName.split('.')[0]
if flagName == '':
# flagName = varFileName
print()
elif flagName != varFileName:
data_list_todays = []
flagName = varFileName
#print('flagName:', flagName)
data_list_files.append(varFileName)
            # Skip duplicates so each base file name is recorded only once
if containsDuplicate(data_list_files):
data_list_files.remove(varFileName)
# print(data_list_files)
            # Collect the month portion of the file name
# print(varFileName)
try:
if (varName.split('.')[1] == 'log'):
varNameindex = varName.split('.')[2]
varNameMonth = varNameindex.split('-')[0] + '-' + varNameindex.split('-')[1]
else:
varNameindex = varName.split('.')[1]
varNameMonth = varNameindex.split('-')[0] + '-' + varNameindex.split('-')[1]
if (varNameMonth == today_month):
                    print('The current month is not compressed')
continue
# print(varNameMonth)
            except Exception:
# print(varName)
continue
data_list_todays.append(varNameMonth)
if containsDuplicate(data_list_todays):
data_list_todays.remove(varNameMonth)
            # data_list_todays.reverse()
data_list_todays.reverse()
print(data_list_todays)
dictName[varFileName] = data_list_todays
if fi_d.find('.gz')==-1:
remove_data_list_files.append(str(fi_d))
else:
print('not delete:',fi_d)
return dictName, remove_data_list_files
# Returns True if the list contains duplicates; callers then skip the newly added item
def containsDuplicate(nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) == 0 or len(nums) == 1:
return False
d = {}
for i in nums:
if i in d:
return True
d[i] = 0
return False
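# Example (illustrative): containsDuplicate(['a', 'b']) is False, while
# containsDuplicate(['a', 'b', 'a']) is True. The caller above (loggingFile) appends
# a value first and removes it again whenever this returns True, keeping its lists unique.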
# Execute a shell command
def execCmd(cmd):
print(cmd)
# r = os.popen(cmd)
(status, output) = commands.getstatusoutput(cmd)
print("result:", status)
print("result output:", output)
text = 'ok'
return text
# Iterate over the grouped result and create the monthly archives
def zipFile(result):
print(result)
for file, valus in result.items():
        # Create the archive only if it does not already exist
# print(file)
# print(valus)
for months in valus:
varZipFile = file + '.' + months + '.tar.gz'
if os.path.isfile(varZipFile):
                print('Archive already exists, no update needed:', varZipFile)
continue
            # Compress all log files of this month into a tar.gz
cmdval = 'tar -zcvf ./' + varZipFile + ' ' + file + '.' + months + '*'
print(cmdval)
execCmd(cmdval)
# Iterate over the collected files and delete the originals
def delFile(result):
print(result)
for valus in result:
        # Delete the file once it has been compressed
os.remove(valus)
print('delete history file:', valus)
if __name__ == '__main__':
    # Recursively walk all files under the given directory
result = loggingFile(sys.argv[1])
# result = loggingFile('D://logs')
zipFile(result[0])
delFile(result[1])
|
the-stack_0_27798
|
#!/usr/bin/env python
# Copyright (c) 2013, AT&T Labs, Yun Mao <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import re
import sys
from pylint import lint
from pylint.reporters import text
from six.moves import cStringIO as StringIO
# Note(maoy): E1103 is error code related to partial type inference
ignore_codes = ["E1103"]
# Note(maoy): the error message is the pattern of E0202. It should be ignored
# for cinder.tests modules
# Note(fengqian): the second error message is the pattern of [E0611].
# It should be ignored because the six module is used to keep py3.X compatibility.
ignore_messages = ["An attribute affected in cinder.tests", "No name 'urllib' in module '_MovedItems'"]
# Note(maoy): we ignore all errors in openstack.common because it should be
# checked elsewhere. We also ignore cinder.tests for now due to high false
# positive rate.
ignore_modules = ["cinder/openstack/common/", "cinder/tests/"]
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
_cached_filename = None
_cached_content = None
def __init__(self, filename, lineno, line_content, code, message,
lintoutput):
self.filename = filename
self.lineno = lineno
self.line_content = line_content
self.code = code
self.message = message
self.lintoutput = lintoutput
@classmethod
def from_line(cls, line):
m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
matched = m.groups()
filename, lineno, code, message = (matched[0], int(matched[1]),
matched[2], matched[-1])
if cls._cached_filename != filename:
with open(filename) as f:
cls._cached_content = list(f.readlines())
cls._cached_filename = filename
line_content = cls._cached_content[lineno - 1].rstrip()
return cls(filename, lineno, line_content, code, message,
line.rstrip())
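    # Example of the parseable pylint line format from_line expects (the path and
    # message below are made up for illustration):
    #
    #     cinder/volume/api.py:42: [E1101, VolumeAPI.create] Instance of 'Foo' has no 'bar' member
    #
    # which the regex parses to filename='cinder/volume/api.py', lineno=42,
    # code='E1101' and the trailing text as the message.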
@classmethod
def from_msg_to_dict(cls, msg):
"""From the output of pylint msg, to a dict, where each key
is a unique error identifier, value is a list of LintOutput
"""
result = {}
for line in msg.splitlines():
obj = cls.from_line(line)
if obj.is_ignored():
continue
key = obj.key()
if key not in result:
result[key] = []
result[key].append(obj)
return result
def is_ignored(self):
if self.code in ignore_codes:
return True
if any(self.filename.startswith(name) for name in ignore_modules):
return True
if any(msg in self.message for msg in ignore_messages):
return True
return False
def key(self):
if self.code in ["E1101", "E1103"]:
# These two types of errors are like Foo class has no member bar.
# We discard the source code so that the error will be ignored
# next time another Foo.bar is encountered.
return self.message, ""
return self.message, self.line_content.strip()
def json(self):
return json.dumps(self.__dict__)
def review_str(self):
return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
"%(code)s: %(message)s" % self.__dict__)
class ErrorKeys(object):
@classmethod
def print_json(cls, errors, output=sys.stdout):
print("# automatically generated by tools/lintstack.py", file=output)
for i in sorted(errors.keys()):
print(json.dumps(i), file=output)
@classmethod
def from_file(cls, filename):
keys = set()
for line in open(filename):
if line and line[0] != "#":
d = json.loads(line)
keys.add(tuple(d))
return keys
def run_pylint():
buff = StringIO()
reporter = text.ParseableTextReporter(output=buff)
args = ["--include-ids=y", "-E", "cinder"]
lint.Run(args, reporter=reporter, exit=False)
val = buff.getvalue()
buff.close()
return val
def generate_error_keys(msg=None):
print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
if msg is None:
msg = run_pylint()
errors = LintOutput.from_msg_to_dict(msg)
with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
ErrorKeys.print_json(errors, output=f)
def validate(newmsg=None):
print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
if newmsg is None:
print("Running pylint. Be patient...")
newmsg = run_pylint()
errors = LintOutput.from_msg_to_dict(newmsg)
print("Unique errors reported by pylint: was %d, now %d."
% (len(known), len(errors)))
passed = True
for err_key, err_list in errors.items():
for err in err_list:
if err_key not in known:
print(err.lintoutput)
print()
passed = False
if passed:
print("Congrats! pylint check passed.")
redundant = known - set(errors.keys())
if redundant:
print("Extra credit: some known pylint exceptions disappeared.")
for i in sorted(redundant):
print(json.dumps(i))
print("Consider regenerating the exception file if you will.")
else:
print("Please fix the errors above. If you believe they are false "
"positives, run 'tools/lintstack.py generate' to overwrite.")
sys.exit(1)
def usage():
print("""Usage: tools/lintstack.py [generate|validate]
To generate pylint_exceptions file: tools/lintstack.py generate
To validate the current commit: tools/lintstack.py
""")
def main():
option = "validate"
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "generate":
generate_error_keys()
elif option == "validate":
validate()
else:
usage()
if __name__ == "__main__":
main()
|
the-stack_0_27799
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import Counter, OrderedDict
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from knack.log import get_logger
from azure.mgmt.trafficmanager.models import MonitorProtocol, ProfileStatus
# pylint: disable=no-self-use,no-member,too-many-lines,unused-argument
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection, get_property
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.util import CLIError, sdk_no_wait, find_child_item, find_child_collection
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
UnrecognizedArgumentError, ResourceNotFoundError, CLIInternalError
from azure.cli.core.profiles import ResourceType, supported_api_version
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.network.zone_file.parse_zone_file import parse_zone_file
from azure.cli.command_modules.network.zone_file.make_zone_file import make_zone_file
import threading
import time
import platform
import subprocess
logger = get_logger(__name__)
# region Utility methods
def _log_pprint_template(template):
import json
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
def _get_default_name(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, True)
def _get_default_id(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, False)
def _get_default_value(balancer, property_name, option_name, return_name):
values = [x.id for x in getattr(balancer, property_name)]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
if not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0].rsplit('/', 1)[1] if return_name else values[0]
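# Behaviour sketch for the default-value helpers above (resource names are hypothetical):
# given balancer.frontend_ip_configurations == [<.../frontendIPConfigurations/feip1>],
#   _get_default_name(balancer, 'frontend_ip_configurations', '--frontend-ip') -> 'feip1'
#   _get_default_id(balancer, 'frontend_ip_configurations', '--frontend-ip')   -> the full resource ID
# and a CLIError is raised when the collection is empty or contains more than one entry.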
# endregion
# region Generic list commands
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list(resource_group_name)
return operation_group.list_all()
def list_vnet(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'virtual_networks', resource_group_name)
def list_express_route_circuits(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'express_route_circuits', resource_group_name)
def create_express_route_auth(cmd, resource_group_name, circuit_name, authorization_name):
ExpressRouteCircuitAuthorization = cmd.get_models('ExpressRouteCircuitAuthorization')
client = network_client_factory(cmd.cli_ctx).express_route_circuit_authorizations
return client.begin_create_or_update(resource_group_name,
circuit_name,
authorization_name,
ExpressRouteCircuitAuthorization())
def list_lbs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'load_balancers', resource_group_name)
def list_nics(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_interfaces', resource_group_name)
def list_nsgs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_security_groups', resource_group_name)
def list_nsg_rules(cmd, resource_group_name, network_security_group_name, include_default=False):
client = network_client_factory(cmd.cli_ctx).network_security_groups
nsg = client.get(resource_group_name, network_security_group_name)
rules = nsg.security_rules
if include_default:
rules = rules + nsg.default_security_rules
return rules
def list_custom_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'custom_ip_prefixes', resource_group_name)
def list_public_ips(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_addresses', resource_group_name)
def list_public_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_prefixes', resource_group_name)
def list_route_tables(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'route_tables', resource_group_name)
def list_application_gateways(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'application_gateways', resource_group_name)
def list_network_watchers(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_watchers', resource_group_name)
# endregion
# region ApplicationGateways
# pylint: disable=too-many-locals
def _is_v2_sku(sku):
return 'v2' in sku
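# Illustrative examples: _is_v2_sku('Standard_v2') -> True, _is_v2_sku('Standard_Small') -> False.
# For v1 SKUs the tier is the prefix before the first '_', while v2 SKUs use the full SKU
# name as the tier (see the sku_tier derivation in the create/update helpers below).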
# pylint: disable=too-many-statements
def create_application_gateway(cmd, application_gateway_name, resource_group_name, location=None,
tags=None, no_wait=False, capacity=2,
cert_data=None, cert_password=None, key_vault_secret_id=None,
frontend_port=None, http_settings_cookie_based_affinity='disabled',
http_settings_port=80, http_settings_protocol='Http',
routing_rule_type='Basic', servers=None,
sku=None,
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
subnet='default', subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
connection_draining_timeout=0, enable_http2=None, min_capacity=None, zones=None,
custom_error_pages=None, firewall_policy=None, max_capacity=None,
user_assigned_identity=None,
enable_private_link=False,
private_link_ip_address=None,
private_link_subnet='PrivateLinkDefaultSubnet',
private_link_subnet_prefix='10.0.1.0/24',
private_link_primary=None,
trusted_client_cert=None,
ssl_profile=None,
ssl_profile_id=None,
ssl_cert_name=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_application_gateway_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
sku_tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
http_listener_protocol = 'https' if (cert_data or key_vault_secret_id) else 'http'
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
virtual_network_name = virtual_network_name or '{}Vnet'.format(application_gateway_name)
# Build up the ARM template
master_template = ArmTemplateBuilder()
ag_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if subnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix,
enable_private_link=enable_private_link,
private_link_subnet=private_link_subnet,
private_link_subnet_prefix=private_link_subnet_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name, subnet)
if public_ip_address_type == 'new':
ag_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
public_ip_sku = None
if _is_v2_sku(sku):
public_ip_sku = 'Standard'
public_ip_address_allocation = 'Static'
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
None, public_ip_sku, None))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
private_link_subnet_id = None
private_link_name = 'PrivateLinkDefaultConfiguration'
private_link_ip_allocation_method = 'Dynamic'
if enable_private_link:
private_link_subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name,
private_link_subnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
app_gateway_resource = build_application_gateway_resource(
cmd, application_gateway_name, location, tags, sku, sku_tier, capacity, servers, frontend_port,
private_ip_address, private_ip_allocation, cert_data, cert_password, key_vault_secret_id,
http_settings_cookie_based_affinity, http_settings_protocol, http_settings_port,
http_listener_protocol, routing_rule_type, public_ip_id, subnet_id,
connection_draining_timeout, enable_http2, min_capacity, zones, custom_error_pages,
firewall_policy, max_capacity, user_assigned_identity,
enable_private_link, private_link_name,
private_link_ip_address, private_link_ip_allocation_method, private_link_primary,
private_link_subnet_id, trusted_client_cert, ssl_profile, ssl_profile_id, ssl_cert_name)
app_gateway_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(
application_gateway_name))
master_template.add_resource(app_gateway_resource)
master_template.add_output('applicationGateway', application_gateway_name, output_type='object')
if cert_password:
master_template.add_secure_parameter('certPassword', cert_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'ag_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_application_gateway(cmd, instance, sku=None, capacity=None, tags=None, enable_http2=None, min_capacity=None,
custom_error_pages=None, max_capacity=None):
if sku is not None:
instance.sku.tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
try:
if min_capacity is not None:
instance.autoscale_configuration.min_capacity = min_capacity
if max_capacity is not None:
instance.autoscale_configuration.max_capacity = max_capacity
except AttributeError:
instance.autoscale_configuration = {
'min_capacity': min_capacity,
'max_capacity': max_capacity
}
with cmd.update_context(instance) as c:
c.set_param('sku.name', sku)
c.set_param('sku.capacity', capacity)
c.set_param('tags', tags)
c.set_param('enable_http2', enable_http2)
c.set_param('custom_error_configurations', custom_error_pages)
return instance
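# Most of the application-gateway child-resource helpers below follow the same shape:
# GET the gateway, mutate the relevant collection in memory, then PUT the whole resource
# back via begin_create_or_update. A minimal sketch (everything except the helpers already
# imported above is hypothetical):
#
#   ncf = network_client_factory(cmd.cli_ctx)
#   appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
#   upsert_to_collection(appgw, 'frontend_ports', new_item, 'name')
#   sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
#               resource_group_name, application_gateway_name, appgw)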
def create_ag_authentication_certificate(cmd, resource_group_name, application_gateway_name, item_name,
cert_data, no_wait=False):
AuthCert = cmd.get_models('ApplicationGatewayAuthenticationCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_cert = AuthCert(data=cert_data, name=item_name)
upsert_to_collection(ag, 'authentication_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_authentication_certificate(instance, parent, item_name, cert_data):
instance.data = cert_data
return parent
def create_ag_backend_address_pool(cmd, resource_group_name, application_gateway_name, item_name,
servers=None, no_wait=False):
ApplicationGatewayBackendAddressPool = cmd.get_models('ApplicationGatewayBackendAddressPool')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_pool = ApplicationGatewayBackendAddressPool(name=item_name, backend_addresses=servers)
upsert_to_collection(ag, 'backend_address_pools', new_pool, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_address_pool(instance, parent, item_name, servers=None):
if servers is not None:
instance.backend_addresses = servers
return parent
def create_ag_frontend_ip_configuration(cmd, resource_group_name, application_gateway_name, item_name,
public_ip_address=None, subnet=None,
virtual_network_name=None, private_ip_address=None,
private_ip_address_allocation=None, no_wait=False):
ApplicationGatewayFrontendIPConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayFrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if public_ip_address:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address))
else:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address if private_ip_address else None,
private_ip_allocation_method='Static' if private_ip_address else 'Dynamic',
subnet=SubResource(id=subnet))
upsert_to_collection(ag, 'frontend_ip_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_ip_configuration(cmd, instance, parent, item_name, public_ip_address=None,
subnet=None, virtual_network_name=None,
private_ip_address=None):
SubResource = cmd.get_models('SubResource')
if public_ip_address is not None:
instance.public_ip_address = SubResource(id=public_ip_address)
if subnet is not None:
instance.subnet = SubResource(id=subnet)
if private_ip_address is not None:
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'Static'
return parent
def create_ag_frontend_port(cmd, resource_group_name, application_gateway_name, item_name, port,
no_wait=False):
ApplicationGatewayFrontendPort = cmd.get_models('ApplicationGatewayFrontendPort')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_port = ApplicationGatewayFrontendPort(name=item_name, port=port)
upsert_to_collection(ag, 'frontend_ports', new_port, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_port(instance, parent, item_name, port=None):
if port is not None:
instance.port = port
return parent
def create_ag_http_listener(cmd, resource_group_name, application_gateway_name, item_name,
frontend_port, frontend_ip=None, host_name=None, ssl_cert=None,
ssl_profile=None, firewall_policy=None, no_wait=False, host_names=None):
ApplicationGatewayHttpListener, SubResource = cmd.get_models('ApplicationGatewayHttpListener', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not frontend_ip:
frontend_ip = _get_default_id(ag, 'frontend_ip_configurations', '--frontend-ip')
new_listener = ApplicationGatewayHttpListener(
name=item_name,
frontend_ip_configuration=SubResource(id=frontend_ip),
frontend_port=SubResource(id=frontend_port),
host_name=host_name,
require_server_name_indication=True if ssl_cert and host_name else None,
protocol='https' if ssl_cert else 'http',
ssl_certificate=SubResource(id=ssl_cert) if ssl_cert else None,
host_names=host_names
)
if cmd.supported_api_version(min_api='2019-09-01'):
new_listener.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
if cmd.supported_api_version(min_api='2020-06-01'):
new_listener.ssl_profile = SubResource(id=ssl_profile) if ssl_profile else None
upsert_to_collection(ag, 'http_listeners', new_listener, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_http_listener(cmd, instance, parent, item_name, frontend_ip=None, frontend_port=None,
host_name=None, ssl_cert=None, ssl_profile=None, firewall_policy=None, host_names=None):
SubResource = cmd.get_models('SubResource')
if frontend_ip is not None:
instance.frontend_ip_configuration = SubResource(id=frontend_ip)
if frontend_port is not None:
instance.frontend_port = SubResource(id=frontend_port)
if ssl_cert is not None:
if ssl_cert:
instance.ssl_certificate = SubResource(id=ssl_cert)
instance.protocol = 'Https'
else:
instance.ssl_certificate = None
instance.protocol = 'Http'
if host_name is not None:
instance.host_name = host_name or None
if cmd.supported_api_version(min_api='2019-09-01'):
if firewall_policy is not None:
instance.firewall_policy = SubResource(id=firewall_policy)
if cmd.supported_api_version(min_api='2020-06-01'):
if ssl_profile is not None:
instance.ssl_profile = SubResource(id=ssl_profile)
if host_names is not None:
instance.host_names = host_names or None
instance.require_server_name_indication = instance.host_name and instance.protocol.lower() == 'https'
return parent
def assign_ag_identity(cmd, resource_group_name, application_gateway_name,
user_assigned_identity, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
    user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
    user_assigned_identities_instance = dict()
    user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance
)
ag.identity = identity_instance
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def remove_ag_identity(cmd, resource_group_name, application_gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
logger.warning("This command will be ignored. The identity doesn't exist.")
ag.identity = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_identity(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
raise CLIError("Please first use 'az network application-gateway identity assign` to init the identity.")
return ag.identity
def add_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
frontend_ip,
private_link_name,
private_link_subnet_name_or_id,
private_link_subnet_prefix=None,
private_link_primary=None,
private_link_ip_address=None,
no_wait=False):
(SubResource, IPAllocationMethod, Subnet,
ApplicationGatewayPrivateLinkConfiguration,
ApplicationGatewayPrivateLinkIpConfiguration) = cmd.get_models(
'SubResource', 'IPAllocationMethod', 'Subnet',
'ApplicationGatewayPrivateLinkConfiguration', 'ApplicationGatewayPrivateLinkIpConfiguration')
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
private_link_config_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=appgw.name,
child_type_1='privateLinkConfigurations',
child_name_1=private_link_name
)
if not any(fic for fic in appgw.frontend_ip_configurations if fic.name == frontend_ip):
raise CLIError("Frontend IP doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == private_link_config_id:
            raise CLIError('Frontend IP already references an existing Private Link')
if fic.name == frontend_ip:
break
else:
raise CLIError("Frontend IP doesn't exist")
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
            raise CLIError('A Private Link with this name already exists')
# get the virtual network of this application gateway
vnet_name = parse_resource_id(appgw.gateway_ip_configurations[0].subnet.id)['name']
vnet = ncf.virtual_networks.get(resource_group_name, vnet_name)
# prepare the subnet for new private link
for subnet in vnet.subnets:
        if subnet.name == private_link_subnet_name_or_id:
            raise CLIError('A subnet with this name already exists')
        if subnet.address_prefix == private_link_subnet_prefix:
            raise CLIError('A subnet with this address prefix already exists')
        if subnet.address_prefixes and private_link_subnet_prefix in subnet.address_prefixes:
            raise CLIError('A subnet with this address prefix already exists')
if is_valid_resource_id(private_link_subnet_name_or_id):
private_link_subnet_id = private_link_subnet_name_or_id
else:
private_link_subnet = Subnet(name=private_link_subnet_name_or_id,
address_prefix=private_link_subnet_prefix,
private_link_service_network_policies='Disabled')
private_link_subnet_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=private_link_subnet_name_or_id
)
vnet.subnets.append(private_link_subnet)
ncf.virtual_networks.begin_create_or_update(resource_group_name, vnet_name, vnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name='PrivateLinkDefaultIPConfiguration',
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
private_link_config = ApplicationGatewayPrivateLinkConfiguration(
name=private_link_name,
ip_configurations=[private_link_ip_config]
)
# associate the private link with the frontend IP configuration
for fic in appgw.frontend_ip_configurations:
if fic.name == frontend_ip:
fic.private_link_configuration = SubResource(id=private_link_config_id)
appgw.private_link_configurations.append(private_link_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name, appgw)
def show_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link
def list_ag_private_link(cmd,
resource_group_name,
application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.private_link_configurations
def remove_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
removed_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
removed_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == removed_private_link.id:
fic.private_link_configuration = None
    # the leftover private-link subnet/vnet has to be deleted manually
# rs = parse_resource_id(removed_private_link.ip_configurations[0].subnet.id)
# vnet_resource_group, vnet_name, subnet = rs['resource_group'], rs['name'], rs['child_name_1']
# ncf.subnets.delete(vnet_resource_group, vnet_name, subnet)
appgw.private_link_configurations.remove(removed_private_link)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
# region application-gateway trusted-client-certificates
def add_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
ApplicationGatewayTrustedClientCertificate = cmd.get_models('ApplicationGatewayTrustedClientCertificate')
cert = ApplicationGatewayTrustedClientCertificate(name=client_cert_name, data=client_cert_data)
appgw.trusted_client_certificates.append(cert)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
cert.data = client_cert_data
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_trusted_client_certificate(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.trusted_client_certificates
def remove_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
appgw.trusted_client_certificates.remove(cert)
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
instance = cert
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return instance
def show_ag_backend_health(cmd, client, resource_group_name, application_gateway_name, expand=None,
protocol=None, host=None, path=None, timeout=None, host_name_from_http_settings=None,
match_body=None, match_status_codes=None, address_pool=None, http_settings=None):
from azure.cli.core.commands import LongRunningOperation
on_demand_arguments = {protocol, host, path, timeout, host_name_from_http_settings, match_body, match_status_codes,
address_pool, http_settings}
if on_demand_arguments.difference({None}) and cmd.supported_api_version(min_api='2019-04-01'):
SubResource, ApplicationGatewayOnDemandProbe, ApplicationGatewayProbeHealthResponseMatch = cmd.get_models(
"SubResource", "ApplicationGatewayOnDemandProbe", "ApplicationGatewayProbeHealthResponseMatch")
probe_request = ApplicationGatewayOnDemandProbe(
protocol=protocol,
host=host,
path=path,
timeout=timeout,
pick_host_name_from_backend_http_settings=host_name_from_http_settings
)
if match_body is not None or match_status_codes is not None:
probe_request.match = ApplicationGatewayProbeHealthResponseMatch(
body=match_body,
status_codes=match_status_codes,
)
if address_pool is not None:
if not is_valid_resource_id(address_pool):
address_pool = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendAddressPools',
child_name_1=address_pool
)
probe_request.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
if not is_valid_resource_id(http_settings):
http_settings = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendHttpSettingsCollection',
child_name_1=http_settings
)
probe_request.backend_http_settings = SubResource(id=http_settings)
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health_on_demand(
resource_group_name, application_gateway_name, probe_request, expand))
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health(
resource_group_name, application_gateway_name, expand))
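# Minimal on-demand backend health sketch using the helper above (argument values are
# hypothetical; 'client' is the application_gateways operations client supplied by the
# command framework). Passing any probe argument triggers the on-demand branch:
#
#   show_ag_backend_health(cmd, client, 'my-rg', 'my-appgw',
#                          protocol='Http', host='10.0.0.4', path='/', timeout=30)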
# endregion
# region application-gateway ssl-profile
def add_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
(SubResource,
ApplicationGatewaySslPolicy,
ApplicationGatewayClientAuthConfiguration,
ApplicationGatewaySslProfile) = cmd.get_models('SubResource',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayClientAuthConfiguration',
'ApplicationGatewaySslProfile')
sr_trusted_client_certificates = [SubResource(id=item) for item in
trusted_client_certificates] if trusted_client_certificates else None
ssl_policy = ApplicationGatewaySslPolicy(policy_name=policy_name, policy_type=policy_type,
min_protocol_version=min_protocol_version,
cipher_suites=cipher_suites, disabled_ssl_protocols=disabled_ssl_protocols)
client_auth = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=client_auth_configuration) if client_auth_configuration else None
ssl_profile = ApplicationGatewaySslProfile(trusted_client_certificates=sr_trusted_client_certificates,
ssl_policy=ssl_policy, client_auth_configuration=client_auth,
name=ssl_profile_name)
appgw.ssl_profiles.append(ssl_profile)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
if policy_name is not None:
instance.ssl_policy.policy_name = policy_name
if policy_type is not None:
instance.ssl_policy.policy_type = policy_type
if min_protocol_version is not None:
instance.ssl_policy.min_protocol_version = min_protocol_version
if cipher_suites is not None:
instance.ssl_policy.cipher_suites = cipher_suites
if disabled_ssl_protocols is not None:
instance.ssl_policy.disabled_ssl_protocols = disabled_ssl_protocols
if trusted_client_certificates is not None:
SubResource = cmd.get_models('SubResource')
instance.trusted_client_certificates = [SubResource(id=item) for item in trusted_client_certificates]
if client_auth_configuration is not None:
ApplicationGatewayClientAuthConfiguration = cmd.get_models('ApplicationGatewayClientAuthConfiguration')
instance.client_auth_configuration = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=(client_auth_configuration == 'True')
)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_ssl_profile(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.ssl_profiles
def remove_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
appgw.ssl_profiles.remove(profile)
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return instance
# endregion
def add_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
private_link_primary=False,
private_link_ip_address=None,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
(SubResource, IPAllocationMethod,
ApplicationGatewayPrivateLinkIpConfiguration) = \
cmd.get_models('SubResource', 'IPAllocationMethod',
'ApplicationGatewayPrivateLinkIpConfiguration')
private_link_subnet_id = target_private_link.ip_configurations[0].subnet.id
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name=private_link_ip_name,
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
target_private_link.ip_configurations.append(private_link_ip_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def show_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
target_private_link_ip_config = None
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
target_private_link_ip_config = pic
break
else:
raise CLIError("IP Configuration doesn't exist")
return target_private_link_ip_config
def list_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link.ip_configurations
def remove_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
updated_ip_configurations = target_private_link.ip_configurations
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
updated_ip_configurations.remove(pic)
break
else:
raise CLIError("IP Configuration doesn't exist")
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def create_ag_backend_http_settings_collection(cmd, resource_group_name, application_gateway_name, item_name, port,
probe=None, protocol='http', cookie_based_affinity=None, timeout=None,
no_wait=False, connection_draining_timeout=0,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
ApplicationGatewayBackendHttpSettings, ApplicationGatewayConnectionDraining, SubResource = cmd.get_models(
'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayConnectionDraining', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_settings = ApplicationGatewayBackendHttpSettings(
port=port,
protocol=protocol,
cookie_based_affinity=cookie_based_affinity or 'Disabled',
request_timeout=timeout,
probe=SubResource(id=probe) if probe else None,
name=item_name)
if cmd.supported_api_version(min_api='2016-09-01'):
new_settings.authentication_certificates = [SubResource(id=x) for x in auth_certs or []]
if cmd.supported_api_version(min_api='2016-12-01'):
new_settings.connection_draining = \
ApplicationGatewayConnectionDraining(
enabled=bool(connection_draining_timeout), drain_timeout_in_sec=connection_draining_timeout or 1)
if cmd.supported_api_version(min_api='2017-06-01'):
new_settings.host_name = host_name
new_settings.pick_host_name_from_backend_address = host_name_from_backend_pool
new_settings.affinity_cookie_name = affinity_cookie_name
new_settings.probe_enabled = enable_probe
new_settings.path = path
if cmd.supported_api_version(min_api='2019-04-01'):
new_settings.trusted_root_certificates = [SubResource(id=x) for x in root_certs or []]
upsert_to_collection(ag, 'backend_http_settings_collection', new_settings, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_http_settings_collection(cmd, instance, parent, item_name, port=None, probe=None, protocol=None,
cookie_based_affinity=None, timeout=None,
connection_draining_timeout=None,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
SubResource = cmd.get_models('SubResource')
if auth_certs == "":
instance.authentication_certificates = None
elif auth_certs is not None:
instance.authentication_certificates = [SubResource(id=x) for x in auth_certs]
if root_certs == "":
instance.trusted_root_certificates = None
elif root_certs is not None:
instance.trusted_root_certificates = [SubResource(id=x) for x in root_certs]
if port is not None:
instance.port = port
if probe is not None:
instance.probe = SubResource(id=probe)
if protocol is not None:
instance.protocol = protocol
if cookie_based_affinity is not None:
instance.cookie_based_affinity = cookie_based_affinity
if timeout is not None:
instance.request_timeout = timeout
if connection_draining_timeout is not None:
instance.connection_draining = {
'enabled': bool(connection_draining_timeout),
'drain_timeout_in_sec': connection_draining_timeout or 1
}
if host_name is not None:
instance.host_name = host_name
if host_name_from_backend_pool is not None:
instance.pick_host_name_from_backend_address = host_name_from_backend_pool
if affinity_cookie_name is not None:
instance.affinity_cookie_name = affinity_cookie_name
if enable_probe is not None:
instance.probe_enabled = enable_probe
if path is not None:
instance.path = path
return parent
def create_ag_redirect_configuration(cmd, resource_group_name, application_gateway_name, item_name, redirect_type,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, no_wait=False):
ApplicationGatewayRedirectConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayRedirectConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_config = ApplicationGatewayRedirectConfiguration(
name=item_name,
redirect_type=redirect_type,
target_listener=SubResource(id=target_listener) if target_listener else None,
target_url=target_url,
include_path=include_path,
include_query_string=include_query_string)
upsert_to_collection(ag, 'redirect_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_redirect_configuration(cmd, instance, parent, item_name, redirect_type=None,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, raw=False):
SubResource = cmd.get_models('SubResource')
if redirect_type:
instance.redirect_type = redirect_type
if target_listener:
instance.target_listener = SubResource(id=target_listener)
instance.target_url = None
if target_url:
instance.target_listener = None
instance.target_url = target_url
if include_path is not None:
instance.include_path = include_path
if include_query_string is not None:
instance.include_query_string = include_query_string
return parent
def create_ag_rewrite_rule_set(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False):
ApplicationGatewayRewriteRuleSet = cmd.get_models(
'ApplicationGatewayRewriteRuleSet')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_set = ApplicationGatewayRewriteRuleSet(name=item_name)
upsert_to_collection(ag, 'rewrite_rule_sets', new_set, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, item_name,
path='rewrite_rule_sets', key_path='name')
def update_ag_rewrite_rule_set(instance, parent, item_name):
return parent
def create_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
sequence=None, request_headers=None, response_headers=None, no_wait=False,
modified_path=None, modified_query_string=None, enable_reroute=None):
(ApplicationGatewayRewriteRule,
ApplicationGatewayRewriteRuleActionSet,
ApplicationGatewayUrlConfiguration) = cmd.get_models('ApplicationGatewayRewriteRule',
'ApplicationGatewayRewriteRuleActionSet',
'ApplicationGatewayUrlConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(ag, rule_set_name,
path='rewrite_rule_sets', key_path='name')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
new_rule = ApplicationGatewayRewriteRule(
name=rule_name,
rule_sequence=sequence,
action_set=ApplicationGatewayRewriteRuleActionSet(
request_header_configurations=request_headers,
response_header_configurations=response_headers,
url_configuration=url_configuration
)
)
upsert_to_collection(rule_set, 'rewrite_rules', new_rule, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def update_ag_rewrite_rule(instance, parent, cmd, rule_set_name, rule_name, sequence=None,
request_headers=None, response_headers=None,
modified_path=None, modified_query_string=None, enable_reroute=None):
with cmd.update_context(instance) as c:
c.set_param('rule_sequence', sequence)
c.set_param('action_set.request_header_configurations', request_headers)
c.set_param('action_set.response_header_configurations', response_headers)
ApplicationGatewayUrlConfiguration = cmd.get_models('ApplicationGatewayUrlConfiguration')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
c.set_param('action_set.url_configuration', url_configuration)
return parent
def show_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def list_ag_rewrite_rules(cmd, resource_group_name, application_gateway_name, rule_set_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, path='rewrite_rule_sets.rewrite_rules', key_path='name')
def delete_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(gateway, rule_set_name, path='rewrite_rule_sets', key_path='name')
rule = find_child_item(rule_set, rule_name, path='rewrite_rules', key_path='name')
rule_set.rewrite_rules.remove(rule)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
variable, no_wait=False, pattern=None, ignore_case=None, negate=None):
ApplicationGatewayRewriteRuleCondition = cmd.get_models(
'ApplicationGatewayRewriteRuleCondition')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule = find_child_item(ag, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
new_condition = ApplicationGatewayRewriteRuleCondition(
variable=variable,
pattern=pattern,
ignore_case=ignore_case,
negate=negate
)
upsert_to_collection(rule, 'conditions', new_condition, 'variable')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def update_ag_rewrite_rule_condition(instance, parent, cmd, rule_set_name, rule_name, variable, pattern=None,
ignore_case=None, negate=None):
with cmd.update_context(instance) as c:
c.set_param('pattern', pattern)
c.set_param('ignore_case', ignore_case)
c.set_param('negate', negate)
return parent
def show_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def list_ag_rewrite_rule_conditions(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name')
def delete_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule = find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
condition = find_child_item(rule, variable, path='conditions', key_path='variable')
rule.conditions.remove(condition)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_probe(cmd, resource_group_name, application_gateway_name, item_name, protocol, host,
path, interval=30, timeout=120, threshold=8, no_wait=False, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
ApplicationGatewayProbe, ProbeMatchCriteria = cmd.get_models(
'ApplicationGatewayProbe', 'ApplicationGatewayProbeHealthResponseMatch')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_probe = ApplicationGatewayProbe(
name=item_name,
protocol=protocol,
host=host,
path=path,
interval=interval,
timeout=timeout,
unhealthy_threshold=threshold)
if cmd.supported_api_version(min_api='2017-06-01'):
new_probe.pick_host_name_from_backend_http_settings = host_name_from_http_settings
new_probe.min_servers = min_servers
new_probe.match = ProbeMatchCriteria(body=match_body, status_codes=match_status_codes)
if cmd.supported_api_version(min_api='2019-04-01'):
new_probe.port = port
upsert_to_collection(ag, 'probes', new_probe, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_probe(cmd, instance, parent, item_name, protocol=None, host=None, path=None,
interval=None, timeout=None, threshold=None, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
if protocol is not None:
instance.protocol = protocol
if host is not None:
instance.host = host
if path is not None:
instance.path = path
if interval is not None:
instance.interval = interval
if timeout is not None:
instance.timeout = timeout
if threshold is not None:
instance.unhealthy_threshold = threshold
if host_name_from_http_settings is not None:
instance.pick_host_name_from_backend_http_settings = host_name_from_http_settings
if min_servers is not None:
instance.min_servers = min_servers
if match_body is not None or match_status_codes is not None:
ProbeMatchCriteria = \
cmd.get_models('ApplicationGatewayProbeHealthResponseMatch')
instance.match = instance.match or ProbeMatchCriteria()
if match_body is not None:
instance.match.body = match_body
if match_status_codes is not None:
instance.match.status_codes = match_status_codes
if port is not None:
instance.port = port
return parent
def create_ag_request_routing_rule(cmd, resource_group_name, application_gateway_name, item_name,
address_pool=None, http_settings=None, http_listener=None, redirect_config=None,
url_path_map=None, rule_type='Basic', no_wait=False, rewrite_rule_set=None,
priority=None):
ApplicationGatewayRequestRoutingRule, SubResource = cmd.get_models(
'ApplicationGatewayRequestRoutingRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not address_pool and not redirect_config:
address_pool = _get_default_id(ag, 'backend_address_pools', '--address-pool')
if not http_settings and not redirect_config:
http_settings = _get_default_id(ag, 'backend_http_settings_collection', '--http-settings')
if not http_listener:
http_listener = _get_default_id(ag, 'http_listeners', '--http-listener')
new_rule = ApplicationGatewayRequestRoutingRule(
name=item_name,
rule_type=rule_type,
priority=priority,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
http_listener=SubResource(id=http_listener),
url_path_map=SubResource(id=url_path_map) if url_path_map else None)
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
    # recover the variable's own name so it can be passed as parameter_name to
    # supported_api_version below
    rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
    if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
        new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
upsert_to_collection(ag, 'request_routing_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_request_routing_rule(cmd, instance, parent, item_name, address_pool=None,
http_settings=None, http_listener=None, redirect_config=None, url_path_map=None,
rule_type=None, rewrite_rule_set=None, priority=None):
SubResource = cmd.get_models('SubResource')
if address_pool is not None:
instance.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
instance.backend_http_settings = SubResource(id=http_settings)
if redirect_config is not None:
instance.redirect_configuration = SubResource(id=redirect_config)
if http_listener is not None:
instance.http_listener = SubResource(id=http_listener)
if url_path_map is not None:
instance.url_path_map = SubResource(id=url_path_map)
if rule_type is not None:
instance.rule_type = rule_type
if rewrite_rule_set is not None:
instance.rewrite_rule_set = SubResource(id=rewrite_rule_set)
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
return parent
def create_ag_ssl_certificate(cmd, resource_group_name, application_gateway_name, item_name, cert_data=None,
cert_password=None, key_vault_secret_id=None, no_wait=False):
ApplicationGatewaySslCertificate = cmd.get_models('ApplicationGatewaySslCertificate')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_cert = ApplicationGatewaySslCertificate(
name=item_name, data=cert_data, password=cert_password, key_vault_secret_id=key_vault_secret_id)
upsert_to_collection(ag, 'ssl_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_ssl_certificate(instance, parent, item_name,
cert_data=None, cert_password=None, key_vault_secret_id=None):
if cert_data is not None:
instance.data = cert_data
if cert_password is not None:
instance.password = cert_password
if key_vault_secret_id is not None:
instance.key_vault_secret_id = key_vault_secret_id
return parent
def set_ag_ssl_policy_2017_03_01(cmd, resource_group_name, application_gateway_name, disabled_ssl_protocols=None,
clear=False, no_wait=False):
ApplicationGatewaySslPolicy = cmd.get_models('ApplicationGatewaySslPolicy')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.ssl_policy = None if clear else ApplicationGatewaySslPolicy(
disabled_ssl_protocols=disabled_ssl_protocols)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_ssl_policy_2017_06_01(cmd, resource_group_name, application_gateway_name, policy_name=None, policy_type=None,
disabled_ssl_protocols=None, cipher_suites=None, min_protocol_version=None,
no_wait=False):
ApplicationGatewaySslPolicy, ApplicationGatewaySslPolicyType = cmd.get_models(
'ApplicationGatewaySslPolicy', 'ApplicationGatewaySslPolicyType')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
policy_type = None
if policy_name:
policy_type = ApplicationGatewaySslPolicyType.predefined.value
elif cipher_suites or min_protocol_version:
policy_type = ApplicationGatewaySslPolicyType.custom.value
ag.ssl_policy = ApplicationGatewaySslPolicy(
policy_name=policy_name,
policy_type=policy_type,
disabled_ssl_protocols=disabled_ssl_protocols,
cipher_suites=cipher_suites,
min_protocol_version=min_protocol_version)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_ssl_policy(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).ssl_policy
def create_ag_trusted_root_certificate(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False,
cert_data=None, keyvault_secret=None):
ApplicationGatewayTrustedRootCertificate = cmd.get_models('ApplicationGatewayTrustedRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
root_cert = ApplicationGatewayTrustedRootCertificate(name=item_name, data=cert_data,
key_vault_secret_id=keyvault_secret)
upsert_to_collection(ag, 'trusted_root_certificates', root_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_trusted_root_certificate(instance, parent, item_name, cert_data=None, keyvault_secret=None):
if cert_data is not None:
instance.data = cert_data
if keyvault_secret is not None:
instance.key_vault_secret_id = keyvault_secret
return parent
def create_ag_url_path_map(cmd, resource_group_name, application_gateway_name, item_name, paths,
address_pool=None, http_settings=None, redirect_config=None, rewrite_rule_set=None,
default_address_pool=None, default_http_settings=None, default_redirect_config=None,
no_wait=False, rule_name='default', default_rewrite_rule_set=None, firewall_policy=None):
ApplicationGatewayUrlPathMap, ApplicationGatewayPathRule, SubResource = cmd.get_models(
'ApplicationGatewayUrlPathMap', 'ApplicationGatewayPathRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_rule = ApplicationGatewayPathRule(
name=rule_name,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
paths=paths
)
new_map = ApplicationGatewayUrlPathMap(
name=item_name,
default_backend_address_pool=SubResource(id=default_address_pool) if default_address_pool else None,
default_backend_http_settings=SubResource(id=default_http_settings) if default_http_settings else None,
path_rules=[])
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
new_map.default_redirect_configuration = \
SubResource(id=default_redirect_config) if default_redirect_config else None
    # recover the literal parameter name ('rewrite_rule_set') so supported_api_version can check it against the API profile
    rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
new_map.default_rewrite_rule_set = \
SubResource(id=default_rewrite_rule_set) if default_rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
# pull defaults from the rule specific properties if the default-* option isn't specified
if new_rule.backend_address_pool and not new_map.default_backend_address_pool:
new_map.default_backend_address_pool = new_rule.backend_address_pool
if new_rule.backend_http_settings and not new_map.default_backend_http_settings:
new_map.default_backend_http_settings = new_rule.backend_http_settings
if new_rule.redirect_configuration and not new_map.default_redirect_configuration:
new_map.default_redirect_configuration = new_rule.redirect_configuration
new_map.path_rules.append(new_rule)
upsert_to_collection(ag, 'url_path_maps', new_map, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_url_path_map(cmd, instance, parent, item_name, default_address_pool=None,
default_http_settings=None, default_redirect_config=None, raw=False,
default_rewrite_rule_set=None):
SubResource = cmd.get_models('SubResource')
if default_address_pool == '':
instance.default_backend_address_pool = None
elif default_address_pool:
instance.default_backend_address_pool = SubResource(id=default_address_pool)
if default_http_settings == '':
instance.default_backend_http_settings = None
elif default_http_settings:
instance.default_backend_http_settings = SubResource(id=default_http_settings)
if default_redirect_config == '':
instance.default_redirect_configuration = None
elif default_redirect_config:
instance.default_redirect_configuration = SubResource(id=default_redirect_config)
if default_rewrite_rule_set == '':
instance.default_rewrite_rule_set = None
elif default_rewrite_rule_set:
instance.default_rewrite_rule_set = SubResource(id=default_rewrite_rule_set)
return parent
def create_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, paths, address_pool=None, http_settings=None, redirect_config=None,
firewall_policy=None, no_wait=False, rewrite_rule_set=None):
ApplicationGatewayPathRule, SubResource = cmd.get_models('ApplicationGatewayPathRule', 'SubResource')
if address_pool and redirect_config:
raise CLIError("Cannot reference a BackendAddressPool when Redirect Configuration is specified.")
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
default_backend_pool = SubResource(id=url_map.default_backend_address_pool.id) \
if (url_map.default_backend_address_pool and not redirect_config) else None
default_http_settings = SubResource(id=url_map.default_backend_http_settings.id) \
if url_map.default_backend_http_settings else None
new_rule = ApplicationGatewayPathRule(
name=item_name,
paths=paths,
backend_address_pool=SubResource(id=address_pool) if address_pool else default_backend_pool,
backend_http_settings=SubResource(id=http_settings) if http_settings else default_http_settings)
if cmd.supported_api_version(min_api='2017-06-01'):
default_redirect = SubResource(id=url_map.default_redirect_configuration.id) \
if (url_map.default_redirect_configuration and not address_pool) else None
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else default_redirect
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(url_map, 'path_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def delete_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
url_map.path_rules = \
[x for x in url_map.path_rules if x.name.lower() != item_name.lower()]
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2016_09_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
no_wait=False):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2017_03_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
rule_set_type='OWASP', rule_set_version=None,
disabled_rule_groups=None,
disabled_rules=None, no_wait=False,
request_body_check=None, max_request_body_size=None, file_upload_limit=None,
exclusions=None):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode, rule_set_type=rule_set_type,
rule_set_version=rule_set_version)
if disabled_rule_groups or disabled_rules:
ApplicationGatewayFirewallDisabledRuleGroup = cmd.get_models('ApplicationGatewayFirewallDisabledRuleGroup')
disabled_groups = []
# disabled groups can be added directly
for group in disabled_rule_groups or []:
disabled_groups.append(ApplicationGatewayFirewallDisabledRuleGroup(rule_group_name=group))
def _flatten(collection, expand_property_fn):
for each in collection:
for value in expand_property_fn(each):
yield value
# for disabled rules, we have to look up the IDs
if disabled_rules:
results = list_ag_waf_rule_sets(ncf, _type=rule_set_type, version=rule_set_version, group='*')
for group in _flatten(results, lambda r: r.rule_groups):
disabled_group = ApplicationGatewayFirewallDisabledRuleGroup(
rule_group_name=group.rule_group_name, rules=[])
for rule in group.rules:
if str(rule.rule_id) in disabled_rules:
disabled_group.rules.append(rule.rule_id)
if disabled_group.rules:
disabled_groups.append(disabled_group)
ag.web_application_firewall_configuration.disabled_rule_groups = disabled_groups
if cmd.supported_api_version(min_api='2018-08-01'):
ag.web_application_firewall_configuration.request_body_check = request_body_check
ag.web_application_firewall_configuration.max_request_body_size_in_kb = max_request_body_size
ag.web_application_firewall_configuration.file_upload_limit_in_mb = file_upload_limit
ag.web_application_firewall_configuration.exclusions = exclusions
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_waf_config(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).web_application_firewall_configuration
def list_ag_waf_rule_sets(client, _type=None, version=None, group=None):
results = client.list_available_waf_rule_sets().value
filtered_results = []
# filter by rule set name or version
for rule_set in results:
if _type and _type.lower() != rule_set.rule_set_type.lower():
continue
if version and version.lower() != rule_set.rule_set_version.lower():
continue
filtered_groups = []
for rule_group in rule_set.rule_groups:
if not group:
rule_group.rules = None
filtered_groups.append(rule_group)
elif group.lower() == rule_group.rule_group_name.lower() or group == '*':
filtered_groups.append(rule_group)
if filtered_groups:
rule_set.rule_groups = filtered_groups
filtered_results.append(rule_set)
return filtered_results
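# Illustrative sketch (not executed) of how the filters above combine:
#   list_ag_waf_rule_sets(client)                                  -> all rule sets; groups listed without their rules
#   list_ag_waf_rule_sets(client, _type='OWASP', version='3.0',
#                         group='*')                               -> only OWASP 3.0, every group with its rules
#   list_ag_waf_rule_sets(client, group='<rule-group-name>')       -> rule sets containing that group, rules included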
# endregion
# region ApplicationGatewayWAFPolicy
def create_ag_waf_policy(cmd, client, resource_group_name, policy_name,
location=None, tags=None, rule_set_type='OWASP',
rule_set_version='3.0'):
WebApplicationFirewallPolicy, ManagedRulesDefinition, \
ManagedRuleSet = cmd.get_models('WebApplicationFirewallPolicy',
'ManagedRulesDefinition',
'ManagedRuleSet')
# https://docs.microsoft.com/en-us/azure/application-gateway/waf-overview
# mandatory default rule with empty rule sets
managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type, rule_set_version=rule_set_version)
managed_rule_definition = ManagedRulesDefinition(managed_rule_sets=[managed_rule_set])
waf_policy = WebApplicationFirewallPolicy(location=location, tags=tags, managed_rules=managed_rule_definition)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_ag_waf_policy(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_ag_waf_policies(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'web_application_firewall_policies', resource_group_name)
# endregion
# region ApplicationGatewayWAFPolicyRules PolicySettings
def update_waf_policy_setting(cmd, instance,
state=None, mode=None,
max_request_body_size_in_kb=None, file_upload_limit_in_mb=None,
                              request_body_check=None):
if state is not None:
instance.policy_settings.state = state
if mode is not None:
instance.policy_settings.mode = mode
if max_request_body_size_in_kb is not None:
instance.policy_settings.max_request_body_size_in_kb = max_request_body_size_in_kb
if file_upload_limit_in_mb is not None:
instance.policy_settings.file_upload_limit_in_mb = file_upload_limit_in_mb
if request_body_check is not None:
instance.policy_settings.request_body_check = request_body_check
return instance
def list_waf_policy_setting(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).policy_settings
# endregion
# region ApplicationGatewayWAFPolicyRules
def create_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, priority, rule_type, action):
"""
Initialize custom rule for WAF policy
"""
WebApplicationFirewallCustomRule = cmd.get_models('WebApplicationFirewallCustomRule')
waf_policy = client.get(resource_group_name, policy_name)
new_custom_rule = WebApplicationFirewallCustomRule(
name=rule_name,
action=action,
match_conditions=[],
priority=priority,
rule_type=rule_type
)
upsert_to_collection(waf_policy, 'custom_rules', new_custom_rule, 'name')
parent = client.create_or_update(resource_group_name, policy_name, waf_policy)
return find_child_item(parent, rule_name, path='custom_rules', key_path='name')
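# Note on the pattern above (illustrative): WAF custom rules are not a standalone resource, so the
# helper upserts the new rule into the parent policy, PUTs the whole policy, and then pulls the
# freshly created child back out of the response with find_child_item so the caller only sees the
# rule it asked for.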
# pylint: disable=unused-argument
def update_waf_custom_rule(instance, parent, cmd, rule_name, priority=None, rule_type=None, action=None):
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
c.set_param('rule_type', rule_type)
c.set_param('action', action)
return parent
def show_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
def list_waf_custom_rules(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).custom_rules
def delete_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, no_wait=None):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
waf_policy.custom_rules.remove(rule)
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicyRuleMatchConditions
def add_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name,
match_variables, operator, match_values, negation_condition=None, transforms=None):
MatchCondition = cmd.get_models('MatchCondition')
waf_policy = client.get(resource_group_name, policy_name)
custom_rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
new_cond = MatchCondition(
match_variables=match_variables,
operator=operator,
match_values=match_values,
        negation_conditon=negation_condition,  # keyword spelling follows the SDK's MatchCondition property name
transforms=transforms
)
custom_rule.match_conditions.append(new_cond)
upsert_to_collection(waf_policy, 'custom_rules', custom_rule, 'name', warn=False)
client.create_or_update(resource_group_name, policy_name, waf_policy)
return new_cond
def list_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name').match_conditions
def remove_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name, index):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
rule.match_conditions.pop(index)
client.create_or_update(resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule ManagedRuleSet
def add_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
rule_group_name=None, rules=None):
"""
Add managed rule set to the WAF policy managed rules.
Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
waf_policy = client.get(resource_group_name, policy_name)
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules is not None else []
rule_group_override = None
if rule_group_name is not None:
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides)
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
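    # The for/else below relies on Python's loop-else semantics: the else branch runs only when the
    # loop completes without a break, i.e. no existing rule set matches the requested type/version,
    # in which case the freshly built rule set is appended instead.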
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
# Add one rule
rule_override.rules.extend(managed_rule_overrides)
break
else:
# Add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# Add new rule set
waf_policy.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_waf_managed_rule_set(cmd, instance, rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
    Update (override) an existing rule set in the WAF policy's managed rules.
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules else None
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides) if managed_rule_overrides else None
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
updated_rule_set = None
for rule_set in instance.managed_rules.managed_rule_sets:
        if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
            # same rule set type but a different version: the whole rule set is replaced below
            updated_rule_set = rule_set
            break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
            if rg:
                rg.rules = managed_rule_overrides  # overwrite the existing rules; add_waf_managed_rule_set() appends instead
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
instance.managed_rules.managed_rule_sets.remove(updated_rule_set)
instance.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return instance
def remove_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
    Remove a managed rule group override by name when rule_group_name is given; otherwise remove the whole managed rule set.
"""
waf_policy = client.get(resource_group_name, policy_name)
delete_rule_set = None
for rule_set in waf_policy.managed_rules.managed_rule_sets:
        if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
# Remove one rule from rule group
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg is None:
raise CLIError('Rule set group [ {} ] not found.'.format(rule_group_name))
rule_set.rule_group_overrides.remove(rg)
if delete_rule_set:
waf_policy.managed_rules.managed_rule_sets.remove(delete_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule OwaspCrsExclusionEntry
def add_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name,
match_variable, selector_match_operator, selector):
OwaspCrsExclusionEntry = cmd.get_models('OwaspCrsExclusionEntry')
exclusion_entry = OwaspCrsExclusionEntry(match_variable=match_variable,
selector_match_operator=selector_match_operator,
selector=selector)
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions.append(exclusion_entry)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def remove_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions = []
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationSecurityGroups
def create_asg(cmd, client, resource_group_name, application_security_group_name, location=None, tags=None):
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup')
asg = ApplicationSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, application_security_group_name, asg)
def update_asg(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region DdosProtectionPlans
def create_ddos_plan(cmd, resource_group_name, ddos_plan_name, location=None, tags=None, vnets=None):
from azure.cli.core.commands import LongRunningOperation
ddos_client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
ddos_protection_plan = cmd.get_models('DdosProtectionPlan')()
if location:
ddos_protection_plan.location = location
if tags:
ddos_protection_plan.tags = tags
if not vnets:
        # if no VNets were given, a simple PUT is enough
return ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)
# if VNETs specified, have to create the protection plan and then add the VNETs
plan_id = LongRunningOperation(cmd.cli_ctx)(
ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)).id
SubResource = cmd.get_models('SubResource')
logger.info('Attempting to attach VNets to newly created DDoS protection plan.')
for vnet_subresource in vnets:
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
id_parts = parse_resource_id(vnet_subresource.id)
vnet = vnet_client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=plan_id)
vnet_client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return ddos_client.get(resource_group_name, ddos_plan_name)
def update_ddos_plan(cmd, instance, tags=None, vnets=None):
SubResource = cmd.get_models('SubResource')
if tags is not None:
instance.tags = tags
if vnets is not None:
logger.info('Attempting to update the VNets attached to the DDoS protection plan.')
vnet_ids = set([])
if len(vnets) == 1 and not vnets[0]:
pass
else:
vnet_ids = {x.id for x in vnets}
existing_vnet_ids = {x.id for x in instance.virtual_networks} if instance.virtual_networks else set([])
client = network_client_factory(cmd.cli_ctx).virtual_networks
for vnet_id in vnet_ids.difference(existing_vnet_ids):
logger.info("Adding VNet '%s' to plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=instance.id)
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
for vnet_id in existing_vnet_ids.difference(vnet_ids):
logger.info("Removing VNet '%s' from plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = None
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return instance
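# Note (illustrative): the update above works on set differences between the requested and the
# currently linked VNets: new entries are attached, missing entries are detached, and a single
# empty value in the vnets argument clears the association entirely.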
def list_ddos_plans(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# endregion
# region DNS Commands
# add delegation name server record for the created child zone in its parent zone.
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
:param parent_zone: the parent zone name / FQDN of the parent zone.
if parent zone name is mentioned, assume current subscription and resource group.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
            print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
def create_dns_zone(cmd, client, resource_group_name, zone_name, parent_zone_name=None, tags=None,
if_none_match=False, zone_type='Public', resolution_vnets=None, registration_vnets=None):
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
zone = Zone(location='global', tags=tags)
if hasattr(zone, 'zone_type'):
zone.zone_type = zone_type
zone.registration_virtual_networks = registration_vnets
zone.resolution_virtual_networks = resolution_vnets
created_zone = client.create_or_update(resource_group_name, zone_name, zone,
if_none_match='*' if if_none_match else None)
if cmd.supported_api_version(min_api='2016-04-01') and parent_zone_name is not None:
logger.info('Attempting to add delegation in the parent zone')
add_dns_delegation(cmd, created_zone, parent_zone_name, resource_group_name, zone_name)
return created_zone
def update_dns_zone(instance, tags=None, zone_type=None, resolution_vnets=None, registration_vnets=None):
if tags is not None:
instance.tags = tags
if zone_type:
instance.zone_type = zone_type
if resolution_vnets == ['']:
instance.resolution_virtual_networks = None
elif resolution_vnets:
instance.resolution_virtual_networks = resolution_vnets
if registration_vnets == ['']:
instance.registration_virtual_networks = None
elif registration_vnets:
instance.registration_virtual_networks = registration_vnets
return instance
def list_dns_zones(cmd, resource_group_name=None):
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).zones
if resource_group_name:
return ncf.list_by_resource_group(resource_group_name)
return ncf.list()
def create_dns_record_set(cmd, resource_group_name, zone_name, record_set_name, record_set_type,
metadata=None, if_match=None, if_none_match=None, ttl=3600, target_resource=None):
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = RecordSet(
ttl=ttl,
metadata=metadata,
target_resource=SubResource(id=target_resource) if target_resource else None
)
return client.create_or_update(resource_group_name, zone_name, record_set_name,
record_set_type, record_set, if_match=if_match,
if_none_match='*' if if_none_match else None)
def list_dns_record_set(client, resource_group_name, zone_name, record_type=None):
if record_type:
return client.list_by_type(resource_group_name, zone_name, record_type)
return client.list_by_dns_zone(resource_group_name, zone_name)
def update_dns_record_set(instance, cmd, metadata=None, target_resource=None):
if metadata is not None:
instance.metadata = metadata
if target_resource == '':
instance.target_resource = None
elif target_resource is not None:
SubResource = cmd.get_models('SubResource')
instance.target_resource = SubResource(id=target_resource)
return instance
def _type_to_property_name(key):
type_dict = {
'a': 'a_records',
'aaaa': 'aaaa_records',
'caa': 'caa_records',
'cname': 'cname_record',
'mx': 'mx_records',
'ns': 'ns_records',
'ptr': 'ptr_records',
'soa': 'soa_record',
'spf': 'txt_records',
'srv': 'srv_records',
'txt': 'txt_records',
}
return type_dict[key.lower()]
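# Illustrative mapping (not executed):
#   _type_to_property_name('A')     -> 'a_records'
#   _type_to_property_name('cname') -> 'cname_record'   (singular: a CNAME record set holds one record)
#   _type_to_property_name('SPF')   -> 'txt_records'    (SPF data is stored as TXT)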
def export_zone(cmd, resource_group_name, zone_name, file_name=None):
from time import localtime, strftime
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
record_sets = client.record_sets.list_by_dns_zone(resource_group_name, zone_name)
zone_obj = OrderedDict({
'$origin': zone_name.rstrip('.') + '.',
'resource-group': resource_group_name,
'zone-name': zone_name.rstrip('.'),
'datetime': strftime('%a, %d %b %Y %X %z', localtime())
})
for record_set in record_sets:
record_type = record_set.type.rsplit('/', 1)[1].lower()
record_set_name = record_set.name
record_data = getattr(record_set, _type_to_property_name(record_type), None)
# ignore empty record sets
if not record_data:
continue
if not isinstance(record_data, list):
record_data = [record_data]
if record_set_name not in zone_obj:
zone_obj[record_set_name] = OrderedDict()
for record in record_data:
record_obj = {'ttl': record_set.ttl}
if record_type not in zone_obj[record_set_name]:
zone_obj[record_set_name][record_type] = []
if record_type == 'aaaa':
record_obj.update({'ip': record.ipv6_address})
elif record_type == 'a':
record_obj.update({'ip': record.ipv4_address})
elif record_type == 'caa':
record_obj.update({'val': record.value, 'tag': record.tag, 'flags': record.flags})
elif record_type == 'cname':
record_obj.update({'alias': record.cname.rstrip('.') + '.'})
elif record_type == 'mx':
record_obj.update({'preference': record.preference, 'host': record.exchange.rstrip('.') + '.'})
elif record_type == 'ns':
record_obj.update({'host': record.nsdname.rstrip('.') + '.'})
elif record_type == 'ptr':
record_obj.update({'host': record.ptrdname.rstrip('.') + '.'})
elif record_type == 'soa':
record_obj.update({
'mname': record.host.rstrip('.') + '.',
'rname': record.email.rstrip('.') + '.',
'serial': int(record.serial_number), 'refresh': record.refresh_time,
'retry': record.retry_time, 'expire': record.expire_time,
'minimum': record.minimum_ttl
})
zone_obj['$ttl'] = record.minimum_ttl
elif record_type == 'srv':
record_obj.update({'priority': record.priority, 'weight': record.weight,
'port': record.port, 'target': record.target.rstrip('.') + '.'})
elif record_type == 'txt':
record_obj.update({'txt': ''.join(record.value)})
zone_obj[record_set_name][record_type].append(record_obj)
zone_file_content = make_zone_file(zone_obj)
print(zone_file_content)
if file_name:
try:
with open(file_name, 'w') as f:
f.write(zone_file_content)
except IOError:
raise CLIError('Unable to export to file: {}'.format(file_name))
# pylint: disable=too-many-return-statements, inconsistent-return-statements
def _build_record(cmd, data):
AaaaRecord, ARecord, CaaRecord, CnameRecord, MxRecord, NsRecord, PtrRecord, SoaRecord, SrvRecord, TxtRecord = \
cmd.get_models('AaaaRecord', 'ARecord', 'CaaRecord', 'CnameRecord', 'MxRecord', 'NsRecord',
'PtrRecord', 'SoaRecord', 'SrvRecord', 'TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_type = data['delim'].lower()
try:
if record_type == 'aaaa':
return AaaaRecord(ipv6_address=data['ip'])
if record_type == 'a':
return ARecord(ipv4_address=data['ip'])
if (record_type == 'caa' and
supported_api_version(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS, min_api='2018-03-01-preview')):
return CaaRecord(value=data['val'], flags=int(data['flags']), tag=data['tag'])
if record_type == 'cname':
return CnameRecord(cname=data['alias'])
if record_type == 'mx':
return MxRecord(preference=data['preference'], exchange=data['host'])
if record_type == 'ns':
return NsRecord(nsdname=data['host'])
if record_type == 'ptr':
return PtrRecord(ptrdname=data['host'])
if record_type == 'soa':
return SoaRecord(host=data['host'], email=data['email'], serial_number=data['serial'],
refresh_time=data['refresh'], retry_time=data['retry'], expire_time=data['expire'],
minimum_ttl=data['minimum'])
if record_type == 'srv':
return SrvRecord(
priority=int(data['priority']), weight=int(data['weight']), port=int(data['port']),
target=data['target'])
if record_type in ['txt', 'spf']:
text_data = data['txt']
return TxtRecord(value=text_data) if isinstance(text_data, list) else TxtRecord(value=[text_data])
except KeyError as ke:
raise CLIError("The {} record '{}' is missing a property. {}"
.format(record_type, data['name'], ke))
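# Illustrative sketch (not executed) of the dictionaries this helper expects; the exact shape comes
# from parse_zone_file(), and the field names below are the ones looked up above:
#   _build_record(cmd, {'delim': 'A',  'name': 'www', 'ttl': 3600, 'ip': '10.0.0.4'})
#       -> ARecord(ipv4_address='10.0.0.4')
#   _build_record(cmd, {'delim': 'MX', 'name': '@',   'ttl': 3600, 'preference': 10, 'host': 'mail.contoso.com.'})
#       -> MxRecord(preference=10, exchange='mail.contoso.com.')
#   A missing field (e.g. no 'ip' on an A record) surfaces as the CLIError raised in the KeyError handler.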
# pylint: disable=too-many-statements
def import_zone(cmd, resource_group_name, zone_name, file_name):
from azure.cli.core.util import read_file_content
from azure.core.exceptions import HttpResponseError
import sys
logger.warning("In the future, zone name will be case insensitive.")
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
from azure.cli.core.azclierror import FileOperationError, UnclassifiedUserFault
try:
file_text = read_file_content(file_name)
except FileNotFoundError:
raise FileOperationError("No such file: " + str(file_name))
except IsADirectoryError:
raise FileOperationError("Is a directory: " + str(file_name))
except PermissionError:
raise FileOperationError("Permission denied: " + str(file_name))
except OSError as e:
raise UnclassifiedUserFault(e)
zone_obj = parse_zone_file(file_text, zone_name)
origin = zone_name
record_sets = {}
for record_set_name in zone_obj:
for record_set_type in zone_obj[record_set_name]:
record_set_obj = zone_obj[record_set_name][record_set_type]
if record_set_type == 'soa':
origin = record_set_name.rstrip('.')
if not isinstance(record_set_obj, list):
record_set_obj = [record_set_obj]
for entry in record_set_obj:
record_set_ttl = entry['ttl']
record_set_key = '{}{}'.format(record_set_name.lower(), record_set_type)
record = _build_record(cmd, entry)
if not record:
logger.warning('Cannot import %s. RecordType is not found. Skipping...', entry['delim'].lower())
continue
record_set = record_sets.get(record_set_key, None)
if not record_set:
# Workaround for issue #2824
relative_record_set_name = record_set_name.rstrip('.')
if not relative_record_set_name.endswith(origin):
logger.warning(
'Cannot import %s. Only records relative to origin may be '
'imported at this time. Skipping...', relative_record_set_name)
continue
record_set = RecordSet(ttl=record_set_ttl)
record_sets[record_set_key] = record_set
_add_record(record_set, record, record_set_type,
is_list=record_set_type.lower() not in ['soa', 'cname'])
total_records = 0
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = rs_name[:-(len(origin) + 1)] if rs_name != origin else '@'
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
total_records += record_count
cum_records = 0
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
print('== BEGINNING ZONE IMPORT: {} ==\n'.format(zone_name), file=sys.stderr)
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
client.zones.create_or_update(resource_group_name, zone_name, Zone(location='global'))
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = '@' if rs_name == origin else rs_name
if rs_name.endswith(origin):
rs_name = rs_name[:-(len(origin) + 1)]
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
if rs_name == '@' and rs_type == 'soa':
root_soa = client.record_sets.get(resource_group_name, zone_name, '@', 'SOA')
rs.soa_record.host = root_soa.soa_record.host
rs_name = '@'
elif rs_name == '@' and rs_type == 'ns':
root_ns = client.record_sets.get(resource_group_name, zone_name, '@', 'NS')
root_ns.ttl = rs.ttl
rs = root_ns
rs_type = rs.type.rsplit('/', 1)[1]
try:
client.record_sets.create_or_update(
resource_group_name, zone_name, rs_name, rs_type, rs)
cum_records += record_count
print("({}/{}) Imported {} records of type '{}' and name '{}'"
.format(cum_records, total_records, record_count, rs_type, rs_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print("\n== {}/{} RECORDS IMPORTED SUCCESSFULLY: '{}' =="
.format(cum_records, total_records, zone_name), file=sys.stderr)
def add_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
ttl=3600, if_none_match=None):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
ttl=3600, if_none_match=None):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
    return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
                            ttl=ttl, if_none_match=if_none_match)
def add_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value, flags, tag,
ttl=3600, if_none_match=None):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname, ttl=3600, if_none_match=None):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, ttl=ttl, if_none_match=if_none_match)
def add_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
ttl=3600, if_none_match=None):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
subscription_id=None, ttl=3600, if_none_match=None):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
subscription_id=subscription_id, ttl=ttl, if_none_match=if_none_match)
def add_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname, ttl=3600, if_none_match=None):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def update_dns_soa_record(cmd, resource_group_name, zone_name, host=None, email=None,
serial_number=None, refresh_time=None, retry_time=None, expire_time=None,
minimum_ttl=3600, if_none_match=None):
record_set_name = '@'
record_type = 'soa'
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record = record_set.soa_record
record.host = host or record.host
record.email = email or record.email
record.serial_number = serial_number or record.serial_number
record.refresh_time = refresh_time or record.refresh_time
record.retry_time = retry_time or record.retry_time
record.expire_time = expire_time or record.expire_time
record.minimum_ttl = minimum_ttl or record.minimum_ttl
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, if_none_match=if_none_match)
def add_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, if_none_match=None):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def add_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value, if_none_match=None):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
    long_text = ''.join(record.value)
original_len = len(long_text)
record.value = []
while len(long_text) > 255:
record.value.append(long_text[:255])
long_text = long_text[255:]
record.value.append(long_text)
final_str = ''.join(record.value)
final_len = len(final_str)
assert original_len == final_len
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
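# Illustrative note (not executed) on the 255-character chunking above: individual DNS TXT strings
# are limited to 255 bytes, so a 600-character value is stored as three strings of 255, 255 and 90
# characters; the assert simply guards that no characters were lost while splitting.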
def remove_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
keep_empty_record_set=False):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
keep_empty_record_set=False):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value,
flags, tag, keep_empty_record_set=False):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname,
keep_empty_record_set=False):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, keep_empty_record_set=keep_empty_record_set)
def remove_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
keep_empty_record_set=False):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, keep_empty_record_set=False):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value,
keep_empty_record_set=False):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def _check_a_record_exist(record, exist_list):
for r in exist_list:
if r.ipv4_address == record.ipv4_address:
return True
return False
def _check_aaaa_record_exist(record, exist_list):
for r in exist_list:
if r.ipv6_address == record.ipv6_address:
return True
return False
def _check_caa_record_exist(record, exist_list):
for r in exist_list:
if (r.flags == record.flags and
r.tag == record.tag and
r.value == record.value):
return True
return False
def _check_cname_record_exist(record, exist_list):
for r in exist_list:
if r.cname == record.cname:
return True
return False
def _check_mx_record_exist(record, exist_list):
for r in exist_list:
if (r.preference == record.preference and
r.exchange == record.exchange):
return True
return False
def _check_ns_record_exist(record, exist_list):
for r in exist_list:
if r.nsdname == record.nsdname:
return True
return False
def _check_ptr_record_exist(record, exist_list):
for r in exist_list:
if r.ptrdname == record.ptrdname:
return True
return False
def _check_srv_record_exist(record, exist_list):
for r in exist_list:
if (r.priority == record.priority and
r.weight == record.weight and
r.port == record.port and
r.target == record.target):
return True
return False
def _check_txt_record_exist(record, exist_list):
for r in exist_list:
if r.value == record.value:
return True
return False
def _record_exist_func(record_type):
return globals()["_check_{}_record_exist".format(record_type)]
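# Illustrative lookup (not executed): the duplicate checks are dispatched by name, e.g.
#   _record_exist_func('a')   is _check_a_record_exist
#   _record_exist_func('srv') is _check_srv_record_exist
# so supporting a new record type only requires defining a matching _check_<type>_record_exist helper.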
def _add_record(record_set, record, record_type, is_list=False):
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is None:
setattr(record_set, record_property, [])
record_list = getattr(record_set, record_property)
_record_exist = _record_exist_func(record_type)
if not _record_exist(record, record_list):
record_list.append(record)
else:
setattr(record_set, record_property, record)
def _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=True, subscription_id=None, ttl=None, if_none_match=None):
from azure.core.exceptions import HttpResponseError
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS,
subscription_id=subscription_id).record_sets
try:
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
except HttpResponseError:
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_set = RecordSet(ttl=3600)
if ttl is not None:
record_set.ttl = ttl
_add_record(record_set, record, record_type, is_list)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name,
record_type, record_set,
if_none_match='*' if if_none_match else None)
def _remove_record(cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set, is_list=True):
ncf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is not None:
keep_list = [r for r in record_list
if not dict_matches_filter(r.__dict__, record.__dict__)]
if len(keep_list) == len(record_list):
raise CLIError('Record {} not found.'.format(str(record)))
setattr(record_set, record_property, keep_list)
else:
setattr(record_set, record_property, None)
if is_list:
records_remaining = len(getattr(record_set, record_property))
else:
records_remaining = 1 if getattr(record_set, record_property) is not None else 0
if not records_remaining and not keep_empty_record_set:
logger.info('Removing empty %s record set: %s', record_type, record_set_name)
return ncf.delete(resource_group_name, zone_name, record_set_name, record_type)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name, record_type, record_set)
def dict_matches_filter(d, filter_dict):
sentinel = object()
return all(not filter_dict.get(key, None) or
str(filter_dict[key]) == str(d.get(key, sentinel)) or
lists_match(filter_dict[key], d.get(key, []))
for key in filter_dict)
def lists_match(l1, l2):
try:
return Counter(l1) == Counter(l2) # pylint: disable=too-many-function-args
except TypeError:
return False
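# Illustrative behaviour (not executed), assuming simple record dictionaries:
#   dict_matches_filter({'ipv4_address': '10.0.0.4', 'ttl': 3600}, {'ipv4_address': '10.0.0.4'}) -> True
#   dict_matches_filter({'ipv4_address': '10.0.0.4'}, {'ipv4_address': '10.0.0.5'})              -> False
# Falsy filter values are ignored, and list-valued fields (e.g. TXT record chunks) are compared
# order-insensitively via lists_match().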
# endregion
# region ExpressRoutes
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location,
service_provider_name, location=None, tags=None, no_wait=False,
sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
cmd.get_models(
'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_circuits
sku_name = '{}_{}'.format(sku_tier, sku_family)
circuit = ExpressRouteCircuit(
location=location, tags=tags,
service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
service_provider_name=service_provider_name,
peering_location=peering_location,
bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
allow_global_reach=allow_global_reach,
bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
)
if cmd.supported_api_version(min_api='2010-07-01') and allow_classic_operations is not None:
circuit.allow_classic_operations = allow_classic_operations
if cmd.supported_api_version(min_api='2018-08-01') and express_route_port:
circuit.express_route_port = SubResource(id=express_route_port)
circuit.service_provider_properties = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, circuit_name, circuit)
def update_express_route(instance, cmd, bandwidth_in_mbps=None, peering_location=None,
service_provider_name=None, sku_family=None, sku_tier=None, tags=None,
allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
with cmd.update_context(instance) as c:
c.set_param('allow_classic_operations', allow_classic_operations)
c.set_param('tags', tags)
c.set_param('allow_global_reach', allow_global_reach)
with cmd.update_context(instance.sku) as c:
c.set_param('family', sku_family)
c.set_param('tier', sku_tier)
with cmd.update_context(instance.service_provider_properties) as c:
c.set_param('peering_location', peering_location)
c.set_param('service_provider_name', service_provider_name)
if express_route_port is not None:
SubResource = cmd.get_models('SubResource')
instance.express_route_port = SubResource(id=express_route_port)
instance.service_provider_properties = None
if bandwidth_in_mbps is not None:
if not instance.express_route_port:
            instance.service_provider_properties.bandwidth_in_mbps = float(bandwidth_in_mbps)
else:
instance.bandwidth_in_gbps = (float(bandwidth_in_mbps) / 1000)
return instance
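# Note (illustrative): when the circuit is bound to an ExpressRoute port, bandwidth is tracked on the
# circuit itself in Gbps, so an input of 10000 (Mbps) becomes bandwidth_in_gbps = 10.0; for
# provider-based circuits the Mbps value stays on service_provider_properties instead.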
def create_express_route_peering_connection(cmd, resource_group_name, circuit_name, peering_name, connection_name,
peer_circuit, address_prefix, authorization_key=None):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
ExpressRouteCircuitConnection, SubResource = cmd.get_models('ExpressRouteCircuitConnection', 'SubResource')
source_circuit = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='expressRouteCircuits',
name=circuit_name,
child_type_1='peerings',
child_name_1=peering_name
)
conn = ExpressRouteCircuitConnection(
express_route_circuit_peering=SubResource(id=source_circuit),
peer_express_route_circuit_peering=SubResource(id=peer_circuit),
address_prefix=address_prefix,
authorization_key=authorization_key
)
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def _validate_ipv6_address_prefixes(prefixes):
from ipaddress import ip_network, IPv6Network
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
version = None
for prefix in prefixes:
try:
network = ip_network(prefix)
if version is None:
version = type(network)
else:
if not isinstance(network, version): # pylint: disable=isinstance-second-argument-not-valid-type
raise CLIError("usage error: '{}' incompatible mix of IPv4 and IPv6 address prefixes."
.format(prefixes))
except ValueError:
raise CLIError("usage error: prefix '{}' is not recognized as an IPv4 or IPv6 address prefix."
.format(prefix))
return version == IPv6Network
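# Illustrative behaviour (not executed):
#   _validate_ipv6_address_prefixes('2001:db8::/64')                  -> True   (IPv6)
#   _validate_ipv6_address_prefixes(['10.0.0.0/24', '10.1.0.0/24'])   -> False  (IPv4)
#   _validate_ipv6_address_prefixes(['10.0.0.0/24', '2001:db8::/64']) -> CLIError (mixed versions)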
def create_express_route_peering(
cmd, client, resource_group_name, circuit_name, peering_type, peer_asn, vlan_id,
primary_peer_address_prefix, secondary_peer_address_prefix, shared_key=None,
advertised_public_prefixes=None, customer_asn=None, routing_registry_name=None,
route_filter=None, legacy_mode=None, ip_version='IPv4'):
(ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, RouteFilter) = \
cmd.get_models('ExpressRouteCircuitPeering', 'ExpressRouteCircuitPeeringConfig', 'RouteFilter')
if cmd.supported_api_version(min_api='2018-02-01'):
ExpressRoutePeeringType = cmd.get_models('ExpressRoutePeeringType')
else:
ExpressRoutePeeringType = cmd.get_models('ExpressRouteCircuitPeeringType')
if ip_version == 'IPv6' and cmd.supported_api_version(min_api='2020-08-01'):
Ipv6ExpressRouteCircuitPeeringConfig = cmd.get_models('Ipv6ExpressRouteCircuitPeeringConfig')
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
microsoft_config = ExpressRouteCircuitPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
else:
microsoft_config = None
ipv6 = Ipv6ExpressRouteCircuitPeeringConfig(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
peering = ExpressRouteCircuitPeering(peering_type=peering_type, ipv6_peering_config=ipv6, peer_asn=peer_asn,
vlan_id=vlan_id)
else:
peering = ExpressRouteCircuitPeering(
peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
shared_key=shared_key)
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
peering.microsoft_peering_config = ExpressRouteCircuitPeeringConfig(
advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
if cmd.supported_api_version(min_api='2016-12-01') and route_filter:
peering.route_filter = RouteFilter(id=route_filter)
if cmd.supported_api_version(min_api='2017-10-01') and legacy_mode is not None:
peering.microsoft_peering_config.legacy_mode = legacy_mode
return client.begin_create_or_update(resource_group_name, circuit_name, peering_type, peering)
def _create_or_update_ipv6_peering(cmd, config, primary_peer_address_prefix, secondary_peer_address_prefix,
route_filter, advertised_public_prefixes, customer_asn, routing_registry_name):
if config:
# update scenario
with cmd.update_context(config) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
if route_filter:
RouteFilter = cmd.get_models('RouteFilter')
config.route_filter = RouteFilter(id=route_filter)
else:
# create scenario
IPv6Config, MicrosoftPeeringConfig = cmd.get_models(
'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitPeeringConfig')
microsoft_config = MicrosoftPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
config = IPv6Config(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
return config
def update_express_route_peering(cmd, instance, peer_asn=None, primary_peer_address_prefix=None,
secondary_peer_address_prefix=None, vlan_id=None, shared_key=None,
advertised_public_prefixes=None, customer_asn=None,
routing_registry_name=None, route_filter=None, ip_version='IPv4',
legacy_mode=None):
# update settings common to all peering types
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('vlan_id', vlan_id)
c.set_param('shared_key', shared_key)
if ip_version == 'IPv6':
# update is the only way to add IPv6 peering options
instance.ipv6_peering_config = _create_or_update_ipv6_peering(cmd, instance.ipv6_peering_config,
primary_peer_address_prefix,
secondary_peer_address_prefix, route_filter,
advertised_public_prefixes, customer_asn,
routing_registry_name)
else:
# IPv4 Microsoft Peering (or non-Microsoft Peering)
with cmd.update_context(instance) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
if route_filter is not None:
RouteFilter = cmd.get_models('RouteFilter')
instance.route_filter = RouteFilter(id=route_filter)
try:
with cmd.update_context(instance.microsoft_peering_config) as c:
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
c.set_param('legacy_mode', legacy_mode)
except AttributeError:
raise CLIError('--advertised-public-prefixes, --customer-asn, --routing-registry-name and '
'--legacy-mode are only applicable for Microsoft Peering.')
return instance
# endregion
# region ExpressRoute Connection
# pylint: disable=unused-argument
def create_express_route_connection(cmd, resource_group_name, express_route_gateway_name, connection_name,
peering, circuit_name=None, authorization_key=None, routing_weight=None,
enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
ExpressRouteConnection, SubResource, RoutingConfiguration, PropagatedRouteTable\
= cmd.get_models('ExpressRouteConnection', 'SubResource', 'RoutingConfiguration', 'PropagatedRouteTable')
client = network_client_factory(cmd.cli_ctx).express_route_connections
propagated_route_tables = PropagatedRouteTable(
labels=labels,
ids=[SubResource(id=propagated_route_table) for propagated_route_table in
propagated_route_tables] if propagated_route_tables else None
)
routing_configuration = RoutingConfiguration(
associated_route_table=SubResource(id=associated_route_table),
propagated_route_tables=propagated_route_tables
)
connection = ExpressRouteConnection(
name=connection_name,
express_route_circuit_peering=SubResource(id=peering) if peering else None,
authorization_key=authorization_key,
routing_weight=routing_weight,
routing_configuration=routing_configuration
)
if enable_internet_security and cmd.supported_api_version(min_api='2019-09-01'):
connection.enable_internet_security = enable_internet_security
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, connection_name, connection)
# pylint: disable=unused-argument
def update_express_route_connection(instance, cmd, circuit_name=None, peering=None, authorization_key=None,
routing_weight=None, enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
SubResource = cmd.get_models('SubResource')
if peering is not None:
instance.express_route_connection_id = SubResource(id=peering)
if authorization_key is not None:
instance.authorization_key = authorization_key
if routing_weight is not None:
instance.routing_weight = routing_weight
if enable_internet_security is not None and cmd.supported_api_version(min_api='2019-09-01'):
instance.enable_internet_security = enable_internet_security
if associated_route_table is not None or propagated_route_tables is not None or labels is not None:
if instance.routing_configuration is None:
RoutingConfiguration = cmd.get_models('RoutingConfiguration')
instance.routing_configuration = RoutingConfiguration()
if associated_route_table is not None:
instance.routing_configuration.associated_route_table = SubResource(id=associated_route_table)
if propagated_route_tables is not None or labels is not None:
if instance.routing_configuration.propagated_route_tables is None:
PropagatedRouteTable = cmd.get_models('PropagatedRouteTable')
instance.routing_configuration.propagated_route_tables = PropagatedRouteTable()
if propagated_route_tables is not None:
instance.routing_configuration.propagated_route_tables.ids = [SubResource(id=propagated_route_table) for propagated_route_table in propagated_route_tables] # pylint: disable=line-too-long
if labels is not None:
instance.routing_configuration.propagated_route_tables.labels = labels
return instance
# endregion
# region ExpressRoute Gateways
def create_express_route_gateway(cmd, resource_group_name, express_route_gateway_name, location=None, tags=None,
min_val=2, max_val=None, virtual_hub=None):
ExpressRouteGateway, SubResource = cmd.get_models('ExpressRouteGateway', 'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_gateways
gateway = ExpressRouteGateway(
location=location,
tags=tags,
virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None
)
    if min_val is not None or max_val is not None:
gateway.auto_scale_configuration = {'bounds': {'min': min_val, 'max': max_val}}
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, gateway)
def update_express_route_gateway(instance, cmd, tags=None, min_val=None, max_val=None):
def _ensure_autoscale():
if not instance.auto_scale_configuration:
ExpressRouteGatewayPropertiesAutoScaleConfiguration, \
ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds = cmd.get_models(
'ExpressRouteGatewayPropertiesAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds')
instance.auto_scale_configuration = ExpressRouteGatewayPropertiesAutoScaleConfiguration(
                bounds=ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(min=min_val, max=max_val))
if tags is not None:
instance.tags = tags
    if min_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.min = min_val
    if max_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.max = max_val
return instance
def list_express_route_gateways(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_gateways
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
# endregion
# region ExpressRoute ports
def create_express_route_port(cmd, resource_group_name, express_route_port_name, location=None, tags=None,
peering_location=None, bandwidth_in_gbps=None, encapsulation=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ExpressRoutePort = cmd.get_models('ExpressRoutePort')
if bandwidth_in_gbps is not None:
bandwidth_in_gbps = int(bandwidth_in_gbps)
port = ExpressRoutePort(
location=location,
tags=tags,
peering_location=peering_location,
bandwidth_in_gbps=bandwidth_in_gbps,
encapsulation=encapsulation
)
return client.begin_create_or_update(resource_group_name, express_route_port_name, port)
def update_express_route_port(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags, True)
return instance
def download_generated_loa_as_pdf(cmd,
resource_group_name,
express_route_port_name,
customer_name,
file_path='loa.pdf'):
import os
import base64
dirname, basename = os.path.dirname(file_path), os.path.basename(file_path)
if basename == '':
basename = 'loa.pdf'
    elif not basename.endswith('.pdf'):
basename = basename + '.pdf'
file_path = os.path.join(dirname, basename)
generate_express_route_ports_loa_request =\
cmd.get_models('GenerateExpressRoutePortsLOARequest')(customer_name=customer_name)
client = network_client_factory(cmd.cli_ctx).express_route_ports
response = client.generate_loa(resource_group_name, express_route_port_name,
generate_express_route_ports_loa_request)
encoded_content = base64.b64decode(response.encoded_content)
from azure.cli.core.azclierror import FileOperationError
try:
with open(file_path, 'wb') as f:
f.write(encoded_content)
except OSError as ex:
raise FileOperationError(ex)
logger.warning("The generated letter of authorization is saved at %s", file_path)
def list_express_route_ports(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def assign_express_route_port_identity(cmd, resource_group_name, express_route_port_name,
user_assigned_identity, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity', 'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance)
ports.identity = identity_instance
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def remove_express_route_port_identity(cmd, resource_group_name, express_route_port_name, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
if ports.identity is None:
logger.warning("The identity of the ExpressRoute Port doesn't exist.")
return ports
ports.identity = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def show_express_route_port_identity(cmd, resource_group_name, express_route_port_name):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
return ports.identity
def update_express_route_port_link(cmd, instance, parent, express_route_port_name, link_name,
macsec_cak_secret_identifier=None, macsec_ckn_secret_identifier=None,
macsec_sci_state=None, macsec_cipher=None, admin_state=None):
"""
:param cmd:
:param instance: an instance of ExpressRoutePort
:param express_route_port_name:
:param link_name:
:param macsec_cak_secret_identifier:
:param macsec_ckn_secret_identifier:
:param macsec_cipher:
:param admin_state:
:return:
"""
if any([macsec_cak_secret_identifier, macsec_ckn_secret_identifier, macsec_cipher, macsec_sci_state]):
instance.mac_sec_config.cak_secret_identifier = macsec_cak_secret_identifier
instance.mac_sec_config.ckn_secret_identifier = macsec_ckn_secret_identifier
# TODO https://github.com/Azure/azure-rest-api-specs/issues/7569
# need to remove this conversion when the issue is fixed.
if macsec_cipher is not None:
macsec_ciphers_tmp = {'gcm-aes-128': 'GcmAes128', 'gcm-aes-256': 'GcmAes256'}
macsec_cipher = macsec_ciphers_tmp.get(macsec_cipher, macsec_cipher)
instance.mac_sec_config.cipher = macsec_cipher
instance.mac_sec_config.sci_state = macsec_sci_state
if admin_state is not None:
instance.admin_state = admin_state
return parent
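# Illustrative example of the temporary cipher mapping above: a CLI-style value such as 'gcm-aes-128'
# is rewritten to the service enum 'GcmAes128'; values already in service form fall through unchanged
# via the dict.get fallback.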
# endregion
# region PrivateEndpoint
def create_private_endpoint(cmd, resource_group_name, private_endpoint_name, subnet,
private_connection_resource_id, connection_name, group_ids=None,
virtual_network_name=None, tags=None, location=None,
request_message=None, manual_request=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
PrivateEndpoint, Subnet, PrivateLinkServiceConnection = cmd.get_models('PrivateEndpoint',
'Subnet',
'PrivateLinkServiceConnection')
pls_connection = PrivateLinkServiceConnection(private_link_service_id=private_connection_resource_id,
group_ids=group_ids,
request_message=request_message,
name=connection_name)
private_endpoint = PrivateEndpoint(
location=location,
tags=tags,
subnet=Subnet(id=subnet)
)
if manual_request:
private_endpoint.manual_private_link_service_connections = [pls_connection]
else:
private_endpoint.private_link_service_connections = [pls_connection]
if edge_zone:
private_endpoint.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
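# Illustrative note on the connection placement above: when manual_request is truthy the connection is
# attached as a manual_private_link_service_connection (the provider side has to approve it), otherwise
# it goes into private_link_service_connections for the auto-approval flow.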
def update_private_endpoint(instance, cmd, tags=None, request_message=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
if request_message is not None:
if instance.private_link_service_connections:
instance.private_link_service_connections[0].request_message = request_message
else:
instance.manual_private_link_service_connections[0].request_message = request_message
return instance
def list_private_endpoints(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def create_private_endpoint_private_dns_zone_group(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneGroup, PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneGroup', 'PrivateDnsZoneConfig')
private_dns_zone_group = PrivateDnsZoneGroup(name=private_dns_zone_group_name,
private_dns_zone_configs=[PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, # pylint: disable=line-too-long
name=private_dns_zone_name)]) # pylint: disable=line-too-long
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneConfig')
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone = PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, name=private_dns_zone_name)
private_dns_zone_group.private_dns_zone_configs.append(private_dns_zone)
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def remove_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone_configs = [item for item in private_dns_zone_group.private_dns_zone_configs if item.name != private_dns_zone_name] # pylint: disable=line-too-long
private_dns_zone_group.private_dns_zone_configs = private_dns_zone_configs
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
# endregion
# region PrivateLinkService
def create_private_link_service(cmd, resource_group_name, service_name, subnet, frontend_ip_configurations,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
virtual_network_name=None, public_ip_address=None,
location=None, tags=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None,
enable_proxy_protocol=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
FrontendIPConfiguration, PrivateLinkService, PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = \
cmd.get_models('FrontendIPConfiguration', 'PrivateLinkService', 'PrivateLinkServiceIpConfiguration',
'PublicIPAddress', 'Subnet')
pls_ip_config = PrivateLinkServiceIpConfiguration(
name='{}_ipconfig_0'.format(service_name),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service = PrivateLinkService(
location=location,
load_balancer_frontend_ip_configurations=frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
],
ip_configurations=[pls_ip_config],
        visibility=visibility,
auto_approval=auto_approval,
fqdns=fqdns,
tags=tags,
enable_proxy_protocol=enable_proxy_protocol
)
if edge_zone:
link_service.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def update_private_link_service(instance, cmd, tags=None, frontend_ip_configurations=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None):
FrontendIPConfiguration = cmd.get_models('FrontendIPConfiguration')
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('load_balancer_frontend_ip_configurations', frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
])
c.set_param('visibility', visibility)
c.set_param('auto_approval', auto_approval)
c.set_param('fqdns', fqdns)
c.set_param('enable_proxy_protocol', enable_proxy_protocol)
return instance
def list_private_link_services(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def update_private_endpoint_connection(cmd, resource_group_name, service_name, pe_connection_name,
connection_status, description=None, action_required=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateEndpointConnection, PrivateLinkServiceConnectionState = cmd.get_models('PrivateEndpointConnection',
'PrivateLinkServiceConnectionState')
connection_state = PrivateLinkServiceConnectionState(
status=connection_status,
description=description,
actions_required=action_required
)
pe_connection = PrivateEndpointConnection(
private_link_service_connection_state=connection_state
)
return client.update_private_endpoint_connection(resource_group_name, service_name, pe_connection_name, pe_connection) # pylint: disable=line-too-long
def add_private_link_services_ipconfig(cmd, resource_group_name, service_name,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
subnet=None, virtual_network_name=None, public_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = cmd.get_models('PrivateLinkServiceIpConfiguration',
'PublicIPAddress',
'Subnet')
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_name_index = len(link_service.ip_configurations)
ip_config = PrivateLinkServiceIpConfiguration(
name='{0}_ipconfig_{1}'.format(service_name, ip_name_index),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def remove_private_link_services_ipconfig(cmd, resource_group_name, service_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_link_services
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_config = None
for item in link_service.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None: # pylint: disable=no-else-return
logger.warning("%s ip configuration doesn't exist", ip_config_name)
return link_service
else:
link_service.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
# endregion
def _edge_zone_model(cmd, edge_zone):
ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
return ExtendedLocation(name=edge_zone, type=ExtendedLocationTypes.EDGE_ZONE)
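# Illustrative usage of _edge_zone_model (the edge zone name is a placeholder): passing an edge zone
# such as 'microsoftlosangeles1' yields an ExtendedLocation of type EDGE_ZONE, which callers above
# attach as `extended_location` on private endpoints, private link services and load balancers.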
# region LoadBalancers
def create_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
public_ip_dns_name=None, subnet=None, subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
no_wait=False, sku=None, frontend_ip_zone=None, public_ip_zone=None,
private_ip_address_version=None, edge_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if edge_zone and cmd.supported_api_version(min_api='2020-08-01'):
edge_zone_type = 'EdgeZone'
else:
edge_zone_type = None
if subnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
network_id_template, virtual_network_name, subnet)
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, None, edge_zone, edge_zone_type))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, subnet_id, private_ip_address, private_ip_allocation, sku,
frontend_ip_zone, private_ip_address_version, None, edge_zone, edge_zone_type)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def list_load_balancer_nic(cmd, resource_group_name, load_balancer_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_network_interfaces
return client.list(resource_group_name, load_balancer_name)
def create_lb_inbound_nat_rule(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port,
backend_port, frontend_ip_name=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
InboundNatRule = cmd.get_models('InboundNatRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) # pylint: disable=no-member
new_rule = InboundNatRule(
name=item_name, protocol=protocol,
frontend_port=frontend_port, backend_port=backend_port,
frontend_ip_configuration=frontend_ip,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
upsert_to_collection(lb, 'inbound_nat_rules', new_rule, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_rules, item_name)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get(client, resource_group_name, load_balancer_name):
lb = client.get(resource_group_name, load_balancer_name)
return lb_get_operation(lb)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get_operation(lb):
for item in lb.frontend_ip_configurations:
if item.zones is not None and len(item.zones) >= 3 and item.subnet is None:
item.zones = None
return lb
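# Illustrative sketch of the workaround above (see the linked issue): a GET can return zone-redundant
# frontend IP configurations with zones == ['1', '2', '3'] and no subnet; echoing those zones back on
# the subsequent PUT fails, so lb_get_operation clears them before the load balancer is re-submitted.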
def set_lb_inbound_nat_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
if frontend_ip_name:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_floating_ip', floating_ip)
return parent
def create_lb_inbound_nat_pool(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port_range_start,
frontend_port_range_end, backend_port, frontend_ip_name=None, enable_tcp_reset=None,
floating_ip=None, idle_timeout=None):
InboundNatPool = cmd.get_models('InboundNatPool')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) \
if frontend_ip_name else None
new_pool = InboundNatPool(
name=item_name,
protocol=protocol,
frontend_ip_configuration=frontend_ip,
frontend_port_range_start=frontend_port_range_start,
frontend_port_range_end=frontend_port_range_end,
backend_port=backend_port,
enable_tcp_reset=enable_tcp_reset,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout)
upsert_to_collection(lb, 'inbound_nat_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_pools, item_name)
def set_lb_inbound_nat_pool(
cmd, instance, parent, item_name, protocol=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_port=None,
frontend_ip_name=None, enable_tcp_reset=None, floating_ip=None, idle_timeout=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port_range_start', frontend_port_range_start)
c.set_param('frontend_port_range_end', frontend_port_range_end)
c.set_param('backend_port', backend_port)
c.set_param('enable_floating_ip', floating_ip)
c.set_param('idle_timeout_in_minutes', idle_timeout)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
if frontend_ip_name == '':
instance.frontend_ip_configuration = None
elif frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
return parent
def create_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, subnet=None, virtual_network_name=None, private_ip_address=None,
private_ip_address_version=None, private_ip_address_allocation=None, zone=None):
FrontendIPConfiguration, SubResource, Subnet = cmd.get_models(
'FrontendIPConfiguration', 'SubResource', 'Subnet')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if private_ip_address_allocation is None:
private_ip_address_allocation = 'static' if private_ip_address else 'dynamic'
new_config = FrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address,
private_ip_address_version=private_ip_address_version,
private_ip_allocation_method=private_ip_address_allocation,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None,
subnet=Subnet(id=subnet) if subnet else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def update_lb_frontend_ip_configuration_setter(cmd, resource_group_name, load_balancer_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).load_balancers
return client.begin_create_or_update(resource_group_name, load_balancer_name, parameters)
def set_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, private_ip_address=None,
private_ip_address_allocation=None, public_ip_address=None,
subnet=None, virtual_network_name=None, public_ip_prefix=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if not private_ip_address:
instance.private_ip_allocation_method = 'dynamic'
instance.private_ip_address = None
    else:
instance.private_ip_allocation_method = 'static'
instance.private_ip_address = private_ip_address
# Doesn't support update operation for now
# if cmd.supported_api_version(min_api='2019-04-01'):
# instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
            raise UnrecognizedArgumentError('a virtual network must be provided when the subnet is '
                                            'given by name instead of by resource ID')
subnet = vnet + f'/subnets/{subnet}'
return subnet
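# Illustrative expansion performed by the two helpers above (names are placeholders): with
# vnet='MyVnet' and subnet='MySubnet' in resource group 'MyRg', the subnet argument becomes
#   /subscriptions/<sub-id>/resourceGroups/MyRg/providers/Microsoft.Network/virtualNetworks/MyVnet/subnets/MySubnet
# while values that are already valid resource IDs are passed through unchanged.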
# pylint: disable=too-many-branches
def create_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
vnet=None, backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
(BackendAddressPool,
LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
    # Before API version 2020-03-01 the service does not support the dedicated backend address pool
    # operations, so fall back to updating the whole load balancer to keep backward compatibility.
    # The same applies to Basic SKU load balancers, which the service rejects on the dedicated operations.
if cmd.supported_api_version(max_api='2020-03-01') or lb.sku.name.lower() == 'basic':
new_pool = BackendAddressPool(name=backend_address_pool_name)
upsert_to_collection(lb, 'backend_address_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().backend_address_pools, backend_address_pool_name)
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
    # when the SKU is 'Gateway', 'tunnelInterfaces' cannot be None; otherwise the service returns an error
if cmd.supported_api_version(min_api='2021-02-01') and lb.sku.name.lower() == 'gateway':
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
new_pool.tunnel_interfaces = [
GatewayLoadBalancerTunnelInterface(type='Internal', protocol='VXLAN', identifier=900)]
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
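# Illustrative shape of the --backend-addresses-config-file content consumed above, assuming the
# 2020-11-01+ code path (names and addresses are placeholders):
# [
#     {"name": "addr1", "ip_address": "10.0.0.4", "virtual_network": "MyVnet"},
#     {"name": "addr2", "ip_address": "10.0.0.5", "virtual_network": "MyVnet", "subnet": "MySubnet"},
#     {"name": "addr3", "ip_address": "10.0.1.4",
#      "subnet": "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/MyVnet/subnets/MySubnet"}
# ]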
def delete_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name):
from azure.cli.core.commands import LongRunningOperation
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
def delete_basic_lb_backend_address_pool():
new_be_pools = [pool for pool in lb.backend_address_pools
if pool.name.lower() != backend_address_pool_name.lower()]
lb.backend_address_pools = new_be_pools
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
result = LongRunningOperation(cmd.cli_ctx)(poller).backend_address_pools
if next((x for x in result if x.name.lower() == backend_address_pool_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(backend_address_pool_name, load_balancer_name))
if lb.sku.name.lower() == 'basic':
delete_basic_lb_backend_address_pool()
return None
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
# region cross-region lb
def create_cross_region_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
public_ip_address=None, public_ip_address_allocation=None,
public_ip_dns_name=None, public_ip_address_type=None, validate=False,
no_wait=False, frontend_ip_zone=None, public_ip_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
sku = 'standard'
tier = 'Global'
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, tier))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, None, None, None, sku, frontend_ip_zone, None, tier)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_cross_region_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, zone=None):
FrontendIPConfiguration, SubResource = cmd.get_models(
'FrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_config = FrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def set_cross_region_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, public_ip_address=None, public_ip_prefix=None):
PublicIPAddress, SubResource = cmd.get_models('PublicIPAddress', 'SubResource')
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return parent
def create_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
(BackendAddressPool,
LoadBalancerBackendAddress,
FrontendIPConfiguration) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'FrontendIPConfiguration')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
# pylint: disable=line-too-long
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=addr['frontend_ip_address'])) for addr in addresses_pool] if addresses_pool else None
except KeyError:
        raise CLIError('Each backend address must have name and frontend_ip_address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def delete_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name): # pylint: disable=line-too-long
ncf = network_client_factory(cmd.cli_ctx)
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
def add_cross_region_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name, frontend_ip_address):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
# pylint: disable=line-too-long
(LoadBalancerBackendAddress, FrontendIPConfiguration) = cmd.get_models('LoadBalancerBackendAddress', 'FrontendIPConfiguration')
new_address = LoadBalancerBackendAddress(name=address_name,
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=frontend_ip_address) if frontend_ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def create_cross_region_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, i) for i in backend_pools_name]
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_cross_region_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
# To keep compatible when bump version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
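# Illustrative note on the compatibility shim above: from API version 2021-02-01 the service prefers the
# plural backend_address_pools collection, so when no backend_pools_name values are supplied the singular
# backend_address_pool is mirrored into backend_address_pools to keep older invocations working
# (see the linked azure-rest-api-specs issue).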
# endregion
# pylint: disable=line-too-long
def add_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
address_name, ip_address, vnet=None, subnet=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
if cmd.supported_api_version(min_api='2020-11-01'):
if vnet:
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=_process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name)) if subnet else None,
virtual_network=VirtualNetwork(id=vnet),
ip_address=ip_address if ip_address else None)
elif is_valid_resource_id(subnet):
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=subnet),
ip_address=ip_address if ip_address else None)
else:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet name | subnet id) information.')
else:
new_address = LoadBalancerBackendAddress(name=address_name,
virtual_network=VirtualNetwork(id=vnet) if vnet else None,
ip_address=ip_address if ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
lb_addresses = [addr for addr in address_pool.load_balancer_backend_addresses if addr.name != address_name]
address_pool.load_balancer_backend_addresses = lb_addresses
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.load_balancer_backend_addresses
def create_lb_outbound_rule(cmd, resource_group_name, load_balancer_name, item_name,
backend_address_pool, frontend_ip_configurations, protocol,
outbound_ports=None, enable_tcp_reset=None, idle_timeout=None):
OutboundRule, SubResource = cmd.get_models('OutboundRule', 'SubResource')
client = network_client_factory(cmd.cli_ctx).load_balancers
lb = lb_get(client, resource_group_name, load_balancer_name)
rule = OutboundRule(
protocol=protocol, enable_tcp_reset=enable_tcp_reset, idle_timeout_in_minutes=idle_timeout,
backend_address_pool=SubResource(id=backend_address_pool),
frontend_ip_configurations=[SubResource(id=x) for x in frontend_ip_configurations]
if frontend_ip_configurations else None,
allocated_outbound_ports=outbound_ports, name=item_name)
upsert_to_collection(lb, 'outbound_rules', rule, 'name')
poller = client.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().outbound_rules, item_name)
def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbound_ports=None,
idle_timeout=None, frontend_ip_configurations=None, enable_tcp_reset=None,
backend_address_pool=None):
SubResource = cmd.get_models('SubResource')
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('allocated_outbound_ports', outbound_ports)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
c.set_param('frontend_ip_configurations',
[SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
def create_lb_probe(cmd, resource_group_name, load_balancer_name, item_name, protocol, port,
path=None, interval=None, threshold=None):
Probe = cmd.get_models('Probe')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_probe = Probe(
protocol=protocol, port=port, interval_in_seconds=interval, number_of_probes=threshold,
request_path=path, name=item_name)
upsert_to_collection(lb, 'probes', new_probe, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().probes, item_name)
def set_lb_probe(cmd, instance, parent, item_name, protocol=None, port=None,
path=None, interval=None, threshold=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('port', port)
c.set_param('request_path', path)
c.set_param('interval_in_seconds', interval)
c.set_param('number_of_probes', threshold)
return parent
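# A load balancing rule can reference a single backend pool (--backend-pool-name) or, on newer
# API versions, several pools at once; when multiple pools are given, the plural
# backend_address_pools property is used and the singular one is cleared, because the service
# rejects requests that set both with different values.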
def create_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, disable_outbound_snat=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
# avoid breaking when backend_address_pool_name is None and backend_pools_name is not None
if not backend_address_pool_name and backend_pools_name:
backend_address_pool_name = backend_pools_name[0]
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset,
disable_outbound_snat=disable_outbound_snat)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, name) for name in backend_pools_name]
# Otherwise the service will respond with error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue)
# BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
new_rule.backend_address_pool = None
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution='default', floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
disable_outbound_snat=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('disable_outbound_snat', disable_outbound_snat)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
# To keep compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
# Otherwise the service will respond with error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue)
# BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
instance.backend_address_pool = None
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
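# Tunnel interfaces apply to Gateway Load Balancer backend pools; the update and remove helpers
# below address an existing tunnel interface by its zero-based index in the list.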
def add_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, protocol, identifier, traffic_type, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
tunnel_interface = GatewayLoadBalancerTunnelInterface(port=port, identifier=identifier, protocol=protocol, type=traffic_type)
if not address_pool.tunnel_interfaces:
address_pool.tunnel_interfaces = []
address_pool.tunnel_interfaces.append(tunnel_interface)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def update_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index, protocol=None, identifier=None, traffic_type=None, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
item = address_pool.tunnel_interfaces[index]
if protocol:
item.protocol = protocol
if identifier:
item.identifier = identifier
if port:
item.port = port
if traffic_type:
item.type = traffic_type
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
address_pool.tunnel_interfaces.pop(index)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.tunnel_interfaces
# endregion
# region LocalGateways
def _validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight):
if any([asn, bgp_peering_address, peer_weight]):
if instance.bgp_settings is not None:
# update existing parameters selectively
if asn is not None:
instance.bgp_settings.asn = asn
if peer_weight is not None:
instance.bgp_settings.peer_weight = peer_weight
if bgp_peering_address is not None:
instance.bgp_settings.bgp_peering_address = bgp_peering_address
elif asn:
BgpSettings = cmd.get_models('BgpSettings')
instance.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address, peer_weight=peer_weight)
else:
raise CLIError(
'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP]')
def create_local_gateway(cmd, resource_group_name, local_network_gateway_name, gateway_ip_address,
location=None, tags=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, no_wait=False):
AddressSpace, LocalNetworkGateway, BgpSettings = cmd.get_models(
'AddressSpace', 'LocalNetworkGateway', 'BgpSettings')
client = network_client_factory(cmd.cli_ctx).local_network_gateways
local_gateway = LocalNetworkGateway(
local_network_address_space=AddressSpace(address_prefixes=(local_address_prefix or [])),
location=location, tags=tags, gateway_ip_address=gateway_ip_address)
if bgp_peering_address or asn or peer_weight:
local_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, local_network_gateway_name, local_gateway)
def update_local_gateway(cmd, instance, gateway_ip_address=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, tags=None):
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
if gateway_ip_address is not None:
instance.gateway_ip_address = gateway_ip_address
if local_address_prefix is not None:
instance.local_network_address_space.address_prefixes = local_address_prefix
if tags is not None:
instance.tags = tags
return instance
# endregion
# region NetworkInterfaces (NIC)
def create_nic(cmd, resource_group_name, network_interface_name, subnet, location=None, tags=None,
internal_dns_name_label=None, dns_servers=None, enable_ip_forwarding=False,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
load_balancer_name=None, network_security_group=None,
private_ip_address=None, private_ip_address_version=None,
public_ip_address=None, virtual_network_name=None, enable_accelerated_networking=None,
application_security_groups=None, no_wait=False,
app_gateway_backend_address_pools=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
(NetworkInterface, NetworkInterfaceDnsSettings, NetworkInterfaceIPConfiguration, NetworkSecurityGroup,
PublicIPAddress, Subnet, SubResource) = cmd.get_models(
'NetworkInterface', 'NetworkInterfaceDnsSettings', 'NetworkInterfaceIPConfiguration',
'NetworkSecurityGroup', 'PublicIPAddress', 'Subnet', 'SubResource')
dns_settings = NetworkInterfaceDnsSettings(internal_dns_name_label=internal_dns_name_label,
dns_servers=dns_servers or [])
nic = NetworkInterface(location=location, tags=tags, enable_ip_forwarding=enable_ip_forwarding,
dns_settings=dns_settings)
if cmd.supported_api_version(min_api='2016-09-01'):
nic.enable_accelerated_networking = enable_accelerated_networking
if network_security_group:
nic.network_security_group = NetworkSecurityGroup(id=network_security_group)
ip_config_args = {
'name': 'ipconfig1',
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic',
'private_ip_address': private_ip_address,
'subnet': Subnet(id=subnet),
'application_gateway_backend_address_pools':
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if app_gateway_backend_address_pools else None
}
if cmd.supported_api_version(min_api='2016-09-01'):
ip_config_args['private_ip_address_version'] = private_ip_address_version
if cmd.supported_api_version(min_api='2017-09-01'):
ip_config_args['application_security_groups'] = application_security_groups
ip_config = NetworkInterfaceIPConfiguration(**ip_config_args)
if public_ip_address:
ip_config.public_ip_address = PublicIPAddress(id=public_ip_address)
nic.ip_configurations = [ip_config]
if edge_zone:
nic.extended_location = _edge_zone_model(cmd, edge_zone)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, network_interface_name, nic)
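# In the update helpers below, an explicit empty string ('') means "clear this property",
# while None means "leave it unchanged".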
def update_nic(cmd, instance, network_security_group=None, enable_ip_forwarding=None,
internal_dns_name_label=None, dns_servers=None, enable_accelerated_networking=None):
if enable_ip_forwarding is not None:
instance.enable_ip_forwarding = enable_ip_forwarding
if network_security_group == '':
instance.network_security_group = None
elif network_security_group is not None:
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
if internal_dns_name_label == '':
instance.dns_settings.internal_dns_name_label = None
elif internal_dns_name_label is not None:
instance.dns_settings.internal_dns_name_label = internal_dns_name_label
if dns_servers == ['']:
instance.dns_settings.dns_servers = None
elif dns_servers:
instance.dns_settings.dns_servers = dns_servers
if enable_accelerated_networking is not None:
instance.enable_accelerated_networking = enable_accelerated_networking
return instance
def create_nic_ip_config(cmd, resource_group_name, network_interface_name, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None,
make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None):
NetworkInterfaceIPConfiguration, PublicIPAddress, Subnet, SubResource = cmd.get_models(
'NetworkInterfaceIPConfiguration', 'PublicIPAddress', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
nic = ncf.network_interfaces.get(resource_group_name, network_interface_name)
if cmd.supported_api_version(min_api='2016-09-01'):
IPVersion = cmd.get_models('IPVersion')
private_ip_address_version = private_ip_address_version or IPVersion.I_PV4.value
if private_ip_address_version == IPVersion.I_PV4.value and not subnet:
primary_config = next(x for x in nic.ip_configurations if x.primary)
subnet = primary_config.subnet.id
if make_primary:
for config in nic.ip_configurations:
config.primary = False
new_config_args = {
'name': ip_config_name,
'subnet': Subnet(id=subnet) if subnet else None,
'public_ip_address': PublicIPAddress(id=public_ip_address) if public_ip_address else None,
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_address': private_ip_address,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic'
}
if cmd.supported_api_version(min_api='2016-09-01'):
new_config_args['private_ip_address_version'] = private_ip_address_version
new_config_args['primary'] = make_primary
if cmd.supported_api_version(min_api='2017-09-01'):
new_config_args['application_security_groups'] = application_security_groups
if cmd.supported_api_version(min_api='2018-08-01'):
new_config_args['application_gateway_backend_address_pools'] = \
[SubResource(id=x) for x in app_gateway_backend_address_pools] \
if app_gateway_backend_address_pools else None
new_config = NetworkInterfaceIPConfiguration(**new_config_args)
upsert_to_collection(nic, 'ip_configurations', new_config, 'name')
poller = ncf.network_interfaces.begin_create_or_update(
resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def update_nic_ip_config_setter(cmd, resource_group_name, network_interface_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).network_interfaces
return client.begin_create_or_update(resource_group_name, network_interface_name, parameters)
def set_nic_ip_config(cmd, instance, parent, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None, make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if make_primary:
for config in parent.ip_configurations:
config.primary = False
instance.primary = True
if private_ip_address == '':
# switch private IP address allocation to Dynamic if empty string is used
instance.private_ip_address = None
instance.private_ip_allocation_method = 'dynamic'
if cmd.supported_api_version(min_api='2016-09-01'):
instance.private_ip_address_version = 'ipv4'
elif private_ip_address is not None:
# if specific address provided, allocation is static
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'static'
if private_ip_address_version is not None:
instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if load_balancer_backend_address_pool_ids == '':
instance.load_balancer_backend_address_pools = None
elif load_balancer_backend_address_pool_ids is not None:
instance.load_balancer_backend_address_pools = load_balancer_backend_address_pool_ids
if load_balancer_inbound_nat_rule_ids == '':
instance.load_balancer_inbound_nat_rules = None
elif load_balancer_inbound_nat_rule_ids is not None:
instance.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rule_ids
if application_security_groups == ['']:
instance.application_security_groups = None
elif application_security_groups:
instance.application_security_groups = application_security_groups
if app_gateway_backend_address_pools == ['']:
instance.application_gateway_backend_address_pools = None
elif app_gateway_backend_address_pools:
instance.application_gateway_backend_address_pools = \
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _get_nic_ip_config(nic, name):
if nic.ip_configurations:
ip_config = next(
(x for x in nic.ip_configurations if x.name.lower() == name.lower()), None)
else:
ip_config = None
if not ip_config:
raise CLIError('IP configuration {} not found.'.format(name))
return ip_config
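# The helpers below modify a single IP configuration in place and then PUT the whole NIC back,
# returning the updated IP configuration from the poller result.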
def add_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
BackendAddressPool = cmd.get_models('BackendAddressPool')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
upsert_to_collection(ip_config, 'load_balancer_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
elif application_gateway_name:
upsert_to_collection(ip_config, 'application_gateway_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
keep_items = [x for x in ip_config.load_balancer_backend_address_pools or [] if x.id != backend_address_pool]
ip_config.load_balancer_backend_address_pools = keep_items
elif application_gateway_name:
keep_items = [x for x in ip_config.application_gateway_backend_address_pools or [] if
x.id != backend_address_pool]
ip_config.application_gateway_backend_address_pools = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def add_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
InboundNatRule = cmd.get_models('InboundNatRule')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
upsert_to_collection(ip_config, 'load_balancer_inbound_nat_rules',
InboundNatRule(id=inbound_nat_rule),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
keep_items = \
[x for x in ip_config.load_balancer_inbound_nat_rules or [] if x.id != inbound_nat_rule]
ip_config.load_balancer_inbound_nat_rules = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
# endregion
# region NetworkSecurityGroups
def create_nsg(cmd, resource_group_name, network_security_group_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).network_security_groups
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
nsg = NetworkSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, network_security_group_name, nsg)
def _create_singular_or_plural_property(kwargs, val, singular_name, plural_name):
if not val:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
kwargs[plural_name] = val
kwargs[singular_name] = None
else:
kwargs[singular_name] = val[0]
kwargs[plural_name] = None
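# For example, a single value populates only the singular key, e.g.
#   {'source_address_prefix': '10.0.0.0/24', 'source_address_prefixes': None},
# while two or more values populate only the plural key instead.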
def _handle_asg_property(kwargs, key, asgs):
prefix = key.split('_', 1)[0] + '_'
if asgs:
kwargs[key] = asgs
if kwargs[prefix + 'address_prefix'].is_default:
kwargs[prefix + 'address_prefix'] = ''
def create_nsg_rule_2017_06_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_ranges='*', source_address_prefixes='*',
destination_port_ranges=80, destination_address_prefixes='*',
source_asgs=None, destination_asgs=None):
kwargs = {
'protocol': protocol,
'direction': direction,
'description': description,
'priority': priority,
'access': access,
'name': security_rule_name
}
_create_singular_or_plural_property(kwargs, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_create_singular_or_plural_property(kwargs, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_create_singular_or_plural_property(kwargs, source_port_ranges,
'source_port_range', 'source_port_ranges')
_create_singular_or_plural_property(kwargs, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
kwargs['source_address_prefix'] = kwargs['source_address_prefix'] or ''
kwargs['destination_address_prefix'] = kwargs['destination_address_prefix'] or ''
if cmd.supported_api_version(min_api='2017-09-01'):
_handle_asg_property(kwargs, 'source_application_security_groups', source_asgs)
_handle_asg_property(kwargs, 'destination_application_security_groups', destination_asgs)
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(**kwargs)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def create_nsg_rule_2017_03_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_range='*', source_address_prefix='*',
destination_port_range=80, destination_address_prefix='*'):
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix,
destination_address_prefix=destination_address_prefix, access=access,
direction=direction,
description=description, source_port_range=source_port_range,
destination_port_range=destination_port_range, priority=priority,
name=security_rule_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def _update_singular_or_plural_property(instance, val, singular_name, plural_name):
if val is None:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
setattr(instance, plural_name, val)
setattr(instance, singular_name, None)
else:
setattr(instance, plural_name, None)
setattr(instance, singular_name, val[0])
def update_nsg_rule_2017_06_01(instance, protocol=None, source_address_prefixes=None,
destination_address_prefixes=None, access=None, direction=None, description=None,
source_port_ranges=None, destination_port_ranges=None, priority=None,
source_asgs=None, destination_asgs=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.priority = priority if priority is not None else instance.priority
_update_singular_or_plural_property(instance, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_update_singular_or_plural_property(instance, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_update_singular_or_plural_property(instance, source_port_ranges,
'source_port_range', 'source_port_ranges')
_update_singular_or_plural_property(instance, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
instance.source_address_prefix = instance.source_address_prefix or ''
instance.destination_address_prefix = instance.destination_address_prefix or ''
if source_asgs == ['']:
instance.source_application_security_groups = None
elif source_asgs:
instance.source_application_security_groups = source_asgs
if destination_asgs == ['']:
instance.destination_application_security_groups = None
elif destination_asgs:
instance.destination_application_security_groups = destination_asgs
return instance
def update_nsg_rule_2017_03_01(instance, protocol=None, source_address_prefix=None,
destination_address_prefix=None, access=None, direction=None, description=None,
source_port_range=None, destination_port_range=None, priority=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.source_address_prefix = (source_address_prefix if source_address_prefix is not None
else instance.source_address_prefix)
instance.destination_address_prefix = destination_address_prefix \
if destination_address_prefix is not None else instance.destination_address_prefix
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.source_port_range = source_port_range \
if source_port_range is not None else instance.source_port_range
instance.destination_port_range = destination_port_range \
if destination_port_range is not None else instance.destination_port_range
instance.priority = priority if priority is not None else instance.priority
return instance
# endregion
# region NetworkProfiles
def list_network_profiles(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).network_profiles
if resource_group_name:
return client.list(resource_group_name)
return client.list_all()
# endregion
# region NetworkWatchers
def _create_network_watchers(cmd, client, resource_group_name, locations, tags):
if resource_group_name is None:
raise CLIError("usage error: '--resource-group' required when enabling new regions")
NetworkWatcher = cmd.get_models('NetworkWatcher')
for location in locations:
client.create_or_update(
resource_group_name, '{}-watcher'.format(location),
NetworkWatcher(location=location, tags=tags))
def _update_network_watchers(cmd, client, watchers, tags):
NetworkWatcher = cmd.get_models('NetworkWatcher')
for watcher in watchers:
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
watcher_tags = watcher.tags if tags is None else tags
client.create_or_update(
watcher_rg, watcher_name,
NetworkWatcher(location=watcher.location, tags=watcher_tags))
def _delete_network_watchers(cmd, client, watchers):
for watcher in watchers:
from azure.cli.core.commands import LongRunningOperation
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
logger.warning(
"Disabling Network Watcher for region '%s' by deleting resource '%s'",
watcher.location, watcher.id)
LongRunningOperation(cmd.cli_ctx)(client.begin_delete(watcher_rg, watcher_name))
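# 'enabled' drives three modes: None only updates tags on existing watchers, True additionally
# creates watchers for regions that lack one, and False deletes the watchers for those regions
# (in which case tags are not allowed).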
def configure_network_watcher(cmd, client, locations, resource_group_name=None, enabled=None, tags=None):
watcher_list = list(client.list_all())
locations_list = [location.lower() for location in locations]
existing_watchers = [w for w in watcher_list if w.location in locations_list]
nonenabled_regions = list(set(locations) - set(watcher.location for watcher in existing_watchers))
if enabled is None:
if resource_group_name is not None:
logger.warning(
"Resource group '%s' is only used when enabling new regions and will be ignored.",
resource_group_name)
for location in nonenabled_regions:
logger.warning(
"Region '%s' is not enabled for Network Watcher and will be ignored.", location)
_update_network_watchers(cmd, client, existing_watchers, tags)
elif enabled:
_create_network_watchers(cmd, client, resource_group_name, nonenabled_regions, tags)
_update_network_watchers(cmd, client, existing_watchers, tags)
else:
if tags is not None:
raise CLIError("usage error: '--tags' cannot be used when disabling regions")
_delete_network_watchers(cmd, client, existing_watchers)
return client.list_all()
def create_nw_connection_monitor(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
resource_group_name=None,
location=None,
source_resource=None,
source_port=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=None,
output_type=None,
workspace_ids=None,
notes=None):
v1_required_parameter_set = [
source_resource, source_port,
dest_resource, dest_address, dest_port
]
v2_required_parameter_set = [
endpoint_source_name, endpoint_source_resource_id, endpoint_source_type, endpoint_source_coverage_level,
endpoint_dest_name, endpoint_dest_address, endpoint_dest_type, endpoint_dest_coverage_level,
test_config_name, test_config_protocol,
output_type, workspace_ids,
]
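    # Dispatch: any V1-style argument (source/destination resource, port, address) builds the
    # legacy source/destination connection monitor and uses an older ad-hoc API version, while
    # any V2-style argument (endpoints, test configurations, outputs) builds the V2 shape.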
if any(v1_required_parameter_set): # V1 creation
connection_monitor = _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name,
source_port,
location,
dest_resource,
dest_port,
dest_address,
tags,
do_not_start,
monitoring_interval)
from azure.cli.core.profiles._shared import AD_HOC_API_VERSIONS
client = get_mgmt_service_client(
cmd.cli_ctx,
ResourceType.MGMT_NETWORK,
api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['nw_connection_monitor']
).connection_monitors
elif any(v2_required_parameter_set): # V2 creation
connection_monitor = _create_nw_connection_monitor_v2(cmd,
location,
tags,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address,
endpoint_source_type,
endpoint_source_coverage_level,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address,
endpoint_dest_type,
endpoint_dest_coverage_level,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_preferred_ip_version,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https,
test_group_name,
test_group_disable,
output_type,
workspace_ids,
notes)
else:
raise CLIError('usage error: provide either connection monitor V1 arguments (source/destination) or V2 arguments (endpoints, test configuration, output)')
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name=None,
source_port=None,
location=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=60):
ConnectionMonitor, ConnectionMonitorSource, ConnectionMonitorDestination = cmd.get_models(
'ConnectionMonitor', 'ConnectionMonitorSource', 'ConnectionMonitorDestination')
cmv1 = ConnectionMonitor(
location=location,
tags=tags,
source=ConnectionMonitorSource(
resource_id=source_resource,
port=source_port
),
destination=ConnectionMonitorDestination(
resource_id=dest_resource,
port=dest_port,
address=dest_address
),
auto_start=not do_not_start,
monitoring_interval_in_seconds=monitoring_interval,
endpoints=None,
test_configurations=None,
test_groups=None,
outputs=None,
notes=None
)
return cmv1
def _create_nw_connection_monitor_v2(cmd,
location=None,
tags=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_tcp_disable_trace_route=False,
test_config_icmp_disable_trace_route=False,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=False,
output_type=None,
workspace_ids=None,
notes=None):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_resource_id=endpoint_source_resource_id,
address=endpoint_source_address,
endpoint_type=endpoint_source_type,
coverage_level=endpoint_source_coverage_level)
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_resource_id=endpoint_dest_resource_id,
address=endpoint_dest_address,
endpoint_type=endpoint_dest_type,
coverage_level=endpoint_dest_coverage_level)
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
test_group = _create_nw_connection_monitor_v2_test_group(cmd,
test_group_name,
test_group_disable,
[test_config],
[src_endpoint],
[dst_endpoint])
if output_type:
outputs = []
if workspace_ids:
for workspace_id in workspace_ids:
output = _create_nw_connection_monitor_v2_output(cmd, output_type, workspace_id)
outputs.append(output)
else:
outputs = []
ConnectionMonitor = cmd.get_models('ConnectionMonitor')
cmv2 = ConnectionMonitor(location=location,
tags=tags,
auto_start=None,
monitoring_interval_in_seconds=None,
endpoints=[src_endpoint, dst_endpoint],
test_configurations=[test_config],
test_groups=[test_group],
outputs=outputs,
notes=notes)
return cmv2
def _create_nw_connection_monitor_v2_endpoint(cmd,
name,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
endpoint_type=None,
coverage_level=None):
if (filter_type and not filter_items) or (not filter_type and filter_items):
raise CLIError('usage error: '
               '--filter-type and --filter-item for an endpoint filter must be provided together.')
ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter')
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
return endpoint
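# A test configuration carries exactly one protocol-specific block (TCP, ICMP or HTTP); the
# shared success threshold is only attached when a failed-percent or round-trip-time limit is given.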
def _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
test_frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
(ConnectionMonitorTestConfigurationProtocol,
ConnectionMonitorTestConfiguration, ConnectionMonitorSuccessThreshold) = cmd.get_models(
'ConnectionMonitorTestConfigurationProtocol',
'ConnectionMonitorTestConfiguration', 'ConnectionMonitorSuccessThreshold')
test_config = ConnectionMonitorTestConfiguration(name=name,
test_frequency_sec=test_frequency,
protocol=protocol,
preferred_ip_version=preferred_ip_version)
if threshold_failed_percent or threshold_round_trip_time:
threshold = ConnectionMonitorSuccessThreshold(checks_failed_percent=threshold_failed_percent,
round_trip_time_ms=threshold_round_trip_time)
test_config.success_threshold = threshold
if protocol == ConnectionMonitorTestConfigurationProtocol.tcp:
ConnectionMonitorTcpConfiguration = cmd.get_models('ConnectionMonitorTcpConfiguration')
tcp_config = ConnectionMonitorTcpConfiguration(
port=tcp_port,
destination_port_behavior=tcp_port_behavior,
disable_trace_route=tcp_disable_trace_route
)
test_config.tcp_configuration = tcp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.icmp:
ConnectionMonitorIcmpConfiguration = cmd.get_models('ConnectionMonitorIcmpConfiguration')
icmp_config = ConnectionMonitorIcmpConfiguration(disable_trace_route=icmp_disable_trace_route)
test_config.icmp_configuration = icmp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.http:
ConnectionMonitorHttpConfiguration = cmd.get_models('ConnectionMonitorHttpConfiguration')
http_config = ConnectionMonitorHttpConfiguration(
port=http_port,
method=http_method,
path=http_path,
request_headers=http_request_headers,
valid_status_code_ranges=http_valid_status_codes,
prefer_https=http_prefer_https)
test_config.http_configuration = http_config
else:
raise CLIError('Unsupported protocol: "{}" for test configuration'.format(protocol))
return test_config
def _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
test_configurations,
source_endpoints,
destination_endpoints):
ConnectionMonitorTestGroup = cmd.get_models('ConnectionMonitorTestGroup')
test_group = ConnectionMonitorTestGroup(name=name,
disable=disable,
test_configurations=[tc.name for tc in test_configurations],
sources=[e.name for e in source_endpoints],
destinations=[e.name for e in destination_endpoints])
return test_group
def _create_nw_connection_monitor_v2_output(cmd,
output_type,
workspace_id=None):
ConnectionMonitorOutput, OutputType = cmd.get_models('ConnectionMonitorOutput', 'OutputType')
output = ConnectionMonitorOutput(type=output_type)
if output_type == OutputType.workspace:
ConnectionMonitorWorkspaceSettings = cmd.get_models('ConnectionMonitorWorkspaceSettings')
workspace = ConnectionMonitorWorkspaceSettings(workspace_resource_id=workspace_id)
output.workspace_settings = workspace
else:
raise CLIError('Unsupported output type: "{}"'.format(output_type))
return output
def add_nw_connection_monitor_v2_endpoint(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
coverage_level=None,
endpoint_type=None,
source_test_groups=None,
dest_test_groups=None,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
address_include=None,
address_exclude=None):
(ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter,
ConnectionMonitorEndpointScope, ConnectionMonitorEndpointScopeItem) = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter',
'ConnectionMonitorEndpointScope', 'ConnectionMonitorEndpointScopeItem')
endpoint_scope = ConnectionMonitorEndpointScope(include=[], exclude=[])
for ip in address_include or []:
include_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.include.append(include_item)
for ip in address_exclude or []:
exclude_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.exclude.append(exclude_item)
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level,
scope=endpoint_scope if address_include or address_exclude else None)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.endpoints.append(endpoint)
src_test_groups, dst_test_groups = set(source_test_groups or []), set(dest_test_groups or [])
for test_group in connection_monitor.test_groups:
if test_group.name in src_test_groups:
test_group.sources.append(endpoint.name)
if test_group.name in dst_test_groups:
test_group.destinations.append(endpoint.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh endpoints
new_endpoints = [endpoint for endpoint in connection_monitor.endpoints if endpoint.name != name]
connection_monitor.endpoints = new_endpoints
# refresh test groups
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
for test_group in temp_test_groups:
if name in test_group.sources:
test_group.sources.remove(name)
if name in test_group.destinations:
test_group.destinations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for endpoint in connection_monitor.endpoints:
if endpoint.name == name:
return endpoint
raise CLIError('unknown endpoint: {}'.format(name))
def list_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.endpoints
def add_nw_connection_monitor_v2_test_configuration(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
protocol,
test_groups,
frequency=None,
threshold_failed_percent=None,
threshold_round_trip_time=None,
preferred_ip_version=None,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
new_test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port,
tcp_port_behavior,
tcp_disable_trace_route,
icmp_disable_trace_route,
http_port,
http_method,
http_path,
http_valid_status_codes,
http_prefer_https,
http_request_headers)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.test_configurations.append(new_test_config)
for test_group in connection_monitor.test_groups:
if test_group.name in test_groups:
test_group.test_configurations.append(new_test_config.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh test configurations
new_test_configurations = [t for t in connection_monitor.test_configurations if t.name != name]
connection_monitor.test_configurations = new_test_configurations
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
# refresh test groups
for test_group in temp_test_groups:
test_group.test_configurations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for test_config in connection_monitor.test_configurations:
if test_config.name == name:
return test_config
raise CLIError('unknown test configuration: {}'.format(name))
def list_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_configurations
def add_nw_connection_monitor_v2_test_group(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
location,
name,
endpoint_source_name,
endpoint_dest_name,
test_config_name,
disable=False,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None):
new_test_configuration_creation_requirements = [
test_config_protocol, test_config_preferred_ip_version,
test_config_threshold_failed_percent, test_config_threshold_round_trip_time,
test_config_tcp_disable_trace_route, test_config_tcp_port,
test_config_icmp_disable_trace_route,
test_config_http_port, test_config_http_method,
test_config_http_path, test_config_http_valid_status_codes, test_config_http_prefer_https
]
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_group = _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
[], [], [])
# deal with endpoint
if any([endpoint_source_address, endpoint_source_resource_id]):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address)
connection_monitor.endpoints.append(src_endpoint)
if any([endpoint_dest_address, endpoint_dest_resource_id]):
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address)
connection_monitor.endpoints.append(dst_endpoint)
new_test_group.sources.append(endpoint_source_name)
new_test_group.destinations.append(endpoint_dest_name)
# deal with test configuration
if any(new_test_configuration_creation_requirements):
# pass the optional settings by keyword: this command has no test_config_tcp_port_behavior
# argument, so purely positional arguments after tcp_port would shift onto the wrong parameters
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
                                                                   test_config_name,
                                                                   test_config_frequency,
                                                                   test_config_protocol,
                                                                   test_config_threshold_failed_percent,
                                                                   test_config_threshold_round_trip_time,
                                                                   test_config_preferred_ip_version,
                                                                   tcp_port=test_config_tcp_port,
                                                                   tcp_disable_trace_route=test_config_tcp_disable_trace_route,
                                                                   icmp_disable_trace_route=test_config_icmp_disable_trace_route,
                                                                   http_port=test_config_http_port,
                                                                   http_method=test_config_http_method,
                                                                   http_path=test_config_http_path,
                                                                   http_valid_status_codes=test_config_http_valid_status_codes,
                                                                   http_prefer_https=test_config_http_prefer_https)
connection_monitor.test_configurations.append(test_config)
new_test_group.test_configurations.append(test_config_name)
connection_monitor.test_groups.append(new_test_group)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
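# Removing a test group below also prunes endpoints and test configurations that are no longer
# referenced by any remaining test group, keeping the connection monitor self-consistent.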
def remove_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_groups, removed_test_group = [], None
for t in connection_monitor.test_groups:
if t.name == name:
removed_test_group = t
else:
new_test_groups.append(t)
if removed_test_group is None:
raise CLIError('test group "{}" does not exist'.format(name))
connection_monitor.test_groups = new_test_groups
# deal with endpoints which are only referenced by this removed test group
removed_endpoints = []
for e in removed_test_group.sources + removed_test_group.destinations:
tmp = [t for t in connection_monitor.test_groups if (e in t.sources or e in t.destinations)]
if not tmp:
removed_endpoints.append(e)
connection_monitor.endpoints = [e for e in connection_monitor.endpoints if e.name not in removed_endpoints]
# deal with test configurations which are only referenced by this removed test group
removed_test_configurations = []
for c in removed_test_group.test_configurations:
tmp = [t for t in connection_monitor.test_groups if c in t.test_configurations]
if not tmp:
removed_test_configurations.append(c)
connection_monitor.test_configurations = [c for c in connection_monitor.test_configurations
if c.name not in removed_test_configurations]
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for t in connection_monitor.test_groups:
if t.name == name:
return t
raise CLIError('unknown test group: {}'.format(name))
def list_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_groups
def add_nw_connection_monitor_v2_output(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
out_type,
workspace_id=None):
output = _create_nw_connection_monitor_v2_output(cmd, out_type, workspace_id)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
if connection_monitor.outputs is None:
connection_monitor.outputs = []
connection_monitor.outputs.append(output)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.outputs = []
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def list_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.outputs
def show_topology_watcher(cmd, client, resource_group_name, network_watcher_name, target_resource_group_name=None,
target_vnet=None, target_subnet=None): # pylint: disable=unused-argument
TopologyParameters = cmd.get_models('TopologyParameters')
return client.get_topology(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=TopologyParameters(
target_resource_group_name=target_resource_group_name,
target_virtual_network=target_vnet,
target_subnet=target_subnet
))
def check_nw_connectivity(cmd, client, watcher_rg, watcher_name, source_resource, source_port=None,
dest_resource=None, dest_port=None, dest_address=None,
resource_group_name=None, protocol=None, method=None, headers=None, valid_status_codes=None):
ConnectivitySource, ConnectivityDestination, ConnectivityParameters, ProtocolConfiguration, HTTPConfiguration = \
cmd.get_models(
'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ProtocolConfiguration',
'HTTPConfiguration')
params = ConnectivityParameters(
source=ConnectivitySource(resource_id=source_resource, port=source_port),
destination=ConnectivityDestination(resource_id=dest_resource, address=dest_address, port=dest_port),
protocol=protocol
)
if any([method, headers, valid_status_codes]):
params.protocol_configuration = ProtocolConfiguration(http_configuration=HTTPConfiguration(
method=method,
headers=headers,
valid_status_codes=valid_status_codes
))
return client.begin_check_connectivity(watcher_rg, watcher_name, params)
def check_nw_ip_flow(cmd, client, vm, watcher_rg, watcher_name, direction, protocol, local, remote,
resource_group_name=None, nic=None, location=None):
VerificationIPFlowParameters = cmd.get_models('VerificationIPFlowParameters')
try:
local_ip_address, local_port = local.split(':')
remote_ip_address, remote_port = remote.split(':')
except ValueError:
    raise CLIError("usage error: the format of '--local' and '--remote' should be like x.x.x.x:port")
if not is_valid_resource_id(vm):
if not resource_group_name:
raise CLIError("usage error: --vm NAME --resource-group NAME | --vm ID")
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
if not resource_group_name:
raise CLIError("usage error: --nic NAME --resource-group NAME | --nic ID")
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_verify_ip_flow(
watcher_rg, watcher_name,
VerificationIPFlowParameters(
target_resource_id=vm, direction=direction, protocol=protocol, local_port=local_port,
remote_port=remote_port, local_ip_address=local_ip_address,
remote_ip_address=remote_ip_address, target_nic_resource_id=nic))
def show_nw_next_hop(cmd, client, resource_group_name, vm, watcher_rg, watcher_name,
source_ip, dest_ip, nic=None, location=None):
NextHopParameters = cmd.get_models('NextHopParameters')
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_get_next_hop(
watcher_rg, watcher_name, NextHopParameters(target_resource_id=vm,
source_ip_address=source_ip,
destination_ip_address=dest_ip,
target_nic_resource_id=nic))
def show_nw_security_view(cmd, client, resource_group_name, vm, watcher_rg, watcher_name, location=None):
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
security_group_view_parameters = cmd.get_models('SecurityGroupViewParameters')(target_resource_id=vm)
return client.begin_get_vm_security_rules(watcher_rg, watcher_name, security_group_view_parameters)
def create_nw_packet_capture(cmd, client, resource_group_name, capture_name, vm,
watcher_rg, watcher_name, location=None,
storage_account=None, storage_path=None, file_path=None,
capture_size=None, capture_limit=None, time_limit=None, filters=None):
PacketCapture, PacketCaptureStorageLocation = cmd.get_models('PacketCapture', 'PacketCaptureStorageLocation')
storage_settings = PacketCaptureStorageLocation(storage_id=storage_account,
storage_path=storage_path, file_path=file_path)
capture_params = PacketCapture(target=vm, storage_location=storage_settings,
bytes_to_capture_per_packet=capture_size,
total_bytes_per_session=capture_limit, time_limit_in_seconds=time_limit,
filters=filters)
return client.begin_create(watcher_rg, watcher_name, capture_name, capture_params)
def set_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, nsg, storage_account=None,
resource_group_name=None, enabled=None, retention=0, log_format=None, log_version=None,
traffic_analytics_workspace=None, traffic_analytics_interval=None,
traffic_analytics_enabled=None):
from azure.cli.core.commands import LongRunningOperation
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
config = LongRunningOperation(cmd.cli_ctx)(client.begin_get_flow_log_status(watcher_rg,
watcher_name,
flowlog_status_parameters))
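    # Drop an empty traffic-analytics block (no workspace id configured) so the update below
    # starts from a clean configuration.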
try:
if not config.flow_analytics_configuration.network_watcher_flow_analytics_configuration.workspace_id:
config.flow_analytics_configuration = None
except AttributeError:
config.flow_analytics_configuration = None
with cmd.update_context(config) as c:
c.set_param('enabled', enabled if enabled is not None else config.enabled)
c.set_param('storage_id', storage_account or config.storage_id)
if retention is not None:
config.retention_policy = {
'days': retention,
'enabled': int(retention) > 0
}
if cmd.supported_api_version(min_api='2018-10-01') and (log_format or log_version):
config.format = {
'type': log_format,
'version': log_version
}
if cmd.supported_api_version(min_api='2018-10-01') and \
any([traffic_analytics_workspace is not None, traffic_analytics_enabled is not None]):
workspace = None
if traffic_analytics_workspace:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not config.flow_analytics_configuration:
# must create whole object
if not workspace:
raise CLIError('usage error (analytics not already configured): --workspace NAME_OR_ID '
'[--enabled {true|false}]')
if traffic_analytics_enabled is None:
traffic_analytics_enabled = True
config.flow_analytics_configuration = {
'network_watcher_flow_analytics_configuration': {
'enabled': traffic_analytics_enabled,
'workspace_id': workspace.properties['customerId'],
'workspace_region': workspace.location,
'workspace_resource_id': traffic_analytics_workspace,
'traffic_analytics_interval': traffic_analytics_interval
}
}
else:
# pylint: disable=line-too-long
with cmd.update_context(config.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
# update object
c.set_param('enabled', traffic_analytics_enabled)
if traffic_analytics_workspace == "":
config.flow_analytics_configuration = None
elif workspace:
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', traffic_analytics_workspace)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return client.begin_set_flow_log_configuration(watcher_rg, watcher_name, config)
# resource_group_name + nsg identify the legacy, NSG-scoped flow log (old output);
# location + flow_log_name identify the newer flow log resource (new output).
def show_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, location=None, resource_group_name=None, nsg=None,
flow_log_name=None):
# deprecated approach to show flow log
if nsg is not None:
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
return client.begin_get_flow_log_status(watcher_rg, watcher_name, flowlog_status_parameters)
# new approach to show flow log
from ._client_factory import cf_flow_logs
client = cf_flow_logs(cmd.cli_ctx, None)
return client.get(watcher_rg, watcher_name, flow_log_name)
def create_nw_flow_log(cmd,
client,
location,
watcher_rg,
watcher_name,
flow_log_name,
nsg,
storage_account=None,
resource_group_name=None,
enabled=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
FlowLog = cmd.get_models('FlowLog')
flow_log = FlowLog(location=location,
target_resource_id=nsg,
storage_id=storage_account,
enabled=enabled,
tags=tags)
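    # Only attach a retention policy when a positive number of days was requested.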
if retention > 0:
RetentionPolicyParameters = cmd.get_models('RetentionPolicyParameters')
retention_policy = RetentionPolicyParameters(days=retention, enabled=(retention > 0))
flow_log.retention_policy = retention_policy
if log_format is not None or log_version is not None:
FlowLogFormatParameters = cmd.get_models('FlowLogFormatParameters')
format_config = FlowLogFormatParameters(type=log_format, version=log_version)
flow_log.format = format_config
if traffic_analytics_workspace is not None:
TrafficAnalyticsProperties, TrafficAnalyticsConfigurationProperties = \
cmd.get_models('TrafficAnalyticsProperties', 'TrafficAnalyticsConfigurationProperties')
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
traffic_analytics_config = TrafficAnalyticsConfigurationProperties(
enabled=traffic_analytics_enabled,
workspace_id=workspace.properties['customerId'],
workspace_region=workspace.location,
workspace_resource_id=workspace.id,
traffic_analytics_interval=traffic_analytics_interval
)
traffic_analytics = TrafficAnalyticsProperties(
network_watcher_flow_analytics_configuration=traffic_analytics_config
)
flow_log.flow_analytics_configuration = traffic_analytics
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, flow_log)
def update_nw_flow_log_getter(client, watcher_rg, watcher_name, flow_log_name):
return client.get(watcher_rg, watcher_name, flow_log_name)
def update_nw_flow_log_setter(client, watcher_rg, watcher_name, flow_log_name, parameters):
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, parameters)
def update_nw_flow_log(cmd,
instance,
location,
resource_group_name=None, # dummy parameter to let it appear in command
enabled=None,
nsg=None,
storage_account=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
with cmd.update_context(instance) as c:
c.set_param('enabled', enabled)
c.set_param('tags', tags)
c.set_param('storage_id', storage_account)
c.set_param('target_resource_id', nsg)
with cmd.update_context(instance.retention_policy) as c:
c.set_param('days', retention)
c.set_param('enabled', retention > 0)
with cmd.update_context(instance.format) as c:
c.set_param('type', log_format)
c.set_param('version', log_version)
if traffic_analytics_workspace is not None:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
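        # Create the traffic-analytics configuration object on demand before updating its fields.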
if instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration is None:
analytics_conf = cmd.get_models('TrafficAnalyticsConfigurationProperties')
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration = analytics_conf()
with cmd.update_context(
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
c.set_param('enabled', traffic_analytics_enabled)
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', workspace.id)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return instance
def list_nw_flow_log(client, watcher_rg, watcher_name, location):
return client.list(watcher_rg, watcher_name)
def delete_nw_flow_log(client, watcher_rg, watcher_name, location, flow_log_name):
return client.begin_delete(watcher_rg, watcher_name, flow_log_name)
def start_nw_troubleshooting(cmd, client, watcher_name, watcher_rg, resource, storage_account,
storage_path, resource_type=None, resource_group_name=None,
no_wait=False):
TroubleshootingParameters = cmd.get_models('TroubleshootingParameters')
params = TroubleshootingParameters(target_resource_id=resource, storage_id=storage_account,
storage_path=storage_path)
return sdk_no_wait(no_wait, client.begin_get_troubleshooting, watcher_rg, watcher_name, params)
def show_nw_troubleshooting_result(cmd, client, watcher_name, watcher_rg, resource, resource_type=None,
resource_group_name=None):
query_troubleshooting_parameters = cmd.get_models('QueryTroubleshootingParameters')(target_resource_id=resource)
return client.begin_get_troubleshooting_result(watcher_rg, watcher_name, query_troubleshooting_parameters)
def run_network_configuration_diagnostic(cmd, client, watcher_rg, watcher_name, resource,
direction=None, protocol=None, source=None, destination=None,
destination_port=None, queries=None,
resource_group_name=None, resource_type=None, parent=None):
NetworkConfigurationDiagnosticParameters, NetworkConfigurationDiagnosticProfile = \
cmd.get_models('NetworkConfigurationDiagnosticParameters', 'NetworkConfigurationDiagnosticProfile')
if not queries:
queries = [NetworkConfigurationDiagnosticProfile(
direction=direction,
protocol=protocol,
source=source,
destination=destination,
destination_port=destination_port
)]
params = NetworkConfigurationDiagnosticParameters(target_resource_id=resource, profiles=queries)
return client.begin_get_network_configuration_diagnostic(watcher_rg, watcher_name, params)
# endregion
# region CustomIpPrefix
def create_custom_ip_prefix(cmd, client, resource_group_name, custom_ip_prefix_name, location=None,
cidr=None, tags=None, zone=None, signed_message=None, authorization_message=None,
custom_ip_prefix_parent=None, no_wait=False):
CustomIpPrefix = cmd.get_models('CustomIpPrefix')
prefix = CustomIpPrefix(
location=location,
cidr=cidr,
zones=zone,
tags=tags,
signed_message=signed_message,
authorization_message=authorization_message
)
if custom_ip_prefix_parent:
try:
            # NOTE: assumes the parent prefix is referenced by name within the same resource group.
            prefix.custom_ip_prefix_parent = client.get(resource_group_name, custom_ip_prefix_parent)
        except ResourceNotFoundError:
            raise ResourceNotFoundError("Custom IP prefix parent {} doesn't exist".format(custom_ip_prefix_parent))
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, custom_ip_prefix_name, prefix)
def update_custom_ip_prefix(instance, signed_message=None, authorization_message=None, tags=None):
if tags is not None:
instance.tags = tags
if signed_message is not None:
instance.signed_message = signed_message
if authorization_message is not None:
instance.authorization_message = authorization_message
return instance
# endregion
# region PublicIPAddresses
def create_public_ip(cmd, resource_group_name, public_ip_address_name, location=None, tags=None,
allocation_method=None, dns_name=None,
idle_timeout=4, reverse_fqdn=None, version=None, sku=None, tier=None, zone=None, ip_tags=None,
public_ip_prefix=None, edge_zone=None, ip_address=None):
IPAllocationMethod, PublicIPAddress, PublicIPAddressDnsSettings, SubResource = cmd.get_models(
'IPAllocationMethod', 'PublicIPAddress', 'PublicIPAddressDnsSettings', 'SubResource')
client = network_client_factory(cmd.cli_ctx).public_ip_addresses
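    # Default the allocation method: Standard SKU requires static allocation; otherwise fall back to dynamic.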
if not allocation_method:
allocation_method = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
public_ip_args = {
'location': location,
'tags': tags,
'public_ip_allocation_method': allocation_method,
'idle_timeout_in_minutes': idle_timeout,
'ip_address': ip_address,
'dns_settings': None
}
if cmd.supported_api_version(min_api='2016-09-01'):
public_ip_args['public_ip_address_version'] = version
if cmd.supported_api_version(min_api='2017-06-01'):
public_ip_args['zones'] = zone
if cmd.supported_api_version(min_api='2017-11-01'):
public_ip_args['ip_tags'] = ip_tags
if cmd.supported_api_version(min_api='2018-07-01') and public_ip_prefix:
public_ip_args['public_ip_prefix'] = SubResource(id=public_ip_prefix)
if sku:
public_ip_args['sku'] = {'name': sku}
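    # The tier is carried on the SKU object; create a Basic SKU placeholder if none was supplied.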
if tier:
if not sku:
public_ip_args['sku'] = {'name': 'Basic'}
public_ip_args['sku'].update({'tier': tier})
public_ip = PublicIPAddress(**public_ip_args)
if dns_name or reverse_fqdn:
public_ip.dns_settings = PublicIPAddressDnsSettings(
domain_name_label=dns_name,
reverse_fqdn=reverse_fqdn)
if edge_zone:
public_ip.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_address_name, public_ip)
def update_public_ip(cmd, instance, dns_name=None, allocation_method=None, version=None,
idle_timeout=None, reverse_fqdn=None, tags=None, sku=None, ip_tags=None,
public_ip_prefix=None):
if dns_name is not None or reverse_fqdn is not None:
if instance.dns_settings:
if dns_name is not None:
instance.dns_settings.domain_name_label = dns_name
if reverse_fqdn is not None:
instance.dns_settings.reverse_fqdn = reverse_fqdn
else:
PublicIPAddressDnsSettings = cmd.get_models('PublicIPAddressDnsSettings')
instance.dns_settings = PublicIPAddressDnsSettings(domain_name_label=dns_name, fqdn=None,
reverse_fqdn=reverse_fqdn)
if allocation_method is not None:
instance.public_ip_allocation_method = allocation_method
if version is not None:
instance.public_ip_address_version = version
if idle_timeout is not None:
instance.idle_timeout_in_minutes = idle_timeout
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if ip_tags:
instance.ip_tags = ip_tags
if public_ip_prefix:
SubResource = cmd.get_models('SubResource')
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return instance
def create_public_ip_prefix(cmd, client, resource_group_name, public_ip_prefix_name, prefix_length,
version=None, location=None, tags=None, zone=None, edge_zone=None,
custom_ip_prefix_name=None):
PublicIPPrefix, PublicIPPrefixSku = cmd.get_models('PublicIPPrefix', 'PublicIPPrefixSku')
prefix = PublicIPPrefix(
location=location,
prefix_length=prefix_length,
sku=PublicIPPrefixSku(name='Standard'),
tags=tags,
zones=zone
)
if cmd.supported_api_version(min_api='2019-08-01'):
prefix.public_ip_address_version = version if version is not None else 'ipv4'
if cmd.supported_api_version(min_api='2020-06-01') and custom_ip_prefix_name:
cip_client = network_client_factory(cmd.cli_ctx).custom_ip_prefixes
try:
prefix.custom_ip_prefix = cip_client.get(resource_group_name, custom_ip_prefix_name)
except ResourceNotFoundError:
raise ResourceNotFoundError('Custom ip prefix {} doesn\'t exist.'.format(custom_ip_prefix_name))
if edge_zone:
prefix.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_prefix_name, prefix)
def update_public_ip_prefix(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region RouteFilters
def create_route_filter(cmd, client, resource_group_name, route_filter_name, location=None, tags=None):
RouteFilter = cmd.get_models('RouteFilter')
return client.begin_create_or_update(resource_group_name, route_filter_name,
RouteFilter(location=location, tags=tags))
def list_route_filters(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_route_filter_rule(cmd, client, resource_group_name, route_filter_name, rule_name, access, communities,
location=None):
RouteFilterRule = cmd.get_models('RouteFilterRule')
return client.begin_create_or_update(resource_group_name, route_filter_name, rule_name,
RouteFilterRule(access=access, communities=communities,
location=location))
# endregion
# region RouteTables
def create_route_table(cmd, resource_group_name, route_table_name, location=None, tags=None,
disable_bgp_route_propagation=None):
RouteTable = cmd.get_models('RouteTable')
ncf = network_client_factory(cmd.cli_ctx)
route_table = RouteTable(location=location, tags=tags)
if cmd.supported_api_version(min_api='2017-10-01'):
route_table.disable_bgp_route_propagation = disable_bgp_route_propagation
return ncf.route_tables.begin_create_or_update(resource_group_name, route_table_name, route_table)
def update_route_table(instance, tags=None, disable_bgp_route_propagation=None):
if tags == '':
instance.tags = None
elif tags is not None:
instance.tags = tags
if disable_bgp_route_propagation is not None:
instance.disable_bgp_route_propagation = disable_bgp_route_propagation
return instance
def create_route(cmd, resource_group_name, route_table_name, route_name, next_hop_type, address_prefix,
next_hop_ip_address=None):
Route = cmd.get_models('Route')
route = Route(next_hop_type=next_hop_type, address_prefix=address_prefix,
next_hop_ip_address=next_hop_ip_address, name=route_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.routes.begin_create_or_update(resource_group_name, route_table_name, route_name, route)
def update_route(instance, address_prefix=None, next_hop_type=None, next_hop_ip_address=None):
if address_prefix is not None:
instance.address_prefix = address_prefix
if next_hop_type is not None:
instance.next_hop_type = next_hop_type
if next_hop_ip_address is not None:
instance.next_hop_ip_address = next_hop_ip_address
return instance
# endregion
# region ServiceEndpoints
def create_service_endpoint_policy(cmd, resource_group_name, service_endpoint_policy_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
ServiceEndpointPolicy = cmd.get_models('ServiceEndpointPolicy')
policy = ServiceEndpointPolicy(tags=tags, location=location)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name, policy)
def list_service_endpoint_policies(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_service_endpoint_policy(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def create_service_endpoint_policy_definition(cmd, resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, service, service_resources,
description=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policy_definitions
ServiceEndpointPolicyDefinition = cmd.get_models('ServiceEndpointPolicyDefinition')
policy_def = ServiceEndpointPolicyDefinition(description=description, service=service,
service_resources=service_resources)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, policy_def)
def update_service_endpoint_policy_definition(instance, service=None, service_resources=None, description=None):
if service is not None:
instance.service = service
if service_resources is not None:
instance.service_resources = service_resources
if description is not None:
instance.description = description
return instance
# endregion
# region TrafficManagers
def list_traffic_manager_profiles(cmd, resource_group_name=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_traffic_manager_profile(cmd, traffic_manager_profile_name, resource_group_name,
routing_method, unique_dns_name, monitor_path=None,
monitor_port=80, monitor_protocol=MonitorProtocol.http.value,
profile_status=ProfileStatus.enabled.value,
ttl=30, tags=None, interval=None, timeout=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if monitor_path is None and monitor_protocol == 'HTTP':
monitor_path = '/'
profile = Profile(location='global', tags=tags, profile_status=profile_status,
traffic_routing_method=routing_method,
dns_config=DnsConfig(relative_name=unique_dns_name, ttl=ttl),
monitor_config=MonitorConfig(protocol=monitor_protocol,
port=monitor_port,
path=monitor_path,
interval_in_seconds=interval,
timeout_in_seconds=timeout,
tolerated_number_of_failures=max_failures,
custom_headers=monitor_custom_headers,
expected_status_code_ranges=status_code_ranges),
max_return=max_return)
return client.create_or_update(resource_group_name, traffic_manager_profile_name, profile)
def update_traffic_manager_profile(instance, profile_status=None, routing_method=None, tags=None,
monitor_protocol=None, monitor_port=None, monitor_path=None,
ttl=None, timeout=None, interval=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
if tags is not None:
instance.tags = tags
if profile_status is not None:
instance.profile_status = profile_status
if routing_method is not None:
instance.traffic_routing_method = routing_method
if ttl is not None:
instance.dns_config.ttl = ttl
if monitor_protocol is not None:
instance.monitor_config.protocol = monitor_protocol
if monitor_port is not None:
instance.monitor_config.port = monitor_port
if monitor_path == '':
instance.monitor_config.path = None
elif monitor_path is not None:
instance.monitor_config.path = monitor_path
if interval is not None:
instance.monitor_config.interval_in_seconds = interval
if timeout is not None:
instance.monitor_config.timeout_in_seconds = timeout
if max_failures is not None:
instance.monitor_config.tolerated_number_of_failures = max_failures
if monitor_custom_headers is not None:
instance.monitor_config.custom_headers = monitor_custom_headers
if status_code_ranges is not None:
instance.monitor_config.expected_status_code_ranges = status_code_ranges
if max_return is not None:
instance.max_return = max_return
# TODO: Remove workaround after https://github.com/Azure/azure-rest-api-specs/issues/1940 fixed
for endpoint in instance.endpoints:
endpoint._validation = { # pylint: disable=protected-access
'name': {'readonly': False},
'type': {'readonly': False},
}
return instance
def create_traffic_manager_endpoint(cmd, resource_group_name, profile_name, endpoint_type, endpoint_name,
target_resource_id=None, target=None,
endpoint_status=None, weight=None, priority=None,
endpoint_location=None, endpoint_monitor_status=None,
min_child_endpoints=None, geo_mapping=None,
monitor_custom_headers=None, subnets=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Endpoint
ncf = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).endpoints
endpoint = Endpoint(target_resource_id=target_resource_id, target=target,
endpoint_status=endpoint_status, weight=weight, priority=priority,
endpoint_location=endpoint_location,
endpoint_monitor_status=endpoint_monitor_status,
min_child_endpoints=min_child_endpoints,
geo_mapping=geo_mapping,
subnets=subnets,
custom_headers=monitor_custom_headers)
return ncf.create_or_update(resource_group_name, profile_name, endpoint_type, endpoint_name,
endpoint)
def update_traffic_manager_endpoint(instance, endpoint_type=None, endpoint_location=None,
endpoint_status=None, endpoint_monitor_status=None,
priority=None, target=None, target_resource_id=None,
weight=None, min_child_endpoints=None, geo_mapping=None,
subnets=None, monitor_custom_headers=None):
if endpoint_location is not None:
instance.endpoint_location = endpoint_location
if endpoint_status is not None:
instance.endpoint_status = endpoint_status
if endpoint_monitor_status is not None:
instance.endpoint_monitor_status = endpoint_monitor_status
if priority is not None:
instance.priority = priority
if target is not None:
instance.target = target
if target_resource_id is not None:
instance.target_resource_id = target_resource_id
if weight is not None:
instance.weight = weight
if min_child_endpoints is not None:
instance.min_child_endpoints = min_child_endpoints
if geo_mapping is not None:
instance.geo_mapping = geo_mapping
if subnets is not None:
instance.subnets = subnets
if monitor_custom_headers:
instance.custom_headers = monitor_custom_headers
return instance
def list_traffic_manager_endpoints(cmd, resource_group_name, profile_name, endpoint_type=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
profile = client.get(resource_group_name, profile_name)
return [e for e in profile.endpoints if not endpoint_type or e.type.endswith(endpoint_type)]
# endregion
# region VirtualNetworks
# pylint: disable=too-many-locals
def create_vnet(cmd, resource_group_name, vnet_name, vnet_prefixes='10.0.0.0/16',
subnet_name=None, subnet_prefix=None, dns_servers=None,
location=None, tags=None, vm_protection=None, ddos_protection=None,
ddos_protection_plan=None, network_security_group=None, edge_zone=None, flowtimeout=None):
AddressSpace, DhcpOptions, Subnet, VirtualNetwork, SubResource, NetworkSecurityGroup = \
cmd.get_models('AddressSpace', 'DhcpOptions', 'Subnet', 'VirtualNetwork',
'SubResource', 'NetworkSecurityGroup')
client = network_client_factory(cmd.cli_ctx).virtual_networks
tags = tags or {}
vnet = VirtualNetwork(
location=location, tags=tags,
dhcp_options=DhcpOptions(dns_servers=dns_servers),
address_space=AddressSpace(address_prefixes=(vnet_prefixes if isinstance(vnet_prefixes, list) else [vnet_prefixes]))) # pylint: disable=line-too-long
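    # API versions 2018-08-01 and later accept multiple subnet prefixes (address_prefixes);
    # older versions take only a single address_prefix.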
if subnet_name:
if cmd.supported_api_version(min_api='2018-08-01'):
vnet.subnets = [Subnet(name=subnet_name,
address_prefix=subnet_prefix[0] if len(subnet_prefix) == 1 else None,
address_prefixes=subnet_prefix if len(subnet_prefix) > 1 else None,
network_security_group=NetworkSecurityGroup(id=network_security_group)
if network_security_group else None)]
else:
vnet.subnets = [Subnet(name=subnet_name, address_prefix=subnet_prefix)]
if cmd.supported_api_version(min_api='2017-09-01'):
vnet.enable_ddos_protection = ddos_protection
vnet.enable_vm_protection = vm_protection
if cmd.supported_api_version(min_api='2018-02-01'):
vnet.ddos_protection_plan = SubResource(id=ddos_protection_plan) if ddos_protection_plan else None
if edge_zone:
vnet.extended_location = _edge_zone_model(cmd, edge_zone)
if flowtimeout is not None:
vnet.flow_timeout_in_minutes = flowtimeout
return cached_put(cmd, client.begin_create_or_update, vnet, resource_group_name, vnet_name)
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None,
ddos_protection_plan=None, flowtimeout=None):
    # Server-side validation reports a clear error message on invalid CIDRs,
    # so we don't validate them client-side.
AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource')
if vnet_prefixes and instance.address_space:
instance.address_space.address_prefixes = vnet_prefixes
elif vnet_prefixes:
instance.address_space = AddressSpace(address_prefixes=vnet_prefixes)
if dns_servers == ['']:
instance.dhcp_options.dns_servers = None
elif dns_servers and instance.dhcp_options:
instance.dhcp_options.dns_servers = dns_servers
elif dns_servers:
instance.dhcp_options = DhcpOptions(dns_servers=dns_servers)
if ddos_protection is not None:
instance.enable_ddos_protection = ddos_protection
if vm_protection is not None:
instance.enable_vm_protection = vm_protection
if ddos_protection_plan == '':
instance.ddos_protection_plan = None
elif ddos_protection_plan is not None:
instance.ddos_protection_plan = SubResource(id=ddos_protection_plan)
if flowtimeout is not None:
instance.flow_timeout_in_minutes = flowtimeout
return instance
def _set_route_table(ncf, resource_group_name, route_table, subnet):
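    # Accept the route table as a full resource ID or as a name in the given resource group;
    # an empty string detaches any existing route table.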
if route_table:
is_id = is_valid_resource_id(route_table)
rt = None
if is_id:
res_id = parse_resource_id(route_table)
rt = ncf.route_tables.get(res_id['resource_group'], res_id['name'])
else:
rt = ncf.route_tables.get(resource_group_name, route_table)
subnet.route_table = rt
elif route_table == '':
subnet.route_table = None
def create_subnet(cmd, resource_group_name, virtual_network_name, subnet_name,
address_prefix, network_security_group=None,
route_table=None, service_endpoints=None, service_endpoint_policy=None,
delegations=None, nat_gateway=None,
disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, Subnet, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-08-01'):
subnet = Subnet(
name=subnet_name,
address_prefixes=address_prefix if len(address_prefix) > 1 else None,
address_prefix=address_prefix[0] if len(address_prefix) == 1 else None
)
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
subnet.nat_gateway = SubResource(id=nat_gateway)
else:
subnet = Subnet(name=subnet_name, address_prefix=address_prefix)
if network_security_group:
subnet.network_security_group = NetworkSecurityGroup(id=network_security_group)
_set_route_table(ncf, resource_group_name, route_table, subnet)
if service_endpoints:
subnet.service_endpoints = []
for service in service_endpoints:
subnet.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy:
subnet.service_endpoint_policies = []
for policy in service_endpoint_policy:
subnet.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
subnet.delegations = delegations
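    # The disable_* flags map onto the service's 'Disabled'/'Enabled' string values; None leaves the setting untouched.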
if disable_private_endpoint_network_policies is True:
subnet.private_endpoint_network_policies = "Disabled"
if disable_private_endpoint_network_policies is False:
subnet.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies is True:
subnet.private_link_service_network_policies = "Disabled"
if disable_private_link_service_network_policies is False:
subnet.private_link_service_network_policies = "Enabled"
vnet = cached_get(cmd, ncf.virtual_networks.get, resource_group_name, virtual_network_name)
upsert_to_collection(vnet, 'subnets', subnet, 'name')
vnet = cached_put(
cmd, ncf.virtual_networks.begin_create_or_update, vnet, resource_group_name, virtual_network_name).result()
return get_property(vnet.subnets, subnet_name)
def update_subnet(cmd, instance, resource_group_name, address_prefix=None, network_security_group=None,
route_table=None, service_endpoints=None, delegations=None, nat_gateway=None,
service_endpoint_policy=None, disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'SubResource')
if address_prefix:
if cmd.supported_api_version(min_api='2018-08-01'):
instance.address_prefixes = address_prefix if len(address_prefix) > 1 else None
instance.address_prefix = address_prefix[0] if len(address_prefix) == 1 else None
else:
instance.address_prefix = address_prefix
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
instance.nat_gateway = SubResource(id=nat_gateway)
elif nat_gateway == '':
instance.nat_gateway = None
if network_security_group:
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
elif network_security_group == '': # clear it
instance.network_security_group = None
_set_route_table(network_client_factory(cmd.cli_ctx), resource_group_name, route_table, instance)
if service_endpoints == ['']:
instance.service_endpoints = None
elif service_endpoints:
instance.service_endpoints = []
for service in service_endpoints:
instance.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy == '':
instance.service_endpoint_policies = None
elif service_endpoint_policy:
instance.service_endpoint_policies = []
for policy in service_endpoint_policy:
instance.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
instance.delegations = delegations
if disable_private_endpoint_network_policies:
instance.private_endpoint_network_policies = "Disabled"
elif disable_private_endpoint_network_policies is not None:
instance.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies:
instance.private_link_service_network_policies = "Disabled"
elif disable_private_link_service_network_policies is not None:
instance.private_link_service_network_policies = "Enabled"
return instance
def list_avail_subnet_delegations(cmd, resource_group_name=None, location=None):
client = network_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.available_resource_group_delegations.list(location, resource_group_name)
return client.available_delegations.list(location)
def create_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name,
remote_virtual_network, allow_virtual_network_access=False,
allow_forwarded_traffic=False, allow_gateway_transit=False,
use_remote_gateways=False):
if not is_valid_resource_id(remote_virtual_network):
remote_virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=remote_virtual_network
)
SubResource, VirtualNetworkPeering = cmd.get_models('SubResource', 'VirtualNetworkPeering')
peering = VirtualNetworkPeering(
id=resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=virtual_network_name),
name=virtual_network_peering_name,
remote_virtual_network=SubResource(id=remote_virtual_network),
allow_virtual_network_access=allow_virtual_network_access,
allow_gateway_transit=allow_gateway_transit,
allow_forwarded_traffic=allow_forwarded_traffic,
use_remote_gateways=use_remote_gateways)
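    # The remote VNet may live in another subscription; pass it as an auxiliary subscription
    # so the request is authorized against both.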
aux_subscription = parse_resource_id(remote_virtual_network)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def update_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name, **kwargs):
peering = kwargs['parameters']
aux_subscription = parse_resource_id(peering.remote_virtual_network.id)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def list_available_ips(cmd, resource_group_name, virtual_network_name):
client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet = client.get(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name)
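    # Probe availability starting from the first address of the VNet's first prefix;
    # the service responds with a list of available IP addresses.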
start_ip = vnet.address_space.address_prefixes[0].split('/')[0]
available_ips = client.check_ip_address_availability(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
ip_address=start_ip)
return available_ips.available_ip_addresses
# endregion
# region VirtualNetworkGateways
def create_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, public_cert_data, cert_name):
VpnClientRootCertificate = cmd.get_models('VpnClientRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
raise CLIError("Must add address prefixes to gateway '{}' prior to adding a root cert."
.format(gateway_name))
config = gateway.vpn_client_configuration
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
cert = VpnClientRootCertificate(name=cert_name, public_cert_data=public_cert_data)
upsert_to_collection(config, 'vpn_client_root_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_root_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_root_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def create_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, thumbprint, cert_name):
VpnClientRevokedCertificate = cmd.get_models('VpnClientRevokedCertificate')
config, gateway, ncf = _prep_cert_create(cmd, gateway_name, resource_group_name)
cert = VpnClientRevokedCertificate(name=cert_name, thumbprint=thumbprint)
upsert_to_collection(config, 'vpn_client_revoked_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_revoked_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_revoked_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def _prep_cert_create(cmd, gateway_name, resource_group_name):
VpnClientConfiguration = cmd.get_models('VpnClientConfiguration')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
gateway.vpn_client_configuration = VpnClientConfiguration()
config = gateway.vpn_client_configuration
if not config.vpn_client_address_pool or not config.vpn_client_address_pool.address_prefixes:
raise CLIError('Address prefixes must be set on VPN gateways before adding'
' certificates. Please use "update" with --address-prefixes first.')
if config.vpn_client_revoked_certificates is None:
config.vpn_client_revoked_certificates = []
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
return config, gateway, ncf
def create_vnet_gateway(cmd, resource_group_name, virtual_network_gateway_name, public_ip_address,
virtual_network, location=None, tags=None,
no_wait=False, gateway_type=None, sku=None, vpn_type=None, vpn_gateway_generation=None,
asn=None, bgp_peering_address=None, peer_weight=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None, edge_zone=None,
nat_rule=None):
(VirtualNetworkGateway, BgpSettings, SubResource, VirtualNetworkGatewayIPConfiguration, VirtualNetworkGatewaySku,
VpnClientConfiguration, AddressSpace, VpnClientRootCertificate, VirtualNetworkGatewayNatRule,
VpnNatRuleMapping) = cmd.get_models(
'VirtualNetworkGateway', 'BgpSettings', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku', 'VpnClientConfiguration', 'AddressSpace', 'VpnClientRootCertificate',
'VirtualNetworkGatewayNatRule', 'VpnNatRuleMapping')
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
subnet = virtual_network + '/subnets/GatewaySubnet'
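    # Supplying two public IP addresses deploys the gateway in active-active mode.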
active = len(public_ip_address) == 2
vnet_gateway = VirtualNetworkGateway(
gateway_type=gateway_type, vpn_type=vpn_type, vpn_gateway_generation=vpn_gateway_generation, location=location,
tags=tags, sku=VirtualNetworkGatewaySku(name=sku, tier=sku), active=active, ip_configurations=[],
gateway_default_site=SubResource(id=gateway_default_site) if gateway_default_site else None)
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic',
name='vnetGatewayConfig{}'.format(i)
)
vnet_gateway.ip_configurations.append(ip_configuration)
if asn or bgp_peering_address or peer_weight:
vnet_gateway.enable_bgp = True
vnet_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
if any((address_prefixes, client_protocol)):
vnet_gateway.vpn_client_configuration = VpnClientConfiguration()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
vnet_gateway.vpn_client_configuration.vpn_client_protocols = client_protocol
if any((radius_secret, radius_server)) and cmd.supported_api_version(min_api='2017-06-01'):
vnet_gateway.vpn_client_configuration.radius_server_address = radius_server
vnet_gateway.vpn_client_configuration.radius_server_secret = radius_secret
        # multiple authentication types (AAD / certificate / RADIUS)
if cmd.supported_api_version(min_api='2020-11-01'):
vnet_gateway.vpn_client_configuration.vpn_authentication_types = vpn_auth_type
vnet_gateway.vpn_client_configuration.aad_tenant = aad_tenant
vnet_gateway.vpn_client_configuration.aad_issuer = aad_issuer
vnet_gateway.vpn_client_configuration.aad_audience = aad_audience
vnet_gateway.vpn_client_configuration.vpn_client_root_certificates = [
VpnClientRootCertificate(name=root_cert_name,
public_cert_data=root_cert_data)] if root_cert_data else None
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
vnet_gateway.custom_routes = AddressSpace()
vnet_gateway.custom_routes.address_prefixes = custom_routes
if edge_zone:
vnet_gateway.extended_location = _edge_zone_model(cmd, edge_zone)
if nat_rule:
vnet_gateway.nat_rules = [
VirtualNetworkGatewayNatRule(type_properties_type=rule.get('type'), mode=rule.get('mode'), name=rule.get('name'),
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('internal_mappings')] if rule.get('internal_mappings') else None,
external_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('external_mappings')] if rule.get('external_mappings') else None,
ip_configuration_id=rule.get('ip_config_id')) for rule in nat_rule]
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, virtual_network_gateway_name, vnet_gateway)
def update_vnet_gateway(cmd, instance, sku=None, vpn_type=None, tags=None,
public_ip_address=None, gateway_type=None, enable_bgp=None,
asn=None, bgp_peering_address=None, peer_weight=None, virtual_network=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None):
(AddressSpace, SubResource, VirtualNetworkGatewayIPConfiguration, VpnClientConfiguration,
VpnClientRootCertificate) = cmd.get_models('AddressSpace', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VpnClientConfiguration', 'VpnClientRootCertificate')
if any((address_prefixes, radius_server, radius_secret, client_protocol)) and not instance.vpn_client_configuration:
instance.vpn_client_configuration = VpnClientConfiguration()
if address_prefixes is not None:
if not instance.vpn_client_configuration.vpn_client_address_pool:
instance.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
if not instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes:
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = []
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
with cmd.update_context(instance.vpn_client_configuration) as c:
c.set_param('vpn_client_protocols', client_protocol)
c.set_param('radius_server_address', radius_server)
c.set_param('radius_server_secret', radius_secret)
if cmd.supported_api_version(min_api='2020-11-01'):
c.set_param('aad_tenant', aad_tenant)
c.set_param('aad_audience', aad_audience)
c.set_param('aad_issuer', aad_issuer)
c.set_param('vpn_authentication_types', vpn_auth_type)
if root_cert_data and cmd.supported_api_version(min_api='2020-11-01'):
upsert_to_collection(instance.vpn_client_configuration, 'vpn_client_root_certificates',
VpnClientRootCertificate(name=root_cert_name, public_cert_data=root_cert_data), 'name')
with cmd.update_context(instance.sku) as c:
c.set_param('name', sku)
c.set_param('tier', sku)
with cmd.update_context(instance) as c:
c.set_param('gateway_default_site', SubResource(id=gateway_default_site) if gateway_default_site else None)
c.set_param('vpn_type', vpn_type)
c.set_param('tags', tags)
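    # When a new VNet is supplied, point every IP configuration at its GatewaySubnet; otherwise keep the existing subnet.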
subnet_id = '{}/subnets/GatewaySubnet'.format(virtual_network) if virtual_network else \
instance.ip_configurations[0].subnet.id
if virtual_network is not None:
for config in instance.ip_configurations:
config.subnet.id = subnet_id
if public_ip_address is not None:
instance.ip_configurations = []
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet_id),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic', name='vnetGatewayConfig{}'.format(i))
instance.ip_configurations.append(ip_configuration)
# Update active-active/active-standby status
active = len(public_ip_address) == 2
if instance.active and not active:
logger.info('Placing gateway in active-standby mode.')
elif not instance.active and active:
logger.info('Placing gateway in active-active mode.')
instance.active = active
if gateway_type is not None:
instance.gateway_type = gateway_type
if enable_bgp is not None:
instance.enable_bgp = enable_bgp.lower() == 'true'
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
if not instance.custom_routes:
instance.custom_routes = AddressSpace()
instance.custom_routes.address_prefixes = custom_routes
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
return instance
def start_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def stop_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def generate_vpn_client(cmd, client, resource_group_name, virtual_network_gateway_name, processor_architecture=None,
authentication_method=None, radius_server_auth_certificate=None, client_root_certificates=None,
use_legacy=False):
params = cmd.get_models('VpnClientParameters')(
processor_architecture=processor_architecture
)
if cmd.supported_api_version(min_api='2017-06-01') and not use_legacy:
params.authentication_method = authentication_method
params.radius_server_auth_certificate = radius_server_auth_certificate
params.client_root_certificates = client_root_certificates
return client.begin_generate_vpn_profile(resource_group_name, virtual_network_gateway_name, params)
# legacy implementation
return client.begin_generatevpnclientpackage(resource_group_name, virtual_network_gateway_name, params)
def set_vpn_client_ipsec_policy(cmd, client, resource_group_name, virtual_network_gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
VpnClientIPsecParameters = cmd.get_models('VpnClientIPsecParameters')
vpnclient_ipsec_params = VpnClientIPsecParameters(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
return sdk_no_wait(no_wait, client.begin_set_vpnclient_ipsec_parameters, resource_group_name,
virtual_network_gateway_name, vpnclient_ipsec_params)
def disconnect_vnet_gateway_vpn_connections(cmd, client, resource_group_name, virtual_network_gateway_name,
vpn_connection_ids, no_wait=False):
P2SVpnConnectionRequest = cmd.get_models('P2SVpnConnectionRequest')
request = P2SVpnConnectionRequest(vpn_connection_ids=vpn_connection_ids)
return sdk_no_wait(no_wait, client.begin_disconnect_virtual_network_gateway_vpn_connections,
resource_group_name, virtual_network_gateway_name, request)
# endregion
# region VirtualNetworkGatewayConnections
# pylint: disable=too-many-locals
def create_vpn_connection(cmd, resource_group_name, connection_name, vnet_gateway1,
location=None, tags=None, no_wait=False, validate=False,
vnet_gateway2=None, express_route_circuit2=None, local_gateway2=None,
authorization_key=None, enable_bgp=False, routing_weight=10,
connection_type=None, shared_key=None,
use_policy_based_traffic_selectors=False,
express_route_gateway_bypass=None, ingress_nat_rule=None, egress_nat_rule=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import build_vpn_connection_resource
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
vpn_connection_resource = build_vpn_connection_resource(
cmd, connection_name, location, tags, vnet_gateway1,
vnet_gateway2 or local_gateway2 or express_route_circuit2,
connection_type, authorization_key, enable_bgp, routing_weight, shared_key,
use_policy_based_traffic_selectors, express_route_gateway_bypass, ingress_nat_rule, egress_nat_rule)
master_template.add_resource(vpn_connection_resource)
master_template.add_output('resource', connection_name, output_type='object')
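    # Shared key and authorization key are passed as secure template parameters rather than embedded in the template body.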
if shared_key:
master_template.add_secure_parameter('sharedKey', shared_key)
if authorization_key:
master_template.add_secure_parameter('authorizationKey', authorization_key)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vpn_connection_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_vpn_connection(cmd, instance, routing_weight=None, shared_key=None, tags=None,
enable_bgp=None, use_policy_based_traffic_selectors=None,
express_route_gateway_bypass=None):
with cmd.update_context(instance) as c:
c.set_param('routing_weight', routing_weight)
c.set_param('shared_key', shared_key)
c.set_param('tags', tags)
c.set_param('enable_bgp', enable_bgp)
c.set_param('express_route_gateway_bypass', express_route_gateway_bypass)
c.set_param('use_policy_based_traffic_selectors', use_policy_based_traffic_selectors)
# TODO: Remove these when issue #1615 is fixed
gateway1_id = parse_resource_id(instance.virtual_network_gateway1.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway1_id['subscription'])
instance.virtual_network_gateway1 = ncf.virtual_network_gateways.get(
gateway1_id['resource_group'], gateway1_id['name'])
if instance.virtual_network_gateway2:
gateway2_id = parse_resource_id(instance.virtual_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.virtual_network_gateway2 = ncf.virtual_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
if instance.local_network_gateway2:
gateway2_id = parse_resource_id(instance.local_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.local_network_gateway2 = ncf.local_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
return instance
def list_vpn_connections(cmd, resource_group_name, virtual_network_gateway_name=None):
if virtual_network_gateway_name:
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
return client.list_connections(resource_group_name, virtual_network_gateway_name)
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
return client.list(resource_group_name)
def start_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def stop_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def show_vpn_connection_device_config_script(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
vendor, device_family, firmware_version):
VpnDeviceScriptParameters = cmd.get_models('VpnDeviceScriptParameters')
parameters = VpnDeviceScriptParameters(
vendor=vendor,
device_family=device_family,
firmware_version=firmware_version
)
return client.vpn_device_configuration_script(resource_group_name, virtual_network_gateway_connection_name,
parameters=parameters)
# endregion
# region IPSec Policy Commands
def add_vnet_gateway_ipsec_policy(cmd, resource_group_name, gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
if gateway.vpn_client_configuration.vpn_client_ipsec_policies:
gateway.vpn_client_configuration.vpn_client_ipsec_policies.append(new_policy)
else:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = [new_policy]
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def clear_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = None
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_configuration.vpn_client_ipsec_policies
def list_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
try:
return ncf.get(resource_group_name, gateway_name).vpn_client_configuration.vpn_client_ipsec_policies
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
def add_vpn_conn_ipsec_policy(cmd, client, resource_group_name, connection_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
conn = client.get(resource_group_name, connection_name)
if conn.ipsec_policies:
conn.ipsec_policies.append(new_policy)
else:
conn.ipsec_policies = [new_policy]
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
def clear_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name, no_wait=False):
conn = client.get(resource_group_name, connection_name)
conn.ipsec_policies = None
conn.use_policy_based_traffic_selectors = False
if no_wait:
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
return LongRunningOperation(cmd.cli_ctx)(poller).ipsec_policies
def list_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name):
return client.get(resource_group_name, connection_name).ipsec_policies
def assign_vnet_gateway_aad(cmd, resource_group_name, gateway_name,
aad_tenant, aad_audience, aad_issuer, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = aad_tenant
gateway.vpn_client_configuration.aad_audience = aad_audience
gateway.vpn_client_configuration.aad_issuer = aad_issuer
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_aad(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
return gateway.vpn_client_configuration
def remove_vnet_gateway_aad(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = None
gateway.vpn_client_configuration.aad_audience = None
gateway.vpn_client_configuration.aad_issuer = None
if cmd.supported_api_version(min_api='2020-11-01'):
gateway.vpn_client_configuration.vpn_authentication_types = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def add_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, internal_mappings, external_mappings,
rule_type=None, mode=None, ip_config_id=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
VirtualNetworkGatewayNatRule, VpnNatRuleMapping = cmd.get_models('VirtualNetworkGatewayNatRule',
'VpnNatRuleMapping')
gateway.nat_rules.append(
VirtualNetworkGatewayNatRule(type_properties_type=rule_type, mode=mode, name=name,
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in internal_mappings] if internal_mappings else None,
external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in external_mappings] if external_mappings else None,
ip_configuration_id=ip_config_id))
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
return gateway.nat_rules
def remove_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
for rule in gateway.nat_rules:
if name == rule.name:
gateway.nat_rules.remove(rule)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
    raise UnrecognizedArgumentError(f'No NAT rule named "{name}" was found on gateway "{gateway_name}".')
# endregion
# region VirtualHub
def create_virtual_hub(cmd, client,
resource_group_name,
virtual_hub_name,
hosted_subnet,
public_ip_address=None,
location=None,
tags=None):
from azure.core.exceptions import HttpResponseError
from azure.cli.core.commands import LongRunningOperation
try:
client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualHub "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location,
virtual_wan=None,
sku='Standard')
vhub_poller = client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
ip_config = HubIpConfiguration(
subnet=SubResource(id=hosted_subnet),
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(
resource_group_name, virtual_hub_name, 'Default', ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
try:
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
except HttpResponseError:
pass
client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return client.get(resource_group_name, virtual_hub_name)
def virtual_hub_update_setter(client, resource_group_name, virtual_hub_name, parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, parameters)
def update_virtual_hub(cmd, instance,
tags=None,
allow_branch_to_branch_traffic=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('allow_branch_to_branch_traffic', allow_branch_to_branch_traffic)
return instance
def delete_virtual_hub(cmd, client, resource_group_name, virtual_hub_name, no_wait=False):
from azure.cli.core.commands import LongRunningOperation
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
ip_configs = list(vhub_ip_config_client.list(resource_group_name, virtual_hub_name))
if ip_configs:
ip_config = ip_configs[0] # There will always be only 1
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, ip_config.name)
LongRunningOperation(cmd.cli_ctx)(poller)
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name)
def list_virtual_hub(client, resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_virtual_hub_bgp_connection(cmd, client, resource_group_name, virtual_hub_name, connection_name,
peer_asn, peer_ip, no_wait=False):
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=connection_name, peer_asn=peer_asn, peer_ip=peer_ip)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name,
virtual_hub_name, connection_name, vhub_bgp_conn)
def virtual_hub_bgp_connection_update_setter(client, resource_group_name,
virtual_hub_name, connection_name,
parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, connection_name, parameters)
def update_virtual_hub_bgp_connection(cmd, instance, peer_asn=None, peer_ip=None):
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def delete_virtual_hub_bgp_connection(client, resource_group_name,
virtual_hub_name, connection_name, no_wait=False):
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_learned_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_learned_routes(resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_advertised_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_advertised_routes(resource_group_name, virtual_hub_name, connection_name)
# endregion
# region VirtualRouter
def create_virtual_router(cmd,
resource_group_name,
virtual_router_name,
hosted_gateway=None,
hosted_subnet=None,
location=None,
tags=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
virtual_hub_name = virtual_router_name
try:
vhub_client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualRouter "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
# for old VirtualRouter
if hosted_gateway is not None:
VirtualRouter = cmd.get_models('VirtualRouter')
virtual_router = VirtualRouter(virtual_router_asn=None,
virtual_router_ips=[],
hosted_subnet=None,
hosted_gateway=SubResource(id=hosted_gateway),
location=location,
tags=tags)
return vrouter_client.begin_create_or_update(resource_group_name, virtual_router_name, virtual_router)
# for VirtualHub
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location, virtual_wan=None, sku='Standard')
ip_config = HubIpConfiguration(subnet=SubResource(id=hosted_subnet))
from azure.cli.core.commands import LongRunningOperation
vhub_poller = vhub_client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(resource_group_name,
virtual_hub_name,
'Default',
ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
vhub_client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_getter(cmd, resource_group_name, virtual_router_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
return vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_setter(cmd, resource_group_name, virtual_router_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs':
client = network_client_factory(cmd.cli_ctx).virtual_hubs
else:
client = network_client_factory(cmd.cli_ctx).virtual_routers
# If the client is virtual_hubs,
# the virtual_router_name represents virtual_hub_name and
# the parameters represents VirtualHub
return client.begin_create_or_update(resource_group_name, virtual_router_name, parameters)
def update_virtual_router(cmd, instance, tags=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_virtual_router(cmd, resource_group_name=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
if resource_group_name is not None:
vrouters = vrouter_client.list_by_resource_group(resource_group_name)
vhubs = vhub_client.list_by_resource_group(resource_group_name)
else:
vrouters = vrouter_client.list()
vhubs = vhub_client.list()
return list(vrouters) + list(vhubs)
def show_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
item = vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
virtual_hub_name = virtual_router_name
item = vhub_client.get(resource_group_name, virtual_hub_name)
return item
def delete_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
item = vrouter_client.begin_delete(resource_group_name, virtual_router_name)
except HttpResponseError:
from azure.cli.core.commands import LongRunningOperation
virtual_hub_name = virtual_router_name
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
LongRunningOperation(cmd.cli_ctx)(poller)
item = vhub_client.begin_delete(resource_group_name, virtual_hub_name)
return item
def create_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name, peer_asn, peer_ip):
# try VirtualRouter first
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
VirtualRouterPeering = cmd.get_models('VirtualRouterPeering')
virtual_router_peering = VirtualRouterPeering(peer_asn=peer_asn, peer_ip=peer_ip)
return vrouter_peering_client.begin_create_or_update(resource_group_name,
virtual_router_name,
peering_name,
virtual_router_peering)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=peering_name, peer_asn=peer_asn, peer_ip=peer_ip)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_create_or_update(resource_group_name, virtual_hub_name,
bgp_conn_name, vhub_bgp_conn)
def virtual_router_peering_update_getter(cmd, resource_group_name, virtual_router_name, peering_name):
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
from azure.core.exceptions import HttpResponseError
try:
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def virtual_router_peering_update_setter(cmd, resource_group_name, virtual_router_name, peering_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs/bgpConnections':
client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
else:
client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
# if the client is virtual_hub_bgp_connection,
# the virtual_router_name represents virtual_hub_name and
# the peering_name represents bgp_connection_name and
# the parameters represents BgpConnection
return client.begin_create_or_update(resource_group_name, virtual_router_name, peering_name, parameters)
def update_virtual_router_peering(cmd, instance, peer_asn=None, peer_ip=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def list_virtual_router_peering(cmd, resource_group_name, virtual_router_name):
virtual_hub_name = virtual_router_name
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
try:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
vrouter_peerings = list(vrouter_peering_client.list(resource_group_name, virtual_router_name))
except HttpResponseError:
vrouter_peerings = []
virtual_hub_name = virtual_router_name
try:
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connections
vhub_bgp_connections = list(vhub_bgp_conn_client.list(resource_group_name, virtual_hub_name))
except HttpResponseError:
vhub_bgp_connections = []
return list(vrouter_peerings) + list(vhub_bgp_connections)
def show_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def delete_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except: # pylint: disable=bare-except
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.begin_delete(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_delete(resource_group_name, virtual_hub_name, bgp_conn_name)
# endregion
# region service aliases
def list_service_aliases(cmd, location, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).available_service_aliases
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name, location=location)
return client.list(location=location)
# endregion
# region bastion
def create_bastion_host(cmd, resource_group_name, bastion_host_name, virtual_network_name,
public_ip_address, location=None, subnet='AzureBastionSubnet'):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
(BastionHost,
BastionHostIPConfiguration,
SubResource) = cmd.get_models('BastionHost',
'BastionHostIPConfiguration',
'SubResource')
ip_config_name = "bastion_ip_config"
ip_configuration = BastionHostIPConfiguration(name=ip_config_name,
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip_address))
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location)
return client.begin_create_or_update(resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=bastion_host)
def list_bastion_host(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
SSH_EXTENSION_NAME = 'ssh'
SSH_EXTENSION_MODULE = 'azext_ssh.custom'
SSH_EXTENSION_VERSION = '0.1.3'
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _test_extension(extension_name):
from azure.cli.core.extension import (get_extension)
from pkg_resources import parse_version
ext = get_extension(extension_name)
if parse_version(ext.version) < parse_version(SSH_EXTENSION_VERSION):
raise CLIError('SSH Extension (version >= "{}") must be installed'.format(SSH_EXTENSION_VERSION))
def _get_ssh_path(ssh_command="ssh"):
import os
ssh_path = ssh_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
is_32bit = arch_data[0] == '32bit'
sys_path = 'SysNative' if is_32bit else 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
ssh_path = os.path.join(system32_path, "openSSH", (ssh_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run ssh from path %s", ssh_path)
if not os.path.isfile(ssh_path):
raise CLIError("Could not find " + ssh_command + ".exe. Is the OpenSSH client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for thie command. Supported platforms: Windows")
return ssh_path
def _get_rdp_path(rdp_command="mstsc"):
import os
rdp_path = rdp_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
sys_path = 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
rdp_path = os.path.join(system32_path, (rdp_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run rdp from path %s", rdp_path)
if not os.path.isfile(rdp_path):
raise CLIError("Could not find " + rdp_command + ".exe. Is the rdp client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for thie command. Supported platforms: Windows")
return rdp_path
def _get_host(username, ip):
return username + "@" + ip
def _build_args(cert_file, private_key_file):
private_key = []
certificate = []
if private_key_file:
private_key = ["-i", private_key_file]
if cert_file:
certificate = ["-o", "CertificateFile=" + cert_file]
return private_key + certificate
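# Quick illustration of the two helpers above (the values are made-up examples,
# not real defaults):
#     _get_host('azureuser', 'localhost')
#         -> 'azureuser@localhost'
#     _build_args('/tmp/id_rsa-cert.pub', '/tmp/id_rsa')
#         -> ['-i', '/tmp/id_rsa', '-o', 'CertificateFile=/tmp/id_rsa-cert.pub']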
def ssh_bastion_host(cmd, auth_type, target_resource_id, resource_group_name, bastion_host_name, resource_port=None, username=None, ssh_key=None):
_test_extension(SSH_EXTENSION_NAME)
if not resource_port:
resource_port = 22
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
if auth_type.lower() == 'password':
if username is None:
raise RequiredArgumentMissingError("Please enter username with --username.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
elif auth_type.lower() == 'aad':
azssh = _get_azext_module(SSH_EXTENSION_NAME, SSH_EXTENSION_MODULE)
public_key_file, private_key_file = azssh._check_or_create_public_private_files(None, None) # pylint: disable=protected-access
cert_file, username = azssh._get_and_write_certificate(cmd, public_key_file, private_key_file + '-cert.pub') # pylint: disable=protected-access
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(cert_file, private_key_file)
elif auth_type.lower() == 'ssh-key':
if username is None or ssh_key is None:
raise RequiredArgumentMissingError("Please enter username --username and ssh cert location --ssh-key.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(None, ssh_key)
else:
raise UnrecognizedArgumentError("Unknown auth type. Use one of password, aad or ssh-key.")
command = command + ["-p", str(tunnel_server.local_port)]
command = command + ['-o', "StrictHostKeyChecking=no", '-o', "UserKnownHostsFile=/dev/null"]
command = command + ['-o', "LogLevel=Error"]
logger.debug("Running ssh command %s", ' '.join(command))
try:
subprocess.call(command, shell=platform.system() == 'Windows')
except Exception as ex:
raise CLIInternalError(ex)
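# Example invocation (a hedged sketch: the resource names and IDs below are
# placeholders, and the exact command shape depends on how this function is
# wired into the CLI command table):
#   az network bastion ssh --name MyBastion --resource-group MyRG \
#       --target-resource-id /subscriptions/.../resourceGroups/MyRG/providers/Microsoft.Compute/virtualMachines/MyVM \
#       --auth-type ssh-key --username azureuser --ssh-key ~/.ssh/id_rsa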
def rdp_bastion_host(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port=None):
if not resource_port:
resource_port = 3389
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
command = [_get_rdp_path(), "/v:localhost:{0}".format(tunnel_server.local_port)]
logger.debug("Running rdp command %s", ' '.join(command))
subprocess.call(command, shell=platform.system() == 'Windows')
tunnel_server.cleanup()
def get_tunnel(cmd, resource_group_name, name, vm_id, resource_port, port=None):
from .tunnel import TunnelServer
client = network_client_factory(cmd.cli_ctx).bastion_hosts
bastion = client.get(resource_group_name, name)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
tunnel_server = TunnelServer(cmd.cli_ctx, 'localhost', port, bastion, vm_id, resource_port)
return tunnel_server
def create_bastion_tunnel(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port, port, timeout=None):
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port, port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
# endregion
# region security partner provider
def create_security_partner_provider(cmd, resource_group_name, security_partner_provider_name,
security_provider_name, virtual_hub, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
SecurityPartnerProvider, SubResource = cmd.get_models('SecurityPartnerProvider', 'SubResource')
security_partner_provider = SecurityPartnerProvider(security_provider_name=security_provider_name,
virtual_hub=SubResource(id=virtual_hub),
location=location,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=security_partner_provider)
def update_security_partner_provider(instance, cmd, security_provider_name=None, virtual_hub=None, tags=None):
with cmd.update_context(instance) as c:
c.set_param('security_provider_name', security_provider_name)
c.set_param('virtual_hub', virtual_hub)
c.set_param('tags', tags)
return instance
def list_security_partner_provider(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
# endregion
# region network gateway connection
def reset_shared_key(cmd, client, virtual_network_gateway_connection_name, key_length, resource_group_name=None):
ConnectionResetSharedKey = cmd.get_models('ConnectionResetSharedKey')
shared_key = ConnectionResetSharedKey(key_length=key_length)
return client.begin_reset_shared_key(resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, # pylint: disable=line-too-long
parameters=shared_key)
def update_shared_key(cmd, instance, value):
with cmd.update_context(instance) as c:
c.set_param('value', value)
return instance
# endregion
# region network virtual appliance
def create_network_virtual_appliance(cmd, client, resource_group_name, network_virtual_appliance_name,
vendor, bundled_scale_unit, market_place_version,
virtual_hub, boot_strap_configuration_blobs=None,
cloud_init_configuration_blobs=None,
cloud_init_configuration=None, asn=None,
location=None, tags=None, no_wait=False):
(NetworkVirtualAppliance,
SubResource,
VirtualApplianceSkuProperties) = cmd.get_models('NetworkVirtualAppliance',
'SubResource',
'VirtualApplianceSkuProperties')
virtual_appliance = NetworkVirtualAppliance(boot_strap_configuration_blobs=boot_strap_configuration_blobs,
cloud_init_configuration_blobs=cloud_init_configuration_blobs,
cloud_init_configuration=cloud_init_configuration,
virtual_appliance_asn=asn,
virtual_hub=SubResource(id=virtual_hub),
nva_sku=VirtualApplianceSkuProperties(
vendor=vendor,
bundled_scale_unit=bundled_scale_unit,
market_place_version=market_place_version
),
location=location,
tags=tags)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, virtual_appliance)
def update_network_virtual_appliance(instance, cmd, cloud_init_configuration=None, asn=None):
with cmd.update_context(instance) as c:
c.set_param('virtual_appliance_asn', asn)
c.set_param('cloud_init_configuration', cloud_init_configuration)
return instance
def list_network_virtual_appliance(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def create_network_virtual_appliance_site(cmd, client, resource_group_name, network_virtual_appliance_name,
site_name, address_prefix, allow=None, optimize=None, default=None,
no_wait=False):
(BreakOutCategoryPolicies,
Office365PolicyProperties,
VirtualApplianceSite) = cmd.get_models('BreakOutCategoryPolicies',
'Office365PolicyProperties',
'VirtualApplianceSite')
virtual_appliance_site = VirtualApplianceSite(address_prefix=address_prefix,
o365_policy=Office365PolicyProperties(
break_out_categories=BreakOutCategoryPolicies(
allow=allow,
optimize=optimize,
default=default
)))
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, site_name, virtual_appliance_site)
def update_network_virtual_appliance_site(instance, cmd, address_prefix, allow=None, optimize=None, default=None):
with cmd.update_context(instance) as c:
c.set_param('address_prefix', address_prefix)
c.set_param('o365_policy.break_out_categories.allow', allow)
c.set_param('o365_policy.break_out_categories.optimize', optimize)
c.set_param('o365_policy.break_out_categories.default', default)
return instance
# endregion
|
the-stack_0_27800
|
import tempfile
import os
import pickle
import random
import numpy as np
from .base_provider import ImagesDataSet, DataProvider
from .downloader import download_data_url
def augment_image(image, pad):
"""Perform zero padding, randomly crop image to original size,
maybe mirror horizontally"""
flip = random.getrandbits(1)
if flip:
image = image[:, ::-1, :]
init_shape = image.shape
new_shape = [init_shape[0] + pad * 2,
init_shape[1] + pad * 2,
init_shape[2]]
zeros_padded = np.zeros(new_shape)
zeros_padded[pad:init_shape[0] + pad, pad:init_shape[1] + pad, :] = image
# randomly crop to original size
init_x = np.random.randint(0, pad * 2)
init_y = np.random.randint(0, pad * 2)
cropped = zeros_padded[
init_x: init_x + init_shape[0],
init_y: init_y + init_shape[1],
:]
return cropped
def augment_all_images(initial_images, pad):
new_images = np.zeros(initial_images.shape)
for i in range(initial_images.shape[0]):
        new_images[i] = augment_image(initial_images[i], pad=pad)
return new_images
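# Minimal sanity-check sketch for the augmentation helpers above (illustrative
# only; the 32x32x3 shape mimics CIFAR images and is an assumption, not a requirement):
#     batch = np.random.rand(8, 32, 32, 3)
#     augmented = augment_all_images(batch, pad=4)
#     assert augmented.shape == batch.shape  # pad-then-crop preserves the shape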
class CifarDataSet(ImagesDataSet):
def __init__(self, images, labels, n_classes, shuffle, normalization,
augmentation):
"""
Args:
images: 4D numpy array
labels: 2D or 1D numpy array
n_classes: `int`, number of cifar classes - 10 or 100
shuffle: `str` or None
                None: no shuffling
                once_prior_train: shuffle the train data once before training
                every_epoch: shuffle the train data before every epoch
            normalization: `str` or None
                None: no normalization
                divide_255: divide all pixels by 255
                divide_256: divide all pixels by 256
                by_chanels: subtract the mean of every channel and divide each
                    channel's data by its standard deviation
augmentation: `bool`
"""
if shuffle is None:
self.shuffle_every_epoch = False
elif shuffle == 'once_prior_train':
self.shuffle_every_epoch = False
images, labels = self.shuffle_images_and_labels(images, labels)
elif shuffle == 'every_epoch':
self.shuffle_every_epoch = True
else:
raise Exception("Unknown type of shuffling")
self.images = images
self.labels = labels
self.n_classes = n_classes
self.augmentation = augmentation
self.normalization = normalization
self.images = self.normalize_images(images, self.normalization)
self.start_new_epoch()
def start_new_epoch(self):
self._batch_counter = 0
if self.shuffle_every_epoch:
images, labels = self.shuffle_images_and_labels(
self.images, self.labels)
else:
images, labels = self.images, self.labels
if self.augmentation:
images = augment_all_images(images, pad=4)
self.epoch_images = images
self.epoch_labels = labels
@property
def num_examples(self):
return self.labels.shape[0]
def next_batch(self, batch_size):
start = self._batch_counter * batch_size
end = (self._batch_counter + 1) * batch_size
self._batch_counter += 1
images_slice = self.epoch_images[start: end]
labels_slice = self.epoch_labels[start: end]
if images_slice.shape[0] != batch_size:
self.start_new_epoch()
return self.next_batch(batch_size)
else:
return images_slice, labels_slice
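# Minimal usage sketch for CifarDataSet with synthetic data (not part of the
# provider API, and never called here). It assumes the base-class helpers
# `shuffle_images_and_labels` and `normalize_images` behave as they are used
# above, and that 'divide_255' is an accepted normalization option, as documented.
def _example_cifar_dataset_usage():
    images = np.random.randint(0, 256, size=(100, 32, 32, 3)).astype('float64')
    labels = np.eye(10)[np.random.randint(0, 10, size=100)]  # one-hot labels
    dataset = CifarDataSet(images=images, labels=labels, n_classes=10,
                           shuffle='once_prior_train', normalization='divide_255',
                           augmentation=False)
    batch_images, batch_labels = dataset.next_batch(32)
    return batch_images.shape, batch_labels.shape  # (32, 32, 32, 3) and (32, 10)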
class CifarDataProvider(DataProvider):
"""Abstract class for cifar readers"""
def __init__(self, save_path=None, validation_set=None,
validation_split=None, shuffle=None, normalization=None,
one_hot=True, **kwargs):
"""
Args:
save_path: `str`
validation_set: `bool`.
validation_split: `float` or None
                float: fraction of the `train set` to hold out as the `validation set`.
                None: if `validation_set` is True, the `validation set` will be
                    a copy of the `test set`
            shuffle: `str` or None
                None: no shuffling
                once_prior_train: shuffle the train data once before training
                every_epoch: shuffle the train data before every epoch
            normalization: `str` or None
                None: no normalization
                divide_255: divide all pixels by 255
                divide_256: divide all pixels by 256
                by_chanels: subtract the mean of every channel and divide each
                    channel's data by its standard deviation
            one_hot: `bool`, return labels one-hot encoded
"""
self._save_path = save_path
self.one_hot = one_hot
download_data_url(self.data_url, self.save_path)
train_fnames, test_fnames = self.get_filenames(self.save_path)
# add train and validations datasets
images, labels = self.read(train_fnames)
if validation_set is not None and validation_split is not None:
split_idx = int(images.shape[0] * (1 - validation_split))
self.train = CifarDataSet(
images=images[:split_idx], labels=labels[:split_idx],
n_classes=self.n_classes, shuffle=shuffle,
normalization=normalization,
augmentation=self.data_augmentation)
self.validation = CifarDataSet(
images=images[split_idx:], labels=labels[split_idx:],
n_classes=self.n_classes, shuffle=shuffle,
normalization=normalization,
augmentation=self.data_augmentation)
else:
self.train = CifarDataSet(
images=images, labels=labels,
n_classes=self.n_classes, shuffle=shuffle,
normalization=normalization,
augmentation=self.data_augmentation)
# add test set
images, labels = self.read(test_fnames)
self.test = CifarDataSet(
images=images, labels=labels,
shuffle=None, n_classes=self.n_classes,
normalization=normalization,
augmentation=False)
if validation_set and not validation_split:
self.validation = self.test
@property
def save_path(self):
if self._save_path is None:
self._save_path = os.path.join(
tempfile.gettempdir(), 'cifar%d' % self.n_classes)
return self._save_path
@property
def data_url(self):
"""Return url for downloaded data depends on cifar class"""
data_url = ('http://www.cs.toronto.edu/'
'~kriz/cifar-%d-python.tar.gz' % self.n_classes)
return data_url
@property
def data_shape(self):
return (32, 32, 3)
@property
def n_classes(self):
return self._n_classes
def get_filenames(self, save_path):
"""Return two lists of train and test filenames for dataset"""
raise NotImplementedError
def read(self, filenames):
if self.n_classes == 10:
labels_key = b'labels'
elif self.n_classes == 100:
labels_key = b'fine_labels'
images_res = []
labels_res = []
for fname in filenames:
with open(fname, 'rb') as f:
images_and_labels = pickle.load(f, encoding='bytes')
images = images_and_labels[b'data']
images = images.reshape(-1, 3, 32, 32)
images = images.swapaxes(1, 3).swapaxes(1, 2)
images_res.append(images)
labels_res.append(images_and_labels[labels_key])
images_res = np.vstack(images_res)
labels_res = np.hstack(labels_res)
if self.one_hot:
labels_res = self.labels_to_one_hot(labels_res)
return images_res, labels_res
class Cifar10DataProvider(CifarDataProvider):
_n_classes = 10
data_augmentation = False
def get_filenames(self, save_path):
sub_save_path = os.path.join(save_path, 'cifar-10-batches-py')
train_filenames = [
os.path.join(
sub_save_path,
'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(sub_save_path, 'test_batch')]
return train_filenames, test_filenames
class Cifar100DataProvider(CifarDataProvider):
_n_classes = 100
data_augmentation = False
def get_filenames(self, save_path):
sub_save_path = os.path.join(save_path, 'cifar-100-python')
train_filenames = [os.path.join(sub_save_path, 'train')]
test_filenames = [os.path.join(sub_save_path, 'test')]
return train_filenames, test_filenames
class Cifar10AugmentedDataProvider(Cifar10DataProvider):
_n_classes = 10
data_augmentation = True
class Cifar100AugmentedDataProvider(Cifar100DataProvider):
_n_classes = 100
data_augmentation = True
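# Hedged end-to-end sketch of the providers above (never called here). Note that
# constructing a provider downloads the CIFAR-10 archive (roughly 160 MB) into a
# temp directory on first use, so this is not something to run casually.
def _example_provider_usage():
    provider = Cifar10AugmentedDataProvider(
        shuffle='every_epoch', normalization='by_chanels',
        validation_set=True, validation_split=0.1)
    images, labels = provider.train.next_batch(64)
    return images.shape, labels.shape  # (64, 32, 32, 3) and (64, 10)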
if __name__ == '__main__':
# some SANITY checks for Cifar data providers
import matplotlib.pyplot as plt
# plot some CIFAR10 images with classes
def plot_images_labels(images, labels, axes, main_label, classes):
plt.text(0, 1.5, main_label, ha='center', va='top',
transform=axes[len(axes) // 2].transAxes)
for image, label, axe in zip(images, labels, axes):
axe.imshow(image)
axe.set_title(classes[np.argmax(label)])
axe.set_axis_off()
cifar_10_idx_to_class = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
c10_provider = Cifar10DataProvider(
validation_set=True)
assert c10_provider._n_classes == 10
assert c10_provider.train.labels.shape[-1] == 10
assert len(c10_provider.train.labels.shape) == 2
assert np.all(c10_provider.validation.images == c10_provider.test.images)
assert c10_provider.train.images.shape[0] == 50000
assert c10_provider.test.images.shape[0] == 10000
# test split on validation dataset
c10_provider = Cifar10DataProvider(
one_hot=False, validation_set=True, validation_split=0.1)
assert len(c10_provider.train.labels.shape) == 1
assert not np.all(
c10_provider.validation.images == c10_provider.test.images)
assert c10_provider.train.images.shape[0] == 45000
assert c10_provider.validation.images.shape[0] == 5000
assert c10_provider.test.images.shape[0] == 10000
# test shuffling
c10_provider_not_shuffled = Cifar10DataProvider(shuffle=None)
c10_provider_shuffled = Cifar10DataProvider(shuffle='once_prior_train')
assert not np.all(
c10_provider_not_shuffled.train.images != c10_provider_shuffled.train.images)
assert np.all(
c10_provider_not_shuffled.test.images == c10_provider_shuffled.test.images)
n_plots = 10
fig, axes = plt.subplots(nrows=4, ncols=n_plots)
plot_images_labels(
c10_provider_not_shuffled.train.images[:n_plots],
c10_provider_not_shuffled.train.labels[:n_plots],
axes[0],
'Original dataset',
cifar_10_idx_to_class)
dataset = Cifar10DataProvider(normalization='divide_256')
plot_images_labels(
dataset.train.images[:n_plots],
dataset.train.labels[:n_plots],
axes[1],
'Original dataset normalized dividing by 256',
cifar_10_idx_to_class)
dataset = Cifar10DataProvider(normalization='by_chanels')
plot_images_labels(
dataset.train.images[:n_plots],
dataset.train.labels[:n_plots],
axes[2],
'Original dataset normalized by mean/std at every channel',
cifar_10_idx_to_class)
plot_images_labels(
c10_provider_shuffled.train.images[:n_plots],
c10_provider_shuffled.train.labels[:n_plots],
axes[3],
'Shuffled dataset',
cifar_10_idx_to_class)
plt.show()
text_classes_file = os.path.join(
os.path.dirname(__file__), 'cifar_100_classes.txt')
with open('/tmp/cifar100/cifar-100-python/meta', 'rb') as f:
cifar_100_meta = pickle.load(f, encoding='bytes')
cifar_100_idx_to_class = cifar_100_meta[b'fine_label_names']
c100_provider_not_shuffled = Cifar100DataProvider(shuffle=None)
assert c100_provider_not_shuffled.train.labels.shape[-1] == 100
c100_provider_shuffled = Cifar100DataProvider(shuffle='once_prior_train')
n_plots = 15
fig, axes = plt.subplots(nrows=2, ncols=n_plots)
plot_images_labels(
c100_provider_not_shuffled.train.images[:n_plots],
c100_provider_not_shuffled.train.labels[:n_plots],
axes[0],
'Original dataset',
cifar_100_idx_to_class)
plot_images_labels(
c100_provider_shuffled.train.images[:n_plots],
c100_provider_shuffled.train.labels[:n_plots],
axes[1],
'Shuffled dataset',
cifar_100_idx_to_class)
plt.show()
|
the-stack_0_27801
|
#!/usr/bin/env python3
import logging
import os
import sys
from argparse import ArgumentParser
from collections import defaultdict
from typing import Optional
from gizmos.hiccup import render
from sqlalchemy.engine.base import Connection
from sqlalchemy.sql.expression import bindparam
from sqlalchemy.sql.expression import text as sql_text
from .helpers import get_connection
"""
Usage: python3 tree.py <sqlite-database> <term-curie> > <html-file>
Creates an HTML page containing the tree structure of the term & its annotations.
HTML is written to stdout.
The sqlite-database must be created by RDFTab (https://github.com/ontodev/rdftab.rs)
and include 'statements' and 'prefix' tables.
The term-curie must use a prefix from the 'prefix' table.
"""
LOGGER = logging.getLogger("main")
logging.basicConfig(
level=logging.INFO, format="%(levelname)s - %(asctime)s - %(name)s - %(message)s"
)
# Plus sign to show a node has children
PLUS = [
"svg",
{"width": "14", "height": "14", "fill": "#808080", "viewBox": "0 0 16 16"},
[
"path",
{
"fill-rule": "evenodd",
"d": "M8 15A7 7 0 1 0 8 1a7 7 0 0 0 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z",
},
],
[
"path",
{
"fill-rule": "evenodd",
"d": "M8 4a.5.5 0 0 1 .5.5v3h3a.5.5 0 0 1 0 1h-3v3a.5.5 0 0 1-1 0v-3h-3a.5.5 0 0 1 "
+ "0-1h3v-3A.5.5 0 0 1 8 4z",
},
],
]
# Top levels to display in tree
TOP_LEVELS = {
"ontology": "Ontology",
"owl:Class": "Class",
"owl:AnnotationProperty": "Annotation Property",
"owl:DataProperty": "Data Property",
"owl:ObjectProperty": "Object Property",
"owl:Individual": "Individual",
"rdfs:Datatype": "Datatype",
}
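# Markup below is built as "hiccup-style" vectors: [tag, optional-attributes-dict,
# *children], which render() converts to HTML. For example (illustrative only):
#     ["a", {"href": "?id=owl:Class"}, "Class"]
# would render as something like: <a href="?id=owl:Class">Class</a>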
# Stylesheets & JS scripts
bootstrap_css = "https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css"
bootstrap_js = "https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js"
popper_js = "https://cdn.jsdelivr.net/npm/[email protected]/dist/umd/popper.min.js"
typeahead_js = "https://cdnjs.cloudflare.com/ajax/libs/typeahead.js/0.11.1/typeahead.bundle.min.js"
def main():
p = ArgumentParser("tree.py", description="create an HTML page to display an ontology term")
p.add_argument("db", help="Database file (.db) or configuration (.ini)")
p.add_argument("term", help="CURIE of ontology term to display", nargs="?")
p.add_argument("-t", "--title", help="Optional tree title")
p.add_argument("-p", "--predicate", action="append", help="CURIE of predicate to include")
p.add_argument("-P", "--predicates", help="File containing CURIEs of predicates to include")
p.add_argument(
"-d",
"--include-db",
help="If provided, include 'db' param in query string",
action="store_true",
)
p.add_argument("-H", "--href", help="Format string to convert CURIEs to tree links")
p.add_argument(
"-s", "--include-search", help="If provided, include a search bar", action="store_true"
)
p.add_argument(
"-c",
"--contents-only",
action="store_true",
help="If provided, render HTML without the roots",
)
args = p.parse_args()
# Maybe get predicates to include
predicate_ids = args.predicate or []
if args.predicates:
with open(args.predicates, "r") as f:
predicate_ids.extend([x.strip() for x in f.readlines()])
if args.href:
href = args.href
if "{curie}" not in href:
raise RuntimeError("The --href argument must contain '{curie}'")
else:
href = "?id={curie}"
if args.include_db:
href += "&db={db}"
treename = os.path.splitext(os.path.basename(args.db))[0]
conn = get_connection(args.db)
# Run tree and write HTML to stdout
sys.stdout.write(
tree(
conn,
treename,
args.term,
title=args.title,
href=href,
predicate_ids=predicate_ids,
include_search=args.include_search,
standalone=not args.contents_only,
)
)
def tree(
conn: Connection,
treename: str,
term_id: Optional[str],
href: str = "?id={curie}",
title: str = None,
predicate_ids: list = None,
include_search: bool = False,
standalone: bool = True,
) -> str:
"""Create an HTML/RDFa tree for the given term.
If term_id is None, create the tree for owl:Class."""
# Get the prefixes
results = conn.execute("SELECT * FROM prefix ORDER BY length(base) DESC")
all_prefixes = [(x["prefix"], x["base"]) for x in results]
ps = set()
body = []
if not term_id:
p, t = term2rdfa(conn, all_prefixes, treename, [], "owl:Class", [], title=title, href=href)
ps.update(p)
body.append(t)
# Maybe find a * in the IDs that represents all remaining predicates
predicate_ids_split = None
if predicate_ids and "*" in predicate_ids:
before = []
after = []
found = False
for pred in predicate_ids:
if pred == "*":
found = True
continue
if not found:
before.append(pred)
else:
after.append(pred)
predicate_ids_split = [before, after]
# Run for given terms if terms have not yet been filled out
if not body:
if predicate_ids and predicate_ids_split:
# If some IDs were provided with *, add the remaining predicates
# These properties go in between the before & after defined in the split
rem_predicate_ids = get_sorted_predicates(conn, exclude_ids=predicate_ids)
# Separate before & after with the remaining properties
predicate_ids = predicate_ids_split[0]
predicate_ids.extend(rem_predicate_ids)
predicate_ids.extend(predicate_ids_split[1])
elif not predicate_ids:
predicate_ids = get_sorted_predicates(conn)
query = sql_text(
"""SELECT stanza, subject, predicate, object, value, datatype, language
FROM statements WHERE stanza = :term_id"""
)
results = conn.execute(query, term_id=term_id)
stanza = []
for res in results:
stanza.append(dict(res))
p, t = term2rdfa(
conn, all_prefixes, treename, predicate_ids, term_id, stanza, title=title, href=href
)
ps.update(p)
body.append(t)
if not title:
title = treename + " Browser"
# Create the prefix element
pref_strs = []
for prefix, base in all_prefixes:
pref_strs.append(f"{prefix}: {base}")
pref_str = "\n".join(pref_strs)
body_wrapper = ["div", {"prefix": pref_str}]
if include_search:
body_wrapper.append(
[
"div",
{"class": "form-row mt-2 mb-2"},
[
"input",
{
"id": f"statements-typeahead",
"class": "typeahead form-control",
"type": "text",
"value": "",
"placeholder": "Search",
},
],
]
)
body = body_wrapper + body
# JQuery
body.append(
[
"script",
{
"src": "https://code.jquery.com/jquery-3.5.1.min.js",
"integrity": "sha256-9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0=",
"crossorigin": "anonymous",
},
]
)
if include_search:
# Add JS imports for running search
body.append(["script", {"type": "text/javascript", "src": popper_js}])
body.append(["script", {"type": "text/javascript", "src": bootstrap_js}])
body.append(["script", {"type": "text/javascript", "src": typeahead_js}])
# Custom JS for show more children
js = """function show_children() {
hidden = $('#children li:hidden').slice(0, 100);
if (hidden.length > 1) {
hidden.show();
setTimeout(show_children, 100);
} else {
console.log("DONE");
}
$('#more').hide();
}"""
# Custom JS for search bar using Typeahead
if include_search:
        # Build the href to return when you select a term
href_split = href.split("{curie}")
before = href_split[0].format(db=treename)
after = href_split[1].format(db=treename)
js_funct = f'str.push("{before}" + encodeURIComponent(obj[p]) + "{after}");'
# Build the href to return names JSON
remote = "'?text=%QUERY&format=json'"
if "db=" in href:
# Add tree name to query params
remote = f"'?db={treename}&text=%QUERY&format=json'"
js += (
"""
$('#search-form').submit(function () {
$(this)
.find('input[name]')
.filter(function () {
return !this.value;
})
.prop('name', '');
});
function jump(currentPage) {
newPage = prompt("Jump to page", currentPage);
if (newPage) {
href = window.location.href.replace("page="+currentPage, "page="+newPage);
window.location.href = href;
}
}
function configure_typeahead(node) {
if (!node.id || !node.id.endsWith("-typeahead")) {
return;
}
table = node.id.replace("-typeahead", "");
var bloodhound = new Bloodhound({
datumTokenizer: Bloodhound.tokenizers.obj.nonword('short_label', 'label', 'synonym'),
queryTokenizer: Bloodhound.tokenizers.nonword,
sorter: function(a, b) {
return a.order - b.order;
},
remote: {
url: """
+ remote
+ """,
wildcard: '%QUERY',
transform : function(response) {
return bloodhound.sorter(response);
}
}
});
$(node).typeahead({
minLength: 0,
hint: false,
highlight: true
}, {
name: table,
source: bloodhound,
display: function(item) {
if (item.label && item.short_label && item.synonym) {
return item.short_label + ' - ' + item.label + ' - ' + item.synonym;
} else if (item.label && item.short_label) {
return item.short_label + ' - ' + item.label;
} else if (item.label && item.synonym) {
return item.label + ' - ' + item.synonym;
} else if (item.short_label && item.synonym) {
return item.short_label + ' - ' + item.synonym;
} else if (item.short_label && !item.label) {
return item.short_label;
} else {
return item.label;
}
},
limit: 40
});
$(node).bind('click', function(e) {
$(node).select();
});
$(node).bind('typeahead:select', function(ev, suggestion) {
$(node).prev().val(suggestion.id);
go(table, suggestion.id);
});
$(node).bind('keypress',function(e) {
if(e.which == 13) {
go(table, $('#' + table + '-hidden').val());
}
});
}
$('.typeahead').each(function() { configure_typeahead(this); });
function go(table, value) {
q = {}
table = table.replace('_all', '');
q[table] = value
window.location = query(q);
}
function query(obj) {
var str = [];
for (var p in obj)
if (obj.hasOwnProperty(p)) {
"""
+ js_funct
+ """
}
return str.join("&");
}"""
)
body.append(["script", {"type": "text/javascript"}, js])
if standalone:
# HTML Headers & CSS
head = [
"head",
["meta", {"charset": "utf-8"}],
[
"meta",
{
"name": "viewport",
"content": "width=device-width, initial-scale=1, shrink-to-fit=no",
},
],
["link", {"rel": "stylesheet", "href": bootstrap_css, "crossorigin": "anonymous"}],
["link", {"rel": "stylesheet", "href": "../style.css"}],
["title", title],
[
"style",
"""
#annotations {
padding-left: 1em;
list-style-type: none !important;
}
#annotations ul {
padding-left: 3em;
list-style-type: circle !important;
}
#annotations ul ul {
padding-left: 2em;
list-style-type: none !important;
}
.hierarchy {
padding-left: 0em;
list-style-type: none !important;
}
.hierarchy ul {
padding-left: 1em;
list-style-type: none !important;
}
.hierarchy ul.multiple-children > li > ul {
border-left: 1px dotted #ddd;
}
.hierarchy .children {
border-left: none;
margin-left: 2em;
text-indent: -1em;
}
.hierarchy .children li::before {
content: "\2022";
color: #ddd;
display: inline-block;
width: 0em;
margin-left: -1em;
}
#nonpeptides .tt-dataset {
max-height: 300px;
overflow-y: scroll;
}
span.twitter-typeahead .tt-menu {
cursor: pointer;
}
.dropdown-menu, span.twitter-typeahead .tt-menu {
position: absolute;
top: 100%;
left: 0;
z-index: 1000;
display: none;
float: left;
min-width: 160px;
padding: 5px 0;
margin: 2px 0 0;
font-size: 1rem;
color: #373a3c;
text-align: left;
list-style: none;
background-color: #fff;
background-clip: padding-box;
border: 1px solid rgba(0, 0, 0, 0.15);
border-radius: 0.25rem; }
span.twitter-typeahead .tt-suggestion {
display: block;
width: 100%;
padding: 3px 20px;
clear: both;
font-weight: normal;
line-height: 1.5;
color: #373a3c;
text-align: inherit;
white-space: nowrap;
background: none;
border: 0; }
span.twitter-typeahead .tt-suggestion:focus,
.dropdown-item:hover,
span.twitter-typeahead .tt-suggestion:hover {
color: #2b2d2f;
text-decoration: none;
background-color: #f5f5f5; }
span.twitter-typeahead .active.tt-suggestion,
span.twitter-typeahead .tt-suggestion.tt-cursor,
span.twitter-typeahead .active.tt-suggestion:focus,
span.twitter-typeahead .tt-suggestion.tt-cursor:focus,
span.twitter-typeahead .active.tt-suggestion:hover,
span.twitter-typeahead .tt-suggestion.tt-cursor:hover {
color: #fff;
text-decoration: none;
background-color: #0275d8;
outline: 0; }
span.twitter-typeahead .disabled.tt-suggestion,
span.twitter-typeahead .disabled.tt-suggestion:focus,
span.twitter-typeahead .disabled.tt-suggestion:hover {
color: #818a91; }
span.twitter-typeahead .disabled.tt-suggestion:focus,
span.twitter-typeahead .disabled.tt-suggestion:hover {
text-decoration: none;
cursor: not-allowed;
background-color: transparent;
background-image: none;
filter: "progid:DXImageTransform.Microsoft.gradient(enabled = false)"; }
span.twitter-typeahead {
width: 100%; }
.input-group span.twitter-typeahead {
display: block !important; }
.input-group span.twitter-typeahead .tt-menu {
top: 2.375rem !important; }""",
],
]
body = ["body", {"class": "container"}, body]
html = ["html", head, body]
else:
html = body
return render(all_prefixes, html, href=href, db=treename)
def annotations2rdfa(
treename: str,
data: dict,
predicate_ids: list,
term_id: str,
stanza: list,
href: str = "?term={curie}",
) -> list:
"""Create a hiccup-style vector for the annotation on a term."""
# The subjects in the stanza that are of type owl:Axiom:
annotation_bnodes = set()
for row in stanza:
if row["predicate"] == "owl:annotatedSource":
annotation_bnodes.add(row["subject"])
# Annotations, etc. on the right-hand side for the subjects contained in
# annotation_bnodes:
annotations = defaultdict(dict)
for row in stanza:
# subject is the blank node, _:...
subject = row["subject"]
if subject not in annotation_bnodes:
continue
if subject not in annotations:
annotations[subject] = {}
predicate = row["predicate"]
obj = row["object"]
value = row["value"]
if predicate not in [
"owl:annotatedSource",
"owl:annotatedTarget",
"owl:annotatedProperty",
"rdf:type",
]:
# This is the actual axiom that we care about and contains display value
annotations[subject]["predicate"] = predicate
if obj:
annotations[subject]["object"] = obj
if value:
annotations[subject]["value"] = value
annotations[subject]["annotation"] = row
if predicate == "owl:annotatedSource":
annotations[subject]["source"] = obj
elif predicate == "owl:annotatedProperty":
annotations[subject]["target_predicate"] = obj
elif predicate == "owl:annotatedTarget":
if obj:
annotations[subject]["target_object"] = obj
if value:
annotations[subject]["target_value"] = value
spv2annotation = {}
for bnode, details in annotations.items():
source = details["source"]
target_predicate = details["target_predicate"]
target = details.get("target_object", None) or details.get("target_value", None)
if source in spv2annotation:
# list of predicate -> values on this target (combo of predicate + value)
pred2val = spv2annotation[source]
else:
pred2val = {}
if target_predicate in pred2val:
annotated_values = pred2val[target_predicate]
else:
annotated_values = {}
if target in annotated_values:
ax_annotations = annotated_values[target]
else:
ax_annotations = {}
# predicate of the annotation
ann_predicate = details["predicate"]
if ann_predicate in ax_annotations:
# values of the annotation
anns = ax_annotations[ann_predicate]
else:
anns = []
anns.append(details["annotation"])
ax_annotations[ann_predicate] = anns
annotated_values[target] = ax_annotations
pred2val[target_predicate] = annotated_values
spv2annotation[source] = pred2val
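# The resulting spv2annotation structure is a nested map, roughly (hypothetical values):
#   {source_term: {annotated_predicate: {annotated_value: {annotation_predicate: [rows, ...]}}}}
# so that build_nested() below can look up the axiom annotations attached to any
# subject-predicate-value triple in the stanza.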
# The initial hiccup, which will be filled in later:
items = ["ul", {"id": "annotations", "class": "col-md"}]
labels = data["labels"]
# s2 maps the predicates of the given term to their corresponding rows (there can be more than
# one row per predicate):
s2 = defaultdict(list)
for row in stanza:
if row["subject"] == term_id:
s2[row["predicate"]].append(row)
pcs = list(s2.keys())
# Loop through the rows of the stanza that correspond to the predicates of the given term:
for predicate in predicate_ids:
if predicate not in pcs:
continue
predicate_label = predicate
if predicate.startswith("<"):
predicate_label = predicate.lstrip("<").rstrip(">")
anchor = [
"a",
{"href": href.format(curie=predicate, db=treename)},
labels.get(predicate, predicate_label),
]
# Initialise an empty list of "o"s, i.e., hiccup representations of objects:
objs = []
for row in s2[predicate]:
# Convert the `data` map, that has entries for the tree and for a list of the labels
# corresponding to all of the curies in the stanza, into a hiccup object `o`:
o = ["li", row2o(stanza, data, row)]
# Check for axiom annotations and create nested
nest = build_nested(treename, data, labels, spv2annotation, term_id, row, [], href=href)
if nest:
o += nest
# Append the `o` to the list of `os`:
objs.append(o)
if objs:
items.append(["li", anchor, ["ul"] + objs])
return items
def build_nested(
treename: str,
data: dict,
labels: dict,
spv2annotation: dict,
source: str,
row: dict,
ele: list,
href: str = "?id={curie}",
) -> list:
"""Build a nested hiccup list of axiom annotations."""
predicate = row["predicate"]
if source in spv2annotation:
annotated_predicates = spv2annotation[source]
if predicate in annotated_predicates:
annotated_values = annotated_predicates[predicate]
target = row.get("object", None) or row.get("value", None)
if target in annotated_values:
ax_annotations = annotated_values[target]
for ann_predicate, ann_rows in ax_annotations.items():
# Build the nested list "anchor" (predicate)
anchor = [
"li",
[
"small",
[
"a",
{"href": href.format(curie=ann_predicate, db=treename)},
labels.get(ann_predicate, ann_predicate),
],
],
]
# Collect the axiom annotation objects/values
ax_os = []
for ar in ann_rows:
ax_os.append(["li", ["small", row2o([], data, ar)]])
build_nested(
treename,
data,
labels,
spv2annotation,
ar["subject"],
ar,
ax_os,
href=href,
)
ele.append(["ul", anchor, ["ul"] + ax_os])
return ele
def thing2rdfa(
conn: Connection,
all_prefixes: list,
treename: str,
predicate_ids: list,
title: str = None,
href: str = "?id={curie}",
):
"""Create a hiccup-style HTML vector for owl:Thing as the parent of all top-level terms."""
# Select all classes without parents and set them as children of owl:Thing
results = conn.execute(
"""SELECT DISTINCT subject FROM statements
WHERE subject NOT IN
(SELECT subject FROM statements
WHERE predicate = 'rdfs:subClassOf')
AND subject IN
(SELECT subject FROM statements
WHERE predicate = 'rdf:type'
AND object = 'owl:Class' AND subject NOT LIKE '_:%%');"""
)
add_children = [x["subject"] for x in results if x["subject"] != "owl:Thing"]
results = conn.execute(
"""SELECT stanza, subject, predicate, object, value, datatype, language
FROM statements WHERE stanza = 'owl:Thing'"""
)
stanza = []
for res in results:
stanza.append(dict(res))
if not stanza:
stanza = [
{
"stanza": "owl:Thing",
"subject": "owl:Thing",
"predicate": "rdf:type",
"object": "owl:Class",
"value": None,
"datatype": None,
"language": None,
}
]
return term2rdfa(
conn,
all_prefixes,
treename,
predicate_ids,
"owl:Thing",
stanza,
title=title,
add_children=add_children,
href=href,
)
def curie2iri(prefixes: list, curie: str) -> str:
"""Convert a CURIE to IRI"""
if curie.startswith("<"):
return curie.lstrip("<").rstrip(">")
elif curie.startswith("_:"):
return curie
for prefix, base in prefixes:
if curie.startswith(prefix + ":"):
return curie.replace(prefix + ":", base)
raise ValueError(f"No matching prefix for {curie}")
def get_entity_type(conn: Connection, term_id: str) -> str:
"""Get the OWL entity type for a term."""
query = sql_text(
"""SELECT object FROM statements WHERE stanza = :term_id
AND subject = :term_id AND predicate = 'rdf:type'"""
)
results = list(conn.execute(query, term_id=term_id))
if len(results) > 1:
for res in results:
if res["object"] in TOP_LEVELS:
return res["object"]
return "owl:Individual"
elif len(results) == 1:
entity_type = results[0]["object"]
if entity_type == "owl:NamedIndividual":
entity_type = "owl:Individual"
return entity_type
else:
entity_type = None
query = sql_text(
"SELECT predicate FROM statements WHERE stanza = :term_id AND subject = :term_id"
)
results = conn.execute(query, term_id=term_id)
preds = [row["predicate"] for row in results]
if "rdfs:subClassOf" in preds:
return "owl:Class"
elif "rdfs:subPropertyOf" in preds:
return "owl:AnnotationProperty"
if not entity_type:
query = sql_text("SELECT predicate FROM statements WHERE object = :term_id")
results = conn.execute(query, term_id=term_id)
preds = [row["predicate"] for row in results]
if "rdfs:subClassOf" in preds:
return "owl:Class"
elif "rdfs:subPropertyOf" in preds:
return "owl:AnnotationProperty"
return "owl:Class"
def get_hierarchy(
conn: Connection, term_id: str, entity_type: str, add_children: list = None
) -> (dict, set):
"""Return a hierarchy dictionary for a term and all its ancestors and descendants."""
# Build the hierarchy
if entity_type == "owl:Individual":
query = sql_text(
"""SELECT DISTINCT object AS parent, subject AS child FROM statements
WHERE stanza = :term_id
AND subject = :term_id
AND predicate = 'rdf:type'
AND object NOT IN ('owl:Individual', 'owl:NamedIndividual')
AND object NOT LIKE '_:%%'"""
)
results = conn.execute(query, term_id=term_id)
else:
pred = "rdfs:subPropertyOf"
if entity_type == "owl:Class":
pred = "rdfs:subClassOf"
query = sql_text(
"""WITH RECURSIVE ancestors(parent, child) AS (
VALUES (:term_id, NULL)
UNION
-- The children of the given term:
SELECT object AS parent, subject AS child
FROM statements
WHERE predicate = :pred
AND object = :term_id
UNION
-- Children of the children of the given term
SELECT object AS parent, subject AS child
FROM statements
WHERE object IN (SELECT subject FROM statements
WHERE predicate = :pred AND object = :term_id)
AND predicate = :pred
UNION
-- The non-blank parents of all of the parent terms extracted so far:
SELECT object AS parent, subject AS child
FROM statements, ancestors
WHERE ancestors.parent = statements.stanza
AND statements.predicate = :pred
AND statements.object NOT LIKE '_:%%'
)
SELECT * FROM ancestors"""
)
results = conn.execute(query, term_id=term_id, pred=pred)
results = [[x["parent"], x["child"]] for x in results]
if add_children:
results.extend([[term_id, child] for child in add_children])
hierarchy = {
entity_type: {"parents": [], "children": []},
term_id: {"parents": [], "children": []},
}
curies = set()
for res in results:
# Consider the parent column of the current row:
parent = res[0]
if not parent or parent == "owl:Thing":
continue
# If it is not null, add it to the list of all of the compact URIs described by this tree:
curies.add(parent)
# If it is not already in the tree, add a new entry for it to the tree:
if parent not in hierarchy:
hierarchy[parent] = {
"parents": [],
"children": [],
}
# Consider the child column of the current row:
child = res[1]
if not child:
continue
# If it is not null, add it to the list of all the compact URIs described by this tree:
curies.add(child)
# If the child is not already in the tree, add a new entry for it to the tree:
if child not in hierarchy:
hierarchy[child] = {
"parents": [],
"children": [],
}
# Fill in the appropriate relationships in the entries for the parent and child:
hierarchy[parent]["children"].append(child)
hierarchy[child]["parents"].append(parent)
if not hierarchy[term_id]["parents"]:
# Place cur term directly under top level entity
hierarchy[term_id]["parents"].append(entity_type)
hierarchy[entity_type]["children"].append(term_id)
# Add entity type as top level to anything without a parent
for term_id, mini_tree in hierarchy.items():
if not mini_tree["parents"]:
hierarchy[term_id]["parents"].append(entity_type)
return hierarchy, curies
def get_sorted_predicates(conn: Connection, exclude_ids: list = None) -> list:
"""Return a list of predicates IDs sorted by their label, optionally excluding some predicate
IDs. If the predicate does not have a label, use the ID as the label."""
exclude = None
if exclude_ids:
exclude = ", ".join([f"'{x}'" for x in exclude_ids])
# Retrieve all predicate IDs
results = conn.execute("SELECT DISTINCT predicate FROM statements")
all_predicate_ids = [x["predicate"] for x in results]
if exclude:
all_predicate_ids = [x for x in all_predicate_ids if x not in exclude_ids]
# Retrieve predicates with labels
query = sql_text(
"""SELECT DISTINCT subject, value
FROM statements WHERE subject IN :ap AND predicate = 'rdfs:label';"""
).bindparams(bindparam("ap", expanding=True))
results = conn.execute(query, {"ap": all_predicate_ids})
predicate_label_map = {x["subject"]: x["value"] for x in results}
# Add unlabeled predicates to map with label = ID
for p in all_predicate_ids:
if p not in predicate_label_map:
predicate_label_map[p] = p
# Return list of keys sorted by value (label)
return [k for k, v in sorted(predicate_label_map.items(), key=lambda x: x[1].lower())]
def get_ontology(conn: Connection, prefixes: list) -> (str, str):
"""Get the ontology IRI and title (or None).
:param conn: database connection
:param prefixes: list of prefix tuples (prefix, base)
:return: IRI, title or None
"""
res = conn.execute(
"SELECT subject FROM statements WHERE predicate = 'rdf:type' AND object = 'owl:Ontology'"
).fetchone()
if not res:
return None, None
iri = res["subject"]
dct = "<http://purl.org/dc/terms/title>"
for prefix, base in prefixes:
if base == "http://purl.org/dc/terms/":
dct = f"{prefix}:title"
query = sql_text(
"SELECT value FROM statements WHERE stanza = :iri AND subject = :iri AND predicate = :dct"
)
res = conn.execute(query, iri=iri, dct=dct).fetchone()
if not res:
return iri, None
return iri, res["value"]
def term2rdfa(
conn: Connection,
prefixes: list,
treename: str,
predicate_ids: list,
term_id: str,
stanza: list,
title: str = None,
add_children: list = None,
href: str = "?id={curie}",
) -> (str, str):
"""Create a hiccup-style HTML vector for the given term."""
ontology_iri, ontology_title = get_ontology(conn, prefixes)
if term_id not in TOP_LEVELS:
# Get a hierarchy under the entity type
entity_type = get_entity_type(conn, term_id)
hierarchy, curies = get_hierarchy(conn, term_id, entity_type, add_children=add_children)
else:
# Get the top-level for this entity type
entity_type = term_id
if term_id == "ontology":
hierarchy = {term_id: {"parents": [], "children": []}}
curies = set()
if ontology_iri:
curies.add(ontology_iri)
else:
pred = None
if term_id == "owl:Individual":
# No user input, safe to use f-string for query
tls = ", ".join([f"'{x}'" for x in TOP_LEVELS.keys()])
results = conn.execute(
f"""SELECT DISTINCT subject FROM statements
WHERE subject NOT IN
(SELECT subject FROM statements
WHERE predicate = 'rdf:type'
AND object NOT IN ('owl:Individual', 'owl:NamedIndividual'))
AND subject IN
(SELECT subject FROM statements
WHERE predicate = 'rdf:type' AND object NOT IN ({tls}))"""
)
elif term_id == "rdfs:Datatype":
results = conn.execute(
"""SELECT DISTINCT subject FROM statements
WHERE predicate = 'rdf:type' AND object = 'rdfs:Datatype'"""
)
else:
pred = "rdfs:subPropertyOf"
if term_id == "owl:Class":
pred = "rdfs:subClassOf"
# Select all classes without parents and set them as children of owl:Thing
query = sql_text(
"""SELECT DISTINCT subject FROM statements
WHERE subject NOT IN
(SELECT subject FROM statements
WHERE predicate = :pred
AND object != 'owl:Thing')
AND subject IN
(SELECT subject FROM statements
WHERE predicate = 'rdf:type'
AND object = :term_id AND subject NOT LIKE '_:%%'
AND subject NOT IN ('owl:Thing', 'rdf:type'));"""
)
results = conn.execute(query, pred=pred, term_id=term_id)
children = [res["subject"] for res in results]
child_children = defaultdict(set)
if pred and children:
# Get children of children for classes & properties
query = sql_text(
"""SELECT DISTINCT object AS parent, subject AS child FROM statements
WHERE predicate = :pred AND object IN :children"""
).bindparams(bindparam("pred"), bindparam("children", expanding=True))
results = conn.execute(query, {"pred": pred, "children": children})
for res in results:
p = res["parent"]
if p not in child_children:
child_children[p] = set()
child_children[p].add(res["child"])
hierarchy = {term_id: {"parents": [], "children": children}}
curies = {term_id}
for c in children:
c_children = child_children.get(c, set())
hierarchy[c] = {"parents": [term_id], "children": list(c_children)}
curies.update(c_children)
curies.add(c)
# Add all of the other compact URIs in the stanza to the set of compact URIs:
stanza.sort(key=lambda x: x["predicate"])
for row in stanza:
curies.add(row.get("subject"))
curies.add(row.get("predicate"))
curies.add(row.get("object"))
curies.discard("")
curies.discard(None)
# Get all the prefixes that are referred to by the compact URIs:
ps = set()
for curie in curies:
if not isinstance(curie, str) or len(curie) == 0 or curie[0] in ("_", "<"):
continue
prefix, local = curie.split(":", 1)
ps.add(prefix)
# Get all of the rdfs:labels corresponding to all of the compact URIs, in the form of a map
# from compact URIs to labels:
labels = {}
query = sql_text(
"""SELECT subject, value FROM statements
WHERE stanza IN :ids AND predicate = 'rdfs:label' AND value IS NOT NULL"""
).bindparams(bindparam("ids", expanding=True))
results = conn.execute(query, {"ids": list(curies)})
for res in results:
labels[res["subject"]] = res["value"]
for t, o_label in TOP_LEVELS.items():
labels[t] = o_label
if ontology_iri and ontology_title:
labels[ontology_iri] = ontology_title
obsolete = []
query = sql_text(
"""SELECT DISTINCT subject FROM statements
WHERE stanza in :ids AND predicate='owl:deprecated' AND value='true'"""
).bindparams(bindparam("ids", expanding=True))
results = conn.execute(query, {"ids": list(curies)})
for res in results:
obsolete.append(res["subject"])
# If the compact URIs in the labels map are also in the tree, then add the label info to the
# corresponding node in the tree:
for key in hierarchy.keys():
if key in labels:
hierarchy[key]["label"] = labels[key]
# Initialise a map with one entry for the tree and one for all of the labels corresponding to
# all of the compact URIs in the stanza:
data = {"labels": labels, "obsolete": obsolete, treename: hierarchy, "iri": ontology_iri}
# Determine the label to use for the given term id when generating RDFa (the term might have
# multiple labels, in which case we will just choose one and show it everywhere). This defaults
# to the term id itself, unless there is a label for the term in the stanza corresponding to the
# label for that term in the labels map:
if term_id in labels:
selected_label = labels[term_id]
else:
selected_label = term_id
label = term_id
for row in stanza:
predicate = row["predicate"]
value = row["value"]
if predicate == "rdfs:label" and value == selected_label:
label = value
break
subject = None
si = None
subject_label = None
if term_id == "ontology" and ontology_iri:
subject = ontology_iri
subject_label = data["labels"].get(ontology_iri, ontology_iri)
si = curie2iri(prefixes, subject)
elif term_id != "ontology":
subject = term_id
si = curie2iri(prefixes, subject)
subject_label = label
rdfa_tree = term2tree(data, treename, term_id, entity_type, href=href)
if not title:
title = treename + " Browser"
if (term_id in TOP_LEVELS and term_id != "ontology") or (
term_id == "ontology" and not ontology_iri
):
si = None
if ontology_iri:
si = curie2iri(prefixes, ontology_iri)
items = [
"ul",
{"id": "annotations", "class": "col-md"},
["p", {"class": "lead"}, "Hello! This is an ontology browser."],
[
"p",
"An ",
[
"a",
{"href": "https://en.wikipedia.org/wiki/Ontology_(information_science)"},
"ontology",
],
" is a terminology system designed for both humans and machines to read. Click the",
" links on the left to browse the hierarchy of terms. Terms have parent terms, ",
"child terms, annotations, and ",
[
"a",
{"href": "https://en.wikipedia.org/wiki/Web_Ontology_Language"},
"logical axioms",
],
". The page for each term is also machine-readable using ",
["a", {"href": "https://en.wikipedia.org/wiki/RDFa"}, "RDFa"],
".",
],
]
term = [
"div",
["div", {"class": "row"}, ["h2", title]],
]
if si:
# If ontology IRI, add it to the page
term.append(["div", {"class": "row"}, ["a", {"href": si}, si]])
term.append(["div", {"class": "row", "style": "padding-top: 10px;"}, rdfa_tree, items])
else:
items = annotations2rdfa(treename, data, predicate_ids, subject, stanza, href=href)
term = [
"div",
{"resource": subject},
["div", {"class": "row"}, ["h2", subject_label]],
["div", {"class": "row"}, ["a", {"href": si}, si]],
["div", {"class": "row", "style": "padding-top: 10px;"}, rdfa_tree, items],
]
return ps, term
def parent2tree(
data: dict, treename: str, selected_term, selected_children, node, href="?id={curie}"
) -> list:
"""Return a hiccup-style HTML vector of the full hierarchy for a parent node."""
cur_hierarchy = ["ul", ["li", tree_label(data, treename, selected_term), selected_children]]
if node in TOP_LEVELS:
# Parent is top-level, nothing to add
return cur_hierarchy
# Add parents to the hierarchy
i = 0
while node and i < 100:
i += 1
oc = node
object_label = tree_label(data, treename, node)
parents = data[treename][node]["parents"]
if len(parents) == 0:
# No parent
o = [
"a",
{"resource": oc, "href": href.format(curie=node, db=treename)},
object_label,
]
cur_hierarchy = ["ul", ["li", o, cur_hierarchy]]
break
parent = parents[0]
if node == parent:
# Parent is the same
o = [
"a",
{"resource": oc, "href": href.format(curie=node, db=treename)},
object_label,
]
cur_hierarchy = ["ul", ["li", o, cur_hierarchy]]
break
if parent in TOP_LEVELS:
href_ele = {"href": href.format(curie=node, db=treename)}
else:
href_ele = {
"about": parent,
"rev": "rdfs:subClassOf",
"resource": oc,
"href": href.format(curie=node, db=treename),
}
o = ["a", href_ele, object_label]
cur_hierarchy = ["ul", ["li", o, cur_hierarchy]]
node = parent
if node in TOP_LEVELS:
break
return cur_hierarchy
def term2tree(
data: dict,
treename: str,
term_id: str,
entity_type: str,
href: str = "?id={curie}",
max_children: int = 100,
) -> list:
"""Create a hiccup-style HTML hierarchy vector for the given term."""
if treename not in data or term_id not in data[treename]:
return []
term_tree = data[treename][term_id]
obsolete = data["obsolete"]
child_labels = []
obsolete_child_labels = []
for child in term_tree["children"]:
if child in obsolete:
obsolete_child_labels.append([child, data["labels"].get(child, child)])
else:
child_labels.append([child, data["labels"].get(child, child)])
child_labels.sort(key=lambda x: x[1].lower())
obsolete_child_labels.sort(key=lambda x: x[1].lower())
child_labels.extend(obsolete_child_labels)
if entity_type == "owl:Class":
predicate = "rdfs:subClassOf"
elif entity_type == "owl:Individual":
predicate = "rdf:type"
else:
predicate = "rdfs:subPropertyOf"
# Get the children for our target term
children = []
for child, label in child_labels:
if child not in data[treename]:
continue
oc = child
object_label = tree_label(data, treename, oc)
o = ["a", {"rev": predicate, "resource": oc}, object_label]
# Check for children of the child and add a plus next to label if so
if data[treename][oc]["children"]:
o.append(PLUS)
attrs = {}
if len(children) > max_children:
attrs["style"] = "display: none"
children.append(["li", attrs, o])
if len(children) == max_children:
total = len(term_tree["children"])
attrs = {"href": "javascript:show_children()"}
children.append(["li", {"id": "more"}, ["a", attrs, f"Click to show all {total} ..."]])
children = ["ul", {"id": "children"}] + children
if len(children) == 0:
children = ""
term_label = tree_label(data, treename, term_id)
# Get the parents for our target term
parents = term_tree["parents"]
if parents:
hierarchy = ["ul"]
for p in parents:
if p.startswith("_:"):
continue
hierarchy.append(parent2tree(data, treename, term_id, children.copy(), p, href=href))
else:
hierarchy = ["ul", ["li", term_label, children]]
i = 0
hierarchies = ["ul", {"id": f"hierarchy", "class": "hierarchy multiple-children col-md"}]
for t, object_label in TOP_LEVELS.items():
o = ["a", {"href": href.format(curie=t, db=treename)}, object_label]
if t == entity_type:
if term_id == entity_type:
hierarchies.append(hierarchy)
else:
hierarchies.append(["ul", ["li", o, hierarchy]])
continue
hierarchies.append(["ul", ["li", o]])
i += 1
return hierarchies
def tree_label(data: dict, treename: str, s: str) -> list:
"""Retrieve the hiccup-style vector label of a term."""
node = data[treename][s]
label = node.get("label", s)
if s in data["obsolete"]:
return ["s", label]
return label
def row2o(_stanza: list, _data: dict, _uber_row: dict) -> list:
"""Given a stanza, a map (`_data`) with entries for the tree structure of the stanza and for all
of the labels in it, and a row in the stanza, convert the object or value of the row to
hiccup-style HTML."""
def renderNonBlank(given_row: dict) -> list:
"""Renders the non-blank object from the given row"""
return [
"a",
{"rel": given_row["predicate"], "resource": given_row["object"]},
_data["labels"].get(given_row["object"], given_row["object"]),
]
def renderLiteral(given_row: dict) -> list:
"""Renders the object contained in the given row as a literal IRI"""
# Literal IRIs are enclosed in angle brackets.
iri = given_row["object"][1:-1]
return ["a", {"rel": given_row["predicate"], "href": iri}, iri]
def getOwlOperands(given_row: dict) -> list:
"""Extract all of the operands pointed to by the given row and return them as a list"""
LOGGER.debug("Finding operands for row with predicate: {}".format(given_row["predicate"]))
if not given_row["object"].startswith("_:"):
LOGGER.debug("Found non-blank operand: {}".format(given_row["object"]))
return [renderNonBlank(given_row)]
# Find the rows whose subject matches the object from the given row. In general there will
# be a few. If we find one with an rdf:type predicate then we call the appropriate function
# to render either a restriction or a class, as the case may be. Otherwise if we find a row
# with an rdf:first predicate, then if it is a blank node, it points to further operands,
# which we recursively chase and render, and similarly if the predicate is rdf:rest (which
# will always have a blank (or nil) object). If the predicate is rdf:first but the object is
# not blank, then we can render it directly.
inner_rows = [row for row in _stanza if row["subject"] == given_row["object"]]
operands = []
for inner_row in inner_rows:
inner_subj = inner_row["subject"]
inner_pred = inner_row["predicate"]
inner_obj = inner_row["object"]
LOGGER.debug(f"Found row with <s,p,o> = <{inner_subj}, {inner_pred}, {inner_obj}>")
if inner_pred == "rdf:type":
if inner_obj == "owl:Restriction":
operands.append(renderOwlRestriction(inner_rows))
break
elif inner_obj == "owl:Class":
operands.append(renderOwlClassExpression(inner_rows))
break
elif inner_pred == "rdf:rest":
if inner_obj != "rdf:nil":
operands.append(["span", {"rel": inner_pred}] + getOwlOperands(inner_row))
else:
operands.append(["span", {"rel": inner_pred, "resource": "rdf:nil"}])
LOGGER.debug(f"Returned from recursing on {inner_pred}")
elif inner_pred == "rdf:first":
if inner_obj.startswith("_:"):
LOGGER.debug(f"{inner_pred} points to a blank node, following the trail")
operands.append(["span", {"rel": inner_pred}] + getOwlOperands(inner_row))
LOGGER.debug(f"Returned from recursing on {inner_pred}")
else:
LOGGER.debug(f"Rendering non-blank object with predicate: {inner_pred}")
operands.append(renderNonBlank(inner_row))
return operands
def renderNaryRelation(class_pred: str, operands: list) -> list:
"""Render an n-ary relation using the given predicate and operands"""
def relate_ops(oplist: list, operator: str) -> list:
"""
Relate the logical operands in 'oplist' using the given operator word. E.g., if oplist
contains the logical operands: op1, op2, op3, and the operator is 'and', then an 'and'
should be rendered in between each of the logical operands.
"""
# There should always be exactly two operands (see comment below):
if len(oplist) != 2:
LOGGER.error(
"Unexpected number of operands: {} in relate_ops. Got "
"operands: {}".format(len(oplist), oplist)
)
return []
# The list that will be returned, with instances of `operator` inserted:
related_list = []
# Get the two operands and their attributes:
first_op = oplist[0]
first_op_attrs = first_op[1]
second_op = oplist[1]
second_op_attrs = second_op[1]
# Append the first operand to the related_list:
related_list.append(first_op)
# Now handle the second operand:
if (
not second_op_attrs.get("rel") == "rdf:rest"
or second_op_attrs.get("resource") == "rdf:nil"
):
# If there are no more logical operands, append the last rdf:nil span:
related_list.append(second_op)
else:
# Otherwise, logically connect the remaining ones with `operator` and recurse:
related_list += [" ", operator, " "]
related_list.append(
[second_op[0], second_op[1]] + relate_ops(second_op[2:], operator)
)
return related_list
# There should always be exactly two operands, even if, logically, there are more. Either
# they'll be literals or they'll be organised into a linked list like the following:
# [[rdf:first]
# [rdf:rest [[rdf:first]
# [rdf:rest [rdf:first]
# [rdf:rest ...
# [rdf:first]
# [rdf:rest nil]]]...]
# In this latter case we need to recurse into the rdf:rest spans to find all of the
# operands other than the first.
if len(operands) != 2:
LOGGER.error(
"Wrong number of operands ({}) to renderNaryRelation. Got class predicate: "
"{}; operands: {}".format(len(operands), class_pred, operands)
)
return ["div"]
if class_pred == "owl:intersectionOf":
operator = "and"
elif class_pred == "owl:unionOf":
operator = "or"
else:
LOGGER.error(f"Unrecognized predicate for n-ary relation: {class_pred}")
return ["div"]
owl_div = ["span", {"rel": class_pred}, " ", "("]
owl_div += relate_ops(operands, operator)
owl_div.append(")")
return owl_div
def renderUnaryRelation(class_pred: str, operands: list) -> list:
"""Render a unary relation using the given predicate and operands"""
if len(operands) != 1:
LOGGER.error(
f"Something is wrong. Wrong number of operands to '{class_pred}': {operands}"
)
return ["div"]
if class_pred == "owl:complementOf":
operator = "not"
elif class_pred == "owl:oneOf":
operator = "one of"
else:
LOGGER.error(f"Unrecognized predicate for unary relation: {class_pred}")
return ["div"]
operand = operands[0]
owl_div = ["span", {"rel": class_pred}, operator, " ", operand]
return owl_div
def renderOwlRestriction(given_rows: list) -> list:
"""Renders the OWL restriction described by the given rows"""
# OWL restrictions are represented using three rows. The first will have the predicate
# 'rdf:type' and its object should always be 'owl:Restriction'. The second row will have the
# predicate 'owl:onProperty' and its object will represent the property being restricted,
# which can be either a blank or a non-blank node. The third row will have either the
# predicate 'owl:allValuesFrom' or the predicate 'owl:someValuesFrom', which we render,
# respectively, as 'only' and 'some'. The object of this row is what the property being
# restricted is being restricted in relation to.
# E.g., in the restriction: "'has grain' some 'sodium phosphate'": 'has grain' is extracted
# via the object of the second row, while 'some' and 'sodium phosphate' are
# extracted via the predicate and object, respectively, of the third row.
rdf_type_row = [row for row in given_rows if row["predicate"] == "rdf:type"]
property_row = [row for row in given_rows if row["predicate"] == "owl:onProperty"]
target_row = [
row for row in given_rows if row["predicate"] not in ("rdf:type", "owl:onProperty")
]
for rowset in [rdf_type_row, property_row, target_row]:
if len(rowset) != 1:
LOGGER.error(f"Rows: {given_rows} do not represent a valid restriction")
return ["div"]
property_row = property_row[0]
target_row = target_row[0]
rdf_type_row = rdf_type_row[0]
if rdf_type_row["object"] != "owl:Restriction":
LOGGER.error(
"Unexpected rdf:type: '{}' found in OWL restriction".format(rdf_type_row["object"])
)
return ["div"]
target_pred = target_row["predicate"]
target_obj = target_row["object"]
LOGGER.debug("Rendering OWL restriction {} for object {}".format(target_pred, target_obj))
if target_obj.startswith("_:"):
inner_rows = [row for row in _stanza if row["subject"] == target_obj]
target_link = renderOwlClassExpression(inner_rows, target_pred)
else:
target_link = renderNonBlank(target_row)
if target_pred == "owl:someValuesFrom":
operator = "some"
elif target_pred == "owl:allValuesFrom":
operator = "only"
else:
LOGGER.error("Unrecognised predicate: {}".format(target_pred))
return ["div"]
return [
"span",
["span", {"rel": rdf_type_row["predicate"], "resource": rdf_type_row["object"]}],
[
"a",
{"rel": property_row["predicate"], "resource": property_row["object"]},
_data["labels"].get(property_row["object"], property_row["object"]),
],
" ",
operator,
target_link,
]
def renderOwlClassExpression(given_rows: list, rel: str = None) -> list:
"""Render the OWL class expression pointed to by the given row"""
# The sub-stanza corresponding to an owl:Class should have two rows. One of these points
# to the actual class referred to (either a named class or a blank node). From this row we
# get the subject, predicate, and object to render. The second row will have the object
# type, which we expect to be 'owl:Class'.
rdf_type_row = [row for row in given_rows if row["predicate"] == "rdf:type"]
class_row = [row for row in given_rows if row["predicate"].startswith("owl:")]
LOGGER.debug(f"Found rows: {rdf_type_row}, {class_row}")
rdf_type_row = rdf_type_row[0]
class_row = class_row[0]
class_subj = class_row["subject"]
class_pred = class_row["predicate"]
class_obj = class_row["object"]
# All blank class expressions will have operands, which we retrieve here:
operands = getOwlOperands(class_row)
hiccup = [
"span",
["span", {"rel": rdf_type_row["predicate"], "resource": rdf_type_row["object"]}],
]
# If `rel` is given, insert the attribute into the second position of the hiccup:
if rel:
hiccup = hiccup[:1] + [{"rel": rel}] + hiccup[1:]
LOGGER.debug(f"Rendering <s,p,o> = <{class_subj}, {class_pred}, {class_obj}>")
if class_pred in ["owl:intersectionOf", "owl:unionOf"]:
hiccup.append(renderNaryRelation(class_pred, operands))
elif class_pred in ["owl:complementOf", "owl:oneOf"]:
hiccup.append(renderUnaryRelation(class_pred, operands))
elif class_pred == "owl:onProperty":
hiccup.append(renderOwlRestriction(given_rows))
elif class_obj.startswith("<"):
hiccup.append(renderLiteral(class_row))
else:
LOGGER.warning(
f"Rendering for <s,p,o> = <{class_subj}, {class_pred}, {class_obj}> not implemented"
)
hiccup.append(["a", {"rel": class_pred}, _data["labels"].get(class_obj, class_obj)])
return hiccup
uber_subj = _uber_row["subject"]
uber_pred = _uber_row["predicate"]
uber_obj = _uber_row["object"]
LOGGER.debug(f"Called row2o on <s,p,o> = <{uber_subj}, {uber_pred}, {uber_obj}>")
if not isinstance(uber_obj, str):
if _uber_row["value"]:
LOGGER.debug("Rendering non-string object with value: {}".format(_uber_row["value"]))
return ["span", {"property": uber_pred}, _uber_row["value"]]
else:
LOGGER.error("Received non-string object with null value; returning empty div")
return ["div"]
elif uber_obj.startswith("<"):
LOGGER.debug(f"Rendering literal IRI: {uber_obj}")
return renderLiteral(_uber_row)
elif uber_obj.startswith("_:"):
LOGGER.debug(
f"Rendering triple with blank object: <s,p,o> = <{uber_subj}, {uber_pred}, {uber_obj}>"
)
inner_rows = [row for row in _stanza if row["subject"] == uber_obj]
object_type = [row for row in inner_rows if row["predicate"] == "rdf:type"]
if len(object_type) != 1:
LOGGER.warning(f"Wrong number of object types found for {uber_obj}: {object_type}")
object_type = object_type[0]["object"] if len(object_type) > 0 else None
if object_type == "owl:Class":
LOGGER.debug(f"Rendering OWL class pointed to by {uber_obj}")
return ["span", {"rel": uber_pred}, renderOwlClassExpression(inner_rows)]
elif object_type == "owl:Restriction":
LOGGER.debug(f"Rendering OWL restriction pointed to by {uber_obj}")
return ["span", {"rel": uber_pred}, renderOwlRestriction(inner_rows)]
else:
if not object_type:
LOGGER.warning(f"Could not determine object type for {uber_pred}")
else:
LOGGER.warning(f"Unrecognised object type: {object_type} for predicate {uber_pred}")
return ["span", {"property": uber_pred}, uber_obj]
else:
LOGGER.debug(
f"Rendering non-blank triple: <s,p,o> = <{uber_subj}, {uber_pred}, {uber_obj}>"
)
return renderNonBlank(_uber_row)
if __name__ == "__main__":
main()
|
the-stack_0_27802
|
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with ``Future`` objects.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package, and also adopted (in a
slightly different form) in Python 3.4's `asyncio` package. This
package defines a ``Future`` class that is an alias for `asyncio.Future`
when available, and a compatible implementation for older versions of
Python. It also includes some utility functions for interacting with
``Future`` objects.
While this package is an important part of Tornado's internal
implementation, applications rarely need to interact with it
directly.
"""
from __future__ import absolute_import, division, print_function
import functools
import platform
import textwrap
import traceback
import sys
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer, is_finalizing
try:
from concurrent import futures
except ImportError:
futures = None
try:
import asyncio
except ImportError:
asyncio = None
try:
import typing
except ImportError:
typing = None
# Can the garbage collector handle cycles that include __del__ methods?
# This is true in cpython beginning with version 3.4 (PEP 442).
_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
sys.version_info >= (3, 4))
class ReturnValueIgnoredError(Exception):
pass
# This class and associated code in the future object is derived
# from the Trollius project, a backport of asyncio to Python 2.x - 3.x
class _TracebackLogger(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _TracebackLogger, and
then the _TracebackLogger would be included in a cycle, which is
what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since a Future usually has at least
one callback (typically set by 'yield From'), and that callback
usually extracts the exception, the need to format the exception
rarely arises.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ('exc_info', 'formatted_tb')
def __init__(self, exc_info):
self.exc_info = exc_info
self.formatted_tb = None
def activate(self):
exc_info = self.exc_info
if exc_info is not None:
self.exc_info = None
self.formatted_tb = traceback.format_exception(*exc_info)
def clear(self):
self.exc_info = None
self.formatted_tb = None
def __del__(self, is_finalizing=is_finalizing):
if not is_finalizing() and self.formatted_tb:
app_log.error('Future exception was never retrieved: %s',
''.join(self.formatted_tb).rstrip())
class Future(object):
"""Placeholder for an asynchronous result.
A ``Future`` encapsulates the result of an asynchronous
operation. In synchronous applications ``Futures`` are used
to wait for the result from a thread or process pool; in
Tornado they are normally used with `.IOLoop.add_future` or by
yielding them in a `.gen.coroutine`.
`tornado.concurrent.Future` is an alias for `asyncio.Future` when
that package is available (Python 3.4+). Unlike
`concurrent.futures.Future`, the ``Futures`` used by Tornado and
`asyncio` are not thread-safe (and therefore faster for use with
single-threaded event loops).
In addition to ``exception`` and ``set_exception``, Tornado's
``Future`` implementation supports storing an ``exc_info`` triple
to support better tracebacks on Python 2. To set an ``exc_info``
triple, use `future_set_exc_info`, and to retrieve one, call
`result()` (which will raise it).
.. versionchanged:: 4.0
`tornado.concurrent.Future` is always a thread-unsafe ``Future``
with support for the ``exc_info`` methods. Previously it would
be an alias for the thread-safe `concurrent.futures.Future`
if that package was available and fall back to the thread-unsafe
implementation if it was not.
.. versionchanged:: 4.1
If a `.Future` contains an error but that error is never observed
(by calling ``result()``, ``exception()``, or ``exc_info()``),
a stack trace will be logged when the `.Future` is garbage collected.
This normally indicates an error in the application, but in cases
where it results in undesired logging it may be necessary to
suppress the logging by ensuring that the exception is observed:
``f.add_done_callback(lambda f: f.exception())``.
.. versionchanged:: 5.0
This class was previously available under the name
``TracebackFuture``. This name, which was deprecated since
version 4.0, has been removed. When `asyncio` is available
``tornado.concurrent.Future`` is now an alias for
`asyncio.Future`. Like `asyncio.Future`, callbacks are now
always scheduled on the `.IOLoop` and are never run
synchronously.
"""
def __init__(self):
self._done = False
self._result = None
self._exc_info = None
self._log_traceback = False # Used for Python >= 3.4
self._tb_logger = None # Used for Python <= 3.3
self._callbacks = []
# Implement the Python 3.5 Awaitable protocol if possible
# (we can't use return and yield together until py33).
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
def __await__(self):
return (yield self)
"""))
else:
# Py2-compatible version for use with cython.
def __await__(self):
result = yield self
# StopIteration doesn't take args before py33,
# but Cython recognizes the args tuple.
e = StopIteration()
e.args = (result,)
raise e
def cancel(self):
"""Cancel the operation, if possible.
Tornado ``Futures`` do not support cancellation, so this method always
returns False.
"""
return False
def cancelled(self):
"""Returns True if the operation has been cancelled.
Tornado ``Futures`` do not support cancellation, so this method
always returns False.
"""
return False
def running(self):
"""Returns True if this operation is currently running."""
return not self._done
def done(self):
"""Returns True if the future has finished running."""
return self._done
def _clear_tb_log(self):
self._log_traceback = False
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
def result(self, timeout=None):
"""If the operation succeeded, return its result. If it failed,
re-raise its exception.
This method takes a ``timeout`` argument for compatibility with
`concurrent.futures.Future` but it is an error to call it
before the `Future` is done, so the ``timeout`` is never used.
"""
self._clear_tb_log()
if self._result is not None:
return self._result
if self._exc_info is not None:
try:
raise_exc_info(self._exc_info)
finally:
self = None
self._check_done()
return self._result
def exception(self, timeout=None):
"""If the operation raised an exception, return the `Exception`
object. Otherwise returns None.
This method takes a ``timeout`` argument for compatibility with
`concurrent.futures.Future` but it is an error to call it
before the `Future` is done, so the ``timeout`` is never used.
"""
self._clear_tb_log()
if self._exc_info is not None:
return self._exc_info[1]
else:
self._check_done()
return None
def add_done_callback(self, fn):
"""Attaches the given callback to the `Future`.
It will be invoked with the `Future` as its argument when the Future
has finished running and its result is available. In Tornado
consider using `.IOLoop.add_future` instead of calling
`add_done_callback` directly.
"""
if self._done:
from tornado.ioloop import IOLoop
IOLoop.current().add_callback(fn, self)
else:
self._callbacks.append(fn)
def set_result(self, result):
"""Sets the result of a ``Future``.
It is undefined to call any of the ``set`` methods more than once
on the same object.
"""
self._result = result
self._set_done()
def set_exception(self, exception):
"""Sets the exception of a ``Future.``"""
self.set_exc_info(
(exception.__class__,
exception,
getattr(exception, '__traceback__', None)))
def exc_info(self):
"""Returns a tuple in the same format as `sys.exc_info` or None.
.. versionadded:: 4.0
"""
self._clear_tb_log()
return self._exc_info
def set_exc_info(self, exc_info):
"""Sets the exception information of a ``Future.``
Preserves tracebacks on Python 2.
.. versionadded:: 4.0
"""
self._exc_info = exc_info
self._log_traceback = True
if not _GC_CYCLE_FINALIZERS:
self._tb_logger = _TracebackLogger(exc_info)
try:
self._set_done()
finally:
# Activate the logger after all callbacks have had a
# chance to call result() or exception().
if self._log_traceback and self._tb_logger is not None:
self._tb_logger.activate()
self._exc_info = exc_info
def _check_done(self):
if not self._done:
raise Exception("DummyFuture does not support blocking for results")
def _set_done(self):
self._done = True
if self._callbacks:
from tornado.ioloop import IOLoop
loop = IOLoop.current()
for cb in self._callbacks:
loop.add_callback(cb, self)
self._callbacks = None
# On Python 3.3 or older, objects with a destructor part of a reference
# cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
# the PEP 442.
if _GC_CYCLE_FINALIZERS:
def __del__(self, is_finalizing=is_finalizing):
if is_finalizing() or not self._log_traceback:
# set_exception() was not called, or result() or exception()
# has consumed the exception
return
tb = traceback.format_exception(*self._exc_info)
app_log.error('Future %r exception was never retrieved: %s',
self, ''.join(tb).rstrip())
if asyncio is not None:
Future = asyncio.Future # noqa
if futures is None:
FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]]
else:
FUTURES = (futures.Future, Future)
def is_future(x):
return isinstance(x, FUTURES)
class DummyExecutor(object):
def submit(self, fn, *args, **kwargs):
future = Future()
try:
future_set_result_unless_cancelled(future, fn(*args, **kwargs))
except Exception:
future_set_exc_info(future, sys.exc_info())
return future
def shutdown(self, wait=True):
pass
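# Module-level fallback executor: submit() runs the callable synchronously on the
# calling thread and returns an already-resolved Future holding its result or exception.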
dummy_executor = DummyExecutor()
def run_on_executor(*args, **kwargs):
"""Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
"""
def run_on_executor_decorator(fn):
executor = kwargs.get("executor", "executor")
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
callback = kwargs.pop("callback", None)
future = getattr(self, executor).submit(fn, self, *args, **kwargs)
if callback:
from tornado.ioloop import IOLoop
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
return future
return wrapper
if args and kwargs:
raise ValueError("cannot combine positional and keyword args")
if len(args) == 1:
return run_on_executor_decorator(args[0])
elif len(args) != 0:
raise ValueError("expected 1 argument, got %d", len(args))
return run_on_executor_decorator
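# A minimal sketch (hypothetical class and helper) of how the decorator is typically
# applied; by default the wrapped method looks up an ``executor`` attribute on ``self``:
#
#   class Worker(object):
#       def __init__(self):
#           self.executor = futures.ThreadPoolExecutor(max_workers=4)
#
#       @run_on_executor
#       def blocking_call(self, x):
#           return expensive_computation(x)  # runs on the pool; callers get a Future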
_NO_RESULT = object()
def return_future(f):
"""Decorator to make a function that returns via callback return a
`Future`.
This decorator was provided to ease the transition from
callback-oriented code to coroutines. It is not recommended for
new code.
The wrapped function should take a ``callback`` keyword argument
and invoke it with one argument when it has finished. To signal failure,
the function can simply raise an exception (which will be
captured by the `.StackContext` and passed along to the ``Future``).
From the caller's perspective, the callback argument is optional.
If one is given, it will be invoked when the function is complete
with ``Future.result()`` as an argument. If the function fails, the
callback will not be run and an exception will be raised into the
surrounding `.StackContext`.
If no callback is given, the caller should use the ``Future`` to
wait for the function to complete (perhaps by yielding it in a
`.gen.engine` function, or passing it to `.IOLoop.add_future`).
Usage:
.. testcode::
@return_future
def future_func(arg1, arg2, callback):
# Do stuff (possibly asynchronous)
callback(result)
@gen.engine
def caller(callback):
yield future_func(arg1, arg2)
callback()
..
Note that ``@return_future`` and ``@gen.engine`` can be applied to the
same function, provided ``@return_future`` appears first. However,
consider using ``@gen.coroutine`` instead of this combination.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = Future()
callback, args, kwargs = replacer.replace(
lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value),
args, kwargs)
def handle_error(typ, value, tb):
future_set_exc_info(future, (typ, value, tb))
return True
exc_info = None
with ExceptionStackContext(handle_error):
try:
result = f(*args, **kwargs)
if result is not None:
raise ReturnValueIgnoredError(
"@return_future should not be used with functions "
"that return values")
except:
exc_info = sys.exc_info()
raise
if exc_info is not None:
# If the initial synchronous part of f() raised an exception,
# go ahead and raise it to the caller directly without waiting
# for them to inspect the Future.
future.result()
# If the caller passed in a callback, schedule it to be called
# when the future resolves. It is important that this happens
# just before we return the future, or else we risk confusing
# stack contexts with multiple exceptions (one here with the
# immediate exception, and again when the future resolves and
# the callback triggers its exception by calling future.result()).
if callback is not None:
def run_callback(future):
result = future.result()
if result is _NO_RESULT:
callback()
else:
callback(future.result())
future_add_done_callback(future, wrap(run_callback))
return future
return wrapper
def chain_future(a, b):
"""Chain two futures together so that when one completes, so does the other.
The result (success or failure) of ``a`` will be copied to ``b``, unless
``b`` has already been completed or cancelled by the time ``a`` finishes.
.. versionchanged:: 5.0
Now accepts both Tornado/asyncio `Future` objects and
`concurrent.futures.Future`.
"""
def copy(future):
assert future is a
if b.done():
return
if (hasattr(a, 'exc_info') and
a.exc_info() is not None):
future_set_exc_info(b, a.exc_info())
elif a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
if isinstance(a, Future):
future_add_done_callback(a, copy)
else:
# concurrent.futures.Future
from tornado.ioloop import IOLoop
IOLoop.current().add_future(a, copy)
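# Typical use (hypothetical futures): given a concurrent.futures.Future ``a`` returned by
# a thread pool and a Tornado Future ``b`` exposed to a coroutine, chain_future(a, b)
# copies a's eventual result or exception into b once a completes.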
def future_set_result_unless_cancelled(future, value):
"""Set the given ``value`` as the `Future`'s result, if not cancelled.
Avoids asyncio.InvalidStateError when calling set_result() on
a cancelled `asyncio.Future`.
.. versionadded:: 5.0
"""
if not future.cancelled():
future.set_result(value)
def future_set_exc_info(future, exc_info):
"""Set the given ``exc_info`` as the `Future`'s exception.
Understands both `asyncio.Future` and Tornado's extensions to
enable better tracebacks on Python 2.
.. versionadded:: 5.0
"""
if hasattr(future, 'set_exc_info'):
# Tornado's Future
future.set_exc_info(exc_info)
else:
# asyncio.Future
future.set_exception(exc_info[1])
def future_add_done_callback(future, callback):
"""Arrange to call ``callback`` when ``future`` is complete.
``callback`` is invoked with one argument, the ``future``.
If ``future`` is already done, ``callback`` is invoked immediately.
This may differ from the behavior of ``Future.add_done_callback``,
which makes no such guarantee.
.. versionadded:: 5.0
"""
if future.done():
callback(future)
else:
future.add_done_callback(callback)
|
the-stack_0_27803
|
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from mock.mock import patch
from pytest import raises
from circuitbreaker import CircuitBreaker, CircuitBreakerError, circuit
def test_circuitbreaker__str__():
cb = CircuitBreaker(name='Foobar')
assert str(cb) == 'Foobar'
def test_circuitbreaker_error__str__():
cb = CircuitBreaker(name='Foobar')
cb._last_failure = Exception()
error = CircuitBreakerError(cb)
assert str(error).startswith('Circuit "Foobar" OPEN until ')
assert str(error).endswith('(0 failures, 30 sec remaining) (last_failure: Exception())')
def test_circuitbreaker_should_save_last_exception_on_failure_call():
cb = CircuitBreaker(name='Foobar')
func = Mock(side_effect=IOError)
with raises(IOError):
cb.call(func)
assert isinstance(cb.last_failure, IOError)
def test_circuitbreaker_should_clear_last_exception_on_success_call():
cb = CircuitBreaker(name='Foobar')
cb._last_failure = IOError()
assert isinstance(cb.last_failure, IOError)
cb.call(lambda: True)
assert cb.last_failure is None
def test_circuitbreaker_should_call_fallback_function_if_open():
fallback = Mock(return_value=True)
func = Mock(return_value=False)
CircuitBreaker.opened = lambda self: True
cb = CircuitBreaker(name='WithFallback', fallback_function=fallback)
cb.call(func)
fallback.assert_called_once_with()
def test_circuitbreaker_should_not_call_function_if_open():
fallback = Mock(return_value=True)
func = Mock(return_value=False)
CircuitBreaker.opened = lambda self: True
cb = CircuitBreaker(name='WithFallback', fallback_function=fallback)
assert cb.call(func) == fallback.return_value
assert not func.called
def mocked_function(*args, **kwargs):
pass
def test_circuitbreaker_call_fallback_function_with_parameters():
fallback = Mock(return_value=True)
cb = circuit(name='with_fallback', fallback_function=fallback)
# mock opened prop to see if fallback is called with correct parameters.
cb.opened = lambda self: True
func_decorated = cb.decorate(mocked_function)
    func_decorated('test2', test='test')
# check args and kwargs are getting correctly to fallback function
fallback.assert_called_once_with('test2', test='test')
@patch('circuitbreaker.CircuitBreaker.decorate')
def test_circuit_decorator_without_args(circuitbreaker_mock):
function = lambda: True
circuit(function)
circuitbreaker_mock.assert_called_once_with(function)
@patch('circuitbreaker.CircuitBreaker.__init__')
def test_circuit_decorator_with_args(circuitbreaker_mock):
circuitbreaker_mock.return_value = None
function_fallback = lambda: True
circuit(10, 20, KeyError, 'foobar', function_fallback)
circuitbreaker_mock.assert_called_once_with(
expected_exception=KeyError,
failure_threshold=10,
recovery_timeout=20,
name='foobar',
fallback_function=function_fallback
)
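# A minimal usage sketch of the decorator exercised above. The threshold,
# timeout, exception type and fallback value are illustrative assumptions;
# only the keyword names mirror those asserted in
# test_circuit_decorator_with_args.
@circuit(failure_threshold=3, recovery_timeout=10,
         expected_exception=IOError, name='remote-call',
         fallback_function=lambda *args, **kwargs: 'cached-response')
def flaky_remote_call():
    # Each IOError counts towards the threshold; once the breaker opens,
    # callers receive 'cached-response' until the recovery timeout elapses.
    raise IOError('remote service unavailable')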
|
the-stack_0_27807
|
import math
import tensorflow as tf
from tensorflow import keras
__all__ = ["ArcMarginProduct"]
@keras.utils.register_keras_serializable()
class ArcMarginProduct(keras.layers.Layer):
"""
Implements large margin arc distance.
Reference:
https://arxiv.org/pdf/1801.07698.pdf
https://github.com/lyakaap/Landmark2019-1st-and-3rd-Place-Solution/
blob/master/src/modeling/metric_learning.py
"""
def __init__(
self, n_classes, s=30, m=0.50, easy_margin=False, ls_eps=0.0, **kwargs
):
super(ArcMarginProduct, self).__init__(**kwargs)
self.n_classes = n_classes
self.s = s
self.m = m
self.ls_eps = ls_eps
self.easy_margin = easy_margin
self.cos_m = tf.math.cos(m)
self.sin_m = tf.math.sin(m)
self.th = tf.math.cos(math.pi - m)
self.mm = tf.math.sin(math.pi - m) * m
self.W = None
def get_config(self):
config = super().get_config().copy()
config.update(
{
"n_classes": self.n_classes,
"s": self.s,
"m": self.m,
"ls_eps": self.ls_eps,
"easy_margin": self.easy_margin,
}
)
return config
def build(self, input_shape):
super(ArcMarginProduct, self).build(input_shape[0])
self.W = self.add_weight(
name="W",
shape=(int(input_shape[0][-1]), self.n_classes),
initializer="glorot_uniform",
dtype="float32",
trainable=True,
regularizer=None,
)
def call(self, inputs, **kwargs):
X, y = inputs
y = tf.cast(y, dtype=tf.int32)
cosine = tf.matmul(
tf.math.l2_normalize(X, axis=1), tf.math.l2_normalize(self.W, axis=0)
)
sine = tf.math.sqrt(1.0 - tf.math.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = tf.where(cosine > 0, phi, cosine)
else:
phi = tf.where(cosine > self.th, phi, cosine - self.mm)
one_hot = tf.cast(tf.one_hot(y, depth=self.n_classes), dtype=cosine.dtype)
if self.ls_eps > 0:
one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.n_classes
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
return output
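# A minimal sketch of wiring the layer into a Keras model. The embedding
# size, class count and head structure are illustrative assumptions; the
# layer itself only requires a (features, labels) pair as input.
def build_arcface_head(n_classes=1000, embedding_dim=512):
    features = keras.Input(shape=(embedding_dim,), name="features")
    labels = keras.Input(shape=(), dtype="int32", name="labels")
    # Margin-adjusted, scaled cosine logits from the layer defined above.
    margin_logits = ArcMarginProduct(n_classes=n_classes, s=30, m=0.50)(
        [features, labels]
    )
    outputs = keras.layers.Softmax(name="probs")(margin_logits)
    return keras.Model(inputs=[features, labels], outputs=outputs)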
|
the-stack_0_27811
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ARMErrorResponseBody(Model):
"""ARM error response body.
:param message: Gets or sets the string that describes the error in detail
and provides debugging information.
:type message: str
:param code: Gets or sets the string that can be used to programmatically
identify the error.
:type code: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
}
def __init__(self, *, message: str=None, code: str=None, **kwargs) -> None:
super(ARMErrorResponseBody, self).__init__(**kwargs)
self.message = message
self.code = code
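# A minimal construction sketch for the model above; the message and code
# values are illustrative assumptions, not values emitted by the service.
def _example_arm_error_body():
    return ARMErrorResponseBody(
        message='The requested resource was not found.',
        code='ResourceNotFound')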
|
the-stack_0_27812
|
import logging
import numpy as np
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import TensorDataset
from knodle.transformation.majority import input_to_majority_vote_input
from knodle.transformation.torch_input import input_labels_to_tensordataset
from knodle.trainer.trainer import BaseTrainer
from knodle.trainer.auto_trainer import AutoTrainer
from knodle.trainer.baseline.config import MajorityConfig
from knodle.transformation.filter import filter_probability_threshold
logger = logging.getLogger(__name__)
@AutoTrainer.register('majority')
class MajorityVoteTrainer(BaseTrainer):
"""
    The baseline trainer for weak supervision: noisy labels are derived
    from the matched rules by a simple majority vote and the provided
    model is trained on them.
"""
def __init__(self, **kwargs):
if kwargs.get("trainer_config") is None:
kwargs["trainer_config"] = MajorityConfig(optimizer=SGD, lr=0.001)
super().__init__(**kwargs)
def train(
self,
model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
):
"""
This function gets final labels with a majority vote approach and trains the provided model.
"""
self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
self._apply_rule_reduction()
# initialise optimizer
self.trainer_config.optimizer = self.initialise_optimizer()
self.model_input_x, noisy_y_train, self.rule_matches_z = input_to_majority_vote_input(
self.rule_matches_z,
self.mapping_rules_labels_t,
self.model_input_x,
use_probabilistic_labels=self.trainer_config.use_probabilistic_labels,
filter_non_labelled=self.trainer_config.filter_non_labelled,
probability_threshold=self.trainer_config.probability_threshold,
other_class_id=self.trainer_config.other_class_id
)
feature_label_dataset = input_labels_to_tensordataset(self.model_input_x, noisy_y_train)
feature_label_dataloader = self._make_dataloader(feature_label_dataset)
self._train_loop(feature_label_dataloader)
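# A toy sketch of the majority-vote step this trainer delegates to
# input_to_majority_vote_input: rule matches Z (samples x rules) are
# projected into label space via the rule-to-class matrix T
# (rules x classes) and the per-sample argmax becomes the noisy label.
# The matrices below are illustrative assumptions.
def _toy_majority_vote():
    z = np.array([[1, 0, 1],   # sample 0 matched rules 0 and 2
                  [0, 1, 0]])  # sample 1 matched rule 1
    t = np.array([[1, 0],      # rule 0 votes for class 0
                  [0, 1],      # rule 1 votes for class 1
                  [1, 0]])     # rule 2 votes for class 0
    votes = z.dot(t)           # per-sample vote counts per class
    return votes.argmax(axis=1)  # -> array([0, 1])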
|
the-stack_0_27813
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import datetime as dt
import functools
import itertools
import re
import sys
import threading
import time
import uuid
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
import six
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy import or_, and_, case
from sqlalchemy.orm import joinedload, joinedload_all, undefer_group
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy import sql
from sqlalchemy.sql.expression import bindparam
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import true
from sqlalchemy.sql import func
from sqlalchemy.sql import sqltypes
from cinder.api import common
from cinder.common import sqlalchemyutils
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import utils
from cinder.volume import utils as vol_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')
_LOCK = threading.Lock()
_FACADE = None
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database)
)
# NOTE(geguileo): To avoid a cyclical dependency we import the
# group here. Dependency cycle is objects.base requires db.api,
# which requires db.sqlalchemy.api, which requires service which
# requires objects.base
CONF.import_group("profiler", "cinder.service")
if CONF.profiler.enabled:
if CONF.profiler.trace_sqlalchemy:
osprofiler_sqlalchemy.add_tracing(sqlalchemy,
_FACADE.get_engine(),
"db")
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def dispose_engine():
get_engine().dispose()
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
        LOG.warning('Use of empty request context is deprecated')
raise Exception('die')
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
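# A minimal sketch of how the decorators above are applied; the helper
# below is illustrative only and not part of the public DB API.
@require_context
def _example_context_guarded_call(context):
    # The wrapper raises NotAuthorized before this body runs if the
    # context is neither an admin nor a regular user context.
    return context.project_id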
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, volume_id, *args, **kwargs):
if not resource_exists(context, models.Volume, volume_id):
raise exception.VolumeNotFound(volume_id=volume_id)
return f(context, volume_id, *args, **kwargs)
return wrapper
def require_snapshot_exists(f):
"""Decorator to require the specified snapshot to exist.
Requires the wrapped function to use context and snapshot_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, snapshot_id, *args, **kwargs):
if not resource_exists(context, models.Snapshot, snapshot_id):
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return f(context, snapshot_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning("Deadlock detected when running "
"'%(func_name)s': Retrying...",
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def handle_db_data_error(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except db_exc.DBDataError:
msg = _('Error writing field to database')
LOG.exception(msg)
raise exception.Invalid(msg)
return wrapper
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(model, *args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
elif read_deleted == 'int_no':
query = query.filter_by(deleted=0)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
if model is models.VolumeAttachment:
# NOTE(dulek): In case of VolumeAttachment, we need to join
# `project_id` through `volume` relationship.
query = query.filter(models.Volume.project_id ==
context.project_id)
else:
query = query.filter_by(project_id=context.project_id)
return query
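# A minimal usage sketch for model_query(); the status filter value is an
# illustrative assumption.
def _example_available_volumes(context):
    # Honours context.read_deleted and, with project_only=True and a user
    # context, restricts rows to the caller's project.
    return model_query(context, models.Volume, project_only=True).\
        filter_by(status='available').\
        all()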
def _sync_volumes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(volumes, _gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(snapshots, _gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_backups(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(backups, _gigs) = _backup_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'backups'
return {key: backups}
def _sync_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(_junk, vol_gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
(_junk, snap_gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: vol_gigs + snap_gigs}
def _sync_consistencygroups(context, project_id, session,
volume_type_id=None,
volume_type_name=None):
(_junk, groups) = _consistencygroup_data_get_for_project(
context, project_id, session=session)
key = 'consistencygroups'
return {key: groups}
def _sync_groups(context, project_id, session,
volume_type_id=None,
volume_type_name=None):
(_junk, groups) = _group_data_get_for_project(
context, project_id, session=session)
key = 'groups'
return {key: groups}
def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
key = 'backup_gigabytes'
(_junk, backup_gigs) = _backup_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: backup_gigs}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes,
'_sync_groups': _sync_groups,
}
###################
def _clean_filters(filters):
return {k: v for k, v in filters.items() if v is not None}
def _filter_host(field, value, match_level=None):
"""Generate a filter condition for host and cluster fields.
Levels are:
- 'pool': Will search for an exact match
- 'backend': Will search for exact match and value#*
    - 'host': Will search for exact match, value@* and value#*
If no level is provided we'll determine it based on the value we want to
match:
- 'pool': If '#' is present in value
- 'backend': If '@' is present in value and '#' is not present
- 'host': In any other case
:param field: ORM field. Ex: objects.Volume.model.host
:param value: String to compare with
:param match_level: 'pool', 'backend', or 'host'
"""
# If we don't set level we'll try to determine it automatically. LIKE
# operations are expensive, so we try to reduce them to the minimum.
if match_level is None:
if '#' in value:
match_level = 'pool'
elif '@' in value:
match_level = 'backend'
else:
match_level = 'host'
# Mysql is not doing case sensitive filtering, so we force it
conn_str = CONF.database.connection
if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']:
cmp_value = func.binary(value)
like_op = 'LIKE BINARY'
else:
cmp_value = value
like_op = 'LIKE'
conditions = [field == cmp_value]
if match_level != 'pool':
conditions.append(field.op(like_op)(value + '#%'))
if match_level == 'host':
conditions.append(field.op(like_op)(value + '@%'))
return or_(*conditions)
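# A worked sketch of the conditions generated above; the host strings are
# illustrative assumptions.
#
#   value='host1'            (match_level 'host'):
#       field == 'host1' OR field LIKE 'host1#%' OR field LIKE 'host1@%'
#   value='host1@lvm'        (match_level 'backend'):
#       field == 'host1@lvm' OR field LIKE 'host1@lvm#%'
#   value='host1@lvm#pool1'  (match_level 'pool'):
#       field == 'host1@lvm#pool1'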
def _clustered_bool_field_filter(query, field_name, filter_value):
# Now that we have clusters, a service is disabled/frozen if the service
# doesn't belong to a cluster or if it belongs to a cluster and the cluster
# itself is disabled/frozen.
if filter_value is not None:
query_filter = or_(
and_(models.Service.cluster_name.is_(None),
getattr(models.Service, field_name)),
and_(models.Service.cluster_name.isnot(None),
sql.exists().where(and_(
models.Cluster.name == models.Service.cluster_name,
models.Cluster.binary == models.Service.binary,
~models.Cluster.deleted,
getattr(models.Cluster, field_name)))))
if not filter_value:
query_filter = ~query_filter
query = query.filter(query_filter)
return query
def _service_query(context, session=None, read_deleted='no', host=None,
cluster_name=None, is_up=None, host_or_cluster=None,
backend_match_level=None, disabled=None, frozen=None,
**filters):
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(models.Service, filters):
return None
query = model_query(context, models.Service, session=session,
read_deleted=read_deleted)
# Host and cluster are particular cases of filters, because we must
# retrieve not only exact matches (single backend configuration), but also
# match those that have the backend defined (multi backend configuration).
if host:
query = query.filter(_filter_host(models.Service.host, host,
backend_match_level))
if cluster_name:
query = query.filter(_filter_host(models.Service.cluster_name,
cluster_name, backend_match_level))
if host_or_cluster:
query = query.filter(or_(
_filter_host(models.Service.host, host_or_cluster,
backend_match_level),
_filter_host(models.Service.cluster_name, host_or_cluster,
backend_match_level),
))
query = _clustered_bool_field_filter(query, 'disabled', disabled)
query = _clustered_bool_field_filter(query, 'frozen', frozen)
if filters:
query = query.filter_by(**filters)
if is_up is not None:
date_limit = utils.service_expired_time()
svc = models.Service
filter_ = or_(
and_(svc.created_at.isnot(None), svc.created_at >= date_limit),
and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit))
query = query.filter(filter_ == is_up)
return query
@require_admin_context
def service_destroy(context, service_id):
query = _service_query(context, id=service_id)
updated_values = models.Service.delete_values()
if not query.update(updated_values):
raise exception.ServiceNotFound(service_id=service_id)
return updated_values
@require_admin_context
def service_get(context, service_id=None, backend_match_level=None, **filters):
"""Get a service that matches the criteria.
A possible filter is is_up=True and it will filter nodes that are down.
:param service_id: Id of the service.
:param filters: Filters for the query in the form of key/value.
:param backend_match_level: 'pool', 'backend', or 'host' for host and
cluster filters (as defined in _filter_host
method)
:raise ServiceNotFound: If service doesn't exist.
"""
query = _service_query(context, backend_match_level=backend_match_level,
id=service_id, **filters)
service = None if not query else query.first()
if not service:
serv_id = service_id or filters.get('topic') or filters.get('binary')
raise exception.ServiceNotFound(service_id=serv_id,
host=filters.get('host'))
return service
@require_admin_context
def service_get_all(context, backend_match_level=None, **filters):
"""Get all services that match the criteria.
A possible filter is is_up=True and it will filter nodes that are down.
:param filters: Filters for the query in the form of key/value.
:param backend_match_level: 'pool', 'backend', or 'host' for host and
cluster filters (as defined in _filter_host
method)
"""
query = _service_query(context, backend_match_level=backend_match_level,
**filters)
return [] if not query else query.all()
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
session = get_session()
with session.begin():
service_ref.save(session)
return service_ref
@require_admin_context
@_retry_on_deadlock
def service_update(context, service_id, values):
if 'disabled' in values:
values = values.copy()
values['modified_at'] = values.get('modified_at', timeutils.utcnow())
values['updated_at'] = values.get('updated_at',
literal_column('updated_at'))
query = _service_query(context, id=service_id)
result = query.update(values)
if not result:
raise exception.ServiceNotFound(service_id=service_id)
###################
@require_admin_context
def is_backend_frozen(context, host, cluster_name):
"""Check if a storage backend is frozen based on host and cluster_name."""
if cluster_name:
model = models.Cluster
conditions = [model.name == vol_utils.extract_host(cluster_name)]
else:
model = models.Service
conditions = [model.host == vol_utils.extract_host(host)]
conditions.extend((~model.deleted, model.frozen))
query = get_session().query(sql.exists().where(and_(*conditions)))
frozen = query.scalar()
return frozen
###################
def _cluster_query(context, is_up=None, get_services=False,
services_summary=False, read_deleted='no',
name_match_level=None, name=None, session=None, **filters):
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(models.Cluster, filters):
return None
query = model_query(context, models.Cluster, session=session,
read_deleted=read_deleted)
# Cluster is a special case of filter, because we must match exact match
# as well as hosts that specify the backend
if name:
query = query.filter(_filter_host(models.Cluster.name, name,
name_match_level))
if filters:
query = query.filter_by(**filters)
if services_summary:
query = query.options(undefer_group('services_summary'))
# We bind the expiration time to now (as it changes with each query)
# and is required by num_down_hosts
query = query.params(expired=utils.service_expired_time())
elif 'num_down_hosts' in filters:
query = query.params(expired=utils.service_expired_time())
if get_services:
query = query.options(joinedload_all('services'))
if is_up is not None:
date_limit = utils.service_expired_time()
filter_ = and_(models.Cluster.last_heartbeat.isnot(None),
models.Cluster.last_heartbeat >= date_limit)
query = query.filter(filter_ == is_up)
return query
@require_admin_context
def cluster_get(context, id=None, is_up=None, get_services=False,
services_summary=False, read_deleted='no',
name_match_level=None, **filters):
"""Get a cluster that matches the criteria.
:param id: Id of the cluster.
:param is_up: Boolean value to filter based on the cluster's up status.
:param get_services: If we want to load all services from this cluster.
:param services_summary: If we want to load num_hosts and
num_down_hosts fields.
:param read_deleted: Filtering based on delete status. Default value is
"no".
:param filters: Field based filters in the form of key/value.
:param name_match_level: 'pool', 'backend', or 'host' for name filter (as
defined in _filter_host method)
:raise ClusterNotFound: If cluster doesn't exist.
"""
query = _cluster_query(context, is_up, get_services, services_summary,
read_deleted, name_match_level, id=id, **filters)
cluster = None if not query else query.first()
if not cluster:
cluster_id = id or six.text_type(filters)
raise exception.ClusterNotFound(id=cluster_id)
return cluster
@require_admin_context
def cluster_get_all(context, is_up=None, get_services=False,
services_summary=False, read_deleted='no',
name_match_level=None, **filters):
"""Get all clusters that match the criteria.
:param is_up: Boolean value to filter based on the cluster's up status.
:param get_services: If we want to load all services from this cluster.
:param services_summary: If we want to load num_hosts and
num_down_hosts fields.
:param read_deleted: Filtering based on delete status. Default value is
"no".
:param name_match_level: 'pool', 'backend', or 'host' for name filter (as
defined in _filter_host method)
:param filters: Field based filters in the form of key/value.
"""
query = _cluster_query(context, is_up, get_services, services_summary,
read_deleted, name_match_level, **filters)
return [] if not query else query.all()
@require_admin_context
def cluster_create(context, values):
"""Create a cluster from the values dictionary."""
cluster_ref = models.Cluster()
cluster_ref.update(values)
# Provided disabled value takes precedence
if values.get('disabled') is None:
cluster_ref.disabled = not CONF.enable_new_services
session = get_session()
try:
with session.begin():
cluster_ref.save(session)
# We mark that newly created cluster has no hosts to prevent
# problems at the OVO level
cluster_ref.last_heartbeat = None
return cluster_ref
# If we had a race condition (another non deleted cluster exists with the
# same name) raise Duplicate exception.
except db_exc.DBDuplicateEntry:
raise exception.ClusterExists(name=values.get('name'))
@require_admin_context
@_retry_on_deadlock
def cluster_update(context, id, values):
"""Set the given properties on an cluster and update it.
Raises ClusterNotFound if cluster does not exist.
"""
query = _cluster_query(context, id=id)
result = query.update(values)
if not result:
raise exception.ClusterNotFound(id=id)
@require_admin_context
def cluster_destroy(context, id):
"""Destroy the cluster or raise if it does not exist or has hosts."""
query = _cluster_query(context, id=id)
query = query.filter(models.Cluster.num_hosts == 0)
# If the update doesn't succeed we don't know if it's because the
# cluster doesn't exist or because it has hosts.
result = query.update(models.Cluster.delete_values(),
synchronize_session=False)
if not result:
# This will fail if the cluster doesn't exist raising the right
# exception
cluster_get(context, id=id)
# If it doesn't fail, then the problem is that there are hosts
raise exception.ClusterHasHosts(id=id)
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _dict_with_extra_specs_if_authorized(context, inst_type_query):
"""Convert type query result to dict with extra_spec and rate_limit.
Takes a volume type query returned by sqlalchemy and returns it
as a dictionary, converting the extra_specs entry from a list
of dicts. NOTE the contents of extra-specs are admin readable
only. If the context passed in for this request is not admin
then we will return an empty extra-specs dict rather than
providing the admin only details.
Example response with admin context:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
###################
def _dict_with_group_specs_if_authorized(context, inst_type_query):
"""Convert group type query result to dict with spec and rate_limit.
Takes a group type query returned by sqlalchemy and returns it
as a dictionary, converting the extra_specs entry from a list
of dicts. NOTE the contents of extra-specs are admin readable
only. If the context passed in for this request is not admin
then we will return an empty extra-specs dict rather than
providing the admin only details.
Example response with admin context:
'group_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'group_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
if not is_admin_context(context):
        del inst_type_dict['group_specs']
else:
group_specs = {x['key']: x['value']
for x in inst_type_query['group_specs']}
inst_type_dict['group_specs'] = group_specs
return inst_type_dict
###################
@require_context
def _quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get(context, project_id, resource):
return _quota_get(context, project_id, resource)
@require_context
def quota_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_allocated_get_all_by_project(context, project_id, session=None):
rows = model_query(context, models.Quota, read_deleted='no',
session=session).filter_by(project_id=project_id).all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.allocated
return result
@require_context
def _quota_get_all_by_resource(context, resource, session=None):
rows = model_query(context, models.Quota,
session=session,
read_deleted='no').filter_by(
resource=resource).all()
return rows
@require_context
def quota_create(context, project_id, resource, limit, allocated):
quota_ref = models.Quota()
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
if allocated:
quota_ref.allocated = allocated
session = get_session()
with session.begin():
quota_ref.save(session)
return quota_ref
@require_context
def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.hard_limit = limit
return quota_ref
@require_context
def quota_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
quotas = _quota_get_all_by_resource(context, old_res, session=session)
for quota in quotas:
quota.resource = new_res
@require_admin_context
def quota_allocated_update(context, project_id, resource, allocated):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.allocated = allocated
return quota_ref
@require_admin_context
def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
return quota_ref.delete(session=session)
###################
@require_context
def _quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@require_context
def quota_class_get(context, class_name, resource):
return _quota_class_get(context, class_name, resource)
def quota_class_get_defaults(context):
rows = model_query(context, models.QuotaClass,
read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def _quota_class_get_all_by_resource(context, resource, session):
result = model_query(context, models.QuotaClass,
session=session,
read_deleted="no").\
filter_by(resource=resource).\
all()
return result
@handle_db_data_error
@require_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
session = get_session()
with session.begin():
quota_class_ref.save(session)
return quota_class_ref
@require_context
def quota_class_update(context, class_name, resource, limit):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.hard_limit = limit
return quota_class_ref
@require_context
def quota_class_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
quota_class_list = _quota_class_get_all_by_resource(
context, old_res, session)
for quota_class in quota_class_list:
quota_class.resource = new_res
@require_context
def quota_class_destroy(context, class_name, resource):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
return quota_class_ref.delete(session=session)
@require_context
def quota_class_destroy_all_by_name(context, class_name):
session = get_session()
with session.begin():
quota_classes = model_query(context, models.QuotaClass,
session=session, read_deleted="no").\
filter_by(class_name=class_name).\
all()
for quota_class_ref in quota_classes:
quota_class_ref.delete(session=session)
###################
@require_context
def quota_usage_get(context, project_id, resource):
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
@require_context
def quota_usage_get_all_by_project(context, project_id):
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
return result
@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.save(session=session)
return quota_usage_ref
###################
def _reservation_create(context, uuid, usage, project_id, resource, delta,
expire, session=None, allocated_id=None):
usage_id = usage['id'] if usage else None
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.allocated_id = allocated_id
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
return {row.resource: row for row in rows}
def _get_quota_usages_by_resource(context, session, resource):
rows = model_query(context, models.QuotaUsage,
deleted="no",
session=session).\
filter_by(resource=resource).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
return rows
@require_context
@_retry_on_deadlock
def quota_usage_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
usages = _get_quota_usages_by_resource(context, session, old_res)
for usage in usages:
usage.resource = new_res
usage.until_refresh = 1
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None,
is_allocated_reserve=False):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
# Get the current usages
usages = _get_quota_usages(context, session, project_id)
allocated = quota_allocated_get_all_by_project(context, project_id,
session=session)
allocated.pop('project_id')
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if resource not in usages:
usages[resource] = _quota_usage_create(elevated,
project_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif usages[resource].until_refresh is not None:
usages[resource].until_refresh -= 1
if usages[resource].until_refresh <= 0:
refresh = True
elif max_age and usages[resource].updated_at is not None and (
(timeutils.utcnow() -
usages[resource].updated_at).total_seconds() >= max_age):
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
volume_type_id = getattr(resources[resource],
'volume_type_id', None)
volume_type_name = getattr(resources[resource],
'volume_type_name', None)
updates = sync(elevated, project_id,
volume_type_id=volume_type_id,
volume_type_name=volume_type_name,
session=session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = _quota_usage_create(
elevated,
project_id,
res,
0, 0,
until_refresh or None,
session=session
)
# Update the usage
usages[res].in_use = in_use
usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
if is_allocated_reserve:
unders = [r for r, delta in deltas.items()
if delta < 0 and delta + allocated.get(r, 0) < 0]
else:
unders = [r for r, delta in deltas.items()
if delta < 0 and delta + usages[r].in_use < 0]
# TODO(mc_nair): Should ignore/zero alloc if using non-nested driver
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
overs = [r for r, delta in deltas.items()
if quotas[r] >= 0 and delta >= 0 and
quotas[r] < delta + usages[r].total + allocated.get(r, 0)]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for resource, delta in deltas.items():
usage = usages[resource]
allocated_id = None
if is_allocated_reserve:
try:
quota = _quota_get(context, project_id, resource,
session=session)
except exception.ProjectQuotaNotFound:
# If we were using the default quota, create DB entry
quota = quota_create(context, project_id, resource,
quotas[resource], 0)
# Since there's no reserved/total for allocated, update
# allocated immediately and subtract on rollback if needed
quota_allocated_update(context, project_id, resource,
quota.allocated + delta)
allocated_id = quota.id
usage = None
reservation = _reservation_create(
elevated, str(uuid.uuid4()), usage, project_id, resource,
delta, expire, session=session, allocated_id=allocated_id)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0 and not is_allocated_reserve:
usages[resource].reserved += delta
if unders:
LOG.warning("Change will make usage less than 0 for the following "
"resources: %s", unders)
if overs:
usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
allocated=allocated.get(k, 0))
for k, v in usages.items()}
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages=usages)
return reservations
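# A toy numeric sketch of the over-quota test above; the figures are
# illustrative assumptions.
#
#   quotas['gigabytes']           = 100
#   deltas['gigabytes']           = 15
#   usages['gigabytes'].total     = 80   (in_use + reserved)
#   allocated.get('gigabytes', 0) = 10
#
#   100 < 15 + 80 + 10, so 'gigabytes' lands in `overs` and OverQuota is
#   raised instead of returning reservations.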
def _quota_reservations(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update').\
all()
def _dict_with_usage_id(usages):
return {row.id: row for row in usages.values()}
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
usages = _dict_with_usage_id(usages)
for reservation in _quota_reservations(session, context, reservations):
# Allocated reservations will have already been bumped
if not reservation.allocated_id:
usage = usages[reservation.usage_id]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation.delete(session=session)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
usages = _dict_with_usage_id(usages)
for reservation in _quota_reservations(session, context, reservations):
if reservation.allocated_id:
reservation.quota.allocated -= reservation.delta
else:
usage = usages[reservation.usage_id]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation.delete(session=session)
def quota_destroy_by_project(*args, **kwargs):
"""Destroy all limit quotas associated with a project.
Leaves usage and reservation quotas intact.
"""
quota_destroy_all_by_project(only_quotas=True, *args, **kwargs)
@require_admin_context
@_retry_on_deadlock
def quota_destroy_all_by_project(context, project_id, only_quotas=False):
"""Destroy all quotas associated with a project.
This includes limit quotas, usage quotas and reservation quotas.
Optionally can only remove limit quotas and leave other types as they are.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
:param only_quotas: Only delete limit quotas, leave other types intact.
"""
session = get_session()
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_ref in quotas:
quota_ref.delete(session=session)
if only_quotas:
return
quota_usages = model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_usage_ref in quota_usages:
quota_usage_ref.delete(session=session)
reservations = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for reservation_ref in reservations:
reservation_ref.delete(session=session)
@require_admin_context
@_retry_on_deadlock
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter(models.Reservation.expire < current_time).\
all()
if results:
for reservation in results:
if reservation.delta >= 0:
if reservation.allocated_id:
reservation.quota.allocated -= reservation.delta
reservation.quota.save(session=session)
else:
reservation.usage.reserved -= reservation.delta
reservation.usage.save(session=session)
reservation.delete(session=session)
###################
@require_admin_context
def volume_attach(context, values):
volume_attachment_ref = models.VolumeAttachment()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_attachment_ref.update(values)
session = get_session()
with session.begin():
volume_attachment_ref.save(session=session)
return _attachment_get(context, values['id'],
session=session)
@require_admin_context
def volume_attached(context, attachment_id, instance_uuid, host_name,
mountpoint, attach_mode='rw'):
"""This method updates a volume attachment entry.
This function saves the information related to a particular
attachment for a volume. It also updates the volume record
to mark the volume as attached.
"""
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
volume_attachment_ref = _attachment_get(context, attachment_id,
session=session)
updated_values = {'mountpoint': mountpoint,
'attach_status': fields.VolumeAttachStatus.ATTACHED,
'instance_uuid': instance_uuid,
'attached_host': host_name,
'attach_time': timeutils.utcnow(),
'attach_mode': attach_mode,
'updated_at': literal_column('updated_at')}
volume_attachment_ref.update(updated_values)
volume_attachment_ref.save(session=session)
del updated_values['updated_at']
volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
session=session)
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = fields.VolumeAttachStatus.ATTACHED
volume_ref.save(session=session)
return (volume_ref, updated_values)
@handle_db_data_error
@require_context
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
if is_admin_context(context):
values['volume_admin_metadata'] = \
_metadata_refs(values.get('admin_metadata'),
models.VolumeAdminMetadata)
elif values.get('volume_admin_metadata'):
del values['volume_admin_metadata']
volume_ref = models.Volume()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_ref.update(values)
session = get_session()
with session.begin():
session.add(volume_ref)
return _volume_get(context, values['id'], session=session)
def get_booleans_for_table(table_name):
booleans = set()
table = getattr(models, table_name.capitalize())
if hasattr(table, '__table__'):
columns = table.__table__.columns
for column in columns:
if isinstance(column.type, sqltypes.Boolean):
booleans.add(column.name)
return booleans
@require_admin_context
def volume_data_get_for_host(context, host, count_only=False):
host_attr = models.Volume.host
conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')]
if count_only:
result = model_query(context,
func.count(models.Volume.id),
read_deleted="no").filter(
or_(*conditions)).first()
return result[0] or 0
else:
result = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no").filter(
or_(*conditions)).first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Backup.id),
func.sum(models.Backup.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None):
return _volume_data_get_for_project(context, project_id, volume_type_id)
@require_admin_context
@_retry_on_deadlock
def volume_destroy(context, volume_id):
session = get_session()
now = timeutils.utcnow()
with session.begin():
updated_values = {'status': 'deleted',
'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at'),
'migration_status': None}
model_query(context, models.Volume, session=session).\
filter_by(id=volume_id).\
update(updated_values)
model_query(context, models.VolumeMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeAdminMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.Transfer, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
del updated_values['updated_at']
return updated_values
def _include_in_cluster(context, cluster, model, partial_rename, filters):
"""Generic include in cluster method.
When we include resources in a cluster we have to be careful to preserve
the addressing sections that have not been provided. That's why we allow
partial_renaming, so we can preserve the backend and pool if we are only
providing host/cluster level information, and preserve pool information if
we only provide backend level information.
For example when we include a host in a cluster we receive calls with
filters like {'host': 'localhost@lvmdriver-1'} and cluster with something
like 'mycluster@lvmdriver-1'. Since in the DB the resources will have the
host field set to something like 'localhost@lvmdriver-1#lvmdriver-1' we
want to include original pool in the new cluster_name. So we want to store
in cluster_name value 'mycluster@lvmdriver-1#lvmdriver-1'.
"""
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(model, filters):
return None
query = get_session().query(model)
if hasattr(model, 'deleted'):
query = query.filter_by(deleted=False)
# cluster_name and host are special filter cases
for field in {'cluster_name', 'host'}.intersection(filters):
value = filters.pop(field)
# We do a special backend filter
query = query.filter(_filter_host(getattr(model, field), value))
        # If we want to do a partial rename and we haven't set the cluster
# already, the value we want to set is a SQL replace of existing field
# value.
if partial_rename and isinstance(cluster, six.string_types):
cluster = func.replace(getattr(model, field), value, cluster)
query = query.filter_by(**filters)
result = query.update({'cluster_name': cluster}, synchronize_session=False)
return result
@require_admin_context
def volume_include_in_cluster(context, cluster, partial_rename=True,
**filters):
"""Include all volumes matching the filters into a cluster."""
return _include_in_cluster(context, cluster, models.Volume,
partial_rename, filters)
@require_admin_context
def volume_detached(context, volume_id, attachment_id):
"""This updates a volume attachment and marks it as detached.
This method also ensures that the volume entry is correctly
marked as either still attached/in-use or detached/available
if this was the last detachment made.
"""
# NOTE(jdg): This is a funky band-aid for the earlier attempts at
# multiattach, it's a bummer because these things aren't really being used
# but at the same time we don't want to break them until we work out the
# new proposal for multi-attach
remain_attachment = True
session = get_session()
with session.begin():
try:
attachment = _attachment_get(context, attachment_id,
session=session)
except exception.VolumeAttachmentNotFound:
attachment_updates = None
attachment = None
if attachment:
now = timeutils.utcnow()
attachment_updates = {
'attach_status': fields.VolumeAttachStatus.DETACHED,
'detach_time': now,
'deleted': True,
'deleted_at': now,
'updated_at':
literal_column('updated_at'),
}
attachment.update(attachment_updates)
attachment.save(session=session)
del attachment_updates['updated_at']
attachment_list = None
volume_ref = _volume_get(context, volume_id,
session=session)
volume_updates = {'updated_at': literal_column('updated_at')}
if not volume_ref.volume_attachment:
# NOTE(jdg): We kept the old arg style allowing session exclusively
# for this one call
attachment_list = volume_attachment_get_all_by_volume_id(
context, volume_id, session=session)
remain_attachment = False
if attachment_list and len(attachment_list) > 0:
remain_attachment = True
if not remain_attachment:
# Hide status update from user if we're performing volume migration
# or uploading it to image
if ((not volume_ref.migration_status and
not (volume_ref.status == 'uploading')) or
volume_ref.migration_status in ('success', 'error')):
volume_updates['status'] = 'available'
volume_updates['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
else:
# Volume is still attached
volume_updates['status'] = 'in-use'
volume_updates['attach_status'] = (
fields.VolumeAttachStatus.ATTACHED)
volume_ref.update(volume_updates)
volume_ref.save(session=session)
del volume_updates['updated_at']
return (volume_updates, attachment_updates)
def _process_model_like_filter(model, query, filters):
"""Applies regex expression filtering to a query.
:param model: model to apply filters to
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
:returns: the updated query.
"""
if query is None:
return query
for key in filters:
column_attr = getattr(model, key)
if 'property' == type(column_attr).__name__:
continue
value = filters[key]
if not isinstance(value, six.string_types):
continue
query = query.filter(column_attr.op('LIKE')(u'%' + value + u'%'))
return query
def apply_like_filters(model):
def decorator_filters(process_exact_filters):
def _decorator(query, filters):
exact_filters = filters.copy()
regex_filters = {}
for key, value in filters.items():
# NOTE(tommylikehu): For inexact match, the filter keys
# are in the format of 'key~=value'
if key.endswith('~'):
exact_filters.pop(key)
regex_filters[key.rstrip('~')] = value
query = process_exact_filters(query, exact_filters)
return _process_model_like_filter(model, query, regex_filters)
return _decorator
return decorator_filters
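# Usage sketch for the apply_like_filters decorator above (filter values are
# hypothetical): keys ending in '~' are matched with LIKE '%value%', all other
# keys are passed through to the wrapped exact-match filter function.
#
#     @apply_like_filters(model=models.Volume)
#     def _example_filters(query, filters):
#         return query.filter_by(**filters)
#
#     # 'display_name~' uses a LIKE match; 'status' stays an exact match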
@require_context
def _volume_get_query(context, session=None, project_only=False,
joined_load=True):
"""Get the query to retrieve the volume.
:param context: the context used to run the method _volume_get_query
:param session: the session to use
:param project_only: the boolean used to decide whether to query the
volume in the current project or all projects
:param joined_load: the boolean used to decide whether the query loads
the other models, which join the volume model in
the database. Currently, the False value for this
parameter is specially for the case of updating
database during volume migration
:returns: updated query or None
"""
if not joined_load:
return model_query(context, models.Volume, session=session,
project_only=project_only)
if is_admin_context(context):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup')).\
options(joinedload('group'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup')).\
options(joinedload('group'))
@require_context
def _volume_get(context, volume_id, session=None, joined_load=True):
result = _volume_get_query(context, session=session, project_only=True,
joined_load=joined_load)
if joined_load:
result = result.options(joinedload('volume_type.extra_specs'))
result = result.filter_by(id=volume_id).first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
def _attachment_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.VolumeAttachment,
filters,
exclude_list=['project_id']):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.VolumeAttachment)
if query is None:
return []
return query.all()
def _attachment_get(context, attachment_id, session=None, read_deleted=False,
project_only=True):
result = (model_query(context, models.VolumeAttachment, session=session,
read_deleted=read_deleted)
.filter_by(id=attachment_id)
.options(joinedload('volume'))
.first())
if not result:
msg = _("Unable to find attachment with id: %s"), attachment_id
raise exception.VolumeAttachmentNotFound(msg)
return result
def _attachment_get_query(context, session=None, project_only=False):
return model_query(context, models.VolumeAttachment, session=session,
project_only=project_only).options(joinedload('volume'))
@apply_like_filters(model=models.VolumeAttachment)
def _process_attachment_filters(query, filters):
if filters:
project_id = filters.pop('project_id', None)
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.VolumeAttachment, filters):
return
if project_id:
volume = models.Volume
query = query.filter(volume.id ==
models.VolumeAttachment.volume_id,
volume.project_id == project_id)
query = query.filter_by(**filters)
return query
@require_admin_context
def volume_attachment_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Retrieve all Attachment records with filter and pagination options."""
return _attachment_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@require_context
def volume_attachment_get_all_by_volume_id(context, volume_id, session=None):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter(models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED). \
options(joinedload('volume')).\
all()
return result
@require_context
def volume_attachment_get_all_by_host(context, host):
session = get_session()
with session.begin():
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(attached_host=host).\
filter(models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED). \
options(joinedload('volume')).\
all()
return result
@require_context
def volume_attachment_get(context, attachment_id):
"""Fetch the specified attachment record."""
return _attachment_get(context, attachment_id)
@require_context
def volume_attachment_get_all_by_instance_uuid(context,
instance_uuid):
"""Fetch all attachment records associated with the specified instance."""
session = get_session()
with session.begin():
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(instance_uuid=instance_uuid).\
filter(models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED).\
options(joinedload('volume')).\
all()
return result
@require_context
def volume_attachment_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Retrieve all Attachment records for specific project."""
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _attachment_get_all(context, filters, marker,
limit, offset, sort_keys,
sort_dirs)
@require_admin_context
@_retry_on_deadlock
def attachment_destroy(context, attachment_id):
"""Destroy the specified attachment record."""
utcnow = timeutils.utcnow()
session = get_session()
with session.begin():
updated_values = {'attach_status': 'deleted',
'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')}
model_query(context, models.VolumeAttachment, session=session).\
filter_by(id=attachment_id).\
update(updated_values)
model_query(context, models.AttachmentSpecs, session=session).\
filter_by(attachment_id=attachment_id).\
update({'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')})
del updated_values['updated_at']
return updated_values
def _attachment_specs_query(context, attachment_id, session=None):
return model_query(context, models.AttachmentSpecs, session=session,
read_deleted="no").\
filter_by(attachment_id=attachment_id)
@require_context
def attachment_specs_get(context, attachment_id):
"""Fetch the attachment_specs for the specified attachment record."""
rows = _attachment_specs_query(context, attachment_id).\
all()
result = {row['key']: row['value'] for row in rows}
return result
@require_context
def attachment_specs_delete(context, attachment_id, key):
"""Delete attachment_specs for the specified attachment record."""
session = get_session()
with session.begin():
_attachment_specs_get_item(context,
attachment_id,
key,
session)
_attachment_specs_query(context, attachment_id, session).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _attachment_specs_get_item(context,
attachment_id,
key,
session=None):
result = _attachment_specs_query(
context, attachment_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.AttachmentSpecsNotFound(
specs_key=key,
attachment_id=attachment_id)
return result
@handle_db_data_error
@require_context
def attachment_specs_update_or_create(context,
attachment_id,
specs):
"""Update attachment_specs for the specified attachment record."""
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.items():
try:
spec_ref = _attachment_specs_get_item(
context, attachment_id, key, session)
except exception.AttachmentSpecsNotFound:
spec_ref = models.AttachmentSpecs()
spec_ref.update({"key": key, "value": value,
"attachment_id": attachment_id,
"deleted": False})
spec_ref.save(session=session)
return specs
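# Example sketch for attachment_specs_update_or_create above (the id and specs
# are hypothetical): missing keys are inserted, existing keys are overwritten,
# and the passed-in specs dict is returned.
#
#     attachment_specs_update_or_create(ctxt, attachment_id, {'mode': 'rw'})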
@require_context
def volume_get(context, volume_id):
return _volume_get(context, volume_id)
@require_admin_context
def volume_get_all(context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, offset=None):
"""Retrieves all volumes.
If no sort parameters are specified then the returned volumes are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
@require_context
def get_volume_summary(context, project_only):
"""Retrieves all volumes summary.
:param context: context to query under
:param project_only: limit summary to project volumes
:returns: volume summary
"""
if not (project_only or is_admin_context(context)):
raise exception.AdminRequired()
query = model_query(context, func.count(models.Volume.id),
func.sum(models.Volume.size), read_deleted="no")
if project_only:
query = query.filter_by(project_id=context.project_id)
if query is None:
return []
result = query.first()
return (result[0] or 0, result[1] or 0)
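# NOTE: get_volume_summary returns a (volume count, total size) tuple, with
# both values defaulting to 0 when no volumes match.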
@require_admin_context
def volume_get_all_by_host(context, host, filters=None):
"""Retrieves all volumes hosted on a host.
:param context: context to query under
:param host: host for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
# As a side effect of the introduction of pool-aware scheduler,
# newly created volumes will have pool information appended to
    # the 'host' field of a volume record. So a volume record in the DB can
    # now be in either of the forms below:
# Host
# Host#Pool
if host and isinstance(host, six.string_types):
session = get_session()
with session.begin():
host_attr = getattr(models.Volume, 'host')
conditions = [host_attr == host,
host_attr.op('LIKE')(host + '#%')]
query = _volume_get_query(context).filter(or_(*conditions))
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
elif not host:
return []
@require_context
def volume_get_all_by_group(context, group_id, filters=None):
"""Retrieves all volumes associated with the group_id.
:param context: context to query under
:param group_id: consistency group ID for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
query = _volume_get_query(context).filter_by(consistencygroup_id=group_id)
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
@require_context
def volume_get_all_by_generic_group(context, group_id, filters=None):
"""Retrieves all volumes associated with the group_id.
:param context: context to query under
:param group_id: group ID for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
query = _volume_get_query(context).filter_by(group_id=group_id)
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
@require_context
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
offset=None):
"""Retrieves all volumes in a project.
If no sort parameters are specified then the returned volumes are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param project_id: project for all volumes being retrieved
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
authorize_project_context(context, project_id)
# Add in the project filter without modifying the given filters
filters = filters.copy() if filters else {}
filters['project_id'] = project_id
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
def _generate_paginate_query(context, session, marker, limit, sort_keys,
sort_dirs, filters, offset=None,
paginate_type=models.Volume):
"""Generate the query to include the filters and the paginate options.
Returns a query with sorting / pagination criteria added or None
if the given filters will not yield any results.
:param context: context to query under
:param session: the session to use
    :param marker: the last item of the previous page; we return the next
results after this value.
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:param offset: number of items to skip
:param paginate_type: type of pagination to generate
:returns: updated query or None
"""
get_query, process_filters, get = PAGINATION_HELPERS[paginate_type]
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
query = get_query(context, session=session)
if filters:
query = process_filters(query, filters)
if query is None:
return None
marker_object = None
if marker is not None:
marker_object = get(context, marker, session)
return sqlalchemyutils.paginate_query(query, paginate_type, limit,
sort_keys,
marker=marker_object,
sort_dirs=sort_dirs,
offset=offset)
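# NOTE: _generate_paginate_query above relies on the module-level
# PAGINATION_HELPERS mapping, which associates each model class (Volume,
# Snapshot, VolumeAttachment, ...) with a (get_query, process_filters, get)
# tuple used to build the base query, apply the filters, and load the marker
# row for pagination.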
@apply_like_filters(model=models.Volume)
def _process_volume_filters(query, filters):
"""Common filter processing for Volume queries.
Filter values that are in lists, tuples, or sets cause an 'IN' operator
to be used, while exact matching ('==' operator) is used for other values.
A filter key/value of 'no_migration_targets'=True causes volumes with
either a NULL 'migration_status' or a 'migration_status' that does not
start with 'target:' to be retrieved.
A 'metadata' filter key must correspond to a dictionary value of metadata
key-value pairs.
:param query: Model query to use
:param filters: dictionary of filters
:returns: updated query or None
"""
filters = filters.copy()
# 'no_migration_targets' is unique, must be either NULL or
# not start with 'target:'
if filters.get('no_migration_targets', False):
filters.pop('no_migration_targets')
try:
column_attr = getattr(models.Volume, 'migration_status')
conditions = [column_attr == None, # noqa
column_attr.op('NOT LIKE')('target:%')]
query = query.filter(or_(*conditions))
except AttributeError:
LOG.debug("'migration_status' column could not be found.")
return None
host = filters.pop('host', None)
if host:
query = query.filter(_filter_host(models.Volume.host, host))
cluster_name = filters.pop('cluster_name', None)
if cluster_name:
query = query.filter(_filter_host(models.Volume.cluster_name,
cluster_name))
# Apply exact match filters for everything else, ensure that the
# filter value exists on the model
for key in filters.keys():
# metadata/glance_metadata is unique, must be a dict
if key in ('metadata', 'glance_metadata'):
if not isinstance(filters[key], dict):
LOG.debug("'%s' filter value is not valid.", key)
return None
continue
try:
column_attr = getattr(models.Volume, key)
# Do not allow relationship properties since those require
# schema specific knowledge
prop = getattr(column_attr, 'property')
if isinstance(prop, RelationshipProperty):
LOG.debug(("'%s' filter key is not valid, "
"it maps to a relationship."), key)
return None
except AttributeError:
LOG.debug("'%s' filter key is not valid.", key)
return None
# Holds the simple exact matches
filter_dict = {}
# Iterate over all filters, special case the filter if necessary
for key, value in filters.items():
if key == 'metadata':
# model.VolumeMetadata defines the backref to Volumes as
# 'volume_metadata' or 'volume_admin_metadata', use those as
# column attribute keys
col_attr = getattr(models.Volume, 'volume_metadata')
col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
for k, v in value.items():
query = query.filter(or_(col_attr.any(key=k, value=v),
col_ad_attr.any(key=k, value=v)))
elif key == 'glance_metadata':
# use models.Volume.volume_glance_metadata as column attribute key.
col_gl_attr = models.Volume.volume_glance_metadata
for k, v in value.items():
query = query.filter(col_gl_attr.any(key=k, value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(models.Volume, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
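# Example sketch for _process_volume_filters above (filter values are
# hypothetical): list values become IN clauses, metadata dicts become any()
# matches on the metadata relationships, and remaining keys are exact matches.
#
#     filters = {'status': ['available', 'error'], 'metadata': {'k': 'v'}}
#     query = _process_volume_filters(_volume_get_query(ctxt), filters)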
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
if default_keys is None:
default_keys = ['created_at', 'id']
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs):
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys.
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
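# Example for process_sort_params above: a single supplied direction is
# inherited by the default keys that get appended.
#
#     process_sort_params(['size'], ['desc'])
#     # -> (['size', 'created_at', 'id'], ['desc', 'desc', 'desc'])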
@handle_db_data_error
@require_context
def volume_update(context, volume_id, values):
session = get_session()
with session.begin():
metadata = values.get('metadata')
if metadata is not None:
_volume_user_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
admin_metadata = values.get('admin_metadata')
if is_admin_context(context) and admin_metadata is not None:
_volume_admin_metadata_update(context,
volume_id,
values.pop('admin_metadata'),
delete=True,
session=session)
query = _volume_get_query(context, session, joined_load=False)
result = query.filter_by(id=volume_id).update(values)
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
@handle_db_data_error
@require_context
def volumes_update(context, values_list):
session = get_session()
with session.begin():
volume_refs = []
for values in values_list:
volume_id = values['id']
values.pop('id')
metadata = values.get('metadata')
if metadata is not None:
_volume_user_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
admin_metadata = values.get('admin_metadata')
if is_admin_context(context) and admin_metadata is not None:
_volume_admin_metadata_update(context,
volume_id,
values.pop('admin_metadata'),
delete=True,
session=session)
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref.update(values)
volume_refs.append(volume_ref)
return volume_refs
@require_context
def volume_attachment_update(context, attachment_id, values):
query = model_query(context, models.VolumeAttachment)
result = query.filter_by(id=attachment_id).update(values)
if not result:
raise exception.VolumeAttachmentNotFound(
filter='attachment_id = ' + attachment_id)
def volume_update_status_based_on_attachment(context, volume_id):
"""Update volume status based on attachment.
    Get the volume and check whether it has any 'volume_attachment' entries.
    If there are none, set the volume status to 'available';
    otherwise set it to 'in-use'.
:param context: context to query under
:param volume_id: id of volume to be updated
:returns: updated volume
"""
session = get_session()
with session.begin():
volume_ref = _volume_get(context, volume_id, session=session)
# We need to get and update volume using same session because
# there is possibility that instance is deleted between the 'get'
# and 'update' volume call.
if not volume_ref['volume_attachment']:
volume_ref.update({'status': 'available'})
else:
volume_ref.update({'status': 'in-use'})
return volume_ref
def volume_has_snapshots_filter():
return sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted))
def volume_has_undeletable_snapshots_filter():
deletable_statuses = ['available', 'error']
return sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted,
or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None
models.Snapshot.status.notin_(deletable_statuses)),
or_(models.Snapshot.group_snapshot_id != None, # noqa: != None
models.Snapshot.status.notin_(deletable_statuses))))
def volume_has_snapshots_in_a_cgsnapshot_filter():
return sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
models.Snapshot.cgsnapshot_id.isnot(None)))
def volume_has_attachments_filter():
return sql.exists().where(
and_(models.Volume.id == models.VolumeAttachment.volume_id,
models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED,
~models.VolumeAttachment.deleted))
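# NOTE: the volume_has_*_filter helpers above return SQLAlchemy EXISTS
# expressions; they are intended to be used as extra query conditions (for
# example in conditional updates) so that a volume with snapshots or with
# non-detached attachments is not deleted.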
def volume_qos_allows_retype(new_vol_type):
"""Filter to check that qos allows retyping the volume to new_vol_type.
    The returned sqlalchemy filter evaluates to True when the volume's status
    is 'available', or when it is 'in-use' but the qos of new_vol_type is the
    same as the qos of the volume, or when neither of the two volume types has
    a 'consumer' spec key that specifies anything other than the back-end.
"""
# Query to get the qos of the volume type new_vol_type
q = sql.select([models.VolumeTypes.qos_specs_id]).where(and_(
~models.VolumeTypes.deleted,
models.VolumeTypes.id == new_vol_type))
# Construct the filter to check qos when volume is 'in-use'
return or_(
# If volume is available
models.Volume.status == 'available',
# Or both volume types have the same qos specs
sql.exists().where(and_(
~models.VolumeTypes.deleted,
models.VolumeTypes.id == models.Volume.volume_type_id,
models.VolumeTypes.qos_specs_id == q.as_scalar())),
# Or they are different specs but they are handled by the backend or
        # it is not specified. The way SQL evaluates value != 'back-end'
# makes it result in False not only for 'back-end' values but for
# NULL as well, and with the double negation we ensure that we only
# allow QoS with 'consumer' values of 'back-end' and NULL.
and_(
~sql.exists().where(and_(
~models.VolumeTypes.deleted,
models.VolumeTypes.id == models.Volume.volume_type_id,
(models.VolumeTypes.qos_specs_id ==
models.QualityOfServiceSpecs.specs_id),
models.QualityOfServiceSpecs.key == 'consumer',
models.QualityOfServiceSpecs.value != 'back-end')),
~sql.exists().where(and_(
~models.VolumeTypes.deleted,
models.VolumeTypes.id == new_vol_type,
(models.VolumeTypes.qos_specs_id ==
models.QualityOfServiceSpecs.specs_id),
models.QualityOfServiceSpecs.key == 'consumer',
models.QualityOfServiceSpecs.value != 'back-end'))))
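# Usage sketch for volume_qos_allows_retype above (the type id is
# hypothetical): the returned expression can simply be added as a filter.
#
#     allowed = volume_qos_allows_retype(new_vol_type_id)
#     query = model_query(ctxt, models.Volume).filter(allowed)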
####################
def _volume_x_metadata_get_query(context, volume_id, model, session=None):
return model_query(context, model, session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
def _volume_x_metadata_get(context, volume_id, model, session=None):
rows = _volume_x_metadata_get_query(context, volume_id, model,
session=session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec,
session=None):
result = _volume_x_metadata_get_query(context, volume_id,
model, session=session).\
filter_by(key=key).\
first()
if not result:
if model is models.VolumeGlanceMetadata:
raise notfound_exec(id=volume_id)
else:
raise notfound_exec(metadata_key=key, volume_id=volume_id)
return result
def _volume_x_metadata_update(context, volume_id, metadata, delete, model,
session=None, add=True, update=True):
session = session or get_session()
metadata = metadata.copy()
with session.begin(subtransactions=True):
# Set existing metadata to deleted if delete argument is True. This is
# committed immediately to the DB
if delete:
expected_values = {'volume_id': volume_id}
# We don't want to delete keys we are going to update
if metadata:
expected_values['key'] = db.Not(metadata.keys())
conditional_update(context, model,
{'deleted': True,
'deleted_at': timeutils.utcnow()},
expected_values)
# Get existing metadata
db_meta = _volume_x_metadata_get_query(context, volume_id, model).all()
save = []
skip = []
# We only want to send changed metadata.
for row in db_meta:
if row.key in metadata:
value = metadata.pop(row.key)
if row.value != value and update:
# ORM objects will not be saved until we do the bulk save
row.value = value
save.append(row)
continue
skip.append(row)
# We also want to save non-existent metadata
if add:
save.extend(model(key=key, value=value, volume_id=volume_id)
for key, value in metadata.items())
# Do a bulk save
if save:
session.bulk_save_objects(save, update_changed_only=True)
# Construct result dictionary with current metadata
save.extend(skip)
result = {row['key']: row['value'] for row in save}
return result
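# Example sketch (ids and values are hypothetical): the user/admin/image
# metadata helpers below all delegate to _volume_x_metadata_update with their
# own model, e.g. replacing a volume's user metadata in a single call:
#
#     _volume_x_metadata_update(ctxt, volume_id, {'k': 'v'}, delete=True,
#                               model=models.VolumeMetadata)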
def _volume_user_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeMetadata, session=session)
def _volume_image_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeGlanceMetadata,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
def _volume_user_metadata_get_item(context, volume_id, key, session=None):
return _volume_x_metadata_get_item(context, volume_id, key,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeMetadata,
session=session)
@require_context
@require_volume_exists
def _volume_image_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeGlanceMetadata,
session=session)
@require_context
def _volume_glance_metadata_key_to_id(context, volume_id, key):
db_data = volume_glance_metadata_get(context, volume_id)
metadata = {meta_entry.key: meta_entry.id
for meta_entry in db_data
if meta_entry.key == key}
metadata_id = metadata[key]
return metadata_id
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
return _volume_user_metadata_get(context, volume_id)
@require_context
@require_volume_exists
@_retry_on_deadlock
def volume_metadata_delete(context, volume_id, key, meta_type):
if meta_type == common.METADATA_TYPES.user:
(_volume_user_metadata_get_query(context, volume_id).
filter_by(key=key).
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
elif meta_type == common.METADATA_TYPES.image:
metadata_id = _volume_glance_metadata_key_to_id(context,
volume_id, key)
(_volume_image_metadata_get_query(context, volume_id).
filter_by(id=metadata_id).
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume_id)
@require_context
@require_volume_exists
@handle_db_data_error
@_retry_on_deadlock
def volume_metadata_update(context, volume_id, metadata, delete, meta_type):
if meta_type == common.METADATA_TYPES.user:
return _volume_user_metadata_update(context,
volume_id,
metadata,
delete)
elif meta_type == common.METADATA_TYPES.image:
return _volume_image_metadata_update(context,
volume_id,
metadata,
delete)
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume_id)
###################
def _volume_admin_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeAdminMetadata,
session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeAdminMetadata, session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_update(context, volume_id, metadata, delete,
session=None, add=True, update=True):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeAdminMetadata,
session=session, add=add, update=update)
@require_admin_context
def volume_admin_metadata_get(context, volume_id):
return _volume_admin_metadata_get(context, volume_id)
@require_admin_context
@require_volume_exists
@_retry_on_deadlock
def volume_admin_metadata_delete(context, volume_id, key):
_volume_admin_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
@_retry_on_deadlock
def volume_admin_metadata_update(context, volume_id, metadata, delete,
add=True, update=True):
return _volume_admin_metadata_update(context, volume_id, metadata, delete,
add=add, update=update)
###################
@require_context
@handle_db_data_error
def snapshot_create(context, values):
values['snapshot_metadata'] = _metadata_refs(values.get('metadata'),
models.SnapshotMetadata)
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
snapshot_ref = models.Snapshot()
snapshot_ref.update(values)
session.add(snapshot_ref)
return _snapshot_get(context, values['id'], session=session)
@require_admin_context
@_retry_on_deadlock
def snapshot_destroy(context, snapshot_id):
utcnow = timeutils.utcnow()
session = get_session()
with session.begin():
updated_values = {'status': 'deleted',
'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')}
model_query(context, models.Snapshot, session=session).\
filter_by(id=snapshot_id).\
update(updated_values)
model_query(context, models.SnapshotMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')})
del updated_values['updated_at']
return updated_values
@require_context
def _snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
filter_by(id=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result
@require_context
def snapshot_get(context, snapshot_id):
return _snapshot_get(context, snapshot_id)
@require_admin_context
def snapshot_get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
"""Retrieves all snapshots.
If no sorting parameters are specified then returned snapshots are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param filters: dictionary of filters; will do exact matching on values.
Special keys host and cluster_name refer to the volume.
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:returns: list of matching snapshots
"""
if filters and not is_valid_model_filters(models.Snapshot, filters,
exclude_list=('host',
'cluster_name')):
return []
session = get_session()
with session.begin():
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters,
offset, models.Snapshot)
# No snapshots would match, return empty list
if not query:
return []
return query.all()
def _snaps_get_query(context, session=None, project_only=False):
return model_query(context, models.Snapshot, session=session,
project_only=project_only).\
options(joinedload('snapshot_metadata'))
@apply_like_filters(model=models.Snapshot)
def _process_snaps_filters(query, filters):
if filters:
filters = filters.copy()
exclude_list = ('host', 'cluster_name')
        # Ensure that filters' keys exist on the model or are metadata
for key in filters.keys():
            # If filtering on metadata, ensure that the filter value
            # is a dictionary
if key == 'metadata':
if not isinstance(filters[key], dict):
LOG.debug("Metadata filter value is not valid dictionary")
return None
continue
if key in exclude_list:
continue
            # For filter keys other than metadata and the exclude_list,
            # ensure that the keys exist in the Snapshot model
try:
column_attr = getattr(models.Snapshot, key)
prop = getattr(column_attr, 'property')
if isinstance(prop, RelationshipProperty):
LOG.debug(
"'%s' key is not valid, it maps to a relationship.",
key)
return None
except AttributeError:
LOG.debug("'%s' filter key is not valid.", key)
return None
# filter handling for host and cluster name
host = filters.pop('host', None)
cluster = filters.pop('cluster_name', None)
if host or cluster:
query = query.join(models.Snapshot.volume)
vol_field = models.Volume
if host:
query = query.filter(_filter_host(vol_field.host, host))
if cluster:
query = query.filter(_filter_host(vol_field.cluster_name, cluster))
filters_dict = {}
LOG.debug("Building query based on filter")
for key, value in filters.items():
if key == 'metadata':
col_attr = getattr(models.Snapshot, 'snapshot_metadata')
for k, v in value.items():
query = query.filter(col_attr.any(key=k, value=v))
else:
filters_dict[key] = value
# Apply exact matches
if filters_dict:
query = query.filter_by(**filters_dict)
return query
@require_context
def snapshot_get_all_for_volume(context, volume_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(volume_id=volume_id).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_by_host(context, host, filters=None):
if filters and not is_valid_model_filters(models.Snapshot, filters):
return []
query = model_query(context, models.Snapshot, read_deleted='no',
project_only=True)
if filters:
query = query.filter_by(**filters)
# As a side effect of the introduction of pool-aware scheduler,
# newly created volumes will have pool information appended to
    # the 'host' field of a volume record. So a volume record in the DB can
    # now be in either of the forms below:
# Host
# Host#Pool
if host and isinstance(host, six.string_types):
session = get_session()
with session.begin():
host_attr = getattr(models.Volume, 'host')
conditions = [host_attr == host,
host_attr.op('LIKE')(host + '#%')]
query = query.join(models.Snapshot.volume).filter(
or_(*conditions)).options(joinedload('snapshot_metadata'))
return query.all()
elif not host:
return []
@require_context
def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(cgsnapshot_id=cgsnapshot_id).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(group_snapshot_id=group_snapshot_id).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None):
""""Retrieves all snapshots in a project.
If no sorting parameters are specified then returned snapshots are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param project_id: project for all snapshots being retrieved
:param filters: dictionary of filters; will do exact matching on values
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:returns: list of matching snapshots
"""
if filters and not is_valid_model_filters(models.Snapshot, filters):
return []
authorize_project_context(context, project_id)
# Add project_id to filters
filters = filters.copy() if filters else {}
filters['project_id'] = project_id
session = get_session()
with session.begin():
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters,
offset, models.Snapshot)
# No snapshots would match, return empty list
if not query:
return []
query = query.options(joinedload('snapshot_metadata'))
return query.all()
@require_context
def _snapshot_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
authorize_project_context(context, project_id)
query = model_query(context,
func.count(models.Snapshot.id),
func.sum(models.Snapshot.volume_size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.join('volume').filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_context
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
return _snapshot_data_get_for_project(context, project_id, volume_type_id)
@require_context
def snapshot_get_all_active_by_window(context, begin, end=None,
project_id=None):
"""Return snapshots that were active during window."""
query = model_query(context, models.Snapshot, read_deleted="yes")
query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa
models.Snapshot.deleted_at > begin))
query = query.options(joinedload(models.Snapshot.volume))
query = query.options(joinedload('snapshot_metadata'))
if end:
query = query.filter(models.Snapshot.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@handle_db_data_error
@require_context
def snapshot_update(context, snapshot_id, values):
query = model_query(context, models.Snapshot, project_only=True)
result = query.filter_by(id=snapshot_id).update(values)
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
####################
def _snapshot_metadata_get_query(context, snapshot_id, session=None):
return model_query(context, models.SnapshotMetadata,
session=session, read_deleted="no").\
filter_by(snapshot_id=snapshot_id)
@require_context
def _snapshot_metadata_get(context, snapshot_id, session=None):
rows = _snapshot_metadata_get_query(context, snapshot_id, session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
@require_snapshot_exists
def snapshot_metadata_get(context, snapshot_id):
return _snapshot_metadata_get(context, snapshot_id)
@require_context
@require_snapshot_exists
@_retry_on_deadlock
def snapshot_metadata_delete(context, snapshot_id, key):
_snapshot_metadata_get_query(context, snapshot_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_metadata_get_item(context, snapshot_id, key, session=None):
result = _snapshot_metadata_get_query(context,
snapshot_id,
session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.SnapshotMetadataNotFound(metadata_key=key,
snapshot_id=snapshot_id)
return result
@require_context
@require_snapshot_exists
@handle_db_data_error
@_retry_on_deadlock
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
session = get_session()
with session.begin():
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _snapshot_metadata_get(context, snapshot_id,
session)
for meta_key, meta_value in original_metadata.items():
if meta_key not in metadata:
meta_ref = _snapshot_metadata_get_item(context,
snapshot_id,
meta_key, session)
meta_ref.update({'deleted': True,
'deleted_at': timeutils.utcnow()})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta
# objects
for meta_key, meta_value in metadata.items():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = _snapshot_metadata_get_item(context, snapshot_id,
meta_key, session)
except exception.SnapshotMetadataNotFound:
meta_ref = models.SnapshotMetadata()
item.update({"key": meta_key, "snapshot_id": snapshot_id})
meta_ref.update(item)
meta_ref.save(session=session)
return snapshot_metadata_get(context, snapshot_id)
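# Example sketch for snapshot_metadata_update above (ids and values are
# hypothetical): with delete=True the snapshot's metadata is replaced
# entirely, otherwise existing keys are updated and new keys are added.
#
#     snapshot_metadata_update(ctxt, snapshot_id, {'k': 'v'}, delete=False)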
###################
@handle_db_data_error
@require_admin_context
def volume_type_create(context, values, projects=None):
"""Create a new volume type.
    In order to pass in extra specs, the values dict should contain an
'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
if not values.get('id'):
values['id'] = str(uuid.uuid4())
projects = projects or []
session = get_session()
with session.begin():
try:
_volume_type_get_by_name(context, values['name'], session)
raise exception.VolumeTypeExists(id=values['name'])
except exception.VolumeTypeNotFoundByName:
pass
try:
_volume_type_get(context, values['id'], session)
raise exception.VolumeTypeExists(id=values['id'])
except exception.VolumeTypeNotFound:
pass
try:
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
models.VolumeTypeExtraSpecs)
volume_type_ref = models.VolumeTypes()
volume_type_ref.update(values)
session.add(volume_type_ref)
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.VolumeTypeProjects()
access_ref.update({"volume_type_id": volume_type_ref.id,
"project_id": project})
access_ref.save(session=session)
return volume_type_ref
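# Example sketch for volume_type_create above (name, specs and project are
# hypothetical): the type, its extra specs and the project access entries are
# created in a single transaction.
#
#     volume_type_create(ctxt, {'name': 'gold',
#                               'extra_specs': {'replication': 'enabled'}},
#                        projects=['project-uuid'])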
@handle_db_data_error
@require_admin_context
def group_type_create(context, values, projects=None):
"""Create a new group type.
In order to pass in group specs, the values dict should contain a
'group_specs' key/value pair:
{'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
if not values.get('id'):
values['id'] = six.text_type(uuid.uuid4())
projects = projects or []
session = get_session()
with session.begin():
try:
_group_type_get_by_name(context, values['name'], session)
raise exception.GroupTypeExists(id=values['name'])
except exception.GroupTypeNotFoundByName:
pass
try:
_group_type_get(context, values['id'], session)
raise exception.GroupTypeExists(id=values['id'])
except exception.GroupTypeNotFound:
pass
try:
values['group_specs'] = _metadata_refs(values.get('group_specs'),
models.GroupTypeSpecs)
group_type_ref = models.GroupTypes()
group_type_ref.update(values)
session.add(group_type_ref)
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.GroupTypeProjects()
access_ref.update({"group_type_id": group_type_ref.id,
"project_id": project})
access_ref.save(session=session)
return group_type_ref
def _volume_type_get_query(context, session=None, read_deleted='no',
expected_fields=None):
expected_fields = expected_fields or []
query = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
for expected in expected_fields:
query = query.options(joinedload(expected))
if not context.is_admin:
the_filter = [models.VolumeTypes.is_public == true()]
projects_attr = getattr(models.VolumeTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
def _group_type_get_query(context, session=None, read_deleted='no',
expected_fields=None):
expected_fields = expected_fields or []
query = model_query(context,
models.GroupTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('group_specs'))
if 'projects' in expected_fields:
query = query.options(joinedload('projects'))
if not context.is_admin:
the_filter = [models.GroupTypes.is_public == true()]
projects_attr = models.GroupTypes.projects
the_filter.extend([
projects_attr.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
def _process_volume_types_filters(query, filters):
context = filters.pop('context', None)
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.VolumeTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
projects_attr = getattr(models.VolumeTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
if 'is_public' in filters:
del filters['is_public']
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.VolumeTypes, filters):
return
if filters.get('extra_specs') is not None:
the_filter = []
searchdict = filters.pop('extra_specs')
extra_specs = getattr(models.VolumeTypes, 'extra_specs')
for k, v in searchdict.items():
the_filter.extend([extra_specs.any(key=k, value=v,
deleted=False)])
if len(the_filter) > 1:
query = query.filter(and_(*the_filter))
else:
query = query.filter(the_filter[0])
query = query.filter_by(**filters)
return query
def _process_group_types_filters(query, filters):
context = filters.pop('context', None)
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.GroupTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
projects_attr = getattr(models.GroupTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
if 'is_public' in filters:
del filters['is_public']
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.GroupTypes, filters):
return
if filters.get('group_specs') is not None:
the_filter = []
searchdict = filters.pop('group_specs')
group_specs = getattr(models.GroupTypes, 'group_specs')
for k, v in searchdict.items():
the_filter.extend([group_specs.any(key=k, value=v,
deleted=False)])
if len(the_filter) > 1:
query = query.filter(and_(*the_filter))
else:
query = query.filter(the_filter[0])
query = query.filter_by(**filters)
return query
@handle_db_data_error
@require_admin_context
def _type_update(context, type_id, values, is_group):
if is_group:
model = models.GroupTypes
exists_exc = exception.GroupTypeExists
else:
model = models.VolumeTypes
exists_exc = exception.VolumeTypeExists
session = get_session()
with session.begin():
# No description change
if values['description'] is None:
del values['description']
# No is_public change
if values['is_public'] is None:
del values['is_public']
# No name change
if values['name'] is None:
del values['name']
else:
            # Type names are unique. Changing to a name that belongs to a
            # different volume/group type must be prevented.
conditions = and_(model.name == values['name'],
model.id != type_id, ~model.deleted)
query = session.query(sql.exists().where(conditions))
if query.scalar():
raise exists_exc(id=values['name'])
query = model_query(context, model, project_only=True, session=session)
result = query.filter_by(id=type_id).update(values)
if not result:
if is_group:
raise exception.GroupTypeNotFound(group_type_id=type_id)
else:
raise exception.VolumeTypeNotFound(volume_type_id=type_id)
def volume_type_update(context, volume_type_id, values):
_type_update(context, volume_type_id, values, is_group=False)
def group_type_update(context, group_type_id, values):
_type_update(context, group_type_id, values, is_group=True)
@require_context
def volume_type_get_all(context, inactive=False, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None, list_result=False):
"""Returns a dict describing all volume_types with name as key.
If no sort parameters are specified then the returned volume types are
sorted first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_types_filters
function for more information
:param list_result: For compatibility, if list_result = True, return a list
instead of dict.
:returns: list/dict of matching volume types
"""
session = get_session()
with session.begin():
# Add context for _process_volume_types_filters
filters = filters or {}
filters['context'] = context
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset,
models.VolumeTypes)
# No volume types would match, return empty dict or list
if query is None:
if list_result:
return []
return {}
rows = query.all()
if list_result:
result = [_dict_with_extra_specs_if_authorized(context, row)
for row in rows]
return result
result = {row['name']: _dict_with_extra_specs_if_authorized(context,
row)
for row in rows}
return result
@require_context
def group_type_get_all(context, inactive=False, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None, list_result=False):
"""Returns a dict describing all group_types with name as key.
If no sort parameters are specified then the returned group types are
sorted first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_group_types_filters
function for more information
:param list_result: For compatibility, if list_result = True, return a list
instead of dict.
:returns: list/dict of matching group types
"""
session = get_session()
with session.begin():
# Add context for _process_group_types_filters
filters = filters or {}
filters['context'] = context
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset,
models.GroupTypes)
# No group types would match, return empty dict or list
if query is None:
if list_result:
return []
return {}
rows = query.all()
if list_result:
result = [_dict_with_group_specs_if_authorized(context, row)
for row in rows]
return result
result = {row['name']: _dict_with_group_specs_if_authorized(context,
row)
for row in rows}
return result
def _volume_type_get_id_from_volume_type_query(context, id, session=None):
return model_query(
context, models.VolumeTypes.id, read_deleted="no",
session=session, base_model=models.VolumeTypes).\
filter_by(id=id)
def _group_type_get_id_from_group_type_query(context, id, session=None):
return model_query(
context, models.GroupTypes.id, read_deleted="no",
session=session, base_model=models.GroupTypes).\
filter_by(id=id)
def _volume_type_get_id_from_volume_type(context, id, session=None):
result = _volume_type_get_id_from_volume_type_query(
context, id, session=session).first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return result[0]
def _group_type_get_id_from_group_type(context, id, session=None):
result = _group_type_get_id_from_group_type_query(
context, id, session=session).first()
if not result:
raise exception.GroupTypeNotFound(group_type_id=id)
return result[0]
def _volume_type_get_db_object(context, id, session=None, inactive=False,
expected_fields=None):
read_deleted = "yes" if inactive else "no"
result = _volume_type_get_query(
context, session, read_deleted, expected_fields).\
filter_by(id=id).\
first()
return result
def _group_type_get_db_object(context, id, session=None, inactive=False,
expected_fields=None):
read_deleted = "yes" if inactive else "no"
result = _group_type_get_query(
context, session, read_deleted, expected_fields).\
filter_by(id=id).\
first()
return result
@require_context
def _volume_type_get(context, id, session=None, inactive=False,
expected_fields=None):
expected_fields = expected_fields or []
result = _volume_type_get_db_object(context, id, session, inactive,
expected_fields)
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
vtype = _dict_with_extra_specs_if_authorized(context, result)
if 'projects' in expected_fields:
vtype['projects'] = [p['project_id'] for p in result['projects']]
if 'qos_specs' in expected_fields:
vtype['qos_specs'] = result.qos_specs
return vtype
@require_context
def _group_type_get(context, id, session=None, inactive=False,
expected_fields=None):
expected_fields = expected_fields or []
result = _group_type_get_db_object(context, id, session, inactive,
expected_fields)
if not result:
raise exception.GroupTypeNotFound(group_type_id=id)
gtype = _dict_with_group_specs_if_authorized(context, result)
if 'projects' in expected_fields:
gtype['projects'] = [p['project_id'] for p in result['projects']]
return gtype
@require_context
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Return a dict describing specific volume_type."""
return _volume_type_get(context, id,
session=None,
inactive=inactive,
expected_fields=expected_fields)
@require_context
def group_type_get(context, id, inactive=False, expected_fields=None):
"""Return a dict describing specific group_type."""
return _group_type_get(context, id,
session=None,
inactive=inactive,
expected_fields=expected_fields)
def _volume_type_get_full(context, id):
"""Return dict for a specific volume_type with extra_specs and projects."""
return _volume_type_get(context, id, session=None, inactive=False,
expected_fields=('extra_specs', 'projects'))
def _group_type_get_full(context, id):
"""Return dict for a specific group_type with group_specs and projects."""
return _group_type_get(context, id, session=None, inactive=False,
expected_fields=('group_specs', 'projects'))
@require_context
def _volume_type_ref_get(context, id, session=None, inactive=False):
read_deleted = "yes" if inactive else "no"
result = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return result
@require_context
def _group_type_ref_get(context, id, session=None, inactive=False):
read_deleted = "yes" if inactive else "no"
result = model_query(context,
models.GroupTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('group_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.GroupTypeNotFound(group_type_id=id)
return result
@require_context
def _volume_type_get_by_name(context, name, session=None):
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return _dict_with_extra_specs_if_authorized(context, result)
@require_context
def _group_type_get_by_name(context, name, session=None):
result = model_query(context, models.GroupTypes, session=session).\
options(joinedload('group_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.GroupTypeNotFoundByName(group_type_name=name)
return _dict_with_group_specs_if_authorized(context, result)
@require_context
def volume_type_get_by_name(context, name):
"""Return a dict describing specific volume_type."""
return _volume_type_get_by_name(context, name)
@require_context
def group_type_get_by_name(context, name):
"""Return a dict describing specific group_type."""
return _group_type_get_by_name(context, name)
@require_context
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Return a dict describing specific volume_type."""
req_volume_types = []
for vol_t in volume_type_list:
if not uuidutils.is_uuid_like(vol_t):
vol_type = _volume_type_get_by_name(context, vol_t)
else:
vol_type = _volume_type_get(context, vol_t)
req_volume_types.append(vol_type)
return req_volume_types
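# Illustrative sketch (not part of the original module): the helper above
# accepts a mixed list of names and UUIDs; the literal values below are
# assumptions and would raise VolumeTypeNotFound/NotFoundByName if absent.
def _example_resolve_volume_types(ctxt):
    requested = ['gold', '11111111-2222-3333-4444-555555555555']
    return volume_types_get_by_name_or_id(ctxt, requested)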
@require_context
def group_types_get_by_name_or_id(context, group_type_list):
"""Return a dict describing specific group_type."""
req_group_types = []
for grp_t in group_type_list:
if not uuidutils.is_uuid_like(grp_t):
grp_type = _group_type_get_by_name(context, grp_t)
else:
grp_type = _group_type_get(context, grp_t)
req_group_types.append(grp_type)
return req_group_types
@require_admin_context
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
read_deleted = "yes" if inactive else "no"
# Raise QoSSpecsNotFound if no specs found
if not resource_exists(context,
models.QualityOfServiceSpecs,
qos_specs_id):
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
vts = (model_query(context, models.VolumeTypes, read_deleted=read_deleted).
options(joinedload('extra_specs')).
options(joinedload('projects')).
filter_by(qos_specs_id=qos_specs_id).all())
return vts
@require_admin_context
def volume_type_qos_associate(context, type_id, qos_specs_id):
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
update({'qos_specs_id': qos_specs_id,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from qos specs."""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types associated with specified qos specs."""
session = get_session()
with session.begin():
session.query(models.VolumeTypes). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_specs_get(context, type_id):
"""Return all qos specs for given volume type.
result looks like:
{
'qos_specs':
{
'id': 'qos-specs-id',
'name': 'qos_specs_name',
'consumer': 'Consumer',
'specs': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'
}
}
}
"""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
row = session.query(models.VolumeTypes). \
options(joinedload('qos_specs')). \
filter_by(id=type_id). \
first()
        # row.qos_specs is a list of QualityOfServiceSpecs refs
specs = _dict_with_qos_specs(row.qos_specs)
if not specs:
# turn empty list to None
specs = None
else:
specs = specs[0]
return {'qos_specs': specs}
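# Illustrative sketch (not part of the original module): reading the result
# structure documented in volume_type_qos_specs_get() above. ``ctxt`` and
# ``type_id`` are assumptions.
def _example_read_qos_of_type(ctxt, type_id):
    qos = volume_type_qos_specs_get(ctxt, type_id)['qos_specs']
    if qos is None:
        # The volume type has no QoS specs associated.
        return {}
    # e.g. {'total_iops_sec': '1000', 'total_bytes_sec': '1024000'}
    return qos.get('specs', {})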
@require_admin_context
@_retry_on_deadlock
def volume_type_destroy(context, id):
utcnow = timeutils.utcnow()
session = get_session()
with session.begin():
_volume_type_get(context, id, session)
results = model_query(context, models.Volume, session=session). \
filter_by(volume_type_id=id).all()
group_count = model_query(context,
models.GroupVolumeTypeMapping,
read_deleted="no",
session=session).\
filter_by(volume_type_id=id).count()
cg_count = model_query(context, models.ConsistencyGroup,
session=session).filter(
models.ConsistencyGroup.volume_type_id.contains(id)).count()
if results or group_count or cg_count:
LOG.error('VolumeType %s deletion failed, VolumeType in use.', id)
raise exception.VolumeTypeInUse(volume_type_id=id)
updated_values = {'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')}
model_query(context, models.VolumeTypes, session=session).\
filter_by(id=id).\
update(updated_values)
model_query(context, models.VolumeTypeExtraSpecs, session=session).\
filter_by(volume_type_id=id).\
update({'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeTypeProjects, session=session,
read_deleted="int_no").filter_by(
volume_type_id=id).soft_delete(synchronize_session=False)
del updated_values['updated_at']
return updated_values
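# Illustrative sketch (not part of the original module): volume_type_destroy()
# above refuses to delete a type that is still referenced, so a caller might
# handle VolumeTypeInUse explicitly. ``ctxt`` is an assumption.
def _example_delete_volume_type(ctxt, type_id):
    try:
        return volume_type_destroy(ctxt, type_id)
    except exception.VolumeTypeInUse:
        # Still referenced by volumes, generic groups or consistency groups.
        return None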
@require_admin_context
@_retry_on_deadlock
def group_type_destroy(context, id):
session = get_session()
with session.begin():
_group_type_get(context, id, session)
# TODO(xyang): Uncomment the following after groups table is added.
# results = model_query(context, models.Group, session=session). \
# filter_by(group_type_id=id).all()
# if results:
# LOG.error('GroupType %s deletion failed, '
# 'GroupType in use.', id)
# raise exception.GroupTypeInUse(group_type_id=id)
model_query(context, models.GroupTypes, session=session).\
filter_by(id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.GroupTypeSpecs, session=session).\
filter_by(group_type_id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_get_all_active_by_window(context,
begin,
end=None,
project_id=None):
"""Return volumes that were active during window."""
query = model_query(context, models.Volume, read_deleted="yes")
query = query.filter(or_(models.Volume.deleted_at == None, # noqa
models.Volume.deleted_at > begin))
if end:
query = query.filter(models.Volume.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
query = (query.options(joinedload('volume_metadata')).
options(joinedload('volume_type')).
options(joinedload('volume_attachment')).
options(joinedload('consistencygroup')).
options(joinedload('group')))
if is_admin_context(context):
query = query.options(joinedload('volume_admin_metadata'))
return query.all()
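# Illustrative sketch (not part of the original module): a usage-report style
# call to volume_get_all_active_by_window() above. The one-day window is an
# assumption; ``datetime`` is imported locally to keep the sketch
# self-contained.
def _example_volumes_active_last_day(ctxt, project_id=None):
    import datetime
    end = timeutils.utcnow()
    begin = end - datetime.timedelta(days=1)
    return volume_get_all_active_by_window(ctxt, begin, end=end,
                                           project_id=project_id)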
def _volume_type_access_query(context, session=None):
return model_query(context, models.VolumeTypeProjects, session=session,
read_deleted="int_no")
def _group_type_access_query(context, session=None):
return model_query(context, models.GroupTypeProjects, session=session,
read_deleted="int_no")
@require_admin_context
def volume_type_access_get_all(context, type_id):
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
return _volume_type_access_query(context).\
filter_by(volume_type_id=volume_type_id).all()
@require_admin_context
def group_type_access_get_all(context, type_id):
group_type_id = _group_type_get_id_from_group_type(context, type_id)
return _group_type_access_query(context).\
filter_by(group_type_id=group_type_id).all()
def _group_volume_type_mapping_query(context, session=None):
return model_query(context, models.GroupVolumeTypeMapping, session=session,
read_deleted="no")
@require_admin_context
def volume_type_get_all_by_group(context, group_id):
# Generic volume group
mappings = (_group_volume_type_mapping_query(context).
filter_by(group_id=group_id).all())
session = get_session()
with session.begin():
volume_type_ids = [mapping.volume_type_id for mapping in mappings]
query = (model_query(context,
models.VolumeTypes,
session=session,
read_deleted='no').
filter(models.VolumeTypes.id.in_(volume_type_ids)).
options(joinedload('extra_specs')).
options(joinedload('projects')).
all())
return query
def _group_volume_type_mapping_get_all_by_group_volume_type(context, group_id,
volume_type_id):
mappings = _group_volume_type_mapping_query(context).\
filter_by(group_id=group_id).\
filter_by(volume_type_id=volume_type_id).all()
return mappings
@require_admin_context
def volume_type_access_add(context, type_id, project_id):
"""Add given tenant to the volume type access list."""
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
access_ref = models.VolumeTypeProjects()
access_ref.update({"volume_type_id": volume_type_id,
"project_id": project_id})
session = get_session()
with session.begin():
try:
access_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
project_id=project_id)
return access_ref
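# Illustrative sketch (not part of the original module): granting a project
# access to a private volume type and treating a duplicate grant as a no-op.
# ``ctxt`` and the duplicate-tolerant behaviour are assumptions.
def _example_grant_type_access(ctxt, type_id, project_id):
    try:
        return volume_type_access_add(ctxt, type_id, project_id)
    except exception.VolumeTypeAccessExists:
        # The project is already on the access list.
        return None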
@require_admin_context
def group_type_access_add(context, type_id, project_id):
"""Add given tenant to the group type access list."""
group_type_id = _group_type_get_id_from_group_type(context, type_id)
access_ref = models.GroupTypeProjects()
access_ref.update({"group_type_id": group_type_id,
"project_id": project_id})
session = get_session()
with session.begin():
try:
access_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.GroupTypeAccessExists(group_type_id=type_id,
project_id=project_id)
return access_ref
@require_admin_context
def volume_type_access_remove(context, type_id, project_id):
"""Remove given tenant from the volume type access list."""
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
count = (_volume_type_access_query(context).
filter_by(volume_type_id=volume_type_id).
filter_by(project_id=project_id).
soft_delete(synchronize_session=False))
if count == 0:
raise exception.VolumeTypeAccessNotFound(
volume_type_id=type_id, project_id=project_id)
@require_admin_context
def group_type_access_remove(context, type_id, project_id):
"""Remove given tenant from the group type access list."""
group_type_id = _group_type_get_id_from_group_type(context, type_id)
count = (_group_type_access_query(context).
filter_by(group_type_id=group_type_id).
filter_by(project_id=project_id).
soft_delete(synchronize_session=False))
if count == 0:
raise exception.GroupTypeAccessNotFound(
group_type_id=type_id, project_id=project_id)
####################
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
session = get_session()
with session.begin():
_volume_type_extra_specs_get_item(context, volume_type_id, key,
session)
_volume_type_extra_specs_query(context, volume_type_id, session).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
extra_specs_key=key,
volume_type_id=volume_type_id)
return result
@handle_db_data_error
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
specs):
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.items():
try:
spec_ref = _volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
except exception.VolumeTypeExtraSpecsNotFound:
spec_ref = models.VolumeTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"volume_type_id": volume_type_id,
"deleted": False})
spec_ref.save(session=session)
return specs
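# Illustrative sketch (not part of the original module): upserting a couple of
# extra specs through the helper above; existing keys are updated in place and
# missing keys are created. The spec keys and values are assumptions.
def _example_set_extra_specs(ctxt, volume_type_id):
    return volume_type_extra_specs_update_or_create(
        ctxt, volume_type_id,
        {'thin_provisioning_support': '<is> True',
         'compression_support': '<is> False'})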
####################
def _group_type_specs_query(context, group_type_id, session=None):
return model_query(context, models.GroupTypeSpecs, session=session,
read_deleted="no").\
filter_by(group_type_id=group_type_id)
@require_context
def group_type_specs_get(context, group_type_id):
rows = _group_type_specs_query(context, group_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def group_type_specs_delete(context, group_type_id, key):
session = get_session()
with session.begin():
_group_type_specs_get_item(context, group_type_id, key,
session)
_group_type_specs_query(context, group_type_id, session).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _group_type_specs_get_item(context, group_type_id, key,
session=None):
result = _group_type_specs_query(
context, group_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.GroupTypeSpecsNotFound(
group_specs_key=key,
group_type_id=group_type_id)
return result
@handle_db_data_error
@require_context
def group_type_specs_update_or_create(context, group_type_id,
specs):
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.items():
try:
spec_ref = _group_type_specs_get_item(
context, group_type_id, key, session)
except exception.GroupTypeSpecsNotFound:
spec_ref = models.GroupTypeSpecs()
spec_ref.update({"key": key, "value": value,
"group_type_id": group_type_id,
"deleted": False})
spec_ref.save(session=session)
return specs
####################
@require_admin_context
def qos_specs_create(context, values):
"""Create a new QoS specs.
    :param values: dictionary that contains specifications for QoS
e.g. {'name': 'Name',
'consumer': 'front-end',
'specs': {
'total_iops_sec': 1000,
'total_bytes_sec': 1024000
}
}
"""
specs_id = str(uuid.uuid4())
session = get_session()
with session.begin():
try:
_qos_specs_get_all_by_name(context, values['name'], session)
raise exception.QoSSpecsExists(specs_id=values['name'])
except exception.QoSSpecsNotFound:
pass
try:
# Insert a root entry for QoS specs
specs_root = models.QualityOfServiceSpecs()
root = dict(id=specs_id)
# 'QoS_Specs_Name' is an internal reserved key to store
# the name of QoS specs
root['key'] = 'QoS_Specs_Name'
root['value'] = values['name']
LOG.debug("DB qos_specs_create(): root %s", root)
specs_root.update(root)
specs_root.save(session=session)
# Save 'consumer' value directly as it will not be in
# values['specs'] and so we avoid modifying/copying passed in dict
consumer = {'key': 'consumer',
'value': values['consumer'],
'specs_id': specs_id,
'id': six.text_type(uuid.uuid4())}
cons_entry = models.QualityOfServiceSpecs()
cons_entry.update(consumer)
cons_entry.save(session=session)
# Insert all specification entries for QoS specs
for k, v in values.get('specs', {}).items():
item = dict(key=k, value=v, specs_id=specs_id)
item['id'] = str(uuid.uuid4())
spec_entry = models.QualityOfServiceSpecs()
spec_entry.update(item)
spec_entry.save(session=session)
except db_exc.DBDataError:
msg = _('Error writing field to database')
LOG.exception(msg)
raise exception.Invalid(msg)
except Exception as e:
raise db_exc.DBError(e)
return dict(id=specs_root.id, name=specs_root.value)
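# Illustrative sketch (not part of the original module): creating a QoS specs
# entry with the values shape documented in qos_specs_create() above. The name
# and limits are assumptions.
def _example_create_qos(ctxt):
    return qos_specs_create(ctxt, {'name': 'example-qos',
                                   'consumer': 'front-end',
                                   'specs': {'total_iops_sec': 1000,
                                             'total_bytes_sec': 1024000}})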
@require_admin_context
def _qos_specs_get_all_by_name(context, name, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
results = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(key='QoS_Specs_Name'). \
filter_by(value=name). \
options(joinedload('specs')).all()
if not results:
raise exception.QoSSpecsNotFound(specs_id=name)
return results
@require_admin_context
def _qos_specs_get_all_ref(context, qos_specs_id, session=None,
inactive=False):
read_deleted = 'yes' if inactive else 'no'
result = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(id=qos_specs_id). \
options(joinedload_all('specs')).all()
if not result:
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
return result
def _dict_with_children_specs(specs):
"""Convert specs list to a dict."""
result = {}
for spec in specs:
# Skip deleted keys
if not spec['deleted']:
result.update({spec['key']: spec['value']})
return result
def _dict_with_qos_specs(rows):
"""Convert qos specs query results to list.
    Qos specs query results are a list of quality_of_service_specs refs;
    some rows are the root entry of a qos specs (key == 'QoS_Specs_Name') and
    the rest are children entries, i.e. the detailed specs of that qos specs.
    This function collapses the rows into a list of dicts, one per qos specs,
    with the child specs gathered under the 'specs' key.
"""
result = []
for row in rows:
if row['key'] == 'QoS_Specs_Name':
member = {'name': row['value'], 'id': row['id']}
if row.specs:
spec_dict = _dict_with_children_specs(row.specs)
member['consumer'] = spec_dict.pop('consumer')
member.update(dict(specs=spec_dict))
result.append(member)
return result
@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
rows = _qos_specs_get_all_ref(context, qos_specs_id, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Returns a list of all qos_specs.
    Results look like:
[{
'id': SPECS-UUID,
'name': 'qos_spec-1',
'consumer': 'back-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
{
'id': SPECS-UUID,
'name': 'qos_spec-2',
'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
]
"""
session = get_session()
with session.begin():
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters,
offset, models.QualityOfServiceSpecs)
# No Qos specs would match, return empty list
if query is None:
return []
rows = query.all()
return _dict_with_qos_specs(rows)
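# Illustrative sketch (not part of the original module): listing QoS specs in
# pages of 20, newest first, with qos_specs_get_all() above. The page size is
# an assumption.
def _example_list_qos_specs(ctxt, marker=None):
    return qos_specs_get_all(ctxt, marker=marker, limit=20,
                             sort_keys=['created_at'], sort_dirs=['desc'])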
@require_admin_context
def _qos_specs_get_query(context, session):
rows = model_query(context, models.QualityOfServiceSpecs,
session=session,
read_deleted='no').\
options(joinedload_all('specs')).filter_by(key='QoS_Specs_Name')
return rows
def _process_qos_specs_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.QualityOfServiceSpecs, filters):
return
query = query.filter_by(**filters)
return query
@require_admin_context
def _qos_specs_get(context, qos_spec_id, session=None):
result = model_query(context, models.QualityOfServiceSpecs,
session=session,
read_deleted='no').\
filter_by(id=qos_spec_id).filter_by(key='QoS_Specs_Name').first()
if not result:
raise exception.QoSSpecsNotFound(specs_id=qos_spec_id)
return result
@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
rows = _qos_specs_get_all_by_name(context, name, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
"""Return all entities associated with specified qos specs.
    For now, the only entity that can be associated with a qos specs is a
    volume type, so this is just a wrapper of
    volume_type_qos_associations_get(). It is possible to extend qos specs
    associations to other entities, such as volumes, sometime in the future.
"""
return volume_type_qos_associations_get(context, qos_specs_id)
@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate volume type from specified qos specs."""
return volume_type_qos_associate(context, type_id, qos_specs_id)
@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from specified qos specs."""
return volume_type_qos_disassociate(context, qos_specs_id, type_id)
@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate all entities associated with specified qos specs.
    For now, the only entity that can be associated with a qos specs is a
    volume type, so this is just a wrapper of
    volume_type_qos_disassociate_all(). It is possible to extend qos specs
    associations to other entities, such as volumes, sometime in the future.
"""
return volume_type_qos_disassociate_all(context, qos_specs_id)
@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
session = get_session()
with session.begin():
session.query(models.QualityOfServiceSpecs). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def qos_specs_delete(context, qos_specs_id):
session = get_session()
with session.begin():
_qos_specs_get_all_ref(context, qos_specs_id, session)
updated_values = {'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}
session.query(models.QualityOfServiceSpecs).\
filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
models.QualityOfServiceSpecs.specs_id ==
qos_specs_id)).\
update(updated_values)
del updated_values['updated_at']
return updated_values
@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
result = model_query(context, models.QualityOfServiceSpecs,
session=session). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
first()
if not result:
raise exception.QoSSpecsKeyNotFound(
specs_key=key,
specs_id=qos_specs_id)
return result
@handle_db_data_error
@require_admin_context
def qos_specs_update(context, qos_specs_id, updates):
"""Make updates to an existing qos specs.
    Adds, updates or deletes key/value pairs on the qos specs.
"""
session = get_session()
with session.begin():
# make sure qos specs exists
exists = resource_exists(context, models.QualityOfServiceSpecs,
qos_specs_id, session)
if not exists:
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
specs = updates.get('specs', {})
if 'consumer' in updates:
# Massage consumer to the right place for DB and copy specs
# before updating so we don't modify dict for caller
specs = specs.copy()
specs['consumer'] = updates['consumer']
spec_ref = None
for key in specs.keys():
try:
spec_ref = _qos_specs_get_item(
context, qos_specs_id, key, session)
except exception.QoSSpecsKeyNotFound:
spec_ref = models.QualityOfServiceSpecs()
            id = spec_ref.get('id') or str(uuid.uuid4())
value = dict(id=id, key=key, value=specs[key],
specs_id=qos_specs_id,
deleted=False)
LOG.debug('qos_specs_update() value: %s', value)
spec_ref.update(value)
spec_ref.save(session=session)
return specs
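# Illustrative sketch (not part of the original module): qos_specs_update()
# above performs a per-key upsert, so new keys are added and existing keys are
# overwritten. The consumer value and the spec key below are assumptions.
def _example_tune_qos(ctxt, qos_specs_id):
    return qos_specs_update(ctxt, qos_specs_id,
                            {'consumer': 'both',
                             'specs': {'total_iops_sec': 2000}})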
####################
@require_context
def volume_type_encryption_get(context, volume_type_id, session=None):
return model_query(context, models.Encryption, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id).first()
@require_admin_context
def volume_type_encryption_delete(context, volume_type_id):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
if not encryption:
raise exception.VolumeTypeEncryptionNotFound(
type_id=volume_type_id)
encryption.update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@handle_db_data_error
@require_admin_context
def volume_type_encryption_create(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = models.Encryption()
if 'volume_type_id' not in values:
values['volume_type_id'] = volume_type_id
if 'encryption_id' not in values:
values['encryption_id'] = six.text_type(uuid.uuid4())
encryption.update(values)
session.add(encryption)
return encryption
@handle_db_data_error
@require_admin_context
def volume_type_encryption_update(context, volume_type_id, values):
query = model_query(context, models.Encryption)
result = query.filter_by(volume_type_id=volume_type_id).update(values)
if not result:
raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id)
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
volume_list = _volume_get_query(context, session=session,
project_only=False).\
filter_by(volume_type_id=volume_type_id).\
all()
return volume_list
####################
@require_context
def volume_encryption_metadata_get(context, volume_id, session=None):
"""Return the encryption metadata for a given volume."""
volume_ref = _volume_get(context, volume_id)
encryption_ref = volume_type_encryption_get(context,
volume_ref['volume_type_id'])
values = {
'encryption_key_id': volume_ref['encryption_key_id'],
}
if encryption_ref:
for key in ['control_location', 'cipher', 'key_size', 'provider']:
values[key] = encryption_ref[key]
return values
####################
@require_context
def _volume_glance_metadata_get_all(context, session=None):
query = model_query(context,
models.VolumeGlanceMetadata,
session=session)
if is_user_context(context):
query = query.filter(
models.Volume.id == models.VolumeGlanceMetadata.volume_id,
models.Volume.project_id == context.project_id)
return query.all()
@require_context
def volume_glance_metadata_get_all(context):
"""Return the Glance metadata for all volumes."""
return _volume_glance_metadata_get_all(context)
@require_context
def volume_glance_metadata_list_get(context, volume_id_list):
"""Return the glance metadata for a volume list."""
query = model_query(context,
models.VolumeGlanceMetadata,
session=None)
query = query.filter(
models.VolumeGlanceMetadata.volume_id.in_(volume_id_list))
return query.all()
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(volume_id=volume_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=volume_id)
return rows
@require_context
@require_volume_exists
def volume_glance_metadata_get(context, volume_id):
"""Return the Glance metadata for the specified volume."""
return _volume_glance_metadata_get(context, volume_id)
@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=snapshot_id)
return rows
@require_context
@require_snapshot_exists
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return _volume_snapshot_glance_metadata_get(context, snapshot_id)
@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for a volume by adding a new key:value pair.
This API does not support changing the value of a key once it has been
created.
"""
session = get_session()
with session.begin():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
volume_id=volume_id)
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = key
vol_glance_metadata.value = six.text_type(value)
session.add(vol_glance_metadata)
return
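# Illustrative sketch (not part of the original module): the helper above is
# create-only, so writing the same key twice raises GlanceMetadataExists
# instead of updating the value. ``ctxt`` and the key/value are assumptions.
def _example_tag_image_property(ctxt, volume_id):
    try:
        volume_glance_metadata_create(ctxt, volume_id, 'image_name', 'cirros')
    except exception.GlanceMetadataExists:
        # The key was already recorded for this volume; it cannot be changed.
        pass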
@require_context
@require_volume_exists
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
"""Update the Glance metadata for a volume by adding new key:value pairs.
This API does not support changing the value of a key once it has been
created.
"""
session = get_session()
with session.begin():
for (key, value) in metadata.items():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
volume_id=volume_id)
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = key
vol_glance_metadata.value = six.text_type(value)
session.add(vol_glance_metadata)
@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This copies all of the key:value pairs from the originating volume, to
ensure that a volume created from the snapshot will retain the
original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context, volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.snapshot_id = snapshot_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume.
    This copies all of the key:value pairs from the originating volume,
to ensure that a volume created from the volume (clone) will
retain the original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context,
src_volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update Glance metadata from a volume.
Update the Glance metadata from a volume (created from a snapshot) by
copying all of the key:value pairs from the originating snapshot.
This is so that the Glance metadata from the original volume is retained.
"""
session = get_session()
with session.begin():
metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
def volume_glance_metadata_delete_by_volume(context, volume_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def backup_get(context, backup_id, read_deleted=None, project_only=True):
return _backup_get(context, backup_id,
read_deleted=read_deleted,
project_only=project_only)
def _backup_get(context, backup_id, session=None, read_deleted=None,
project_only=True):
result = model_query(context, models.Backup, session=session,
project_only=project_only,
read_deleted=read_deleted).\
filter_by(id=backup_id).\
first()
if not result:
raise exception.BackupNotFound(backup_id=backup_id)
return result
def _backup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.Backup, filters):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.Backup)
if query is None:
return []
return query.all()
def _backups_get_query(context, session=None, project_only=False):
return model_query(context, models.Backup, session=session,
project_only=project_only)
@apply_like_filters(model=models.Backup)
def _process_backups_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.Backup, filters):
return
query = query.filter_by(**filters)
return query
@require_admin_context
def backup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
sort_dirs)
@require_admin_context
def backup_get_all_by_host(context, host):
return model_query(context, models.Backup).filter_by(host=host).all()
@require_context
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, offset=None, sort_keys=None,
sort_dirs=None):
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
sort_dirs)
@require_context
def backup_get_all_by_volume(context, volume_id, filters=None):
authorize_project_context(context, volume_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['volume_id'] = volume_id
return _backup_get_all(context, filters)
@require_context
def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
"""Return backups that were active during window."""
query = model_query(context, models.Backup, read_deleted="yes")
query = query.filter(or_(models.Backup.deleted_at == None, # noqa
models.Backup.deleted_at > begin))
if end:
query = query.filter(models.Backup.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@handle_db_data_error
@require_context
def backup_create(context, values):
backup = models.Backup()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
backup.update(values)
session = get_session()
with session.begin():
backup.save(session)
return backup
@handle_db_data_error
@require_context
def backup_update(context, backup_id, values):
if 'fail_reason' in values:
values = values.copy()
values['fail_reason'] = (values['fail_reason'] or '')[:255]
query = model_query(context, models.Backup, read_deleted="yes")
result = query.filter_by(id=backup_id).update(values)
if not result:
raise exception.BackupNotFound(backup_id=backup_id)
@require_admin_context
def backup_destroy(context, backup_id):
updated_values = {'status': fields.BackupStatus.DELETED,
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}
model_query(context, models.Backup).\
filter_by(id=backup_id).\
update(updated_values)
del updated_values['updated_at']
return updated_values
###############################
@require_context
def _transfer_get(context, transfer_id, session=None):
query = model_query(context, models.Transfer,
session=session).\
filter_by(id=transfer_id)
if not is_admin_context(context):
volume = models.Volume
query = query.filter(models.Transfer.volume_id == volume.id,
volume.project_id == context.project_id)
result = query.first()
if not result:
raise exception.TransferNotFound(transfer_id=transfer_id)
return result
@require_context
def transfer_get(context, transfer_id):
return _transfer_get(context, transfer_id)
def _translate_transfers(transfers):
fields = ('id', 'volume_id', 'display_name', 'created_at', 'deleted')
return [{k: transfer[k] for k in fields} for transfer in transfers]
@require_admin_context
def transfer_get_all(context):
results = model_query(context, models.Transfer).all()
return _translate_transfers(results)
@require_context
def transfer_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
query = (model_query(context, models.Transfer)
.filter(models.Volume.id == models.Transfer.volume_id,
models.Volume.project_id == project_id))
results = query.all()
return _translate_transfers(results)
@require_context
@handle_db_data_error
def transfer_create(context, values):
if not values.get('id'):
values['id'] = str(uuid.uuid4())
transfer_id = values['id']
volume_id = values['volume_id']
session = get_session()
with session.begin():
expected = {'id': volume_id,
'status': 'available'}
update = {'status': 'awaiting-transfer'}
if not conditional_update(context, models.Volume, update, expected):
msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s '
'expected in available state.')
% {'transfer_id': transfer_id, 'volume_id': volume_id})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
transfer = models.Transfer()
transfer.update(values)
session.add(transfer)
return transfer
@require_context
@_retry_on_deadlock
def transfer_destroy(context, transfer_id):
utcnow = timeutils.utcnow()
session = get_session()
with session.begin():
volume_id = _transfer_get(context, transfer_id, session)['volume_id']
expected = {'id': volume_id,
'status': 'awaiting-transfer'}
update = {'status': 'available'}
if not conditional_update(context, models.Volume, update, expected):
# If the volume state is not 'awaiting-transfer' don't change it,
# but we can still mark the transfer record as deleted.
msg = (_('Transfer %(transfer_id)s: Volume expected in '
'awaiting-transfer state.')
% {'transfer_id': transfer_id})
LOG.error(msg)
updated_values = {'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')}
(model_query(context, models.Transfer, session=session)
.filter_by(id=transfer_id)
.update(updated_values))
del updated_values['updated_at']
return updated_values
@require_context
def transfer_accept(context, transfer_id, user_id, project_id):
session = get_session()
with session.begin():
volume_id = _transfer_get(context, transfer_id, session)['volume_id']
expected = {'id': volume_id,
'status': 'awaiting-transfer'}
update = {'status': 'available',
'user_id': user_id,
'project_id': project_id,
'updated_at': models.Volume.updated_at}
if not conditional_update(context, models.Volume, update, expected):
msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s '
'expected in awaiting-transfer state.')
% {'transfer_id': transfer_id, 'volume_id': volume_id})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
(session.query(models.Transfer)
.filter_by(id=transfer_id)
.update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
###############################
@require_admin_context
def _consistencygroup_data_get_for_project(context, project_id,
session=None):
query = model_query(context,
func.count(models.ConsistencyGroup.id),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
result = query.first()
return (0, result[0] or 0)
@require_context
def _consistencygroup_get(context, consistencygroup_id, session=None):
result = model_query(context, models.ConsistencyGroup, session=session,
project_only=True).\
filter_by(id=consistencygroup_id).\
first()
if not result:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=consistencygroup_id)
return result
@require_context
def consistencygroup_get(context, consistencygroup_id):
return _consistencygroup_get(context, consistencygroup_id)
def _consistencygroups_get_query(context, session=None, project_only=False):
return model_query(context, models.ConsistencyGroup, session=session,
project_only=project_only)
def _process_consistencygroups_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.ConsistencyGroup, filters):
return
query = query.filter_by(**filters)
return query
def _consistencygroup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.ConsistencyGroup,
filters):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.ConsistencyGroup)
if query is None:
return []
return query.all()
@require_admin_context
def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Retrieves all consistency groups.
If no sort parameters are specified then the returned cgs are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: Filters for the query in the form of key/value.
:returns: list of matching consistency groups
"""
return _consistencygroup_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@require_context
def consistencygroup_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Retrieves all consistency groups in a project.
If no sort parameters are specified then the returned cgs are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: Filters for the query in the form of key/value.
:returns: list of matching consistency groups
"""
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _consistencygroup_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@handle_db_data_error
@require_context
def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None):
cg_model = models.ConsistencyGroup
values = values.copy()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
if cg_snap_id:
conditions = [cg_model.id == models.Cgsnapshot.consistencygroup_id,
models.Cgsnapshot.id == cg_snap_id]
elif cg_id:
conditions = [cg_model.id == cg_id]
else:
conditions = None
if conditions:
# We don't want duplicated field values
names = ['volume_type_id', 'availability_zone', 'host',
'cluster_name']
for name in names:
values.pop(name, None)
fields = [getattr(cg_model, name) for name in names]
fields.extend(bindparam(k, v) for k, v in values.items())
sel = session.query(*fields).filter(*conditions)
names.extend(values.keys())
insert_stmt = cg_model.__table__.insert().from_select(names, sel)
result = session.execute(insert_stmt)
# If we couldn't insert the row because of the conditions raise
# the right exception
if not result.rowcount:
if cg_id:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=cg_id)
raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_snap_id)
else:
consistencygroup = cg_model()
consistencygroup.update(values)
session.add(consistencygroup)
return _consistencygroup_get(context, values['id'], session=session)
@handle_db_data_error
@require_context
def consistencygroup_update(context, consistencygroup_id, values):
query = model_query(context, models.ConsistencyGroup, project_only=True)
result = query.filter_by(id=consistencygroup_id).update(values)
if not result:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=consistencygroup_id)
@require_admin_context
def consistencygroup_destroy(context, consistencygroup_id):
utcnow = timeutils.utcnow()
session = get_session()
with session.begin():
updated_values = {'status': fields.ConsistencyGroupStatus.DELETED,
'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')}
model_query(context, models.ConsistencyGroup, session=session).\
filter_by(id=consistencygroup_id).\
update({'status': fields.ConsistencyGroupStatus.DELETED,
'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')})
del updated_values['updated_at']
return updated_values
@require_admin_context
def cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids,
volume_ids, snapshot_ids, session):
utcnow = timeutils.utcnow()
if snapshot_ids:
snaps = (model_query(context, models.Snapshot,
session=session, read_deleted="no").
filter(models.Snapshot.id.in_(snapshot_ids)).
all())
for snap in snaps:
snap.update({'cgsnapshot_id': None,
'updated_at': utcnow})
if cgsnapshot_ids:
cg_snaps = (model_query(context, models.Cgsnapshot,
session=session, read_deleted="no").
filter(models.Cgsnapshot.id.in_(cgsnapshot_ids)).
all())
for cg_snap in cg_snaps:
cg_snap.delete(session=session)
if volume_ids:
vols = (model_query(context, models.Volume,
session=session, read_deleted="no").
filter(models.Volume.id.in_(volume_ids)).
all())
for vol in vols:
vol.update({'consistencygroup_id': None,
'updated_at': utcnow})
if cg_ids:
cgs = (model_query(context, models.ConsistencyGroup,
session=session, read_deleted="no").
filter(models.ConsistencyGroup.id.in_(cg_ids)).
all())
for cg in cgs:
cg.delete(session=session)
def cg_has_cgsnapshot_filter():
"""Return a filter that checks if a CG has CG Snapshots."""
return sql.exists().where(and_(
models.Cgsnapshot.consistencygroup_id == models.ConsistencyGroup.id,
~models.Cgsnapshot.deleted))
def cg_has_volumes_filter(attached_or_with_snapshots=False):
"""Return a filter to check if a CG has volumes.
    When the attached_or_with_snapshots parameter is True, only volumes that
    are attached or have snapshots will be considered.
"""
query = sql.exists().where(
and_(models.Volume.consistencygroup_id == models.ConsistencyGroup.id,
~models.Volume.deleted))
if attached_or_with_snapshots:
query = query.where(or_(
models.Volume.attach_status == 'attached',
sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted))))
return query
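# Illustrative sketch (not part of the original module): the filters above are
# SQL EXISTS expressions, so they can be negated and attached to a query. Here
# we look up consistency groups with no volumes and no CG snapshots, i.e. the
# ones that are safe to delete. ``ctxt`` is an assumption.
def _example_deletable_cgs(ctxt):
    return (_consistencygroups_get_query(ctxt, project_only=True).
            filter(~cg_has_volumes_filter()).
            filter(~cg_has_cgsnapshot_filter()).
            all())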
def cg_creating_from_src(cg_id=None, cgsnapshot_id=None):
"""Return a filter to check if a CG is being used as creation source.
    The returned filter is meant to be used in the Conditional Update
    mechanism and checks whether the provided CG ID or CG Snapshot ID is
    currently being used to create another CG.
    This filter will not include CGs that have used the ID but have already
    finished their creation (their status is no longer 'creating').
    The filter uses a subquery, which allows it to be used in updates to the
    consistencygroups table.
"""
# NOTE(geguileo): As explained in devref api_conditional_updates we use a
# subquery to trick MySQL into using the same table in the update and the
# where clause.
subq = sql.select([models.ConsistencyGroup]).where(
and_(~models.ConsistencyGroup.deleted,
models.ConsistencyGroup.status == 'creating')).alias('cg2')
if cg_id:
match_id = subq.c.source_cgid == cg_id
elif cgsnapshot_id:
match_id = subq.c.cgsnapshot_id == cgsnapshot_id
else:
msg = _('cg_creating_from_src must be called with cg_id or '
'cgsnapshot_id parameter.')
raise exception.ProgrammingError(reason=msg)
return sql.exists([subq]).where(match_id)
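# Illustrative sketch (not part of the original module): composing the filter
# above with a plain query to check whether a CG is currently being used as
# the source of another CG creation. Using it this way (rather than inside a
# conditional update) is an assumption for demonstration purposes.
def _example_cg_busy_as_source(ctxt, cg_id):
    busy = (_consistencygroups_get_query(ctxt).
            filter(models.ConsistencyGroup.id == cg_id).
            filter(cg_creating_from_src(cg_id=cg_id)).
            first())
    return busy is not None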
@require_admin_context
def consistencygroup_include_in_cluster(context, cluster,
partial_rename=True, **filters):
"""Include all consistency groups matching the filters into a cluster."""
return _include_in_cluster(context, cluster, models.ConsistencyGroup,
partial_rename, filters)
###############################
@require_admin_context
def _group_data_get_for_project(context, project_id,
session=None):
query = model_query(context,
func.count(models.Group.id),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
result = query.first()
return (0, result[0] or 0)
@require_context
def _group_get(context, group_id, session=None):
result = (model_query(context, models.Group, session=session,
project_only=True).
filter_by(id=group_id).
first())
if not result:
raise exception.GroupNotFound(group_id=group_id)
return result
@require_context
def group_get(context, group_id):
return _group_get(context, group_id)
def _groups_get_query(context, session=None, project_only=False):
return model_query(context, models.Group, session=session,
project_only=project_only)
def _group_snapshot_get_query(context, session=None, project_only=False):
return model_query(context, models.GroupSnapshot, session=session,
project_only=project_only)
@apply_like_filters(model=models.Group)
def _process_groups_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.Group, filters):
return
query = query.filter_by(**filters)
return query
@apply_like_filters(model=models.GroupSnapshot)
def _process_group_snapshot_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.GroupSnapshot, filters):
return
query = query.filter_by(**filters)
return query
def _group_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.Group,
filters):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.Group)
return query.all() if query else []
@require_admin_context
def group_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Retrieves all groups.
If no sort parameters are specified then the returned groups are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: Filters for the query in the form of key/value.
:returns: list of matching groups
"""
return _group_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@require_context
def group_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Retrieves all groups in a project.
If no sort parameters are specified then the returned groups are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: Filters for the query in the form of key/value.
:returns: list of matching groups
"""
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _group_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@handle_db_data_error
@require_context
def group_create(context, values, group_snapshot_id=None,
source_group_id=None):
group_model = models.Group
values = values.copy()
if not values.get('id'):
values['id'] = six.text_type(uuid.uuid4())
session = get_session()
with session.begin():
if group_snapshot_id:
conditions = [group_model.id == models.GroupSnapshot.group_id,
models.GroupSnapshot.id == group_snapshot_id]
elif source_group_id:
conditions = [group_model.id == source_group_id]
else:
conditions = None
if conditions:
# We don't want duplicated field values
values.pop('group_type_id', None)
values.pop('availability_zone', None)
values.pop('host', None)
# NOTE(xyang): Save volume_type_ids to update later.
volume_type_ids = values.pop('volume_type_ids', [])
sel = session.query(group_model.group_type_id,
group_model.availability_zone,
group_model.host,
*(bindparam(k, v) for k, v in values.items())
).filter(*conditions)
names = ['group_type_id', 'availability_zone', 'host']
names.extend(values.keys())
insert_stmt = group_model.__table__.insert().from_select(
names, sel)
result = session.execute(insert_stmt)
# If we couldn't insert the row because of the conditions raise
# the right exception
if not result.rowcount:
if source_group_id:
raise exception.GroupNotFound(
group_id=source_group_id)
raise exception.GroupSnapshotNotFound(
group_snapshot_id=group_snapshot_id)
for item in volume_type_ids:
mapping = models.GroupVolumeTypeMapping()
mapping['volume_type_id'] = item
mapping['group_id'] = values['id']
session.add(mapping)
else:
for item in values.get('volume_type_ids') or []:
mapping = models.GroupVolumeTypeMapping()
mapping['volume_type_id'] = item
mapping['group_id'] = values['id']
session.add(mapping)
group = group_model()
group.update(values)
session.add(group)
return _group_get(context, values['id'], session=session)
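# Illustrative sketch (not part of the original module): creating a group from
# an existing group snapshot with group_create() above; group_type_id,
# availability_zone and host are copied from the snapshot's group, so only the
# remaining fields are supplied. All literal values are assumptions.
def _example_group_from_snapshot(ctxt, group_snapshot_id):
    values = {'user_id': ctxt.user_id,
              'project_id': ctxt.project_id,
              'status': 'creating',
              'group_snapshot_id': group_snapshot_id}
    return group_create(ctxt, values, group_snapshot_id=group_snapshot_id)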
@handle_db_data_error
@require_context
def group_volume_type_mapping_create(context, group_id, volume_type_id):
"""Add group volume_type mapping entry."""
# Verify group exists
_group_get(context, group_id)
# Verify volume type exists
_volume_type_get_id_from_volume_type(context, volume_type_id)
existing = _group_volume_type_mapping_get_all_by_group_volume_type(
context, group_id, volume_type_id)
if existing:
raise exception.GroupVolumeTypeMappingExists(
group_id=group_id,
volume_type_id=volume_type_id)
mapping = models.GroupVolumeTypeMapping()
mapping.update({"group_id": group_id,
"volume_type_id": volume_type_id})
session = get_session()
with session.begin():
try:
mapping.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.GroupVolumeTypeMappingExists(
group_id=group_id,
volume_type_id=volume_type_id)
return mapping
@handle_db_data_error
@require_context
def group_update(context, group_id, values):
query = model_query(context, models.Group, project_only=True)
result = query.filter_by(id=group_id).update(values)
if not result:
raise exception.GroupNotFound(group_id=group_id)
@require_admin_context
def group_destroy(context, group_id):
session = get_session()
with session.begin():
(model_query(context, models.Group, session=session).
filter_by(id=group_id).
update({'status': fields.GroupStatus.DELETED,
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
(session.query(models.GroupVolumeTypeMapping).
filter_by(group_id=group_id).
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
def group_has_group_snapshot_filter():
return sql.exists().where(and_(
models.GroupSnapshot.group_id == models.Group.id,
~models.GroupSnapshot.deleted))
def group_has_volumes_filter(attached_or_with_snapshots=False):
query = sql.exists().where(
and_(models.Volume.group_id == models.Group.id,
~models.Volume.deleted))
if attached_or_with_snapshots:
query = query.where(or_(
models.Volume.attach_status == 'attached',
sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted))))
return query
def group_creating_from_src(group_id=None, group_snapshot_id=None):
# NOTE(geguileo): As explained in devref api_conditional_updates we use a
# subquery to trick MySQL into using the same table in the update and the
# where clause.
subq = sql.select([models.Group]).where(
and_(~models.Group.deleted,
models.Group.status == 'creating')).alias('group2')
if group_id:
match_id = subq.c.source_group_id == group_id
elif group_snapshot_id:
match_id = subq.c.group_snapshot_id == group_snapshot_id
else:
msg = _('group_creating_from_src must be called with group_id or '
'group_snapshot_id parameter.')
raise exception.ProgrammingError(reason=msg)
return sql.exists([subq]).where(match_id)
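# Hypothetical usage sketch for group_creating_from_src() (names follow the
# conditional-update pattern used elsewhere in Cinder): the returned filter is
# meant to be passed to a conditional update so a group cannot be deleted
# while another group is still being created from it, e.g.:
#
#   filters = [~group_creating_from_src(group_id=group.id)]
#   conditional_update(ctxt, models.Group,
#                      {'status': 'deleting'},
#                      {'status': 'available'},
#                      filters=filters)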
###############################
@require_context
def _cgsnapshot_get(context, cgsnapshot_id, session=None):
result = model_query(context, models.Cgsnapshot, session=session,
project_only=True).\
filter_by(id=cgsnapshot_id).\
first()
if not result:
raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
return result
@require_context
def cgsnapshot_get(context, cgsnapshot_id):
return _cgsnapshot_get(context, cgsnapshot_id)
def is_valid_model_filters(model, filters, exclude_list=None):
"""Return True if filter values exist on the model
:param model: a Cinder model
:param filters: dictionary of filters
"""
for key in filters.keys():
if exclude_list and key in exclude_list:
continue
try:
key = key.rstrip('~')
getattr(model, key)
except AttributeError:
LOG.debug("'%s' filter key is not valid.", key)
return False
return True
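# Illustrative (hypothetical) use of is_valid_model_filters(): keys ending in
# '~' mark inexact ("like") filters, so only the bare attribute name has to
# exist on the model:
#
#   is_valid_model_filters(models.Message,
#                          {'event_id~': 'VOLUME_', 'message_level': 'ERROR'})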
def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None):
query = model_query(context, models.Cgsnapshot)
if filters:
if not is_valid_model_filters(models.Cgsnapshot, filters):
return []
query = query.filter_by(**filters)
if project_id:
query = query.filter_by(project_id=project_id)
if group_id:
query = query.filter_by(consistencygroup_id=group_id)
return query.all()
@require_admin_context
def cgsnapshot_get_all(context, filters=None):
return _cgsnapshot_get_all(context, filters=filters)
@require_admin_context
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
return _cgsnapshot_get_all(context, group_id=group_id, filters=filters)
@require_context
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
authorize_project_context(context, project_id)
return _cgsnapshot_get_all(context, project_id=project_id, filters=filters)
@handle_db_data_error
@require_context
def cgsnapshot_create(context, values):
if not values.get('id'):
values['id'] = str(uuid.uuid4())
cg_id = values.get('consistencygroup_id')
session = get_session()
model = models.Cgsnapshot
with session.begin():
if cg_id:
# There has to exist at least 1 volume in the CG and the CG cannot
# be updating the composing volumes or being created.
conditions = [
sql.exists().where(and_(
~models.Volume.deleted,
models.Volume.consistencygroup_id == cg_id)),
~models.ConsistencyGroup.deleted,
models.ConsistencyGroup.id == cg_id,
~models.ConsistencyGroup.status.in_(('creating', 'updating'))]
# NOTE(geguileo): We build a "fake" from_select clause instead of
# using transaction isolation on the session because we would need
# SERIALIZABLE level and that would have a considerable performance
# penalty.
binds = (bindparam(k, v) for k, v in values.items())
sel = session.query(*binds).filter(*conditions)
insert_stmt = model.__table__.insert().from_select(values.keys(),
sel)
result = session.execute(insert_stmt)
# If we couldn't insert the row because of the conditions raise
# the right exception
if not result.rowcount:
msg = _("Source CG cannot be empty or in 'creating' or "
"'updating' state. No cgsnapshot will be created.")
raise exception.InvalidConsistencyGroup(reason=msg)
else:
cgsnapshot = model()
cgsnapshot.update(values)
session.add(cgsnapshot)
return _cgsnapshot_get(context, values['id'], session=session)
@require_context
@handle_db_data_error
def cgsnapshot_update(context, cgsnapshot_id, values):
query = model_query(context, models.Cgsnapshot, project_only=True)
result = query.filter_by(id=cgsnapshot_id).update(values)
if not result:
raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
@require_admin_context
def cgsnapshot_destroy(context, cgsnapshot_id):
session = get_session()
with session.begin():
updated_values = {'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}
model_query(context, models.Cgsnapshot, session=session).\
filter_by(id=cgsnapshot_id).\
update(updated_values)
del updated_values['updated_at']
return updated_values
def cgsnapshot_creating_from_src():
"""Get a filter that checks if a CGSnapshot is being created from a CG."""
return sql.exists().where(and_(
models.Cgsnapshot.consistencygroup_id == models.ConsistencyGroup.id,
~models.Cgsnapshot.deleted,
models.Cgsnapshot.status == 'creating'))
###############################
@require_context
def _group_snapshot_get(context, group_snapshot_id, session=None):
result = model_query(context, models.GroupSnapshot, session=session,
project_only=True).\
filter_by(id=group_snapshot_id).\
first()
if not result:
raise exception.GroupSnapshotNotFound(
group_snapshot_id=group_snapshot_id)
return result
@require_context
def group_snapshot_get(context, group_snapshot_id):
return _group_snapshot_get(context, group_snapshot_id)
def _group_snapshot_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.GroupSnapshot,
filters):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.GroupSnapshot)
return query.all() if query else []
@require_admin_context
def group_snapshot_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
return _group_snapshot_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@require_admin_context
def group_snapshot_get_all_by_group(context, group_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
if filters is None:
filters = {}
if group_id:
filters['group_id'] = group_id
return _group_snapshot_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@require_context
def group_snapshot_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
authorize_project_context(context, project_id)
if filters is None:
filters = {}
if project_id:
filters['project_id'] = project_id
return _group_snapshot_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@handle_db_data_error
@require_context
def group_snapshot_create(context, values):
if not values.get('id'):
values['id'] = six.text_type(uuid.uuid4())
group_id = values.get('group_id')
session = get_session()
model = models.GroupSnapshot
with session.begin():
if group_id:
# There has to exist at least 1 volume in the group and the group
# cannot be updating the composing volumes or being created.
conditions = [
sql.exists().where(and_(
~models.Volume.deleted,
models.Volume.group_id == group_id)),
~models.Group.deleted,
models.Group.id == group_id,
~models.Group.status.in_(('creating', 'updating'))]
# NOTE(geguileo): We build a "fake" from_select clause instead of
# using transaction isolation on the session because we would need
# SERIALIZABLE level and that would have a considerable performance
# penalty.
binds = (bindparam(k, v) for k, v in values.items())
sel = session.query(*binds).filter(*conditions)
insert_stmt = model.__table__.insert().from_select(values.keys(),
sel)
result = session.execute(insert_stmt)
# If we couldn't insert the row because of the conditions raise
# the right exception
if not result.rowcount:
msg = _("Source group cannot be empty or in 'creating' or "
"'updating' state. No group snapshot will be created.")
raise exception.InvalidGroup(reason=msg)
else:
group_snapshot = model()
group_snapshot.update(values)
session.add(group_snapshot)
return _group_snapshot_get(context, values['id'], session=session)
@require_context
@handle_db_data_error
def group_snapshot_update(context, group_snapshot_id, values):
session = get_session()
with session.begin():
result = model_query(context, models.GroupSnapshot,
project_only=True).\
filter_by(id=group_snapshot_id).\
first()
if not result:
raise exception.GroupSnapshotNotFound(
_("No group snapshot with id %s") % group_snapshot_id)
result.update(values)
result.save(session=session)
return result
@require_admin_context
def group_snapshot_destroy(context, group_snapshot_id):
session = get_session()
with session.begin():
updated_values = {'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}
model_query(context, models.GroupSnapshot, session=session).\
filter_by(id=group_snapshot_id).\
update(updated_values)
del updated_values['updated_at']
return updated_values
def group_snapshot_creating_from_src():
"""Get a filter to check if a grp snapshot is being created from a grp."""
return sql.exists().where(and_(
models.GroupSnapshot.group_id == models.Group.id,
~models.GroupSnapshot.deleted,
models.GroupSnapshot.status == 'creating'))
###############################
@require_admin_context
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than age from cinder tables."""
try:
age_in_days = int(age_in_days)
except ValueError:
msg = _('Invalid value for age, %(age)s') % {'age': age_in_days}
LOG.exception(msg)
raise exception.InvalidParameterValue(msg)
engine = get_engine()
session = get_session()
metadata = MetaData()
metadata.reflect(engine)
for table in reversed(metadata.sorted_tables):
if 'deleted' not in table.columns.keys():
continue
LOG.info('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s', {'age': age_in_days,
'table': table})
deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
try:
with session.begin():
# Delete child records first from quality_of_service_specs
# table to avoid FK constraints
if six.text_type(table) == "quality_of_service_specs":
session.query(models.QualityOfServiceSpecs).filter(
and_(models.QualityOfServiceSpecs.specs_id.isnot(
None), models.QualityOfServiceSpecs.deleted == 1,
models.QualityOfServiceSpecs.deleted_at <
deleted_age)).delete()
result = session.execute(
table.delete()
.where(table.c.deleted_at < deleted_age))
except db_exc.DBReferenceError as ex:
LOG.error('DBError detected when purging from '
'%(tablename)s: %(error)s.',
{'tablename': table, 'error': ex})
raise
rows_purged = result.rowcount
if rows_purged != 0:
LOG.info("Deleted %(row)d rows from table=%(table)s",
{'row': rows_purged, 'table': table})
###############################
def _translate_messages(messages):
return [_translate_message(message) for message in messages]
def _translate_message(message):
"""Translate the Message model to a dict."""
return {
'id': message['id'],
'project_id': message['project_id'],
'request_id': message['request_id'],
'resource_type': message['resource_type'],
'resource_uuid': message.get('resource_uuid'),
'event_id': message['event_id'],
'message_level': message['message_level'],
'created_at': message['created_at'],
'expires_at': message.get('expires_at'),
}
def _message_get(context, message_id, session=None):
query = model_query(context,
models.Message,
read_deleted="no",
project_only="yes",
session=session)
result = query.filter_by(id=message_id).first()
if not result:
raise exception.MessageNotFound(message_id=message_id)
return result
@require_context
def message_get(context, message_id, session=None):
result = _message_get(context, message_id, session)
return _translate_message(result)
@require_context
def message_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Retrieves all messages.
If no sort parameters are specified then the returned messages are
sorted first by the 'created_at' key and then by the 'id' key in
descending order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see
_process_messages_filters function for more
information
:returns: list of matching messages
"""
messages = models.Message
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, messages)
if query is None:
return []
results = query.all()
return _translate_messages(results)
@apply_like_filters(model=models.Message)
def _process_messages_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.Message, filters):
return None
query = query.filter_by(**filters)
return query
def _messages_get_query(context, session=None, project_only=False):
return model_query(context, models.Message, session=session,
project_only=project_only)
@require_context
def message_create(context, values):
message_ref = models.Message()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
message_ref.update(values)
session = get_session()
with session.begin():
session.add(message_ref)
@require_admin_context
def message_destroy(context, message):
session = get_session()
now = timeutils.utcnow()
with session.begin():
updated_values = {'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')}
(model_query(context, models.Message, session=session).
filter_by(id=message.get('id')).
update(updated_values))
del updated_values['updated_at']
return updated_values
@require_admin_context
def cleanup_expired_messages(context):
session = get_session()
now = timeutils.utcnow()
with session.begin():
# NOTE(tommylikehu): Directly delete the expired
# messages here.
return session.query(models.Message).filter(
models.Message.expires_at < now).delete()
###############################
@require_context
def driver_initiator_data_insert_by_key(context, initiator, namespace,
key, value):
data = models.DriverInitiatorData()
data.initiator = initiator
data.namespace = namespace
data.key = key
data.value = value
session = get_session()
try:
with session.begin():
session.add(data)
return True
except db_exc.DBDuplicateEntry:
return False
@require_context
def driver_initiator_data_get(context, initiator, namespace):
session = get_session()
with session.begin():
return session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
all()
###############################
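# Each entry maps an ORM model to the triple of helpers used to build
# paginated queries for it (see _generate_paginate_query):
# (base query factory, filter processor, single-object getter).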
PAGINATION_HELPERS = {
models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
models.QualityOfServiceSpecs: (_qos_specs_get_query,
_process_qos_specs_filters, _qos_specs_get),
models.VolumeTypes: (_volume_type_get_query, _process_volume_types_filters,
_volume_type_get_db_object),
models.ConsistencyGroup: (_consistencygroups_get_query,
_process_consistencygroups_filters,
_consistencygroup_get),
models.Message: (_messages_get_query, _process_messages_filters,
_message_get),
models.GroupTypes: (_group_type_get_query, _process_group_types_filters,
_group_type_get_db_object),
models.Group: (_groups_get_query,
_process_groups_filters,
_group_get),
models.GroupSnapshot: (_group_snapshot_get_query,
_process_group_snapshot_filters,
_group_snapshot_get),
models.VolumeAttachment: (_attachment_get_query,
_process_attachment_filters,
_attachment_get),
}
###############################
@require_context
def image_volume_cache_create(context, host, cluster_name, image_id,
image_updated_at, volume_id, size):
session = get_session()
with session.begin():
cache_entry = models.ImageVolumeCacheEntry()
cache_entry.host = host
cache_entry.cluster_name = cluster_name
cache_entry.image_id = image_id
cache_entry.image_updated_at = image_updated_at
cache_entry.volume_id = volume_id
cache_entry.size = size
session.add(cache_entry)
return cache_entry
@require_context
def image_volume_cache_delete(context, volume_id):
session = get_session()
with session.begin():
session.query(models.ImageVolumeCacheEntry).\
filter_by(volume_id=volume_id).\
delete()
@require_context
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
filters = _clean_filters(filters)
session = get_session()
with session.begin():
entry = session.query(models.ImageVolumeCacheEntry).\
filter_by(image_id=image_id).\
filter_by(**filters).\
order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
first()
if entry:
entry.last_used = timeutils.utcnow()
entry.save(session=session)
return entry
@require_context
def image_volume_cache_get_by_volume_id(context, volume_id):
session = get_session()
with session.begin():
return session.query(models.ImageVolumeCacheEntry).\
filter_by(volume_id=volume_id).\
first()
@require_context
def image_volume_cache_get_all(context, **filters):
filters = _clean_filters(filters)
session = get_session()
with session.begin():
return session.query(models.ImageVolumeCacheEntry).\
filter_by(**filters).\
order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
all()
@require_admin_context
def image_volume_cache_include_in_cluster(context, cluster,
partial_rename=True, **filters):
"""Include all volumes matching the filters into a cluster."""
filters = _clean_filters(filters)
return _include_in_cluster(context, cluster, models.ImageVolumeCacheEntry,
partial_rename, filters)
###################
def _worker_query(context, session=None, until=None, db_filters=None,
ignore_sentinel=True, **filters):
# Remove all filters based on the workers table that are set to None
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(models.Worker, filters):
return None
query = model_query(context, models.Worker, session=session)
# TODO(geguileo): Once we remove support for MySQL 5.5 we can remove this
if ignore_sentinel:
# We don't want to retrieve the workers sentinel
query = query.filter(models.Worker.resource_type != 'SENTINEL')
if until:
db_filters = list(db_filters) if db_filters else []
# Since we set updated_at at creation time we don't need to check
# created_at field.
db_filters.append(models.Worker.updated_at <= until)
if db_filters:
query = query.filter(and_(*db_filters))
if filters:
query = query.filter_by(**filters)
return query
DB_SUPPORTS_SUBSECOND_RESOLUTION = True
def workers_init():
"""Check if DB supports subsecond resolution and set global flag.
MySQL 5.5 doesn't support subsecond resolution in datetime fields, so we
have to take it into account when working with the worker's table.
To do this we'll have 1 row in the DB, created by the migration script,
where we have tried to set the microseconds and we'll check it.
Once we drop support for MySQL 5.5 we can remove this method.
"""
global DB_SUPPORTS_SUBSECOND_RESOLUTION
session = get_session()
query = session.query(models.Worker).filter_by(resource_type='SENTINEL')
worker = query.first()
DB_SUPPORTS_SUBSECOND_RESOLUTION = bool(worker.updated_at.microsecond)
def _worker_set_updated_at_field(values):
# TODO(geguileo): Once we drop support for MySQL 5.5 we can simplify this
# method.
updated_at = values.get('updated_at', timeutils.utcnow())
if isinstance(updated_at, six.string_types):
return
if not DB_SUPPORTS_SUBSECOND_RESOLUTION:
updated_at = updated_at.replace(microsecond=0)
values['updated_at'] = updated_at
def worker_create(context, **values):
"""Create a worker entry from optional arguments."""
_worker_set_updated_at_field(values)
worker = models.Worker(**values)
session = get_session()
try:
with session.begin():
worker.save(session)
except db_exc.DBDuplicateEntry:
raise exception.WorkerExists(type=values.get('resource_type'),
id=values.get('resource_id'))
return worker
def worker_get(context, **filters):
"""Get a worker or raise exception if it does not exist."""
query = _worker_query(context, **filters)
worker = query.first() if query else None
if not worker:
raise exception.WorkerNotFound(**filters)
return worker
def worker_get_all(context, **filters):
"""Get all workers that match given criteria."""
query = _worker_query(context, **filters)
return query.all() if query else []
def _orm_worker_update(worker, values):
if not worker:
return
for key, value in values.items():
setattr(worker, key, value)
def worker_update(context, id, filters=None, orm_worker=None, **values):
"""Update a worker with given values."""
filters = filters or {}
query = _worker_query(context, id=id, **filters)
# If we want to update the orm_worker and we don't set the update_at field
# we set it here instead of letting SQLAlchemy do it to be able to update
# the orm_worker.
_worker_set_updated_at_field(values)
reference = orm_worker or models.Worker
values['race_preventer'] = reference.race_preventer + 1
result = query.update(values)
if not result:
raise exception.WorkerNotFound(id=id, **filters)
_orm_worker_update(orm_worker, values)
return result
def worker_claim_for_cleanup(context, claimer_id, orm_worker):
"""Claim a worker entry for cleanup."""
# We set updated_at value so we are sure we update the DB entry even if the
# service_id is the same in the DB, thus flagging the claim.
values = {'service_id': claimer_id,
'race_preventer': orm_worker.race_preventer + 1,
'updated_at': timeutils.utcnow()}
_worker_set_updated_at_field(values)
# We only update the worker entry if it hasn't been claimed by other host
# or thread
query = _worker_query(context,
status=orm_worker.status,
service_id=orm_worker.service_id,
race_preventer=orm_worker.race_preventer,
until=orm_worker.updated_at,
id=orm_worker.id)
result = query.update(values, synchronize_session=False)
if result:
_orm_worker_update(orm_worker, values)
return result
def worker_destroy(context, **filters):
"""Delete a worker (no soft delete)."""
query = _worker_query(context, **filters)
return query.delete()
###############################
@require_context
def resource_exists(context, model, resource_id, session=None):
# Match non deleted resources by the id
conditions = [model.id == resource_id, ~model.deleted]
# If the context is not admin we limit it to the context's project
if is_user_context(context) and hasattr(model, 'project_id'):
conditions.append(model.project_id == context.project_id)
session = session or get_session()
query = session.query(sql.exists().where(and_(*conditions)))
return query.scalar()
def get_model_for_versioned_object(versioned_object):
# Exceptions to model mapping, in general Versioned Objects have the same
# name as their ORM models counterparts, but there are some that diverge
VO_TO_MODEL_EXCEPTIONS = {
'BackupImport': models.Backup,
'VolumeType': models.VolumeTypes,
'CGSnapshot': models.Cgsnapshot,
'GroupType': models.GroupTypes,
'GroupSnapshot': models.GroupSnapshot,
}
if isinstance(versioned_object, six.string_types):
model_name = versioned_object
else:
model_name = versioned_object.obj_name()
return (VO_TO_MODEL_EXCEPTIONS.get(model_name) or
getattr(models, model_name))
def _get_get_method(model):
# Exceptions to model to get methods, in general method names are a simple
# conversion changing ORM name from camel case to snake format and adding
# _get to the string
GET_EXCEPTIONS = {
models.ConsistencyGroup: consistencygroup_get,
models.VolumeTypes: _volume_type_get_full,
models.QualityOfServiceSpecs: qos_specs_get,
models.GroupTypes: _group_type_get_full,
}
if model in GET_EXCEPTIONS:
return GET_EXCEPTIONS[model]
# General conversion
# Convert camel cased model name to snake format
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__)
# Get method must be snake formatted model name concatenated with _get
method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get'
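    # e.g. models.GroupSnapshot -> 'group_snapshot_get',
    #      models.Volume        -> 'volume_get'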
return globals().get(method_name)
_GET_METHODS = {}
@require_context
def get_by_id(context, model, id, *args, **kwargs):
# Add get method to cache dictionary if it's not already there
if not _GET_METHODS.get(model):
_GET_METHODS[model] = _get_get_method(model)
return _GET_METHODS[model](context, id, *args, **kwargs)
def condition_db_filter(model, field, value):
"""Create matching filter.
If value is an iterable other than a string, any of the values is
a valid match (OR), so we'll use SQL IN operator.
    If it's not an iterable, the == operator will be used.
"""
orm_field = getattr(model, field)
# For values that must match and are iterables we use IN
if (isinstance(value, collections.Iterable) and
not isinstance(value, six.string_types)):
# We cannot use in_ when one of the values is None
if None not in value:
return orm_field.in_(value)
return or_(orm_field == v for v in value)
# For values that must match and are not iterables we use ==
return orm_field == value
def condition_not_db_filter(model, field, value, auto_none=True):
"""Create non matching filter.
If value is an iterable other than a string, any of the values is
a valid match (OR), so we'll use SQL IN operator.
    If it's not an iterable, the == operator will be used.
If auto_none is True then we'll consider NULL values as different as well,
like we do in Python and not like SQL does.
"""
result = ~condition_db_filter(model, field, value)
if (auto_none
and ((isinstance(value, collections.Iterable) and
not isinstance(value, six.string_types)
and None not in value)
or (value is not None))):
orm_field = getattr(model, field)
result = or_(result, orm_field.is_(None))
return result
def is_orm_value(obj):
"""Check if object is an ORM field or expression."""
return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute,
sqlalchemy.sql.expression.ColumnElement))
def _check_is_not_multitable(values, model):
"""Check that we don't try to do multitable updates.
Since PostgreSQL doesn't support multitable updates we want to always fail
if we have such a query in our code, even if with MySQL it would work.
"""
used_models = set()
for field in values:
if isinstance(field, sqlalchemy.orm.attributes.InstrumentedAttribute):
used_models.add(field.class_)
elif isinstance(field, six.string_types):
used_models.add(model)
else:
raise exception.ProgrammingError(
reason='DB Conditional update - Unknown field type, must be '
'string or ORM field.')
if len(used_models) > 1:
raise exception.ProgrammingError(
reason='DB Conditional update - Error in query, multitable '
'updates are not supported.')
@require_context
@_retry_on_deadlock
def conditional_update(context, model, values, expected_values, filters=(),
include_deleted='no', project_only=False, order=None):
"""Compare-and-swap conditional update SQLAlchemy implementation."""
_check_is_not_multitable(values, model)
# Provided filters will become part of the where clause
where_conds = list(filters)
# Build where conditions with operators ==, !=, NOT IN and IN
for field, condition in expected_values.items():
if not isinstance(condition, db.Condition):
condition = db.Condition(condition, field)
where_conds.append(condition.get_filter(model, field))
# Create the query with the where clause
query = model_query(context, model, read_deleted=include_deleted,
project_only=project_only).filter(*where_conds)
# NOTE(geguileo): Some DBs' update method are order dependent, and they
# behave differently depending on the order of the values, example on a
# volume with 'available' status:
    # UPDATE volumes SET previous_status=status, status='retyping'
# WHERE id='44f284f9-877d-4fce-9eb4-67a052410054';
# Will result in a volume with 'retyping' status and 'available'
# previous_status both on SQLite and MariaDB, but
# UPDATE volumes SET status='retyping', previous_status=status
# WHERE id='44f284f9-877d-4fce-9eb4-67a052410054';
# Will yield the same result in SQLite but will result in a volume with
# status and previous_status set to 'retyping' in MariaDB, which is not
# what we want, so order must be taken into consideration.
# Order for the update will be:
# 1- Order specified in argument order
# 2- Values that refer to other ORM field (simple and using operations,
# like size + 10)
# 3- Values that use Case clause (since they may be using fields as well)
# 4- All other values
order = list(order) if order else tuple()
orm_field_list = []
case_list = []
unordered_list = []
for key, value in values.items():
if isinstance(value, db.Case):
value = case(value.whens, value.value, value.else_)
if key in order:
order[order.index(key)] = (key, value)
continue
# NOTE(geguileo): Check Case first since it's a type of orm value
if isinstance(value, sql.elements.Case):
value_list = case_list
elif is_orm_value(value):
value_list = orm_field_list
else:
value_list = unordered_list
value_list.append((key, value))
update_args = {'synchronize_session': False}
# If we don't have to enforce any kind of order just pass along the values
# dictionary since it will be a little more efficient.
if order or orm_field_list or case_list:
# If we are doing an update with ordered parameters, we need to add
# remaining values to the list
values = itertools.chain(order, orm_field_list, case_list,
unordered_list)
# And we have to tell SQLAlchemy that we want to preserve the order
update_args['update_args'] = {'preserve_parameter_order': True}
# Return True if we were able to change any DB entry, False otherwise
result = query.update(values, **update_args)
return 0 != result
|
the-stack_0_27815
|
#!/usr/bin/env python
import sys
import argparse
from baselines import bench, logger
def train(env_id, num_timesteps, seed, policy):
from baselines.common import set_global_seeds
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.ppo2 import ppo2
from baselines.ppo2.policies import CnnPolicy, LstmPolicy, LnLstmPolicy
import gym
import logging
import multiprocessing
import os.path as osp
import tensorflow as tf
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=ncpu,
inter_op_parallelism_threads=ncpu)
config.gpu_options.allow_growth = True #pylint: disable=E1101
gym.logger.setLevel(logging.WARN)
tf.Session(config=config).__enter__()
def make_env(rank):
def env_fn():
env = make_atari(env_id)
env.seed(seed + rank)
env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
return wrap_deepmind(env)
return env_fn
nenvs = 8
env = SubprocVecEnv([make_env(i) for i in range(nenvs)])
set_global_seeds(seed)
env = VecFrameStack(env, 4)
policy = {'cnn' : CnnPolicy, 'lstm' : LstmPolicy, 'lnlstm' : LnLstmPolicy}[policy]
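    # NOTE (assumption about how baselines' ppo2 treats callable schedules):
    # lr and cliprange below are called with the remaining training fraction f
    # (roughly 1.0 at the start, decaying towards 0.0), so both settings
    # anneal linearly to zero over the course of training.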
ppo2.learn(policy=policy, env=env, nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=lambda f : f * 0.1,
total_timesteps=int(num_timesteps * 1.1))
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--policy', help='Policy architecture', choices=['cnn', 'lstm', 'lnlstm'], default='cnn')
parser.add_argument('--num-timesteps', type=int, default=int(10e6))
parser.add_argument('--data_name', type=str, default='ppo2')
args = parser.parse_args()
logger.configure(dir='/home/chase/rl_data/{}'.format(args.data_name))
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed,
policy=args.policy)
if __name__ == '__main__':
main()
|
the-stack_0_27816
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from bidict import bidict
from DES import DES
import numpy as np
from RSA_Class import RSA
class NetworkUser(object):
"""docstring for NetworkUser."""
    def __init__(self, username, encpt_obj, rsa_obj, uid=-1):
super(NetworkUser, self).__init__()
self.role = 'User'
self.desc = ''
self.action_codes = bidict({'SNT_CIPHER':'000', 'RPY_CIPHER':'001', 'REQ_KEY': '100', 'RPY_KEY': '200', 'REQ_CON': '300', 'RPY_CHLNG': '400', 'RPY_CLG_SOL': '500'})
self.pkg_id = 0
self.sess_key_info = {} # {uid_str: ['sess key', 'nonce', 'nonce2']}
self.sess_ttl = 300 # time to live (sec)
self.uid = (int(time.time()) + np.random.randint(1000)) % 1000 if uid == -1 else uid
        self.uname = username
self.packet_items = ['action_code', 'action_name', 'cipher_key', 'src_uid', 'dst_uid', 'nonce', 'encpt_KDC_rpy', 'encpt_sess_key_and_src_id_and_nonce', 'encpt_nonce', 'encpt_encpt_nonce']
self.encpy_obj = encpt_obj # encryption and decryption method
self.pv_key = None # generate private key
self.RSA = rsa_obj
def __str__(self):
str_format = """name: {name}\nrole: {role}\nuid: {uid}"""
s = str_format.format(name=self.uname, role=self.role, uid=self.uid)
return s + self.desc
    def gen_cipher_key(self):  # note that pv_key will change every time this function is called
return self.RSA.gen_pv_key()
def set_pv_key(self, cipher_key):
shared_key = self.RSA.gen_shared_key(cipher_key)
self.pv_key = self.encpy_obj.int_to_key(shared_key)
    def gen_nonce(self):  # returns a nonce string of the form '<pkg_id>@<timestamp>'
nonce = str(self.pkg_id) + '@' + str(time.time())
return nonce
def interp_nonce(self, nonce):
nonce_elms = nonce.split('@')
if len(nonce_elms) < 2:
return None
else:
pkg_id = int(nonce_elms[0])
ts = float(nonce_elms[1])
return pkg_id, ts
def change_rsa_key(self, k=1000):
self.RSA.change_pv_key(k)
# action_code, src_uid=-1, dst_uid=-1, nonce="", sess_key=0, encpt_sess_key_and_src_id_and_nonce="", encpt_nonce="", encpt_encpt_nonce=""
def gen_packet(self, packet_info):
# a general function to assemble pieces to package
try:
if 'action_code' in packet_info:
action_code = packet_info['action_code']
action_name = self.action_codes.inv[str(action_code)]
elif 'action_name' in packet_info:
action_name = packet_info['action_name']
action_code = self.action_codes[action_name]
else:
print('action code/name unknown')
return
except Exception as e:
            print(e, 'action code/name not defined')
return
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
try:
if action_name == 'SNT_CIPHER':
packet_format = "{action_code}||{src_uid}||{cipher_key}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
src_uid=packet_info['src_uid'],
cipher_key=packet_info['cipher_key'])
elif action_name == 'RPY_CIPHER':
packet_format = "{action_code}||{src_uid}||{cipher_key}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
src_uid=packet_info['src_uid'],
cipher_key=packet_info['cipher_key'])
elif action_name == 'REQ_KEY':
# request a key from KDC
packet_format = "{action_code}||{src_uid}||{dst_uid}||{nonce}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
src_uid=packet_info['src_uid'],
dst_uid=packet_info['dst_uid'], nonce=packet_info['nonce'])
elif action_name == 'RPY_KEY':
                # KDC reply needs to be encrypted
packet_format = "{action_code}||{encpt_KDC_rpy}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
encpt_KDC_rpy=packet_info['encpt_KDC_rpy'])
elif action_name == 'REQ_CON':
                # alice requests to connect to bob
packet_format = "{action_code}||{encpt_sess_key_and_src_id_and_nonce}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
encpt_sess_key_and_src_id_and_nonce=packet_info['encpt_sess_key_and_src_id_and_nonce'])
elif action_name == 'RPY_CHLNG':
packet_format = "{action_code}||{encpt_nonce}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
encpt_nonce=packet_info['encpt_nonce'])
elif action_name == 'RPY_CLG_SOL':
packet_format = "{action_code}||{src_uid}||{encpt_encpt_nonce}"
packet = packet_format.format(\
action_code=packet_info['action_code'],
src_uid=packet_info['src_uid'],
encpt_encpt_nonce=packet_info['encpt_encpt_nonce'])
else:
print('action name ', action_name, ' unknown')
except Exception as e:
print(e, 'cannot generate packet')
return None
return packet
def interp_packet(self, packet):
if isinstance(packet, dict):
return packet
elif not isinstance(packet, str):
print('Unknown packet type')
return None
packet_elems = packet.split('||')
action_code = packet_elems[0]
if action_code in self.action_codes.inv:
action_name = self.action_codes.inv[action_code]
else:
print('action code/name unknown')
return
packet_info = self.gen_packet_dict()
try:
if action_name == 'SNT_CIPHER':
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['src_uid'] = int(packet_elems[1])
packet_info['cipher_key'] = int(packet_elems[2])
elif action_name == 'RPY_CIPHER':
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['src_uid'] = int(packet_elems[1])
packet_info['cipher_key'] = int(packet_elems[2])
elif action_name == 'REQ_KEY':
# request a key from KDC
# packet_format = "{action_code}||{src_uid}||{dst_uid}||{nonce}"
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['src_uid'] = int(packet_elems[1])
packet_info['dst_uid'] = int(packet_elems[2])
packet_info['nonce'] = packet_elems[3]
elif action_name == 'RPY_KEY':
                # KDC reply needs to be encrypted
packet_format = "{action_code}||{encpt_KDC_rpy}"
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['encpt_KDC_rpy'] = packet_elems[1]
elif action_name == 'REQ_CON':
                # alice requests to connect to bob
packet_format = "{action_code}||{encpt_sess_key_and_src_id_and_nonce}"
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['encpt_sess_key_and_src_id_and_nonce'] = packet_elems[1]
elif action_name == 'RPY_CHLNG':
packet_format = "{action_code}||{encpt_nonce}"
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['encpt_nonce'] = packet_elems[1]
elif action_name == 'RPY_CLG_SOL':
packet_format = "{action_code}||{encpt_encpt_nonce}"
packet_info['action_code'] = action_code
packet_info['action_name'] = action_name
packet_info['src_uid'] = int(packet_elems[1])
packet_info['encpt_encpt_nonce'] = packet_elems[2]
else:
print('action name ', action_name, ' unknown')
except Exception as e:
print(e, 'cannot generate packet')
return None
return packet_info
def gen_packet_dict(self):
return {key:None for key in self.packet_items}
def check_nonce(self, nonce):
pkg_id, ts = self.interp_nonce(nonce)
        if (time.time() - ts) > self.sess_ttl:  # nonce older than the session TTL
return False
return True
def send_cipher_key(self):
packet_info = self.gen_packet_dict()
user_cipher_key = self.gen_cipher_key()
packet_info['action_name'] = 'SNT_CIPHER'
packet_info['action_code'] = self.action_codes['SNT_CIPHER']
packet_info['src_uid'] = self.uid
packet_info['cipher_key'] = user_cipher_key
packet = self.gen_packet(packet_info)
return packet
def request_key(self, dst_uid):
nonce = self.gen_nonce()
self.sess_key_info[str(dst_uid)] = [None, nonce, None]
packet_info = self.gen_packet_dict()
packet_info['action_name'] = 'REQ_KEY'
packet_info['action_code'] = self.action_codes['REQ_KEY']
packet_info['src_uid'] = self.uid
packet_info['dst_uid'] = dst_uid
packet_info['nonce'] = nonce
packet = self.gen_packet(packet_info)
return packet
def request_connection(self, packet_REQ_KEY):
if self.pv_key is None:
print('Please share the private key with the KDC first!')
return
packet_REQ_KEY_info = self.interp_packet(packet_REQ_KEY)
sess_key_src_id_and_nonce_bob_cipher = self.encpy_obj.decrypt(packet_REQ_KEY_info['encpt_KDC_rpy'], self.pv_key)
encpt_KDC_rpy_elems = sess_key_src_id_and_nonce_bob_cipher.split('||')
if len(encpt_KDC_rpy_elems) != 4:
print('packet has been tampered!')
return
sess_key_str, bob_uid_str, nonce, bob_cipher = encpt_KDC_rpy_elems
sess_key = self.encpy_obj.str_to_key_array(sess_key_str)
if not self.check_nonce(nonce):
print('session expires')
return
if bob_uid_str not in self.sess_key_info:
            print("didn't request to connect to bob!")
return
if nonce != self.sess_key_info[bob_uid_str][1]:
print('not the same nonce')
return
self.sess_key_info[bob_uid_str][0] = sess_key
packet_info = self.gen_packet_dict()
packet_info['action_name'] = 'REQ_CON'
packet_info['action_code'] = self.action_codes['REQ_CON']
packet_info['encpt_sess_key_and_src_id_and_nonce'] = bob_cipher
packet = self.gen_packet(packet_info)
return packet
def reply_challenge(self, packet_REQ_CON_info):
if self.pv_key is None:
print('Please share the private key with the KDC first!')
return
request_connection = self.encpy_obj.decrypt(packet_REQ_CON_info['encpt_sess_key_and_src_id_and_nonce'], self.pv_key)
request_connection_elems = request_connection.split('||')
if len(request_connection_elems) != 3:
print('packet has been tampered!')
return
sess_key_str, alice_uid_str, nonce = request_connection_elems
sess_key = self.encpy_obj.str_to_key_array(sess_key_str)
if not self.check_nonce(nonce):
print('session expires')
return
nonce2 = self.gen_nonce()
self.sess_key_info[alice_uid_str] = [sess_key, nonce, nonce2]
encpt_nonce = self.encpy_obj.encrypt(nonce2, sess_key)
packet_info = self.gen_packet_dict()
packet_info['action_name'] = 'RPY_CHLNG'
packet_info['action_code'] = self.action_codes['RPY_CHLNG']
packet_info['encpt_nonce'] = encpt_nonce
packet = self.gen_packet(packet_info)
return packet
def reply_challenge_sol(self, packet_RPY_CHLNG, bob_uid):
packet_RPY_CHLNG_info = self.interp_packet(packet_RPY_CHLNG)
sess_key = self.sess_key_info[str(bob_uid)][0]
nonce2 = self.encpy_obj.decrypt(packet_RPY_CHLNG_info['encpt_nonce'], sess_key)
perm_nonce2 = self.encpy_obj.encrypt(nonce2 + str(self.uid), sess_key)
packet_info = self.gen_packet_dict()
packet_info['action_name'] = 'RPY_CLG_SOL'
packet_info['action_code'] = self.action_codes['RPY_CLG_SOL']
packet_info['src_uid'] = self.uid
packet_info['encpt_encpt_nonce'] = perm_nonce2
packet = self.gen_packet(packet_info)
return packet
def check_challenge_sol(self, packet_RPY_CLG_SOL_info):
alice_uid = packet_RPY_CLG_SOL_info['src_uid']
sess_key = self.sess_key_info[str(alice_uid)][0]
encpt_nonce = self.encpy_obj.decrypt(packet_RPY_CLG_SOL_info['encpt_encpt_nonce'], sess_key)
encpt_nonce_self_cmp = self.sess_key_info[str(alice_uid)][2] + str(alice_uid)
if encpt_nonce == encpt_nonce_self_cmp:
return True
else:
return False
def process_packet(self, packet):
packet_info = self.interp_packet(packet)
action_name = packet_info['action_name']
if action_name == 'RPY_CIPHER':
self.set_pv_key(packet_info['cipher_key'])
print('user: ', self.uid, ' key: ', self.pv_key+0)
print('Communicate the private key with KDC')
return None
elif action_name == 'REQ_CON':
print('receive request for connection from ', packet_info['src_uid'])
packet_RPY_CHLNG = self.reply_challenge(packet_info)
return packet_RPY_CHLNG
elif action_name == 'RPY_CLG_SOL':
print('receive challenge solution from ', packet_info['src_uid'])
check_challenge_sol_rst = self.check_challenge_sol(packet_info)
if check_challenge_sol_rst:
print('agree on connection with ', packet_info['src_uid'])
else:
print('deny on connection with ', packet_info['src_uid'])
return str(check_challenge_sol_rst+0)
else:
print('action name ', action_name, ' unknown')
class KDC(NetworkUser):
"""docstring for KDC."""
def __init__(self, encpy_obj, rsa_obj):
super(KDC, self).__init__('KDC', encpy_obj, rsa_obj)
self.role = 'KDC'
self.user_key_dict = {}
def gen_pv_key(self, cipher_key):
return self.RSA.gen_shared_key(cipher_key)
def gen_sess_key(self):
return self.encpy_obj.gen_key()
def add_user_encpt_info(self, uid, cipher_key):
shared_key = self.gen_pv_key(cipher_key)
self.user_key_dict[str(uid)] = self.encpy_obj.int_to_key(shared_key)
def reply_cipher_key(self, user_cipher_key_packet_info):
kdc_user_cipher_key = self.gen_cipher_key() # regenerate a cipher key
src_uid = user_cipher_key_packet_info['src_uid']
self.add_user_encpt_info(src_uid, user_cipher_key_packet_info['cipher_key'])
print('kdc add user: ', src_uid, ' key: ', self.user_key_dict[str(src_uid)]+0)
# format the packet
packet_info = self.gen_packet_dict()
packet_info['action_name'] = 'RPY_CIPHER'
packet_info['action_code'] = self.action_codes['RPY_CIPHER']
packet_info['src_uid'] = self.uid
packet_info['cipher_key'] = kdc_user_cipher_key
packet = self.gen_packet(packet_info)
return packet
def reply_sess_key(self, req_key_packet_info):
nonce = req_key_packet_info['nonce']
alice_uid_str, bob_uid_str = str(req_key_packet_info['src_uid']), str(req_key_packet_info['dst_uid'])
print(alice_uid_str, ' would like to talk to ', bob_uid_str)
# check nonce validation
if not self.check_nonce(nonce):
print('session expires')
return
if alice_uid_str not in self.user_key_dict:
            print("KDC doesn't have requester's key")
return
alice_key = self.user_key_dict[alice_uid_str]
if bob_uid_str not in self.user_key_dict:
            print("KDC doesn't have destination's key")
return
bob_key = self.user_key_dict[bob_uid_str]
        # gen bob's sess key
sess_key_str = self.encpy_obj.key_array_to_str(self.gen_sess_key())
sess_key_src_id_and_nonce = sess_key_str + '||' + alice_uid_str + '||' + nonce
bob_enc_sess_key_src_id_and_nonce = self.encpy_obj.encrypt(sess_key_src_id_and_nonce, bob_key)
# gen alice's reply
sess_key_src_id_and_nonce_bob_cipher = sess_key_str + '||' + bob_uid_str + '||' + nonce + '||' + bob_enc_sess_key_src_id_and_nonce
alice_enc_sess_key_src_id_and_nonce_bob_cipher = self.encpy_obj.encrypt(sess_key_src_id_and_nonce_bob_cipher, alice_key)
# format the packet
packet_info = self.gen_packet_dict()
packet_info['action_name'] = 'RPY_KEY'
packet_info['action_code'] = self.action_codes['RPY_KEY']
packet_info['encpt_KDC_rpy'] = alice_enc_sess_key_src_id_and_nonce_bob_cipher
packet = self.gen_packet(packet_info)
return packet
def process_packet(self, packet):
packet_info = self.interp_packet(packet)
action_name = packet_info['action_name']
if action_name == 'SNT_CIPHER':
packet = self.reply_cipher_key(packet_info)
elif action_name == 'REQ_KEY':
packet = self.reply_sess_key(packet_info)
else:
print('action name ', action_name, ' not suitable')
return packet
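# ---------------------------------------------------------------------------
# Hypothetical end-to-end sketch of the exchange implemented above.  The
# constructor arguments for DES/RSA are assumptions; only the methods used by
# the classes in this module are relied upon:
#
#   des = DES()
#   kdc = KDC(des, RSA())
#   alice = NetworkUser('alice', des, RSA())
#   bob = NetworkUser('bob', des, RSA())
#
#   # 1. Each user establishes a private key with the KDC.
#   alice.process_packet(kdc.process_packet(alice.send_cipher_key()))
#   bob.process_packet(kdc.process_packet(bob.send_cipher_key()))
#
#   # 2. Alice asks the KDC for a session key to talk to Bob.
#   rpy_key = kdc.process_packet(alice.request_key(bob.uid))
#
#   # 3. Alice forwards Bob's ticket; Bob replies with a challenge.
#   rpy_chlng = bob.process_packet(alice.request_connection(rpy_key))
#
#   # 4. Alice answers the challenge; Bob verifies the solution.
#   bob.process_packet(alice.reply_challenge_sol(rpy_chlng, bob.uid))
# ---------------------------------------------------------------------------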
|
the-stack_0_27818
|
import os
import random
import cv2
import densenets.dense_correspondence_manipulation.utils.utils as utils
import densenets.dense_correspondence_manipulation.utils.visualization as vis_utils
import numpy as np
from densenets.dataset.spartan_dataset_masked import SpartanDataset
from densenets.dense_correspondence_manipulation.simple_pixel_correspondence_labeler.annotate_correspondences import (
draw_reticle,
label_colors,
pil_image_to_cv2,
)
from densenets.evaluation import evaluation
from densenets.network.dense_correspondence_network import DenseCorrespondenceNetwork
COLOR_RED = np.array([0, 0, 255])
COLOR_GREEN = np.array([0, 255, 0])
utils.set_default_cuda_visible_devices()
eval_config_filename = os.path.join(
utils.getDenseCorrespondenceSourceDir(),
'config',
'dense_correspondence',
'evaluation',
'evaluation.yaml',
)
EVAL_CONFIG = utils.getDictFromYamlFilename(eval_config_filename)
LOAD_SPECIFIC_DATASET = False
class HeatmapVisualization(object):
"""
Launches a live interactive heatmap visualization.
Edit config/dense_correspondence/heatmap_vis/heatmap.yaml to specify which networks
to visualize. Specifically add the network you want to visualize to the "networks" list.
Make sure that this network appears in the file pointed to by EVAL_CONFIG
Usage: Launch this file with python after sourcing the environment with
`use_pytorch_dense_correspondence`
Then `python live_heatmap_visualization.py`.
Keypresses:
n: new set of images
s: swap images
p: pause/un-pause
"""
def __init__(self, config):
self._config = config
self._dce = evaluation.DenseCorrespondenceEvaluation(EVAL_CONFIG)
self._load_networks()
self._reticle_color = COLOR_GREEN
self._paused = False
if LOAD_SPECIFIC_DATASET:
self.load_specific_dataset() # uncomment if you want to load a specific dataset
def _load_networks(self):
# we will use the dataset for the first network in the series
self._dcn_dict = dict()
self._dataset = None
self._network_reticle_color = dict()
for idx, network_name in enumerate(self._config["networks"]):
dcn = self._dce.load_network_from_config(network_name)
dcn.eval()
self._dcn_dict[network_name] = dcn
# self._network_reticle_color[network_name] = label_colors[idx]
if len(self._config["networks"]) == 1:
self._network_reticle_color[network_name] = COLOR_RED
else:
self._network_reticle_color[network_name] = label_colors[idx]
if self._dataset is None:
self._dataset = dcn.load_training_dataset()
def load_specific_dataset(self):
dataset_config_filename = os.path.join(
utils.getDenseCorrespondenceSourceDir(),
'config',
'dense_correspondence',
'dataset',
'composite',
'hats_3_demo_composite.yaml',
)
# dataset_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config',
# 'dense_correspondence',
# 'dataset', 'composite', '4_shoes_all.yaml')
dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)
self._dataset = SpartanDataset(config=dataset_config)
def get_random_image_pair(self):
"""
Gets a pair of random images for different scenes of the same object
"""
object_id = self._dataset.get_random_object_id()
# scene_name_a = "2018-04-10-16-02-59"
# scene_name_b = scene_name_a
scene_name_a = self._dataset.get_random_single_object_scene_name(object_id)
scene_name_b = self._dataset.get_different_scene_for_object(
object_id, scene_name_a
)
if self._config["randomize_images"]:
image_a_idx = self._dataset.get_random_image_index(scene_name_a)
image_b_idx = self._dataset.get_random_image_index(scene_name_b)
else:
image_a_idx = 0
image_b_idx = 0
return scene_name_a, scene_name_b, image_a_idx, image_b_idx
def get_random_image_pair_across_object(self):
"""
Gets cross object image pairs
:param randomize:
:type randomize:
:return:
:rtype:
"""
object_id_a, object_id_b = self._dataset.get_two_different_object_ids()
# object_id_a = "shoe_red_nike.yaml"
# object_id_b = "shoe_gray_nike"
# object_id_b = "shoe_green_nike"
scene_name_a = self._dataset.get_random_single_object_scene_name(object_id_a)
scene_name_b = self._dataset.get_random_single_object_scene_name(object_id_b)
if self._config["randomize_images"]:
image_a_idx = self._dataset.get_random_image_index(scene_name_a)
image_b_idx = self._dataset.get_random_image_index(scene_name_b)
else:
image_a_idx = 0
image_b_idx = 0
return scene_name_a, scene_name_b, image_a_idx, image_b_idx
def get_random_image_pair_multi_object_scenes(self):
"""
Gets cross object image pairs
:param randomize:
:type randomize:
:return:
:rtype:
"""
scene_name_a = self._dataset.get_random_multi_object_scene_name()
scene_name_b = self._dataset.get_random_multi_object_scene_name()
if self._config["randomize_images"]:
image_a_idx = self._dataset.get_random_image_index(scene_name_a)
image_b_idx = self._dataset.get_random_image_index(scene_name_b)
else:
image_a_idx = 0
image_b_idx = 0
return scene_name_a, scene_name_b, image_a_idx, image_b_idx
def _get_new_images(self):
"""
Gets a new pair of images
:return:
:rtype:
"""
if random.random() < 0.5:
self._dataset.set_train_mode()
else:
self._dataset.set_test_mode()
if self._config["same_object"]:
(
scene_name_1,
scene_name_2,
image_1_idx,
image_2_idx,
) = self.get_random_image_pair()
elif self._config["different_objects"]:
(
scene_name_1,
scene_name_2,
image_1_idx,
image_2_idx,
) = self.get_random_image_pair_across_object()
elif self._config["multiple_object"]:
(
scene_name_1,
scene_name_2,
image_1_idx,
image_2_idx,
) = self.get_random_image_pair_multi_object_scenes()
else:
raise ValueError("At least one of the image types must be set tot True")
# caterpillar
# scene_name_1 = "2018-04-16-14-42-26"
# scene_name_2 = "2018-04-16-14-25-19"
# hats
# scene_name_1 = "2018-05-15-22-01-44"
# scene_name_2 = "2018-05-15-22-04-17"
self.img1_pil = self._dataset.get_rgb_image_from_scene_name_and_idx(
scene_name_1, image_1_idx
)
self.img2_pil = self._dataset.get_rgb_image_from_scene_name_and_idx(
scene_name_2, image_2_idx
)
self._scene_name_1 = scene_name_1
self._scene_name_2 = scene_name_2
self._image_1_idx = image_1_idx
self._image_2_idx = image_2_idx
self._compute_descriptors()
# self.rgb_1_tensor = self._dataset.rgb_image_to_tensor(img1_pil)
# self.rgb_2_tensor = self._dataset.rgb_image_to_tensor(img2_pil)
def _compute_descriptors(self):
"""
Computes the descriptors for image 1 and image 2 for each network
:return:
:rtype:
"""
self.img1 = pil_image_to_cv2(self.img1_pil)
self.img2 = pil_image_to_cv2(self.img2_pil)
self.rgb_1_tensor = self._dataset.rgb_image_to_tensor(self.img1_pil)
self.rgb_2_tensor = self._dataset.rgb_image_to_tensor(self.img2_pil)
self.img1_gray = cv2.cvtColor(self.img1, cv2.COLOR_RGB2GRAY) / 255.0
self.img2_gray = cv2.cvtColor(self.img2, cv2.COLOR_RGB2GRAY) / 255.0
cv2.imshow('source', self.img1)
cv2.imshow('target', self.img2)
self._res_a = dict()
self._res_b = dict()
for network_name, dcn in self._dcn_dict.items():
self._res_a[network_name] = (
dcn.forward_single_image_tensor(self.rgb_1_tensor).data.cpu().numpy()
)
self._res_b[network_name] = (
dcn.forward_single_image_tensor(self.rgb_2_tensor).data.cpu().numpy()
)
self.find_best_match(None, 0, 0, None, None)
def scale_norm_diffs_to_make_heatmap(self, norm_diffs, threshold):
"""
TODO (@manuelli) scale with Gaussian kernel instead of linear
Scales the norm diffs to make a heatmap. This will be scaled between 0 and 1.
0 corresponds to a match, 1 to non-match
:param norm_diffs: The norm diffs
:type norm_diffs: numpy.array [H,W]
:return:
:rtype:
"""
heatmap = np.copy(norm_diffs)
greater_than_threshold = np.where(norm_diffs > threshold)
        heatmap = (
            heatmap / threshold * self._config["heatmap_vis_upper_bound"]
        )  # linearly scale [0, threshold] to [0, heatmap_vis_upper_bound]
heatmap[greater_than_threshold] = 1 # greater than threshold is set to 1
heatmap = heatmap.astype(self.img1_gray.dtype)
return heatmap
def find_best_match(self, event, u, v, flags, param):
"""
For each network, find the best match in the target image to point highlighted
with reticle in the source image. Displays the result
:return:
:rtype:
"""
if self._paused:
return
img_1_with_reticle = np.copy(self.img1)
draw_reticle(img_1_with_reticle, u, v, self._reticle_color)
cv2.imshow("source", img_1_with_reticle)
alpha = self._config["blend_weight_original_image"]
beta = 1 - alpha
img_2_with_reticle = np.copy(self.img2)
print("\n\n")
self._res_uv = dict()
# self._res_a_uv = dict()
# self._res_b_uv = dict()
for network_name in self._dcn_dict:
res_a = self._res_a[network_name]
res_b = self._res_b[network_name]
(
best_match_uv,
best_match_diff,
norm_diffs,
) = DenseCorrespondenceNetwork.find_best_match((u, v), res_a, res_b)
print("\n\n")
print("network_name:" + network_name)
print("scene_name_1" + self._scene_name_1)
print("image_1_idx" + str(self._image_1_idx))
print("scene_name_2" + self._scene_name_2)
print("image_2_idx" + str(self._image_2_idx))
d = dict()
d['scene_name'] = self._scene_name_1
d['image_idx'] = self._image_1_idx
d['descriptor'] = res_a[v, u, :].tolist()
d['u'] = u
d['v'] = v
print("\n-------keypoint info\n" + str(d))
print("\n--------\n")
self._res_uv[network_name] = dict()
self._res_uv[network_name]['source'] = res_a[v, u, :].tolist()
self._res_uv[network_name]['target'] = res_b[v, u, :].tolist()
print("res_a[v, u, :]:" + str(res_a[v, u, :]))
print("res_b[v, u, :]:" + str(res_b[best_match_uv[1], best_match_uv[0], :]))
print("%s best match diff: %.3f".format(network_name, best_match_diff))
print("res_a" + str(self._res_uv[network_name]['source']))
print("res_b" + str(self._res_uv[network_name]['target']))
threshold = self._config["norm_diff_threshold"]
if network_name in self._config["norm_diff_threshold_dict"]:
threshold = self._config["norm_diff_threshold_dict"][network_name]
heatmap_color = vis_utils.compute_gaussian_kernel_heatmap_from_norm_diffs(
norm_diffs, self._config['kernel_variance']
)
reticle_color = self._network_reticle_color[network_name]
draw_reticle(
heatmap_color, best_match_uv[0], best_match_uv[1], reticle_color
)
draw_reticle(
img_2_with_reticle, best_match_uv[0], best_match_uv[1], reticle_color
)
blended = cv2.addWeighted(self.img2, alpha, heatmap_color, beta, 0)
cv2.imshow(network_name, blended)
cv2.imshow("target", img_2_with_reticle)
if event == cv2.EVENT_LBUTTONDOWN:
utils.saveToYaml(self._res_uv, 'clicked_point.yaml')
def run(self):
self._get_new_images()
cv2.namedWindow('target')
cv2.setMouseCallback('source', self.find_best_match)
self._get_new_images()
while True:
k = cv2.waitKey(20) & 0xFF
if k == 27:
break
elif k == ord('n'):
self._get_new_images()
elif k == ord('s'):
img1_pil = self.img1_pil
img2_pil = self.img2_pil
self.img1_pil = img2_pil
self.img2_pil = img1_pil
self._compute_descriptors()
elif k == ord('p'):
if self._paused:
print("un pausing")
self._paused = False
else:
print("pausing")
self._paused = True
if __name__ == "__main__":
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
config_file = os.path.join(
dc_source_dir, 'config', 'dense_correspondence', 'heatmap_vis', 'heatmap.yaml'
)
config = utils.getDictFromYamlFilename(config_file)
heatmap_vis = HeatmapVisualization(config)
print("starting heatmap vis")
heatmap_vis.run()
    cv2.destroyAllWindows()
|
the-stack_0_27820
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class TriggerResourcePaged(Paged):
"""
    A paging container for iterating over a list of :class:`TriggerResource <azure.mgmt.datafactory.models.TriggerResource>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[TriggerResource]'}
}
def __init__(self, *args, **kwargs):
super(TriggerResourcePaged, self).__init__(*args, **kwargs)
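

# Hedged usage sketch (not part of the generated model): a Paged subclass is an
# iterator that lazily follows the service's `nextLink`. The client call named
# in the comment below is an assumption for illustration only; the real list
# operation lives on the generated operations class.
def _example_iterate_triggers(trigger_paged):
    # `trigger_paged` would typically come from something like
    # client.triggers.list_by_factory(resource_group_name, factory_name).
    names = []
    for trigger in trigger_paged:  # iteration transparently fetches further pages
        names.append(trigger.name)
    return names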
|
the-stack_0_27821
|
from __future__ import print_function, division, absolute_import
import re
import os
import sys
import subprocess
from os.path import isdir, isfile, join
if sys.platform == 'win32':
dir_paths = [join(sys.prefix, 'Scripts')]
else:
dir_paths = [join(sys.prefix, 'bin')]
dir_paths.extend(os.environ['PATH'].split(os.pathsep))
def find_executable(cmd):
executable = 'conda-%s' % cmd
for dir_path in dir_paths:
if sys.platform == 'win32':
for ext in '.exe', '.bat':
path = join(dir_path, executable + ext)
if isfile(path):
return path
else:
path = join(dir_path, executable)
if isfile(path):
return path
return None
def find_commands():
if sys.platform == 'win32':
pat = re.compile(r'conda-(\w+)\.(exe|bat)$')
else:
pat = re.compile(r'conda-(\w+)$')
res = set()
for dir_path in dir_paths:
if not isdir(dir_path):
continue
for fn in os.listdir(dir_path):
m = pat.match(fn)
if m:
res.add(m.group(1))
return sorted(res)
def filter_descr(cmd):
args = [find_executable(cmd), '--help']
try:
output = subprocess.check_output(args)
except subprocess.CalledProcessError:
print('failed: %s' % (' '.join(args)))
return
try:
descr = output.decode('utf-8').split('\n\n')[1]
except IndexError:
descr = '<could not extract description>'
print(' %-12s %s' % (cmd, descr))
def help():
print("\nexternal commands:")
for cmd in find_commands():
filter_descr(cmd)
if __name__ == '__main__':
help()
|
the-stack_0_27824
|
#2D pipeline
import numpy as np
from skimage.morphology import label
from scipy.spatial import cKDTree as KDTree
import pandas as pd
import itertools
from tqdm import tqdm
def Execute_Correspondences_CreateInputs(candidates,normalized_images,im_th,cycle,channels,nbit):
inputs_df=pd.DataFrame(columns=['cycle','ch','x','y','Intensities_window_5x5'])
max_df=pd.DataFrame(columns=['I_T','I_G','I_C','I_A','x_T','y_T','x_G','y_G','x_C','y_C','x_A','y_A','cycle'])
cc, n_c = label(np.amax(candidates[cycle,2:channels,:,:],axis=0),return_num=True,connectivity=1)
conn_components = np.zeros((4,candidates.shape[-2],candidates.shape[-1]))
for ch in range(4):
conn_components[ch,:,:] = np.multiply(cc,candidates[cycle,ch+2,:,:])
for i in tqdm(range(1,n_c+1)):
ch,y,x = np.where(conn_components==i)
kdT_tmp = KDTree(np.array([x,y]).T)
        if len(list(itertools.combinations(np.arange(len(x)),2)))==len(kdT_tmp.query_pairs(2,p=1)): # compact component: treat it as one signal; otherwise it is too large (likely covering several signals) and is split below
df=pd.Series(data={ 'I_T':np.nan,'I_G':np.nan,'I_C':np.nan,'I_A':np.nan,'x_T':np.nan,'y_T':np.nan,'x_G':np.nan,'y_G':np.nan,'x_C':np.nan,'y_C':np.nan,'x_A':np.nan,'y_A':np.nan,'cycle':cycle})
df=df[['I_T','I_G','I_C','I_A','x_T','y_T','x_G','y_G','x_C','y_C','x_A','y_A','cycle']]
for j in range(len(x)):
df.iloc[ch[j]] = im_th[cycle,ch[j]+2,y[j],x[j]]
df.iloc[ch[j]*2+4]= x[j]
df.iloc[ch[j]*2+4+1]= y[j]
I=df['I_T':'I_A']
col=I[I==np.nanmax(I)].index[0] #retrieving the column
tomove=df.index.get_loc(col) #column index to reach the correct columns coordinates
x_ch=int(df[tomove*2+4])
y_ch=int(df[tomove*2+4+1])
ch_idx=tomove
cycle=int(df['cycle'])
rect=normalized_images[cycle,ch_idx+2,y_ch-2:y_ch+3,x_ch-2:x_ch+3]
if not rect.size==0:
rect=(rect-np.amin(rect))/(np.amax(rect)-np.amin(rect))
rect=rect-np.mean(rect)
row=pd.Series(data={'cycle':cycle,'ch':ch_idx+2,'x':x_ch,'y':y_ch,'Intensities_window_5x5':rect})
inputs_df=inputs_df.append(row,ignore_index=True)
max_df=max_df.append(df,ignore_index=True)
else:
coords = np.vstack((x,y))
coords_unique = np.unique(coords,axis=1)
for j in range(coords_unique.shape[-1]):
coords_tmp = coords_unique[:,j][:, np.newaxis]
coords_idx = np.argwhere(np.all(coords==coords_tmp,axis=0)).reshape((-1,))
df=pd.Series(data={ 'I_T':np.nan,'I_G':np.nan,'I_C':np.nan,'I_A':np.nan,'x_T':np.nan,'y_T':np.nan,'x_G':np.nan,'y_G':np.nan,'x_C':np.nan,'y_C':np.nan,'x_A':np.nan,'y_A':np.nan,'cycle':cycle})
df=df[['I_T','I_G','I_C','I_A','x_T','y_T','x_G','y_G','x_C','y_C','x_A','y_A','cycle']]
for k in coords_idx:
df.iloc[ch[k]] = im_th[cycle,ch[k]+2,y[k],x[k]]
df.iloc[ch[k]*2+4]= x[k]
df.iloc[ch[k]*2+4+1]= y[k]
I=df['I_T':'I_A']
col=I[I==np.nanmax(I)].index[0] #retrieving the column
tomove=df.index.get_loc(col) #column index to reach the correct columns coordinates
x_ch=int(df[tomove*2+4])
y_ch=int(df[tomove*2+4+1])
ch_idx=tomove
cycle=int(df['cycle'])
rect=normalized_images[cycle,ch_idx+2,y_ch-2:y_ch+3,x_ch-2:x_ch+3]
if not rect.size==0:
rect=(rect-np.amin(rect))/(np.amax(rect)-np.amin(rect))
rect=rect-np.mean(rect)
row=pd.Series(data={'cycle':cycle,'ch':ch_idx+2,'x':x_ch,'y':y_ch,'Intensities_window_5x5':rect})
inputs_df=inputs_df.append(row,ignore_index=True)
max_df=max_df.append(df,ignore_index=True)
return {'max_df':max_df, 'inputs_df':inputs_df}
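

# Hedged usage sketch (illustration only). The array layout below is an
# assumption inferred from the indexing above -- images indexed as
# [cycle, channel, y, x] with the four base channels starting at index 2 --
# and it relies on a pandas version that still provides DataFrame.append (<2.0).
def _example_run_single_cycle():
    n_cycles, n_channels, h, w = 1, 6, 64, 64
    candidates = np.zeros((n_cycles, n_channels, h, w), dtype=bool)
    normalized_images = np.random.rand(n_cycles, n_channels, h, w)
    im_th = np.random.rand(n_cycles, n_channels, h, w)
    candidates[0, 3, 10, 10] = True  # one synthetic detection in the second base channel
    out = Execute_Correspondences_CreateInputs(
        candidates, normalized_images, im_th, cycle=0, channels=n_channels, nbit=16)
    return out['max_df'], out['inputs_df']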
|
the-stack_0_27828
|
import logging
import sys
import os
import naslib as nl
from naslib.defaults.predictor_evaluator import PredictorEvaluator
from naslib.predictors import (
BayesianLinearRegression,
BOHAMIANN,
BonasPredictor,
DNGOPredictor,
EarlyStopping,
Ensemble,
GCNPredictor,
GPPredictor,
LCEPredictor,
LCEMPredictor,
LGBoost,
MLPPredictor,
NGBoost,
OmniNGBPredictor,
OmniSemiNASPredictor,
RandomForestPredictor,
SVR_Estimator,
SemiNASPredictor,
SoLosspredictor,
SparseGPPredictor,
VarSparseGPPredictor,
XGBoost,
ZeroCostV1,
ZeroCostV2,
GPWLPredictor,
)
from naslib.search_spaces.core.query_metrics import Metric
from naslib.search_spaces import (
NasBench101SearchSpace,
NasBench201SearchSpace,
DartsSearchSpace,
NasBenchNLPSearchSpace,
TransBench101SearchSpace
)
from naslib.utils import utils, setup_logger, get_dataset_api
from naslib.utils.utils import get_project_root
config = utils.get_config_from_args(config_type="predictor")
utils.set_seed(config.seed)
logger = setup_logger(config.save + "/log.log")
logger.setLevel(logging.INFO)
utils.log_args(config)
supported_predictors = {
"bananas": Ensemble(predictor_type="bananas", num_ensemble=3, hpo_wrapper=True),
"bayes_lin_reg": BayesianLinearRegression(encoding_type="adjacency_one_hot"),
"bohamiann": BOHAMIANN(encoding_type="adjacency_one_hot"),
"bonas": BonasPredictor(encoding_type="bonas", hpo_wrapper=True),
"dngo": DNGOPredictor(encoding_type="adjacency_one_hot"),
"fisher": ZeroCostV2(config, batch_size=64, method_type="fisher"),
"gcn": GCNPredictor(encoding_type="gcn", hpo_wrapper=True),
"gp": GPPredictor(encoding_type="adjacency_one_hot"),
"gpwl": GPWLPredictor(
ss_type=config.search_space,
kernel_type="wloa",
optimize_gp_hyper=True,
h="auto",
),
"grad_norm": ZeroCostV2(config, batch_size=64, method_type="grad_norm"),
"grasp": ZeroCostV2(config, batch_size=64, method_type="grasp"),
"jacov": ZeroCostV1(config, batch_size=64, method_type="jacov"),
"lce": LCEPredictor(metric=Metric.VAL_ACCURACY),
"lce_m": LCEMPredictor(metric=Metric.VAL_ACCURACY),
"lcsvr": SVR_Estimator(
metric=Metric.VAL_ACCURACY, all_curve=False, require_hyper=False
),
"lgb": LGBoost(encoding_type="adjacency_one_hot", hpo_wrapper=False),
"mlp": MLPPredictor(encoding_type="adjacency_one_hot", hpo_wrapper=True),
"nao": SemiNASPredictor(encoding_type="seminas", semi=False, hpo_wrapper=False),
"ngb": NGBoost(encoding_type="adjacency_one_hot", hpo_wrapper=False),
"rf": RandomForestPredictor(encoding_type="adjacency_one_hot", hpo_wrapper=False),
"seminas": SemiNASPredictor(encoding_type="seminas", semi=True, hpo_wrapper=False),
"snip": ZeroCostV2(config, batch_size=64, method_type="snip"),
"sotl": SoLosspredictor(metric=Metric.TRAIN_LOSS, sum_option="SoTL"),
"sotle": SoLosspredictor(metric=Metric.TRAIN_LOSS, sum_option="SoTLE"),
"sotlema": SoLosspredictor(metric=Metric.TRAIN_LOSS, sum_option="SoTLEMA"),
"sparse_gp": SparseGPPredictor(
encoding_type="adjacency_one_hot", optimize_gp_hyper=True, num_steps=100
),
"synflow": ZeroCostV2(config, batch_size=64, method_type="synflow"),
"valacc": EarlyStopping(metric=Metric.VAL_ACCURACY),
"valloss": EarlyStopping(metric=Metric.VAL_LOSS),
"var_sparse_gp": VarSparseGPPredictor(
encoding_type="adjacency_one_hot", optimize_gp_hyper=True, num_steps=200
),
"xgb": XGBoost(encoding_type="adjacency_one_hot", hpo_wrapper=False),
# path encoding experiments:
"bayes_lin_reg_path": BayesianLinearRegression(encoding_type="path"),
"bohamiann_path": BOHAMIANN(encoding_type="path"),
"dngo_path": DNGOPredictor(encoding_type="path"),
"gp_path": GPPredictor(encoding_type="path"),
"lgb_path": LGBoost(encoding_type="path", hpo_wrapper=False),
"ngb_path": NGBoost(encoding_type="path", hpo_wrapper=False),
# omni:
"omni_ngb": OmniNGBPredictor(
encoding_type="adjacency_one_hot",
config=config,
zero_cost=["jacov"],
lce=["sotle"],
),
"omni_seminas": OmniSemiNASPredictor(
encoding_type="seminas",
config=config,
semi=True,
hpo_wrapper=False,
zero_cost=["jacov"],
lce=["sotle"],
jacov_onehot=True,
),
# omni ablation studies:
"omni_ngb_no_lce": OmniNGBPredictor(
encoding_type="adjacency_one_hot", config=config, zero_cost=["jacov"], lce=[]
),
"omni_seminas_no_lce": OmniSemiNASPredictor(
encoding_type="seminas",
config=config,
semi=True,
hpo_wrapper=False,
zero_cost=["jacov"],
lce=[],
jacov_onehot=True,
),
"omni_ngb_no_zerocost": OmniNGBPredictor(
encoding_type="adjacency_one_hot", config=config, zero_cost=[], lce=["sotle"]
),
"omni_ngb_no_encoding": OmniNGBPredictor(
encoding_type=None, config=config, zero_cost=["jacov"], lce=["sotle"]
),
}
supported_search_spaces = {
"nasbench101": NasBench101SearchSpace(),
"nasbench201": NasBench201SearchSpace(),
"darts": DartsSearchSpace(),
"nlp": NasBenchNLPSearchSpace(),
'transbench101': TransBench101SearchSpace()
}
# 'transbench101_micro': TransBench101SearchSpace('micro'),
# 'transbench101_macro': TransBench101SearchSpace('micro')}
#}
"""
If the API did not evaluate *all* architectures in the search space,
set load_labeled=True
"""
load_labeled = True if config.search_space in ["darts", "nlp"] else False
dataset_api = get_dataset_api(config.search_space, config.dataset)
# initialize the search space and predictor
utils.set_seed(config.seed)
predictor = supported_predictors[config.predictor]
search_space = supported_search_spaces[config.search_space]
# initialize the PredictorEvaluator class
predictor_evaluator = PredictorEvaluator(predictor, config=config)
predictor_evaluator.adapt_search_space(
search_space, load_labeled=load_labeled, dataset_api=dataset_api
)
# evaluate the predictor
predictor_evaluator.evaluate()
|
the-stack_0_27830
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
def calc_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolates = real_data
elif type == 'fake':
interpolates = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
alpha = alpha.to(device)
interpolates = alpha * real_data.detach() + ((1 - alpha) * fake_data.detach())
interpolates = interpolates.to(device)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolates.requires_grad_(True)
disc_interpolates = netD(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty
else:
        return 0.0
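

# Hedged usage sketch (illustration only): how the penalty is typically folded
# into a WGAN-GP critic update. `netD`, `real`, `fake`, `optimizer_D` and
# `device` are assumed to be supplied by the caller; this is not the training
# loop used elsewhere in this repository.
def _example_wgangp_discriminator_step(netD, real, fake, optimizer_D, device):
    optimizer_D.zero_grad()
    loss_real = -netD(real).mean()           # critic should score real samples high
    loss_fake = netD(fake.detach()).mean()   # and generated samples low
    gp = calc_gradient_penalty(netD, real, fake, device,
                               type='mixed', constant=1.0, lambda_gp=10.0)
    loss_D = loss_real + loss_fake + gp
    loss_D.backward()
    optimizer_D.step()
    return loss_D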
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'layer':
norm_layer = functools.partial(nn.LayerNorm)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.niter> epochs
and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
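

# Hedged illustration (not used elsewhere in this file): with the 'linear'
# policy the multiplier stays at 1.0 for the first `niter` epochs and then
# decays linearly to ~0 over `niter_decay` epochs. The tiny Opt class is a
# stand-in for the real options object.
def _example_linear_decay_multipliers():
    class Opt(object):
        epoch_count = 1
        niter = 100
        niter_decay = 100
    opt = Opt()

    def lambda_rule(epoch):
        return 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)

    # epochs 0..99 -> 1.0, epoch 150 -> ~0.50, epoch 199 -> ~0.01
    return [lambda_rule(e) for e in (0, 99, 150, 199)]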
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
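

# Hedged usage sketch (illustration only): GANLoss builds the target label
# tensor itself, so a discriminator prediction map can be scored directly.
def _example_gan_loss():
    criterion = GANLoss('lsgan')
    pred_fake = torch.randn(4, 1, 30, 30)        # e.g. a PatchGAN prediction map
    loss_G = criterion(pred_fake, True)          # generator wants fakes judged real
    loss_D_fake = criterion(pred_fake.detach(), False)
    return loss_G, loss_D_fake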
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
curr_input_size = [2,2]
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True,input_size=curr_input_size) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
curr_input_size = [x *2 for x in curr_input_size]
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout,input_size=curr_input_size)
# gradually reduce the number of filters from ngf * 8 to ngf
curr_input_size = [x *2 for x in curr_input_size]
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer,input_size=curr_input_size)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer,input_size=curr_input_size)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer,input_size=curr_input_size)
curr_input_size = [x *2 for x in curr_input_size]
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer,input_size=curr_input_size) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False,input_size=[64,64]):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
            use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
#downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
#upnorm =norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
nn.LeakyReLU(0.2, True) #ndf * nf_mult
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
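

# Hedged end-to-end sketch (illustration only): building a ResNet generator and
# a PatchGAN discriminator on CPU with the usual pix2pix channel counts.
def _example_build_networks():
    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
                    norm='instance', use_dropout=False, gpu_ids=[])
    netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance', gpu_ids=[])
    x = torch.randn(1, 3, 256, 256)
    fake = netG(x)              # -> (1, 3, 256, 256)
    score_map = netD(fake)      # -> (1, 1, 30, 30) patch-level real/fake scores
    return fake.shape, score_map.shape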
|
the-stack_0_27831
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import llnl.util.filesystem as fs
import spack.paths
from spack.main import get_version, main
def test_get_version_no_match_git(tmpdir, working_env):
git = str(tmpdir.join("git"))
with open(git, "w") as f:
f.write("""#!/bin/sh
echo v0.13.3
""")
fs.set_executable(git)
os.environ["PATH"] = str(tmpdir)
assert spack.spack_version == get_version()
def test_get_version_match_git(tmpdir, working_env):
git = str(tmpdir.join("git"))
with open(git, "w") as f:
f.write("""#!/bin/sh
echo v0.13.3-912-g3519a1762
""")
fs.set_executable(git)
os.environ["PATH"] = str(tmpdir)
assert "0.13.3-912-3519a1762" == get_version()
def test_get_version_no_repo(tmpdir, monkeypatch):
monkeypatch.setattr(spack.paths, "prefix", str(tmpdir))
assert spack.spack_version == get_version()
def test_get_version_no_git(tmpdir, working_env):
os.environ["PATH"] = str(tmpdir)
assert spack.spack_version == get_version()
def test_main_calls_get_version(tmpdir, capsys, working_env):
os.environ["PATH"] = str(tmpdir)
main(["-V"])
assert spack.spack_version == capsys.readouterr()[0].strip()
def test_get_version_bad_git(tmpdir, working_env):
bad_git = str(tmpdir.join("git"))
with open(bad_git, "w") as f:
f.write("""#!/bin/sh
exit 1
""")
fs.set_executable(bad_git)
os.environ["PATH"] = str(tmpdir)
assert spack.spack_version == get_version()
|
the-stack_0_27833
|
from ....models.models import Projector
from ....permissions.permissions import Permissions
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("projector.update")
class ProjectorUpdate(UpdateAction):
"""
Action to update a projector.
"""
model = Projector()
schema = DefaultSchema(Projector()).get_update_schema(
optional_properties=[
"name",
"width",
"aspect_ratio_numerator",
"aspect_ratio_denominator",
"color",
"background_color",
"header_background_color",
"header_font_color",
"header_h1_color",
"chyron_background_color",
"chyron_font_color",
"show_header_footer",
"show_title",
"show_logo",
"show_clock",
"used_as_default_$_in_meeting_id",
],
)
permission = Permissions.Projector.CAN_MANAGE
|
the-stack_0_27835
|
"""Scraper for the Rhode Island Supreme Court
CourtID: ri
Court Short Name: R.I.
Court Contact: [email protected], [email protected] (Ferris, Mirella), [email protected]
https://www.courts.ri.gov/PDF/TelephoneDirectory.pdf
Author: Brian W. Carver
Date created: 2013-08-10
"""
import re
from datetime import datetime
from juriscraper.OpinionSiteLinear import OpinionSiteLinear
from juriscraper.lib.exceptions import InsanityException
class Site(OpinionSiteLinear):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = self.build_url(
"https://www.courts.ri.gov/Courts/SupremeCourt/Pages/Opinions/Opinions"
)
self.previous_date = None
self.include_summary = True
self.status = "Published"
def build_url(self, base_url):
# This court hears things from mid-September to end of June. This
# defines the "term" for that year, which triggers the website updates.
today = datetime.today()
this_year = today.year
term_end = datetime(this_year, 9, 15)
year = this_year if today >= term_end else this_year - 1
return "%s%d-%d.aspx" % (base_url, year, year + 1)
def _process_html(self):
# case information spans over 3 rows, so must process 3 at a time:
# <tr> - contains case name, docket number, date and pdf link
# <tr> - contains case summary
# <tr> - contains a one-pixel gif spacer
table = "//table[@id = 'onetidDoclibViewTbl0']/tr[position() > 1]"
rows = list(self.html.xpath(table))
row_triplets = list(zip(rows, rows[1:]))[::3]
for tr1, tr2 in row_triplets:
case = self.extract_case_from_rows(tr1, tr2)
self.previous_date = case["date"]
self.cases.append(case)
def extract_case_from_rows(self, row1, row2):
docket = row1.xpath("./td/a/text()")[0]
docket = ", ".join([d.strip() for d in docket.split(",")])
url = row1.xpath("./td/a/@href")[0]
text = row1.xpath("./td[1]/text()")[0]
text_to_parse = [text]
if self.include_summary:
summary_lines = row2.xpath("./td/div/text()")
summary = "\n".join(summary_lines)
joined_text = "\n".join([text, summary_lines[0]])
text_to_parse.append(joined_text)
else:
summary = False
return {
"url": url,
"docket": docket,
"date": self.parse_date_from_text(text_to_parse),
"name": self.parse_name_from_text(text_to_parse),
"summary": summary,
}
def parse_date_from_text(self, text_list):
regex = r"(.*?)(\((\w+\s+\d+\,\s+\d+)\))(.*?)"
for text in text_list:
date_match = re.match(regex, text)
if date_match:
return date_match.group(3)
# Fall back on previous case's date
if self.previous_date:
return self.previous_date
raise InsanityException(
"Could not parse date from string, and no "
'previous date to fall back on: "%s"' % text_list
)
@staticmethod
def parse_name_from_text(text_list):
regexes = [
# Expected format
r"(.*?)(,?\sNos?\.)(.*?)",
# Clerk typo, forgot "No."/"Nos." substring
r"(.*?)(,?\s\d+-\d+(,|\s))(.*?)",
# Same as above, and there's an unconventional docket number
# like 'SU-14-324' instead of '14-324'. See ri_p_example_4.html
r"(.*?)(,?\s(?:\w+-)?\d+-\d+(,|\s))(.*?)",
]
for regex in regexes:
for text in text_list:
name_match = re.match(regex, text)
if name_match:
return name_match.group(1)
# "No."/"Nos." and docket missing, fall back on whatever's before first
# semi-colon
for text in text_list:
if ";" in text:
return text.split(";")[0]
raise InsanityException(
'Could not parse name from string: "%s"' % text_list
)
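

# Hedged illustration (not part of the scraper): how the name parser's first
# regex splits a typical caption string. The string is made up, not real case data.
def _example_parse_name():
    text = "State v. Doe, No. 2013-123-C.A. (June 1, 2013)"
    # Expected to return the portion before the docket, i.e. "State v. Doe"
    return Site.parse_name_from_text([text])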
|
the-stack_0_27836
|
from uuid import uuid4
from flask import Response
from flask_restful import Resource, request
from blockchain.blockchain import Blockchain
blockchain = Blockchain()
class Node(Resource):
def post(self):
"""
Add new node to blockchain
"""
node_id = blockchain.register_node(request.host)
return {
'message': 'New node have been added.',
'node_id': node_id,
'nodes': list(blockchain.nodes)
}, 201
class Chain(Resource):
def get(self):
"""
Returns blockchain
"""
chains = blockchain.chain
return {
'chains': chains,
'length': len(chains)
}, 200
class Mine(Resource):
def post(self):
if not request.is_json:
return Response('', 400)
req = request.get_json()
node_id = req.get('node_id')
if not all([node_id]):
return Response('', 400)
if node_id not in blockchain.nodes:
return Response('Invalid node id', 400)
last_block = blockchain.last_block
nonce = blockchain.proof_of_work(last_block['nonce'])
# Mine
blockchain.new_transaction(
sender='0',
recipient=node_id,
amount=1
)
previous_hash = blockchain.hash_block(last_block)
new_block = blockchain.new_block(nonce, previous_hash)
# Generates new block
return {
'message': 'New Block Forged',
'block': {
'version': new_block['version'],
'transactions': new_block['transactions'],
'timestamp': new_block['timestamp'],
'nonce': new_block['nonce']
}
}, 200
class Transaction(Resource):
def post(self):
if not request.is_json:
return Response('', 400)
req = request.get_json()
sender = req.get('sender')
recipient = req.get('recipient')
amount = req.get('amount')
if not all([sender, recipient, amount]):
return Response('', 400)
if sender not in blockchain.nodes or recipient not in blockchain.nodes:
return Response('Invalid sender id or recipient id', 400)
blockchain.new_transaction(sender, recipient, amount)
return Response('', 201)
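

# Hedged wiring sketch (illustration only): one way these Resource classes
# might be mounted on a Flask app with Flask-RESTful. The route paths are
# assumptions; the real application factory is not shown in this module.
def _example_create_app():
    from flask import Flask
    from flask_restful import Api
    app = Flask(__name__)
    api = Api(app)
    api.add_resource(Node, '/nodes')
    api.add_resource(Chain, '/chain')
    api.add_resource(Mine, '/mine')
    api.add_resource(Transaction, '/transactions')
    return app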
|
the-stack_0_27837
|
import wave
from pyogg import OpusEncoder
from pyogg import OpusDecoder
if __name__ == "__main__":
# Setup encoding
# ==============
# Read a wav file to obtain PCM data
filename = "left-right-demo-5s.wav"
wave_read = wave.open(filename, "rb")
print("Reading wav from file '{:s}'".format(filename))
# Extract the wav's specification
channels = wave_read.getnchannels()
print("Number of channels:", channels)
samples_per_second = wave_read.getframerate()
print("Sampling frequency:", samples_per_second)
bytes_per_sample = wave_read.getsampwidth()
# Create an Opus encoder
opus_encoder = OpusEncoder()
opus_encoder.set_application("audio")
opus_encoder.set_sampling_frequency(samples_per_second)
opus_encoder.set_channels(channels)
# Calculate the desired frame size (in samples per channel)
    desired_frame_duration = 20/1000  # 20 milliseconds, expressed in seconds
desired_frame_size = int(desired_frame_duration * samples_per_second)
# Setup decoding
# ==============
# Create an Opus decoder
opus_decoder = OpusDecoder()
opus_decoder.set_channels(channels)
opus_decoder.set_sampling_frequency(samples_per_second)
# Open an output wav for the decoded PCM
output_filename = "output-"+filename
wave_write = wave.open(output_filename, "wb")
print("Writing wav into file '{:s}'".format(output_filename))
# Save the wav's specification
wave_write.setnchannels(channels)
wave_write.setframerate(samples_per_second)
wave_write.setsampwidth(bytes_per_sample)
# Execute encode-decode
# =====================
# Loop through the wav file's PCM data and encode it as Opus
bytes_encoded = 0
while True:
# Get data from the wav file
pcm = wave_read.readframes(desired_frame_size)
# Check if we've finished reading the wav file
if len(pcm) == 0:
break
# Calculate the effective frame size from the number of bytes
# read
effective_frame_size = (
len(pcm) # bytes
// bytes_per_sample
// channels
)
# Check if we've received enough data
if effective_frame_size < desired_frame_size:
# We haven't read a full frame from the wav file, so this
# is most likely a final partial frame before the end of
# the file. We'll pad the end of this frame with silence.
pcm += (
b"\x00"
* ((desired_frame_size - effective_frame_size)
* bytes_per_sample
* channels)
)
# Encode the PCM data
encoded_packet = opus_encoder.encode(pcm)
bytes_encoded += len(encoded_packet)
# At this stage we now have a buffer containing an
# Opus-encoded packet. This could be sent over UDP, for
# example, and then decoded with OpusDecoder. However it
# cannot really be saved to a file without wrapping it in the
# likes of an Ogg stream; for this see OggOpusWriter.
# For this example, we will now immediately decode this
# encoded packet using OpusDecoder.
decoded_pcm = opus_decoder.decode(encoded_packet)
# Save the decoded PCM as a new wav file
wave_write.writeframes(decoded_pcm)
wave_read.close()
wave_write.close()
print("Total bytes of encoded packets:", bytes_encoded)
print("Finished.")
|
the-stack_0_27839
|
# Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# hone_hostEntry.py
# Define class to contain host-related information
from hone_util import LogUtil
''' entry about host's information '''
class HostEntry:
def __init__(self, hostId, hostAddress, appList=None, jobs=None):
self.hostId = hostId
self.hostAddress = hostAddress
if appList is None:
self.appList = []
else:
self.appList = appList
if jobs is None:
self.jobs = []
else:
self.jobs = jobs
def addJob(self, jobId):
if jobId not in self.jobs:
self.jobs.append(jobId)
# LogUtil.DebugLog('exeGen', 'HostEntry addJob. hostId: {0}. jobId: {1}.'.format(self.hostId, jobId))
def removeJob(self, jobId):
if jobId in self.jobs:
self.jobs.remove(jobId)
# LogUtil.DebugLog('exeGen', 'HostEntry removeJob. hostId: {0}. jobId: {1}'.format(self.hostId, jobId))
|
the-stack_0_27841
|
"""
molecool
A Python package for analyzing and visulazing molecular files. For molssi workshop.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='molecool',
author='Steven Ayoub ',
author_email='[email protected] ',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
the-stack_0_27842
|
import pandas as pd
from libs.datasets import data_source
from libs.datasets import dataset_utils
from libs.datasets.common_fields import CommonFields, CommonIndexFields
class NYTimesDataset(data_source.DataSource):
SOURCE_NAME = "NYTimes"
DATA_FOLDER = "data/cases-nytimes"
COUNTIES_DATA_FILE = "us-counties.csv"
HAS_AGGREGATED_NYC_BOROUGH = True
class Fields(object):
DATE = "date"
COUNTY = "county"
STATE = "state"
FIPS = "fips"
COUNTRY = "country"
AGGREGATE_LEVEL = "aggregate_level"
CASES = "cases"
DEATHS = "deaths"
INDEX_FIELD_MAP = {
CommonIndexFields.DATE: Fields.DATE,
CommonIndexFields.COUNTRY: Fields.COUNTRY,
CommonIndexFields.STATE: Fields.STATE,
CommonIndexFields.FIPS: Fields.FIPS,
CommonIndexFields.AGGREGATE_LEVEL: Fields.AGGREGATE_LEVEL,
}
COMMON_FIELD_MAP = {
CommonFields.CASES: Fields.CASES,
CommonFields.DEATHS: Fields.DEATHS,
}
def __init__(self, input_path):
data = pd.read_csv(input_path, parse_dates=[self.Fields.DATE], dtype={"fips": str})
data = self.standardize_data(data)
super().__init__(data)
@classmethod
def local(cls) -> "NYTimesDataset":
data_root = dataset_utils.LOCAL_PUBLIC_DATA_PATH
return cls(data_root / cls.DATA_FOLDER / cls.COUNTIES_DATA_FILE)
@classmethod
def standardize_data(cls, data: pd.DataFrame) -> pd.DataFrame:
data[cls.Fields.COUNTRY] = "USA"
data = dataset_utils.strip_whitespace(data)
data[cls.Fields.STATE] = data[cls.Fields.STATE].apply(dataset_utils.parse_state)
# Super hacky way of filling in new york.
data.loc[data[cls.Fields.COUNTY] == "New York City", "county"] = "New York County"
data.loc[data[cls.Fields.COUNTY] == "New York County", "fips"] = "36061"
data[cls.Fields.AGGREGATE_LEVEL] = "county"
return data
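

# Hedged usage sketch (illustration only): running the standardization step on a
# tiny hand-made frame. The numbers are made up and the column set mirrors the
# fields accessed above.
def _example_standardize():
    raw = pd.DataFrame(
        {
            "date": pd.to_datetime(["2020-03-20"]),
            "county": ["New York City"],
            "state": ["New York"],
            "fips": [None],
            "cases": [5683],
            "deaths": [43],
        }
    )
    # The NYC row gets remapped to New York County / fips 36061 by standardize_data.
    return NYTimesDataset.standardize_data(raw)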
|
the-stack_0_27844
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to shares.
"""
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from manila.api import extensions
from manila.common import constants
from manila.data import rpcapi as data_rpcapi
from manila.db import base
from manila import exception
from manila.i18n import _
from manila import policy
from manila import quota
from manila.scheduler import rpcapi as scheduler_rpcapi
from manila.share import access
from manila.share import rpcapi as share_rpcapi
from manila.share import share_types
from manila.share import utils as share_utils
from manila import utils
share_api_opts = [
cfg.BoolOpt('use_scheduler_creating_share_from_snapshot',
default=False,
help='If set to False, then share creation from snapshot will '
'be performed on the same host. '
'If set to True, then scheduling step will be used.')
]
CONF = cfg.CONF
CONF.register_opts(share_api_opts)
LOG = log.getLogger(__name__)
GB = 1048576 * 1024
QUOTAS = quota.QUOTAS
class API(base.Base):
"""API for interacting with the share manager."""
def __init__(self, db_driver=None):
super(API, self).__init__(db_driver)
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.share_rpcapi = share_rpcapi.ShareAPI()
self.access_helper = access.ShareInstanceAccess(self.db, None)
def create(self, context, share_proto, size, name, description,
snapshot_id=None, availability_zone=None, metadata=None,
share_network_id=None, share_type=None, is_public=False,
share_group_id=None, share_group_snapshot_member=None):
"""Create new share."""
policy.check_policy(context, 'share', 'create')
self._check_metadata_properties(context, metadata)
if snapshot_id is not None:
snapshot = self.get_snapshot(context, snapshot_id)
if snapshot['aggregate_status'] != constants.STATUS_AVAILABLE:
msg = _("status must be '%s'") % constants.STATUS_AVAILABLE
raise exception.InvalidShareSnapshot(reason=msg)
if not size:
size = snapshot['size']
else:
snapshot = None
def as_int(s):
try:
return int(s)
except (ValueError, TypeError):
return s
# tolerate size as stringified int
size = as_int(size)
if not isinstance(size, int) or size <= 0:
msg = (_("Share size '%s' must be an integer and greater than 0")
% size)
raise exception.InvalidInput(reason=msg)
if snapshot and size < snapshot['size']:
msg = (_("Share size '%s' must be equal or greater "
"than snapshot size") % size)
raise exception.InvalidInput(reason=msg)
if snapshot is None:
share_type_id = share_type['id'] if share_type else None
else:
source_share = self.db.share_get(context, snapshot['share_id'])
availability_zone = source_share['instance']['availability_zone']
if share_type is None:
# Grab the source share's share_type if no new share type
# has been provided.
share_type_id = source_share['instance']['share_type_id']
share_type = share_types.get_share_type(context, share_type_id)
else:
share_type_id = share_type['id']
if share_type_id != source_share['instance']['share_type_id']:
msg = _("Invalid share type specified: the requested "
"share type must match the type of the source "
"share. If a share type is not specified when "
"requesting a new share from a snapshot, the "
"share type of the source share will be applied "
"to the new share.")
raise exception.InvalidInput(reason=msg)
supported_share_protocols = (
proto.upper() for proto in CONF.enabled_share_protocols)
if not (share_proto and
share_proto.upper() in supported_share_protocols):
msg = (_("Invalid share protocol provided: %(provided)s. "
"It is either disabled or unsupported. Available "
"protocols: %(supported)s") % dict(
provided=share_proto,
supported=CONF.enabled_share_protocols))
raise exception.InvalidInput(reason=msg)
try:
reservations = QUOTAS.reserve(context, shares=1, gigabytes=size)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'gigabytes' in overs:
LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create "
"%(s_size)sG share (%(d_consumed)dG of "
"%(d_quota)dG already consumed).", {
's_pid': context.project_id,
's_size': size,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.ShareSizeExceedsAvailableQuota()
elif 'shares' in overs:
LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create "
"share (%(d_consumed)d shares "
"already consumed).", {
's_pid': context.project_id,
'd_consumed': _consumed('shares')})
raise exception.ShareLimitExceeded(allowed=quotas['shares'])
try:
is_public = strutils.bool_from_string(is_public, strict=True)
except ValueError as e:
raise exception.InvalidParameterValue(six.text_type(e))
share_group = None
if share_group_id:
try:
share_group = self.db.share_group_get(context, share_group_id)
except exception.NotFound as e:
raise exception.InvalidParameterValue(six.text_type(e))
if (not share_group_snapshot_member and
not (share_group['status'] == constants.STATUS_AVAILABLE)):
params = {
'avail': constants.STATUS_AVAILABLE,
'status': share_group['status'],
}
msg = _("Share group status must be %(avail)s, got"
"%(status)s.") % params
raise exception.InvalidShareGroup(message=msg)
if share_type_id:
share_group_st_ids = [
st['share_type_id']
for st in share_group.get('share_types', [])]
if share_type_id not in share_group_st_ids:
params = {
'type': share_type_id,
'group': share_group_id,
}
msg = _("The specified share type (%(type)s) is not "
"supported by the specified share group "
"(%(group)s).") % params
raise exception.InvalidParameterValue(msg)
            if share_group.get('share_network_id') != share_network_id:
params = {
'net': share_network_id,
'group': share_group_id
}
msg = _("The specified share network (%(net)s) is not "
"supported by the specified share group "
"(%(group)s).") % params
raise exception.InvalidParameterValue(msg)
options = {
'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
'snapshot_id': snapshot_id,
'metadata': metadata,
'display_name': name,
'display_description': description,
'share_proto': share_proto,
'is_public': is_public,
'share_group_id': share_group_id,
}
options.update(self.get_share_attributes_from_share_type(share_type))
if share_group_snapshot_member:
options['source_share_group_snapshot_member_id'] = (
share_group_snapshot_member['id'])
try:
share = self.db.share_create(context, options,
create_share_instance=False)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.share_delete(context, share['id'])
finally:
QUOTAS.rollback(context, reservations)
host = None
if snapshot and not CONF.use_scheduler_creating_share_from_snapshot:
            # Shares created from a snapshot are restricted to the source
            # host in this case, which is a common limitation across
            # different backend types.
host = snapshot['share']['instance']['host']
elif share_group:
host = share_group['host']
self.create_instance(
context, share, share_network_id=share_network_id, host=host,
availability_zone=availability_zone, share_group=share_group,
share_group_snapshot_member=share_group_snapshot_member,
share_type_id=share_type_id)
# Retrieve the share with instance details
share = self.db.share_get(context, share['id'])
return share
def get_share_attributes_from_share_type(self, share_type):
"""Determine share attributes from the share type.
The share type can change any time after shares of that type are
created, so we copy some share type attributes to the share to
consistently govern the behavior of that share over its lifespan.
"""
inferred_map = constants.ExtraSpecs.INFERRED_OPTIONAL_MAP
snapshot_support_key = constants.ExtraSpecs.SNAPSHOT_SUPPORT
create_share_from_snapshot_key = (
constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT)
revert_to_snapshot_key = (
constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT)
mount_snapshot_support_key = (
constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT)
snapshot_support_default = inferred_map.get(snapshot_support_key)
create_share_from_snapshot_support_default = inferred_map.get(
create_share_from_snapshot_key)
revert_to_snapshot_support_default = inferred_map.get(
revert_to_snapshot_key)
mount_snapshot_support_default = inferred_map.get(
constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT)
if share_type:
snapshot_support = share_types.parse_boolean_extra_spec(
snapshot_support_key,
share_type.get('extra_specs', {}).get(
snapshot_support_key, snapshot_support_default))
create_share_from_snapshot_support = (
share_types.parse_boolean_extra_spec(
create_share_from_snapshot_key,
share_type.get('extra_specs', {}).get(
create_share_from_snapshot_key,
create_share_from_snapshot_support_default)))
revert_to_snapshot_support = (
share_types.parse_boolean_extra_spec(
revert_to_snapshot_key,
share_type.get('extra_specs', {}).get(
revert_to_snapshot_key,
revert_to_snapshot_support_default)))
mount_snapshot_support = share_types.parse_boolean_extra_spec(
mount_snapshot_support_key, share_type.get(
'extra_specs', {}).get(
mount_snapshot_support_key,
mount_snapshot_support_default))
replication_type = share_type.get('extra_specs', {}).get(
'replication_type')
else:
snapshot_support = snapshot_support_default
create_share_from_snapshot_support = (
create_share_from_snapshot_support_default)
revert_to_snapshot_support = revert_to_snapshot_support_default
mount_snapshot_support = mount_snapshot_support_default
replication_type = None
return {
'snapshot_support': snapshot_support,
'create_share_from_snapshot_support':
create_share_from_snapshot_support,
'revert_to_snapshot_support': revert_to_snapshot_support,
'replication_type': replication_type,
'mount_snapshot_support': mount_snapshot_support,
}
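    # Illustrative shape of the mapping returned above (the extra specs shown
    # are hypothetical, not taken from a real share type): for a type with
    # snapshot_support='True' and replication_type='readable' this yields
    #
    #   {'snapshot_support': True,
    #    'create_share_from_snapshot_support': <inferred default>,
    #    'revert_to_snapshot_support': <inferred default>,
    #    'mount_snapshot_support': <inferred default>,
    #    'replication_type': 'readable'}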
def create_instance(self, context, share, share_network_id=None,
host=None, availability_zone=None,
share_group=None, share_group_snapshot_member=None,
share_type_id=None):
policy.check_policy(context, 'share', 'create')
request_spec, share_instance = (
self.create_share_instance_and_get_request_spec(
context, share, availability_zone=availability_zone,
share_group=share_group, host=host,
share_network_id=share_network_id,
share_type_id=share_type_id))
if share_group_snapshot_member:
# Inherit properties from the share_group_snapshot_member
member_share_instance = share_group_snapshot_member[
'share_instance']
updates = {
'host': member_share_instance['host'],
'share_network_id': member_share_instance['share_network_id'],
'share_server_id': member_share_instance['share_server_id'],
}
share = self.db.share_instance_update(context,
share_instance['id'],
updates)
# NOTE(ameade): Do not cast to driver if creating from share group
# snapshot
return
if host:
self.share_rpcapi.create_share_instance(
context,
share_instance,
host,
request_spec=request_spec,
filter_properties={},
snapshot_id=share['snapshot_id'],
)
else:
            # Creating a share instance from scratch or from a snapshot may
            # happen on hosts other than the source host.
self.scheduler_rpcapi.create_share_instance(
context, request_spec=request_spec, filter_properties={})
return share_instance
def create_share_instance_and_get_request_spec(
self, context, share, availability_zone=None,
share_group=None, host=None, share_network_id=None,
share_type_id=None, cast_rules_to_readonly=False):
availability_zone_id = None
if availability_zone:
availability_zone_id = self.db.availability_zone_get(
context, availability_zone).id
# TODO(u_glide): Add here validation that provided share network
# doesn't conflict with provided availability_zone when Neutron
# will have AZ support.
share_instance = self.db.share_instance_create(
context, share['id'],
{
'share_network_id': share_network_id,
'status': constants.STATUS_CREATING,
'scheduled_at': timeutils.utcnow(),
'host': host if host else '',
'availability_zone_id': availability_zone_id,
'share_type_id': share_type_id,
'cast_rules_to_readonly': cast_rules_to_readonly,
}
)
share_properties = {
'id': share['id'],
'size': share['size'],
'user_id': share['user_id'],
'project_id': share['project_id'],
'metadata': self.db.share_metadata_get(context, share['id']),
'share_server_id': share_instance['share_server_id'],
'snapshot_support': share['snapshot_support'],
'create_share_from_snapshot_support':
share['create_share_from_snapshot_support'],
'revert_to_snapshot_support': share['revert_to_snapshot_support'],
'mount_snapshot_support': share['mount_snapshot_support'],
'share_proto': share['share_proto'],
'share_type_id': share_type_id,
'is_public': share['is_public'],
'share_group_id': share['share_group_id'],
'source_share_group_snapshot_member_id': share[
'source_share_group_snapshot_member_id'],
'snapshot_id': share['snapshot_id'],
'replication_type': share['replication_type'],
}
share_instance_properties = {
'id': share_instance['id'],
'availability_zone_id': share_instance['availability_zone_id'],
'share_network_id': share_instance['share_network_id'],
'share_server_id': share_instance['share_server_id'],
'share_id': share_instance['share_id'],
'host': share_instance['host'],
'status': share_instance['status'],
'replica_state': share_instance['replica_state'],
'share_type_id': share_instance['share_type_id'],
}
share_type = None
if share_instance['share_type_id']:
share_type = self.db.share_type_get(
context, share_instance['share_type_id'])
request_spec = {
'share_properties': share_properties,
'share_instance_properties': share_instance_properties,
'share_proto': share['share_proto'],
'share_id': share['id'],
'snapshot_id': share['snapshot_id'],
'share_type': share_type,
'share_group': share_group,
'availability_zone_id': availability_zone_id,
}
return request_spec, share_instance
def create_share_replica(self, context, share, availability_zone=None,
share_network_id=None):
if not share.get('replication_type'):
msg = _("Replication not supported for share %s.")
raise exception.InvalidShare(message=msg % share['id'])
if share.get('share_group_id'):
msg = _("Replication not supported for shares in a group.")
raise exception.InvalidShare(message=msg)
self._check_is_share_busy(share)
active_replica = self.db.share_replicas_get_available_active_replica(
context, share['id'])
if not active_replica:
msg = _("Share %s does not have any active replica in available "
"state.")
raise exception.ReplicationException(reason=msg % share['id'])
if share['replication_type'] == constants.REPLICATION_TYPE_READABLE:
cast_rules_to_readonly = True
else:
cast_rules_to_readonly = False
request_spec, share_replica = (
self.create_share_instance_and_get_request_spec(
context, share, availability_zone=availability_zone,
share_network_id=share_network_id,
share_type_id=share['instance']['share_type_id'],
cast_rules_to_readonly=cast_rules_to_readonly))
all_replicas = self.db.share_replicas_get_all_by_share(
context, share['id'])
all_hosts = [r['host'] for r in all_replicas]
request_spec['active_replica_host'] = active_replica['host']
request_spec['all_replica_hosts'] = ','.join(all_hosts)
self.db.share_replica_update(
context, share_replica['id'],
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC})
existing_snapshots = (
self.db.share_snapshot_get_all_for_share(
context, share_replica['share_id'])
)
snapshot_instance = {
'status': constants.STATUS_CREATING,
'progress': '0%',
'share_instance_id': share_replica['id'],
}
for snapshot in existing_snapshots:
self.db.share_snapshot_instance_create(
context, snapshot['id'], snapshot_instance)
self.scheduler_rpcapi.create_share_replica(
context, request_spec=request_spec, filter_properties={})
return share_replica
def delete_share_replica(self, context, share_replica, force=False):
# Disallow deletion of ONLY active replica, *even* when this
# operation is forced.
replicas = self.db.share_replicas_get_all_by_share(
context, share_replica['share_id'])
active_replicas = list(filter(
lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE,
replicas))
if (share_replica.get('replica_state') ==
constants.REPLICA_STATE_ACTIVE and len(active_replicas) == 1):
msg = _("Cannot delete last active replica.")
raise exception.ReplicationException(reason=msg)
LOG.info("Deleting replica %s.", share_replica['id'])
self.db.share_replica_update(
context, share_replica['id'],
{
'status': constants.STATUS_DELETING,
'terminated_at': timeutils.utcnow(),
}
)
if not share_replica['host']:
# Delete any snapshot instances created on the database
replica_snapshots = (
self.db.share_snapshot_instance_get_all_with_filters(
context, {'share_instance_ids': share_replica['id']})
)
for snapshot in replica_snapshots:
self.db.share_snapshot_instance_delete(context, snapshot['id'])
# Delete the replica from the database
self.db.share_replica_delete(context, share_replica['id'])
else:
self.share_rpcapi.delete_share_replica(context,
share_replica,
force=force)
def promote_share_replica(self, context, share_replica):
if share_replica.get('status') != constants.STATUS_AVAILABLE:
msg = _("Replica %(replica_id)s must be in %(status)s state to be "
"promoted.")
raise exception.ReplicationException(
reason=msg % {'replica_id': share_replica['id'],
'status': constants.STATUS_AVAILABLE})
replica_state = share_replica['replica_state']
if (replica_state in (constants.REPLICA_STATE_OUT_OF_SYNC,
constants.STATUS_ERROR)
and not context.is_admin):
msg = _("Promoting a replica with 'replica_state': %s requires "
"administrator privileges.")
raise exception.AdminRequired(
message=msg % replica_state)
self.db.share_replica_update(
context, share_replica['id'],
{'status': constants.STATUS_REPLICATION_CHANGE})
self.share_rpcapi.promote_share_replica(context, share_replica)
return self.db.share_replica_get(context, share_replica['id'])
def update_share_replica(self, context, share_replica):
if not share_replica['host']:
msg = _("Share replica does not have a valid host.")
raise exception.InvalidHost(reason=msg)
self.share_rpcapi.update_share_replica(context, share_replica)
def manage(self, context, share_data, driver_options):
policy.check_policy(context, 'share', 'manage')
shares = self.get_all(context, {
'host': share_data['host'],
'export_location': share_data['export_location'],
'share_proto': share_data['share_proto'],
'share_type_id': share_data['share_type_id']
})
share_type_id = share_data['share_type_id']
share_type = share_types.get_share_type(context, share_type_id)
share_data.update({
'user_id': context.user_id,
'project_id': context.project_id,
'status': constants.STATUS_MANAGING,
'scheduled_at': timeutils.utcnow(),
})
share_data.update(
self.get_share_attributes_from_share_type(share_type))
LOG.debug("Manage: Found shares %s.", len(shares))
export_location = share_data.pop('export_location')
if len(shares) == 0:
share = self.db.share_create(context, share_data)
else:
msg = _("Share already exists.")
raise exception.InvalidShare(reason=msg)
self.db.share_export_locations_update(context, share.instance['id'],
export_location)
request_spec = self._get_request_spec_dict(
share, share_type, size=0, share_proto=share_data['share_proto'],
host=share_data['host'])
# NOTE(ganso): Scheduler is called to validate if share type
# provided can fit in host provided. It will invoke manage upon
# successful validation.
self.scheduler_rpcapi.manage_share(context, share['id'],
driver_options, request_spec)
return self.db.share_get(context, share['id'])
def _get_request_spec_dict(self, share, share_type, **kwargs):
if share is None:
share = {'instance': {}}
share_instance = share['instance']
share_properties = {
'size': kwargs.get('size', share.get('size')),
'user_id': kwargs.get('user_id', share.get('user_id')),
'project_id': kwargs.get('project_id', share.get('project_id')),
'snapshot_support': kwargs.get(
'snapshot_support',
share_type.get('extra_specs', {}).get('snapshot_support')
),
'create_share_from_snapshot_support': kwargs.get(
'create_share_from_snapshot_support',
share_type.get('extra_specs', {}).get(
'create_share_from_snapshot_support')
),
'revert_to_snapshot_support': kwargs.get(
'revert_to_snapshot_support',
share_type.get('extra_specs', {}).get(
'revert_to_snapshot_support')
),
'mount_snapshot_support': kwargs.get(
'mount_snapshot_support',
share_type.get('extra_specs', {}).get(
'mount_snapshot_support')
),
'share_proto': kwargs.get('share_proto', share.get('share_proto')),
'share_type_id': share_type['id'],
'is_public': kwargs.get('is_public', share.get('is_public')),
'share_group_id': kwargs.get(
'share_group_id', share.get('share_group_id')),
'source_share_group_snapshot_member_id': kwargs.get(
'source_share_group_snapshot_member_id',
share.get('source_share_group_snapshot_member_id')),
'snapshot_id': kwargs.get('snapshot_id', share.get('snapshot_id')),
}
share_instance_properties = {
'availability_zone_id': kwargs.get(
'availability_zone_id',
share_instance.get('availability_zone_id')),
'share_network_id': kwargs.get(
'share_network_id', share_instance.get('share_network_id')),
'share_server_id': kwargs.get(
'share_server_id', share_instance.get('share_server_id')),
'share_id': kwargs.get('share_id', share_instance.get('share_id')),
'host': kwargs.get('host', share_instance.get('host')),
'status': kwargs.get('status', share_instance.get('status')),
}
request_spec = {
'share_properties': share_properties,
'share_instance_properties': share_instance_properties,
'share_type': share_type,
'share_id': share.get('id'),
}
return request_spec
def unmanage(self, context, share):
policy.check_policy(context, 'share', 'unmanage')
self._check_is_share_busy(share)
update_data = {'status': constants.STATUS_UNMANAGING,
'terminated_at': timeutils.utcnow()}
share_ref = self.db.share_update(context, share['id'], update_data)
self.share_rpcapi.unmanage_share(context, share_ref)
        # NOTE(u_glide): We should update the 'updated_at' timestamp of the
        # share server here once manage/unmanage operations are supported
        # for the driver_handles_share_servers=True mode.
def manage_snapshot(self, context, snapshot_data, driver_options):
try:
share = self.db.share_get(context, snapshot_data['share_id'])
except exception.NotFound:
raise exception.ShareNotFound(share_id=snapshot_data['share_id'])
if share['has_replicas']:
msg = (_("Share %s has replicas. Snapshots of this share cannot "
"currently be managed until all replicas are removed.")
% share['id'])
raise exception.InvalidShare(reason=msg)
existing_snapshots = self.db.share_snapshot_get_all_for_share(
context, snapshot_data['share_id'])
for existing_snap in existing_snapshots:
for inst in existing_snap.get('instances'):
if (snapshot_data['provider_location'] ==
inst['provider_location']):
msg = _("A share snapshot %(share_snapshot_id)s is "
"already managed for provider location "
"%(provider_location)s.") % {
'share_snapshot_id': existing_snap['id'],
'provider_location':
snapshot_data['provider_location'],
}
raise exception.ManageInvalidShareSnapshot(
reason=msg)
snapshot_data.update({
'user_id': context.user_id,
'project_id': context.project_id,
'status': constants.STATUS_MANAGING,
'share_size': share['size'],
'progress': '0%',
'share_proto': share['share_proto']
})
snapshot = self.db.share_snapshot_create(context, snapshot_data)
self.share_rpcapi.manage_snapshot(context, snapshot, share['host'],
driver_options)
return snapshot
def unmanage_snapshot(self, context, snapshot, host):
update_data = {'status': constants.STATUS_UNMANAGING,
'terminated_at': timeutils.utcnow()}
snapshot_ref = self.db.share_snapshot_update(context,
snapshot['id'],
update_data)
self.share_rpcapi.unmanage_snapshot(context, snapshot_ref, host)
def revert_to_snapshot(self, context, share, snapshot):
"""Revert a share to a snapshot."""
reservations = self._handle_revert_to_snapshot_quotas(
context, share, snapshot)
try:
if share.get('has_replicas'):
self._revert_to_replicated_snapshot(
context, share, snapshot, reservations)
else:
self._revert_to_snapshot(
context, share, snapshot, reservations)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context, reservations)
def _handle_revert_to_snapshot_quotas(self, context, share, snapshot):
"""Reserve extra quota if a revert will result in a larger share."""
# Note(cknight): This value may be positive or negative.
size_increase = snapshot['size'] - share['size']
if not size_increase:
return None
try:
return QUOTAS.reserve(context,
project_id=share['project_id'],
gigabytes=size_increase,
user_id=share['user_id'])
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
consumed_gb = (usages['gigabytes']['reserved'] +
usages['gigabytes']['in_use'])
msg = _("Quota exceeded for %(s_pid)s. Reverting share "
"%(s_sid)s to snapshot %(s_ssid)s will increase the "
"share's size by %(s_size)sG, "
"(%(d_consumed)dG of %(d_quota)dG already consumed).")
msg_args = {
's_pid': context.project_id,
's_sid': share['id'],
's_ssid': snapshot['id'],
's_size': size_increase,
'd_consumed': consumed_gb,
'd_quota': quotas['gigabytes'],
}
message = msg % msg_args
LOG.error(message)
raise exception.ShareSizeExceedsAvailableQuota(message=message)
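    # Worked example of the reservation math above (sizes are hypothetical):
    # reverting a 10G share to a 15G snapshot gives size_increase = 5, so five
    # extra gigabytes are reserved; a smaller snapshot produces a negative
    # delta that is passed to QUOTAS.reserve as a negative adjustment, and an
    # equal size skips the reservation entirely (None is returned).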
def _revert_to_snapshot(self, context, share, snapshot, reservations):
"""Revert a non-replicated share to a snapshot."""
# Set status of share to 'reverting'
self.db.share_update(
context, snapshot['share_id'],
{'status': constants.STATUS_REVERTING})
# Set status of snapshot to 'restoring'
self.db.share_snapshot_update(
context, snapshot['id'],
{'status': constants.STATUS_RESTORING})
# Send revert API to share host
self.share_rpcapi.revert_to_snapshot(
context, share, snapshot, share['instance']['host'], reservations)
def _revert_to_replicated_snapshot(self, context, share, snapshot,
reservations):
"""Revert a replicated share to a snapshot."""
# Get active replica
active_replica = self.db.share_replicas_get_available_active_replica(
context, share['id'])
if not active_replica:
msg = _('Share %s has no active replica in available state.')
raise exception.ReplicationException(reason=msg % share['id'])
# Get snapshot instance on active replica
snapshot_instance_filters = {
'share_instance_ids': active_replica['id'],
'snapshot_ids': snapshot['id'],
}
snapshot_instances = (
self.db.share_snapshot_instance_get_all_with_filters(
context, snapshot_instance_filters))
active_snapshot_instance = (
snapshot_instances[0] if snapshot_instances else None)
if not active_snapshot_instance:
msg = _('Share %(share)s has no snapshot %(snap)s associated with '
'its active replica.')
msg_args = {'share': share['id'], 'snap': snapshot['id']}
raise exception.ReplicationException(reason=msg % msg_args)
# Set active replica to 'reverting'
self.db.share_replica_update(
context, active_replica['id'],
{'status': constants.STATUS_REVERTING})
# Set snapshot instance on active replica to 'restoring'
self.db.share_snapshot_instance_update(
context, active_snapshot_instance['id'],
{'status': constants.STATUS_RESTORING})
# Send revert API to active replica host
self.share_rpcapi.revert_to_snapshot(
context, share, snapshot, active_replica['host'], reservations)
@policy.wrap_check_policy('share')
def delete(self, context, share, force=False):
"""Delete share."""
share = self.db.share_get(context, share['id'])
if context.is_admin and context.project_id != share['project_id']:
project_id = share['project_id']
else:
project_id = context.project_id
share_id = share['id']
statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR,
constants.STATUS_INACTIVE)
if not (force or share['status'] in statuses):
msg = _("Share status must be one of %(statuses)s") % {
"statuses": statuses}
raise exception.InvalidShare(reason=msg)
# NOTE(gouthamr): If the share has more than one replica,
# it can't be deleted until the additional replicas are removed.
if share.has_replicas:
msg = _("Share %s has replicas. Remove the replicas before "
"deleting the share.") % share_id
raise exception.Conflict(err=msg)
snapshots = self.db.share_snapshot_get_all_for_share(context, share_id)
if len(snapshots):
msg = _("Share still has %d dependent snapshots.") % len(snapshots)
raise exception.InvalidShare(reason=msg)
share_group_snapshot_members_count = (
self.db.count_share_group_snapshot_members_in_share(
context, share_id))
if share_group_snapshot_members_count:
msg = (
_("Share still has %d dependent share group snapshot "
"members.") % share_group_snapshot_members_count)
raise exception.InvalidShare(reason=msg)
self._check_is_share_busy(share)
try:
            # We pass the share's user_id so that quota usage is updated for
            # the user who created the share.
reservations = QUOTAS.reserve(context,
project_id=project_id,
shares=-1,
gigabytes=-share['size'],
user_id=share['user_id'])
except Exception as e:
reservations = None
            LOG.exception(
                "Failed to update quota for deleting share: %s", e)
for share_instance in share.instances:
if share_instance['host']:
self.delete_instance(context, share_instance, force=force)
else:
self.db.share_instance_delete(context, share_instance['id'])
if reservations:
            # We pass the share's user_id so that quota usage is committed
            # against the user who created the share.
QUOTAS.commit(context, reservations, project_id=project_id,
user_id=share['user_id'])
def delete_instance(self, context, share_instance, force=False):
policy.check_policy(context, 'share', 'delete')
statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR,
constants.STATUS_INACTIVE)
if not (force or share_instance['status'] in statuses):
msg = _("Share instance status must be one of %(statuses)s") % {
"statuses": statuses}
raise exception.InvalidShareInstance(reason=msg)
share_instance = self.db.share_instance_update(
context, share_instance['id'],
{'status': constants.STATUS_DELETING,
'terminated_at': timeutils.utcnow()}
)
self.share_rpcapi.delete_share_instance(context, share_instance,
force=force)
        # NOTE(u_glide): The 'updated_at' timestamp is used to track the last
        # usage of a share server. This is required for automatic share server
        # cleanup, because we need to know for how long a share server has had
        # no shares (i.e. has been unused). We only do this update on share
        # deletion, because a share server with shares cannot be deleted, so
        # there is no need to do it on share creation or any other operation.
if share_instance['share_server_id']:
self.db.share_server_update(
context,
share_instance['share_server_id'],
{'updated_at': timeutils.utcnow()})
def delete_share_server(self, context, server):
"""Delete share server."""
policy.check_policy(context, 'share_server', 'delete', server)
shares = self.db.share_instances_get_all_by_share_server(context,
server['id'])
if shares:
raise exception.ShareServerInUse(share_server_id=server['id'])
share_groups = self.db.share_group_get_all_by_share_server(
context, server['id'])
if share_groups:
LOG.error("share server '%(ssid)s' in use by share groups.",
{'ssid': server['id']})
raise exception.ShareServerInUse(share_server_id=server['id'])
# NOTE(vponomaryov): There is no share_server status update here,
# it is intentional.
# Status will be changed in manila.share.manager after verification
# for race condition between share creation on server
# and server deletion.
self.share_rpcapi.delete_share_server(context, server)
def create_snapshot(self, context, share, name, description,
force=False):
policy.check_policy(context, 'share', 'create_snapshot', share)
if ((not force) and (share['status'] != constants.STATUS_AVAILABLE)):
msg = _("Source share status must be "
"%s") % constants.STATUS_AVAILABLE
raise exception.InvalidShare(reason=msg)
size = share['size']
self._check_is_share_busy(share)
try:
reservations = QUOTAS.reserve(
context, snapshots=1, snapshot_gigabytes=size)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'snapshot_gigabytes' in overs:
msg = ("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': size,
                                  'd_consumed': _consumed('snapshot_gigabytes'),
                                  'd_quota': quotas['snapshot_gigabytes']})
raise exception.SnapshotSizeExceedsAvailableQuota()
elif 'snapshots' in overs:
msg = ("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed('snapshots')})
raise exception.SnapshotLimitExceeded(
allowed=quotas['snapshots'])
options = {'share_id': share['id'],
'size': share['size'],
'user_id': context.user_id,
'project_id': context.project_id,
'status': constants.STATUS_CREATING,
'progress': '0%',
'share_size': share['size'],
'display_name': name,
'display_description': description,
'share_proto': share['share_proto']}
try:
snapshot = self.db.share_snapshot_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.snapshot_delete(context, share['id'])
finally:
QUOTAS.rollback(context, reservations)
# If replicated share, create snapshot instances for each replica
if share.get('has_replicas'):
snapshot = self.db.share_snapshot_get(context, snapshot['id'])
share_instance_id = snapshot['instance']['share_instance_id']
replicas = self.db.share_replicas_get_all_by_share(
context, share['id'])
replicas = [r for r in replicas if r['id'] != share_instance_id]
snapshot_instance = {
'status': constants.STATUS_CREATING,
'progress': '0%',
}
for replica in replicas:
snapshot_instance.update({'share_instance_id': replica['id']})
self.db.share_snapshot_instance_create(
context, snapshot['id'], snapshot_instance)
self.share_rpcapi.create_replicated_snapshot(
context, share, snapshot)
else:
self.share_rpcapi.create_snapshot(context, share, snapshot)
return snapshot
def migration_start(
self, context, share, dest_host, force_host_assisted_migration,
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network=None, new_share_type=None):
"""Migrates share to a new host."""
if force_host_assisted_migration and (
preserve_metadata or writable or nondisruptive or
preserve_snapshots):
msg = _('Invalid parameter combination. Cannot set parameters '
'"nondisruptive", "writable", "preserve_snapshots" or '
'"preserve_metadata" to True when enabling the '
'"force_host_assisted_migration" option.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
share_instance = share.instance
# NOTE(gouthamr): Ensure share does not have replicas.
# Currently share migrations are disallowed for replicated shares.
if share.has_replicas:
msg = _('Share %s has replicas. Remove the replicas before '
'attempting to migrate the share.') % share['id']
LOG.error(msg)
raise exception.Conflict(err=msg)
# TODO(ganso): We do not support migrating shares in or out of groups
# for now.
if share.get('share_group_id'):
msg = _('Share %s is a member of a group. This operation is not '
'currently supported for shares that are members of '
'groups.') % share['id']
LOG.error(msg)
raise exception.InvalidShare(reason=msg)
# We only handle "available" share for now
if share_instance['status'] != constants.STATUS_AVAILABLE:
msg = _('Share instance %(instance_id)s status must be available, '
'but current status is: %(instance_status)s.') % {
'instance_id': share_instance['id'],
'instance_status': share_instance['status']}
raise exception.InvalidShare(reason=msg)
# Access rules status must not be error
if share_instance['access_rules_status'] == constants.STATUS_ERROR:
msg = _('Share instance %(instance_id)s access rules status must '
'not be in %(error)s when attempting to start a '
'migration.') % {
'instance_id': share_instance['id'],
'error': constants.STATUS_ERROR}
raise exception.InvalidShare(reason=msg)
self._check_is_share_busy(share)
if force_host_assisted_migration:
# We only handle shares without snapshots for
# host-assisted migration
snaps = self.db.share_snapshot_get_all_for_share(context,
share['id'])
if snaps:
msg = _("Share %s must not have snapshots when using "
"host-assisted migration.") % share['id']
raise exception.Conflict(err=msg)
dest_host_host = share_utils.extract_host(dest_host)
# Make sure the host is in the list of available hosts
utils.validate_service_host(context, dest_host_host)
if new_share_type:
share_type = new_share_type
new_share_type_id = new_share_type['id']
dhss = share_type['extra_specs']['driver_handles_share_servers']
dhss = strutils.bool_from_string(dhss, strict=True)
if (dhss and not new_share_network and
not share_instance['share_network_id']):
msg = _(
"New share network must be provided when share type of"
" given share %s has extra_spec "
"'driver_handles_share_servers' as True.") % share['id']
raise exception.InvalidInput(reason=msg)
else:
share_type = {}
share_type_id = share_instance['share_type_id']
if share_type_id:
share_type = share_types.get_share_type(context, share_type_id)
new_share_type_id = share_instance['share_type_id']
dhss = share_type['extra_specs']['driver_handles_share_servers']
dhss = strutils.bool_from_string(dhss, strict=True)
if dhss:
if new_share_network:
new_share_network_id = new_share_network['id']
else:
new_share_network_id = share_instance['share_network_id']
else:
if new_share_network:
msg = _(
"New share network must not be provided when share type of"
" given share %s has extra_spec "
"'driver_handles_share_servers' as False.") % share['id']
raise exception.InvalidInput(reason=msg)
new_share_network_id = None
# Make sure the destination is different than the source
if (new_share_network_id == share_instance['share_network_id'] and
new_share_type_id == share_instance['share_type_id'] and
dest_host == share_instance['host']):
msg = ("Destination host (%(dest_host)s), share network "
"(%(dest_sn)s) or share type (%(dest_st)s) are the same "
"as the current host's '%(src_host)s', '%(src_sn)s' and "
"'%(src_st)s' respectively. Nothing to be done.") % {
'dest_host': dest_host,
'dest_sn': new_share_network_id,
'dest_st': new_share_type_id,
'src_host': share_instance['host'],
'src_sn': share_instance['share_network_id'],
'src_st': share_instance['share_type_id'],
}
LOG.info(msg)
self.db.share_update(
context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
return 200
service = self.db.service_get_by_args(
context, dest_host_host, 'manila-share')
request_spec = self._get_request_spec_dict(
share,
share_type,
availability_zone_id=service['availability_zone_id'],
share_network_id=new_share_network_id)
self.db.share_update(
context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_STARTING})
self.db.share_instance_update(context, share_instance['id'],
{'status': constants.STATUS_MIGRATING})
self.scheduler_rpcapi.migrate_share_to_host(
context, share['id'], dest_host, force_host_assisted_migration,
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network_id, new_share_type_id, request_spec)
return 202
def migration_complete(self, context, share):
if share['task_state'] not in (
constants.TASK_STATE_DATA_COPYING_COMPLETED,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):
msg = self._migration_validate_error_message(share)
if msg is None:
msg = _("First migration phase of share %s not completed"
" yet.") % share['id']
LOG.error(msg)
raise exception.InvalidShare(reason=msg)
share_instance_id, new_share_instance_id = (
self.get_migrating_instances(share))
share_instance_ref = self.db.share_instance_get(
context, share_instance_id, with_share_data=True)
self.share_rpcapi.migration_complete(context, share_instance_ref,
new_share_instance_id)
def get_migrating_instances(self, share):
share_instance_id = None
new_share_instance_id = None
for instance in share.instances:
if instance['status'] == constants.STATUS_MIGRATING:
share_instance_id = instance['id']
if instance['status'] == constants.STATUS_MIGRATING_TO:
new_share_instance_id = instance['id']
if None in (share_instance_id, new_share_instance_id):
msg = _("Share instances %(instance_id)s and "
"%(new_instance_id)s in inconsistent states, cannot"
" continue share migration for share %(share_id)s"
".") % {'instance_id': share_instance_id,
'new_instance_id': new_share_instance_id,
'share_id': share['id']}
raise exception.ShareMigrationFailed(reason=msg)
return share_instance_id, new_share_instance_id
def migration_get_progress(self, context, share):
if share['task_state'] == (
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
share_instance_id, migrating_instance_id = (
self.get_migrating_instances(share))
share_instance_ref = self.db.share_instance_get(
context, share_instance_id, with_share_data=True)
service_host = share_utils.extract_host(share_instance_ref['host'])
service = self.db.service_get_by_args(
context, service_host, 'manila-share')
if utils.service_is_up(service):
try:
result = self.share_rpcapi.migration_get_progress(
context, share_instance_ref, migrating_instance_id)
except Exception:
msg = _("Failed to obtain migration progress of share "
"%s.") % share['id']
LOG.exception(msg)
raise exception.ShareMigrationError(reason=msg)
else:
result = None
elif share['task_state'] == (
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
data_rpc = data_rpcapi.DataAPI()
LOG.info("Sending request to get share migration information"
" of share %s." % share['id'])
services = self.db.service_get_all_by_topic(context, 'manila-data')
if len(services) > 0 and utils.service_is_up(services[0]):
try:
result = data_rpc.data_copy_get_progress(
context, share['id'])
except Exception:
msg = _("Failed to obtain migration progress of share "
"%s.") % share['id']
LOG.exception(msg)
raise exception.ShareMigrationError(reason=msg)
else:
result = None
else:
result = self._migration_get_progress_state(share)
if not (result and result.get('total_progress') is not None):
msg = self._migration_validate_error_message(share)
if msg is None:
msg = _("Migration progress of share %s cannot be obtained at "
"this moment.") % share['id']
LOG.error(msg)
raise exception.InvalidShare(reason=msg)
return result
def _migration_get_progress_state(self, share):
task_state = share['task_state']
if task_state in (constants.TASK_STATE_MIGRATION_SUCCESS,
constants.TASK_STATE_DATA_COPYING_ERROR,
constants.TASK_STATE_MIGRATION_CANCELLED,
constants.TASK_STATE_MIGRATION_COMPLETING,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
constants.TASK_STATE_DATA_COPYING_COMPLETED,
constants.TASK_STATE_DATA_COPYING_COMPLETING,
constants.TASK_STATE_DATA_COPYING_CANCELLED,
constants.TASK_STATE_MIGRATION_ERROR):
return {'total_progress': 100}
elif task_state in (constants.TASK_STATE_MIGRATION_STARTING,
constants.TASK_STATE_MIGRATION_DRIVER_STARTING,
constants.TASK_STATE_DATA_COPYING_STARTING,
constants.TASK_STATE_MIGRATION_IN_PROGRESS):
return {'total_progress': 0}
else:
return None
def _migration_validate_error_message(self, share):
task_state = share['task_state']
if task_state == constants.TASK_STATE_MIGRATION_SUCCESS:
msg = _("Migration of share %s has already "
"completed.") % share['id']
elif task_state in (None, constants.TASK_STATE_MIGRATION_ERROR):
msg = _("There is no migration being performed for share %s "
"at this moment.") % share['id']
elif task_state == constants.TASK_STATE_MIGRATION_CANCELLED:
msg = _("Migration of share %s was already "
"cancelled.") % share['id']
elif task_state in (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
constants.TASK_STATE_DATA_COPYING_COMPLETED):
msg = _("Migration of share %s has already completed first "
"phase.") % share['id']
else:
return None
return msg
def migration_cancel(self, context, share):
migrating = True
if share['task_state'] in (
constants.TASK_STATE_DATA_COPYING_COMPLETED,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
share_instance_id, migrating_instance_id = (
self.get_migrating_instances(share))
share_instance_ref = self.db.share_instance_get(
context, share_instance_id, with_share_data=True)
service_host = share_utils.extract_host(share_instance_ref['host'])
service = self.db.service_get_by_args(
context, service_host, 'manila-share')
if utils.service_is_up(service):
self.share_rpcapi.migration_cancel(
context, share_instance_ref, migrating_instance_id)
else:
migrating = False
elif share['task_state'] == (
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
data_rpc = data_rpcapi.DataAPI()
LOG.info("Sending request to cancel migration of "
"share %s." % share['id'])
services = self.db.service_get_all_by_topic(context, 'manila-data')
if len(services) > 0 and utils.service_is_up(services[0]):
try:
data_rpc.data_copy_cancel(context, share['id'])
except Exception:
msg = _("Failed to cancel migration of share "
"%s.") % share['id']
LOG.exception(msg)
raise exception.ShareMigrationError(reason=msg)
else:
migrating = False
else:
migrating = False
if not migrating:
msg = self._migration_validate_error_message(share)
if msg is None:
msg = _("Migration of share %s cannot be cancelled at this "
"moment.") % share['id']
LOG.error(msg)
raise exception.InvalidShare(reason=msg)
@policy.wrap_check_policy('share')
def delete_snapshot(self, context, snapshot, force=False):
statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR)
if not (force or snapshot['aggregate_status'] in statuses):
msg = _("Share Snapshot status must be one of %(statuses)s.") % {
"statuses": statuses}
raise exception.InvalidShareSnapshot(reason=msg)
share = self.db.share_get(context, snapshot['share_id'])
snapshot_instances = (
self.db.share_snapshot_instance_get_all_with_filters(
context, {'snapshot_ids': snapshot['id']})
)
for snapshot_instance in snapshot_instances:
self.db.share_snapshot_instance_update(
context, snapshot_instance['id'],
{'status': constants.STATUS_DELETING})
if share['has_replicas']:
self.share_rpcapi.delete_replicated_snapshot(
context, snapshot, share['instance']['host'],
share_id=share['id'], force=force)
else:
self.share_rpcapi.delete_snapshot(
context, snapshot, share['instance']['host'], force=force)
@policy.wrap_check_policy('share')
def update(self, context, share, fields):
if 'is_public' in fields:
try:
fields['is_public'] = strutils.bool_from_string(
fields['is_public'], strict=True)
except ValueError as e:
raise exception.InvalidParameterValue(six.text_type(e))
return self.db.share_update(context, share['id'], fields)
@policy.wrap_check_policy('share')
def snapshot_update(self, context, snapshot, fields):
return self.db.share_snapshot_update(context, snapshot['id'], fields)
def get(self, context, share_id):
rv = self.db.share_get(context, share_id)
if not rv['is_public']:
policy.check_policy(context, 'share', 'get', rv)
return rv
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc'):
policy.check_policy(context, 'share', 'get_all')
if search_opts is None:
search_opts = {}
LOG.debug("Searching for shares by: %s", search_opts)
# Prepare filters
filters = {}
if 'export_location_id' in search_opts:
filters['export_location_id'] = search_opts.pop(
'export_location_id')
if 'export_location_path' in search_opts:
filters['export_location_path'] = search_opts.pop(
'export_location_path')
if 'metadata' in search_opts:
filters['metadata'] = search_opts.pop('metadata')
if not isinstance(filters['metadata'], dict):
msg = _("Wrong metadata filter provided: "
"%s.") % six.text_type(filters['metadata'])
raise exception.InvalidInput(reason=msg)
if 'extra_specs' in search_opts:
# Verify policy for extra-specs access
extensions.extension_authorizer(
'share', 'types_extra_specs')(context)
filters['extra_specs'] = search_opts.pop('extra_specs')
if not isinstance(filters['extra_specs'], dict):
msg = _("Wrong extra specs filter provided: "
"%s.") % six.text_type(filters['extra_specs'])
raise exception.InvalidInput(reason=msg)
if not (isinstance(sort_key, six.string_types) and sort_key):
msg = _("Wrong sort_key filter provided: "
"'%s'.") % six.text_type(sort_key)
raise exception.InvalidInput(reason=msg)
if not (isinstance(sort_dir, six.string_types) and sort_dir):
msg = _("Wrong sort_dir filter provided: "
"'%s'.") % six.text_type(sort_dir)
raise exception.InvalidInput(reason=msg)
is_public = search_opts.pop('is_public', False)
is_public = strutils.bool_from_string(is_public, strict=True)
# Get filtered list of shares
if 'host' in search_opts:
policy.check_policy(context, 'share', 'list_by_host')
if 'share_server_id' in search_opts:
# NOTE(vponomaryov): this is project_id independent
policy.check_policy(context, 'share', 'list_by_share_server_id')
shares = self.db.share_get_all_by_share_server(
context, search_opts.pop('share_server_id'), filters=filters,
sort_key=sort_key, sort_dir=sort_dir)
elif (context.is_admin and 'all_tenants' in search_opts):
shares = self.db.share_get_all(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir)
else:
shares = self.db.share_get_all_by_project(
context, project_id=context.project_id, filters=filters,
is_public=is_public, sort_key=sort_key, sort_dir=sort_dir)
# NOTE(vponomaryov): we do not need 'all_tenants' opt anymore
search_opts.pop('all_tenants', None)
if search_opts:
results = []
for s in shares:
# values in search_opts can be only strings
if all(s.get(k, None) == v for k, v in search_opts.items()):
results.append(s)
shares = results
return shares
def get_snapshot(self, context, snapshot_id):
policy.check_policy(context, 'share_snapshot', 'get_snapshot')
return self.db.share_snapshot_get(context, snapshot_id)
def get_all_snapshots(self, context, search_opts=None,
sort_key='share_id', sort_dir='desc'):
policy.check_policy(context, 'share_snapshot', 'get_all_snapshots')
search_opts = search_opts or {}
LOG.debug("Searching for snapshots by: %s", search_opts)
# Read and remove key 'all_tenants' if was provided
all_tenants = search_opts.pop('all_tenants', None)
string_args = {'sort_key': sort_key, 'sort_dir': sort_dir}
string_args.update(search_opts)
for k, v in string_args.items():
if not (isinstance(v, six.string_types) and v):
msg = _("Wrong '%(k)s' filter provided: "
"'%(v)s'.") % {'k': k, 'v': string_args[k]}
raise exception.InvalidInput(reason=msg)
if (context.is_admin and all_tenants):
snapshots = self.db.share_snapshot_get_all(
context, filters=search_opts,
sort_key=sort_key, sort_dir=sort_dir)
else:
snapshots = self.db.share_snapshot_get_all_by_project(
context, context.project_id, filters=search_opts,
sort_key=sort_key, sort_dir=sort_dir)
# Remove key 'usage' if provided
search_opts.pop('usage', None)
if search_opts:
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.items():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
return snapshots
def get_latest_snapshot_for_share(self, context, share_id):
"""Get the newest snapshot of a share."""
return self.db.share_snapshot_get_latest_for_share(context, share_id)
@staticmethod
def _is_invalid_share_instance(instance):
return (instance['host'] is None
or instance['status'] in constants.
INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES)
def allow_access(self, ctx, share, access_type, access_to,
access_level=None):
"""Allow access to share."""
# Access rule validation:
if access_level not in constants.ACCESS_LEVELS + (None, ):
msg = _("Invalid share access level: %s.") % access_level
raise exception.InvalidShareAccess(reason=msg)
share_access_list = self.db.share_access_get_all_by_type_and_access(
ctx, share['id'], access_type, access_to)
if len(share_access_list) > 0:
raise exception.ShareAccessExists(access_type=access_type,
access=access_to)
# Share instance validation
if any(instance for instance in share.instances
if self._is_invalid_share_instance(instance)):
msg = _("New access rules cannot be applied while the share or "
"any of its replicas or migration copies lacks a valid "
"host or is in an invalid state.")
raise exception.InvalidShare(message=msg)
values = {
'share_id': share['id'],
'access_type': access_type,
'access_to': access_to,
'access_level': access_level,
}
access = self.db.share_access_create(ctx, values)
for share_instance in share.instances:
self.allow_access_to_instance(ctx, share_instance)
return access
def allow_access_to_instance(self, context, share_instance):
self._conditionally_transition_share_instance_access_rules_status(
context, share_instance)
self.share_rpcapi.update_access(context, share_instance)
def _conditionally_transition_share_instance_access_rules_status(
self, context, share_instance):
conditionally_change = {
constants.STATUS_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING,
}
self.access_helper.get_and_update_share_instance_access_rules_status(
context, conditionally_change=conditionally_change,
share_instance_id=share_instance['id'])
def deny_access(self, ctx, share, access):
"""Deny access to share."""
if any(instance for instance in share.instances if
self._is_invalid_share_instance(instance)):
msg = _("Access rules cannot be denied while the share, "
"any of its replicas or migration copies lacks a valid "
"host or is in an invalid state.")
raise exception.InvalidShare(message=msg)
for share_instance in share.instances:
self.deny_access_to_instance(ctx, share_instance, access)
def deny_access_to_instance(self, context, share_instance, access):
self._conditionally_transition_share_instance_access_rules_status(
context, share_instance)
updates = {'state': constants.ACCESS_STATE_QUEUED_TO_DENY}
self.access_helper.get_and_update_share_instance_access_rule(
context, access['id'], updates=updates,
share_instance_id=share_instance['id'])
self.share_rpcapi.update_access(context, share_instance)
def access_get_all(self, context, share):
"""Returns all access rules for share."""
policy.check_policy(context, 'share', 'access_get_all')
rules = self.db.share_access_get_all_for_share(context, share['id'])
return rules
def access_get(self, context, access_id):
"""Returns access rule with the id."""
policy.check_policy(context, 'share', 'access_get')
rule = self.db.share_access_get(context, access_id)
return rule
@policy.wrap_check_policy('share')
def get_share_metadata(self, context, share):
"""Get all metadata associated with a share."""
rv = self.db.share_metadata_get(context, share['id'])
return dict(rv.items())
@policy.wrap_check_policy('share')
def delete_share_metadata(self, context, share, key):
"""Delete the given metadata item from a share."""
self.db.share_metadata_delete(context, share['id'], key)
def _check_is_share_busy(self, share):
"""Raises an exception if share is busy with an active task."""
if share.is_busy:
msg = _("Share %(share_id)s is busy as part of an active "
"task: %(task)s.") % {
'share_id': share['id'],
'task': share['task_state']
}
raise exception.ShareBusyException(reason=msg)
def _check_metadata_properties(self, context, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.items():
if not k:
msg = _("Metadata property key is blank.")
LOG.warning(msg)
raise exception.InvalidShareMetadata(message=msg)
if len(k) > 255:
msg = _("Metadata property key is "
"greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidShareMetadataSize(message=msg)
if not v:
msg = _("Metadata property value is blank.")
LOG.warning(msg)
raise exception.InvalidShareMetadata(message=msg)
if len(v) > 1023:
msg = _("Metadata property value is "
"greater than 1023 characters.")
LOG.warning(msg)
raise exception.InvalidShareMetadataSize(message=msg)
@policy.wrap_check_policy('share')
def update_share_metadata(self, context, share, metadata, delete=False):
"""Updates or creates share metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig_meta = self.get_share_metadata(context, share)
if delete:
_metadata = metadata
else:
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(context, _metadata)
self.db.share_metadata_update(context, share['id'],
_metadata, delete)
return _metadata
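    # Illustrative example with made-up keys: if the stored metadata is
    # {'a': '1', 'b': '2'} and the caller passes metadata={'b': '3'}, then
    # delete=False yields {'a': '1', 'b': '3'} (merge), while delete=True
    # yields {'b': '3'} (unlisted keys such as 'a' are removed).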
def get_share_network(self, context, share_net_id):
return self.db.share_network_get(context, share_net_id)
def extend(self, context, share, new_size):
policy.check_policy(context, 'share', 'extend')
if share['status'] != constants.STATUS_AVAILABLE:
msg_params = {
'valid_status': constants.STATUS_AVAILABLE,
'share_id': share['id'],
'status': share['status'],
}
msg = _("Share %(share_id)s status must be '%(valid_status)s' "
"to extend, but current status is: "
"%(status)s.") % msg_params
raise exception.InvalidShare(reason=msg)
self._check_is_share_busy(share)
size_increase = int(new_size) - share['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s).") % {'new_size': new_size,
'size': share['size']})
raise exception.InvalidInput(reason=msg)
try:
            # We pass the share's user_id so that quota usage is updated for
            # the user who created the share, because only that user's quota
            # is decreased on share deletion.
reservations = QUOTAS.reserve(context,
project_id=share['project_id'],
gigabytes=size_increase,
user_id=share['user_id'])
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return usages[name]['reserved'] + usages[name]['in_use']
msg = ("Quota exceeded for %(s_pid)s, tried to extend share "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.ShareSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, share, {'status': constants.STATUS_EXTENDING})
self.share_rpcapi.extend_share(context, share, new_size, reservations)
LOG.info("Extend share request issued successfully.",
resource=share)
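    # Example of the extend math above (hypothetical sizes): growing a 10G
    # share to new_size=15 reserves size_increase = 5 gigabytes against the
    # project quota before the RPC cast; a new_size that does not increase
    # the share size is rejected earlier with InvalidInput.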
def shrink(self, context, share, new_size):
policy.check_policy(context, 'share', 'shrink')
status = six.text_type(share['status']).lower()
valid_statuses = (constants.STATUS_AVAILABLE,
constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR)
if status not in valid_statuses:
msg_params = {
'valid_status': ", ".join(valid_statuses),
'share_id': share['id'],
'status': status,
}
msg = _("Share %(share_id)s status must in (%(valid_status)s) "
"to shrink, but current status is: "
"%(status)s.") % msg_params
raise exception.InvalidShare(reason=msg)
self._check_is_share_busy(share)
size_decrease = int(share['size']) - int(new_size)
if size_decrease <= 0 or new_size <= 0:
msg = (_("New size for shrink must be less "
"than current size and greater than 0 (current: %(size)s,"
" new: %(new_size)s)") % {'new_size': new_size,
'size': share['size']})
raise exception.InvalidInput(reason=msg)
self.update(context, share, {'status': constants.STATUS_SHRINKING})
self.share_rpcapi.shrink_share(context, share, new_size)
LOG.info("Shrink share (id=%(id)s) request issued successfully."
" New size: %(size)s" % {'id': share['id'],
'size': new_size})
    def snapshot_allow_access(self, context, snapshot, access_type,
                              access_to):
"""Allow access to a share snapshot."""
filters = {'access_to': access_to,
'access_type': access_type}
access_list = self.db.share_snapshot_access_get_all_for_share_snapshot(
context, snapshot['id'], filters)
if len(access_list) > 0:
raise exception.ShareSnapshotAccessExists(access_type=access_type,
access=access_to)
values = {
'share_snapshot_id': snapshot['id'],
'access_type': access_type,
'access_to': access_to,
}
if any((instance['status'] != constants.STATUS_AVAILABLE) or
(instance['share_instance']['host'] is None)
for instance in snapshot.instances):
msg = _("New access rules cannot be applied while the snapshot or "
"any of its replicas or migration copies lacks a valid "
"host or is not in %s state.") % constants.STATUS_AVAILABLE
raise exception.InvalidShareSnapshotInstance(reason=msg)
access = self.db.share_snapshot_access_create(context, values)
for snapshot_instance in snapshot.instances:
self.share_rpcapi.snapshot_update_access(
context, snapshot_instance)
return access
def snapshot_deny_access(self, context, snapshot, access):
"""Deny access to a share snapshot."""
if any((instance['status'] != constants.STATUS_AVAILABLE) or
(instance['share_instance']['host'] is None)
for instance in snapshot.instances):
msg = _("Access rules cannot be denied while the snapshot or "
"any of its replicas or migration copies lacks a valid "
"host or is not in %s state.") % constants.STATUS_AVAILABLE
raise exception.InvalidShareSnapshotInstance(reason=msg)
for snapshot_instance in snapshot.instances:
rule = self.db.share_snapshot_instance_access_get(
context, access['id'], snapshot_instance['id'])
self.db.share_snapshot_instance_access_update(
context, rule['access_id'], snapshot_instance['id'],
{'state': constants.ACCESS_STATE_QUEUED_TO_DENY})
self.share_rpcapi.snapshot_update_access(
context, snapshot_instance)
def snapshot_access_get_all(self, context, snapshot):
"""Returns all access rules for share snapshot."""
rules = self.db.share_snapshot_access_get_all_for_share_snapshot(
context, snapshot['id'], {})
return rules
def snapshot_access_get(self, context, access_id):
"""Returns snapshot access rule with the id."""
rule = self.db.share_snapshot_access_get(context, access_id)
return rule
def snapshot_export_locations_get(self, context, snapshot):
return self.db.share_snapshot_export_locations_get(context, snapshot)
def snapshot_export_location_get(self, context, el_id):
return self.db.share_snapshot_instance_export_location_get(context,
el_id)
|
the-stack_0_27846
|
import logging
from flask import jsonify, request, redirect, url_for
class WebService:
def __init__(self, flask_app, url_shortener):
self.flask_app = flask_app
self.url_shortener = url_shortener
self._configure_routes()
def _configure_routes(self):
logging.debug("Configuring flask routes")
self.flask_app.add_url_rule(
"/shorten_url", "shorten", self.shorten, methods=["POST"]
)
self.flask_app.add_url_rule("/<key>", "lookup", self.lookup, methods=["GET"])
def shorten(self):
if not request.json:
return self.error_response("No json payload found")
url = request.json.get("url")
if not url:
return self.error_response("No url found")
logging.info("Shortening url %s", url)
try:
key = self.url_shortener.url_to_key(url)
except ValueError as error:
return self.error_response(str(error))
shortened_url = url_for("lookup", _external=True, key=key)
logging.debug("Shortened url: %s", shortened_url)
response = jsonify({"shortened_url": shortened_url})
response.status_code = 201
return response
def lookup(self, key):
try:
url = self.url_shortener.key_to_url(key)
except Exception as error:
return self.error_response(str(error), 404)
logging.info("Redirecting to %s", url)
return redirect(url, code=301)
def error_response(self, message, status_code=400):
logging.warning("error: %s", message)
response = jsonify({"error": message})
response.status_code = status_code
return response
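
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original service): wires WebService to
# a Flask app using `_DictShortener`, a hypothetical in-memory stand-in for
# the real url_shortener; any object exposing url_to_key(url) and
# key_to_url(key) would fit the same slot.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from flask import Flask

    class _DictShortener:
        """Minimal demo shortener keeping key -> url pairs in memory."""

        def __init__(self):
            self._by_key = {}

        def url_to_key(self, url):
            if not url.startswith(("http://", "https://")):
                raise ValueError("only http(s) urls are supported")
            key = format(abs(hash(url)) % (16 ** 8), "08x")
            self._by_key[key] = url
            return key

        def key_to_url(self, key):
            return self._by_key[key]

    demo_app = Flask(__name__)
    WebService(demo_app, _DictShortener())
    demo_app.run(port=8080)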
|
the-stack_0_27847
|
__name__ = 'ezaggrid'
name_url = __name__.replace('_', '-')
__version__ = '0.2.6'
__description__ = 'display dataframe in ag-grid'
__long_description__ = 'See repo README'
__author__ = 'oscar6echo'
__author_email__ = '[email protected]'
__url__ = 'https://github.com/{}/{}'.format(__author__,
name_url)
__download_url__ = 'https://github.com/{}/{}/tarball/{}'.format(__author__,
name_url,
__version__)
__keywords__ = ['javascript', 'dataframe', 'ag-grid', 'display']
__license__ = 'MIT'
__classifiers__ = ['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
__include_package_data__ = True
__package_data__ = {
    'templates':
        ['templates/iframe.tpl.html',
         'templates/main.tpl.html',
         'templates/script.tpl.js',
         'templates/json.js',
         'templates/helpers.js'
         ],
}
__zip_safe__ = False
|
the-stack_0_27848
|
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
from oslo.config import cfg
from nova import test
from nova.virt.vmwareapi import read_write_util
CONF = cfg.CONF
class ReadWriteUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(ReadWriteUtilTestCase, self).setUp()
def test_ipv6_host(self):
ipv6_host = 'fd8c:215d:178e:c51e:200:c9ff:fed1:584c'
self.mox.StubOutWithMock(httplib.HTTPConnection, 'endheaders')
httplib.HTTPConnection.endheaders()
self.mox.ReplayAll()
file = read_write_util.VMwareHTTPWriteFile(ipv6_host,
'fake_dc',
'fake_ds',
dict(),
'/tmp/fake.txt',
0)
self.assertEqual(ipv6_host, file.conn.host)
self.assertEqual(443, file.conn.port)
|
the-stack_0_27850
|
#!/usr/bin/env python
"""
Copyright (c) 2011, Willow Garage, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import rospy
import copy
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from visualization_msgs.msg import *
from geometry_msgs.msg import Quaternion, Point
from tf.broadcaster import TransformBroadcaster
from tf.transformations import quaternion_from_euler
from librepilot.msg import AutopilotInfo
import sys
import time
import tf
from random import random
from math import pi, sin
import numpy as np
NAME_SPACE = "machine_"
GOAL_NAME = "goal_"
server = None
menu_handler = MenuHandler()
br = None
counter = 0
def frameCallback(msg):
global counter, br
time = rospy.Time.now()
br.sendTransform((0, 0, 0), (0, 0, 0, 1.0), time, "goal_link", "world")
counter += 1
def processFeedback(feedback):
s = "Feedback from marker '" + feedback.marker_name
s += "' / control '" + feedback.control_name + "'"
mp = ""
if feedback.mouse_point_valid:
mp = " at " + str(feedback.mouse_point.x)
mp += ", " + str(feedback.mouse_point.y)
mp += ", " + str(feedback.mouse_point.z)
mp += " in frame " + feedback.header.frame_id
if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK:
rospy.loginfo(s + ": button click" + mp + ".")
elif feedback.event_type == InteractiveMarkerFeedback.MENU_SELECT:
rospy.loginfo(
s + ": menu item " + str(feedback.menu_entry_id) + " clicked" + mp + "."
)
elif feedback.event_type == InteractiveMarkerFeedback.POSE_UPDATE:
rospy.loginfo(s + ": pose changed")
elif feedback.event_type == InteractiveMarkerFeedback.MOUSE_DOWN:
rospy.loginfo(s + ": mouse down" + mp + ".")
elif feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP:
rospy.loginfo(s + ": mouse up" + mp + ".")
server.applyChanges()
def alignMarker(feedback):
pose = feedback.pose
pose.position.x = round(pose.position.x - 0.5) + 0.5
pose.position.y = round(pose.position.y - 0.5) + 0.5
rospy.loginfo(
feedback.marker_name
+ ": aligning position = "
+ str(feedback.pose.position.x)
+ ","
+ str(feedback.pose.position.y)
+ ","
+ str(feedback.pose.position.z)
+ " to "
+ str(pose.position.x)
+ ","
+ str(pose.position.y)
+ ","
+ str(pose.position.z)
)
server.setPose(feedback.marker_name, pose)
server.applyChanges()
def rand(min_, max_):
return min_ + random() * (max_ - min_)
def makeBox(msg):
marker = Marker()
marker.type = Marker.CUBE
marker.scale.x = msg.scale * 0.45
marker.scale.y = msg.scale * 0.45
marker.scale.z = msg.scale * 0.45
marker.color.r = 0.5
marker.color.g = 0.5
marker.color.b = 0.5
marker.color.a = 1.0
return marker
def makeBoxControl(msg):
control = InteractiveMarkerControl()
control.always_visible = True
control.markers.append(makeBox(msg))
msg.controls.append(control)
return control
def saveMarker(int_marker):
server.insert(int_marker, processFeedback)
# Marker Creation
def makeQuadrocopterMarker(position, orientation):
int_marker = InteractiveMarker()
int_marker.header.frame_id = "goal_link"
int_marker.pose.position = position
int_marker.pose.orientation = orientation
int_marker.scale = 1
int_marker.name = "goal"
int_marker.description = "goal"
makeBoxControl(int_marker)
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 0
control.orientation.y = 1
control.orientation.z = 0
control.interaction_mode = InteractiveMarkerControl.MOVE_ROTATE
int_marker.controls.append(copy.deepcopy(control))
control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
int_marker.controls.append(control)
server.insert(int_marker, processFeedback)
def generate_goal(x_min, x_max, y_min, y_max, z_min, z_max, v_min, v_max):
x = np.random.uniform(x_min, x_max)
y = np.random.uniform(y_min, y_max)
z = np.random.uniform(z_min, z_max)
v = np.random.uniform(v_min, v_max)
phi = 0
the = 0
psi = np.random.uniform(-pi, pi)
q = quaternion_from_euler(phi, the, psi)
return x, y, z, v, phi, the, psi, q
def distance_to_origin_far_enough(position, origin=np.array([0, 0, 100])):
dist = np.linalg.norm(position - origin)
if dist > 30:
return True
else:
return False
def sample_new_goal(x_min, x_max, y_min, y_max, z_min, z_max, v_min, v_max):
far_enough = False
while far_enough == False:
x, y, z, v, phi, the, psi, q = generate_goal(
x_min, x_max, y_min, y_max, z_min, z_max, v_min, v_max
)
far_enough = distance_to_origin_far_enough(np.array([x, y, z]))
return x, y, z, v, phi, the, psi, q
if __name__ == "__main__":
robotID = "0"
x_max, x_min = 105, -105
y_max, y_min = 105, -105
z_max, z_min = 210, 5
v_max, v_min = 8, 0
if len(sys.argv) > 1:
robotID = sys.argv[1]
x_max, x_min = float(sys.argv[2]), -float(sys.argv[2])
y_max, y_min = float(sys.argv[3]), -float(sys.argv[3])
z_max, z_min = float(sys.argv[4]), float(sys.argv[5])
v_max, v_min = float(sys.argv[6]), 0
np.random.seed(123 + int(robotID))
rospy.loginfo("[ Goal Node ] Launching...")
rospy.init_node("GOAL_Node_" + robotID, anonymous=False)
vel_cmd_pub = rospy.Publisher(
"goal_" + robotID + "/AutopilotInfo", AutopilotInfo, queue_size=1
)
br = TransformBroadcaster()
server = InteractiveMarkerServer("goal_" + robotID)
menu_handler.insert("First Entry", callback=processFeedback)
menu_handler.insert("Second Entry", callback=processFeedback)
sub_menu_handle = menu_handler.insert("Submenu")
menu_handler.insert("First Entry", parent=sub_menu_handle, callback=processFeedback)
menu_handler.insert(
"Second Entry", parent=sub_menu_handle, callback=processFeedback
)
times = 0
while not rospy.is_shutdown():
if times % 180 == 0:
x, y, z, v, phi, the, psi, q = sample_new_goal(
x_min, x_max, y_min, y_max, z_min, z_max, v_min, v_max
)
times = 0
x, y, z = 50, 50, 100 # hack to fix the goal for testing impala
v = 5
position = Point(x, y, z)
velocity = Point(v, 0, 0)
orientation = Quaternion(q[0], q[1], q[2], q[3])
makeQuadrocopterMarker(position=position, orientation=orientation)
server.applyChanges()
vel_cmd = AutopilotInfo()
vel_cmd.VelocityDesired = velocity
vel_cmd_pub.publish(vel_cmd)
rospy.Timer(rospy.Duration(0.01), frameCallback)
rospy.loginfo("[ Goal Node ] -----------------------")
rospy.loginfo("[ Goal Node ] position = (%2.1f, %2.1f, %2.1f)\n" % (x, y, z))
rospy.loginfo("[ Goal Node ] velocity = %2.1f\n" % (v))
rospy.loginfo(
"[ Goal Node ] orientation = (%2.1f, %2.1f, %2.1f)]\n" % (phi, the, psi)
)
times += 1
time.sleep(1)
|
the-stack_0_27852
|
# coding:utf-8
"""
Filename : data_collector.py
Role : data collection
@author : Sunwaee
"""
from typing import Dict, List
import torch
from transformers import (
MT5Tokenizer,
)
def trim_batch(input_ids, pad_token_id, attention_mask=None):
"""
Remove columns that are populated exclusively by pad_token_id.
:param input_ids: input ids
:param pad_token_id: pad token id
:param attention_mask: attention mask
:return: input_ids and eventually attention mask
"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]
class DataCollector:
"""
Data collection.
"""
def __init__(self, tokenizer: MT5Tokenizer, mode: str = 'training', using_tpu: bool = False) -> None:
"""
        Initializes DataCollector.
:param tokenizer: tokenizer
:param mode: mode
:param using_tpu: whether to use tpu or not
"""
self.tokenizer = tokenizer
self.using_tpu = using_tpu
self.mode = mode
def __call__(self, batch: List) -> Dict[str, torch.Tensor]:
"""
Takes a list of samples and collates them into a batch.
:param batch: list of samples
:return: dictionary of tensors
"""
# Stacking elements
input_ids = torch.stack([example['source_ids'] for example in batch])
target_ids = torch.stack([example['target_ids'] for example in batch])
attention_mask = torch.stack([example['attention_mask'] for example in batch])
# Setting pad token id
pad_token_id = self.tokenizer.pad_token_id
# Preventing trim on TPU use
if not self.using_tpu:
input_ids, attention_mask = trim_batch(input_ids, pad_token_id, attention_mask=attention_mask)
target_ids = trim_batch(target_ids, pad_token_id)
# Shifting decoder inputs to the right
lm_labels = target_ids.clone()
decoder_input_ids = self._shift_right_mt5(lm_labels)
if self.mode == 'training':
lm_labels[lm_labels[:, :] == pad_token_id] = -100
# Creating dictionary with results
params = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": lm_labels,
"decoder_input_ids": decoder_input_ids
}
return params
def _shift_right_mt5(self, input_ids):
"""
Shifts inputs to the right.
        :param input_ids: target token ids
        :return: ids shifted one position to the right, starting with the pad token
"""
# Setting pad token id
pad_token_id = self.tokenizer.pad_token_id
# Assertions to prevent bugs
assert pad_token_id is not None, \
"self.model.config.pad_token_id has to be defined."
# Shifting inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = pad_token_id
# Replacing possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
# Assertions to make sure output is bugfree
assert torch.all(shifted_input_ids >= 0).item(), \
"Verify that `labels` has only positive values and -100"
return shifted_input_ids
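
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): collates a toy batch
# of pre-tokenized examples. Assumes the "google/mt5-small" checkpoint can be
# loaded; the collator itself only needs a tokenizer with a pad_token_id.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
    pad = tokenizer.pad_token_id
    toy_batch = [
        {
            "source_ids": torch.tensor([5, 6, 7, pad]),
            "target_ids": torch.tensor([8, 9, pad, pad]),
            "attention_mask": torch.tensor([1, 1, 1, 0]),
        },
        {
            "source_ids": torch.tensor([5, 6, pad, pad]),
            "target_ids": torch.tensor([8, pad, pad, pad]),
            "attention_mask": torch.tensor([1, 1, 0, 0]),
        },
    ]
    collated = DataCollector(tokenizer, mode="training")(toy_batch)
    # Padding-only columns are trimmed; labels use -100 on padding positions.
    print({name: tensor.shape for name, tensor in collated.items()})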
|
the-stack_0_27855
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import os
from pathlib import Path
import numpy as np
import pandas as pd
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.repository._util import metadata, save_to_file
def generate_m5_dataset(
dataset_path: Path, pandas_freq: str, prediction_length: int
):
cal_path = f"{dataset_path}/calendar.csv"
sales_path = f"{dataset_path}/sales_train_validation.csv"
if not os.path.exists(cal_path) or not os.path.exists(sales_path):
raise RuntimeError(
f"M5 data is available on Kaggle (https://www.kaggle.com/c/m5-forecasting-accuracy/data). "
f"You first need to agree to the terms of the competition before being able to download the data. "
f"After you have done that, please supply the files at {dataset_path}."
)
# Read M5 data from dataset_path
calendar = pd.read_csv(cal_path)
sales_train_validation = pd.read_csv(sales_path)
submission_prediction_length = prediction_length * 2
############################################################
    # dynamic features: per-day event/holiday type indicators
# Build dynamic features
############################################################
cal_features = calendar.drop(
[
"date",
"wm_yr_wk",
"weekday",
"wday",
"month",
"year",
"event_name_1",
"event_name_2",
"d",
],
axis=1,
)
cal_features["event_type_1"] = cal_features["event_type_1"].apply(
lambda x: 0 if str(x) == "nan" else 1
)
cal_features["event_type_2"] = cal_features["event_type_2"].apply(
lambda x: 0 if str(x) == "nan" else 1
)
    test_cal_features = cal_features.values.T  # transpose so rows are the feature fields and columns are dates; values are 0/1 indicators
train_cal_features = test_cal_features[
:, : -submission_prediction_length - prediction_length
]
test_cal_features = test_cal_features[:, :-submission_prediction_length]
    test_cal_features_list = [test_cal_features] * len(sales_train_validation)  # one copy of the calendar features per target series (row) in sales_train_validation
train_cal_features_list = [train_cal_features] * len(
sales_train_validation
)
############################################################
# Build static features
############################################################
state_ids = (
sales_train_validation["state_id"].astype("category").cat.codes.values
)
    state_ids_un = np.unique(state_ids)  # state
store_ids = (
sales_train_validation["store_id"].astype("category").cat.codes.values
)
    store_ids_un = np.unique(store_ids)  # store
cat_ids = (
sales_train_validation["cat_id"].astype("category").cat.codes.values
)
    cat_ids_un = np.unique(cat_ids)  # top-level category
dept_ids = (
sales_train_validation["dept_id"].astype("category").cat.codes.values
)
    dept_ids_un = np.unique(dept_ids)  # department (sub-category)
item_ids = (
sales_train_validation["item_id"].astype("category").cat.codes.values
)
    item_ids_un = np.unique(item_ids)  # item_id identifies each individual product
stat_cat_list = [item_ids, dept_ids, cat_ids, store_ids, state_ids]
stat_cat = np.concatenate(stat_cat_list)
stat_cat = stat_cat.reshape(len(stat_cat_list), len(item_ids)).T
cardinalities = [
len(item_ids_un),
len(dept_ids_un),
len(cat_ids_un),
len(store_ids_un),
len(state_ids_un),
]
############################################################
# Build target series
############################################################
train_df = sales_train_validation.drop(
["id", "item_id", "dept_id", "cat_id", "store_id", "state_id"], axis=1
)
test_target_values = train_df.values.copy()
train_target_values = [ts[:-prediction_length] for ts in train_df.values]
dates = ["2011-01-29 00:00:00" for _ in range(len(sales_train_validation))]
############################################################
# Create metadata file
############################################################
meta_file = dataset_path / "metadata.json"
with open(meta_file, "w") as f:
f.write(
json.dumps(
metadata(
cardinality=cardinalities,
freq=pandas_freq,
prediction_length=prediction_length,
)
)
)
############################################################
# Build training set
############################################################
train_file = dataset_path / "train" / "data.json"
train_ds = [
{
FieldName.TARGET: target.tolist(),
FieldName.START: start,
FieldName.FEAT_DYNAMIC_REAL: fdr.tolist(),
FieldName.FEAT_STATIC_CAT: fsc.tolist(),
}
for (target, start, fdr, fsc) in zip(
train_target_values, dates, train_cal_features_list, stat_cat
)
]
save_to_file(train_file, train_ds)
############################################################
# Build testing set
############################################################
test_file = dataset_path / "test" / "data.json"
test_ds = [
{
FieldName.TARGET: target.tolist(),
FieldName.START: start,
FieldName.FEAT_DYNAMIC_REAL: fdr.tolist(),
FieldName.FEAT_STATIC_CAT: fsc.tolist(),
}
for (target, start, fdr, fsc) in zip(
test_target_values, dates, test_cal_features_list, stat_cat
)
]
save_to_file(test_file, test_ds)
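
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): "./m5" is a placeholder
# directory assumed to already contain calendar.csv, sales_train_validation.csv
# and empty train/ and test/ subdirectories; the M5 files themselves have to be
# downloaded from Kaggle first, as the RuntimeError above explains.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    generate_m5_dataset(
        dataset_path=Path("./m5"),
        pandas_freq="D",
        prediction_length=28,
    )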
|
the-stack_0_27857
|
import os
import structlog
import random
from collections import namedtuple
WORKER_THREADS_PER_CORE = int(os.environ.get('WORKER_THREADS_PER_CORE', 128))
Task = namedtuple('Task', ['priority', 'blocking_perc', 'random', 'func', 'args', 'kwargs', 'log_bindings', 'sink'])
def task_wrapper(func, args=None, kwargs=None, log_bindings=None, blocking_perc=16, sink=None, priority=1):
args = args or ()
kwargs = kwargs or {}
log_bindings = log_bindings or {}
return Task(
func=func,
args=args,
kwargs=kwargs,
random=random.random(),
log_bindings=log_bindings,
blocking_perc=blocking_perc,
priority=priority,
sink=sink
)
def iotask_wrapper(func, args=None, kwargs=None, log_bindings=None, priority=1):
return task_wrapper(func, args, kwargs, log_bindings, 1, priority=priority)
def cputask_wrapper(func, args=None, kwargs=None, log_bindings=None, priority=1):
return task_wrapper(func, args, kwargs, log_bindings, WORKER_THREADS_PER_CORE // 2, priority=priority)
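
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): builds one IO-bound
# and one CPU-bound Task around an illustrative `fetch` function. How the Task
# tuples are consumed (e.g. pushed onto a priority queue) is left to the caller.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def fetch(url, timeout=5):
        return "fetched %s (timeout=%s)" % (url, timeout)

    io_task = iotask_wrapper(
        fetch,
        args=("https://example.com",),
        kwargs={"timeout": 10},
        log_bindings={"component": "demo"},
    )
    cpu_task = cputask_wrapper(sum, args=([1, 2, 3],), priority=2)
    # IO tasks keep blocking_perc at 1, CPU tasks use half the thread budget.
    print(io_task.blocking_perc, cpu_task.blocking_perc)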
|
the-stack_0_27858
|
"""
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the array,
and it should return false if every element is distinct.
[1,2,3,1] -> true
[1,2,3,4] -> false
[1,1,1,3,3,4,3,2,4,2] -> true
from leetcode
https://leetcode.com/problems/contains-duplicate/submissions/
"""
from typing import List
def containsDuplicate(arr):
contains_duplicates = False
count_dict = {}
for num in arr:
if num in count_dict:
count_dict[num] += 1
else:
count_dict[num] = 1
for value in count_dict.values():
if value > 1:
contains_duplicates = True
# print("True")
return True
# print("False")
return False
def containsDuplicates2(arr: List[int]) -> bool:
num_set = set()
for num in arr:
if num in num_set:
return True
num_set.add(num)
return False
def containsDuplicates3(arr: List[int]) -> bool:
num_set = set(arr)
return len(num_set) != len(arr)
# containsDuplicate([1, 2, 3, 1])
# containsDuplicate([1, 2, 3, 4])
print(containsDuplicates3([1, 1, 1, 3, 3, 4, 3, 2, 4, 2]))
|
the-stack_0_27859
|
# constants for old pymilvus API test
import utils.util_pymilvus as utils
default_fields = utils.gen_default_fields()
default_binary_fields = utils.gen_binary_default_fields()
default_entity = utils.gen_entities(1)
default_raw_binary_vector, default_binary_entity = utils.gen_binary_entities(1)
default_entity_row = utils.gen_entities_rows(1)
default_raw_binary_vector_row, default_binary_entity_row = utils.gen_binary_entities_rows(1)
default_entities = utils.gen_entities(utils.default_nb)
default_raw_binary_vectors, default_binary_entities = utils.gen_binary_entities(utils.default_nb)
default_entities_new = utils.gen_entities_new(utils.default_nb)
default_raw_binary_vectors_new, default_binary_entities_new = utils.gen_binary_entities_new(utils.default_nb)
default_entities_rows = utils.gen_entities_rows(utils.default_nb)
default_raw_binary_vectors_rows, default_binary_entities_rows = utils.gen_binary_entities_rows(utils.default_nb)
|
the-stack_0_27860
|
#
# Copyright (c) 2013-2014, PagerDuty, Inc. <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# JsonStore: persists JSON data to/from a db file
import errno
import json
import os
class JsonStore(object):
"""
A file-based JSON store to persist JSON-based data atomically.
"""
def __init__(self, db_name, db_dir):
from pdagentutil import \
ensure_readable_directory, ensure_writable_directory
ensure_readable_directory(db_dir)
ensure_writable_directory(db_dir)
self._path = os.path.join(db_dir, db_name)
self._backup_path = os.path.join(db_dir, "%s.bak" % db_name)
def get(self):
"""
Get existing data from the JSON in the db file.
Returns None if it cannot load the data for any reason.
This includes bad json, file does not exist or any other
error reading the file.
"""
fp = None
try:
fp = open(self._path, "r")
return json.load(fp)
except IOError as e:
# alright if no such file exists, not fine if any other error.
if e.errno != errno.ENOENT:
raise
return None
except ValueError:
# file had bad json in it.
return None
finally:
if fp:
fp.close()
def set(self, json_data):
"""
Save given data into the db file in JSON format.
All errors are allowed through (ie not caught within).
This can be: json error, file permission error or any
other error writing the file.
"""
fp = open(self._backup_path, "w")
try:
json.dump(
json_data,
fp,
indent=4,
separators=(',', ': '),
sort_keys=True)
finally:
fp.close()
os.rename(self._backup_path, self._path)
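
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): round-trips a small
# dict through the store in a temporary directory. Assumes the surrounding
# pdagent package is importable, since __init__ pulls the directory checks
# from pdagentutil.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile

    demo_dir = tempfile.mkdtemp()
    store = JsonStore("demo.json", demo_dir)
    store.set({"counter": 1, "enabled": True})
    print(store.get())  # -> {'counter': 1, 'enabled': True}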
|
the-stack_0_27861
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import html
import json
import logging
import traceback
from telegram import (ReplyKeyboardRemove, Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery,
ReplyKeyboardMarkup, ParseMode)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler, CallbackContext, CallbackQueryHandler, RegexHandler)
from bot.constant import Text, KeyboardText
from db.model import UserVote, User, UserVoice
from setting import CHANNEL_CHAT_ID, BOT_TOKEN, DEVELOPER_CHAT_ID
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
NAME, NEW_NAME, VOICE, LOCATION, BIO = range(5)
END = ConversationHandler.END
cancel_markup = ReplyKeyboardMarkup([[KeyboardText.cancel]], one_time_keyboard=True)
def start(update, context):
update.message.reply_text(Text.start_description, reply_markup=ReplyKeyboardRemove())
return END
def send_voice(update, context):
chat_id = update.effective_chat.id
user = User.select().where(User.chat_id == chat_id).first()
if user:
context.user_data['user'] = user
update.message.reply_text(f'سلام {user.name} عزیز\n'
f'وویس مورد نظرت رو آپلود یا فوروارد کن.',
reply_markup=cancel_markup)
return VOICE
else:
update.message.reply_text(Text.choose_name, reply_markup=cancel_markup)
return NAME
def request_change_name(update, context):
chat_id = update.effective_chat.id
user = User.select().where(User.chat_id == chat_id).first()
if user:
context.user_data['user'] = user
update.message.reply_text(f"اسم قبلی شما {user.name} است\n"
"لطفا نام جدید خود را وارد کنید:", reply_markup=cancel_markup)
return NEW_NAME
else:
update.message.reply_text(Text.choose_name, reply_markup=cancel_markup)
return NAME
def change_name(update, context):
name = update.message.text
user = context.user_data['user']
user.name = name
user.save()
update.message.reply_text("تغییر نام شما با موفقیت انجام شد😇\n"
f"نام جدید شما 👈🏻 {name}\n"
f"شروع مجدد /start", reply_markup=ReplyKeyboardRemove())
return END
def pick_a_name(update, context):
name = update.message.text
chat_id = update.effective_chat.id
user = User.create(chat_id=chat_id, name=name, username=update.effective_chat.username)
context.user_data['user'] = user
update.message.reply_text(f'ممنون {name} عزیز☺️\n'
f'به جمع رادیو توییتر خوش اومدی🎉\n'
f'حالا اولین وویس خودت رو آپلود کن',
reply_markup=cancel_markup)
return VOICE
def voice(update: Update, context: CallbackContext):
voice_message = update.message
# commit voice in database
UserVoice.create(file_id=voice_message.voice.file_id,
chat_id=voice_message.chat_id,
message_id=voice_message.message_id,
user_username=voice_message.chat.username)
name = context.user_data['user'].name
name = name.replace(" ", "_")
caption = "#" + name
# if voice_message.voice.duration > int(VOICE_DURATION_LIMIT):
# update.message.reply_text('اوه چه زیاد😯\n'
# f'زمان وویس باید کمتر از {VOICE_DURATION_LIMIT} ثانیه باشه.')
keyboard = [
[InlineKeyboardButton(Text.like, callback_data=Text.like + Text.separator + "0" + Text.separator + "0"),
InlineKeyboardButton(Text.dislike, callback_data=Text.dislike + Text.separator + "0" + Text.separator + "0")]]
context.bot.send_voice(chat_id=CHANNEL_CHAT_ID, voice=voice_message.voice, caption=caption,
reply_markup=InlineKeyboardMarkup(keyboard))
update.message.reply_text('وویس شما با موفقیت به کانال ارسال شد😍\n'
'شروع مجدد /start', reply_markup=ReplyKeyboardRemove())
return END
def do_vote(vote, like_count, dislike_count):
if vote == Text.like:
like_count = str(int(like_count) + 1)
elif vote == Text.dislike:
dislike_count = str(int(dislike_count) + 1)
return create_inline_button(like_count, dislike_count)
def do_un_vote(vote, like_count, dislike_count):
if vote == Text.like:
like_count = str(int(like_count) - 1)
elif vote == Text.dislike:
dislike_count = str(int(dislike_count) - 1)
return create_inline_button(like_count, dislike_count)
def do_change_vote(vote, like_count, dislike_count):
if vote == Text.like:
dislike_count = str(int(dislike_count) - 1)
like_count = str(int(like_count) + 1)
elif vote == Text.dislike:
dislike_count = str(int(dislike_count) + 1)
like_count = str(int(like_count) - 1)
return create_inline_button(like_count, dislike_count)
def create_inline_button(like_count, dislike_count):
like_callback_data = Text.like + Text.separator + like_count + Text.separator + dislike_count
dislike_callback_data = Text.dislike + Text.separator + like_count + Text.separator + dislike_count
keyboard = [[InlineKeyboardButton(Text.like + like_count, callback_data=like_callback_data),
InlineKeyboardButton(Text.dislike + dislike_count, callback_data=dislike_callback_data)]]
return keyboard
def parse_callback_data(data):
data = data.split(Text.separator)
vote = data[0]
like_count = data[1]
dislike_count = data[2]
return vote, like_count, dislike_count
def button(update: Update, context):
query = update.callback_query
chat_id = update.effective_chat.id
message_id = update.effective_message.message_id
if isinstance(query, CallbackQuery):
# get passed data
data = query.data
vote, like_count, dislike_count = parse_callback_data(data)
# check last user vote
user_last_vote = UserVote.select().where(
(UserVote.chat_id == chat_id) &
(UserVote.message_id == message_id)).first()
if user_last_vote:
if user_last_vote.vote == vote:
user_last_vote.delete_instance()
query.answer(show_alert=True, text="You took your reaction back")
# undo a vote
keyboard = do_un_vote(vote, like_count, dislike_count)
else:
user_last_vote.vote = vote
user_last_vote.save()
query.answer(show_alert=True, text="You " + vote + " this")
# change vote
keyboard = do_change_vote(vote, like_count, dislike_count)
else:
UserVote.create(chat_id=chat_id, message_id=message_id, vote=vote)
query.answer(show_alert=True, text="You " + vote + " this")
# do vote
keyboard = do_vote(vote, like_count, dislike_count)
query.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(keyboard))
def cancel(update, context):
update.message.reply_text(Text.cancel, reply_markup=ReplyKeyboardRemove())
return END
def error_handler(update: Update, context: CallbackContext):
"""Log the error and send a telegram message to notify the developer."""
# Log the error before we do anything else, so we can see it even if something breaks.
logger.error(msg="Exception while handling an update:", exc_info=context.error)
# traceback.format_exception returns the usual python message about an exception, but as a
# list of strings rather than a single string, so we have to join them together.
tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)
tb = ''.join(tb_list)
# Build the message with some markup and additional information about what happened.
# You might need to add some logic to deal with messages longer than the 4096 character limit.
message = (
'An exception was raised while handling an update\n'
'<pre>update = {}</pre>\n\n'
'<pre>context.chat_data = {}</pre>\n\n'
'<pre>context.user_data = {}</pre>\n\n'
'<pre>{}</pre>'
).format(
html.escape(json.dumps(update.to_dict(), indent=2, ensure_ascii=False)),
html.escape(str(context.chat_data)),
html.escape(str(context.user_data)),
html.escape(tb)
)
# Finally, send the message
context.bot.send_message(chat_id=DEVELOPER_CHAT_ID, text=message, parse_mode=ParseMode.HTML)
def run_bot():
updater = Updater(BOT_TOKEN, use_context=True)
dp = updater.dispatcher
send_voice_handler = ConversationHandler(
allow_reentry=True,
entry_points=[CommandHandler('send_voice', send_voice)],
states={
NAME: [MessageHandler(Filters.text, pick_a_name)],
VOICE: [MessageHandler(Filters.voice, voice)],
},
fallbacks=[MessageHandler(Filters.regex("^" + KeyboardText.cancel + "$"), cancel),
CommandHandler('start', start)]
)
change_name_handler = ConversationHandler(
allow_reentry=True,
entry_points=[CommandHandler('change_name', request_change_name)],
states={
NEW_NAME: [MessageHandler(Filters.text, change_name)],
NAME: [MessageHandler(Filters.text, pick_a_name)],
},
fallbacks=[MessageHandler(Filters.regex("^" + KeyboardText.cancel + "$"), cancel),
CommandHandler('start', start)]
)
dp.add_handler(CommandHandler("start", start))
dp.add_handler(send_voice_handler)
dp.add_handler(change_name_handler)
dp.add_handler(CallbackQueryHandler(button))
dp.add_error_handler(error_handler)
updater.start_polling()
updater.idle()
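
# ---------------------------------------------------------------------------
# Hedged entry-point sketch (not part of the original module): assumes
# BOT_TOKEN, CHANNEL_CHAT_ID and DEVELOPER_CHAT_ID are configured in setting.py
# and that the db.model tables already exist.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    run_bot()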
|
the-stack_0_27862
|
import warnings
from collections import OrderedDict
import numpy as np
import torch
from torch import nn as nn
from torch.nn import ModuleDict
from torch.nn import Parameter
from torch.nn import functional as F
from ..constraints import positive
class ConfigurationError(Exception):
pass
# ------------------ Base Classes -------------------------
class Readout:
def initialize(self, *args, **kwargs):
raise NotImplementedError("initialize is not implemented for ", self.__class__.__name__)
def __repr__(self):
s = super().__repr__()
s += " [{} regularizers: ".format(self.__class__.__name__)
ret = []
for attr in filter(
lambda x: not x.startswith("_") and ("gamma" in x or "pool" in x or "positive" in x),
dir(self),
):
ret.append("{} = {}".format(attr, getattr(self, attr)))
return s + "|".join(ret) + "]\n"
class SpatialXFeatureLinear(nn.Module):
"""
Factorized fully connected layer. Weights are a sum of outer products between a spatial filter and a feature vector.
"""
def __init__(
self,
in_shape,
outdims,
bias,
normalize=True,
init_noise=1e-3,
constrain_pos=False,
**kwargs,
):
super().__init__()
self.in_shape = in_shape
self.outdims = outdims
self.normalize = normalize
c, w, h = in_shape
self.spatial = Parameter(torch.Tensor(self.outdims, w, h))
self.features = Parameter(torch.Tensor(self.outdims, c))
self.init_noise = init_noise
self.constrain_pos = constrain_pos
if bias:
bias = Parameter(torch.Tensor(self.outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.initialize()
@property
def normalized_spatial(self):
positive(self.spatial)
if self.normalize:
norm = self.spatial.pow(2).sum(dim=1, keepdim=True)
norm = norm.sum(dim=2, keepdim=True).sqrt().expand_as(self.spatial) + 1e-6
weight = self.spatial / norm
else:
weight = self.spatial
return weight
# TODO: Fix weight property -> self.positive is not defined
@property
def weight(self):
if self.positive:
positive(self.features)
n = self.outdims
c, w, h = self.in_shape
return self.normalized_spatial.view(n, 1, w, h) * self.features.view(n, c, 1, 1)
def l1(self, average=False):
n = self.outdims
c, w, h = self.in_shape
ret = (
self.normalized_spatial.view(self.outdims, -1).abs().sum(dim=1, keepdim=True)
* self.features.view(self.outdims, -1).abs().sum(dim=1)
).sum()
if average:
ret = ret / (n * c * w * h)
return ret
def initialize(self):
self.spatial.data.normal_(0, self.init_noise)
self.features.data.normal_(0, self.init_noise)
if self.bias is not None:
self.bias.data.fill_(0)
def forward(self, x, shift=None):
if self.constrain_pos:
positive(self.features)
positive(self.normalized_spatial)
y = torch.einsum("ncwh,owh->nco", x, self.normalized_spatial)
y = torch.einsum("nco,oc->no", y, self.features)
if self.bias is not None:
y = y + self.bias
return y
def __repr__(self):
return (
("normalized " if self.normalize else "")
+ self.__class__.__name__
+ " ("
+ "{} x {} x {}".format(*self.in_shape)
+ " -> "
+ str(self.outdims)
+ ")"
)
class ClonedReadout(Readout, nn.Module):
"""
This readout clones another readout while applying a linear transformation on the output. Used for MultiDatasets
with matched neurons where the x-y positions in the grid stay the same but the predicted responses are rescaled due
to varying experimental conditions.
"""
def __init__(self, original_readout, **kwargs):
super().__init__()
self._source = original_readout
self.alpha = Parameter(torch.ones(self._source.features.shape[-1]))
self.beta = Parameter(torch.zeros(self._source.features.shape[-1]))
def forward(self, x):
x = self._source(x) * self.alpha + self.beta
return x
def feature_l1(self, average=True):
""" Regularization is only applied on the scaled feature weights, not on the bias."""
if average:
return (self._source.features * self.alpha).abs().mean()
else:
return (self._source.features * self.alpha).abs().sum()
def initialize(self):
self.alpha.data.fill_(1.0)
self.beta.data.fill_(0.0)
class PointPooled2d(nn.Module):
def __init__(
self,
in_shape,
outdims,
pool_steps,
bias,
pool_kern,
init_range,
align_corners=True,
**kwargs,
):
"""
This readout learns a point in the core feature space for each neuron, with help of torch.grid_sample, that best
        predicts its response. Multiple average pooling steps are applied to reduce the search space in each stage and thereby speed up convergence to the best prediction point.
The readout receives the shape of the core as 'in_shape', number of pooling stages to be performed as 'pool_steps', the kernel size and stride length
to be used for pooling as 'pool_kern', the number of units/neurons being predicted as 'outdims', 'bias' specifying whether
or not bias term is to be used and 'init_range' range for initialising the grid with uniform distribution, U(-init_range,init_range).
        The grid parameter contains the normalized locations (x, y coordinates in the core feature space) and is clipped to [-1, 1], as
        required by the torch.grid_sample function. The feature parameter learns the best linear mapping from the pooled feature
map from a given location to a unit's response with or without an additional elu non-linearity.
Args:
in_shape (list): shape of the input feature map [channels, width, height]
outdims (int): number of output units
pool_steps (int): number of pooling stages
bias (bool): adds a bias term
pool_kern (int): filter size and stride length used for pooling the feature map
init_range (float): intialises the grid with Uniform([-init_range, init_range])
[expected: positive value <=1]
            align_corners (bool): Keyword argument to gridsample for bilinear interpolation.
It changed behavior in PyTorch 1.3. The default of align_corners = True is setting the
behavior to pre PyTorch 1.3 functionality for comparability.
"""
super().__init__()
if init_range > 1.0 or init_range <= 0.0:
raise ValueError("init_range is not within required limit!")
self._pool_steps = pool_steps
self.in_shape = in_shape
c, w, h = in_shape
self.outdims = outdims
self.grid = Parameter(torch.Tensor(1, outdims, 1, 2)) # x-y coordinates for each neuron
self.features = Parameter(
torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims)
) # weight matrix mapping the core features to the output units
if bias:
bias = Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.pool_kern = pool_kern
self.avg = nn.AvgPool2d(
(pool_kern, pool_kern), stride=pool_kern, count_include_pad=False
) # setup kernel of size=[pool_kern,pool_kern] with stride=pool_kern
self.init_range = init_range
self.align_corners = align_corners
self.initialize()
@property
def pool_steps(self):
return self._pool_steps
@pool_steps.setter
def pool_steps(self, value):
assert value >= 0 and int(value) - value == 0, "new pool steps must be a non-negative integer"
if value != self._pool_steps:
print("Resizing readout features")
c, w, h = self.in_shape
self._pool_steps = int(value)
self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, self.outdims))
self.features.data.fill_(1 / self.in_shape[0])
def initialize(self):
"""
Initialize function initialises the grid, features or weights and bias terms.
"""
self.grid.data.uniform_(-self.init_range, self.init_range)
self.features.data.fill_(1 / self.in_shape[0])
if self.bias is not None:
self.bias.data.fill_(0)
def feature_l1(self, average=True):
"""
Returns l1 regularization term for features.
Args:
average(bool): if True, use mean of weights for regularization
"""
if average:
return self.features.abs().mean()
else:
return self.features.abs().sum()
def forward(self, x, shift=None, out_idx=None):
"""
Propagates the input forwards through the readout
Args:
x: input data
shift: shifts the location of the grid (from eye-tracking data)
out_idx: index of neurons to be predicted
Returns:
y: neuronal activity
"""
self.grid.data = torch.clamp(self.grid.data, -1, 1)
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if [c_in, w_in, h_in] != [c, w, h]:
raise ValueError("the specified feature map dimension is not the readout's expected input dimension")
m = self.pool_steps + 1 # the input feature is considered the first pooling stage
feat = self.features.view(1, m * c, self.outdims)
if out_idx is None:
grid = self.grid
bias = self.bias
outdims = self.outdims
else:
if isinstance(out_idx, np.ndarray):
if out_idx.dtype == bool:
out_idx = np.where(out_idx)[0]
feat = feat[:, :, out_idx]
grid = self.grid[:, out_idx]
if self.bias is not None:
bias = self.bias[out_idx]
outdims = len(out_idx)
if shift is None:
grid = grid.expand(N, outdims, 1, 2)
else:
# shift grid based on shifter network's prediction
grid = grid.expand(N, outdims, 1, 2) + shift[:, None, None, :]
pools = [F.grid_sample(x, grid, align_corners=self.align_corners)]
for _ in range(self.pool_steps):
_, _, w_pool, h_pool = x.size()
if w_pool * h_pool == 1:
warnings.warn("redundant pooling steps: pooled feature map size is already 1X1, consider reducing it")
x = self.avg(x)
pools.append(F.grid_sample(x, grid, align_corners=self.align_corners))
y = torch.cat(pools, dim=1)
y = (y.squeeze(-1) * feat).sum(1).view(N, outdims)
if self.bias is not None:
y = y + bias
return y
def __repr__(self):
c, w, h = self.in_shape
r = self.__class__.__name__ + " (" + "{} x {} x {}".format(c, w, h) + " -> " + str(self.outdims) + ")"
if self.bias is not None:
r += " with bias"
r += " and pooling for {} steps\n".format(self.pool_steps)
for ch in self.children():
r += " -> " + ch.__repr__() + "\n"
return r
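
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): instantiates
# PointPooled2d on a random feature map with illustrative shapes; 10 output
# units are read out from a (16, 8, 8) core output with two pooling stages.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_readout = PointPooled2d(
        in_shape=(16, 8, 8),
        outdims=10,
        pool_steps=2,
        bias=True,
        pool_kern=2,
        init_range=0.1,
    )
    demo_features = torch.randn(4, 16, 8, 8)
    print(demo_readout(demo_features).shape)  # -> torch.Size([4, 10])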
class SpatialTransformerPooled3d(nn.Module):
def __init__(
self,
in_shape,
outdims,
pool_steps=1,
positive=False,
bias=True,
init_range=0.05,
kernel_size=2,
stride=2,
grid=None,
stop_grad=False,
align_corners=True,
):
super().__init__()
self._pool_steps = pool_steps
self.in_shape = in_shape
c, t, w, h = in_shape
self.outdims = outdims
self.positive = positive
if grid is None:
self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
else:
self.grid = grid
self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
self.register_buffer("mask", torch.ones_like(self.features))
if bias:
bias = Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.avg = nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=False)
self.init_range = init_range
self.initialize()
self.stop_grad = stop_grad
self.align_corners = align_corners
@property
def pool_steps(self):
return self._pool_steps
@pool_steps.setter
def pool_steps(self, value):
assert value >= 0 and int(value) - value == 0, "new pool steps must be a non-negative integer"
if value != self._pool_steps:
print("Resizing readout features")
c, t, w, h = self.in_shape
outdims = self.outdims
self._pool_steps = int(value)
self.features = Parameter(torch.Tensor(1, c * (self._pool_steps + 1), 1, outdims))
self.mask = torch.ones_like(self.features)
self.features.data.fill_(1 / self.in_shape[0])
def initialize(self, init_noise=1e-3, grid=True):
# randomly pick centers within the spatial map
self.features.data.fill_(1 / self.in_shape[0])
if self.bias is not None:
self.bias.data.fill_(0)
if grid:
self.grid.data.uniform_(-self.init_range, self.init_range)
def feature_l1(self, average=True, subs_idx=None):
subs_idx = subs_idx if subs_idx is not None else slice(None)
if average:
return self.features[..., subs_idx].abs().mean()
else:
return self.features[..., subs_idx].abs().sum()
def reset_fisher_prune_scores(self):
self._prune_n = 0
self._prune_scores = self.features.detach() * 0
def update_fisher_prune_scores(self):
self._prune_n += 1
if self.features.grad is None:
raise ValueError("You need to run backward first")
self._prune_scores += (0.5 * self.features.grad.pow(2) * self.features.pow(2)).detach()
@property
def fisher_prune_scores(self):
return self._prune_scores / self._prune_n
def prune(self):
idx = (self.fisher_prune_scores + 1e6 * (1 - self.mask)).squeeze().argmin(dim=0)
nt = idx.new
seq = nt(np.arange(len(idx)))
self.mask[:, idx, :, seq] = 0
self.features.data[:, idx, :, seq] = 0
def forward(self, x, shift=None, subs_idx=None):
if self.stop_grad:
x = x.detach()
self.features.data *= self.mask
if self.positive:
positive(self.features)
self.grid.data = torch.clamp(self.grid.data, -1, 1)
N, c, t, w, h = x.size()
m = self._pool_steps + 1
if subs_idx is not None:
feat = self.features[..., subs_idx].contiguous()
outdims = feat.size(-1)
feat = feat.view(1, m * c, outdims)
grid = self.grid[:, subs_idx, ...]
else:
grid = self.grid
feat = self.features.view(1, m * c, self.outdims)
outdims = self.outdims
if shift is None:
grid = grid.expand(N * t, outdims, 1, 2)
else:
grid = grid.expand(N, outdims, 1, 2)
grid = torch.stack([grid + shift[:, i, :][:, None, None, :] for i in range(t)], 1)
grid = grid.contiguous().view(-1, outdims, 1, 2)
z = x.contiguous().transpose(2, 1).contiguous().view(-1, c, w, h)
pools = [F.grid_sample(z, grid, align_corners=self.align_corners)]
for i in range(self._pool_steps):
z = self.avg(z)
pools.append(F.grid_sample(z, grid, align_corners=self.align_corners))
y = torch.cat(pools, dim=1)
y = (y.squeeze(-1) * feat).sum(1).view(N, t, outdims)
if self.bias is not None:
if subs_idx is None:
y = y + self.bias
else:
y = y + self.bias[subs_idx]
return y
def __repr__(self):
c, _, w, h = self.in_shape
r = self.__class__.__name__ + " (" + "{} x {} x {}".format(c, w, h) + " -> " + str(self.outdims) + ")"
if self.bias is not None:
r += " with bias"
if self.stop_grad:
r += ", stop_grad=True"
r += "\n"
for ch in self.children():
r += " -> " + ch.__repr__() + "\n"
return r
class Pyramid(nn.Module):
_filter_dict = {
"gauss5x5": np.float32(
[
[0.003765, 0.015019, 0.023792, 0.015019, 0.003765],
[0.015019, 0.059912, 0.094907, 0.059912, 0.015019],
[0.023792, 0.094907, 0.150342, 0.094907, 0.023792],
[0.015019, 0.059912, 0.094907, 0.059912, 0.015019],
[0.003765, 0.015019, 0.023792, 0.015019, 0.003765],
]
),
"gauss3x3": np.float32([[1 / 16, 1 / 8, 1 / 16], [1 / 8, 1 / 4, 1 / 8], [1 / 16, 1 / 8, 1 / 16]]),
"laplace5x5": np.outer(np.float32([1, 4, 6, 4, 1]), np.float32([1, 4, 6, 4, 1])) / 256,
}
def __init__(self, scale_n=4, type="gauss5x5", downsample=True):
"""
Setup Laplace image pyramid
Args:
scale_n: number of Laplace pyramid layers to construct
type: type of Gaussian filter used in pyramid construction. Valid options are: 'gauss5x5', 'gauss3x3', and 'laplace5x5'
downsample: whether to downsample the image in each layer. Defaults to True
"""
super().__init__()
self.type = type
self.downsample = downsample
h = self._filter_dict[type]
self.register_buffer("filter", torch.from_numpy(h))
self.scale_n = scale_n
self._kern = h.shape[0]
self._pad = self._kern // 2
self._filter_cache = None
def lap_split(self, img):
N, c, h, w = img.size()
if self._filter_cache is not None and self._filter_cache.size(0) == c:
filter = self._filter_cache
else:
filter = self.filter.expand(c, 1, self._kern, self._kern).contiguous()
self._filter_cache = filter
# the necessary output padding depends on even/odd of the dimension
output_padding = (h + 1) % 2, (w + 1) % 2
smooth = F.conv2d(img, filter, padding=self._pad, groups=c)
if self.downsample:
lo = smooth[:, :, ::2, ::2]
lo2 = 4 * F.conv_transpose2d(
lo,
filter,
stride=2,
padding=self._pad,
output_padding=output_padding,
groups=c,
)
else:
lo = lo2 = smooth
hi = img - lo2
return lo, hi
def forward(self, img):
levels = []
for i in range(self.scale_n):
img, hi = self.lap_split(img)
levels.append(hi)
levels.append(img)
return levels
def __repr__(self):
return "Pyramid(scale_n={scale_n}, padding={_pad}, downsample={downsample}, type={type})".format(
**self.__dict__
)
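
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): decomposes a random
# 32x32 single-channel batch into a three-level Laplacian pyramid and prints
# the band shapes (three high-pass bands plus the final low-pass residual).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_pyramid = Pyramid(scale_n=3, type="gauss5x5", downsample=True)
    demo_images = torch.randn(2, 1, 32, 32)
    for level, band in enumerate(demo_pyramid(demo_images)):
        print(level, tuple(band.shape))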
class PointPyramid2d(nn.Module):
def __init__(
self,
in_shape,
outdims,
scale_n,
positive,
bias,
init_range,
downsample,
type,
align_corners=True,
**kwargs,
):
super().__init__()
self.in_shape = in_shape
c, w, h = in_shape
self.outdims = outdims
self.positive = positive
self.gauss_pyramid = Pyramid(scale_n=scale_n, downsample=downsample, type=type)
self.grid = Parameter(torch.Tensor(1, outdims, 1, 2))
self.features = Parameter(torch.Tensor(1, c * (scale_n + 1), 1, outdims))
if bias:
bias = Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.init_range = init_range
self.align_corners = align_corners
self.initialize()
def initialize(self):
self.grid.data.uniform_(-self.init_range, self.init_range)
self.features.data.fill_(1 / self.in_shape[0])
if self.bias is not None:
self.bias.data.fill_(0)
def group_sparsity(self, group_size):
f = self.features.size(1)
n = f // group_size
ret = 0
for chunk in range(0, f, group_size):
ret = ret + (self.features[:, chunk : chunk + group_size, ...].pow(2).mean(1) + 1e-12).sqrt().mean() / n
return ret
def feature_l1(self, average=True):
if average:
return self.features.abs().mean()
else:
return self.features.abs().sum()
def forward(self, x, shift=None):
if self.positive:
positive(self.features)
self.grid.data = torch.clamp(self.grid.data, -1, 1)
N, c, w, h = x.size()
m = self.gauss_pyramid.scale_n + 1
feat = self.features.view(1, m * c, self.outdims)
if shift is None:
grid = self.grid.expand(N, self.outdims, 1, 2)
else:
grid = self.grid.expand(N, self.outdims, 1, 2) + shift[:, None, None, :]
pools = [F.grid_sample(xx, grid, align_corners=self.align_corners) for xx in self.gauss_pyramid(x)]
y = torch.cat(pools, dim=1).squeeze(-1)
y = (y * feat).sum(1).view(N, self.outdims)
if self.bias is not None:
y = y + self.bias
return y
def __repr__(self):
c, w, h = self.in_shape
r = self.__class__.__name__ + " (" + "{} x {} x {}".format(c, w, h) + " -> " + str(self.outdims) + ")"
if self.bias is not None:
r += " with bias"
for ch in self.children():
r += " -> " + ch.__repr__() + "\n"
return r
class FullGaussian2d(nn.Module):
"""
A readout using a spatial transformer layer whose positions are sampled from one Gaussian per neuron. Mean
and covariance of that Gaussian are learned.
Args:
in_shape (list, tuple): shape of the input feature map [channels, width, height]
outdims (int): number of output units
bias (bool): adds a bias term
        init_mu_range (float): initialises the mean with Uniform([-init_mu_range, init_mu_range])
[expected: positive value <=1]. Default: 0.1
init_sigma (float): The standard deviation of the Gaussian with `init_sigma` when `gauss_type` is
'isotropic' or 'uncorrelated'. When `gauss_type='full'` initialize the square root of the
covariance matrix with with Uniform([-init_sigma, init_sigma]). Default: 1
batch_sample (bool): if True, samples a position for each image in the batch separately
[default: True as it decreases convergence time and performs just as well]
        align_corners (bool): Keyword argument to gridsample for bilinear interpolation.
It changed behavior in PyTorch 1.3. The default of align_corners = True is setting the
behavior to pre PyTorch 1.3 functionality for comparability.
gauss_type (str): Which Gaussian to use. Options are 'isotropic', 'uncorrelated', or 'full' (default).
grid_mean_predictor (dict): Parameters for a predictor of the mean grid locations. Has to have a form like
{
'hidden_layers':0,
'hidden_features':20,
'final_tanh': False,
}
shared_features (dict): Used when the feature vectors are shared (within readout between neurons) or between
this readout and other readouts. Has to be a dictionary of the form
{
'match_ids': (numpy.array),
'shared_features': torch.nn.Parameter or None
}
The match_ids are used to match things that should be shared within or across scans.
If `shared_features` is None, this readout will create its own features. If it is set to
a feature Parameter of another readout, it will replace the features of this readout. It will be
access in increasing order of the sorted unique match_ids. For instance, if match_ids=[2,0,0,1],
there should be 3 features in order [0,1,2]. When this readout creates features, it will do so in
that order.
shared_grid (dict): Like `shared_features`. Use dictionary like
{
'match_ids': (numpy.array),
'shared_grid': torch.nn.Parameter or None
}
See documentation of `shared_features` for specification.
source_grid (numpy.array):
Source grid for the grid_mean_predictor.
Needs to be of size neurons x grid_mean_predictor[input_dimensions]
"""
def __init__(
self,
in_shape,
outdims,
bias,
init_mu_range=0.1,
init_sigma=1,
batch_sample=True,
align_corners=True,
gauss_type="full",
grid_mean_predictor=None,
shared_features=None,
shared_grid=None,
source_grid=None,
**kwargs,
):
super().__init__()
# determines whether the Gaussian is isotropic or not
self.gauss_type = gauss_type
if init_mu_range > 1.0 or init_mu_range <= 0.0 or init_sigma <= 0.0:
raise ValueError("either init_mu_range doesn't belong to [0.0, 1.0] or init_sigma_range is non-positive")
# store statistics about the images and neurons
self.in_shape = in_shape
self.outdims = outdims
# sample a different location per example
self.batch_sample = batch_sample
# position grid shape
self.grid_shape = (1, outdims, 1, 2)
# the grid can be predicted from another grid
self._predicted_grid = False
self._shared_grid = False
self._original_grid = not self._predicted_grid
if grid_mean_predictor is None and shared_grid is None:
self._mu = Parameter(torch.Tensor(*self.grid_shape)) # mean location of gaussian for each neuron
elif grid_mean_predictor is not None and shared_grid is not None:
raise ConfigurationError("Shared grid_mean_predictor and shared_grid_mean cannot both be set")
elif grid_mean_predictor is not None:
self.init_grid_predictor(source_grid=source_grid, **grid_mean_predictor)
elif shared_grid is not None:
self.initialize_shared_grid(**(shared_grid or {}))
if gauss_type == "full":
self.sigma_shape = (1, outdims, 2, 2)
elif gauss_type == "uncorrelated":
self.sigma_shape = (1, outdims, 1, 2)
elif gauss_type == "isotropic":
self.sigma_shape = (1, outdims, 1, 1)
else:
raise ValueError(f'gauss_type "{gauss_type}" not known')
self.init_sigma = init_sigma
self.sigma = Parameter(torch.Tensor(*self.sigma_shape)) # standard deviation for gaussian for each neuron
self.initialize_features(**(shared_features or {}))
if bias:
bias = Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.init_mu_range = init_mu_range
self.align_corners = align_corners
self.initialize()
@property
def shared_features(self):
return self._features
@property
def shared_grid(self):
return self._mu
@property
def features(self):
if self._shared_features:
return self.scales * self._features[..., self.feature_sharing_index]
else:
return self._features
@property
def grid(self):
return self.sample_grid(batch_size=1, sample=False)
def feature_l1(self, average=True):
"""
Returns the L1 regularization term: either the mean or the sum of all feature weights
Args:
average(bool): if True, use mean of weights for regularization
"""
if self._original_features:
if average:
return self._features.abs().mean()
else:
return self._features.abs().sum()
else:
return 0
@property
def mu(self):
if self._predicted_grid:
return self.mu_transform(self.source_grid.squeeze()).view(*self.grid_shape)
elif self._shared_grid:
if self._original_grid:
return self._mu[:, self.grid_sharing_index, ...]
else:
return self.mu_transform(self._mu.squeeze())[self.grid_sharing_index].view(*self.grid_shape)
else:
return self._mu
def sample_grid(self, batch_size, sample=None):
"""
Returns the grid locations from the core by sampling from a Gaussian distribution
Args:
batch_size (int): size of the batch
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e. training or eval) and does as instructed
"""
with torch.no_grad():
self.mu.clamp_(min=-1, max=1) # at eval time, only self.mu is used so it must belong to [-1,1]
if self.gauss_type != "full":
self.sigma.clamp_(min=0)  # sigma/variance is always a positive quantity
grid_shape = (batch_size,) + self.grid_shape[1:]
sample = self.training if sample is None else sample
if sample:
norm = self.mu.new(*grid_shape).normal_()
else:
norm = self.mu.new(*grid_shape).zero_() # for consistency and CUDA capability
if self.gauss_type != "full":
return torch.clamp(
norm * self.sigma + self.mu, min=-1, max=1
) # grid locations in feature space sampled randomly around the mean self.mu
else:
return torch.clamp(
torch.einsum("ancd,bnid->bnic", self.sigma, norm) + self.mu,
min=-1,
max=1,
) # grid locations in feature space sampled randomly around the mean self.mu
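# Note on the 'full' covariance branch above (descriptive comment, not part of the
# original implementation): self.sigma holds one 2x2 matrix per neuron that acts as a
# square root of the covariance. The einsum "ancd,bnid->bnic" multiplies each neuron's
# 2x2 matrix with its standard-normal sample, so the sampled offsets are distributed
# with covariance sigma @ sigma.T before being shifted by mu and clamped to [-1, 1].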
def init_grid_predictor(self, source_grid, hidden_features=20, hidden_layers=0, final_tanh=False):
self._original_grid = False
layers = [nn.Linear(source_grid.shape[1], hidden_features if hidden_layers > 0 else 2)]
for i in range(hidden_layers):
layers.extend(
[
nn.ELU(),
nn.Linear(hidden_features, hidden_features if i < hidden_layers - 1 else 2),
]
)
if final_tanh:
layers.append(nn.Tanh())
self.mu_transform = nn.Sequential(*layers)
source_grid = source_grid - source_grid.mean(axis=0, keepdims=True)
source_grid = source_grid / np.abs(source_grid).max()
self.register_buffer("source_grid", torch.from_numpy(source_grid.astype(np.float32)))
self._predicted_grid = True
def initialize(self):
"""
Initializes the mean, and sigma of the Gaussian readout along with the features weights
"""
if not self._predicted_grid or self._original_grid:
self._mu.data.uniform_(-self.init_mu_range, self.init_mu_range)
if self.gauss_type != "full":
self.sigma.data.fill_(self.init_sigma)
else:
self.sigma.data.uniform_(-self.init_sigma, self.init_sigma)
self._features.data.fill_(1 / self.in_shape[0])
if self._shared_features:
self.scales.data.fill_(1.0)
if self.bias is not None:
self.bias.data.fill_(0)
def initialize_features(self, match_ids=None, shared_features=None):
"""
The internal attribute `_original_features` in this function denotes whether this instance of the FullGaussian2d
learns the original features (True) or if it uses a copy of the features from another instance of FullGaussian2d
via the `shared_features` (False). If it uses a copy, the feature_l1 regularizer for this copy will return 0
"""
c, w, h = self.in_shape
self._original_features = True
if match_ids is not None:
assert self.outdims == len(match_ids)
n_match_ids = len(np.unique(match_ids))
if shared_features is not None:
assert shared_features.shape == (
1,
c,
1,
n_match_ids,
), f"shared features need to have shape (1, {c}, 1, {n_match_ids})"
self._features = shared_features
self._original_features = False
else:
self._features = Parameter(
torch.Tensor(1, c, 1, n_match_ids)
) # feature weights for each channel of the core
self.scales = Parameter(torch.Tensor(1, 1, 1, self.outdims))  # per-neuron scale factors applied to the shared features
_, sharing_idx = np.unique(match_ids, return_inverse=True)
self.register_buffer("feature_sharing_index", torch.from_numpy(sharing_idx))
self._shared_features = True
else:
self._features = Parameter(
torch.Tensor(1, c, 1, self.outdims)
) # feature weights for each channel of the core
self._shared_features = False
def initialize_shared_grid(self, match_ids=None, shared_grid=None):
c, w, h = self.in_shape
if match_ids is None:
raise ConfigurationError("match_ids must be set for sharing grid")
assert self.outdims == len(match_ids), "There must be one match ID per output dimension"
n_match_ids = len(np.unique(match_ids))
if shared_grid is not None:
assert shared_grid.shape == (
1,
n_match_ids,
1,
2,
), f"shared grid needs to have shape (1, {n_match_ids}, 1, 2)"
self._mu = shared_grid
self._original_grid = False
self.mu_transform = nn.Linear(2, 2)
self.mu_transform.bias.data.fill_(0.0)
self.mu_transform.weight.data = torch.eye(2)
else:
self._mu = Parameter(torch.Tensor(1, n_match_ids, 1, 2))  # shared mean locations, one per unique match ID
_, sharing_idx = np.unique(match_ids, return_inverse=True)
self.register_buffer("grid_sharing_index", torch.from_numpy(sharing_idx))
self._shared_grid = True
def forward(self, x, sample=None, shift=None, out_idx=None):
"""
Propagates the input forwards through the readout
Args:
x: input data
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e. training or eval) and does as instructed
shift (Tensor or None): shifts the location of the grid (e.g. from eye-tracking data)
out_idx (array-like or None): index of neurons to be predicted
Returns:
y: neuronal activity
"""
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if (c_in, w_in, h_in) != (c, w, h):
raise ValueError("the specified feature map dimension is not the readout's expected input dimension")
feat = self.features.view(1, c, self.outdims)
bias = self.bias
outdims = self.outdims
if self.batch_sample:
# sample the grid_locations separately per image per batch
grid = self.sample_grid(batch_size=N, sample=sample) # sample determines sampling from Gaussian
else:
# use one sampled grid_locations for all images in the batch
grid = self.sample_grid(batch_size=1, sample=sample).expand(N, outdims, 1, 2)
if out_idx is not None:
if isinstance(out_idx, np.ndarray):
if out_idx.dtype == bool:
out_idx = np.where(out_idx)[0]
feat = feat[:, :, out_idx]
grid = grid[:, out_idx]
if bias is not None:
bias = bias[out_idx]
outdims = len(out_idx)
if shift is not None:
grid = grid + shift[:, None, None, :]
y = F.grid_sample(x, grid, align_corners=self.align_corners)
y = (y.squeeze(-1) * feat).sum(1).view(N, outdims)
if self.bias is not None:
y = y + bias
return y
def __repr__(self):
c, w, h = self.in_shape
r = self.gauss_type + " "
r += self.__class__.__name__ + " (" + "{} x {} x {}".format(c, w, h) + " -> " + str(self.outdims) + ")"
if self.bias is not None:
r += " with bias"
if self._shared_features:
r += ", with {} features".format("original" if self._original_features else "shared")
if self._predicted_grid:
r += ", with predicted grid"
if self._shared_grid:
r += ", with {} grid".format("original" if self._original_grid else "shared")
for ch in self.children():
r += " -> " + ch.__repr__() + "\n"
return r
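# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wiring this readout to a core output; the
# shapes (64 channels, 8x8 feature map, 100 neurons) are assumptions for illustration:
#
#     readout = FullGaussian2d(in_shape=(64, 8, 8), outdims=100, bias=True,
#                              init_mu_range=0.1, init_sigma=1.0, gauss_type="full")
#     core_output = torch.rand(16, 64, 8, 8)          # (batch, channels, w, h)
#     responses = readout(core_output, sample=True)   # -> shape (16, 100)
#
# During evaluation, calling the readout in eval() mode (or with sample=False) uses
# the per-neuron means instead of drawing samples.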
class RemappedGaussian2d(FullGaussian2d):
"""
A readout using a spatial transformer layer whose positions are sampled from one Gaussian per neuron. Mean
and covariance of that Gaussian are learned. In addition, there is an image-dependent remapping of neuron
locations.
For most parameters see: FullGaussian2d
Args:
remap_layers (int): number of layers of the remapping network
remap_kernel (int): conv kernel size of the remapping network
max_remap_amplitude (float): maximal amplitude of remapping (factor on output of remapping network)
"""
def __init__(self, *args, remap_layers=2, remap_kernel=3, max_remap_amplitude=0.2, **kwargs):
super().__init__(*args, **kwargs)
channels, width, height = self.in_shape
remapper = nn.Sequential()
for i in range(remap_layers - 1):
remapper.add_module(f"conv{i}", nn.Conv2d(channels, channels, remap_kernel, padding=True))
remapper.add_module(f"norm{i}", nn.BatchNorm2d(channels))
remapper.add_module(f"nonlin{i}", nn.ELU())
else:
remapper.add_module(
f"conv{remap_layers}",
nn.Conv2d(channels, 2, remap_kernel, padding=True),
)
remapper.add_module(f"norm{remap_layers}", nn.BatchNorm2d(2))
remapper.add_module(f"nonlin{remap_layers}", nn.Tanh())
self.remap_field = remapper
self.max_remap_amplitude = max_remap_amplitude
@staticmethod
def init_conv(m):
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0)
def initialize_remap_field(self):
self.apply(self.init_conv)
def forward(self, x, sample=None, shift=None, out_idx=None):
offset_field = self.remap_field(x) * self.max_remap_amplitude
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if (c_in, w_in, h_in) != (c, w, h):
raise ValueError("the specified feature map dimension is not the readout's expected input dimension")
feat = self.features.view(1, c, self.outdims)
bias = self.bias
outdims = self.outdims
if self.batch_sample:
# sample the grid_locations separately per image per batch
grid = self.sample_grid(batch_size=N, sample=sample) # sample determines sampling from Gaussian
else:
# use one sampled grid_locations for all images in the batch
grid = self.sample_grid(batch_size=1, sample=sample).expand(N, outdims, 1, 2)
if out_idx is not None:
if isinstance(out_idx, np.ndarray):
if out_idx.dtype == bool:
out_idx = np.where(out_idx)[0]
feat = feat[:, :, out_idx]
grid = grid[:, out_idx]
if bias is not None:
bias = bias[out_idx]
outdims = len(out_idx)
offsets = F.grid_sample(offset_field, grid, align_corners=self.align_corners)
grid = grid + offsets.permute(0, 2, 3, 1)
if shift is not None:
grid = grid + shift[:, None, None, :]
y = F.grid_sample(x, grid, align_corners=self.align_corners)
y = (y.squeeze(-1) * feat).sum(1).view(N, outdims)
if self.bias is not None:
y = y + bias
return y
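# --- Illustrative note (not part of the original module) ---
# RemappedGaussian2d is constructed like FullGaussian2d; the extra keyword arguments
# only configure the small convolutional remapping network, e.g. (hypothetical shapes):
#
#     readout = RemappedGaussian2d(in_shape=(64, 8, 8), outdims=100, bias=True,
#                                  remap_layers=2, remap_kernel=3, max_remap_amplitude=0.2)
#
# In forward(), the remapping network predicts a 2-channel offset field from the input,
# which is sampled at each neuron's grid position and added to that position before the
# feature map itself is sampled.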
class Gaussian3d(nn.Module):
"""
This readout instantiates an object that can be used to learn a point in the core feature space for each neuron,
sampled from a Gaussian distribution with some mean and variance at train time but set to the mean at test time, that best predicts its response.
The readout receives the shape of the core as 'in_shape', the number of units/neurons being predicted as 'outdims', 'bias' specifying whether
or not a bias term is to be used, and ranges for initialising the mean and variance of the Gaussian distribution from which we sample,
via the uniform distributions U(-init_mu_range, init_mu_range) and U(0.0, init_sigma_range) respectively.
The grid parameter contains the normalized locations (x, y coordinates in the core feature space) and is clipped to [-1, 1] as this is a
requirement of the torch.nn.functional.grid_sample function. The feature parameter learns the best linear mapping between the feature
map at a given location, sampled from the Gaussian at train time but set to the mean at eval time, and the unit's response, with or without an additional elu non-linearity.
Args:
in_shape (list): shape of the input feature map [channels, width, height]
outdims (int): number of output units
bias (bool): adds a bias term
init_mu_range (float): initialises the mean with Uniform([-init_mu_range, init_mu_range])
[expected: positive value <=1]
init_sigma_range (float): initialises sigma with Uniform([0.0, init_sigma_range]).
It is recommended however to use a fixed initialization, for faster convergence.
For this, set fixed_sigma to True.
batch_sample (bool): if True, samples a position for each image in the batch separately
[default: True as it decreases convergence time and performs just as well]
align_corners (bool): Keyword argument to grid_sample for bilinear interpolation.
Its default changed in PyTorch 1.3; align_corners = True restores the
pre PyTorch 1.3 behaviour for comparability.
fixed_sigma (bool): Recommended behavior: True. Set to False only for backwards compatibility.
If True, initialises sigma with the exact value given for all neurons instead of sampling from a range.
"""
def __init__(
self,
in_shape,
outdims,
bias,
init_mu_range=0.5,
init_sigma_range=0.5,
batch_sample=True,
align_corners=True,
fixed_sigma=False,
**kwargs,
):
super().__init__()
if init_mu_range > 1.0 or init_mu_range <= 0.0 or init_sigma_range <= 0.0:
raise ValueError("init_mu_range or init_sigma_range is not within required limit!")
self.in_shape = in_shape
self.outdims = outdims
self.batch_sample = batch_sample
self.grid_shape = (1, 1, outdims, 1, 3)
self.mu = Parameter(torch.Tensor(*self.grid_shape)) # mean location of gaussian for each neuron
self.sigma = Parameter(torch.Tensor(*self.grid_shape)) # standard deviation for gaussian for each neuron
self.features = Parameter(torch.Tensor(1, 1, 1, outdims)) # saliency weights for each channel from core
if bias:
bias = Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.init_mu_range = init_mu_range
self.init_sigma_range = init_sigma_range
self.align_corners = align_corners
self.fixed_sigma = fixed_sigma
self.initialize()
def sample_grid(self, batch_size, sample=None):
"""
Returns the grid locations from the core by sampling from a Gaussian distribution
Args:
batch_size (int): size of the batch
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e. training or eval) and does as instructed
"""
with torch.no_grad():
self.mu.clamp_(min=-1, max=1) # at eval time, only self.mu is used so it must belong to [-1,1]
self.sigma.clamp_(min=0) # sigma/variance is always a positive quantity
grid_shape = (batch_size,) + self.grid_shape[1:]
sample = self.training if sample is None else sample
if sample:
norm = self.mu.new(*grid_shape).normal_()
else:
norm = self.mu.new(*grid_shape).zero_() # for consistency and CUDA capability
return torch.clamp(
norm * self.sigma + self.mu, min=-1, max=1
) # grid locations in feature space sampled randomly around the mean self.mu
@property
def grid(self):
return self.sample_grid(batch_size=1, sample=False)
def initialize(self):
self.mu.data.uniform_(-self.init_mu_range, self.init_mu_range)
if self.fixed_sigma:
self.sigma.data.uniform_(self.init_sigma_range, self.init_sigma_range)
else:
self.sigma.data.uniform_(0, self.init_sigma_range)
warnings.warn(
"sigma is sampled from uniform distribuiton, instead of a fixed value. Consider setting "
"fixed_sigma to True"
)
self.features.data.fill_(1 / self.in_shape[0])
if self.bias is not None:
self.bias.data.fill_(0)
def forward(self, x, sample=None, shift=None, out_idx=None):
"""
Propagates the input forwards through the readout
Args:
x: input data
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e. training or eval) and does as instructed
shift (Tensor or None): shifts the location of the grid (e.g. from eye-tracking data)
out_idx (array-like or None): index of neurons to be predicted
Returns:
y: neuronal activity
"""
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if (c_in, w_in, h_in) != (c, w, h):
raise ValueError("the specified feature map dimension is not the readout's expected input dimension")
x = x.view(N, 1, c, w, h)
feat = self.features
bias = self.bias
outdims = self.outdims
if self.batch_sample:
# sample the grid_locations separately per image per batch
grid = self.sample_grid(batch_size=N, sample=sample) # sample determines sampling from Gaussian
else:
# use one sampled grid_locations for all images in the batch
grid = self.sample_grid(batch_size=1, sample=sample).expand(N, outdims, 1, 3)
if out_idx is not None:
# out_idx specifies the indices to subset of neurons for training/testing
if isinstance(out_idx, np.ndarray):
if out_idx.dtype == bool:
out_idx = np.where(out_idx)[0]
feat = feat[:, :, :, out_idx]
grid = grid[:, :, out_idx]
if bias is not None:
bias = bias[out_idx]
outdims = len(out_idx)
if shift is not None:
grid = grid + shift[:, None, None, :]
y = F.grid_sample(x, grid, align_corners=self.align_corners)
y = (y.squeeze(-1) * feat).sum(1).view(N, outdims)
if self.bias is not None:
y = y + bias
return y
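# --- Illustrative usage sketch (not part of the original module) ---
# Gaussian3d samples a single location in the (x, y, channel) volume per neuron, so its
# feature weights reduce to a single scalar per neuron. Hypothetical shapes for illustration:
#
#     readout = Gaussian3d(in_shape=(64, 8, 8), outdims=100, bias=True,
#                          init_mu_range=0.5, init_sigma_range=0.5, fixed_sigma=True)
#     core_output = torch.rand(16, 64, 8, 8)
#     responses = readout(core_output, sample=True)   # -> shape (16, 100)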
class UltraSparse(nn.Module):
"""
This readout instantiates an object that can be used to learn one or more features (with or without
a shared mean in the x-y plane) in the core feature space for each neuron, sampled from a Gaussian distribution
with some mean and variance at training time but set to the mean at test time, that best predicts its response.
The readout receives the shape of the core as 'in_shape', the number of units/neurons being predicted as 'outdims', 'bias' specifying whether
or not a bias term is to be used, and ranges for initialising the mean and variance of the Gaussian distribution from which we sample,
via the uniform distributions U(-init_mu_range, init_mu_range) and U(0.0, init_sigma_range) respectively.
The grid parameter contains the normalized locations (x, y coordinates in the core feature space) and is clipped to [-1, 1] as this is a
requirement of the torch.nn.functional.grid_sample function. The feature parameter learns the best linear mapping between the feature
map at a given location, sampled from the Gaussian at train time but set to the mean at eval time, and the unit's response, with or without an additional elu non-linearity.
Args:
in_shape (list): shape of the input feature map [channels, width, height]
outdims (int): number of output units
bias (bool): adds a bias term
init_mu_range (float): initialises the mean with Uniform([-init_mu_range, init_mu_range])
[expected: positive value <=1]
init_sigma_range (float): initialises sigma with Uniform([0.0, init_sigma_range])
batch_sample (bool): if True, samples a position for each image in the batch separately
[default: True as it decreases convergence time and performs just as well]
num_filters (int): number of points in the core-features to be learned for each neuron
[default: 1, an instance of sparsest readout]
shared_mean (bool): if True, the mean in the x-y plane (image-plane) is shared across all channels
[default: False]
align_corners (bool): Keyword argument to grid_sample for bilinear interpolation.
Its default changed in PyTorch 1.3; align_corners = True restores the
pre PyTorch 1.3 behaviour for comparability.
fixed_sigma (bool): Recommended behavior: True. Set to False only for backwards compatibility.
If True, initialises sigma with the exact value given for all neurons instead of sampling from a range.
"""
def __init__(
self,
in_shape,
outdims,
bias,
init_mu_range,
init_sigma_range,
batch_sample=True,
num_filters=1,
shared_mean=False,
align_corners=True,
fixed_sigma=False,
**kwargs,
):
super().__init__()
if init_mu_range > 1.0 or init_mu_range <= 0.0 or init_sigma_range <= 0.0:
raise ValueError("either init_mu_range doesn't belong to [0.0, 1.0] or init_sigma_range is non-positive!")
self.in_shape = in_shape
c, w, h = in_shape
self.outdims = outdims
self.batch_sample = batch_sample
self.num_filters = num_filters
self.shared_mean = shared_mean
self.grid_shape = (1, 1, outdims * num_filters, 1, 3)
if shared_mean:
self.gridxy_shape = (1, 1, outdims, 1, 2)
self.gridch_shape = (1, 1, outdims * num_filters, 1, 1)
self.mu_xy = Parameter(
torch.Tensor(*self.gridxy_shape)
) # mean location (in xy dim) of gaussian for each neuron
self.mu_ch = Parameter(
torch.Tensor(*self.gridch_shape)
) # mean location (in ch dim) of gaussian for each neuron
self.sigma_xy = Parameter(
torch.Tensor(*self.gridxy_shape)
) # standard deviation for gaussian for each neuron
self.sigma_ch = Parameter(torch.Tensor(*self.gridch_shape))
else:
self.mu = Parameter(torch.Tensor(*self.grid_shape)) # mean location of gaussian for each neuron
self.sigma = Parameter(torch.Tensor(*self.grid_shape)) # standard deviation for gaussian for each neuron
self.features = Parameter(
torch.Tensor(1, 1, outdims, num_filters)
) # saliency weights for each channel from core
if bias:
bias = Parameter(torch.Tensor(outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.init_mu_range = init_mu_range
self.init_sigma_range = init_sigma_range
self.align_corners = align_corners
self.fixed_sigma = fixed_sigma
self.initialize()
def sample_grid(self, batch_size, sample=None):
"""
Returns the grid locations from the core by sampling from a Gaussian distribution
Args:
batch_size (int): size of the batch
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e. training or eval) and does as instructed
"""
if self.shared_mean:
# sample an xy location and keep it same across all filter channels
# explicit clamping of mu and sigma along the channel dimension was needed as the clamping post cat was not working
with torch.no_grad():
self.mu_ch.clamp_(min=-1, max=1) # at eval time, only self.mu is used so it must belong to [-1,1]
self.sigma_ch.clamp_(min=0) # sigma/variance is always a positive quantity
self.mu = torch.cat((self.mu_xy.repeat(1, 1, self.num_filters, 1, 1), self.mu_ch), 4)
self.sigma = torch.cat((self.sigma_xy.repeat(1, 1, self.num_filters, 1, 1), self.sigma_ch), 4)
with torch.no_grad():
self.mu.clamp_(min=-1, max=1)
self.sigma.clamp_(min=0)
grid_shape = (batch_size,) + self.grid_shape[1:]
sample = self.training if sample is None else sample
if sample:
norm = self.mu.new(*grid_shape).normal_()
else:
norm = self.mu.new(*grid_shape).zero_() # for consistency and CUDA capability
return torch.clamp(
norm * self.sigma + self.mu, min=-1, max=1
) # grid locations in feature space sampled randomly around the mean self.mu
@property
def grid(self):
return self.sample_grid(batch_size=1, sample=False)
def feature_l1(self, average=True):
"""
Returns the L1 regularization term: either the mean or the sum of all feature weights
Args:
average(bool): if True, use mean of weights for regularization
"""
if average:
return self.features.abs().mean()
else:
return self.features.abs().sum()
def initialize(self):
if self.shared_mean:
# initialise mu and sigma separately for xy and channel dimension.
self.mu_ch.data.uniform_(-1, 1)
self.mu_xy.data.uniform_(-self.init_mu_range, self.init_mu_range)
if self.fixed_sigma:
self.sigma_ch.data.uniform_(self.init_sigma_range, self.init_sigma_range)
self.sigma_xy.data.uniform_(self.init_sigma_range, self.init_sigma_range)
else:
self.sigma_ch.data.uniform_(0, self.init_sigma_range)
self.sigma_xy.data.uniform_(0, self.init_sigma_range)
warnings.warn(
"sigma is sampled from uniform distribuiton, instead of a fixed value. Consider setting "
"fixed_sigma to True"
)
else:
# initialise mu and sigma for x,y and channel dimensions.
self.mu.data.uniform_(-self.init_mu_range, self.init_mu_range)
self.sigma.data.uniform_(0, self.init_sigma_range)
self.features.data.fill_(1 / self.in_shape[0])
if self.bias is not None:
self.bias.data.fill_(0)
def forward(self, x, sample=True, shift=None, out_idx=None):
"""
Propagates the input forwards through the readout
Args:
x: input data
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e. training or eval) and does as instructed
shift (Tensor or None): shifts the location of the grid (e.g. from eye-tracking data)
out_idx (array-like or None): index of neurons to be predicted
Returns:
y: neuronal activity
"""
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if (c_in, w_in, h_in) != (c, w, h):
raise ValueError("the specified feature map dimension is not the readout's expected input dimension")
x = x.view(N, 1, c, w, h)
feat = self.features
bias = self.bias
outdims = self.outdims
if self.batch_sample:
# sample the grid_locations separately per image per batch
grid = self.sample_grid(batch_size=N, sample=sample) # sample determines sampling from Gaussian
else:
# use one sampled grid_locations for all images in the batch
grid = self.sample_grid(batch_size=1, sample=sample).expand(N, 1, outdims * self.num_filters, 1, 3)
if out_idx is not None:
# predict output only for neurons given by out_idx
if isinstance(out_idx, np.ndarray):
if out_idx.dtype == bool:
out_idx = np.where(out_idx)[0]
feat = feat[:, :, :, out_idx]
grid = grid[:, :, out_idx]
if bias is not None:
bias = bias[out_idx]
outdims = len(out_idx)
if shift is not None: # it might not be valid now but have kept it for future devop.
grid = grid + shift[:, None, None, :]
y = F.grid_sample(x, grid, align_corners=self.align_corners).squeeze(-1)
z = y.view((N, 1, self.num_filters, outdims)).permute(0, 1, 3, 2) # reorder the dims
z = torch.einsum(
"nkpf,mkpf->np", z, feat
) # dim: batch_size, 1, num_neurons, num_filters -> batch_size, num_neurons
if self.bias is not None:
z = z + bias
return z
def __repr__(self):
c, w, h = self.in_shape
r = self.__class__.__name__ + " (" + "{} x {} x {}".format(c, w, h) + " -> " + str(self.outdims) + ")"
if self.bias is not None:
r += " with bias"
for ch in self.children():
r += " -> " + ch.__repr__() + "\n"
return r
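# --- Illustrative usage sketch (not part of the original module) ---
# UltraSparse learns `num_filters` sampled points per neuron and combines them with a
# small per-neuron weight vector. Hypothetical shapes for illustration:
#
#     readout = UltraSparse(in_shape=(64, 8, 8), outdims=100, bias=True,
#                           init_mu_range=0.5, init_sigma_range=0.5,
#                           num_filters=3, shared_mean=True, fixed_sigma=True)
#     core_output = torch.rand(16, 64, 8, 8)
#     responses = readout(core_output, sample=True)   # -> shape (16, 100)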
class AttentionReadout(nn.Module):
def __init__(
self,
in_shape,
outdims,
bias,
init_noise=1e-3,
attention_kernel=1,
attention_layers=1,
**kwargs,
):
super().__init__()
self.in_shape = in_shape
self.outdims = outdims
c, w, h = in_shape
self.features = Parameter(torch.Tensor(self.outdims, c))
attention = nn.Sequential()
for i in range(attention_layers - 1):
attention.add_module(f"conv{i}", nn.Conv2d(c, c, attention_kernel, padding=attention_kernel > 1))
attention.add_module(f"norm{i}", nn.BatchNorm2d(c))
attention.add_module(f"nonlin{i}", nn.ELU())
else:
attention.add_module(
f"conv{attention_layers}",
nn.Conv2d(c, outdims, attention_kernel, padding=attention_kernel > 1),
)
self.attention = attention
self.init_noise = init_noise
if bias:
bias = Parameter(torch.Tensor(self.outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
self.initialize()
@staticmethod
def init_conv(m):
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0)
def initialize_attention(self):
self.apply(self.init_conv)
def initialize(self):
self.features.data.normal_(0, self.init_noise)
if self.bias is not None:
self.bias.data.fill_(0)
self.initialize_attention()
def feature_l1(self, average=True):
if average:
return self.features.abs().mean()
else:
return self.features.abs().sum()
def forward(self, x, shift=None):
attention = self.attention(x)
b, c, w, h = attention.shape
attention = F.softmax(attention.view(b, c, -1), dim=-1).view(b, c, w, h)
y = torch.einsum("bnwh,bcwh->bcn", attention, x)
y = torch.einsum("bcn,nc->bn", y, self.features)
if self.bias is not None:
y = y + self.bias
return y
def __repr__(self):
return self.__class__.__name__ + " (" + "{} x {} x {}".format(*self.in_shape) + " -> " + str(self.outdims) + ")"
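# --- Descriptive note (not part of the original module) ---
# In AttentionReadout.forward above, the attention network maps the input
# (b, c, w, h) to one spatial map per neuron (b, outdims, w, h), which is
# softmax-normalised over the w*h locations. The first einsum pools the input
# features with these maps to (b, c, outdims); the second contracts the channel
# dimension with the per-neuron feature weights to give (b, outdims) responses.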
# ------------ Multi Readouts ------------------------
class MultiReadout(Readout, ModuleDict):
_base_readout = None
def __init__(self, in_shape, loaders, gamma_readout, clone_readout=False, **kwargs):
if self._base_readout is None:
raise ValueError("Attribute _base_readout must be set")
super().__init__()
self.in_shape = in_shape
self.neurons = OrderedDict([(k, loader.dataset.n_neurons) for k, loader in loaders.items()])
if "positive" in kwargs:
self._positive = kwargs["positive"]
self.gamma_readout = gamma_readout # regularisation strength
for i, (k, n_neurons) in enumerate(self.neurons.items()):
if i == 0 or clone_readout is False:
self.add_module(
k,
self._base_readout(in_shape=in_shape, outdims=n_neurons, **kwargs),
)
original_readout = k
elif i > 0 and clone_readout is True:
self.add_module(k, ClonedReadout(self[original_readout], **kwargs))
def initialize(self, mean_activity_dict):
for k, mu in mean_activity_dict.items():
self[k].initialize()
if hasattr(self[k], "bias"):
self[k].bias.data = mu.squeeze() - 1
def regularizer(self, readout_key):
return self[readout_key].feature_l1() * self.gamma_readout
@property
def positive(self):
if hasattr(self, "_positive"):
return self._positive
else:
return False
@positive.setter
def positive(self, value):
self._positive = value
for k in self:
self[k].positive = value
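# --- Descriptive note (not part of the original module) ---
# MultiReadout builds one readout per data key: for each (key, n_neurons) pair taken
# from the loaders it registers self[key] = _base_readout(in_shape, outdims=n_neurons, ...),
# or a ClonedReadout of the first one when clone_readout=True. A hypothetical call such as
# multi_readout.regularizer("session1") then returns that session's feature L1 penalty
# scaled by gamma_readout.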
class MultiplePointPyramid2d(MultiReadout):
_base_readout = PointPyramid2d
class MultipleGaussian3d(MultiReadout):
"""
Instantiates multiple instances of Gaussian3d Readouts
usually used when dealing with different datasets or areas sharing the same core.
Args:
in_shape (list): shape of the input feature map [channels, width, height]
loaders (list): a list of dataset objects
gamma_readout (float): regularisation term for the readout, which is usually set to 0.0 for the Gaussian3d readout
as it contains a one-dimensional weight
"""
_base_readout = Gaussian3d
# Make sure this is not a bug
def regularizer(self, readout_key):
return self.gamma_readout
class MultiplePointPooled2d(MultiReadout):
"""
Instantiates multiple instances of PointPool2d Readouts
usually used when dealing with more than one dataset sharing the same core.
"""
_base_readout = PointPooled2d
class MultipleFullGaussian2d(MultiReadout):
"""
Instantiates multiple instances of FullGaussian2d Readouts
usually used when dealing with more than one dataset sharing the same core.
Args:
in_shape (list): shape of the input feature map [channels, width, height]
loaders (list): a list of dataloaders
gamma_readout (float): regularizer for the readout
"""
_base_readout = FullGaussian2d
class MultipleUltraSparse(MultiReadout):
"""
This class instantiates multiple instances of UltraSparseReadout
useful when dealing with multiple datasets
Args:
in_shape (list): shape of the input feature map [channels, width, height]
loaders (list): a list of dataset objects
gamma_readout (float): regularisation term for the readout, which is usually set to 0.0 for the UltraSparse readout
as it contains a one-dimensional weight
"""
_base_readout = UltraSparse
|
the-stack_0_27863
|
"""
Copyright (C) 2019-2020 Craig Thomas
This project uses an MIT style license - see LICENSE for details.
A Color Computer Assembler - see the README.md file for details.
"""
# I M P O R T S ###############################################################
from cocoasm.virtualfiles.virtualfile import VirtualFile, CoCoFile
from cocoasm.values import Value
from typing import NamedTuple
# C L A S S E S ###############################################################
class Preamble(NamedTuple):
"""
The Preamble class is used to store information relating to a binary file
on a disk image. The Preamble only contains the load address and the length
of data for the binary file.
"""
load_addr: Value = None
data_length: Value = None
class Postamble(NamedTuple):
"""
The Postamble class is used to store information relating to a binary file
on a disk image. The Postamble is stored at the end of a binary file and
contains the exec address for the binary.
"""
exec_addr: Value = None
class DiskFile(VirtualFile):
FAT_OFFSET = 78592
DIR_OFFSET = 78848
HALF_TRACK_LEN = 2304
def __init__(self):
super().__init__()
self.raw_data = []
def is_correct_type(self):
if not self.host_file:
raise ValueError("No file currently open")
if not self.read_mode:
raise ValueError("[{}] not open for reading".format(self.filename))
self.host_file.seek(0, 2)
size = self.host_file.tell()
return size == 161280
def list_files(self, filenames=None):
files = []
# Read the File Allocation Table
self.host_file.seek(DiskFile.FAT_OFFSET, 0)
fat = self.host_file.read(256)
# Move through elements in the Directory Table and read them into CoCoFile objects
self.host_file.seek(DiskFile.DIR_OFFSET, 0)
for file_number in range(0, 72):
next_byte = Value.create_from_byte(self.host_file.peek(1)[:1])
if next_byte.hex() == "00" or next_byte.hex() == "FF":
self.host_file.seek(32, 1)
else:
name = "{}".format(self.host_file.read(8).decode("utf-8").replace(" ", ""))
extension = "{}".format(self.host_file.read(3).decode("utf-8"))
file_type = Value.create_from_byte(self.host_file.read(1))
data_type = Value.create_from_byte(self.host_file.read(1))
starting_granule = Value.create_from_byte(self.host_file.read(1))
current_location = self.host_file.tell()
preamble = DiskFile.read_preamble(self.host_file, starting_granule.int)
file_data = self.read_data(
self.host_file,
starting_granule.int,
has_preamble=True,
data_length=preamble.data_length.int,
fat=fat,
)
postamble = DiskFile.read_postamble(self.host_file)
self.host_file.seek(current_location, 0)
coco_file = CoCoFile(
name=name,
extension=extension,
type=file_type,
data_type=data_type,
load_addr=preamble.load_addr,
exec_addr=postamble.exec_addr,
data=file_data,
ignore_gaps=True
)
files.append(coco_file)
self.host_file.seek(19, 1)
return files
@classmethod
def seek_granule(cls, file, granule):
"""
Seeks to the specified granule in the disk image. Modifies the file
object pointer to start at the specified granule.
:param file: the file object to use
:param granule: the granule to seek to
"""
granule_offset = DiskFile.HALF_TRACK_LEN * granule
if granule > 33:
granule_offset += DiskFile.HALF_TRACK_LEN * 2
file.seek(granule_offset, 0)
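# Worked example (illustrative comment, not part of the original source): each
# granule is half a track (2304 bytes), so granule 10 starts at 10 * 2304 = 23040.
# Granules above 33 are shifted by a further 2 * 2304 bytes, so granule 34 starts at
# 34 * 2304 + 4608 = 82944; the skipped region is where the FAT (offset 78592) and
# directory entries (offset 78848) live on the disk image.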
@classmethod
def read_preamble(cls, file, starting_granule):
"""
Reads the preamble data for the file. The preamble is a collection of 5
bytes at the start of a binary file:
byte 0 - always $00
byte 1,2 - the data length of the file
byte 3,4 - the load address for the file
:param file: the file object to modify
:param starting_granule: the granule number that contains the preamble
:return: a populated Preamble object
"""
DiskFile.seek_granule(file, starting_granule)
preamble_flag = Value.create_from_byte(file.read(1))
if preamble_flag.hex() != "00":
raise ValueError("Invalid preamble flag {}".format(preamble_flag.hex()))
return Preamble(
data_length=Value.create_from_byte(file.read(2)),
load_addr=Value.create_from_byte(file.read(2))
)
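# Worked example (illustrative comment, hypothetical byte values, assuming the usual
# big-endian interpretation): a preamble of 00 1F 40 3E 00 decodes as flag $00,
# data_length $1F40 (8000 bytes) and load_addr $3E00, matching the byte layout
# described in the docstring above.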
@classmethod
def read_postamble(cls, file):
"""
Reads the postamble of a binary file. The postamble is a collection of
5 bytes as follows:
byte 0 - always $FF
byte 1,2 - always $00, $00
byte 3,4 - the exec address of the binary file
:param file: the file object to modify
:return: a populated Postamble object
"""
postamble_flag = Value.create_from_byte(file.read(1))
if postamble_flag.hex() != "FF":
raise ValueError("Invalid first postamble flag {}".format(postamble_flag.hex()))
postamble_flag = Value.create_from_byte(file.read(2))
if postamble_flag.hex() != "00":
raise ValueError("Invalid second postamble flag {}".format(postamble_flag.hex()))
return Postamble(
exec_addr=Value.create_from_byte(file.read(2)),
)
@classmethod
def read_data(cls, file, starting_granule, has_preamble=False, data_length=0, fat=[]):
"""
Reads a collection of data from a disk image.
:param file: the file object containing data to read from
:param starting_granule: the starting granule for the file
:param has_preamble: whether there is a preamble to be read
:param data_length: the length of data to read
:param fat: the File Allocation Table data for the disk
:return: the raw data from the specified file
"""
DiskFile.seek_granule(file, starting_granule)
file_data = []
chunk_size = DiskFile.HALF_TRACK_LEN
# Skip over preamble if it exists
if has_preamble:
file.read(5)
chunk_size -= 5
# Check to see if we are reading more than one granule
if data_length > chunk_size:
for _ in range(chunk_size):
file_data.append(Value.create_from_byte(file.read(1)).int)
data_length -= 1
next_granule = fat[starting_granule]
file_data.extend(DiskFile.read_data(file, next_granule, data_length=data_length, fat=fat))
else:
for _ in range(data_length):
file_data.append(Value.create_from_byte(file.read(1)).int)
return file_data
def save_to_host_file(self, coco_file):
pass
# E N D O F F I L E #######################################################
|
the-stack_0_27866
|
import ctypes as ct
import sys
import os
# Define the record header struct
class HEADER(ct.Structure):
_fields_ = [("RecordStatus", ct.c_ubyte),
("UserID", ct.c_ubyte),
("Channel", ct.c_ubyte),
("DataFormat", ct.c_ubyte),
("SerialNumber", ct.c_uint32),
("RecordNumber", ct.c_uint32),
("SamplePeriod", ct.c_int32),
("Timestamp", ct.c_int64),
("RecordStart", ct.c_int64),
("RecordLength", ct.c_uint32),
("Reserved", ct.c_uint32)]
# This function loads the ADQAPI library using ctypes
def adqapi_load(path='', quiet=False):
if os.name == 'nt':
if path == '':
ADQAPI = ct.cdll.LoadLibrary('ADQAPI.dll')
else:
ADQAPI = ct.cdll.LoadLibrary(path)
else:
if path == '':
ADQAPI = ct.cdll.LoadLibrary('libadq.so')
else:
ADQAPI = ct.cdll.LoadLibrary(path)
# Manually set return type from some ADQAPI functions
ADQAPI.CreateADQControlUnit.restype = ct.c_void_p
ADQAPI.ADQ_GetRevision.restype = ct.c_void_p
ADQAPI.ADQ_GetPtrStream.restype = ct.POINTER(ct.c_int16)
ADQAPI.ADQControlUnit_FindDevices.argtypes = [ct.c_void_p]
ADQAPI.ADQ_GetBoardSerialNumber.restype = ct.c_char_p
ADQAPI.ADQ_GetBoardProductName.restype = ct.c_char_p
# Print ADQAPI revision
if not quiet:
print('ADQAPI loaded, revision {:d}.'.format(ADQAPI.ADQAPI_GetRevision()))
return ADQAPI
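# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical load/discover/unload sequence using only the helpers defined in this
# file plus the ADQAPI calls already referenced above; device-specific configuration
# is omitted:
#
#     ADQAPI = adqapi_load()
#     adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
#     ADQAPI.ADQControlUnit_FindDevices(adq_cu)
#     print_adq_device_revisions(ADQAPI, adq_cu, 1)  # devices are numbered from 1
#     adqapi_unload(ADQAPI)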
# This function unloads the ADQAPI library using ctypes
def adqapi_unload(ADQAPI):
if os.name == 'nt':
# Unload DLL
ct.windll.kernel32.FreeLibrary(ADQAPI._handle)
# Convenience function when printing status from ADQAPI functions
def adq_status(status):
if (status==0):
return 'FAILURE'
else:
return 'OK'
# Print revision info for an ADQ device
def print_adq_device_revisions(ADQAPI, adq_cu, adq_num):
# Get revision info from ADQ
rev = ADQAPI.ADQ_GetRevision(adq_cu, adq_num)
revision = ct.cast(rev,ct.POINTER(ct.c_int))
print('\nConnected to ADQ #{:d}'.format(adq_num))
# Print revision information
print('FPGA Revision: {}'.format(revision[0]))
if (revision[1]):
print('Local copy')
else:
print('SVN Managed')
if (revision[2]):
print('Mixed Revision')
else:
print('SVN Updated')
print('')
# This function sets an alternating background color for a matplotlib plot
def alternate_background(ax, start_point, widths, labels=False,
color='#dddddd'):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
ax.relim()
# update ax.viewLim using the new dataLim
ax.autoscale_view()
plt.draw()
# Calculate starting points
edges = start_point+np.cumsum(np.append([0],widths))
# Set plot x axis length
ax.set_xlim(start_point, edges[-1])
ylim=ax.get_ylim()
# Draw colored fields for every other width
for idx in range(1,len(edges)-1,2):
ax.add_patch(
patches.Rectangle(
(edges[idx], ylim[0]), # point(x,y)
widths[idx], # width
ylim[1]-ylim[0], # height
facecolor=color,
edgecolor='none',
zorder=-20
)
)
# Optionally draw labels
if labels==True:
for idx in range(0,len(edges)-1):
# Set y-position 1% under top
ypos=(ylim[1])-0.01*(ylim[1]-ylim[0])
# Enumerate fields
plt.text(edges[idx], ypos,
'R{}'.format(idx), verticalalignment='top')
|
the-stack_0_27867
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open("README.md") as f:
readme = f.read()
with open("LICENSE") as f:
license = f.read()
setup(
name="wamr-python",
version="0.1.0",
description="A WebAssembly runtime powered by WAMR",
long_description=readme,
author="The WAMR Project Developers",
author_email="[email protected]",
url="https://github.com/bytecodealliance/wamr-python",
license=license,
packages=["wamr"],
)
|
the-stack_0_27868
|
"""
Copy files as all sub-scales
.. note:: all paths are hard-coded to be used in CMP grid
"""
import logging
import os
import shutil
import sys
import pandas as pd
import tqdm
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.benchmark import ImRegBenchmark
from birl.utilities.dataset import parse_path_scale
from bm_ANHIR.generate_regist_pairs import VAL_STATUS_TRAIN
PATH_CSV = '/datagrid/Medical/dataset_ANHIR/images/dataset_medium.csv'
PATH_IMAGES = '/datagrid/Medical/dataset_ANHIR/images'
PATH_IMAGES_ALL = '/datagrid/Medical/dataset_ANHIR/images_private'
PATH_LANDMARKS = '/datagrid/Medical/dataset_ANHIR/landmarks'
PATH_LANDMARKS_ALL = '/datagrid/Medical/dataset_ANHIR/landmarks_user'
SCALES = (2, 5, 10, 15, 20, 25, 50, 100)
FOLDER_NAME = 'scale-%ipc'
FORCE_COPY = False
def main(path_csv, path_in, path_out, col_name, train_only=True):
""" main entry point
:param str path_csv: path to dataset cover
:param str path_in: path to input images
:param str path_out: path to output images
:param str col_name: column from the cover table
:param bool train_only: use only training cases
"""
df = pd.read_csv(path_csv)
if train_only:
df = df[df[ImRegBenchmark.COL_STATUS] == VAL_STATUS_TRAIN]
files = df[col_name]
for p_file in tqdm.tqdm(files, desc=col_name):
scale = parse_path_scale(os.path.dirname(p_file))
# print(scale, SCALES[:SCALES.index(scale)])
tissue_name = p_file.split(os.path.sep)[0]
case_name = os.path.basename(p_file)
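# SCALES[:SCALES.index(scale) + 1] keeps every scale up to and including the source
# scale, e.g. a file found under scale-25pc is copied to the 2/5/10/15/20/25 percent folders.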
for sc in SCALES[:SCALES.index(scale) + 1]:
path_file = os.path.join(tissue_name, FOLDER_NAME % sc, case_name)
path_dir = os.path.join(path_out, tissue_name, FOLDER_NAME % sc)
if not os.path.isdir(path_dir):
os.makedirs(path_dir)
path_src = os.path.join(path_in, path_file)
path_dst = os.path.join(path_out, path_file)
if not os.path.isfile(path_src):
logging.debug('missing source file: %s', path_src)
continue
# print(path_src, path_dst)
if not os.path.isfile(path_dst) or FORCE_COPY:
shutil.copy(path_src, path_dst)
elif os.path.isfile(path_dst) and not FORCE_COPY:
logging.debug('existing target file: %s', path_dst)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('running...')
main(PATH_CSV, PATH_LANDMARKS_ALL, PATH_LANDMARKS, ImRegBenchmark.COL_POINTS_REF, train_only=True)
main(PATH_CSV, PATH_LANDMARKS_ALL, PATH_LANDMARKS, ImRegBenchmark.COL_POINTS_MOVE, train_only=False)
main(PATH_CSV, PATH_IMAGES_ALL, PATH_IMAGES, ImRegBenchmark.COL_IMAGE_REF, train_only=False)
main(PATH_CSV, PATH_IMAGES_ALL, PATH_IMAGES, ImRegBenchmark.COL_IMAGE_MOVE, train_only=False)
logging.info('Done >]')
|
the-stack_0_27869
|
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import io
import logging
from pypykatz.commons.common import *
from pypykatz.crypto.des import *
from pypykatz.crypto.aes import AESModeOfOperationCBC
from pypykatz.lsadecryptor.lsa_templates import *
class LsaDecryptor(PackageDecryptor):
def __init__(self, reader, decryptor_template, sysinfo):
super().__init__('LsaDecryptor', None, sysinfo, reader)
self.decryptor_template = decryptor_template
self.iv = None
self.aes_key = None
self.des_key = None
self.acquire_crypto_material()
def acquire_crypto_material(self):
self.log('Acquiring crypto stuff...')
sigpos = self.find_signature()
self.reader.move(sigpos)
data = self.reader.peek(0x50)
self.log('Memory looks like this around the signature\n%s' % hexdump(data, start = sigpos))
self.iv = self.get_IV(sigpos)
self.des_key = self.get_des_key(sigpos)
self.aes_key = self.get_aes_key(sigpos)
def get_des_key(self, pos):
self.log('Acquiring DES key...')
return self.get_key(pos, self.decryptor_template.key_pattern.offset_to_DES_key_ptr)
def get_aes_key(self, pos):
self.log('Acquiring AES key...')
return self.get_key(pos, self.decryptor_template.key_pattern.offset_to_AES_key_ptr)
def find_signature(self):
self.log('Looking for main struct signature in memory...')
fl = self.reader.find_in_module('lsasrv.dll', self.decryptor_template.key_pattern.signature)
if len(fl) == 0:
logging.warning('signature not found! %s' % self.decryptor_template.key_pattern.signature.hex())
raise Exception('LSA signature not found!')
self.log('Found candidates on the following positions: %s' % ' '.join(hex(x) for x in fl))
self.log('Selecting first one @ 0x%08x' % fl[0])
return fl[0]
def get_IV(self, pos):
self.log('Reading IV')
#print('Offset to IV: %s' % hex(self.decryptor_template.key_pattern.offset_to_IV_ptr))
ptr_iv = self.reader.get_ptr_with_offset(pos + self.decryptor_template.key_pattern.offset_to_IV_ptr)
self.log('IV pointer takes us to 0x%08x' % ptr_iv)
self.reader.move(ptr_iv)
data = self.reader.read(self.decryptor_template.key_pattern.IV_length)
self.log('IV data: %s' % hexdump(data))
return data
def get_key(self, pos, key_offset):
ptr_key = self.reader.get_ptr_with_offset(pos + key_offset)
self.log('key handle pointer is @ 0x%08x' % ptr_key)
ptr_key = self.reader.get_ptr(ptr_key)
self.log('key handle is @ 0x%08x' % ptr_key)
self.reader.move(ptr_key)
data = self.reader.peek(0x50)
self.log('BCRYPT_HANDLE_KEY_DATA\n%s' % hexdump(data, start = ptr_key))
kbhk = self.decryptor_template.key_handle_struct(self.reader)
if kbhk.verify():
ptr_key = kbhk.ptr_key.value
self.reader.move(ptr_key)
data = self.reader.peek(0x50)
self.log('BCRYPT_KEY_DATA\n%s' % hexdump(data, start = ptr_key))
kbk = kbhk.ptr_key.read(self.reader, self.decryptor_template.key_struct)
self.log('HARD_KEY SIZE: 0x%x' % kbk.size)
if kbk.verify():
self.log('HARD_KEY data:\n%s' % hexdump(kbk.hardkey.data))
return kbk.hardkey.data
def decrypt(self, encrypted):
# TODO: NT version specific, move from here in subclasses.
cleartext = b''
size = len(encrypted)
if size:
if size % 8:
if not self.aes_key or not self.iv:
return cleartext
cipher = AESModeOfOperationCBC(self.aes_key, iv = self.iv)
n = 16
for block in [encrypted[i:i+n] for i in range(0, len(encrypted), n)]: #terrible, terrible workaround
cleartext += cipher.decrypt(block)
else:
if not self.des_key or not self.iv:
return cleartext
#cipher = DES3.new(self.des_key, DES3.MODE_CBC, self.iv[:8])
cipher = triple_des(self.des_key, CBC, self.iv[:8])
cleartext = cipher.decrypt(encrypted)
return cleartext
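# Descriptive note (comment added for clarity, not in the original source): the
# size % 8 test above distinguishes the two LSA encryption schemes -- buffers whose
# length is not a multiple of 8 are decrypted with AES-CBC in 16-byte blocks using
# self.aes_key and self.iv, while 8-byte-aligned buffers use 3DES-CBC with
# self.des_key and the first 8 bytes of the IV.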
def dump(self):
self.log('Recovered LSA encryption keys\n')
self.log('IV ({}): {}'.format(len(self.iv), self.iv.hex()))
self.log('DES_KEY ({}): {}'.format(len(self.des_key), self.des_key.hex()))
self.log('AES_KEY ({}): {}'.format(len(self.aes_key), self.aes_key.hex()))
|
the-stack_0_27871
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def show_hiragana(msg):
print("sub: " + msg.data)
if __name__ == "__main__":
rospy.init_node("led_sub")
sub = rospy.Subscriber('MorsePulse', String, show_hiragana)
rospy.spin()
|
the-stack_0_27872
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Function that returns the sigmoid of the linear combination X.dot(theta)
def sigmoid(theta, X):
return 1/(1+np.exp(-np.dot(X, theta.T)))
# Computes the cross-entropy cost of the model; training minimizes this value
def gradient(X, theta, y):
m = len(y)
sigmoid_result = sigmoid(theta, X)
value = (-1/m)*np.sum(y*np.log(sigmoid_result) + (1-y)*np.log(1-sigmoid_result))
return value
# Returns the gradient (partial derivatives) of the cost with respect to theta
def log_gradient(theta, X, y):
m = len(y)
h = sigmoid(theta, X) - y
result = (1/m)*np.dot(h.T, X)
return result
# Original gradient descent algorithm which minimize the cost function (gradient)
def gradientDescent(X, theta, y, learning_rate=.01, converge_change=.00001):
cost = gradient(X, theta, y)
change_cost=1
n=1
while(change_cost > converge_change):
old_cost=cost
theta = theta - learning_rate*log_gradient(theta,X,y)
cost = gradient(X, theta, y)
change_cost = old_cost - cost
n+=1
print(n)
return theta
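# Descriptive note (comment added for clarity): each iteration above applies the
# batch gradient-descent update
#     theta := theta - learning_rate * (1/m) * X^T (sigmoid(X theta) - y)
# and stops once the decrease in the cross-entropy cost between iterations falls
# below converge_change.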
# Method to train our model on training data
def fit(X_train, y_train):
theta = np.zeros(X_train.shape[1])
updated_parameters = gradientDescent(X_train, theta, y_train)
return updated_parameters
# Method to predict output on new data or test data
def predict(X_test, final_theta):
predicted_probabilities = sigmoid(final_theta, X_test)
predicted_value = np.where(predicted_probabilities >= .5, 1, 0)
return predicted_value
# method to visualize logistic regression. A plot between all samples and decision boundary
def plot_reg(X, y, theta):
'''
function to plot decision boundary
'''
# labelled observations
X=np.array(X)
x_0 = pd.DataFrame(X[np.where(y == 0)])
x_1 = pd.DataFrame(X[np.where(y == 1)])
# plotting points with diff color for diff label
plt.scatter(x_0.iloc[:,1], x_0.iloc[:,2], c='b', label='y = 0')
plt.scatter(x_1.iloc[:,1], x_1.iloc[:, 2], c='r', label='y = 1')
print(theta)
# plotting decision boundary
x1 = np.arange(0, 10, 1)
x2 = -(theta[0] + theta[1]*x1)/theta[2]
plt.plot(x1, x2, c='k', label='reg line')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.show()
if __name__ == "__main__":
df = pd.read_csv("data.csv")
df.insert(0, 'x0', 1.0)
X_train = df.iloc[:,0:3]
y_train = df['label']
parameters = fit(X_train, y_train)
X_test = np.array([[1, 5.123, 6.872], [1, 1.239, 6.165], [1, 8.6254, 7.829], [1, 2.382, 7.525], [1, 9.282, 1.626], [1, 3.272, 5.737], [1, 6.345, 4.276], [1, 3.372, 8.238]])
result = predict(X_test, parameters)
print(result)
plot_reg(X_train, y_train, parameters)
|