ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---|
py | 1a3f987b8e464135f9e331a215c2c0a6073e02ab | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from symbol.resnet import *
from symbol.config import config
from symbol.processing import bbox_pred, clip_boxes, nms
import face_embedding
from mapr_streams_python import Consumer, KafkaError, Producer
import numpy as np
import cv2, os, json, time, sys, pickle
import mxnet as mx
import argparse, random, sklearn
import tensorflow as tf
from scipy import misc
from sklearn.decomposition import PCA
from time import sleep
from easydict import EasyDict as edict
from mtcnn_detector import MtcnnDetector
import face_image, face_preprocess
from flask import Flask, Response
app = Flask(__name__)
@app.route('/')
def index():
return Response(kafkastream(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def ch_dev(arg_params, aux_params, ctx):
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def resize(im, target_size, max_size):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param target_size: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return:
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im, im_scale
def get_face_embedding(filename, arg_params, aux_params, sym, model, ctx):
img_orig = cv2.imread(filename)
img_orig = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
img, scale = resize(img_orig.copy(), 600, 1000)
im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32) # (h, w, scale)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2) # change to (c, h, w) order
img = img[np.newaxis, :] # extend to (n, c, h, w)
arg_params["data"] = mx.nd.array(img, ctx)
arg_params["im_info"] = mx.nd.array(im_info, ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
output_dict = {name: nd for name, nd in zip(sym.list_outputs(), exe.outputs)}
rois = output_dict['rpn_rois_output'].asnumpy()[:, 1:] # first column is index
scores = output_dict['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output_dict['bbox_pred_reshape_output'].asnumpy()[0]
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, (im_info[0][0], im_info[0][1]))
cls_boxes = pred_boxes[:, 4:8]
cls_scores = scores[:, 1]
keep = np.where(cls_scores >0.6)[0]
cls_boxes = cls_boxes[keep, :]
cls_scores = cls_scores[keep]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets.astype(np.float32), 0.3)
dets = dets[keep, :]
bbox = dets[0, :4]
roundfunc = lambda t: int(round(t/scale))
vfunc = np.vectorize(roundfunc)
bbox = vfunc(bbox)
f_vector, jpeg = model.get_feature(img_orig, bbox, None)
fT = f_vector.T
return fT
def kafkastream():
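"""Consume frames and face embeddings from the read stream, pick the detected
face most similar to the reference embedding, and yield annotated frames as a
multipart HTTP response (optionally re-publishing them to the write topic)."""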
if args.gpuid >= 0:
ctx = mx.gpu(args.gpuid)
else:
ctx = mx.cpu()
_, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)
arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
sym = resnet_50(num_class=2)
model = face_embedding.FaceModel(args.gpuid)
f1T = get_face_embedding(args.filename, arg_params, aux_params, sym, model, ctx)
c = Consumer({'group.id': args.groupid,
'default.topic.config': {'auto.offset.reset': 'earliest', 'enable.auto.commit': 'false'}})
c.subscribe([args.readstream+':'+args.readtopic])
running = True
p = Producer({'streams.producer.default.stream': args.writestream})
while running:
msg = c.poll(timeout=0)
if msg is None: continue
if not msg.error():
pickle_vector = pickle.loads(msg.value())
nparr = np.frombuffer(pickle_vector[0], np.uint8)
img_orig = cv2.imdecode(nparr, 1)
bbox_vector = pickle_vector[1]
print(len(bbox_vector))
embedding_vector = pickle_vector[2]
if len(embedding_vector) > 0:
sim_vector = [np.dot(f, f1T) for f in embedding_vector]
idx = sim_vector.index(max(sim_vector))
bbox = bbox_vector[idx]
sim = sim_vector[idx]
if sim > args.threshold:
img = cv2.cvtColor(img_orig, cv2.COLOR_RGB2BGR)
cv2.rectangle(img, (int(round(bbox[0])), int(round(bbox[1]))),
(int(round(bbox[2])), int(round(bbox[3]))), (0, 255, 0), 2)
ret, jpeg = cv2.imencode('.png', img)
bytecode = jpeg.tobytes()
time.sleep(args.timeout)
yield (b'--frame\r\n'
b'Content-Type: image/png\r\n\r\n' + bytecode + b'\r\n\r\n')
if args.writetostream:
p.produce(args.writetopic, jpeg.tostring())
print(args.writetopic)
elif msg.error().code() != KafkaError._PARTITION_EOF:
print(msg.error())
running = False
c.close()
p.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='mapr consumer settings')
parser.add_argument('--groupid', default='dong001', help='mapr consumer to read from')
parser.add_argument('--gpuid', default='-1', type=int, help='')
parser.add_argument('--port', default='5013', type=int, help='')
parser.add_argument('--threshold', default='0.3', type=float, help='')
parser.add_argument('--readstream', default='/tmp/processedvideostream', help='')
parser.add_argument('--writestream', default='/tmp/identifiedstream', help='')
parser.add_argument('--timeout', default='0.3', type=float, help='')
parser.add_argument('--writetostream', default='0', type=int, help='')
parser.add_argument('--writetopic', default='sam', help='topic to write to')
parser.add_argument('--readtopic', default='topic1', help='topic to read from')
parser.add_argument('--filename', default='sam_.jpg', help='')
args = parser.parse_args()
app.run(host='0.0.0.0', port=args.port, debug=True)
|
py | 1a3f987fbc1d08006d7616db2b19aa6772c9d1bf | # additional transforms for okutama-action dataset
import random
from PIL import Image, ImageOps
class GroupRandomVerticalFlip(object):
"""
Randomly vertical flips the given PIL.Image with a probability of 0.5
"""
def __init__(self, is_flow=False):
self.is_flow = is_flow
def __call__(self, img_group, is_flow=False):
v = random.random()
if v < 0.5:
ret = [img.transpose(Image.FLIP_TOP_BOTTOM) for img in img_group]
if self.is_flow:
for i in range(1, len(ret), 2):
# invert y_flow pixel values when flipping
ret[i] = ImageOps.invert(ret[i])
return ret
else:
return img_group
|
py | 1a3f99851fa6d5c6fbb5e367a6f3a84c300f20c3 | #!/usr/bin/env python3
# Copyright 2011 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Processes an LLVM assembly (.ll) file, adding debugging information.
You can then run the .ll file in the LLVM interpreter (lli) and
compare that to the output when compiled using emscripten.
"""
from __future__ import print_function
import re
import sys
ALLOW_POINTERS = True
ALLOW_MISC = True
MEMCPY = False
MEMCPY2 = False
NO_DLMALLOC = False
JS_LIB_PRINTING = True
POSTAMBLE = '''
@.emscripten.autodebug.str = private constant [10 x i8] c"AD:%d,%d\\0A\\00", align 1 ; [#uses=1]
@.emscripten.autodebug.str.f = private constant [11 x i8] c"AD:%d,%lf\\0A\\00", align 1 ; [#uses=1]
@.emscripten.autodebug.str.64 = private constant [13 x i8] c"AD:%d,%d,%d\\0A\\00", align 1 ; [#uses=1]
'''
if JS_LIB_PRINTING:
POSTAMBLE += '''
; [#uses=1]
declare void @emscripten_autodebug_i64(i32 %line, i64 %value)
; [#uses=1]
declare void @emscripten_autodebug_i32(i32 %line, i32 %value)
; [#uses=1]
declare void @emscripten_autodebug_i16(i32 %line, i16 %value)
; [#uses=1]
declare void @emscripten_autodebug_i8(i32 %line, i8 %value)
; [#uses=1]
declare void @emscripten_autodebug_float(i32 %line, float %value)
; [#uses=1]
declare void @emscripten_autodebug_double(i32 %line, double %value)
'''
else:
POSTAMBLE += '''
; [#uses=1]
define void @emscripten_autodebug_i64(i32 %line, i64 %value) {
entry:
%0 = trunc i64 %value to i32
%1 = lshr i64 %value, 32
%2 = trunc i64 %1 to i32
%3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.emscripten.autodebug.str.64, i32 0, i32 0), i32 %line, i32 %0, i32 %2) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i32(i32 %line, i32 %value) {
entry:
%0 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %value) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i16(i32 %line, i16 %value) {
entry:
%0 = zext i16 %value to i32 ; [#uses=1]
%1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %0) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_i8(i32 %line, i8 %value) {
entry:
%0 = zext i8 %value to i32 ; [#uses=1]
%1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.emscripten.autodebug.str, i32 0, i32 0), i32 %line, i32 %0) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_float(i32 %line, float %value) {
entry:
%0 = fpext float %value to double ; [#uses=1]
%1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.emscripten.autodebug.str.f, i32 0, i32 0), i32 %line, double %0) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
; [#uses=1]
define void @emscripten_autodebug_double(i32 %line, double %value) {
entry:
%0 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.emscripten.autodebug.str.f, i32 0, i32 0), i32 %line, double %value) ; [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
'''
if MEMCPY:
POSTAMBLE = '''
@.emscripten.memcpy.str = private constant [7 x i8] c"MC:%d\\0A\\00", align 1 ; [#uses=1]
''' + POSTAMBLE + '''
; [#uses=1]
define void @emscripten_memcpy(i8* %destination, i8* %source, i32 %num, i32 %whati, i1 %sthis) nounwind {
entry:
%destination.addr = alloca i8*, align 4 ; [#uses=3]
%source.addr = alloca i8*, align 4 ; [#uses=2]
%num.addr = alloca i32, align 4 ; [#uses=3]
%i = alloca i32, align 4 ; [#uses=5]
%src = alloca i8*, align 4 ; [#uses=5]
%dst = alloca i8*, align 4 ; [#uses=4]
store i8* %destination, i8** %destination.addr, align 4
store i8* %source, i8** %source.addr, align 4
store i32 %num, i32* %num.addr, align 4
%tmp = load i8** %source.addr, align 4 ; [#uses=1]
store i8* %tmp, i8** %src, align 4
%tmp2 = load i8** %destination.addr, align 4 ; [#uses=1]
store i8* %tmp2, i8** %dst, align 4
store i32 0, i32* %i, align 4
%tmp31 = load i32* %i, align 4 ; [#uses=1]
%tmp42 = load i32* %num.addr, align 4 ; [#uses=1]
%cmp3 = icmp ult i32 %tmp31, %tmp42 ; [#uses=1]
br i1 %cmp3, label %for.body, label %for.end
for.body: ; preds = %for.body, %entry
%tmp5 = load i8** %src, align 4 ; [#uses=1]
%tmp6 = load i8* %tmp5 ; [#uses=1]
%conv = zext i8 %tmp6 to i32 ; [#uses=1]
%call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8]* @.emscripten.memcpy.str, i32 0, i32 0), i32 %conv); [#uses=0]
%tmp7 = load i8** %src, align 4 ; [#uses=1]
%tmp8 = load i8* %tmp7 ; [#uses=1]
%tmp9 = load i8** %dst, align 4 ; [#uses=1]
store i8 %tmp8, i8* %tmp9
%tmp10 = load i32* %i, align 4 ; [#uses=1]
%inc = add i32 %tmp10, 1 ; [#uses=1]
store i32 %inc, i32* %i, align 4
%tmp11 = load i8** %src, align 4 ; [#uses=1]
%incdec.ptr = getelementptr inbounds i8* %tmp11, i32 1 ; [#uses=1]
store i8* %incdec.ptr, i8** %src, align 4
%tmp12 = load i8** %dst, align 4 ; [#uses=1]
%incdec.ptr13 = getelementptr inbounds i8* %tmp12, i32 1 ; [#uses=1]
store i8* %incdec.ptr13, i8** %dst, align 4
%tmp3 = load i32* %i, align 4 ; [#uses=1]
%tmp4 = load i32* %num.addr, align 4 ; [#uses=1]
%cmp = icmp ult i32 %tmp3, %tmp4 ; [#uses=1]
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body, %entry
%tmp14 = load i8** %destination.addr, align 4 ; [#uses=1]
ret void
}
'''
def main():
global POSTAMBLE
filename, ofilename = sys.argv[1], sys.argv[2]
with open(filename, 'r') as f:
data = f.read()
if not re.search(r'(declare.*@printf\(|define.*@printf\()', data):
POSTAMBLE += '''
; [#uses=1]
declare i32 @printf(i8*, ...)
'''
summaries = re.search(r'\^0 = module:', data)
if summaries:
summaries_start = summaries.start()
# Strip ThinLTO summaries since we don't want to have to generate
# summaries for the functions we are adding. Currently llvm-as will
# assert if it finds summaries for some, but not all, functions.
print("warning: stripping ThinLTO summaries", file=sys.stderr)
data = data[:summaries_start]
lines_added = 0
in_func = False
added_entry = False
lines = data.splitlines()
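# Walk the IR line by line; inside function bodies, calls to the
# emscripten_autodebug_* helpers are appended after stores, loads and simple
# arithmetic so the instrumented program prints an "AD:<line>,<value>" trace.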
for i in range(len(lines)):
if MEMCPY:
if not lines[i].startswith('declare void'):
lines[i] = lines[i].replace('@llvm.memcpy.p0i8.p0i8.i32', '@emscripten_memcpy')
try:
pre = ''
if lines[i].startswith('define '):
in_func = True
if NO_DLMALLOC and ('@malloc(' in lines[i] or '@free(' in lines[i] or '@sys_alloc(' in lines[i] or '@segment_holding(' in lines[i] or '@init_top(' in lines[i] or '@add_segment(' in lines[i] or '@tmalloc_small(' in lines[i]):
in_func = False
if in_func:
added_entry = False
if 'printf' in lines[i] or '__fwritex' in lines[i] or '__towrite' in lines[i] or 'pop_arg391' in lines[i] or 'fmt_u' in lines[i] or 'pad(' in lines[i] or 'stdout_write' in lines[i] or 'stdio_write' in lines[i] or 'syscall' in lines[i]:
if not JS_LIB_PRINTING:
in_func = False # do not add logging in musl printing code, which would infinitely recurse
elif lines[i].startswith('}'):
in_func = False
elif in_func and not added_entry and ' = alloca' not in lines[i] and lines[i].startswith(' '):
# This is a good place to mark entry to this function
added_entry = True
index = i + 1 + lines_added
pre = ' call void @emscripten_autodebug_i32(i32 -1, i32 %d)' % index
elif in_func and lines[i].startswith(' ret '):
# This is a good place to mark the exit from this function
index = i + 1 + lines_added
pre = ' call void @emscripten_autodebug_i32(i32 -2, i32 %d)' % index
if in_func:
m = re.match(r' store (?P<type>i64|i32|i16|i8|float|double|%?[\w\.\*]+) (?P<var>%?[\w.+_]+), .*', lines[i])
if m:
index = i + 1 + lines_added
if m.group('type') in ['i8', 'i16', 'i32', 'i64', 'float', 'double']:
lines[i] += '\n call void @emscripten_autodebug_%s(i32 %d, %s %s)' % (m.group('type'), index, m.group('type'), m.group('var'))
lines_added += 1
elif ALLOW_POINTERS and m.group('type').endswith('*') and m.group('type').count('*') == 1:
lines[i] += '\n %%ead.%d = ptrtoint %s %s to i32' % (index, m.group('type'), m.group('var'))
lines[i] += '\n call void @emscripten_autodebug_i32(i32 %d, i32 %%ead.%d)' % (index, index)
lines_added += 2
continue
m = re.match(r' %(?P<var>[\w_.]+) = load (?P<type>i64|i32|i16|i8|float|double+)\* [^(].*.*', lines[i])
if m:
index = i + 1 + lines_added
lines[i] += '\n call void @emscripten_autodebug_%s(i32 %d, %s %%%s)' % (m.group('type'), index, m.group('type'), m.group('var'))
lines_added += 1
continue
if ALLOW_MISC:
# call is risky - return values can be i32 (i8*) (i16)
m = re.match(r' %(?P<var>[\w_.]+) = (mul|add) (nsw )?(?P<type>i64|i32|i16|i8|float|double+) .*', lines[i])
if m:
index = i + 1 + lines_added
lines[i] += '\n call void @emscripten_autodebug_%s(i32 %d, %s %%%s)' % (m.group('type'), index, m.group('type'), m.group('var'))
lines_added += 1
continue
if MEMCPY2:
m = re.match(r' call void @llvm\.memcpy\.p0i8\.p0i8\.i32\(i8\* %(?P<dst>[\w_.]+), i8\* %(?P<src>[\w_.]+), i32 8, i32 (?P<align>\d+),.*', lines[i])
if m:
index = i + 1 + lines_added
lines[i] += '\n %%adtemp%d = load i8* %%%s, align 1' % (index, m.group('src')) + \
'\n call void @emscripten_autodebug_i8(i32 %d, i8 %%adtemp%d)' % (index, index)
lines_added += 3
continue
m = re.match('[^ ].*; preds = ', lines[i])
if m:
# basic block
if len(lines) > i + 1 and 'phi' not in lines[i + 1] and 'landingpad' not in lines[i + 1]:
lines[i] += '\n call void @emscripten_autodebug_i32(i32 -10, i32 %d)' % (i + 1 + lines_added,)
lines_added += 1
continue
finally:
if len(pre):
lines[i] = pre + '\n' + lines[i]
lines_added += 1
ll = '\n'.join(lines) + '\n'
meta_start = ll.find('\n!')
with open(ofilename, 'w') as f:
f.write(ll[:meta_start] + '\n' + POSTAMBLE + '\n' + ll[meta_start:])
print('Success.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
py | 1a3f9a1221971b31372a926aad2f60a8dba94cbf | from flask import Flask, render_template, request, json
from module import utils
from os import remove
import face_recognition
app = Flask(
__name__,
static_url_path="",
static_folder="static",
template_folder="template"
)
@app.route("/", methods=["GET","POST"])
def index():
if request.method == "GET":
return render_template("index.html")
else:
encoding = []
local = "template/media/ori.jpg"
path = utils.b64_img(request.form['image'])
for i in [local, path]:
encoding.append(face_recognition.face_encodings(face_recognition.load_image_file(i))[0])
remove(path)
if face_recognition.compare_faces([encoding[0]], encoding[1])[0]:
result = "Wajah cocok"
else:
result = "Wajah tidak cocok"
return app.response_class(
response=json.dumps({
'status': result
}),
mimetype='application/json'
)
if __name__ == "__main__":
app.run() |
py | 1a3f9a87ca0f032545c12630bd17d440f8f4c4b6 | """Script to produce catalogues for use in stacking analysis.
The catalogues themselves are randomly produced for the purpose of trialing
the code. Modification of variable n can produce a catalogue with an
arbitrary number of sources.
"""
import numpy as np
import os
import logging
import random
import zlib
from flarestack.shared import catalogue_dir
cat_dtype = [
("ra_rad", np.float), ("dec_rad", np.float),
("base_weight", np.float),
("injection_weight_modifier", np.float),
("ref_time_mjd", np.float),
("start_time_mjd", np.float),
("end_time_mjd", np.float),
('distance_mpc', np.float), ('source_name', 'a30'),
]
def single_source(sindec, ra_rad=np.pi):
"""Produces a catalogue with a single source_path.
:param sindec: Sin(Declination) of Source
:param ra: Right Ascension in radians
:return: Source Array
"""
sources = np.empty(
1, dtype=cat_dtype)
ref_time = 55800.4164699
sources['ra_rad'] = np.array([ra_rad])
sources['dec_rad'] = np.arcsin(sindec)
sources['base_weight'] = np.array([1.])
sources['injection_weight_modifier'] = np.array([1.])
sources['distance_mpc'] = np.array([1.0])
sources['ref_time_mjd'] = (np.array([ref_time]))
sources['start_time_mjd'] = (np.array([ref_time - 50]))
sources['end_time_mjd'] = (np.array([ref_time + 100]))
sources['source_name'] = 'PS_dec=' + str(sindec)
return sources
def build_ps_cat_name(sindec):
return catalogue_dir + "single_source/sindec_" + '{0:.2f}'.format(sindec)\
+ ".npy"
def build_ps_stack_cat_name(sindecs):
return f"{catalogue_dir}multi_source/{zlib.adler32(str(list(sindecs)).encode())}.npy"
def make_single_source(sindec):
cat = single_source(sindec)
save_path = build_ps_cat_name(sindec)
try:
os.makedirs(os.path.dirname(save_path))
except FileExistsError:
pass
logging.info("Saving to {0}".format(save_path))
np.save(save_path, cat)
def ps_catalogue_name(sindec):
name = build_ps_cat_name(sindec)
if not os.path.isfile(name):
make_single_source(sindec)
return name
def make_stacked_source(sindecs):
cat = []
for sindec in sindecs:
ra_rad = random.random() ** 2 * np.pi
cat.append(single_source(sindec, ra_rad=ra_rad))
cat = np.array(cat, dtype=cat[0].dtype).T[0]
save_path = build_ps_stack_cat_name(sindecs)
try:
os.makedirs(os.path.dirname(save_path))
except FileExistsError:
pass
logging.info("Saving to {0}".format(save_path))
np.save(save_path, cat)
def ps_stack_catalogue_name(*args):
name = build_ps_stack_cat_name(args)
if not os.path.isfile(name):
make_stacked_source(args)
return name
def make_single_sources():
"""Makes single-source catalogues for a variety of sindec intervals."""
logging.info("Making single-source catalogues for the following sin(declinations):")
sindecs = np.linspace(1.00, -1.00, 41)
logging.info(sindecs)
try:
os.makedirs(os.path.dirname(ps_catalogue_name(0.0)))
except OSError:
pass
for sindec in sindecs:
make_single_source(sindec)
logging.info("Single Source catalogues created!")
def custom_sources(name, ra, dec, weight, distance,
injection_modifier=None, ref_time=np.nan,
start_time=np.nan, end_time=np.nan):
"""Creates a catalogue array,
:param name: Source Name
:param ra: Right Ascension (Degrees)
:param dec: Declination (Degrees)
:param weight: Relative Weights
:param distance: Distance to source (a.u.)
:param ref_time: Reference Time (MJD)
:param start_time: Start Time for window (MJD)
:param end_time: End Time for window (MJD)
:return: Catalogue Array
"""
sources = np.empty(np.array([ra]).__len__(), dtype=cat_dtype)
sources['ra_rad'] = np.deg2rad(np.array([ra]))
sources['dec_rad'] = np.deg2rad(np.array([dec]))
# If some sources are to be brighter than others, a non-uniform weight
# array can be passed.
sources['base_weight'] = np.array([weight])
# The source distance can be provided, in arbitrary units. The injector
# and reconstructor will weight sources according to 1/ (distance ^ 2).
sources['distance_mpc'] = np.array([distance])
# The sources can have a modified injection weight. This means the
# weights used in the likelihood will not match the weights used in the
# injection stage
if injection_modifier is not None:
sources["injection_weight_modifier"] = np.array(injection_modifier)
else:
sources["injection_weight_modifier"] = np.ones_like(ra)
# The source reference time can be arbitrarily defined, for example as
# the discovery date or the date of lightcurve peak. It is important that
# this is consistently defined between sources. Box Time PDFs can be defined
# relative to this point.
sources['ref_time_mjd'] = (np.array([ref_time]))
# The source can also be assigned fixed start and end times. Fixed Box
# Time PDFs can be defined relative to these values. This allows for the
# Time PDF duration to vary between sources.
sources['start_time_mjd'] = (np.array([start_time]))
sources['end_time_mjd'] = (np.array([end_time]))
sources['source_name'] = np.array([name])
return sources
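# Illustrative usage (values are examples only):
# cat = custom_sources(name="TXS 0506+056", ra=77.36, dec=5.69,
#                      weight=1., distance=1.)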
|
py | 1a3f9aa79c024840d6ccb6efd3acf991e6e143f6 | import json
from herbieapp.services import logging, SchemaRegistry, SchemaPackage
from herbieapp.models import Schema
class SchemaImporter:
def __init__(self):
self._logger = logging.getLogger(__name__)
self._schema_package = SchemaPackage()
def import_schemas(self):
schema_list = self._schema_package.get_all_json_schemas()
if len(schema_list) == 0:
self._logger.error('No schemas defined!')
return 0
self._logger.info('Schema import started!')
for schema in schema_list:
schema_data = json.loads(schema)
self._create_update_json_schema(schema_data['business_entity'], schema_data['version'], schema_data['data'])
self._logger.info('Schemas imported successfully!')
return 0
def _create_update_json_schema(self, business_entity: str, version: str, data: str):
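# Upsert helper: create the Schema row if the (name, version) pair does not
# exist yet, otherwise update its content in place.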
schema = Schema.objects.filter(name=business_entity, version=version)
reg = SchemaRegistry()
reg.find_schema(business_entity, version)
schema_data = json.loads(data) if data != '' else {}
if not schema.exists():
json_schema = Schema()
json_schema.name = business_entity
json_schema.version = version
json_schema.content = schema_data
json_schema.save()
else:
schema.update(name=business_entity, version=version, content=schema_data)
|
py | 1a3f9abfafdbfbb0bf60702461786f271131d2c4 | import numpy as np
from opytimizer.optimizers.science import eo
from opytimizer.spaces import search
def test_eo_params():
params = {
'a1': 2.0,
'a2': 1.0,
'GP': 0.5,
'V': 1.0
}
new_eo = eo.EO(params=params)
assert new_eo.a1 == 2.0
assert new_eo.a2 == 1.0
assert new_eo.GP == 0.5
assert new_eo.V == 1.0
def test_eo_params_setter():
new_eo = eo.EO()
try:
new_eo.a1 = 'a'
except:
new_eo.a1 = 2.0
try:
new_eo.a1 = -1
except:
new_eo.a1 = 2.0
assert new_eo.a1 == 2.0
try:
new_eo.a2 = 'b'
except:
new_eo.a2 = 1.0
try:
new_eo.a2 = -1
except:
new_eo.a2 = 1.0
assert new_eo.a2 == 1.0
try:
new_eo.GP = 'c'
except:
new_eo.GP = 0.5
try:
new_eo.GP = -1
except:
new_eo.GP = 0.5
assert new_eo.GP == 0.5
try:
new_eo.V = 'd'
except:
new_eo.V = 1.0
try:
new_eo.V = -1
except:
new_eo.V = 1.0
assert new_eo.V == 1.0
def test_eo_compile():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
try:
new_eo.C = 1
except:
new_eo.C = []
assert new_eo.C == []
def test_eo_calculate_equilibrium():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
new_eo._calculate_equilibrium(search_space.agents)
def test_eo_average_concentration():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
C_avg = new_eo._average_concentration(square)
assert type(C_avg).__name__ == 'Agent'
def test_eo_update():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_eo = eo.EO()
new_eo.compile(search_space)
new_eo.update(search_space, square, 1, 10)
|
py | 1a3f9b51f6c04567671dbdd24ac1835526ff0aef | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Base class for MIME specializations."""
__all__ = ['MIMEBase']
import email.policy
from email import message
class MIMEBase(message.Message):
"""Base class for MIME specializations."""
def __init__(self, _maintype, _subtype, *, policy=None, **_params):
"""This constructor adds a Content-Type: and a MIME-Version: header.
The Content-Type: header is taken from the _maintype and _subtype
arguments. Additional parameters for this header are taken from the
keyword arguments.
"""
if policy is None:
policy = email.policy.compat32
message.Message.__init__(self, policy=policy)
ctype = '%s/%s' % (_maintype, _subtype)
self.add_header('Content-Type', ctype, **_params)
self['MIME-Version'] = '1.0'
|
py | 1a3f9b98f222dc358cfe8e10b88f15c4b60bd116 | from django.test import TestCase
from django.core.urlresolvers import reverse
class ViewsTest(TestCase):
def test_root(self):
response = self.client.get(reverse('services.views.root'))
self.assertEqual(response.status_code, 200)
def test_advertise(self):
response = self.client.get(reverse('services.views.advertise'))
self.assertEqual(response.status_code, 200)
def test_bootstrap(self):
response = self.client.get(reverse('services.views.bootstrap'))
self.assertEqual(response.status_code, 200)
def test_api(self):
response = self.client.get(reverse('services.views.api'))
self.assertEqual(response.status_code, 200) |
py | 1a3f9bf447313cea1466cc1842e28528500b0195 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import shutil
import tempfile
import subprocess
from typing import List, Any, Union, Optional, Dict
from pathlib import Path
class TemporaryDirectoryCopy(tempfile.TemporaryDirectory): # type: ignore
"""Creates a full copy of a directory inside a temporary directory
This class can be used as TemporaryDirectory but:
- the created copy path is available through the copyname attribute
- the contextmanager returns the clean copy path
- the directory where the temporary directory will be created
can be controlled through the CLEAN_COPY_DIRECTORY environment
variable
"""
key = "CLEAN_COPY_DIRECTORY"
@classmethod
def set_clean_copy_environment_variable(cls, directory: Union[Path, str]) -> None:
"""Sets the CLEAN_COPY_DIRECTORY environment variable in
order for subsequent calls to use this directory as base for the
copies.
"""
assert Path(directory).exists(), "Directory does not exist"
os.environ[cls.key] = str(directory)
# pylint: disable=redefined-builtin
def __init__(self, source: Union[Path, str], dir: Optional[Union[Path, str]] = None) -> None:
if dir is None:
dir = os.environ.get(self.key, None)
super().__init__(prefix="tmp_clean_copy_", dir=dir)
self.copyname = Path(self.name) / Path(source).name
shutil.copytree(str(source), str(self.copyname))
def __enter__(self) -> Path:
super().__enter__()
return self.copyname
class FailedJobError(RuntimeError):
"""Job failed during processing
"""
class CommandFunction:
"""Wraps a command as a function in order to make sure it goes through the
pipeline and notifies when it is finished.
The output is a string containing everything that has been sent to stdout
Parameters
----------
command: list
command to run, as a list
verbose: bool
prints the command and stdout at runtime
cwd: Path/str
path to the location where the command must run from
Returns
-------
str
Everything that has been sent to stdout
"""
def __init__(self, command: List[str], verbose: bool = False, cwd: Optional[Union[str, Path]] = None,
env: Optional[Dict[str, str]] = None) -> None:
if not isinstance(command, list):
raise TypeError("The command must be provided as a list")
self.command = command
self.verbose = verbose
self.cwd = None if cwd is None else str(cwd)
self.env = env
def __call__(self, *args: Any, **kwargs: Any) -> str:
"""Call the cammand line with addidional arguments
The keyword arguments will be sent as --{key}={val}
The logs are buffered. They will be printed if the job fails, or sent as output of the function
Errors are provided with the internal stderr
"""
# TODO make the following command more robust (probably fails in multiple cases)
full_command = self.command + [str(x) for x in args] + ["--{}={}".format(x, y) for x, y in kwargs.items()]
if self.verbose:
print(f"The following command is sent: {full_command}")
outlines: List[str] = []
with subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, cwd=self.cwd, env=self.env) as process:
try:
for line in iter(process.stdout.readline, b''):
if not line:
break
outlines.append(line.decode().strip())
if self.verbose:
print(outlines[-1], flush=True)
except Exception: # pylint: disable=broad-except
process.kill()
process.wait()
raise FailedJobError("Job got killed for an unknown reason.")
stderr = process.communicate()[1] # we already got stdout
stdout = "\n".join(outlines)
retcode = process.poll()
if stderr and (retcode or self.verbose):
print(stderr.decode(), file=sys.stderr)
if retcode:
subprocess_error = subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
raise FailedJobError(stderr.decode()) from subprocess_error
return stdout
|
py | 1a3f9c03f9039c36fc67d2fbbb45f8d696781013 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENT_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test that publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required to
access ingredients"""
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the private ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'test123'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredients_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENT_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_limited_to_user(self):
"""Test that ingredients are returned
for the authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test create a new ingredient"""
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENT_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENT_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipe(self):
"""Testing filtering ingredient by assigned recipe"""
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apple'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple crumble',
time_minutes=5,
price=10,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
"""Test filtering ingredient by assigned return unique items"""
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name="Cheese")
recipe1 = Recipe.objects.create(
title='Eggs benedict',
time_minutes=20,
price=12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
|
py | 1a3f9c0f985bde1fde26bff660cb02d6eff18692 | import torch
import skimage
import torch.nn.functional as F
import numpy as np
from skimage.segmentation import watershed
from skimage.segmentation import find_boundaries
from scipy import ndimage
from skimage import morphology as morph
import kornia
def compute_density_loss(logits, points, sigma=1):
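"""Blur the point-annotation map with a small Gaussian kernel to build a target
density map, then return the RMSE between the predicted density channel
(logits[:, 1]) and that target."""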
kernel_size = (3, 3)
sigma_list = (sigma, sigma)
gfilter = kornia.filters.get_gaussian_kernel2d(kernel_size, sigma_list)
density = kornia.filters.filter2D(points[None].float(), kernel=gfilter[None], border_type='reflect')
diff = (logits[:, 1] - density)**2
loss = torch.sqrt(diff.mean())
return loss |
py | 1a3f9db480b8fd42fe9090f48f174f4e82d3b474 | from asyncio import Lock, create_task
from time import time
from pyrogram import filters
from pyrogram.types import Message
from wbb import BOT_ID, SUDOERS
from wbb.core.sections import bold, section, w
tasks = {}
TASKS_LOCK = Lock()
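# Helper for building status text: reuse an existing message's text (if any)
# and append an arrow marker on a new line.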
arrow = lambda x: (x.text if x else "") + "\n`→`"
def all_tasks():
return tasks
async def add_task(
taskFunc,
task_name,
*args,
**kwargs,
):
async with TASKS_LOCK:
global tasks
task_id = (list(tasks.keys())[-1] + 1) if tasks else 0
task = create_task(
taskFunc(*args, **kwargs),
name=task_name,
)
tasks[task_id] = task, int(time())
return task, task_id
async def rm_task(task_id=None):
global tasks
async with TASKS_LOCK:
for key, value in list(tasks.items()):
if value[0].done() or value[0].cancelled():
del tasks[key]
if (task_id is not None) and (task_id in tasks):
task = tasks[task_id][0]
if not task.done():
task.cancel()
del tasks[task_id]
async def _get_tasks_text():
await rm_task() # Clean completed tasks
if not tasks:
return f"{arrow('')} No pending task"
text = bold("Tasks") + "\n"
for i, task in enumerate(list(tasks.items())):
indent = w * 4
t, started = task[1]
elapsed = round(time() - started)
info = t._repr_info()
id = task[0]
text += section(
f"{indent}Task {i}",
body={
"Name": t.get_name(),
"Task ID": id,
"Status": info[0].capitalize(),
"Origin": info[2].split("/")[-1].replace(">", ""),
"Running since": f"{elapsed}s",
},
indent=8,
)
return text
|
py | 1a3f9f2576f525228f39369b0a4300bbe8f4739c | import subprocess
from text2speech.modules import TTS, TTSValidator
class ESpeakNG(TTS):
audio_ext = "wav"
def __init__(self, config=None):
config = config or {"lang": "en-us", "voice": "m1"}
super(ESpeakNG, self).__init__(config, ESpeakNGValidator(self),
ssml_tags=["speak", "say-as", "voice",
"audio", "prosody", "break",
"emphasis", "sub",
"tts:style", "p", "s",
"mark"])
@property
def gender(self):
return self.voice[0]
def modify_tag(self, tag):
"""Override to modify each supported ssml tag"""
if "%" in tag:
if "-" in tag:
val = tag.split("-")[1].split("%")[0]
tag = tag.replace("-", "").replace("%", "")
new_val = int(val) / 100
tag = tag.replace(val, str(new_val))
elif "+" in tag:
val = tag.split("+")[1].split("%")[0]
tag = tag.replace("+", "").replace("%", "")
new_val = int(val) / 100
tag = tag.replace(val, str(new_val))
return tag
def get_tts(self, sentence, wav_file):
subprocess.call(
['espeak-ng', '-m', "-w", wav_file, '-v', self.lang + '+' +
self.voice, sentence])
return wav_file, None
def describe_voices(self):
output = subprocess.check_output(["espeak-ng", "--voices"]).decode(
"utf-8")
voices = {}
for v in output.split("\n")[1:]:
if len(v.split()) < 3:
continue
_, lang_code = v.split()[:2]
voices[lang_code] = ["m1", "m2", "m3", "m4", "m5", "m6", "m7",
"f1", "f2", "f3", "f4", "f5", "croak",
"whisper"]
return voices
class ESpeakNGValidator(TTSValidator):
def __init__(self, tts):
super(ESpeakNGValidator, self).__init__(tts)
def validate_connection(self):
try:
subprocess.call(['espeak-ng', '--version'])
except:
raise Exception(
'ESpeak is not installed. Run: sudo apt-get install espeak-ng')
def get_tts_class(self):
return ESpeakNG
|
py | 1a3f9f64277026594a52bc0c174e2bfc4aa1244b | import datetime
import json
import operator
import time
from typing import Any, Callable, Generator, List, Optional, Tuple, Union
from sqlalchemy import inspect
from wtforms import Form, ValidationError, fields, widgets
from sqladmin import widgets as sqladmin_widgets
from sqladmin.helpers import as_str
__all__ = [
"DateField",
"DateTimeField",
"JSONField",
"QuerySelectField",
"QuerySelectMultipleField",
"Select2Field",
"Select2TagsField",
"TimeField",
]
class DateField(fields.DateField):
"""
Add custom DatePickerWidget for data-format and data-date-format fields
"""
widget = sqladmin_widgets.DatePickerWidget()
class DateTimeField(fields.DateTimeField):
"""
Allows modifying the datetime format of a DateTimeField using form_args.
"""
widget = sqladmin_widgets.DateTimePickerWidget()
def __init__(
self,
label: str = None,
validators: list = None,
format: str = None,
**kwargs: Any,
) -> None:
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param format:
Format for text to date conversion. Defaults to '%Y-%m-%d %H:%M:%S'
:param kwargs:
Any additional parameters
"""
super().__init__(label, validators, **kwargs)
self.format = format or "%Y-%m-%d %H:%M:%S"
class TimeField(fields.Field):
"""
A text field which stores a `datetime.time` object.
Accepts time string in multiple formats: 20:10, 20:10:00, 10:00 am, 9:30pm, etc.
"""
widget = sqladmin_widgets.TimePickerWidget()
def __init__(
self,
label: str = None,
validators: list = None,
formats: List[str] = None,
default_format: str = None,
**kwargs: Any,
) -> None:
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param formats:
Supported time formats, as a enumerable.
:param default_format:
Default time format. Defaults to '%H:%M:%S'
:param kwargs:
Any additional parameters
"""
super().__init__(label, validators, **kwargs)
self.formats = formats or (
"%H:%M:%S",
"%H:%M",
"%I:%M:%S%p",
"%I:%M%p",
"%I:%M:%S %p",
"%I:%M %p",
)
self.default_format = default_format or "%H:%M:%S"
self.data: Optional[datetime.time]
def _value(self) -> str:
if self.raw_data:
return " ".join(self.raw_data)
elif self.data is not None:
return self.data.strftime(self.default_format)
else:
return ""
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
date_str = " ".join(valuelist)
if date_str.strip():
for format in self.formats:
try:
timetuple = time.strptime(date_str, format)
self.data = datetime.time(
timetuple.tm_hour, timetuple.tm_min, timetuple.tm_sec
)
return
except ValueError:
pass
raise ValueError("Invalid time format")
else:
self.data = None
class Select2Field(fields.SelectField):
"""
`Select2 <https://github.com/select2/select2>`_ styled select widget.
"""
widget = sqladmin_widgets.Select2Widget()
def __init__(
self,
label: str = None,
validators: list = None,
coerce: type = str,
choices: Union[list, Callable] = None,
allow_blank: bool = False,
blank_text: str = None,
**kwargs: Any,
) -> None:
super().__init__(label, validators, coerce, choices, **kwargs)
self.allow_blank = allow_blank
self.blank_text = blank_text or " "
def iter_choices(self) -> Generator[Tuple[str, str, bool], None, None]:
choices = self.choices or []
if self.allow_blank:
yield ("__None", self.blank_text, self.data is None)
for choice in choices:
if isinstance(choice, tuple):
yield (choice[0], choice[1], self.coerce(choice[0]) == self.data)
else:
yield (
choice.value,
choice.name,
self.coerce(choice.value) == self.data,
)
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
if valuelist[0] == "__None":
self.data = None
else:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext("Invalid Choice: could not coerce"))
def pre_validate(self, form: Form) -> None:
if self.allow_blank and self.data is None:
return
super().pre_validate(form)
class Select2TagsField(fields.StringField):
"""
`Select2 <https://github.com/select2/select2>`_ styled text field.
"""
widget = sqladmin_widgets.Select2TagsWidget()
def __init__(
self,
label: str = None,
validators: list = None,
save_as_list: bool = False,
coerce: type = str,
**kwargs: Any,
) -> None:
"""
Initialization
:param save_as_list:
If `True` then populate ``obj`` using list else string
"""
self.save_as_list = save_as_list
self.coerce = coerce
super().__init__(label, validators, **kwargs)
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
if self.save_as_list:
self.data = [
self.coerce(v.strip()) for v in valuelist[0].split(",") if v.strip()
]
else:
self.data = self.coerce(valuelist[0])
def _value(self) -> str:
if isinstance(self.data, (list, tuple)):
return ",".join(as_str(v) for v in self.data)
elif self.data:
return as_str(self.data)
else:
return ""
class JSONField(fields.TextAreaField):
def _value(self) -> str:
if self.raw_data:
return self.raw_data[0]
elif self.data:
return as_str(json.dumps(self.data, ensure_ascii=False))
else:
return "{}"
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
value = valuelist[0]
# allow saving blank field as None
if not value:
self.data = None
return
try:
self.data = json.loads(valuelist[0])
except ValueError:
raise ValueError(self.gettext("Invalid JSON"))
class QuerySelectField(fields.SelectFieldBase):
"""
Will display a select drop-down field to choose between ORM results in a
sqlalchemy `Query`. The `data` property actually will store/keep an ORM
model instance, not the ID. Submitting a choice which is not in the query
will result in a validation error.
This field only works for queries on models whose primary key column(s)
have a consistent string representation. This means it mostly only works
for those composed of string, unicode, and integer types. For the most
part, the primary keys will be auto-detected from the model, alternately
pass a one-argument callable to `get_pk` which can return a unique
comparable key.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for this blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(
self,
object_list: list = None,
label: str = None,
validators: list = None,
get_label: Union[Callable, str] = None,
allow_blank: bool = False,
blank_text: str = "",
**kwargs: Any,
) -> None:
super().__init__(label=label, validators=validators, **kwargs)
self._object_list = object_list or []
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, str):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._data: Optional[tuple]
self._formdata: Optional[Union[str, List[str]]]
@property
def data(self) -> Optional[tuple]:
if self._formdata is not None:
for pk, obj in self._object_list:
if pk == self._formdata:
self.data = obj
break
return self._data
@data.setter
def data(self, data: tuple) -> None:
self._data = data
self._formdata = None
def iter_choices(self) -> Generator[Tuple[str, str, bool], None, None]:
if self.allow_blank:
yield ("__None", self.blank_text, self.data is None)
identity = inspect(self.data).identity[0] if self.data else "__None"
for pk, obj in self._object_list:
yield (pk, self.get_label(obj), pk == str(identity))
def process_formdata(self, valuelist: List[str]) -> None:
if valuelist:
if self.allow_blank and valuelist[0] == "__None":
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form: Form) -> None:
data = self.data
if data is not None:
for _, obj in self._object_list:
if data == obj:
break
else: # pragma: no cover
raise ValidationError(self.gettext("Not a valid choice"))
elif self._formdata or not self.allow_blank:
raise ValidationError(self.gettext("Not a valid choice"))
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = widgets.Select(multiple=True)
def __init__(
self,
object_list: list = None,
label: str = None,
validators: list = None,
default: Any = None,
**kwargs: Any,
) -> None:
default = default or []
super().__init__(label=label, validators=validators, default=default, **kwargs)
self._object_list = object_list or []
if kwargs.get("allow_blank", False):
import warnings
warnings.warn(
"allow_blank=True does not do anything for QuerySelectMultipleField."
)
self._invalid_formdata = False
self._formdata: Optional[List[str]] = None
self._data: Optional[tuple] = None
@property
def data(self) -> Optional[tuple]:
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._object_list:
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self.data = data or self._data # type: ignore
return self._data
@data.setter
def data(self, data: tuple) -> None:
self._data = data
self._formdata = None
def iter_choices(self) -> Generator[Tuple[str, Any, bool], None, None]:
if self.data is not None:
primary_keys = [str(inspect(m).identity[0]) for m in self.data]
for pk, obj in self._object_list:
yield (pk, self.get_label(obj), pk in primary_keys)
def process_formdata(self, valuelist: List[str]) -> None:
self._formdata = list(set(valuelist))
def pre_validate(self, form: Form) -> None:
if self._invalid_formdata:
raise ValidationError(self.gettext("Not a valid choice"))
elif self.data:
pk_list = [x[0] for x in self._object_list]
for v in self.data:
identity = inspect(v).identity
if identity and str(identity[0]) not in pk_list: # pragma: no cover
raise ValidationError(self.gettext("Not a valid choice"))
|
py | 1a3fa01e671cb7b50b3239dd9fdc26a9fb65f223 | _base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_icdar2021.py'
rpn_weight = 0.7
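# Scale factor applied to every RPN loss_weight below, so the cascade RPN
# losses are down-weighted relative to the R-CNN head.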
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
sampling=False,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
sampling=True,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0 * rpn_weight),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight))
]),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(2000, 900), (2000, 600)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 724),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'Icdar2021Dataset'
# data_root = '/home/weibaole/disk1/gpu/Workspace/Datas/ICDAR2021/'
data_root = '/home/wbl/workspace/data/ICDAR2021/'
classes = ('embedded', 'isolated',)
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'TrM_isolated.json',
img_prefix=data_root + 'TrM/',
classes=classes,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'VaM_isolated.json',
img_prefix=data_root + 'VaM/',
classes=classes,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'VaM_isolated.json',
img_prefix=data_root + 'VaM/',
classes=classes,
pipeline=test_pipeline))
# model training and testing settings
train_cfg = dict(
rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
],
rpn_proposal=dict(max_num=300, nms_thr=0.8),
rcnn=dict(
assigner=dict(pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(type='RandomSampler', num=256)))
test_cfg = dict(rpn=dict(max_num=300, nms_thr=0.8), rcnn=dict(score_thr=1e-3))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(step=[16, 22])
total_epochs = 24
|
py | 1a3fa088dffc4817e0b04b12cfc814847c080d86 | def divide(a, b):
try:
result = a / b
except (ZeroDivisionError, TypeError) as err:
print("Something went wrong!")
print(err)
else:
print(f"{a} divided by {b} is {result}")
print(divide('a', 2)) |
py | 1a3fa20a45029a1209f44b58932498dc03d8d3e7 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Library: pip3 install opencv-python
import cv2
# Load the cascade
# /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/cv2/data/haarcascade_frontalface_alt.xml
face_cascade = cv2.CascadeClassifier('face_detector.xml')
# Read the input image
img = cv2.imread('img_test.jpg')
# Detect faces in the image
faces = face_cascade.detectMultiScale(img, 1.1, 4)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 250, 205), 2)
# Export the result
cv2.imwrite('img_test.png', img)
print('Found {0} face(s)!'.format(len(faces)), '\nSuccessfully saved')
|
py | 1a3fa29fec02d437872129fbd8d23876d8977df3 | #!/usr/bin/env python3
from marshmallow import Schema, fields, RAISE
from marshmallow import ValidationError
from marshmallow.validate import Range
class BytesField(fields.Field):
def _validate(self, value):
if not isinstance(value, bytes):
raise ValidationError('Invalid input type.')
if value is None or value == b'':
raise ValidationError('Invalid value')
class CacheSchema(Schema):
content = BytesField(required=True)
status_code = fields.Integer(required=True, validate=Range(min=100, max=599))
headers = fields.Dict(required=True)
class Meta:
unknown = RAISE
|
py | 1a3fa33f5f6d3963516bca714dc7856162b8eba4 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.check_constraint.tests import utils as chk_constraint_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.exclusion_constraint.tests import utils as exclusion_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.foreign_key.tests import utils as fk_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.\
constraints.index_constraint.tests import utils as index_constraint_utils
from . import utils as constraints_utils
class ConstraintDeleteMultipleTestCase(BaseTestGenerator):
"""This class will delete constraints under table node."""
url = '/browser/constraints/nodes/'
# Generates scenarios from cast_test_data.json file
scenarios = utils.generate_scenarios("constraints_get_nodes",
constraints_utils.test_cases)
def setUp(self):
# Load test data
self.data = self.test_data
# Create db connection
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a table.")
# Create schema
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a table.")
# Create table
self.table_name = "table_constraint_delete_%s" % \
(str(uuid.uuid4())[1:8])
self.table_id = tables_utils.create_table(self.server,
self.db_name,
self.schema_name,
self.table_name)
# Create Check Constraints
self.check_constraint_name = "test_constraint_delete_%s" % \
(str(uuid.uuid4())[1:8])
self.check_constraint_id = \
chk_constraint_utils.create_check_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.check_constraint_name)
self.check_constraint_name_1 = "test_constraint_delete1_%s" % (
str(uuid.uuid4())[1:8])
self.check_constraint_id_1 = \
chk_constraint_utils.create_check_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.check_constraint_name_1)
# Create Exclusion Constraint
self.exclustion_constraint_name = "test_exclusion_get_%s" % (
str(uuid.uuid4())[1:8])
self.exclustion_constraint_id = \
exclusion_utils.create_exclusion_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.exclustion_constraint_name
)
# Create Foreign Key
self.foreign_table_name = "foreign_table_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
self.foreign_table_id = tables_utils.create_table(
self.server, self.db_name, self.schema_name,
self.foreign_table_name)
self.foreign_key_name = "test_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
self.foreign_key_id = fk_utils.create_foreignkey(
self.server, self.db_name, self.schema_name, self.table_name,
self.foreign_table_name)
# Create Primary Key
self.primary_key_name = "test_primary_key_get_%s" % \
(str(uuid.uuid4())[1:8])
self.primary_key_id = \
index_constraint_utils.create_index_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.primary_key_name, "PRIMARY KEY")
# Create Unique Key constraint
self.unique_constraint_name = "test_unique_constraint_get_%s" % (
str(uuid.uuid4())[1:8])
self.unique_constraint_id = \
index_constraint_utils.create_index_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.unique_constraint_name, "UNIQUE")
def runTest(self):
"""This function will delete constraints under table node."""
if self.is_positive_test:
response = constraints_utils.api_get(self)
# Assert response
utils.assert_status_code(self, response)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
|
py | 1a3fa3c9b42d6700c4666fc22d3500447ef712e7 | from rest_framework import viewsets
from .models import Product, Category, ProductType
from .serializers import ProductSerializer, ProductTypeSerializer, CategorySerializer
class ProductViewSet(viewsets.ModelViewSet):
queryset = Product.objects.all()
serializer_class = ProductSerializer
class ProductTypeViewSet(viewsets.ModelViewSet):
queryset = ProductType.objects.all()
serializer_class = ProductTypeSerializer
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
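# --- Hedged wiring sketch (not part of the original module) ---
# These viewsets are typically exposed through a DRF router in the project's
# urls.py; the import path and route prefixes below are assumptions.
#
# from rest_framework.routers import DefaultRouter
# from .views import ProductViewSet, ProductTypeViewSet, CategoryViewSet
#
# router = DefaultRouter()
# router.register(r'products', ProductViewSet)
# router.register(r'product-types', ProductTypeViewSet)
# router.register(r'categories', CategoryViewSet)
# urlpatterns = router.urls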
|
py | 1a3fa41c9b14129bc8ce817bedd98f2ee493b731 | import pygame
from main.main import main
if __name__ == '__main__':
#main()
result = main() |
py | 1a3fa72904f44467e771cfda151bf6a4f50ae45a | import extract_sift, extract_global, retrieval, config
import os, shutil, argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='Evaluate dataset')
parser.add_argument(
'--sift_mode', # mode = 0 -> SIFT detector; 1 -> Hessian affine detector
type=int,
required=False,
default=1
)
parser.add_argument(
'--num_threads',
type=int,
required=False,
default=8
)
args = parser.parse_args()
return args
def main(args):
videosearch_dir = '../videosearch'
db_dir = os.path.join(config.DATASET_DIR, 'model_frames')
query_dir = os.path.join(config.DATASET_DIR, 'queries')
extract_sift.extract(videosearch_dir, db_dir, args.sift_mode, args.num_threads)
extract_sift.extract(videosearch_dir, query_dir, args.sift_mode, args.num_threads)
extract_global.generateFrameLists(config.DATASET_DIR)
if __name__ == '__main__':
main(parse_arguments())
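# Example invocation (assumptions: the script name is illustrative, and the
# ../videosearch checkout plus config.DATASET_DIR referenced above already exist):
#
#   python evaluate_dataset.py --sift_mode 1 --num_threads 8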
|
py | 1a3fa7bda6a241001aca52fe64eca24c13dff69f | #Built in Python
import os
import sys
import glob
#Standard Packages
from astropy.io import ascii
from astropy import table
from astropy.time import Time
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
matplotlib.style.use('seaborn-colorblind')
from scipy.interpolate import interp1d
#Installed for this project
import extinction
#Mine
import visualization
#get_ipython().magic('matplotlib inline')
import connect_to_sndavis
import define_filters
import supernova
FIG_DIR = '../figures'
def build_sn_list():
db, cursor = connect_to_sndavis.get_cursor()
# 6 = II, 17=IIP-like, 18=IIL-like
query_str = '{} {} {} {} {} {} {} {} {} {}'.format(
'SELECT DISTINCT idsupernovae.`id`, sntype,name, slope, slopetype',
'FROM idsupernovae',
'JOIN supernovanames',
'ON idsupernovae.`id` = supernovanames.`targetid`',
'JOIN snslope',
'ON idsupernovae.`id` = snslope.`targetid` ',
'WHERE (sntype = 6 ',
'OR sntype = 17',
'OR sntype = 18)',
"AND slopetype = 's50';")
query = cursor.execute(query_str)
results = cursor.fetchall()
id = []
name = []
slope = []
for idict in results:
id.append(idict['id'])
name.append(idict['name'])
slope.append(idict['slope'])
tbdata = table.Table([id, name, slope], names = ['id', 'name', 's50'])
return tbdata
def find_closest_slope(tbdata):
slope_diff = np.abs((tbdata['s50'] - tbdata['s50'][tbdata['name']=='ASASSN-15oz']))
indx = np.argsort(slope_diff)
return indx
def compare_sn(snname1, snname2, rank, band='all', sn2_phase_offset = 0):
sn1 = supernova.LightCurve2(snname1)
sn1.get_photometry(band=band)
sn2 = supernova.LightCurve2(snname2)
sn2.get_photometry(band=band)
common_bands = set(sn1.apparent_mag.keys())&(sn2.apparent_mag.keys())
fig = plt.figure(figsize=[8.5, 11])
for plot_num, iband in enumerate(common_bands):
if plot_num == 5:
plt.savefig(os.path.join(FIG_DIR, 'similar_lc_{}_2.pdf'.format(sn2.name)))
plt.close()
fig = plt.figure(figsize=[8.5, 11])
ax = fig.add_subplot(3, 2, plot_num%6+1)
ax.plot(sn1.phase[iband], sn1.apparent_mag[iband]/sn1.apparent_mag[iband].min(), 'o', label = sn1.name, markersize=2)
ax.plot(sn2.phase[iband]+sn2_phase_offset, sn2.apparent_mag[iband]/sn2.apparent_mag[iband].min(), 's', label = sn2.name, markersize=2)
ax.set_title('{}, {} band, rank={}'.format(sn2.name, iband, rank), fontsize='small')
ax.set_ylim(ymax=np.max((sn1.apparent_mag[iband])[sn1.phase[iband] < 100])/sn1.apparent_mag[iband].min()+0.05)
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_xlim(0, 100)
ax.legend(loc='best', fontsize='xx-small')
fig.tight_layout()
plt.savefig(os.path.join(FIG_DIR, 'similar_lc_{}.pdf'.format(sn2.name)))
plt.close()
if __name__ == "__main__":
num_sn = 10
tbdata = build_sn_list()
best_match_indx = find_closest_slope(tbdata)
print(tbdata[best_match_indx[1:num_sn+1]])
for rank, sn_indx in enumerate(best_match_indx[1:num_sn+1]): #start at 1 b/c the SN is always the best match to itself
compare_sn('ASASSN-15oz', tbdata['name'][sn_indx], rank+1)
compare_sn('ASASSN-15oz', '2016zb', 1, sn2_phase_offset = 8)
|
py | 1a3fa8693cb69db8444b9b5d3ea6339ed17aabdc | import time
import urllib
import urllib2
from bs4 import BeautifulSoup
from google import search
from slackclient import SlackClient
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
import config
bot_name = 'ninja'
bot_id = SlackClient(config.bot_id['BOT_ID'])
at_bot = "<@" + str(bot_id) + ">:"
slack_client = SlackClient(config.slack_token['SLACK_TOKEN'])
def parse_data(slack_data):
inputdata = slack_data
if inputdata and len(inputdata) > 0:
for data in inputdata:
if data and 'text' in data != bot_id:
return data['text'], data['channel']
return None, None
def chat(input_command, channel):
input_command = input_command.replace("<@" + str(bot_id) + "> ", "")
so_url = "http://stackoverflow.com"
for url in search(urllib.quote_plus(input_command.encode('utf8'))):
if "http://stackoverflow.com/" in url:
so_url = url
slack_client.api_call("chat.postMessage", channel=channel, text=str(url), as_user=True)
break
else:
continue
try:
page = urllib2.urlopen(so_url)
soup = BeautifulSoup(page.read())
result = soup.find(attrs={'class': 'answer accepted-answer'})
if result is not None:
res = result.find(attrs={'class': 'post-text'})
for a in res:
if a.string is None:
a.string = ' '
slack_client.api_call("chat.postMessage", channel=channel, text="```" + res.get_text() + "```",
as_user=True)
# slack_client.api_call("chat.postMessage", channel=channel,
# text="```" + sentimentalAnalyser(res.get_text()) + "```",
# as_user=True)
# print(sentimentalAnalyser(res.get_text()))
except IndexError:
page = urllib2.urlopen(so_url)
soup = BeautifulSoup(page.read())
result = soup.find(attrs={'class': 'answer'})
if result is not None:
res = result.find(attrs={'class': 'post-text'})
for a in res:
if a.string is None:
a.string = ' '
slack_client.api_call("chat.postMessage", channel=channel, text="```" + res.get_text() + "```",
as_user=True)
# slack_client.api_call("chat.postMessage", channel=channel,
# text="```" + "Sentiment: " + sentimentalAnalyser(res.get_text()) + "```",
# as_user=True)
# print(sentimentalAnalyser(res.get_text()))
except:
print("Could not parse")
slack_client.api_call("chat.postMessage", channel=channel, text="Could not find a relevant link", as_user=True)
raise
# def sentimentalAnalyser(data):
# sresult = []
# stringData = data
# sid = SentimentIntensityAnalyzer()
# ss = sid.polarity_scores(stringData)
# '''for k in sorted(ss):
# print('{0}: {1}, '.format(k, ss[k]))
# print()'''
# for k in sorted(ss):
# sresult.append('{0}'.format(ss[k]))
# print(sresult[0])
# return sresult[0]
def ninjafy():
if slack_client.rtm_connect():
print("Connected")
while True:
input_command, channel = parse_data(slack_client.rtm_read())
if input_command and channel:
chat(input_command, channel)
time.sleep(1)
else:
print("Connection failed")
if __name__ == '__main__':
ninjafy()
|
py | 1a3fa9507711ac898393cae9b0aee9b4daf2395e | import tkinter as tk
from tkinter import messagebox
class FillAllFields(Exception):
pass
class StudentAlreadyRegistered(Exception):
pass
class EmptyField(Exception):
pass
class MatriculaRepeated(Exception):
pass
class Estudante:
def __init__(self, nroMatric, nome):
self.__nroMatric = nroMatric
self.__nome = nome
def getNroMatric(self):
return self.__nroMatric
def getNome(self):
return self.__nome
class LimiteInsereEstudantes(tk.Toplevel):
def __init__(self, controle):
tk.Toplevel.__init__(self)
self.geometry('250x100')
self.title("Estudante")
self.controle = controle
self.frameNro = tk.Frame(self)
self.frameNome = tk.Frame(self)
self.frameButton = tk.Frame(self)
self.frameNro.pack()
self.frameNome.pack()
self.frameButton.pack()
self.labelNro = tk.Label(self.frameNro, text="Nro Matrícula: ")
self.labelNome = tk.Label(self.frameNome, text="Nome: ")
self.labelNro.pack(side="left")
self.labelNome.pack(side="left")
self.inputNro = tk.Entry(self.frameNro, width=20)
self.inputNro.pack(side="left")
self.inputNome = tk.Entry(self.frameNome, width=20)
self.inputNome.pack(side="left")
self.buttonSubmit = tk.Button(self.frameButton, text="Enter")
self.buttonSubmit.pack(side="left")
self.buttonSubmit.bind("<Button>", controle.enterHandler)
self.buttonClear = tk.Button(self.frameButton, text="Clear")
self.buttonClear.pack(side="left")
self.buttonClear.bind("<Button>", controle.clearHandler)
self.buttonFecha = tk.Button(self.frameButton, text="Concluído")
self.buttonFecha.pack(side="left")
self.buttonFecha.bind("<Button>", controle.fechaHandler)
def mostraJanela(self, titulo, msg):
messagebox.showinfo(titulo, msg)
class LimiteMostraEstudantes():
def __init__(self, str):
messagebox.showinfo('Lista de alunos', str)
class LimiteConsultaEstudantes(tk.Toplevel):
def __init__(self, controle):
tk.Toplevel.__init__(self)
self.geometry('250x100')
self.title("Consultar estudante")
self.controle = controle
self.frameNro = tk.Frame(self)
self.frameButton = tk.Frame(self)
self.frameNro.pack()
self.frameButton.pack()
self.labelNro = tk.Label(self.frameNro, text='Nro Matrícula: ')
self.labelNro.pack(side='left')
self.inputNro = tk.Entry(self.frameNro, width=20)
self.inputNro.pack(side='left')
self.buttonConsulta = tk.Button(
self.frameButton, text='Consultar', font=('Negrito', 9))
self.buttonConsulta.pack(side='left')
self.buttonConsulta.bind("<Button>", controle.consultaHandler)
self.buttonConcluido = tk.Button(
self.frameButton, text='Concluído', font=('Negrito', 9))
self.buttonConcluido.pack(side='left')
self.buttonConcluido.bind("<Button>", controle.concluiHandler)
def mostraJanela(self, titulo, msg):
messagebox.showinfo(titulo, msg)
class CtrlEstudante():
def __init__(self):
self.listaEstudantes = [
Estudante('1001', 'Joao Santos'),
Estudante('1002', 'Marina Cintra'),
Estudante('1003', 'Felipe Reis'),
Estudante('1004', 'Ana Souza')
]
def getEstudante(self, nroMatric):
estRet = None
for est in self.listaEstudantes:
if est.getNroMatric() == nroMatric:
estRet = est
return estRet
def getListaNroMatric(self):
listaNro = []
for est in self.listaEstudantes:
listaNro.append(est.getNroMatric())
return listaNro
def insereEstudantes(self):
self.limiteIns = LimiteInsereEstudantes(self)
def mostraEstudantes(self):
if len(self.listaEstudantes) == 0:
str = "Não existem alunos cadastrados"
self.limiteLista = LimiteMostraEstudantes(str)
else:
str = "Nro Matric. -- Nome\n"
for est in self.listaEstudantes:
str += est.getNroMatric() + ' -- ' + est.getNome() + '\n'
self.limiteLista = LimiteMostraEstudantes(str)
def consultaEstudantes(self):
self.limiteCon = LimiteConsultaEstudantes(self)
def enterHandler(self, event):
try:
if len(self.limiteIns.inputNro.get()) == 0 or len(self.limiteIns.inputNome.get()) == 0:
raise FillAllFields()
for estud in self.listaEstudantes:
if estud.getNroMatric() == self.limiteIns.inputNro.get() and estud.getNome() == self.limiteIns.inputNome.get():
raise StudentAlreadyRegistered()
if estud.getNroMatric() == self.limiteIns.inputNro.get():
raise MatriculaRepeated()
except StudentAlreadyRegistered:
self.limiteIns.mostraJanela(
'Cuidado, atenção!', 'Estudante já cadastrado!')
except FillAllFields:
self.limiteIns.mostraJanela(
'Cuidado, atenção!', 'Por favor, preencha todos os campos!')
except MatriculaRepeated:
self.limiteIns.mostraJanela(
'Cuidado, atenção!', 'Número de matrícula já está existe!')
else:
nroMatric = self.limiteIns.inputNro.get()
nome = self.limiteIns.inputNome.get()
estudante = Estudante(nroMatric, nome)
self.listaEstudantes.append(estudante)
self.limiteIns.mostraJanela(
'Sucesso', 'Estudante cadastrado com sucesso')
self.clearHandler(event)
def clearHandler(self, event):
self.limiteIns.inputNro.delete(0, len(self.limiteIns.inputNro.get()))
self.limiteIns.inputNome.delete(0, len(self.limiteIns.inputNome.get()))
def fechaHandler(self, event):
self.limiteIns.destroy()
def consultaHandler(self, event):
try:
if len(self.limiteCon.inputNro.get()) == 0:
raise EmptyField()
except EmptyField:
str = 'Campo de matrícula vazio! Por favor, digite um número de matrícula!'
self.limiteCon.mostraJanela('Erro', str)
else:
nroMatric = self.limiteCon.inputNro.get()
est = self.getEstudante(nroMatric)
if est == None:
str = (f'Não existe aluno com a matrícula {nroMatric}')
self.limiteCon.mostraJanela('Aluno não encontrado', str)
self.limiteCon.inputNro.delete(
0, len(self.limiteCon.inputNro.get()))
else:
str = 'Informações do aluno consultado:\n'
str += 'Nro Matric. -- Nome\n'
str += est.getNroMatric() + ' -- ' + est.getNome()
self.limiteCon.mostraJanela('Aluno encontrado', str)
self.limiteCon.inputNro.delete(
0, len(self.limiteCon.inputNro.get()))
def concluiHandler(self, event):
self.limiteCon.destroy()
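# --- Hedged launcher sketch (not part of the original module) ---
# The original presumably opens these dialogs from a larger menu window; the
# bare Tk root below is an assumption so the file can be run on its own.
if __name__ == '__main__':
    root = tk.Tk()
    root.withdraw()  # hide the empty root window; only the Toplevel dialog is shown
    controle = CtrlEstudante()
    controle.insereEstudantes()  # or controle.consultaEstudantes() / controle.mostraEstudantes()
    root.mainloop()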
|
py | 1a3fa9af1f4161c505a160beb00fa2f71d237d03 | '''
Created on February 2, 2017
@author: optas
'''
import warnings
import os.path as osp
import tensorflow as tf
import numpy as np
from tflearn import is_training
from . in_out import create_dir, pickle_data, unpickle_data
from . general_utils import apply_augmentations, iterate_in_chunks
from . neural_net import Neural_Net, MODEL_SAVER_ID
class Configuration():
def __init__(self, n_input, encoder, decoder, encoder_args={}, decoder_args={},
training_epochs=200, batch_size=10, learning_rate=0.001, denoising=False,
saver_step=None, train_dir=None, z_rotate=False, loss='chamfer', gauss_augment=None,
saver_max_to_keep=None, loss_display_step=1, debug=False,
n_z=None, n_output=None, latent_vs_recon=1.0, consistent_io=None, completing=False):
# Parameters for any AE
self.n_input = n_input
self.is_denoising = denoising
self.is_completing = completing
self.loss = loss.lower()
self.decoder = decoder
self.encoder = encoder
self.encoder_args = encoder_args
self.decoder_args = decoder_args
# Training related parameters
self.batch_size = batch_size
self.learning_rate = learning_rate
self.loss_display_step = loss_display_step
self.saver_step = saver_step
self.train_dir = train_dir
self.gauss_augment = gauss_augment
self.z_rotate = z_rotate
self.saver_max_to_keep = saver_max_to_keep
self.training_epochs = training_epochs
self.debug = debug
# Used in VAE
self.latent_vs_recon = np.array([latent_vs_recon], dtype=np.float32)[0]
self.n_z = n_z
# Used in AP
if n_output is None:
self.n_output = n_input
else:
self.n_output = n_output
self.consistent_io = consistent_io
def exists_and_is_not_none(self, attribute):
return hasattr(self, attribute) and getattr(self, attribute) is not None
def __str__(self):
keys = self.__dict__.keys()
vals = self.__dict__.values()
index = np.argsort(keys)
res = ''
for i in index:
if callable(vals[i]):
v = vals[i].__name__
else:
v = str(vals[i])
res += '%30s: %s\n' % (str(keys[i]), v)
return res
def save(self, file_name):
pickle_data(file_name + '.pickle', self)
with open(file_name + '.txt', 'w') as fout:
fout.write(self.__str__())
@staticmethod
def load(file_name):
return unpickle_data(file_name + '.pickle').next()
class AutoEncoder(Neural_Net):
'''Basis class for a Neural Network that implements an Auto-Encoder in TensorFlow.
'''
def __init__(self, name, graph, configuration):
Neural_Net.__init__(self, name, graph)
self.is_denoising = configuration.is_denoising
self.is_completing = configuration.is_completing
self.n_input = configuration.n_input
self.n_output = configuration.n_output
in_shape = [None] + self.n_input
out_shape = [None] + self.n_output
with tf.variable_scope(name):
self.x = tf.placeholder(tf.float32, in_shape)
if self.is_denoising:
self.gt = tf.placeholder(tf.float32, out_shape)
elif self.is_completing:
self.gt = tf.placeholder(tf.float32, out_shape)
else:
self.gt = self.x
def partial_fit(self, X, GT=None):
'''Trains the model with mini-batches of input data.
If GT is not None, then the reconstruction loss compares the output of the net that is fed X, with the GT.
This can be useful when training for instance a denoising auto-encoder.
Returns:
The loss of the mini-batch.
The reconstructed (output) point-clouds.
'''
is_training(True, session=self.sess)
try:
if GT is not None:
_, loss, recon = self.sess.run((self.train_step, self.loss, self.x_reconstr), feed_dict={self.x: X, self.gt: GT})
else:
_, loss, recon = self.sess.run((self.train_step, self.loss, self.x_reconstr), feed_dict={self.x: X})
is_training(False, session=self.sess)
except Exception:
raise
finally:
is_training(False, session=self.sess)
return recon, loss
def reconstruct(self, X, GT=None, compute_loss=True):
'''Use AE to reconstruct given data.
GT will be used to measure the loss (e.g., if X is a noisy version of the GT)'''
if compute_loss:
loss = self.loss
else:
loss = self.no_op
if GT is None:
return self.sess.run((self.x_reconstr, loss), feed_dict={self.x: X})
else:
return self.sess.run((self.x_reconstr, loss), feed_dict={self.x: X, self.gt: GT})
def transform(self, X):
'''Transform data by mapping it into the latent space.'''
return self.sess.run(self.z, feed_dict={self.x: X})
def interpolate(self, x, y, steps):
''' Interpolate between and x and y input vectors in latent space.
x, y np.arrays of size (n_points, dim_embedding).
'''
in_feed = np.vstack((x, y))
z1, z2 = self.transform(in_feed.reshape([2] + self.n_input))
all_z = np.zeros((steps + 2, len(z1)))
for i, alpha in enumerate(np.linspace(0, 1, steps + 2)):
all_z[i, :] = (alpha * z2) + ((1.0 - alpha) * z1)
return self.sess.run((self.x_reconstr), {self.z: all_z})
def decode(self, z):
if np.ndim(z) == 1: # single example
z = np.expand_dims(z, 0)
return self.sess.run((self.x_reconstr), {self.z: z})
def train(self, train_data, configuration, log_file=None, held_out_data=None):
c = configuration
stats = []
if c.saver_step is not None:
create_dir(c.train_dir)
for _ in xrange(c.training_epochs):
loss, duration = self._single_epoch_train(train_data, c)
epoch = int(self.sess.run(self.increment_epoch))
stats.append((epoch, loss, duration))
if epoch % c.loss_display_step == 0:
print("Epoch:", '%04d' % (epoch), 'training time (minutes)=', "{:.4f}".format(duration / 60.0), "loss=", "{:.9f}".format(loss))
if log_file is not None:
log_file.write('%04d\t%.9f\t%.4f\n' % (epoch, loss, duration / 60.0))
# Save the models checkpoint periodically.
if c.saver_step is not None and (epoch % c.saver_step == 0 or epoch - 1 == 0):
checkpoint_path = osp.join(c.train_dir, MODEL_SAVER_ID)
self.saver.save(self.sess, checkpoint_path, global_step=self.epoch)
if c.exists_and_is_not_none('summary_step') and (epoch % c.summary_step == 0 or epoch - 1 == 0):
summary = self.sess.run(self.merged_summaries)
self.train_writer.add_summary(summary, epoch)
if held_out_data is not None and c.exists_and_is_not_none('held_out_step') and (epoch % c.held_out_step == 0):
loss, duration = self._single_epoch_train(held_out_data, c, only_fw=True)
print("Held Out Data :", 'forward time (minutes)=', "{:.4f}".format(duration / 60.0), "loss=", "{:.9f}".format(loss))
if log_file is not None:
log_file.write('On Held_Out: %04d\t%.9f\t%.4f\n' % (epoch, loss, duration / 60.0))
return stats
def evaluate(self, in_data, configuration, ret_pre_augmentation=False):
n_examples = in_data.num_examples
data_loss = 0.
pre_aug = None
if self.is_denoising:
original_data, ids, feed_data = in_data.full_epoch_data(shuffle=False)
if ret_pre_augmentation:
pre_aug = feed_data.copy()
if feed_data is None:
feed_data = original_data
feed_data = apply_augmentations(feed_data, configuration) # This is a new copy of the batch.
        elif self.is_completing:
            # TODO: completion-mode data loading is not implemented yet
            raise NotImplementedError('completing mode is not implemented in evaluate()')
        else:
original_data, ids, _ = in_data.full_epoch_data(shuffle=False)
feed_data = apply_augmentations(original_data, configuration)
b = configuration.batch_size
reconstructions = np.zeros([n_examples] + self.n_output)
for i in xrange(0, n_examples, b):
if self.is_denoising:
reconstructions[i:i + b], loss = self.reconstruct(feed_data[i:i + b], original_data[i:i + b])
            elif self.is_completing:
                # TODO: completion-mode reconstruction is not implemented yet
                raise NotImplementedError('completing mode is not implemented in evaluate()')
            else:
reconstructions[i:i + b], loss = self.reconstruct(feed_data[i:i + b])
# Compute average loss
data_loss += (loss * len(reconstructions[i:i + b]))
data_loss /= float(n_examples)
if pre_aug is not None:
return reconstructions, data_loss, np.squeeze(feed_data), ids, np.squeeze(original_data), pre_aug
else:
return reconstructions, data_loss, np.squeeze(feed_data), ids, np.squeeze(original_data)
def embedding_at_tensor(self, dataset, conf, feed_original=True, apply_augmentation=False, tensor_name='bottleneck'):
'''
Observation: the NN-neighborhoods seem more reasonable when we do not apply the augmentation.
Observation: the next layer after latent (z) might be something interesting.
tensor_name: e.g. model.name + '_1/decoder_fc_0/BiasAdd:0'
'''
batch_size = conf.batch_size
original, ids, noise = dataset.full_epoch_data(shuffle=False)
if feed_original:
feed = original
else:
feed = noise
if feed is None:
feed = original
feed_data = feed
if apply_augmentation:
feed_data = apply_augmentations(feed, conf)
embedding = []
if tensor_name == 'bottleneck':
for b in iterate_in_chunks(feed_data, batch_size):
embedding.append(self.transform(b.reshape([len(b)] + conf.n_input)))
else:
embedding_tensor = self.graph.get_tensor_by_name(tensor_name)
for b in iterate_in_chunks(feed_data, batch_size):
codes = self.sess.run(embedding_tensor, feed_dict={self.x: b.reshape([len(b)] + conf.n_input)})
embedding.append(codes)
embedding = np.vstack(embedding)
return feed, embedding, ids
def get_latent_codes(self, pclouds, batch_size=100):
''' Convenience wrapper of self.transform to get the latent (bottle-neck) codes for a set of input point
clouds.
Args:
pclouds (N, K, 3) numpy array of N point clouds with K points each.
'''
latent_codes = []
idx = np.arange(len(pclouds))
for b in iterate_in_chunks(idx, batch_size):
latent_codes.append(self.transform(pclouds[b]))
return np.vstack(latent_codes)
|
py | 1a3fa9fc75cca547ac5e3fdcbf0c6f59b7d4bb04 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
from turtle import Turtle  # import the Turtle class from the turtle module
import turtle
p = Turtle()
p.speed(2)  # set the drawing speed
p.pensize(3)  # set the line width
p.color('black', 'yellow')  # pen colour and fill colour
p.begin_fill()  # start the fill
for i in range(5):  # five lines
    p.fd(200)  # move forward 200
    p.right(144)  # turn right 144 degrees (same result as turning left 216 degrees)
p.end_fill()  # end the fill
turtle.done()
|
py | 1a3faaa4b3f55ee2e7616253d82fc178a8bbf7f3 | from .base_service import BaseService
from src.schemas.classes import ClasseOut, ClasseIn, SectionIn, SectionOut, LevelOut, LevelIn
from src.models.classes import Section, Level, Classe
class SectionService(BaseService):
model_class: Section = Section
schema_class_in: SectionIn = SectionIn
schema_class_out: SectionOut = SectionOut
class LevelService(BaseService):
model_class: Level = Level
schema_class_in: LevelIn = LevelIn
schema_class_out: LevelOut = LevelOut
class ClasseService(BaseService):
model_class: Classe = Classe
schema_class_in: ClasseIn = ClasseIn
schema_class_out: ClasseOut = ClasseOut
|
py | 1a3faac2e76963ea37d64045ecfddc1d6f3dbe84 | # Functions
# First-class Object: In Python, everything is treated as an object, including all the data types and functions.
# Therefore, a function is also known as a first-class object and can be passed around as an argument.
# Inner function: It is possible to define functions inside a function. Such a function is called an inner function.
print('First-class Object')
def func1(name):
return f"Hello{name}"
def func2(name):
return f"{name}, how you doin?"
def func3(func4):
return func4('Dear learner')
print(func3(func1))
print(func3(func2))
print()
print('Inner function')
def func():
print("first function")
def func1():
print('first child function')
def func2():
print("second child function")
func2()
func1()
func()
def functi(n):
def functi1():
return 'edgar'
def functi2():
return 'python'
if n == 1:
return functi1
else:
return functi2
a = functi(1)
b = functi(2)
print(a())
print(b()) |
py | 1a3fab9999a6ce90e8f062036c0243e1c77868a6 | qtd = int(input())
i = 1
while(i <= qtd):
if(i < qtd):
print("Ho ", end = "")
elif(i == qtd):
print("Ho!")
i = i + 1 |
py | 1a3fad9e15e9d6306d12aad3be9f4240e9fdfb03 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The PyBuilder reactor module.
Operates a build process by instrumenting an ExecutionManager from the
execution module.
"""
import imp
import os.path
from pybuilder.core import (TASK_ATTRIBUTE, DEPENDS_ATTRIBUTE,
DESCRIPTION_ATTRIBUTE, AFTER_ATTRIBUTE,
BEFORE_ATTRIBUTE, INITIALIZER_ATTRIBUTE,
ACTION_ATTRIBUTE, ONLY_ONCE_ATTRIBUTE,
Project, NAME_ATTRIBUTE, ENVIRONMENTS_ATTRIBUTE)
from pybuilder.errors import PyBuilderException, ProjectValidationFailedException
from pybuilder.pluginloader import (BuiltinPluginLoader,
DispatchingPluginLoader,
ThirdPartyPluginLoader,
DownloadingPluginLoader)
from pybuilder.utils import as_list
from pybuilder.execution import Action, Initializer, Task
class BuildSummary(object):
def __init__(self, project, task_execution_summaries):
self.project = project
self.task_summaries = task_execution_summaries
class Reactor(object):
_current_instance = None
@staticmethod
def current_instance():
return Reactor._current_instance
def __init__(self, logger, execution_manager, plugin_loader=None):
self.logger = logger
self.execution_manager = execution_manager
if not plugin_loader:
builtin_plugin_loader = BuiltinPluginLoader(self.logger)
installed_thirdparty_plugin_loader = ThirdPartyPluginLoader(self.logger)
downloading_thirdparty_plugin_loader = DownloadingPluginLoader(self.logger)
self.plugin_loader = DispatchingPluginLoader(
self.logger, builtin_plugin_loader, installed_thirdparty_plugin_loader, downloading_thirdparty_plugin_loader)
else:
self.plugin_loader = plugin_loader
self._plugins = []
self.project = None
def require_plugin(self, plugin):
if plugin not in self._plugins:
try:
self._plugins.append(plugin)
self.import_plugin(plugin)
except: # NOQA
self._plugins.remove(plugin)
raise
def get_plugins(self):
return self._plugins
def get_tasks(self):
return self.execution_manager.tasks
def validate_project(self):
validation_messages = self.project.validate()
if len(validation_messages) > 0:
raise ProjectValidationFailedException(validation_messages)
def prepare_build(self,
property_overrides=None,
project_directory=".",
project_descriptor="build.py"):
if not property_overrides:
property_overrides = {}
Reactor._current_instance = self
project_directory, project_descriptor = self.verify_project_directory(
project_directory, project_descriptor)
self.logger.debug("Loading project module from %s", project_descriptor)
self.project = Project(basedir=project_directory)
self.project_module = self.load_project_module(project_descriptor)
self.apply_project_attributes()
self.override_properties(property_overrides)
self.logger.debug("Have loaded plugins %s", ", ".join(self._plugins))
self.collect_tasks_and_actions_and_initializers(self.project_module)
self.execution_manager.resolve_dependencies()
def build(self, tasks=None, environments=None):
if not tasks:
tasks = []
if not environments:
environments = []
Reactor._current_instance = self
if environments:
self.logger.info(
"Activated environments: %s", ", ".join(environments))
self.execution_manager.execute_initializers(
environments, logger=self.logger, project=self.project)
self.log_project_properties()
self.validate_project()
tasks = as_list(tasks)
if not len(tasks):
if self.project.default_task:
tasks += as_list(self.project.default_task)
else:
raise PyBuilderException("No default task given.")
execution_plan = self.execution_manager.build_execution_plan(tasks)
self.logger.debug("Execution plan is %s", ", ".join(
[task.name for task in execution_plan]))
self.logger.info(
"Building %s version %s", self.project.name, self.project.version)
self.logger.info("Executing build in %s", self.project.basedir)
if len(tasks) == 1:
self.logger.info("Going to execute task %s", tasks[0])
else:
list_of_tasks = ", ".join(tasks)
self.logger.info("Going to execute tasks: %s", list_of_tasks)
task_execution_summaries = self.execution_manager.execute_execution_plan(
execution_plan,
logger=self.logger,
project=self.project,
reactor=self)
return BuildSummary(self.project, task_execution_summaries)
def execute_task(self, task_name):
execution_plan = self.execution_manager.build_execution_plan(task_name)
self.execution_manager.execute_execution_plan(execution_plan,
logger=self.logger,
project=self.project,
reactor=self)
def override_properties(self, property_overrides):
for property_override in property_overrides:
self.project.set_property(
property_override, property_overrides[property_override])
def log_project_properties(self):
formatted = ""
for key in sorted(self.project.properties):
formatted += "\n%40s : %s" % (key, self.project.get_property(key))
self.logger.debug("Project properties: %s", formatted)
def import_plugin(self, plugin):
self.logger.debug("Loading plugin '%s'", plugin)
plugin_module = self.plugin_loader.load_plugin(self.project, plugin)
self.collect_tasks_and_actions_and_initializers(plugin_module)
def collect_tasks_and_actions_and_initializers(self, project_module):
for name in dir(project_module):
candidate = getattr(project_module, name)
if hasattr(candidate, NAME_ATTRIBUTE):
name = getattr(candidate, NAME_ATTRIBUTE)
elif hasattr(candidate, "__name__"):
name = candidate.__name__
description = getattr(candidate, DESCRIPTION_ATTRIBUTE) if hasattr(
candidate, DESCRIPTION_ATTRIBUTE) else ""
if hasattr(candidate, TASK_ATTRIBUTE) and getattr(candidate, TASK_ATTRIBUTE):
dependencies = getattr(candidate, DEPENDS_ATTRIBUTE) if hasattr(
candidate, DEPENDS_ATTRIBUTE) else None
self.logger.debug("Found task %s", name)
self.execution_manager.register_task(
Task(name, candidate, dependencies, description))
elif hasattr(candidate, ACTION_ATTRIBUTE) and getattr(candidate, ACTION_ATTRIBUTE):
before = getattr(candidate, BEFORE_ATTRIBUTE) if hasattr(
candidate, BEFORE_ATTRIBUTE) else None
after = getattr(candidate, AFTER_ATTRIBUTE) if hasattr(
candidate, AFTER_ATTRIBUTE) else None
only_once = False
if hasattr(candidate, ONLY_ONCE_ATTRIBUTE):
only_once = getattr(candidate, ONLY_ONCE_ATTRIBUTE)
self.logger.debug("Found action %s", name)
self.execution_manager.register_action(
Action(name, candidate, before, after, description, only_once))
elif hasattr(candidate, INITIALIZER_ATTRIBUTE) and getattr(candidate, INITIALIZER_ATTRIBUTE):
environments = []
if hasattr(candidate, ENVIRONMENTS_ATTRIBUTE):
environments = getattr(candidate, ENVIRONMENTS_ATTRIBUTE)
self.execution_manager.register_initializer(
Initializer(name, candidate, environments, description))
def apply_project_attributes(self):
self.propagate_property("name")
self.propagate_property("version")
self.propagate_property("default_task")
self.propagate_property("summary")
self.propagate_property("home_page")
self.propagate_property("description")
self.propagate_property("authors")
self.propagate_property("license")
self.propagate_property("url")
def propagate_property(self, property):
if hasattr(self.project_module, property):
value = getattr(self.project_module, property)
setattr(self.project, property, value)
@staticmethod
def load_project_module(project_descriptor):
try:
return imp.load_source("build", project_descriptor)
except ImportError as e:
raise PyBuilderException(
"Error importing project descriptor %s: %s" % (project_descriptor, e))
@staticmethod
def verify_project_directory(project_directory, project_descriptor):
project_directory = os.path.abspath(project_directory)
if not os.path.exists(project_directory):
raise PyBuilderException(
"Project directory does not exist: %s", project_directory)
if not os.path.isdir(project_directory):
raise PyBuilderException(
"Project directory is not a directory: %s", project_directory)
project_descriptor_full_path = os.path.join(
project_directory, project_descriptor)
if not os.path.exists(project_descriptor_full_path):
raise PyBuilderException(
"Project directory does not contain descriptor file: %s",
project_descriptor_full_path)
if not os.path.isfile(project_descriptor_full_path):
raise PyBuilderException(
"Project descriptor is not a file: %s", project_descriptor_full_path)
return project_directory, project_descriptor_full_path
|
py | 1a3fae817526b6f12de4f77fc5168b683333ab40 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.devtools.testing_v1.types import test_execution
from .base import TestExecutionServiceTransport, DEFAULT_CLIENT_INFO
class TestExecutionServiceGrpcTransport(TestExecutionServiceTransport):
"""gRPC backend transport for TestExecutionService.
A service for requesting test executions and querying their
status.
This service is part of Firebase Test Lab. To learn about how to
use the product, and how to integrate it with your system,
visit https://firebase.google.com/docs/test-lab.
Each test execution will wait for available capacity. It will
then be invoked as described. The test may be invoked multiple
times if an infrastructure failure is detected. Results and
other files generated by the test will be stored in an external
storage system.
The TestExecutionService models this behavior using two resource
types:
- TestMatrix: a group of one or more TestExecutions, built by
taking a product of values over a pre-defined set of axes. In
the case of Android Tests, for example, device model and OS
version are two axes of the matrix.
- TestExecution: a single execution of one or more test targets
on a single device. These are created automatically when a
TestMatrix is created.
This service returns any error codes from the canonical error
space (i.e. google.rpc.Code). The errors which may be returned
are specified on each method. In addition, any method may return
UNAVAILABLE or INTERNAL.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'testing.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'testing.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def create_test_matrix(self) -> Callable[
[test_execution.CreateTestMatrixRequest],
test_execution.TestMatrix]:
r"""Return a callable for the create test matrix method over gRPC.
Creates and runs a matrix of tests according to the given
specifications. Unsupported environments will be returned in the
state UNSUPPORTED. A test matrix is limited to use at most 2000
devices in parallel.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to write to
project
- INVALID_ARGUMENT - if the request is malformed or if the
matrix tries to use too many simultaneous devices.
Returns:
Callable[[~.CreateTestMatrixRequest],
~.TestMatrix]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_test_matrix' not in self._stubs:
self._stubs['create_test_matrix'] = self.grpc_channel.unary_unary(
'/google.devtools.testing.v1.TestExecutionService/CreateTestMatrix',
request_serializer=test_execution.CreateTestMatrixRequest.serialize,
response_deserializer=test_execution.TestMatrix.deserialize,
)
return self._stubs['create_test_matrix']
@property
def get_test_matrix(self) -> Callable[
[test_execution.GetTestMatrixRequest],
test_execution.TestMatrix]:
r"""Return a callable for the get test matrix method over gRPC.
Checks the status of a test matrix.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to read
project
- INVALID_ARGUMENT - if the request is malformed
- NOT_FOUND - if the Test Matrix does not exist
Returns:
Callable[[~.GetTestMatrixRequest],
~.TestMatrix]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_test_matrix' not in self._stubs:
self._stubs['get_test_matrix'] = self.grpc_channel.unary_unary(
'/google.devtools.testing.v1.TestExecutionService/GetTestMatrix',
request_serializer=test_execution.GetTestMatrixRequest.serialize,
response_deserializer=test_execution.TestMatrix.deserialize,
)
return self._stubs['get_test_matrix']
@property
def cancel_test_matrix(self) -> Callable[
[test_execution.CancelTestMatrixRequest],
test_execution.CancelTestMatrixResponse]:
r"""Return a callable for the cancel test matrix method over gRPC.
Cancels unfinished test executions in a test matrix. This call
returns immediately and cancellation proceeds asynchronously. If
the matrix is already final, this operation will have no effect.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to read
project
- INVALID_ARGUMENT - if the request is malformed
- NOT_FOUND - if the Test Matrix does not exist
Returns:
Callable[[~.CancelTestMatrixRequest],
~.CancelTestMatrixResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'cancel_test_matrix' not in self._stubs:
self._stubs['cancel_test_matrix'] = self.grpc_channel.unary_unary(
'/google.devtools.testing.v1.TestExecutionService/CancelTestMatrix',
request_serializer=test_execution.CancelTestMatrixRequest.serialize,
response_deserializer=test_execution.CancelTestMatrixResponse.deserialize,
)
return self._stubs['cancel_test_matrix']
def close(self):
self.grpc_channel.close()
__all__ = (
'TestExecutionServiceGrpcTransport',
)
|
py | 1a3faee654401847b7d7661298ed10dea3b9f163 | ##############################################################
# #
# Program Code for Fred Inmoov #
# Of the Cyber_One YouTube Channel #
# https://www.youtube.com/cyber_one #
# #
# This is version 5 #
# Divided up into sub programs #
# Coded for the Nixie Version of MyRobotLab. #
# #
# Running on MyRobotLab (MRL) http://myrobotlab.org/ #
# Fred is a modified InMoov robot, you can find all the #
# original files on the Inmoov web site. http://inmoov.fr/ #
# #
# 7.Servo_LeftArm_Config.py #
# This is where the configuration settings for the Left #
# Arm Servos are located. #
# #
##############################################################
print "Creating the Servo Left Arm Config"
# The Left OmoPlate lifts the arm at the shoulder out away
# from the body. This servo is located in the torso cavity
# and drives a rotary piston setup in the official InMoov build.
EnableLeftOmoPlate = False
LeftOmoPlateAttachment = "Back" # "arduioLeft"
LeftOmoPlatePin = 15 # 11
LeftOmoPlateMinPos = 0 # 10
LeftOmoPlateMaxPos = 180 # 80
LeftOmoPlateMaxSpeed = 120 #
# The left shoulder is a worm drive setup that pitches the
# left arm up in a forward direction.
EnableLeftShoulder = False
LeftShoulderAttachment = "Back" # "arduioLeft"
LeftShoulderPin = 14 # 10
LeftShoulderMinPos = 0 # 0
LeftShoulderMaxPos = 180 # 180
LeftShoulderMaxSpeed = 120 #
# This servo is located near the shoulder servo and rotates
# the arm
EnableLeftRotate = False
LeftRotateAttachment = "Back" # "arduioLeft"
LeftRotatePin = 13 # 9
LeftRotateMinPos = 0 # 40
LeftRotateMaxPin = 180 # 180
LeftRotateMaxSpeed = 120 #
# This servo is located in the bicep and operates the elbow.
EnableLeftBicep = False
LeftBicepAttachment = "LeftArm" # "arduioLeft"
LeftBicepPin = 1 # 8
LeftBicepMinPos = 0 # 0
LeftBicepMaxPos = 180 # 90
LeftBicepMaxSpeed = 120 #
|
py | 1a3faf690aa3e414d170a133dbbd1c6b54f01823 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AlertNotificationListArgs',
'AlertQueryListArgs',
'EtlEtlSinkArgs',
'OssShipperParquetConfigArgs',
'StoreEncryptConfArgs',
'StoreEncryptConfUserCmkInfoArgs',
'StoreIndexFieldSearchArgs',
'StoreIndexFieldSearchJsonKeyArgs',
'StoreIndexFullTextArgs',
'StoreShardArgs',
]
@pulumi.input_type
class AlertNotificationListArgs:
def __init__(__self__, *,
content: pulumi.Input[str],
type: pulumi.Input[str],
email_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
mobile_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_uri: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] content: Notice content of alarm.
:param pulumi.Input[str] type: Notification type. support Email, SMS, DingTalk, MessageCenter.
:param pulumi.Input[Sequence[pulumi.Input[str]]] email_lists: Email address list.
:param pulumi.Input[Sequence[pulumi.Input[str]]] mobile_lists: SMS sending mobile number.
:param pulumi.Input[str] service_uri: Request address.
"""
pulumi.set(__self__, "content", content)
pulumi.set(__self__, "type", type)
if email_lists is not None:
pulumi.set(__self__, "email_lists", email_lists)
if mobile_lists is not None:
pulumi.set(__self__, "mobile_lists", mobile_lists)
if service_uri is not None:
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter
def content(self) -> pulumi.Input[str]:
"""
Notice content of alarm.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: pulumi.Input[str]):
pulumi.set(self, "content", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Notification type. support Email, SMS, DingTalk, MessageCenter.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="emailLists")
def email_lists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Email address list.
"""
return pulumi.get(self, "email_lists")
@email_lists.setter
def email_lists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "email_lists", value)
@property
@pulumi.getter(name="mobileLists")
def mobile_lists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
SMS sending mobile number.
"""
return pulumi.get(self, "mobile_lists")
@mobile_lists.setter
def mobile_lists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "mobile_lists", value)
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> Optional[pulumi.Input[str]]:
"""
Request address.
"""
return pulumi.get(self, "service_uri")
@service_uri.setter
def service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_uri", value)
@pulumi.input_type
class AlertQueryListArgs:
def __init__(__self__, *,
chart_title: pulumi.Input[str],
end: pulumi.Input[str],
logstore: pulumi.Input[str],
query: pulumi.Input[str],
start: pulumi.Input[str],
time_span_type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] chart_title: chart title
:param pulumi.Input[str] end: end time. example: 20s.
:param pulumi.Input[str] logstore: Query logstore
:param pulumi.Input[str] query: query corresponding to chart. example: * AND aliyun.
:param pulumi.Input[str] start: begin time. example: -60s.
:param pulumi.Input[str] time_span_type: default Custom. No need to configure this parameter.
"""
pulumi.set(__self__, "chart_title", chart_title)
pulumi.set(__self__, "end", end)
pulumi.set(__self__, "logstore", logstore)
pulumi.set(__self__, "query", query)
pulumi.set(__self__, "start", start)
if time_span_type is not None:
pulumi.set(__self__, "time_span_type", time_span_type)
@property
@pulumi.getter(name="chartTitle")
def chart_title(self) -> pulumi.Input[str]:
"""
chart title
"""
return pulumi.get(self, "chart_title")
@chart_title.setter
def chart_title(self, value: pulumi.Input[str]):
pulumi.set(self, "chart_title", value)
@property
@pulumi.getter
def end(self) -> pulumi.Input[str]:
"""
end time. example: 20s.
"""
return pulumi.get(self, "end")
@end.setter
def end(self, value: pulumi.Input[str]):
pulumi.set(self, "end", value)
@property
@pulumi.getter
def logstore(self) -> pulumi.Input[str]:
"""
Query logstore
"""
return pulumi.get(self, "logstore")
@logstore.setter
def logstore(self, value: pulumi.Input[str]):
pulumi.set(self, "logstore", value)
@property
@pulumi.getter
def query(self) -> pulumi.Input[str]:
"""
query corresponding to chart. example: * AND aliyun.
"""
return pulumi.get(self, "query")
@query.setter
def query(self, value: pulumi.Input[str]):
pulumi.set(self, "query", value)
@property
@pulumi.getter
def start(self) -> pulumi.Input[str]:
"""
begin time. example: -60s.
"""
return pulumi.get(self, "start")
@start.setter
def start(self, value: pulumi.Input[str]):
pulumi.set(self, "start", value)
@property
@pulumi.getter(name="timeSpanType")
def time_span_type(self) -> Optional[pulumi.Input[str]]:
"""
default Custom. No need to configure this parameter.
"""
return pulumi.get(self, "time_span_type")
@time_span_type.setter
def time_span_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_span_type", value)
@pulumi.input_type
class EtlEtlSinkArgs:
def __init__(__self__, *,
endpoint: pulumi.Input[str],
logstore: pulumi.Input[str],
name: pulumi.Input[str],
project: pulumi.Input[str],
access_key_id: Optional[pulumi.Input[str]] = None,
access_key_secret: Optional[pulumi.Input[str]] = None,
kms_encrypted_access_key_id: Optional[pulumi.Input[str]] = None,
kms_encrypted_access_key_secret: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] endpoint: Delivery target logstore region.
:param pulumi.Input[str] logstore: Delivery target logstore.
:param pulumi.Input[str] name: Delivery target name.
:param pulumi.Input[str] project: The project where the target logstore is delivered.
:param pulumi.Input[str] access_key_id: Delivery target logstore access key id.
:param pulumi.Input[str] access_key_secret: Delivery target logstore access key secret.
        :param pulumi.Input[str] kms_encrypted_access_key_id: A KMS-encrypted access key ID used by the log ETL job. If `access_key_id` is filled in, this field will be ignored.
        :param pulumi.Input[str] kms_encrypted_access_key_secret: A KMS-encrypted access key secret used by the log ETL job. If `access_key_secret` is filled in, this field will be ignored.
        :param pulumi.Input[str] role_arn: STS role info for the delivery target logstore. Fill in at most one of `role_arn` and `(access_key_id, access_key_secret)`. If neither is filled in, you must fill in `(kms_encrypted_access_key_id, kms_encrypted_access_key_secret, kms_encryption_access_key_id_context, kms_encryption_access_key_secret_context)` so that KMS can be used to obtain the key pair.
        :param pulumi.Input[str] type: ETL sink type. The default value is AliyunLOG.
"""
pulumi.set(__self__, "endpoint", endpoint)
pulumi.set(__self__, "logstore", logstore)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "project", project)
if access_key_id is not None:
pulumi.set(__self__, "access_key_id", access_key_id)
if access_key_secret is not None:
pulumi.set(__self__, "access_key_secret", access_key_secret)
if kms_encrypted_access_key_id is not None:
pulumi.set(__self__, "kms_encrypted_access_key_id", kms_encrypted_access_key_id)
if kms_encrypted_access_key_secret is not None:
pulumi.set(__self__, "kms_encrypted_access_key_secret", kms_encrypted_access_key_secret)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def endpoint(self) -> pulumi.Input[str]:
"""
Delivery target logstore region.
"""
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: pulumi.Input[str]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter
def logstore(self) -> pulumi.Input[str]:
"""
Delivery target logstore.
"""
return pulumi.get(self, "logstore")
@logstore.setter
def logstore(self, value: pulumi.Input[str]):
pulumi.set(self, "logstore", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Delivery target name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The project where the target logstore is delivered.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="accessKeyId")
def access_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Delivery target logstore access key id.
"""
return pulumi.get(self, "access_key_id")
@access_key_id.setter
def access_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_key_id", value)
@property
@pulumi.getter(name="accessKeySecret")
def access_key_secret(self) -> Optional[pulumi.Input[str]]:
"""
Delivery target logstore access key secret.
"""
return pulumi.get(self, "access_key_secret")
@access_key_secret.setter
def access_key_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_key_secret", value)
@property
@pulumi.getter(name="kmsEncryptedAccessKeyId")
def kms_encrypted_access_key_id(self) -> Optional[pulumi.Input[str]]:
"""
        A KMS-encrypted access key ID used by the log ETL job. If `access_key_id` is filled in, this field will be ignored.
"""
return pulumi.get(self, "kms_encrypted_access_key_id")
@kms_encrypted_access_key_id.setter
def kms_encrypted_access_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_encrypted_access_key_id", value)
@property
@pulumi.getter(name="kmsEncryptedAccessKeySecret")
def kms_encrypted_access_key_secret(self) -> Optional[pulumi.Input[str]]:
"""
        A KMS-encrypted access key secret used by the log ETL job. If `access_key_secret` is filled in, this field will be ignored.
"""
return pulumi.get(self, "kms_encrypted_access_key_secret")
@kms_encrypted_access_key_secret.setter
def kms_encrypted_access_key_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_encrypted_access_key_secret", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
        STS role info for the delivery target logstore. Fill in at most one of `role_arn` and `(access_key_id, access_key_secret)`. If neither is filled in, you must fill in `(kms_encrypted_access_key_id, kms_encrypted_access_key_secret, kms_encryption_access_key_id_context, kms_encryption_access_key_secret_context)` so that KMS can be used to obtain the key pair.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
        ETL sink type. The default value is AliyunLOG.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class OssShipperParquetConfigArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
type: pulumi.Input[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class StoreEncryptConfArgs:
def __init__(__self__, *,
enable: Optional[pulumi.Input[bool]] = None,
encrypt_type: Optional[pulumi.Input[str]] = None,
user_cmk_info: Optional[pulumi.Input['StoreEncryptConfUserCmkInfoArgs']] = None):
"""
        :param pulumi.Input[bool] enable: Whether to enable encryption. Default `false`.
        :param pulumi.Input[str] encrypt_type: Supported encryption type; only `default(AES)` and `m4` are supported.
        :param pulumi.Input['StoreEncryptConfUserCmkInfoArgs'] user_cmk_info: User bring-your-own-key (BYOK) encryption. [Refer to details](https://www.alibabacloud.com/help/zh/doc-detail/187853.htm?spm=a2c63.p38356.b99.673.cafa2b38qBskFV)
"""
if enable is not None:
pulumi.set(__self__, "enable", enable)
if encrypt_type is not None:
pulumi.set(__self__, "encrypt_type", encrypt_type)
if user_cmk_info is not None:
pulumi.set(__self__, "user_cmk_info", user_cmk_info)
@property
@pulumi.getter
def enable(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether to enable encryption. Default `false`.
"""
return pulumi.get(self, "enable")
@enable.setter
def enable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable", value)
@property
@pulumi.getter(name="encryptType")
def encrypt_type(self) -> Optional[pulumi.Input[str]]:
"""
        Supported encryption type; only `default(AES)` and `m4` are supported.
"""
return pulumi.get(self, "encrypt_type")
@encrypt_type.setter
def encrypt_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encrypt_type", value)
@property
@pulumi.getter(name="userCmkInfo")
def user_cmk_info(self) -> Optional[pulumi.Input['StoreEncryptConfUserCmkInfoArgs']]:
"""
        User bring-your-own-key (BYOK) encryption. [Refer to details](https://www.alibabacloud.com/help/zh/doc-detail/187853.htm?spm=a2c63.p38356.b99.673.cafa2b38qBskFV)
"""
return pulumi.get(self, "user_cmk_info")
@user_cmk_info.setter
def user_cmk_info(self, value: Optional[pulumi.Input['StoreEncryptConfUserCmkInfoArgs']]):
pulumi.set(self, "user_cmk_info", value)
@pulumi.input_type
class StoreEncryptConfUserCmkInfoArgs:
def __init__(__self__, *,
arn: pulumi.Input[str],
cmk_key_id: pulumi.Input[str],
region_id: pulumi.Input[str]):
"""
:param pulumi.Input[str] arn: role arn
:param pulumi.Input[str] cmk_key_id: User master key id
:param pulumi.Input[str] region_id: Region id where the user master key id is located
"""
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "cmk_key_id", cmk_key_id)
pulumi.set(__self__, "region_id", region_id)
@property
@pulumi.getter
def arn(self) -> pulumi.Input[str]:
"""
role arn
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: pulumi.Input[str]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="cmkKeyId")
def cmk_key_id(self) -> pulumi.Input[str]:
"""
User master key id
"""
return pulumi.get(self, "cmk_key_id")
@cmk_key_id.setter
def cmk_key_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cmk_key_id", value)
@property
@pulumi.getter(name="regionId")
def region_id(self) -> pulumi.Input[str]:
"""
Region id where the user master key id is located
"""
return pulumi.get(self, "region_id")
@region_id.setter
def region_id(self, value: pulumi.Input[str]):
pulumi.set(self, "region_id", value)
@pulumi.input_type
class StoreIndexFieldSearchArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
alias: Optional[pulumi.Input[str]] = None,
case_sensitive: Optional[pulumi.Input[bool]] = None,
enable_analytics: Optional[pulumi.Input[bool]] = None,
include_chinese: Optional[pulumi.Input[bool]] = None,
json_keys: Optional[pulumi.Input[Sequence[pulumi.Input['StoreIndexFieldSearchJsonKeyArgs']]]] = None,
token: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: When using the json_keys field, this field is required.
:param pulumi.Input[str] alias: The alias of one field.
        :param pulumi.Input[bool] case_sensitive: Whether the field is case sensitive. Default to false. It is valid when "type" is "text" or "json".
        :param pulumi.Input[bool] enable_analytics: Whether to enable field analytics. Default to true.
        :param pulumi.Input[bool] include_chinese: Whether the field includes Chinese characters. Default to false. It is valid when "type" is "text" or "json".
        :param pulumi.Input[Sequence[pulumi.Input['StoreIndexFieldSearchJsonKeyArgs']]] json_keys: Use nested index when type is json
        :param pulumi.Input[str] token: The string of characters used to split the text into words (delimiters), like "\r", "#". It is valid when "type" is "text" or "json".
:param pulumi.Input[str] type: The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
"""
pulumi.set(__self__, "name", name)
if alias is not None:
pulumi.set(__self__, "alias", alias)
if case_sensitive is not None:
pulumi.set(__self__, "case_sensitive", case_sensitive)
if enable_analytics is not None:
pulumi.set(__self__, "enable_analytics", enable_analytics)
if include_chinese is not None:
pulumi.set(__self__, "include_chinese", include_chinese)
if json_keys is not None:
pulumi.set(__self__, "json_keys", json_keys)
if token is not None:
pulumi.set(__self__, "token", token)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
When using the json_keys field, this field is required.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def alias(self) -> Optional[pulumi.Input[str]]:
"""
The alias of one field.
"""
return pulumi.get(self, "alias")
@alias.setter
def alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alias", value)
@property
@pulumi.getter(name="caseSensitive")
def case_sensitive(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the field is case sensitive. Default to false. It is valid when "type" is "text" or "json".
"""
return pulumi.get(self, "case_sensitive")
@case_sensitive.setter
def case_sensitive(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "case_sensitive", value)
@property
@pulumi.getter(name="enableAnalytics")
def enable_analytics(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to enable field analytics. Default to true.
"""
return pulumi.get(self, "enable_analytics")
@enable_analytics.setter
def enable_analytics(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_analytics", value)
@property
@pulumi.getter(name="includeChinese")
def include_chinese(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the field includes Chinese characters. Default to false. It is valid when "type" is "text" or "json".
"""
return pulumi.get(self, "include_chinese")
@include_chinese.setter
def include_chinese(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_chinese", value)
@property
@pulumi.getter(name="jsonKeys")
def json_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StoreIndexFieldSearchJsonKeyArgs']]]]:
"""
Use nested index when type is json
"""
return pulumi.get(self, "json_keys")
@json_keys.setter
def json_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StoreIndexFieldSearchJsonKeyArgs']]]]):
pulumi.set(self, "json_keys", value)
@property
@pulumi.getter
def token(self) -> Optional[pulumi.Input[str]]:
"""
        The string of characters used to split the text into words (delimiters), like "\r", "#". It is valid when "type" is "text" or "json".
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class StoreIndexFieldSearchJsonKeyArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
alias: Optional[pulumi.Input[str]] = None,
doc_value: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: When using the json_keys field, this field is required.
:param pulumi.Input[str] alias: The alias of one field.
        :param pulumi.Input[bool] doc_value: Whether to enable statistics. Default to true.
:param pulumi.Input[str] type: The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
"""
pulumi.set(__self__, "name", name)
if alias is not None:
pulumi.set(__self__, "alias", alias)
if doc_value is not None:
pulumi.set(__self__, "doc_value", doc_value)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
When using the json_keys field, this field is required.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def alias(self) -> Optional[pulumi.Input[str]]:
"""
The alias of one field.
"""
return pulumi.get(self, "alias")
@alias.setter
def alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alias", value)
@property
@pulumi.getter(name="docValue")
def doc_value(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether to enable statistics. Default to true.
"""
return pulumi.get(self, "doc_value")
@doc_value.setter
def doc_value(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "doc_value", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class StoreIndexFullTextArgs:
def __init__(__self__, *,
case_sensitive: Optional[pulumi.Input[bool]] = None,
include_chinese: Optional[pulumi.Input[bool]] = None,
token: Optional[pulumi.Input[str]] = None):
"""
        :param pulumi.Input[bool] case_sensitive: Whether the field is case sensitive. Default to false. It is valid when "type" is "text" or "json".
        :param pulumi.Input[bool] include_chinese: Whether the field includes Chinese characters. Default to false. It is valid when "type" is "text" or "json".
        :param pulumi.Input[str] token: The string of characters used to split the text into words (delimiters), like "\r", "#". It is valid when "type" is "text" or "json".
"""
if case_sensitive is not None:
pulumi.set(__self__, "case_sensitive", case_sensitive)
if include_chinese is not None:
pulumi.set(__self__, "include_chinese", include_chinese)
if token is not None:
pulumi.set(__self__, "token", token)
@property
@pulumi.getter(name="caseSensitive")
def case_sensitive(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the field is case sensitive. Default to false. It is valid when "type" is "text" or "json".
"""
return pulumi.get(self, "case_sensitive")
@case_sensitive.setter
def case_sensitive(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "case_sensitive", value)
@property
@pulumi.getter(name="includeChinese")
def include_chinese(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the field includes Chinese characters. Default to false. It is valid when "type" is "text" or "json".
"""
return pulumi.get(self, "include_chinese")
@include_chinese.setter
def include_chinese(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_chinese", value)
@property
@pulumi.getter
def token(self) -> Optional[pulumi.Input[str]]:
"""
        The string of characters used to split the text into words (delimiters), like "\r", "#". It is valid when "type" is "text" or "json".
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token", value)
@pulumi.input_type
class StoreShardArgs:
def __init__(__self__, *,
begin_key: Optional[pulumi.Input[str]] = None,
end_key: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[int]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
        :param pulumi.Input[int] id: The ID of the log project, in the format `<project>:<name>`.
"""
if begin_key is not None:
pulumi.set(__self__, "begin_key", begin_key)
if end_key is not None:
pulumi.set(__self__, "end_key", end_key)
if id is not None:
pulumi.set(__self__, "id", id)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="beginKey")
def begin_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "begin_key")
@begin_key.setter
def begin_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "begin_key", value)
@property
@pulumi.getter(name="endKey")
def end_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "end_key")
@end_key.setter
def end_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_key", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[int]]:
"""
        The ID of the log project, in the format `<project>:<name>`.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
|
py | 1a3faf9e2053b8f656fc8daff1383b4ef6e79d8c | """
sphinx.domains.c
~~~~~~~~~~~~~~~~
The C language domain.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from typing import (Any, Callable, Dict, Generator, Iterator, List, Optional, Tuple, TypeVar,
Union, cast)
from docutils import nodes
from docutils.nodes import Element, Node, TextElement, system_message
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx60Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import SphinxRole, XRefRole
from sphinx.transforms import SphinxTransform
from sphinx.transforms.post_transforms import ReferencesResolver
from sphinx.util import logging
from sphinx.util.cfamily import (ASTAttribute, ASTBaseBase, ASTBaseParenExprList, BaseParser,
DefinitionError, NoOldIdError, StringifyTransform,
UnsupportedMultiCharacterCharLiteral, anon_identifier_re,
binary_literal_re, char_literal_re, float_literal_re,
float_literal_suffix_re, hex_literal_re, identifier_re,
integer_literal_re, integers_literal_suffix_re,
octal_literal_re, verify_description_mode)
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_refnode
from sphinx.util.typing import OptionSpec
logger = logging.getLogger(__name__)
T = TypeVar('T')
DeclarationType = Union[
"ASTStruct", "ASTUnion", "ASTEnum", "ASTEnumerator",
"ASTType", "ASTTypeWithInit", "ASTMacro",
]
# https://en.cppreference.com/w/c/keyword
_keywords = [
'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', 'double',
'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'inline', 'int', 'long',
'register', 'restrict', 'return', 'short', 'signed', 'sizeof', 'static', 'struct',
'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while',
'_Alignas', '_Alignof', '_Atomic', '_Bool', '_Complex',
'_Decimal32', '_Decimal64', '_Decimal128',
'_Generic', '_Imaginary', '_Noreturn', '_Static_assert', '_Thread_local',
]
# These are only keyword'y when the corresponding headers are included.
# They are used as default value for c_extra_keywords.
_macroKeywords = [
'alignas', 'alignof', 'bool', 'complex', 'imaginary', 'noreturn', 'static_assert',
'thread_local',
]
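# For example (configuration sketch, assuming the documented ``c_extra_keywords``
# option): a project that does not want these macro keywords treated as keywords can
# set
#     c_extra_keywords = []
# in its conf.py; the list above is the default value of that option.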
# these are ordered by precedence
_expression_bin_ops = [
['||', 'or'],
['&&', 'and'],
['|', 'bitor'],
['^', 'xor'],
['&', 'bitand'],
['==', '!=', 'not_eq'],
['<=', '>=', '<', '>'],
['<<', '>>'],
['+', '-'],
['*', '/', '%'],
['.*', '->*']
]
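# Illustration: with this ordering (lowest precedence first), an expression such as
# ``a || b && c + d`` groups as ``a || (b && (c + d))``.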
_expression_unary_ops = ["++", "--", "*", "&", "+", "-", "!", "not", "~", "compl"]
_expression_assignment_ops = ["=", "*=", "/=", "%=", "+=", "-=",
">>=", "<<=", "&=", "and_eq", "^=", "xor_eq", "|=", "or_eq"]
_max_id = 1
_id_prefix = [None, 'c.', 'Cv2.']
# Ids are used in lookup keys which are used across pickled files,
# so when _max_id changes, make sure to update the ENV_VERSION.
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
# bool, complex, and imaginary are macro "keywords", so they are handled separately
_simple_type_specifiers_re = re.compile(r"""(?x)
\b(
void|_Bool
|signed|unsigned
|short|long
|char
|int
|__uint128|__int128
|__int(8|16|32|64|128) # extension
|float|double
|_Decimal(32|64|128)
|_Complex|_Imaginary
|__float80|_Float64x|__float128|_Float128|__ibm128 # extension
|__fp16 # extension
|_Sat|_Fract|fract|_Accum|accum # extension
)\b
""")
class _DuplicateSymbolError(Exception):
def __init__(self, symbol: "Symbol", declaration: "ASTDeclaration") -> None:
assert symbol
assert declaration
self.symbol = symbol
self.declaration = declaration
def __str__(self) -> str:
return "Internal C duplicate symbol error:\n%s" % self.symbol.dump(0)
class ASTBase(ASTBaseBase):
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
# Names
################################################################################
class ASTIdentifier(ASTBaseBase):
def __init__(self, identifier: str) -> None:
assert identifier is not None
assert len(identifier) != 0
self.identifier = identifier
def __eq__(self, other: Any) -> bool:
return type(other) is ASTIdentifier and self.identifier == other.identifier
def is_anon(self) -> bool:
return self.identifier[0] == '@'
# and this is where we finally make a difference between __str__ and the display string
def __str__(self) -> str:
return self.identifier
def get_display_string(self) -> str:
return "[anonymous]" if self.is_anon() else self.identifier
def describe_signature(self, signode: TextElement, mode: str, env: "BuildEnvironment",
prefix: str, symbol: "Symbol") -> None:
# note: slightly different signature of describe_signature due to the prefix
verify_description_mode(mode)
if self.is_anon():
node = addnodes.desc_sig_name(text="[anonymous]")
else:
node = addnodes.desc_sig_name(self.identifier, self.identifier)
if mode == 'markType':
targetText = prefix + self.identifier
pnode = addnodes.pending_xref('', refdomain='c',
reftype='identifier',
reftarget=targetText, modname=None,
classname=None)
pnode['c:parent_key'] = symbol.get_lookup_key()
pnode += node
signode += pnode
elif mode == 'lastIsName':
nameNode = addnodes.desc_name()
nameNode += node
signode += nameNode
elif mode == 'noneIsName':
signode += node
else:
raise Exception('Unknown description mode: %s' % mode)
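# Illustration: identifiers starting with '@' denote anonymous entities, so
# str(ASTIdentifier('@1')) is '@1' while get_display_string() yields '[anonymous]'.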
class ASTNestedName(ASTBase):
def __init__(self, names: List[ASTIdentifier], rooted: bool) -> None:
assert len(names) > 0
self.names = names
self.rooted = rooted
@property
def name(self) -> "ASTNestedName":
return self
def get_id(self, version: int) -> str:
return '.'.join(str(n) for n in self.names)
def _stringify(self, transform: StringifyTransform) -> str:
res = '.'.join(transform(n) for n in self.names)
if self.rooted:
return '.' + res
else:
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'noneIsName':
if self.rooted:
assert False, "Can this happen?" # TODO
signode += nodes.Text('.')
for i in range(len(self.names)):
if i != 0:
assert False, "Can this happen?" # TODO
signode += nodes.Text('.')
n = self.names[i]
n.describe_signature(signode, mode, env, '', symbol)
elif mode == 'param':
assert not self.rooted, str(self)
assert len(self.names) == 1
self.names[0].describe_signature(signode, 'noneIsName', env, '', symbol)
elif mode == 'markType' or mode == 'lastIsName' or mode == 'markName':
# Each element should be a pending xref targeting the complete
# prefix.
prefix = ''
first = True
names = self.names[:-1] if mode == 'lastIsName' else self.names
# If lastIsName, then wrap all of the prefix in a desc_addname,
# else append directly to signode.
# TODO: also for C?
# NOTE: Breathe previously relied on the prefix being in the desc_addname node,
# so it can remove it in inner declarations.
dest = signode
if mode == 'lastIsName':
dest = addnodes.desc_addname()
if self.rooted:
prefix += '.'
if mode == 'lastIsName' and len(names) == 0:
signode += addnodes.desc_sig_punctuation('.', '.')
else:
dest += addnodes.desc_sig_punctuation('.', '.')
for i in range(len(names)):
ident = names[i]
if not first:
dest += addnodes.desc_sig_punctuation('.', '.')
prefix += '.'
first = False
txt_ident = str(ident)
if txt_ident != '':
ident.describe_signature(dest, 'markType', env, prefix, symbol)
prefix += txt_ident
if mode == 'lastIsName':
if len(self.names) > 1:
dest += addnodes.desc_sig_punctuation('.', '.')
signode += dest
self.names[-1].describe_signature(signode, mode, env, '', symbol)
else:
raise Exception('Unknown description mode: %s' % mode)
################################################################################
# Expressions
################################################################################
class ASTExpression(ASTBase):
pass
# Primary expressions
################################################################################
class ASTLiteral(ASTExpression):
pass
class ASTBooleanLiteral(ASTLiteral):
def __init__(self, value: bool) -> None:
self.value = value
def _stringify(self, transform: StringifyTransform) -> str:
if self.value:
return 'true'
else:
return 'false'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_keyword(txt, txt)
class ASTNumberLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_number(txt, txt)
class ASTCharLiteral(ASTLiteral):
def __init__(self, prefix: str, data: str) -> None:
self.prefix = prefix # may be None when no prefix
self.data = data
decoded = data.encode().decode('unicode-escape')
if len(decoded) == 1:
self.value = ord(decoded)
else:
raise UnsupportedMultiCharacterCharLiteral(decoded)
def _stringify(self, transform: StringifyTransform) -> str:
if self.prefix is None:
return "'" + self.data + "'"
else:
return self.prefix + "'" + self.data + "'"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_char(txt, txt)
class ASTStringLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_string(txt, txt)
class ASTIdExpression(ASTExpression):
def __init__(self, name: ASTNestedName):
# note: this class is basically to cast a nested name as an expression
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def get_id(self, version: int) -> str:
return self.name.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.name.describe_signature(signode, mode, env, symbol)
class ASTParenExpr(ASTExpression):
def __init__(self, expr):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '(' + transform(self.expr) + ')'
def get_id(self, version: int) -> str:
return self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_punctuation('(', '(')
self.expr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
# Postfix expressions
################################################################################
class ASTPostfixOp(ASTBase):
pass
class ASTPostfixCallExpr(ASTPostfixOp):
def __init__(self, lst: Union["ASTParenExprList", "ASTBracedInitList"]) -> None:
self.lst = lst
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.lst)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.lst.describe_signature(signode, mode, env, symbol)
class ASTPostfixArray(ASTPostfixOp):
def __init__(self, expr: ASTExpression) -> None:
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '[' + transform(self.expr) + ']'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_punctuation('[', '[')
self.expr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(']', ']')
class ASTPostfixInc(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '++'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_operator('++', '++')
class ASTPostfixDec(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '--'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_operator('--', '--')
class ASTPostfixMemberOfPointer(ASTPostfixOp):
def __init__(self, name):
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return '->' + transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_operator('->', '->')
self.name.describe_signature(signode, 'noneIsName', env, symbol)
class ASTPostfixExpr(ASTExpression):
def __init__(self, prefix: ASTExpression, postFixes: List[ASTPostfixOp]):
self.prefix = prefix
self.postFixes = postFixes
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.prefix)]
for p in self.postFixes:
res.append(transform(p))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.prefix.describe_signature(signode, mode, env, symbol)
for p in self.postFixes:
p.describe_signature(signode, mode, env, symbol)
# Unary expressions
################################################################################
class ASTUnaryOpExpr(ASTExpression):
def __init__(self, op: str, expr: ASTExpression):
self.op = op
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
if self.op[0] in 'cn':
return self.op + " " + transform(self.expr)
else:
return self.op + transform(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.op[0] in 'cn':
signode += addnodes.desc_sig_keyword(self.op, self.op)
signode += addnodes.desc_sig_space()
else:
signode += addnodes.desc_sig_operator(self.op, self.op)
self.expr.describe_signature(signode, mode, env, symbol)
class ASTSizeofType(ASTExpression):
def __init__(self, typ):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof(" + transform(self.typ) + ")"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
class ASTSizeofExpr(ASTExpression):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof " + transform(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_space()
self.expr.describe_signature(signode, mode, env, symbol)
class ASTAlignofExpr(ASTExpression):
def __init__(self, typ: "ASTType"):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "alignof(" + transform(self.typ) + ")"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_keyword('alignof', 'alignof')
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
# Other expressions
################################################################################
class ASTCastExpr(ASTExpression):
def __init__(self, typ: "ASTType", expr: ASTExpression):
self.typ = typ
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.typ))
res.append(')')
res.append(transform(self.expr))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
self.expr.describe_signature(signode, mode, env, symbol)
class ASTBinOpExpr(ASTBase):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode += addnodes.desc_sig_space()
op = self.ops[i - 1]
if ord(op[0]) >= ord('a') and ord(op[0]) <= ord('z'):
signode += addnodes.desc_sig_keyword(op, op)
else:
signode += addnodes.desc_sig_operator(op, op)
signode += addnodes.desc_sig_space()
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTAssignmentExpr(ASTExpression):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode += addnodes.desc_sig_space()
op = self.ops[i - 1]
if ord(op[0]) >= ord('a') and ord(op[0]) <= ord('z'):
signode += addnodes.desc_sig_keyword(op, op)
else:
signode += addnodes.desc_sig_operator(op, op)
signode += addnodes.desc_sig_space()
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTFallbackExpr(ASTExpression):
def __init__(self, expr: str):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return self.expr
def get_id(self, version: int) -> str:
return str(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.literal(self.expr, self.expr)
################################################################################
# Types
################################################################################
class ASTTrailingTypeSpec(ASTBase):
pass
class ASTTrailingTypeSpecFundamental(ASTTrailingTypeSpec):
def __init__(self, names: List[str]) -> None:
assert len(names) != 0
self.names = names
def _stringify(self, transform: StringifyTransform) -> str:
return ' '.join(self.names)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
first = True
for n in self.names:
if not first:
signode += addnodes.desc_sig_space()
else:
first = False
signode += addnodes.desc_sig_keyword_type(n, n)
class ASTTrailingTypeSpecName(ASTTrailingTypeSpec):
def __init__(self, prefix: str, nestedName: ASTNestedName) -> None:
self.prefix = prefix
self.nestedName = nestedName
@property
def name(self) -> ASTNestedName:
return self.nestedName
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.prefix:
res.append(self.prefix)
res.append(' ')
res.append(transform(self.nestedName))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.prefix:
signode += addnodes.desc_sig_keyword(self.prefix, self.prefix)
signode += addnodes.desc_sig_space()
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
class ASTFunctionParameter(ASTBase):
def __init__(self, arg: "ASTTypeWithInit", ellipsis: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=False)
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode: Any, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += addnodes.desc_sig_punctuation('...', '...')
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTParameters(ASTBase):
def __init__(self, args: List[ASTFunctionParameter], attrs: List[ASTAttribute]) -> None:
self.args = args
self.attrs = attrs
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.args
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append('(')
first = True
for a in self.args:
if not first:
res.append(', ')
first = False
res.append(str(a))
res.append(')')
for attr in self.attrs:
res.append(' ')
res.append(transform(attr))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# only use the desc_parameterlist for the outer list, not for inner lists
if mode == 'lastIsName':
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
else:
signode += addnodes.desc_sig_punctuation('(', '(')
first = True
for arg in self.args:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
first = False
arg.describe_signature(signode, 'markType', env, symbol=symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
for attr in self.attrs:
signode += addnodes.desc_sig_space()
attr.describe_signature(signode)
class ASTDeclSpecsSimple(ASTBaseBase):
def __init__(self, storage: str, threadLocal: str, inline: bool,
restrict: bool, volatile: bool, const: bool, attrs: List[Any]) -> None:
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
def mergeWith(self, other: "ASTDeclSpecsSimple") -> "ASTDeclSpecsSimple":
if not other:
return self
        # keep the argument order in sync with __init__:
        # (storage, threadLocal, inline, restrict, volatile, const, attrs)
        return ASTDeclSpecsSimple(self.storage or other.storage,
                                  self.threadLocal or other.threadLocal,
                                  self.inline or other.inline,
                                  self.restrict or other.restrict,
                                  self.volatile or other.volatile,
                                  self.const or other.const,
                                  self.attrs + other.attrs)
def _stringify(self, transform: StringifyTransform) -> str:
res: List[str] = []
res.extend(transform(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
if self.threadLocal:
res.append(self.threadLocal)
if self.inline:
res.append('inline')
if self.restrict:
res.append('restrict')
if self.volatile:
res.append('volatile')
if self.const:
res.append('const')
return ' '.join(res)
def describe_signature(self, modifiers: List[Node]) -> None:
def _add(modifiers: List[Node], text: str) -> None:
if len(modifiers) > 0:
modifiers.append(addnodes.desc_sig_space())
modifiers.append(addnodes.desc_sig_keyword(text, text))
for attr in self.attrs:
if len(modifiers) > 0:
modifiers.append(addnodes.desc_sig_space())
modifiers.append(attr.describe_signature(modifiers))
if self.storage:
_add(modifiers, self.storage)
if self.threadLocal:
_add(modifiers, self.threadLocal)
if self.inline:
_add(modifiers, 'inline')
if self.restrict:
_add(modifiers, 'restrict')
if self.volatile:
_add(modifiers, 'volatile')
if self.const:
_add(modifiers, 'const')
class ASTDeclSpecs(ASTBase):
def __init__(self, outer: str,
leftSpecs: ASTDeclSpecsSimple,
rightSpecs: ASTDeclSpecsSimple,
trailing: ASTTrailingTypeSpec) -> None:
# leftSpecs and rightSpecs are used for output
# allSpecs are used for id generation TODO: remove?
self.outer = outer
self.leftSpecs = leftSpecs
self.rightSpecs = rightSpecs
self.allSpecs = self.leftSpecs.mergeWith(self.rightSpecs)
self.trailingTypeSpec = trailing
def _stringify(self, transform: StringifyTransform) -> str:
res: List[str] = []
l = transform(self.leftSpecs)
if len(l) > 0:
res.append(l)
if self.trailingTypeSpec:
if len(res) > 0:
res.append(" ")
res.append(transform(self.trailingTypeSpec))
r = str(self.rightSpecs)
if len(r) > 0:
if len(res) > 0:
res.append(" ")
res.append(r)
return "".join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
modifiers: List[Node] = []
self.leftSpecs.describe_signature(modifiers)
for m in modifiers:
signode += m
if self.trailingTypeSpec:
if len(modifiers) > 0:
signode += addnodes.desc_sig_space()
self.trailingTypeSpec.describe_signature(signode, mode, env,
symbol=symbol)
modifiers = []
self.rightSpecs.describe_signature(modifiers)
if len(modifiers) > 0:
signode += addnodes.desc_sig_space()
for m in modifiers:
signode += m
# Declarator
################################################################################
class ASTArray(ASTBase):
def __init__(self, static: bool, const: bool, volatile: bool, restrict: bool,
vla: bool, size: ASTExpression):
self.static = static
self.const = const
self.volatile = volatile
self.restrict = restrict
self.vla = vla
self.size = size
if vla:
assert size is None
if size is not None:
assert not vla
def _stringify(self, transform: StringifyTransform) -> str:
el = []
if self.static:
el.append('static')
if self.restrict:
el.append('restrict')
if self.volatile:
el.append('volatile')
if self.const:
el.append('const')
if self.vla:
return '[' + ' '.join(el) + '*]'
elif self.size:
el.append(transform(self.size))
return '[' + ' '.join(el) + ']'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('[', '[')
addSpace = False
def _add(signode: TextElement, text: str) -> bool:
if addSpace:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_keyword(text, text)
return True
if self.static:
addSpace = _add(signode, 'static')
if self.restrict:
addSpace = _add(signode, 'restrict')
if self.volatile:
addSpace = _add(signode, 'volatile')
if self.const:
addSpace = _add(signode, 'const')
if self.vla:
signode += addnodes.desc_sig_punctuation('*', '*')
elif self.size:
if addSpace:
signode += addnodes.desc_sig_space()
self.size.describe_signature(signode, 'markType', env, symbol)
signode += addnodes.desc_sig_punctuation(']', ']')
class ASTDeclarator(ASTBase):
@property
def name(self) -> ASTNestedName:
raise NotImplementedError(repr(self))
@property
def function_params(self) -> List[ASTFunctionParameter]:
raise NotImplementedError(repr(self))
def require_space_after_declSpecs(self) -> bool:
raise NotImplementedError(repr(self))
class ASTDeclaratorNameParam(ASTDeclarator):
def __init__(self, declId: ASTNestedName,
arrayOps: List[ASTArray], param: ASTParameters) -> None:
self.declId = declId
self.arrayOps = arrayOps
self.param = param
@property
def name(self) -> ASTNestedName:
return self.declId
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.param.function_params
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
for op in self.arrayOps:
res.append(transform(op))
if self.param:
res.append(transform(self.param))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
for op in self.arrayOps:
op.describe_signature(signode, mode, env, symbol)
if self.param:
self.param.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorNameBitField(ASTDeclarator):
def __init__(self, declId: ASTNestedName, size: ASTExpression):
self.declId = declId
self.size = size
@property
def name(self) -> ASTNestedName:
return self.declId
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.append(" : ")
res.append(transform(self.size))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation(':', ':')
signode += addnodes.desc_sig_space()
self.size.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorPtr(ASTDeclarator):
def __init__(self, next: ASTDeclarator, restrict: bool, volatile: bool, const: bool,
attrs: Any) -> None:
assert next
self.next = next
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return self.const or self.volatile or self.restrict or \
len(self.attrs) > 0 or \
self.next.require_space_after_declSpecs()
def _stringify(self, transform: StringifyTransform) -> str:
res = ['*']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and (self.restrict or self.volatile or self.const):
res.append(' ')
if self.restrict:
res.append('restrict')
if self.volatile:
if self.restrict:
res.append(' ')
res.append('volatile')
if self.const:
if self.restrict or self.volatile:
res.append(' ')
res.append('const')
if self.const or self.volatile or self.restrict or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('*', '*')
for a in self.attrs:
a.describe_signature(signode)
if len(self.attrs) > 0 and (self.restrict or self.volatile or self.const):
signode += addnodes.desc_sig_space()
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_sig_keyword(text, text)
if self.restrict:
_add_anno(signode, 'restrict')
if self.volatile:
if self.restrict:
signode += addnodes.desc_sig_space()
_add_anno(signode, 'volatile')
if self.const:
if self.restrict or self.volatile:
signode += addnodes.desc_sig_space()
_add_anno(signode, 'const')
if self.const or self.volatile or self.restrict or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
signode += addnodes.desc_sig_space()
self.next.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorParen(ASTDeclarator):
def __init__(self, inner: ASTDeclarator, next: ASTDeclarator) -> None:
assert inner
assert next
self.inner = inner
self.next = next
# TODO: we assume the name and params are in inner
@property
def name(self) -> ASTNestedName:
return self.inner.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.inner.function_params
def require_space_after_declSpecs(self) -> bool:
return True
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.inner))
res.append(')')
res.append(transform(self.next))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('(', '(')
self.inner.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
self.next.describe_signature(signode, "noneIsName", env, symbol)
# Initializer
################################################################################
class ASTParenExprList(ASTBaseParenExprList):
def __init__(self, exprs: List[ASTExpression]) -> None:
self.exprs = exprs
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
return '(%s)' % ', '.join(exprs)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('(', '(')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
class ASTBracedInitList(ASTBase):
def __init__(self, exprs: List[ASTExpression], trailingComma: bool) -> None:
self.exprs = exprs
self.trailingComma = trailingComma
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
trailingComma = ',' if self.trailingComma else ''
return '{%s%s}' % (', '.join(exprs), trailingComma)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('{', '{')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
if self.trailingComma:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_punctuation('}', '}')
class ASTInitializer(ASTBase):
def __init__(self, value: Union[ASTBracedInitList, ASTExpression],
hasAssign: bool = True) -> None:
self.value = value
self.hasAssign = hasAssign
def _stringify(self, transform: StringifyTransform) -> str:
val = transform(self.value)
if self.hasAssign:
return ' = ' + val
else:
return val
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.hasAssign:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation('=', '=')
signode += addnodes.desc_sig_space()
self.value.describe_signature(signode, 'markType', env, symbol)
class ASTType(ASTBase):
def __init__(self, declSpecs: ASTDeclSpecs, decl: ASTDeclarator) -> None:
assert declSpecs
assert decl
self.declSpecs = declSpecs
self.decl = decl
@property
def name(self) -> ASTNestedName:
return self.decl.name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.decl.function_params
def _stringify(self, transform: StringifyTransform) -> str:
res = []
declSpecs = transform(self.declSpecs)
res.append(declSpecs)
if self.decl.require_space_after_declSpecs() and len(declSpecs) > 0:
res.append(' ')
res.append(transform(self.decl))
return ''.join(res)
def get_type_declaration_prefix(self) -> str:
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
len(str(self.declSpecs)) > 0):
signode += addnodes.desc_sig_space()
# for parameters that don't really declare new names we get 'markType',
# this should not be propagated, but be 'noneIsName'.
if mode == 'markType':
mode = 'noneIsName'
self.decl.describe_signature(signode, mode, env, symbol)
class ASTTypeWithInit(ASTBase):
def __init__(self, type: ASTType, init: ASTInitializer) -> None:
self.type = type
self.init = init
@property
def name(self) -> ASTNestedName:
return self.type.name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return self.type.get_id(version, objectType, symbol)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.type))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, mode, env, symbol)
class ASTMacroParameter(ASTBase):
def __init__(self, arg: ASTNestedName, ellipsis: bool = False,
variadic: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
self.variadic = variadic
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
elif self.variadic:
return transform(self.arg) + '...'
else:
return transform(self.arg)
def describe_signature(self, signode: Any, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += addnodes.desc_sig_punctuation('...', '...')
elif self.variadic:
name = str(self)
signode += addnodes.desc_sig_name(name, name)
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTMacro(ASTBase):
def __init__(self, ident: ASTNestedName, args: List[ASTMacroParameter]) -> None:
self.ident = ident
self.args = args
@property
def name(self) -> ASTNestedName:
return self.ident
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.ident))
if self.args is not None:
res.append('(')
first = True
for arg in self.args:
if not first:
res.append(', ')
first = False
res.append(transform(arg))
res.append(')')
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.ident.describe_signature(signode, mode, env, symbol)
if self.args is None:
return
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
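# Illustrative example (not authoritative): for a directive like
# ``.. c:macro:: MIN(a, b)`` the args list holds two ASTMacroParameter
# entries and str(ASTMacro) reproduces "MIN(a, b)"; a plain
# ``.. c:macro:: VERSION`` keeps ``args is None`` and renders without
# parentheses.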
class ASTStruct(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTUnion(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnum(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnumerator(ASTBase):
def __init__(self, name: ASTNestedName, init: ASTInitializer) -> None:
self.name = name
self.init = init
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.name))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, 'markType', env, symbol)
class ASTDeclaration(ASTBaseBase):
def __init__(self, objectType: str, directiveType: str,
declaration: Union[DeclarationType, ASTFunctionParameter],
semicolon: bool = False) -> None:
self.objectType = objectType
self.directiveType = directiveType
self.declaration = declaration
self.semicolon = semicolon
self.symbol: Symbol = None
# set by CObject._add_enumerator_to_parent
self.enumeratorScopedSymbol: Symbol = None
def clone(self) -> "ASTDeclaration":
return ASTDeclaration(self.objectType, self.directiveType,
self.declaration.clone(), self.semicolon)
@property
def name(self) -> ASTNestedName:
decl = cast(DeclarationType, self.declaration)
return decl.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
if self.objectType != 'function':
return None
decl = cast(ASTType, self.declaration)
return decl.function_params
def get_id(self, version: int, prefixed: bool = True) -> str:
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed)
id_ = self.declaration.get_id(version, self.objectType, self.symbol)
if prefixed:
return _id_prefix[version] + id_
else:
return id_
def get_newest_id(self) -> str:
return self.get_id(_max_id, True)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.declaration)
if self.semicolon:
res += ';'
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", options: Dict) -> None:
verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
# Always enable multiline:
signode['is_multiline'] = True
# Put each line in a desc_signature_line node.
mainDeclNode = addnodes.desc_signature_line()
mainDeclNode.sphinx_line_type = 'declarator'
mainDeclNode['add_permalink'] = not self.symbol.isRedeclaration
signode += mainDeclNode
if self.objectType == 'member':
pass
elif self.objectType == 'function':
pass
elif self.objectType == 'macro':
pass
elif self.objectType == 'struct':
mainDeclNode += addnodes.desc_sig_keyword('struct', 'struct')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'union':
mainDeclNode += addnodes.desc_sig_keyword('union', 'union')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'enum':
mainDeclNode += addnodes.desc_sig_keyword('enum', 'enum')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'enumerator':
mainDeclNode += addnodes.desc_sig_keyword('enumerator', 'enumerator')
mainDeclNode += addnodes.desc_sig_space()
elif self.objectType == 'type':
decl = cast(ASTType, self.declaration)
prefix = decl.get_type_declaration_prefix()
mainDeclNode += addnodes.desc_sig_keyword(prefix, prefix)
mainDeclNode += addnodes.desc_sig_space()
else:
assert False
self.declaration.describe_signature(mainDeclNode, mode, env, self.symbol)
if self.semicolon:
mainDeclNode += addnodes.desc_sig_punctuation(';', ';')
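# Illustrative example: describe_signature above prefixes the rendered line
# with the object's keyword where one exists, so ``.. c:struct:: Data``
# renders as "struct Data" and ``.. c:enumerator:: RED`` as "enumerator RED",
# while member, function, and macro declarations render their declarator
# without an extra keyword.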
class SymbolLookupResult:
def __init__(self, symbols: Iterator["Symbol"], parentSymbol: "Symbol",
ident: ASTIdentifier) -> None:
self.symbols = symbols
self.parentSymbol = parentSymbol
self.ident = ident
class LookupKey:
def __init__(self, data: List[Tuple[ASTIdentifier, str]]) -> None:
self.data = data
def __str__(self) -> str:
return '[{}]'.format(', '.join("({}, {})".format(
ident, id_) for ident, id_ in self.data))
class Symbol:
debug_indent = 0
debug_indent_string = " "
debug_lookup = False
debug_show_tree = False
def __copy__(self):
assert False # shouldn't happen
def __deepcopy__(self, memo):
if self.parent:
assert False # shouldn't happen
else:
# the domain base class makes a copy of the initial data, which is fine
return Symbol(None, None, None, None, None)
@staticmethod
def debug_print(*args: Any) -> None:
print(Symbol.debug_indent_string * Symbol.debug_indent, end="")
print(*args)
def _assert_invariants(self) -> None:
if not self.parent:
# parent is None means this is the global scope symbol, which never carries a declaration or docname
assert not self.declaration
assert not self.docname
else:
if self.declaration:
assert self.docname
def __setattr__(self, key: str, value: Any) -> None:
if key == "children":
assert False
else:
return super().__setattr__(key, value)
def __init__(self, parent: "Symbol", ident: ASTIdentifier,
declaration: ASTDeclaration, docname: str, line: int) -> None:
self.parent = parent
# declarations in a single directive are linked together
self.siblingAbove: Symbol = None
self.siblingBelow: Symbol = None
self.ident = ident
self.declaration = declaration
self.docname = docname
self.line = line
self.isRedeclaration = False
self._assert_invariants()
# Remember to update Symbol.remove if the way the parent is modified here changes.
self._children: List[Symbol] = []
self._anonChildren: List[Symbol] = []
# note: _children includes _anonChildren
if self.parent:
self.parent._children.append(self)
if self.declaration:
self.declaration.symbol = self
# Do symbol addition after self._children has been initialised.
self._add_function_params()
def _fill_empty(self, declaration: ASTDeclaration, docname: str, line: int) -> None:
self._assert_invariants()
assert self.declaration is None
assert self.docname is None
assert self.line is None
assert declaration is not None
assert docname is not None
assert line is not None
self.declaration = declaration
self.declaration.symbol = self
self.docname = docname
self.line = line
self._assert_invariants()
# and symbol addition should be done as well
self._add_function_params()
def _add_function_params(self) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_function_params:")
# Note: we may be called from _fill_empty, so the symbols we want
# to add may actually already be present (as empty symbols).
# add symbols for function parameters, if any
if self.declaration is not None and self.declaration.function_params is not None:
for p in self.declaration.function_params:
if p.arg is None:
continue
nn = p.arg.name
if nn is None:
continue
# (comparing to the template params: we have checked that we are a declaration)
decl = ASTDeclaration('functionParam', None, p)
assert not nn.rooted
assert len(nn.names) == 1
self._add_symbols(nn, decl, self.docname, self.line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def remove(self) -> None:
if self.parent is None:
return
assert self in self.parent._children
self.parent._children.remove(self)
self.parent = None
def clear_doc(self, docname: str) -> None:
for sChild in self._children:
sChild.clear_doc(docname)
if sChild.declaration and sChild.docname == docname:
sChild.declaration = None
sChild.docname = None
sChild.line = None
if sChild.siblingAbove is not None:
sChild.siblingAbove.siblingBelow = sChild.siblingBelow
if sChild.siblingBelow is not None:
sChild.siblingBelow.siblingAbove = sChild.siblingAbove
sChild.siblingAbove = None
sChild.siblingBelow = None
def get_all_symbols(self) -> Iterator["Symbol"]:
yield self
for sChild in self._children:
yield from sChild.get_all_symbols()
@property
def children(self) -> Iterator["Symbol"]:
yield from self._children
@property
def children_recurse_anon(self) -> Iterator["Symbol"]:
for c in self._children:
yield c
if not c.ident.is_anon():
continue
yield from c.children_recurse_anon
def get_lookup_key(self) -> "LookupKey":
# The pickle files for the environment and for each document are distinct.
# The environment has all the symbols, but the documents have xrefs that
# must know their scope. A lookup key is essentially a specification of
# how to find a specific symbol.
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
key = []
for s in symbols:
if s.declaration is not None:
# TODO: do we need the ID?
key.append((s.ident, s.declaration.get_newest_id()))
else:
key.append((s.ident, None))
return LookupKey(key)
def get_full_nested_name(self) -> ASTNestedName:
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
names = []
for s in symbols:
names.append(s.ident)
return ASTNestedName(names, rooted=False)
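# Illustrative example: for a symbol chain root -> "Foo" -> "bar",
# get_full_nested_name() on the "bar" symbol yields the dotted name
# "Foo.bar", and get_lookup_key() records one (ident, newest-id) pair per
# level so pickled documents can re-resolve their scope later.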
def _find_first_named_symbol(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
# TODO: further simplification from C++ to C
if Symbol.debug_lookup:
Symbol.debug_print("_find_first_named_symbol ->")
res = self._find_named_symbols(ident, matchSelf, recurseInAnon,
searchInSiblings=False)
try:
return next(res)
except StopIteration:
return None
def _find_named_symbols(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool,
searchInSiblings: bool) -> Iterator["Symbol"]:
# TODO: further simplification from C++ to C
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_find_named_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("ident: ", ident)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
def candidates() -> Generator["Symbol", None, None]:
s = self
if Symbol.debug_lookup:
Symbol.debug_print("searching in self:")
print(s.to_string(Symbol.debug_indent + 1), end="")
while True:
if matchSelf:
yield s
if recurseInAnon:
yield from s.children_recurse_anon
else:
yield from s._children
if s.siblingAbove is None:
break
s = s.siblingAbove
if Symbol.debug_lookup:
Symbol.debug_print("searching in sibling:")
print(s.to_string(Symbol.debug_indent + 1), end="")
for s in candidates():
if Symbol.debug_lookup:
Symbol.debug_print("candidate:")
print(s.to_string(Symbol.debug_indent + 1), end="")
if s.ident == ident:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("matches")
Symbol.debug_indent -= 3
yield s
if Symbol.debug_lookup:
Symbol.debug_indent += 2
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
def _symbol_lookup(self, nestedName: ASTNestedName,
onMissingQualifiedSymbol: Callable[["Symbol", ASTIdentifier], "Symbol"], # NOQA
ancestorLookupType: str, matchSelf: bool,
recurseInAnon: bool, searchInSiblings: bool) -> SymbolLookupResult:
# TODO: further simplification from C++ to C
# ancestorLookupType: if not None, specifies the target type of the lookup
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_symbol_lookup:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("nestedName: ", nestedName)
Symbol.debug_print("ancestorLookupType:", ancestorLookupType)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
names = nestedName.names
# find the right starting point for lookup
parentSymbol = self
if nestedName.rooted:
while parentSymbol.parent:
parentSymbol = parentSymbol.parent
if ancestorLookupType is not None:
# walk up until we find the first identifier
firstName = names[0]
while parentSymbol.parent:
if parentSymbol.find_identifier(firstName,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings):
break
parentSymbol = parentSymbol.parent
if Symbol.debug_lookup:
Symbol.debug_print("starting point:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# and now the actual lookup
for ident in names[:-1]:
symbol = parentSymbol._find_first_named_symbol(
ident, matchSelf=matchSelf, recurseInAnon=recurseInAnon)
if symbol is None:
symbol = onMissingQualifiedSymbol(parentSymbol, ident)
if symbol is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
# We have now matched part of a nested name, and need to match more,
# so even if matchSelf applied before, it must not apply to the
# remaining components. (see also issue #2666)
matchSelf = False
parentSymbol = symbol
if Symbol.debug_lookup:
Symbol.debug_print("handle last name from:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# handle the last name
ident = names[-1]
symbols = parentSymbol._find_named_symbols(
ident, matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings)
if Symbol.debug_lookup:
symbols = list(symbols) # type: ignore
Symbol.debug_indent -= 2
return SymbolLookupResult(symbols, parentSymbol, ident)
def _add_symbols(self, nestedName: ASTNestedName,
declaration: ASTDeclaration, docname: str, line: int) -> "Symbol":
# TODO: further simplification from C++ to C
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("nn: ", nestedName)
Symbol.debug_print("decl: ", declaration)
Symbol.debug_print("location: {}:{}".format(docname, line))
def onMissingQualifiedSymbol(parentSymbol: "Symbol", ident: ASTIdentifier) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols, onMissingQualifiedSymbol:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", ident)
Symbol.debug_indent -= 2
return Symbol(parent=parentSymbol, ident=ident,
declaration=None, docname=None, line=None)
lookupResult = self._symbol_lookup(nestedName,
onMissingQualifiedSymbol,
ancestorLookupType=None,
matchSelf=False,
recurseInAnon=False,
searchInSiblings=False)
assert lookupResult is not None # we create symbols all the way, so that can't happen
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, no symbol:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", lookupResult.ident)
Symbol.debug_print("declaration: ", declaration)
Symbol.debug_print("location: {}:{}".format(docname, line))
Symbol.debug_indent -= 1
symbol = Symbol(parent=lookupResult.parentSymbol,
ident=lookupResult.ident,
declaration=declaration,
docname=docname, line=line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return symbol
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("number symbols:", len(symbols))
Symbol.debug_indent -= 1
if not declaration:
if Symbol.debug_lookup:
Symbol.debug_print("no declaration")
Symbol.debug_indent -= 2
# good, just a scope creation
# TODO: what if we have more than one symbol?
return symbols[0]
noDecl = []
withDecl = []
dupDecl = []
for s in symbols:
if s.declaration is None:
noDecl.append(s)
elif s.isRedeclaration:
dupDecl.append(s)
else:
withDecl.append(s)
if Symbol.debug_lookup:
Symbol.debug_print("#noDecl: ", len(noDecl))
Symbol.debug_print("#withDecl:", len(withDecl))
Symbol.debug_print("#dupDecl: ", len(dupDecl))
# With partial builds we may start with a large symbol tree stripped of declarations.
# Essentially any combination of noDecl, withDecl, and dupDecls seems possible.
# TODO: make partial builds fully work. What should happen when the primary symbol gets
# deleted, and other duplicates exist? The full document should probably be rebuilt.
# First check if one of those with a declaration matches.
# If it's a function, we need to compare IDs,
# otherwise there should be only one symbol with a declaration.
def makeCandSymbol() -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_print("begin: creating candidate symbol")
symbol = Symbol(parent=lookupResult.parentSymbol,
ident=lookupResult.ident,
declaration=declaration,
docname=docname, line=line)
if Symbol.debug_lookup:
Symbol.debug_print("end: creating candidate symbol")
return symbol
if len(withDecl) == 0:
candSymbol = None
else:
candSymbol = makeCandSymbol()
def handleDuplicateDeclaration(symbol: "Symbol", candSymbol: "Symbol") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("redeclaration")
Symbol.debug_indent -= 1
Symbol.debug_indent -= 2
# Redeclaration of the same symbol.
# Let the new one be there, but raise an error to the client
# so it can use the real symbol as subscope.
# This will probably result in a duplicate id warning.
candSymbol.isRedeclaration = True
raise _DuplicateSymbolError(symbol, declaration)
if declaration.objectType != "function":
assert len(withDecl) <= 1
handleDuplicateDeclaration(withDecl[0], candSymbol)
# (not reachable)
# a function, so compare IDs
candId = declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("candId:", candId)
for symbol in withDecl:
oldId = symbol.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("oldId: ", oldId)
if candId == oldId:
handleDuplicateDeclaration(symbol, candSymbol)
# (not reachable)
# no candidate symbol found with matching ID
# if there is an empty symbol, fill that one
if len(noDecl) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("no match, no empty, candSybmol is not None?:", candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
return candSymbol
else:
return makeCandSymbol()
else:
if Symbol.debug_lookup:
Symbol.debug_print(
"no match, but fill an empty declaration, candSybmol is not None?:",
candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
candSymbol.remove()
# assert len(noDecl) == 1
# TODO: enable assertion when we at some point find out how to do cleanup
# for now, just take the first one, it should work fine ... right?
symbol = noDecl[0]
# If someone first opened the scope, and then later
# declares it, e.g.,
# .. namespace:: Test
# .. namespace:: nullptr
# .. class:: Test
symbol._fill_empty(declaration, docname, line)
return symbol
def merge_with(self, other: "Symbol", docnames: List[str],
env: "BuildEnvironment") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("merge_with:")
assert other is not None
for otherChild in other._children:
ourChild = self._find_first_named_symbol(
ident=otherChild.ident, matchSelf=False,
recurseInAnon=False)
if ourChild is None:
# TODO: hmm, should we prune by docnames?
self._children.append(otherChild)
otherChild.parent = self
otherChild._assert_invariants()
continue
if otherChild.declaration and otherChild.docname in docnames:
if not ourChild.declaration:
ourChild._fill_empty(otherChild.declaration,
otherChild.docname, otherChild.line)
elif ourChild.docname != otherChild.docname:
name = str(ourChild.declaration)
msg = __("Duplicate C declaration, also defined at %s:%s.\n"
"Declaration is '.. c:%s:: %s'.")
msg = msg % (ourChild.docname, ourChild.line,
ourChild.declaration.directiveType, name)
logger.warning(msg, location=(otherChild.docname, otherChild.line))
else:
# Both have declarations, and in the same docname.
# This can apparently happen, it should be safe to
# just ignore it, right?
pass
ourChild.merge_with(otherChild, docnames, env)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def add_name(self, nestedName: ASTNestedName) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_name:")
res = self._add_symbols(nestedName, declaration=None, docname=None, line=None)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def add_declaration(self, declaration: ASTDeclaration,
docname: str, line: int) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_declaration:")
assert declaration is not None
assert docname is not None
assert line is not None
nestedName = declaration.name
res = self._add_symbols(nestedName, declaration, docname, line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def find_identifier(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool, searchInSiblings: bool
) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_identifier:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", ident)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings:", searchInSiblings)
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
current = self
while current is not None:
if Symbol.debug_lookup:
Symbol.debug_indent += 2
Symbol.debug_print("trying:")
print(current.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
if matchSelf and current.ident == ident:
return current
children = current.children_recurse_anon if recurseInAnon else current._children
for s in children:
if s.ident == ident:
return s
if not searchInSiblings:
break
current = current.siblingAbove
return None
def direct_lookup(self, key: "LookupKey") -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("direct_lookup:")
Symbol.debug_indent += 1
s = self
for name, id_ in key.data:
res = None
for cand in s._children:
if cand.ident == name:
res = cand
break
s = res
if Symbol.debug_lookup:
Symbol.debug_print("name: ", name)
Symbol.debug_print("id: ", id_)
if s is not None:
print(s.to_string(Symbol.debug_indent + 1), end="")
else:
Symbol.debug_print("not found")
if s is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return s
def find_declaration(self, nestedName: ASTNestedName, typ: str,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
# templateShorthand: missing template parameter lists for templates is ok
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_declaration:")
def onMissingQualifiedSymbol(parentSymbol: "Symbol",
ident: ASTIdentifier) -> "Symbol":
return None
lookupResult = self._symbol_lookup(nestedName,
onMissingQualifiedSymbol,
ancestorLookupType=typ,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=False)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
if lookupResult is None:
return None
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
return None
return symbols[0]
def to_string(self, indent: int) -> str:
res = [Symbol.debug_indent_string * indent]
if not self.parent:
res.append('::')
else:
if self.ident:
res.append(str(self.ident))
else:
res.append(str(self.declaration))
if self.declaration:
res.append(": ")
if self.isRedeclaration:
res.append('!!duplicate!! ')
res.append(str(self.declaration))
if self.docname:
res.append('\t(')
res.append(self.docname)
res.append(')')
res.append('\n')
return ''.join(res)
def dump(self, indent: int) -> str:
res = [self.to_string(indent)]
for c in self._children:
res.append(c.dump(indent + 1))
return ''.join(res)
class DefinitionParser(BaseParser):
@property
def language(self) -> str:
return 'C'
@property
def id_attributes(self):
return self.config.c_id_attributes
@property
def paren_attributes(self):
return self.config.c_paren_attributes
def _parse_string(self) -> str:
if self.current_char != '"':
return None
startPos = self.pos
self.pos += 1
escape = False
while True:
if self.eof:
self.fail("Unexpected end during inside string.")
elif self.current_char == '"' and not escape:
self.pos += 1
break
elif self.current_char == '\\':
escape = True
else:
escape = False
self.pos += 1
return self.definition[startPos:self.pos]
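# Illustrative example: _parse_string returns the raw slice including both
# quotes and honours backslash escapes, so for a definition containing
# "a\"b" it consumes the whole literal and leaves self.pos just past the
# closing quote; it returns None when the current character is not '"'.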
def _parse_literal(self) -> ASTLiteral:
# -> integer-literal
# | character-literal
# | floating-literal
# | string-literal
# | boolean-literal -> "false" | "true"
self.skip_ws()
if self.skip_word('true'):
return ASTBooleanLiteral(True)
if self.skip_word('false'):
return ASTBooleanLiteral(False)
pos = self.pos
if self.match(float_literal_re):
self.match(float_literal_suffix_re)
return ASTNumberLiteral(self.definition[pos:self.pos])
for regex in [binary_literal_re, hex_literal_re,
integer_literal_re, octal_literal_re]:
if self.match(regex):
self.match(integers_literal_suffix_re)
return ASTNumberLiteral(self.definition[pos:self.pos])
string = self._parse_string()
if string is not None:
return ASTStringLiteral(string)
# character-literal
if self.match(char_literal_re):
prefix = self.last_match.group(1) # may be None when no prefix
data = self.last_match.group(2)
try:
return ASTCharLiteral(prefix, data)
except UnicodeDecodeError as e:
self.fail("Can not handle character literal. Internal error was: %s" % e)
except UnsupportedMultiCharacterCharLiteral:
self.fail("Can not handle character literal"
" resulting in multiple decoded characters.")
return None
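# Illustrative examples of literals accepted above, in match order:
# booleans (true/false), floating literals such as 1.5e3f, prefixed
# integers such as 0b1010, 0xFFu, 42UL, or 0755, string literals, and
# character literals such as 'a' or L'a'; anything else yields None.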
def _parse_paren_expression(self) -> ASTExpression:
# "(" expression ")"
if self.current_char != '(':
return None
self.pos += 1
res = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' in end of parenthesized expression.")
return ASTParenExpr(res)
def _parse_primary_expression(self) -> ASTExpression:
# literal
# "(" expression ")"
# id-expression -> we parse this with _parse_nested_name
self.skip_ws()
res: ASTExpression = self._parse_literal()
if res is not None:
return res
res = self._parse_paren_expression()
if res is not None:
return res
nn = self._parse_nested_name()
if nn is not None:
return ASTIdExpression(nn)
return None
def _parse_initializer_list(self, name: str, open: str, close: str
) -> Tuple[List[ASTExpression], bool]:
# Parse open and close with the actual initializer-list in between
# -> initializer-clause '...'[opt]
# | initializer-list ',' initializer-clause '...'[opt]
# TODO: designators
self.skip_ws()
if not self.skip_string_and_ws(open):
return None, None
if self.skip_string(close):
return [], False
exprs = []
trailingComma = False
while True:
self.skip_ws()
expr = self._parse_expression()
self.skip_ws()
exprs.append(expr)
self.skip_ws()
if self.skip_string(close):
break
if not self.skip_string_and_ws(','):
self.fail("Error in %s, expected ',' or '%s'." % (name, close))
if self.current_char == close and close == '}':
self.pos += 1
trailingComma = True
break
return exprs, trailingComma
def _parse_paren_expression_list(self) -> ASTParenExprList:
# -> '(' expression-list ')'
# though, we relax it to also allow empty parens
# as it's needed in some cases
#
# expression-list
# -> initializer-list
exprs, trailingComma = self._parse_initializer_list("parenthesized expression-list",
'(', ')')
if exprs is None:
return None
return ASTParenExprList(exprs)
def _parse_braced_init_list(self) -> ASTBracedInitList:
# -> '{' initializer-list ','[opt] '}'
# | '{' '}'
exprs, trailingComma = self._parse_initializer_list("braced-init-list", '{', '}')
if exprs is None:
return None
return ASTBracedInitList(exprs, trailingComma)
def _parse_postfix_expression(self) -> ASTPostfixExpr:
# -> primary
# | postfix "[" expression "]"
# | postfix "[" braced-init-list [opt] "]"
# | postfix "(" expression-list [opt] ")"
# | postfix "." id-expression // taken care of in primary by nested name
# | postfix "->" id-expression
# | postfix "++"
# | postfix "--"
prefix = self._parse_primary_expression()
# and now parse postfixes
postFixes: List[ASTPostfixOp] = []
while True:
self.skip_ws()
if self.skip_string_and_ws('['):
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of postfix expression.")
postFixes.append(ASTPostfixArray(expr))
continue
if self.skip_string('->'):
if self.skip_string('*'):
# don't steal the arrow
self.pos -= 3
else:
name = self._parse_nested_name()
postFixes.append(ASTPostfixMemberOfPointer(name))
continue
if self.skip_string('++'):
postFixes.append(ASTPostfixInc())
continue
if self.skip_string('--'):
postFixes.append(ASTPostfixDec())
continue
lst = self._parse_paren_expression_list()
if lst is not None:
postFixes.append(ASTPostfixCallExpr(lst))
continue
break
return ASTPostfixExpr(prefix, postFixes)
def _parse_unary_expression(self) -> ASTExpression:
# -> postfix
# | "++" cast
# | "--" cast
# | unary-operator cast -> (* | & | + | - | ! | ~) cast
# The rest:
# | "sizeof" unary
# | "sizeof" "(" type-id ")"
# | "alignof" "(" type-id ")"
self.skip_ws()
for op in _expression_unary_ops:
# TODO: hmm, should we be able to backtrack here?
if op[0] in 'cn':
res = self.skip_word(op)
else:
res = self.skip_string(op)
if res:
expr = self._parse_cast_expression()
return ASTUnaryOpExpr(op, expr)
if self.skip_word_and_ws('sizeof'):
if self.skip_string_and_ws('('):
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'sizeof'.")
return ASTSizeofType(typ)
expr = self._parse_unary_expression()
return ASTSizeofExpr(expr)
if self.skip_word_and_ws('alignof'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'alignof'.")
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'alignof'.")
return ASTAlignofExpr(typ)
return self._parse_postfix_expression()
def _parse_cast_expression(self) -> ASTExpression:
# -> unary | "(" type-id ")" cast
pos = self.pos
self.skip_ws()
if self.skip_string('('):
try:
typ = self._parse_type(False)
if not self.skip_string(')'):
self.fail("Expected ')' in cast expression.")
expr = self._parse_cast_expression()
return ASTCastExpr(typ, expr)
except DefinitionError as exCast:
self.pos = pos
try:
return self._parse_unary_expression()
except DefinitionError as exUnary:
errs = []
errs.append((exCast, "If type cast expression"))
errs.append((exUnary, "If unary expression"))
raise self._make_multi_error(errs,
"Error in cast expression.") from exUnary
else:
return self._parse_unary_expression()
def _parse_logical_or_expression(self) -> ASTExpression:
# logical-or = logical-and ||
# logical-and = inclusive-or &&
# inclusive-or = exclusive-or |
# exclusive-or = and ^
# and = equality &
# equality = relational ==, !=
# relational = shift <, >, <=, >=
# shift = additive <<, >>
# additive = multiplicative +, -
# multiplicative = pm *, /, %
# pm = cast .*, ->*
def _parse_bin_op_expr(self, opId):
if opId + 1 == len(_expression_bin_ops):
def parser() -> ASTExpression:
return self._parse_cast_expression()
else:
def parser() -> ASTExpression:
return _parse_bin_op_expr(self, opId + 1)
exprs = []
ops = []
exprs.append(parser())
while True:
self.skip_ws()
pos = self.pos
oneMore = False
for op in _expression_bin_ops[opId]:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
if op == '&' and self.current_char == '&':
# don't split the && 'token'
self.pos -= 1
# and btw. && has lower precedence, so we are done
break
try:
expr = parser()
exprs.append(expr)
ops.append(op)
oneMore = True
break
except DefinitionError:
self.pos = pos
if not oneMore:
break
return ASTBinOpExpr(exprs, ops)
return _parse_bin_op_expr(self, 0)
def _parse_conditional_expression_tail(self, orExprHead: Any) -> ASTExpression:
# -> "?" expression ":" assignment-expression
return None
def _parse_assignment_expression(self) -> ASTExpression:
# -> conditional-expression
# | logical-or-expression assignment-operator initializer-clause
# -> conditional-expression ->
# logical-or-expression
# | logical-or-expression "?" expression ":" assignment-expression
# | logical-or-expression assignment-operator initializer-clause
exprs = []
ops = []
orExpr = self._parse_logical_or_expression()
exprs.append(orExpr)
# TODO: handle ternary with _parse_conditional_expression_tail
while True:
oneMore = False
self.skip_ws()
for op in _expression_assignment_ops:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
expr = self._parse_logical_or_expression()
exprs.append(expr)
ops.append(op)
oneMore = True
if not oneMore:
break
return ASTAssignmentExpr(exprs, ops)
def _parse_constant_expression(self) -> ASTExpression:
# -> conditional-expression
orExpr = self._parse_logical_or_expression()
# TODO: use _parse_conditional_expression_tail
return orExpr
def _parse_expression(self) -> ASTExpression:
# -> assignment-expression
# | expression "," assignment-expression
# TODO: actually parse the second production
return self._parse_assignment_expression()
def _parse_expression_fallback(
self, end: List[str],
parser: Callable[[], ASTExpression],
allow: bool = True) -> ASTExpression:
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
# first try to use the provided parser
prevPos = self.pos
try:
return parser()
except DefinitionError as e:
# some places (e.g., template parameters) we really don't want to use fallback,
# and for testing we may want to globally disable it
if not allow or not self.allowFallbackExpressionParsing:
raise
self.warn("Parsing of expression failed. Using fallback parser."
" Error was:\n%s" % e)
self.pos = prevPos
# and then the fallback scanning
assert end is not None
self.skip_ws()
startPos = self.pos
if self.match(_string_re):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
brackets = {'(': ')', '{': '}', '[': ']'}
symbols: List[str] = []
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
if self.current_char in brackets.keys():
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
self.pos += 1
if len(end) > 0 and self.eof:
self.fail("Could not find end of expression starting at %d."
% startPos)
value = self.definition[startPos:self.pos].strip()
return ASTFallbackExpr(value.strip())
def _parse_nested_name(self) -> ASTNestedName:
names: List[Any] = []
self.skip_ws()
rooted = False
if self.skip_string('.'):
rooted = True
while 1:
self.skip_ws()
if not self.match(identifier_re):
self.fail("Expected identifier in nested name.")
identifier = self.matched_text
# make sure the identifier isn't a keyword
if identifier in _keywords:
self.fail("Expected identifier in nested name, "
"got keyword: %s" % identifier)
if self.matched_text in self.config.c_extra_keywords:
msg = "Expected identifier, got user-defined keyword: %s." \
+ " Remove it from c_extra_keywords to allow it as identifier.\n" \
+ "Currently c_extra_keywords is %s."
self.fail(msg % (self.matched_text,
str(self.config.c_extra_keywords)))
ident = ASTIdentifier(identifier)
names.append(ident)
self.skip_ws()
if not self.skip_string('.'):
break
return ASTNestedName(names, rooted)
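# Illustrative example: nested names in the C domain are dot-separated, so
# "Foo.bar" parses into one ASTIdentifier per component, and a leading '.'
# (as in ".Foo.bar") marks the name as rooted at the global scope.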
def _parse_simple_type_specifier(self) -> Optional[str]:
if self.match(_simple_type_specifiers_re):
return self.matched_text
for t in ('bool', 'complex', 'imaginary'):
if t in self.config.c_extra_keywords:
if self.skip_word(t):
return t
return None
def _parse_simple_type_specifiers(self) -> ASTTrailingTypeSpecFundamental:
names: List[str] = []
self.skip_ws()
while True:
t = self._parse_simple_type_specifier()
if t is None:
break
names.append(t)
self.skip_ws()
if len(names) == 0:
return None
return ASTTrailingTypeSpecFundamental(names)
def _parse_trailing_type_spec(self) -> ASTTrailingTypeSpec:
# fundamental types, https://en.cppreference.com/w/c/language/type
# and extensions
self.skip_ws()
res = self._parse_simple_type_specifiers()
if res is not None:
return res
# prefixed
prefix = None
self.skip_ws()
for k in ('struct', 'enum', 'union'):
if self.skip_word_and_ws(k):
prefix = k
break
nestedName = self._parse_nested_name()
return ASTTrailingTypeSpecName(prefix, nestedName)
def _parse_parameters(self, paramMode: str) -> ASTParameters:
self.skip_ws()
if not self.skip_string('('):
if paramMode == 'function':
self.fail('Expecting "(" in parameters.')
else:
return None
args = []
self.skip_ws()
if not self.skip_string(')'):
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTFunctionParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in parameters.')
break
# note: it seems that function arguments can always be named,
# even in function pointers and similar.
arg = self._parse_type_with_init(outer=None, named='single')
# TODO: parse default parameters # TODO: didn't we just do that?
args.append(ASTFunctionParameter(arg))
self.skip_ws()
if self.skip_string(','):
continue
elif self.skip_string(')'):
break
else:
self.fail(
'Expecting "," or ")" in parameters, '
'got "%s".' % self.current_char)
attrs = []
while True:
attr = self._parse_attribute()
if attr is None:
break
attrs.append(attr)
return ASTParameters(args, attrs)
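# Illustrative example: for a signature like "void f(int a, ...)" the loop
# above produces two ASTFunctionParameter entries, the second being the
# ellipsis, and any attributes written after the closing ')' are collected
# into the ASTParameters attrs list.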
def _parse_decl_specs_simple(self, outer: str, typed: bool) -> ASTDeclSpecsSimple:
"""Just parse the simple ones."""
storage = None
threadLocal = None
inline = None
restrict = None
volatile = None
const = None
attrs = []
while 1: # accept any permutation of a subset of some decl-specs
self.skip_ws()
if not storage:
if outer == 'member':
if self.skip_word('auto'):
storage = 'auto'
continue
if self.skip_word('register'):
storage = 'register'
continue
if outer in ('member', 'function'):
if self.skip_word('static'):
storage = 'static'
continue
if self.skip_word('extern'):
storage = 'extern'
continue
if outer == 'member' and not threadLocal:
if self.skip_word('thread_local'):
threadLocal = 'thread_local'
continue
if self.skip_word('_Thread_local'):
threadLocal = '_Thread_local'
continue
if outer == 'function' and not inline:
inline = self.skip_word('inline')
if inline:
continue
if not restrict and typed:
restrict = self.skip_word('restrict')
if restrict:
continue
if not volatile and typed:
volatile = self.skip_word('volatile')
if volatile:
continue
if not const and typed:
const = self.skip_word('const')
if const:
continue
attr = self._parse_attribute()
if attr:
attrs.append(attr)
continue
break
return ASTDeclSpecsSimple(storage, threadLocal, inline,
restrict, volatile, const, attrs)
def _parse_decl_specs(self, outer: str, typed: bool = True) -> ASTDeclSpecs:
if outer:
if outer not in ('type', 'member', 'function'):
raise Exception('Internal error, unknown outer "%s".' % outer)
leftSpecs = self._parse_decl_specs_simple(outer, typed)
rightSpecs = None
if typed:
trailing = self._parse_trailing_type_spec()
rightSpecs = self._parse_decl_specs_simple(outer, typed)
else:
trailing = None
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_suffix(
self, named: Union[bool, str], paramMode: str, typed: bool
) -> ASTDeclarator:
assert named in (True, False, 'single')
# now we should parse the name, and then suffixes
if named == 'single':
if self.match(identifier_re):
if self.matched_text in _keywords:
self.fail("Expected identifier, "
"got keyword: %s" % self.matched_text)
if self.matched_text in self.config.c_extra_keywords:
msg = "Expected identifier, got user-defined keyword: %s." \
+ " Remove it from c_extra_keywords to allow it as identifier.\n" \
+ "Currently c_extra_keywords is %s."
self.fail(msg % (self.matched_text,
str(self.config.c_extra_keywords)))
identifier = ASTIdentifier(self.matched_text)
declId = ASTNestedName([identifier], rooted=False)
else:
declId = None
elif named:
declId = self._parse_nested_name()
else:
declId = None
arrayOps = []
while 1:
self.skip_ws()
if typed and self.skip_string('['):
self.skip_ws()
static = False
const = False
volatile = False
restrict = False
while True:
if not static:
if self.skip_word_and_ws('static'):
static = True
continue
if not const:
if self.skip_word_and_ws('const'):
const = True
continue
if not volatile:
if self.skip_word_and_ws('volatile'):
volatile = True
continue
if not restrict:
if self.skip_word_and_ws('restrict'):
restrict = True
continue
break
vla = False if static else self.skip_string_and_ws('*')
if vla:
if not self.skip_string(']'):
self.fail("Expected ']' in end of array operator.")
size = None
else:
if self.skip_string(']'):
size = None
else:
def parser():
return self._parse_expression()
size = self._parse_expression_fallback([']'], parser)
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of array operator.")
arrayOps.append(ASTArray(static, const, volatile, restrict, vla, size))
else:
break
param = self._parse_parameters(paramMode)
if param is None and len(arrayOps) == 0:
# perhaps a bit-field
if named and paramMode == 'type' and typed:
self.skip_ws()
if self.skip_string(':'):
size = self._parse_constant_expression()
return ASTDeclaratorNameBitField(declId=declId, size=size)
return ASTDeclaratorNameParam(declId=declId, arrayOps=arrayOps,
param=param)
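# Illustrative examples for the suffix parsing above: C99 array qualifiers
# such as "int buf[static const 10]" and the VLA form "int buf[*]" become
# ASTArray entries, while a plain name followed by ": width" (e.g.
# "unsigned flag : 1" in a struct member) is parsed as a bit-field.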
def _parse_declarator(self, named: Union[bool, str], paramMode: str,
typed: bool = True) -> ASTDeclarator:
# 'typed' here means 'parse return type stuff'
if paramMode not in ('type', 'function'):
raise Exception(
"Internal error, unknown paramMode '%s'." % paramMode)
prevErrors = []
self.skip_ws()
if typed and self.skip_string('*'):
self.skip_ws()
restrict = False
volatile = False
const = False
attrs = []
while 1:
if not restrict:
restrict = self.skip_word_and_ws('restrict')
if restrict:
continue
if not volatile:
volatile = self.skip_word_and_ws('volatile')
if volatile:
continue
if not const:
const = self.skip_word_and_ws('const')
if const:
continue
attr = self._parse_attribute()
if attr is not None:
attrs.append(attr)
continue
break
next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorPtr(next=next,
restrict=restrict, volatile=volatile, const=const,
attrs=attrs)
if typed and self.current_char == '(': # note: peeking, not skipping
# maybe this is the beginning of params, try that first,
# otherwise assume it's noptr->declarator > ( ptr-declarator )
pos = self.pos
try:
# assume this is params
res = self._parse_declarator_name_suffix(named, paramMode,
typed)
return res
except DefinitionError as exParamQual:
msg = "If declarator-id with parameters"
if paramMode == 'function':
msg += " (e.g., 'void f(int arg)')"
prevErrors.append((exParamQual, msg))
self.pos = pos
try:
assert self.current_char == '('
self.skip_string('(')
# TODO: hmm, if there is a name, it must be in inner, right?
# TODO: hmm, if there must be parameters, they must be
# inside, right?
inner = self._parse_declarator(named, paramMode, typed)
if not self.skip_string(')'):
self.fail("Expected ')' in \"( ptr-declarator )\"")
next = self._parse_declarator(named=False,
paramMode="type",
typed=typed)
return ASTDeclaratorParen(inner=inner, next=next)
except DefinitionError as exNoPtrParen:
self.pos = pos
msg = "If parenthesis in noptr-declarator"
if paramMode == 'function':
msg += " (e.g., 'void (*f(int arg))(double)')"
prevErrors.append((exNoPtrParen, msg))
header = "Error in declarator"
raise self._make_multi_error(prevErrors, header) from exNoPtrParen
pos = self.pos
try:
return self._parse_declarator_name_suffix(named, paramMode, typed)
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If declarator-id"))
header = "Error in declarator or parameters"
raise self._make_multi_error(prevErrors, header) from e
def _parse_initializer(self, outer: str = None, allowFallback: bool = True
) -> ASTInitializer:
self.skip_ws()
if outer == 'member' and False: # TODO
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit, hasAssign=False)
if not self.skip_string('='):
return None
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit)
if outer == 'member':
fallbackEnd: List[str] = []
elif outer is None: # function parameter
fallbackEnd = [',', ')']
else:
self.fail("Internal error, initializer for outer '%s' not "
"implemented." % outer)
def parser():
return self._parse_assignment_expression()
value = self._parse_expression_fallback(fallbackEnd, parser, allow=allowFallback)
return ASTInitializer(value)
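# Illustrative example: an initializer is only recognised after '=', e.g.
# ".. c:member:: int answer = 42" or a braced form like "int a[2] = {1, 2}";
# for function parameters (outer is None) the fallback scanner stops at ','
# or ')'.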
def _parse_type(self, named: Union[bool, str], outer: str = None) -> ASTType:
"""
named=False|'single'|True: 'single' is used e.g. for function objects, which
do not need to name their arguments but otherwise expect a single name
"""
if outer: # always named
if outer not in ('type', 'member', 'function'):
raise Exception('Internal error, unknown outer "%s".' % outer)
assert named
if outer == 'type':
# We allow type objects to just be a name.
prevErrors = []
startPos = self.pos
# first try without the type
try:
declSpecs = self._parse_decl_specs(outer=outer, typed=False)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=False)
self.assert_end(allowSemicolon=True)
except DefinitionError as exUntyped:
desc = "If just a name"
prevErrors.append((exUntyped, desc))
self.pos = startPos
try:
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
except DefinitionError as exTyped:
self.pos = startPos
desc = "If typedef-like declaration"
prevErrors.append((exTyped, desc))
# Retain the else branch for easier debugging.
# TODO: it would be nice to save the previous stacktrace
# and output it here.
if True:
header = "Type must be either just a name or a "
header += "typedef-like declaration."
raise self._make_multi_error(prevErrors, header) from exTyped
else:
# For testing purposes.
# do it again to get the proper traceback (how do you
# reliably save a traceback when an exception is
# constructed?)
self.pos = startPos
typed = True
declSpecs = self._parse_decl_specs(outer=outer, typed=typed)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=typed)
elif outer == 'function':
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
else:
paramMode = 'type'
if outer == 'member': # i.e., member
named = True
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=named, paramMode=paramMode)
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named: Union[bool, str], outer: str) -> ASTTypeWithInit:
if outer:
assert outer in ('type', 'member', 'function')
type = self._parse_type(outer=outer, named=named)
init = self._parse_initializer(outer=outer)
return ASTTypeWithInit(type, init)
def _parse_macro(self) -> ASTMacro:
self.skip_ws()
ident = self._parse_nested_name()
if ident is None:
self.fail("Expected identifier in macro definition.")
self.skip_ws()
if not self.skip_string_and_ws('('):
return ASTMacro(ident, None)
if self.skip_string(')'):
return ASTMacro(ident, [])
args = []
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTMacroParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in macro parameters.')
break
if not self.match(identifier_re):
self.fail("Expected identifier in macro parameters.")
nn = ASTNestedName([ASTIdentifier(self.matched_text)], rooted=False)
# Allow named variadic args:
# https://gcc.gnu.org/onlinedocs/cpp/Variadic-Macros.html
self.skip_ws()
if self.skip_string_and_ws('...'):
args.append(ASTMacroParameter(nn, False, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in macro parameters.')
break
args.append(ASTMacroParameter(nn))
if self.skip_string_and_ws(','):
continue
elif self.skip_string_and_ws(')'):
break
else:
self.fail("Expected identifier, ')', or ',' in macro parameter list.")
return ASTMacro(ident, args)
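# Illustrative examples for the macro parser above: ".. c:macro:: VERSION"
# (object-like, args is None), ".. c:macro:: MIN(a, b)", and variadic forms
# such as ".. c:macro:: LOG(fmt, ...)" or the named GNU style
# ".. c:macro:: LOG(fmt, args...)".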
def _parse_struct(self) -> ASTStruct:
name = self._parse_nested_name()
return ASTStruct(name)
def _parse_union(self) -> ASTUnion:
name = self._parse_nested_name()
return ASTUnion(name)
def _parse_enum(self) -> ASTEnum:
name = self._parse_nested_name()
return ASTEnum(name)
def _parse_enumerator(self) -> ASTEnumerator:
name = self._parse_nested_name()
self.skip_ws()
init = None
if self.skip_string('='):
self.skip_ws()
def parser() -> ASTExpression:
return self._parse_constant_expression()
initVal = self._parse_expression_fallback([], parser)
init = ASTInitializer(initVal)
return ASTEnumerator(name, init)
def parse_pre_v3_type_definition(self) -> ASTDeclaration:
self.skip_ws()
declaration: DeclarationType = None
if self.skip_word('struct'):
typ = 'struct'
declaration = self._parse_struct()
elif self.skip_word('union'):
typ = 'union'
declaration = self._parse_union()
elif self.skip_word('enum'):
typ = 'enum'
declaration = self._parse_enum()
else:
self.fail("Could not parse pre-v3 type directive."
" Must start with 'struct', 'union', or 'enum'.")
return ASTDeclaration(typ, typ, declaration, False)
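# Illustrative example (assuming the documented c_allow_pre_v3 option is
# enabled): a legacy ".. c:type:: struct Foo" is re-parsed here and treated
# as the equivalent ".. c:struct:: Foo" declaration.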
def parse_declaration(self, objectType: str, directiveType: str) -> ASTDeclaration:
if objectType not in ('function', 'member',
'macro', 'struct', 'union', 'enum', 'enumerator', 'type'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
if directiveType not in ('function', 'member', 'var',
'macro', 'struct', 'union', 'enum', 'enumerator', 'type'):
raise Exception('Internal error, unknown directiveType "%s".' % directiveType)
declaration: DeclarationType = None
if objectType == 'member':
declaration = self._parse_type_with_init(named=True, outer='member')
elif objectType == 'function':
declaration = self._parse_type(named=True, outer='function')
elif objectType == 'macro':
declaration = self._parse_macro()
elif objectType == 'struct':
declaration = self._parse_struct()
elif objectType == 'union':
declaration = self._parse_union()
elif objectType == 'enum':
declaration = self._parse_enum()
elif objectType == 'enumerator':
declaration = self._parse_enumerator()
elif objectType == 'type':
declaration = self._parse_type(named=True, outer='type')
else:
assert False
if objectType != 'macro':
self.skip_ws()
semicolon = self.skip_string(';')
else:
semicolon = False
return ASTDeclaration(objectType, directiveType, declaration, semicolon)
def parse_namespace_object(self) -> ASTNestedName:
return self._parse_nested_name()
def parse_xref_object(self) -> ASTNestedName:
name = self._parse_nested_name()
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
return name
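# Illustrative example: cross-reference targets may carry an empty trailing
# "()", so ":c:func:`open`" and ":c:func:`open()`" both resolve to the same
# nested name "open".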
def parse_expression(self) -> Union[ASTExpression, ASTType]:
pos = self.pos
res: Union[ASTExpression, ASTType] = None
try:
res = self._parse_expression()
self.skip_ws()
self.assert_end()
except DefinitionError as exExpr:
self.pos = pos
try:
res = self._parse_type(False)
self.skip_ws()
self.assert_end()
except DefinitionError as exType:
header = "Error when parsing (type) expression."
errs = []
errs.append((exExpr, "If expression"))
errs.append((exType, "If type"))
raise self._make_multi_error(errs, header) from exType
return res
def _make_phony_error_name() -> ASTNestedName:
return ASTNestedName([ASTIdentifier("PhonyNameDueToError")], rooted=False)
class CObject(ObjectDescription[ASTDeclaration]):
"""
Description of a C language object.
"""
option_spec: OptionSpec = {
'noindexentry': directives.flag,
}
def _add_enumerator_to_parent(self, ast: ASTDeclaration) -> None:
assert ast.objectType == 'enumerator'
# find the parent, if it exists && is an enum
# then add the name to the parent scope
symbol = ast.symbol
assert symbol
assert symbol.ident is not None
parentSymbol = symbol.parent
assert parentSymbol
if parentSymbol.parent is None:
# TODO: we could warn, but it is somewhat equivalent to
# enumerators, without the enum
return # no parent
parentDecl = parentSymbol.declaration
if parentDecl is None:
# the parent is not explicitly declared
# TODO: we could warn, but?
return
if parentDecl.objectType != 'enum':
# TODO: maybe issue a warning, enumerators in non-enums is weird,
# but it is somewhat equivalent to enumerators, without the enum
return
if parentDecl.directiveType != 'enum':
return
targetSymbol = parentSymbol.parent
s = targetSymbol.find_identifier(symbol.ident, matchSelf=False, recurseInAnon=True,
searchInSiblings=False)
if s is not None:
# something is already declared with that name
return
declClone = symbol.declaration.clone()
declClone.enumeratorScopedSymbol = symbol
Symbol(parent=targetSymbol, ident=symbol.ident,
declaration=declClone,
docname=self.env.docname, line=self.get_source_info()[1])
def add_target_and_index(self, ast: ASTDeclaration, sig: str,
signode: TextElement) -> None:
ids = []
for i in range(1, _max_id + 1):
try:
id = ast.get_id(version=i)
ids.append(id)
except NoOldIdError:
assert i < _max_id
# let's keep the newest first
ids = list(reversed(ids))
newestId = ids[0]
assert newestId # shouldn't be None
name = ast.symbol.get_full_nested_name().get_display_string().lstrip('.')
if newestId not in self.state.document.ids:
# always add the newest id
assert newestId
signode['ids'].append(newestId)
# only add compatibility ids when there are no conflicts
for id in ids[1:]:
if not id: # is None when the element didn't exist in that version
continue
if id not in self.state.document.ids:
signode['ids'].append(id)
self.state.document.note_explicit_target(signode)
if 'noindexentry' not in self.options:
indexText = self.get_index_text(name)
self.indexnode['entries'].append(('single', indexText, newestId, '', None))
@property
def object_type(self) -> str:
raise NotImplementedError()
@property
def display_object_type(self) -> str:
return self.object_type
def get_index_text(self, name: str) -> str:
return _('%s (C %s)') % (name, self.display_object_type)
def parse_definition(self, parser: DefinitionParser) -> ASTDeclaration:
return parser.parse_declaration(self.object_type, self.objtype)
def parse_pre_v3_type_definition(self, parser: DefinitionParser) -> ASTDeclaration:
return parser.parse_pre_v3_type_definition()
def describe_signature(self, signode: TextElement, ast: ASTDeclaration,
options: Dict) -> None:
ast.describe_signature(signode, 'lastIsName', self.env, options)
def run(self) -> List[Node]:
env = self.state.document.settings.env # from ObjectDescription.run
if 'c:parent_symbol' not in env.temp_data:
root = env.domaindata['c']['root_symbol']
env.temp_data['c:parent_symbol'] = root
env.ref_context['c:parent_key'] = root.get_lookup_key()
# When multiple declarations are made in the same directive
# they need to know about each other to provide symbol lookup for function parameters.
# We use last_symbol to store the latest added declaration in a directive.
env.temp_data['c:last_symbol'] = None
return super().run()
def handle_signature(self, sig: str, signode: TextElement) -> ASTDeclaration:
parentSymbol: Symbol = self.env.temp_data['c:parent_symbol']
parser = DefinitionParser(sig, location=signode, config=self.env.config)
try:
try:
ast = self.parse_definition(parser)
parser.assert_end()
except DefinitionError as eOrig:
if not self.env.config['c_allow_pre_v3']:
raise
if self.objtype != 'type':
raise
try:
ast = self.parse_pre_v3_type_definition(parser)
parser.assert_end()
except DefinitionError:
raise eOrig
self.object_type = ast.objectType # type: ignore
if self.env.config['c_warn_on_allowed_pre_v3']:
msg = "{}: Pre-v3 C type directive '.. c:type:: {}' converted to " \
"'.. c:{}:: {}'." \
"\nThe original parsing error was:\n{}"
msg = msg.format(RemovedInSphinx60Warning.__name__,
sig, ast.objectType, ast, eOrig)
logger.warning(msg, location=signode)
except DefinitionError as e:
logger.warning(e, location=signode)
            # It is easier to assume some phony name than to handle the error in
            # the possibly inner declarations.
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
self.env.temp_data['c:last_symbol'] = symbol
raise ValueError from e
try:
symbol = parentSymbol.add_declaration(
ast, docname=self.env.docname, line=self.get_source_info()[1])
# append the new declaration to the sibling list
assert symbol.siblingAbove is None
assert symbol.siblingBelow is None
symbol.siblingAbove = self.env.temp_data['c:last_symbol']
if symbol.siblingAbove is not None:
assert symbol.siblingAbove.siblingBelow is None
symbol.siblingAbove.siblingBelow = symbol
self.env.temp_data['c:last_symbol'] = symbol
except _DuplicateSymbolError as e:
# Assume we are actually in the old symbol,
# instead of the newly created duplicate.
self.env.temp_data['c:last_symbol'] = e.symbol
msg = __("Duplicate C declaration, also defined at %s:%s.\n"
"Declaration is '.. c:%s:: %s'.")
msg = msg % (e.symbol.docname, e.symbol.line, self.display_object_type, sig)
logger.warning(msg, location=signode)
if ast.objectType == 'enumerator':
self._add_enumerator_to_parent(ast)
        # note: handle_signature may be called multiple times per directive,
        # if it has multiple signatures, so don't mess with the original options.
options = dict(self.options)
self.describe_signature(signode, ast, options)
return ast
def before_content(self) -> None:
lastSymbol: Symbol = self.env.temp_data['c:last_symbol']
assert lastSymbol
self.oldParentSymbol = self.env.temp_data['c:parent_symbol']
self.oldParentKey: LookupKey = self.env.ref_context['c:parent_key']
self.env.temp_data['c:parent_symbol'] = lastSymbol
self.env.ref_context['c:parent_key'] = lastSymbol.get_lookup_key()
def after_content(self) -> None:
self.env.temp_data['c:parent_symbol'] = self.oldParentSymbol
self.env.ref_context['c:parent_key'] = self.oldParentKey
def make_old_id(self, name: str) -> str:
"""Generate old styled node_id for C objects.
.. note:: Old Styled node_id was used until Sphinx-3.0.
This will be removed in Sphinx-5.0.
"""
return 'c.' + name
class CMemberObject(CObject):
object_type = 'member'
@property
def display_object_type(self) -> str:
# the distinction between var and member is only cosmetic
assert self.objtype in ('member', 'var')
return self.objtype
_function_doc_field_types = [
TypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='expr', typenames=('type',)),
GroupedField('retval', label=_('Return values'),
names=('retvals', 'retval'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('returntype', label=_('Return type'), has_arg=False,
names=('rtype',)),
]
class CFunctionObject(CObject):
object_type = 'function'
doc_field_types = _function_doc_field_types.copy()
class CMacroObject(CObject):
object_type = 'macro'
doc_field_types = _function_doc_field_types.copy()
class CStructObject(CObject):
object_type = 'struct'
class CUnionObject(CObject):
object_type = 'union'
class CEnumObject(CObject):
object_type = 'enum'
class CEnumeratorObject(CObject):
object_type = 'enumerator'
class CTypeObject(CObject):
object_type = 'type'
class CNamespaceObject(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting stuff in
namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
rootSymbol = self.env.domaindata['c']['root_symbol']
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
symbol = rootSymbol
stack: List[Symbol] = []
else:
parser = DefinitionParser(self.arguments[0],
location=self.get_location(),
config=self.env.config)
try:
name = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_location())
name = _make_phony_error_name()
symbol = rootSymbol.add_name(name)
stack = [symbol]
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
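# A hedged reST usage sketch for the namespace directive above (the names are
# illustrative, not taken from this module):
#
#   .. c:namespace:: my_library
#
#   .. c:var:: int counter
#
# Declarations following the namespace directive are registered under
# ``my_library``; passing ``NULL``, ``0`` or ``nullptr`` as the argument resets
# the scope back to the root symbol, as handled in ``run()`` above.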
class CNamespacePushObject(SphinxDirective):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
return []
parser = DefinitionParser(self.arguments[0],
location=self.get_location(),
config=self.env.config)
try:
name = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_location())
name = _make_phony_error_name()
oldParent = self.env.temp_data.get('c:parent_symbol', None)
if not oldParent:
oldParent = self.env.domaindata['c']['root_symbol']
symbol = oldParent.add_name(name)
stack = self.env.temp_data.get('c:namespace_stack', [])
stack.append(symbol)
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
class CNamespacePopObject(SphinxDirective):
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
stack = self.env.temp_data.get('c:namespace_stack', None)
if not stack or len(stack) == 0:
logger.warning("C namespace pop on empty stack. Defaulting to global scope.",
location=self.get_location())
stack = []
else:
stack.pop()
if len(stack) > 0:
symbol = stack[-1]
else:
symbol = self.env.domaindata['c']['root_symbol']
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
        self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
class AliasNode(nodes.Element):
def __init__(self, sig: str, aliasOptions: dict,
document: Any, env: "BuildEnvironment" = None,
parentKey: LookupKey = None) -> None:
super().__init__()
self.sig = sig
self.aliasOptions = aliasOptions
self.document = document
if env is not None:
if 'c:parent_symbol' not in env.temp_data:
root = env.domaindata['c']['root_symbol']
env.temp_data['c:parent_symbol'] = root
env.ref_context['c:parent_key'] = root.get_lookup_key()
self.parentKey = env.ref_context['c:parent_key']
else:
assert parentKey is not None
self.parentKey = parentKey
def copy(self) -> 'AliasNode':
return self.__class__(self.sig, self.aliasOptions, self.document,
env=None, parentKey=self.parentKey)
class AliasTransform(SphinxTransform):
default_priority = ReferencesResolver.default_priority - 1
def _render_symbol(self, s: Symbol, maxdepth: int, skipThis: bool,
aliasOptions: dict, renderOptions: dict,
document: Any) -> List[Node]:
if maxdepth == 0:
recurse = True
elif maxdepth == 1:
recurse = False
else:
maxdepth -= 1
recurse = True
nodes: List[Node] = []
if not skipThis:
signode = addnodes.desc_signature('', '')
nodes.append(signode)
s.declaration.describe_signature(signode, 'markName', self.env, renderOptions)
if recurse:
if skipThis:
childContainer: Union[List[Node], addnodes.desc] = nodes
else:
content = addnodes.desc_content()
desc = addnodes.desc()
content.append(desc)
desc.document = document
desc['domain'] = 'c'
# 'desctype' is a backwards compatible attribute
desc['objtype'] = desc['desctype'] = 'alias'
desc['noindex'] = True
childContainer = desc
for sChild in s.children:
if sChild.declaration is None:
continue
childNodes = self._render_symbol(
sChild, maxdepth=maxdepth, skipThis=False,
aliasOptions=aliasOptions, renderOptions=renderOptions,
document=document)
childContainer.extend(childNodes)
if not skipThis and len(desc.children) != 0:
nodes.append(content)
return nodes
def apply(self, **kwargs: Any) -> None:
for node in self.document.traverse(AliasNode):
node = cast(AliasNode, node)
sig = node.sig
parentKey = node.parentKey
try:
parser = DefinitionParser(sig, location=node,
config=self.env.config)
name = parser.parse_xref_object()
except DefinitionError as e:
logger.warning(e, location=node)
name = None
if name is None:
# could not be parsed, so stop here
signode = addnodes.desc_signature(sig, '')
signode.clear()
signode += addnodes.desc_name(sig, sig)
node.replace_self(signode)
continue
rootSymbol: Symbol = self.env.domains['c'].data['root_symbol']
parentSymbol: Symbol = rootSymbol.direct_lookup(parentKey)
if not parentSymbol:
print("Target: ", sig)
print("ParentKey: ", parentKey)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
s = parentSymbol.find_declaration(
name, 'any',
matchSelf=True, recurseInAnon=True)
if s is None:
signode = addnodes.desc_signature(sig, '')
node.append(signode)
signode.clear()
signode += addnodes.desc_name(sig, sig)
logger.warning("Could not find C declaration for alias '%s'." % name,
location=node)
node.replace_self(signode)
continue
# Declarations like .. var:: int Missing::var
# may introduce symbols without declarations.
# But if we skip the root then it is ok to start recursion from it.
if not node.aliasOptions['noroot'] and s.declaration is None:
signode = addnodes.desc_signature(sig, '')
node.append(signode)
signode.clear()
signode += addnodes.desc_name(sig, sig)
logger.warning(
"Can not render C declaration for alias '%s'. No such declaration." % name,
location=node)
node.replace_self(signode)
continue
nodes = self._render_symbol(s, maxdepth=node.aliasOptions['maxdepth'],
skipThis=node.aliasOptions['noroot'],
aliasOptions=node.aliasOptions,
renderOptions=dict(), document=node.document)
node.replace_self(nodes)
class CAliasObject(ObjectDescription):
option_spec: OptionSpec = {
'maxdepth': directives.nonnegative_int,
'noroot': directives.flag,
}
def run(self) -> List[Node]:
"""
On purpose this doesn't call the ObjectDescription version, but is based on it.
Each alias signature may expand into multiple real signatures if 'noroot'.
The code is therefore based on the ObjectDescription version.
"""
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
node = addnodes.desc()
node.document = self.state.document
node['domain'] = self.domain
# 'desctype' is a backwards compatible attribute
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = True
self.names: List[str] = []
aliasOptions = {
'maxdepth': self.options.get('maxdepth', 1),
'noroot': 'noroot' in self.options,
}
if aliasOptions['noroot'] and aliasOptions['maxdepth'] == 1:
logger.warning("Error in C alias declaration."
" Requested 'noroot' but 'maxdepth' 1."
" When skipping the root declaration,"
" need 'maxdepth' 0 for infinite or at least 2.",
location=self.get_location())
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
node.append(AliasNode(sig, aliasOptions, self.state.document, env=self.env))
return [node]
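# A hedged reST usage sketch for the alias directive above (names are
# illustrative):
#
#   .. c:alias:: some_struct
#      :maxdepth: 2
#
# This re-renders the existing declaration of ``some_struct`` (and, with
# maxdepth greater than 1, its children) at this point without indexing it
# again; the actual rendering happens later in ``AliasTransform``.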
class CXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element,
has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
# major hax: replace anon names via simple string manipulation.
# Can this actually fail?
title = anon_identifier_re.sub("[anonymous]", str(title))
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
return title, target
def run(self) -> Tuple[List[Node], List[system_message]]:
if not self.env.config['c_allow_pre_v3']:
return super().run()
text = self.text.replace('\n', ' ')
parser = DefinitionParser(text, location=self.get_location(),
config=self.env.config)
try:
parser.parse_xref_object()
# it succeeded, so let it through
return super().run()
except DefinitionError as eOrig:
# try as if it was an c:expr
parser.pos = 0
try:
ast = parser.parse_expression()
except DefinitionError:
# that didn't go well, just default back
return super().run()
classes = ['xref', 'c', 'c-texpr']
            parentSymbol = self.env.temp_data.get('c:parent_symbol', None)
if parentSymbol is None:
parentSymbol = self.env.domaindata['c']['root_symbol']
signode = nodes.inline(classes=classes)
ast.describe_signature(signode, 'markType', self.env, parentSymbol)
if self.env.config['c_warn_on_allowed_pre_v3']:
msg = "{}: Pre-v3 C type role ':c:type:`{}`' converted to ':c:expr:`{}`'."
msg += "\nThe original parsing error was:\n{}"
msg = msg.format(RemovedInSphinx60Warning.__name__, text, text, eOrig)
logger.warning(msg, location=self.get_location())
return [signode], []
class CExprRole(SphinxRole):
def __init__(self, asCode: bool) -> None:
super().__init__()
if asCode:
# render the expression as inline code
self.class_type = 'c-expr'
else:
# render the expression as inline text
self.class_type = 'c-texpr'
def run(self) -> Tuple[List[Node], List[system_message]]:
text = self.text.replace('\n', ' ')
parser = DefinitionParser(text, location=self.get_location(),
config=self.env.config)
# attempt to mimic XRefRole classes, except that...
try:
ast = parser.parse_expression()
except DefinitionError as ex:
logger.warning('Unparseable C expression: %r\n%s', text, ex,
location=self.get_location())
# see below
return [addnodes.desc_inline('c', text, text, classes=[self.class_type])], []
parentSymbol = self.env.temp_data.get('c:parent_symbol', None)
if parentSymbol is None:
parentSymbol = self.env.domaindata['c']['root_symbol']
# ...most if not all of these classes should really apply to the individual references,
# not the container node
signode = addnodes.desc_inline('c', classes=[self.class_type])
ast.describe_signature(signode, 'markType', self.env, parentSymbol)
return [signode], []
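# Hedged usage sketch for the expression roles wired up below in ``CDomain``
# (spellings assumed from the ``class_type`` values above):
#
#   :c:expr:`sizeof(struct foo) + 1`    -> rendered as inline code
#   :c:texpr:`sizeof(struct foo) + 1`   -> rendered as inline text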
class CDomain(Domain):
"""C language domain."""
name = 'c'
label = 'C'
object_types = {
# 'identifier' is the one used for xrefs generated in signatures, not in roles
'member': ObjType(_('member'), 'var', 'member', 'data', 'identifier'),
'var': ObjType(_('variable'), 'var', 'member', 'data', 'identifier'),
'function': ObjType(_('function'), 'func', 'identifier', 'type'),
'macro': ObjType(_('macro'), 'macro', 'identifier'),
'struct': ObjType(_('struct'), 'struct', 'identifier', 'type'),
'union': ObjType(_('union'), 'union', 'identifier', 'type'),
'enum': ObjType(_('enum'), 'enum', 'identifier', 'type'),
'enumerator': ObjType(_('enumerator'), 'enumerator', 'identifier'),
'type': ObjType(_('type'), 'identifier', 'type'),
# generated object types
'functionParam': ObjType(_('function parameter'), 'identifier', 'var', 'member', 'data'), # noqa
}
directives = {
'member': CMemberObject,
'var': CMemberObject,
'function': CFunctionObject,
'macro': CMacroObject,
'struct': CStructObject,
'union': CUnionObject,
'enum': CEnumObject,
'enumerator': CEnumeratorObject,
'type': CTypeObject,
# scope control
'namespace': CNamespaceObject,
'namespace-push': CNamespacePushObject,
'namespace-pop': CNamespacePopObject,
# other
'alias': CAliasObject
}
roles = {
'member': CXRefRole(),
'data': CXRefRole(),
'var': CXRefRole(),
'func': CXRefRole(fix_parens=True),
'macro': CXRefRole(),
'struct': CXRefRole(),
'union': CXRefRole(),
'enum': CXRefRole(),
'enumerator': CXRefRole(),
'type': CXRefRole(),
'expr': CExprRole(asCode=True),
'texpr': CExprRole(asCode=False)
}
initial_data: Dict[str, Union[Symbol, Dict[str, Tuple[str, str, str]]]] = {
'root_symbol': Symbol(None, None, None, None, None),
'objects': {}, # fullname -> docname, node_id, objtype
}
def clear_doc(self, docname: str) -> None:
if Symbol.debug_show_tree:
print("clear_doc:", docname)
print("\tbefore:")
print(self.data['root_symbol'].dump(1))
print("\tbefore end")
rootSymbol = self.data['root_symbol']
rootSymbol.clear_doc(docname)
if Symbol.debug_show_tree:
print("\tafter:")
print(self.data['root_symbol'].dump(1))
print("\tafter end")
print("clear_doc end:", docname)
def process_doc(self, env: BuildEnvironment, docname: str,
document: nodes.document) -> None:
if Symbol.debug_show_tree:
print("process_doc:", docname)
print(self.data['root_symbol'].dump(0))
print("process_doc end:", docname)
def process_field_xref(self, pnode: pending_xref) -> None:
pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
if Symbol.debug_show_tree:
print("merge_domaindata:")
print("\tself:")
print(self.data['root_symbol'].dump(1))
print("\tself end")
print("\tother:")
print(otherdata['root_symbol'].dump(1))
print("\tother end")
print("merge_domaindata end")
self.data['root_symbol'].merge_with(otherdata['root_symbol'],
docnames, self.env)
ourObjects = self.data['objects']
for fullname, (fn, id_, objtype) in otherdata['objects'].items():
if fn in docnames:
if fullname not in ourObjects:
ourObjects[fullname] = (fn, id_, objtype)
# no need to warn on duplicates, the symbol merge already does that
def _resolve_xref_inner(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Tuple[Optional[Element], Optional[str]]:
parser = DefinitionParser(target, location=node, config=env.config)
try:
name = parser.parse_xref_object()
except DefinitionError as e:
logger.warning('Unparseable C cross-reference: %r\n%s', target, e,
location=node)
return None, None
parentKey: LookupKey = node.get("c:parent_key", None)
rootSymbol = self.data['root_symbol']
if parentKey:
parentSymbol: Symbol = rootSymbol.direct_lookup(parentKey)
if not parentSymbol:
print("Target: ", target)
print("ParentKey: ", parentKey)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
else:
parentSymbol = rootSymbol
s = parentSymbol.find_declaration(name, typ,
matchSelf=True, recurseInAnon=True)
if s is None or s.declaration is None:
return None, None
# TODO: check role type vs. object type
declaration = s.declaration
displayName = name.get_display_string()
docname = s.docname
assert docname
return make_refnode(builder, fromdocname, docname,
declaration.get_newest_id(), contnode, displayName
), declaration.objectType
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Optional[Element]:
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
with logging.suppress_logging():
retnode, objtype = self._resolve_xref_inner(env, fromdocname, builder,
'any', target, node, contnode)
if retnode:
return [('c:' + self.role_for_objtype(objtype), retnode)]
return []
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
rootSymbol = self.data['root_symbol']
for symbol in rootSymbol.get_all_symbols():
if symbol.declaration is None:
continue
assert symbol.docname
fullNestedName = symbol.get_full_nested_name()
name = str(fullNestedName).lstrip('.')
dispname = fullNestedName.get_display_string().lstrip('.')
objectType = symbol.declaration.objectType
docname = symbol.docname
newestId = symbol.declaration.get_newest_id()
yield (name, dispname, objectType, docname, newestId, 1)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(CDomain)
app.add_config_value("c_id_attributes", [], 'env')
app.add_config_value("c_paren_attributes", [], 'env')
app.add_config_value("c_extra_keywords", _macroKeywords, 'env')
app.add_post_transform(AliasTransform)
app.add_config_value("c_allow_pre_v3", False, 'env')
app.add_config_value("c_warn_on_allowed_pre_v3", True, 'env')
return {
'version': 'builtin',
'env_version': 2,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
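# Hedged example of how the config values registered in ``setup`` might appear
# in a project's ``conf.py`` (values are illustrative, not defaults):
#
#   c_id_attributes = ["my_id_attr"]
#   c_paren_attributes = ["my_paren_attr"]
#   c_allow_pre_v3 = True
#   c_warn_on_allowed_pre_v3 = False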
|
py | 1a3fafd0d70fcdb79fed02f903ed0e9a92fa6dd0 | import graphene
from django.contrib.auth import get_user_model
from graphene_django import DjangoObjectType
from group.models import GroupMember
class GroupMemberType(DjangoObjectType):
class Meta:
model = GroupMember
class UserType(DjangoObjectType):
is_admin = graphene.String(source='is_admin')
is_staff = graphene.String(source='is_staff')
class Meta:
model = get_user_model()
exclude_fields = ['password']
|
py | 1a3fb01384d8b4e486e3ded4c0143920e6ffb199 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gibbs sampling inference for (a special case of) STS models.
These methods implement Gibbs sampling steps for STS models that combine a
single LocalLevel component with a linear regression component, with conjugate
InverseGamma priors on the scale and a Gaussian prior on the weights. This model
class is somewhat general, in that we assume that any seasonal/holiday variation
can be encoded in the design matrix of the linear regression. The intent is to
support deployment of STS inference in latency-sensitive applications.
This Gibbs sampler tends to reach acceptable answers much more quickly than
fitting the same models by gradient-based methods (VI or HMC). Because it does
not marginalize out the linear Gaussian latents analytically, it may be more
prone to getting stuck at a single (perhaps suboptimal) posterior explanation;
however, in practice it often finds good solutions.
The speed advantage of Gibbs sampling in this model likely arises from a
combination of:
- Analytically sampling the regression weights once per sampling cycle, instead
of requiring a quadratically-expensive update at each timestep of Kalman
filtering (as in DynamicLinearRegression), or relying on gradient-based
approximate inference (as in LinearRegression).
- Exploiting conjugacy to sample the scale parameters directly.
- Specializing the Gibbs step for the latent level to the case of a
scalar process with identity transitions.
It would be possible to expand this sampler to support additional STS models,
potentially at a cost with respect to some of these performance advantages (and
additional code):
- To support general latent state-space models, one would augment the sampler
state to track all parameters in the model. Each component would need to
register Gibbs sampling steps for its parameters (assuming conjugate priors),
as a function of the sampled latent trajectory. The resampling steps for the
observation_noise_scale and level_scale parameters would then be replaced with
a generic loop over all parameters in the model.
- To support regression with non-Gaussian (e.g., spike-and-slab) priors, one
would need to write a custom prior-specific sampler, analogous to the current
`resample_weights` function.
- For specific models it may be possible to implement an efficient prior
  sampling algorithm, analogous to `LocalLevelStateSpaceModel._joint_sample_n`.
This may be significantly faster than the generic sampler and can speed up
the posterior sampling step for the latent trajectory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import sts
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.distributions import normal_conjugate_posteriors
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.sts.internal import util as sts_util
# The sampler state stores current values for each model parameter,
# and auxiliary quantities such as the latent level. It should have the property
# that `model.make_state_space_model(num_timesteps, GibbsSamplerState(...))`
# behaves properly -- i.e., that the state contains all model
# parameters *in the same order* as they are listed in `model.parameters`. This
# is currently enforced by construction in `build_gibbs_fittable_model`.
GibbsSamplerState = collections.namedtuple('GibbsSamplerState', [
'observation_noise_scale', 'level_scale', 'weights', 'level', 'seed'])
def build_model_for_gibbs_fitting(observed_time_series,
design_matrix,
weights_prior,
level_variance_prior,
observation_noise_variance_prior):
"""Builds a StructuralTimeSeries model instance that supports Gibbs sampling.
  To support Gibbs sampling, a model must have conjugate priors on all
scale and weight parameters, and must be constructed so that
  `model.parameters` matches the parameters and ordering specified by the
  `GibbsSamplerState` namedtuple. Currently, this includes (only) models
consisting of the sum of a LocalLevel and a LinearRegression component.
Args:
observed_time_series: optional `float` `Tensor` of shape [..., T, 1]`
(omitting the trailing unit dimension is also supported when `T > 1`),
specifying an observed time series. May optionally be an instance of
`tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify
timesteps with missing observations.
design_matrix: float `Tensor` of shape `concat([batch_shape,
[num_timesteps, num_features]])`. This may also optionally be
an instance of `tf.linalg.LinearOperator`.
weights_prior: An instance of `tfd.Normal` representing a scalar prior on
each regression weight. May have batch shape broadcastable to the batch
shape of `observed_time_series`.
level_variance_prior: An instance of `tfd.InverseGamma` representing a prior
on the level variance (`level_scale**2`) of a local level model. May have
batch shape broadcastable to the batch shape of `observed_time_series`.
observation_noise_variance_prior: An instance of `tfd.InverseGamma`
representing a prior on the observation noise variance (
`observation_noise_scale**2`). May have batch shape broadcastable to the
batch shape of `observed_time_series`.
Returns:
model: A `tfp.sts.StructuralTimeSeries` model instance.
"""
if not isinstance(weights_prior, tfd.Normal):
raise ValueError('Weights prior must be a univariate normal distribution.')
if not isinstance(level_variance_prior, tfd.InverseGamma):
raise ValueError(
'Level variance prior must be an inverse gamma distribution.')
if not isinstance(observation_noise_variance_prior, tfd.InverseGamma):
raise ValueError('Observation noise variance prior must be an inverse '
'gamma distribution.')
sqrt = tfb.Invert(tfb.Square()) # Converts variance priors to scale priors.
local_level = sts.LocalLevel(observed_time_series=observed_time_series,
level_scale_prior=sqrt(level_variance_prior),
name='local_level')
regression = sts.LinearRegression(design_matrix=design_matrix,
weights_prior=weights_prior,
name='regression')
model = sts.Sum([local_level, regression],
observed_time_series=observed_time_series,
observation_noise_scale_prior=sqrt(
observation_noise_variance_prior),
# The Gibbs sampling steps in this file do not account for an
# offset to the observed series. Instead, we assume the
# observed series has already been centered and
# scale-normalized.
constant_offset=0.)
model.supports_gibbs_sampling = True
return model
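# A minimal usage sketch (assumed, not part of this module): given an observed
# series `y` of shape [num_timesteps] and a design matrix `X` of shape
# [num_timesteps, num_features], a Gibbs-fittable model could be assembled
# roughly as follows; the prior parameters are illustrative placeholders.
#
#   model = build_model_for_gibbs_fitting(
#       observed_time_series=y,
#       design_matrix=X,
#       weights_prior=tfd.Normal(loc=0., scale=1.),
#       level_variance_prior=tfd.InverseGamma(concentration=0.01, scale=0.01),
#       observation_noise_variance_prior=tfd.InverseGamma(
#           concentration=0.01, scale=0.01))
#   samples = fit_with_gibbs_sampling(model, y, num_results=500)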
def _get_design_matrix(model):
"""Returns the design matrix for an STS model with a regression component."""
design_matrices = [component.design_matrix for component in model.components
if hasattr(component, 'design_matrix')]
if not design_matrices:
raise ValueError('Model does not contain a regression component.')
if len(design_matrices) > 1:
raise ValueError('Model contains multiple regression components.')
return design_matrices[0]
def fit_with_gibbs_sampling(model,
observed_time_series,
num_results=2000,
num_warmup_steps=200,
compile_steps_with_xla=False,
initial_state=None,
seed=None):
"""Fits parameters for an STS model using Gibbs sampling."""
if not hasattr(model, 'supports_gibbs_sampling'):
raise ValueError('This STS model does not support Gibbs sampling. Models '
'for Gibbs sampling must be created using the '
'method `build_model_for_gibbs_fitting`.')
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
dtype = observed_time_series.dtype
# The canonicalized time series always has trailing dimension `1`,
# because although LinearGaussianSSMs support vector observations, STS models
# describe scalar time series only. For our purposes it'll be cleaner to
# remove this dimension.
observed_time_series = observed_time_series[..., 0]
batch_shape = prefer_static.shape(observed_time_series)[:-1]
if initial_state is None:
initial_state = GibbsSamplerState(
observation_noise_scale=tf.ones(batch_shape, dtype=dtype),
level_scale=tf.ones(batch_shape, dtype=dtype),
weights=tf.zeros(prefer_static.concat([
batch_shape,
_get_design_matrix(model).shape[-1:]], axis=0), dtype=dtype),
level=tf.zeros_like(observed_time_series),
seed=None) # Set below.
if seed and isinstance(seed, six.integer_types):
tf.random.set_seed(seed)
# Always use the passed-in `seed` arg, ignoring any seed in the initial state.
seeded_state = initial_state._asdict()
seeded_state['seed'] = samplers.sanitize_seed(
seed, salt='initial_GibbsSamplerState')
initial_state = GibbsSamplerState(**seeded_state)
sampler_loop_body = _build_sampler_loop_body(
model, observed_time_series, is_missing,
compile_steps_with_xla=compile_steps_with_xla,
seed=seed) # This is still an `int` seed, because the InverseGamma
# sampler currently requires stateful semantics.
samples = tf.scan(sampler_loop_body,
np.arange(num_warmup_steps + num_results),
initial_state)
return tf.nest.map_structure(lambda x: x[num_warmup_steps:], samples)
def one_step_predictive(model,
posterior_samples,
num_forecast_steps=0,
original_mean=0.,
original_scale=1.,
thin_every=10):
"""Constructs a one-step-ahead predictive distribution at every timestep.
Unlike the generic `tfp.sts.one_step_predictive`, this method uses the
latent levels from Gibbs sampling to efficiently construct a predictive
distribution that mixes over posterior samples. The predictive distribution
may also include additional forecast steps.
This method returns the predictive distributions for each timestep given
previous timesteps and sampled model parameters, `p(observed_time_series[t] |
observed_time_series[:t], weights, observation_noise_scale)`. Note that the
posterior values of the weights and noise scale will in general be informed
by observations from all timesteps *including the step being predicted*, so
this is not a strictly kosher probabilistic quantity, but in general we assume
that it's close, i.e., that the step being predicted had very small individual
impact on the overall parameter posterior.
Args:
model: A `tfd.sts.StructuralTimeSeries` model instance. This must be of the
form constructed by `build_model_for_gibbs_sampling`.
posterior_samples: A `GibbsSamplerState` instance in which each element is a
`Tensor` with initial dimension of size `num_samples`.
num_forecast_steps: Python `int` number of additional forecast steps to
append.
Default value: `0`.
original_mean: Optional scalar float `Tensor`, added to the predictive
distribution to undo the effect of input normalization.
Default value: `0.`
original_scale: Optional scalar float `Tensor`, used to rescale the
predictive distribution to undo the effect of input normalization.
Default value: `1.`
thin_every: Optional Python `int` factor by which to thin the posterior
samples, to reduce complexity of the predictive distribution. For example,
if `thin_every=10`, every `10`th sample will be used.
Default value: `10`.
Returns:
predictive_dist: A `tfd.MixtureSameFamily` instance of event shape
`[num_timesteps + num_forecast_steps]` representing the predictive
distribution of each timestep given previous timesteps.
"""
dtype = dtype_util.common_dtype([
posterior_samples.level_scale.dtype,
posterior_samples.observation_noise_scale.dtype,
posterior_samples.level.dtype,
original_mean,
original_scale], dtype_hint=tf.float32)
num_observed_steps = prefer_static.shape(posterior_samples.level)[-1]
original_mean = tf.convert_to_tensor(original_mean, dtype=dtype)
original_scale = tf.convert_to_tensor(original_scale, dtype=dtype)
thinned_samples = tf.nest.map_structure(lambda x: x[::thin_every],
posterior_samples)
# The local level model expects that the level at step t+1 is equal
# to the level at step t (plus transition noise of scale 'level_scale', which
# we account for below).
if num_forecast_steps > 0:
num_batch_dims = prefer_static.rank_from_shape(
prefer_static.shape(thinned_samples.level)) - 2
forecast_level = tf.tile(thinned_samples.level[..., -1:],
tf.concat([tf.ones([num_batch_dims + 1],
dtype=tf.int32),
[num_forecast_steps]], axis=0))
level_pred = tf.concat([thinned_samples.level[..., :1], # t == 0
thinned_samples.level[..., :-1] # 1 <= t < T
] + ([forecast_level] if num_forecast_steps > 0
else []),
axis=-1)
design_matrix = _get_design_matrix(
model).to_dense()[:num_observed_steps + num_forecast_steps]
regression_effect = tf.linalg.matvec(design_matrix, thinned_samples.weights)
y_mean = ((level_pred + regression_effect) *
original_scale[..., tf.newaxis] + original_mean[..., tf.newaxis])
num_steps_from_last_observation = tf.concat([
tf.ones([num_observed_steps], dtype=dtype),
tf.range(1, num_forecast_steps + 1, dtype=dtype)], axis=0)
y_scale = (original_scale * tf.sqrt(
thinned_samples.observation_noise_scale[..., tf.newaxis]**2 +
thinned_samples.level_scale[..., tf.newaxis]**2 *
num_steps_from_last_observation))
num_posterior_draws = prefer_static.shape(y_mean)[0]
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
logits=tf.zeros([num_posterior_draws], dtype=y_mean.dtype)),
components_distribution=tfd.Normal(
loc=dist_util.move_dimension(y_mean, 0, -1),
scale=dist_util.move_dimension(y_scale, 0, -1)))
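# Hedged usage sketch, continuing the example after
# `build_model_for_gibbs_fitting` above (`model` and `samples` are the objects
# named in that sketch):
#
#   predictive = one_step_predictive(model, samples, num_forecast_steps=10)
#   mean = predictive.mean()
#   stddev = predictive.stddev()
#
# `mean` and `stddev` then have shape [num_timesteps + 10].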
def _resample_weights(design_matrix, target_residuals, observation_noise_scale,
weights_prior_scale, is_missing=None, seed=None):
"""Samples regression weights from their conditional posterior.
This assumes a conjugate normal regression model,
```
weights ~ Normal(loc=0., covariance_matrix=weights_prior_scale**2 * I)
target_residuals ~ Normal(loc=matvec(design_matrix, weights),
covariance_matrix=observation_noise_scale**2 * I)
```
and returns a sample from `p(weights | target_residuals,
observation_noise_scale, design_matrix)`.
Args:
design_matrix: Float `Tensor` design matrix of shape
`[..., num_timesteps, num_features]`.
target_residuals: Float `Tensor` of shape `[..., num_observations]`
observation_noise_scale: Scalar float `Tensor` (with optional batch shape)
standard deviation of the iid observation noise.
weights_prior_scale: Scalar float `Tensor` (with optional batch shape)
specifying the standard deviation of the Normal prior on regression
weights.
is_missing: Optional `bool` `Tensor` of shape `[..., num_timesteps]`. A
`True` value indicates that the observation for that timestep is missing.
seed: Optional `Python` `int` seed controlling the sampled values.
Returns:
weights: Float `Tensor` of shape `[..., num_features]`, sampled from
the conditional posterior `p(weights | target_residuals,
observation_noise_scale, weights_prior_scale)`.
"""
if is_missing is not None:
# Replace design matrix with zeros at unobserved timesteps. This ensures
# they will not affect the posterior on weights.
design_matrix = tf.where(is_missing[..., tf.newaxis],
tf.zeros_like(design_matrix),
design_matrix)
design_shape = prefer_static.shape(design_matrix)
num_outputs = design_shape[-2]
num_features = design_shape[-1]
iid_prior_scale = tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_features, multiplier=weights_prior_scale)
iid_likelihood_scale = tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_outputs, multiplier=observation_noise_scale)
weights_mean, weights_prec = (
normal_conjugate_posteriors.mvn_conjugate_linear_update(
linear_transformation=design_matrix,
observation=target_residuals,
prior_scale=iid_prior_scale,
likelihood_scale=iid_likelihood_scale))
sampled_weights = weights_prec.cholesky().solvevec(
samplers.normal(
shape=prefer_static.shape(weights_mean),
dtype=design_matrix.dtype, seed=seed), adjoint=True)
return weights_mean + sampled_weights
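# Sketch of the conjugate update performed above, in assumed notation: with
# prior w ~ Normal(0, s_w**2 * I) and likelihood r ~ Normal(X @ w, s_n**2 * I),
# the posterior precision and mean are
#
#   precision = I / s_w**2 + X.T @ X / s_n**2
#   mean      = solve(precision, X.T @ r) / s_n**2
#
# `mvn_conjugate_linear_update` returns this mean together with a
# LinearOperator for the precision; the Cholesky solve above then draws
# w ~ Normal(mean, inverse(precision)).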
# `resample_level` requires an explicit builder function because the compiled
# code can only accept `Tensor` arguments, but we need to pass in the
# initial state prior which is a tfd.Distribution.
def _build_resample_level_fn(initial_state_prior,
is_missing=None,
compile_with_xla=False):
"""Builds a method to sample the latent level from its Gibbs posterior."""
@tf.function(autograph=False, experimental_compile=compile_with_xla)
def resample_level(observed_residuals,
level_scale,
observation_noise_scale,
sample_shape=(),
seed=None):
"""Uses Durbin-Koopman sampling to resample the latent level.
Durbin-Koopman sampling [1] is an efficient algorithm to sample from the
posterior latents of a linear Gaussian state space model. This method
implements the algorithm, specialized to the case of a one-dimensional
latent local level model.
[1] Durbin, J. and Koopman, S.J. (2002) A simple and efficient simulation
smoother for state space time series analysis.
Args:
observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
specifying the centered observations `(x - loc)`.
level_scale: Float scalar `Tensor` (may contain batch dimensions)
specifying the standard deviation of the level random walk steps.
observation_noise_scale: Float scalar `Tensor` (may contain batch
dimensions) specifying the standard deviation of the observation noise.
sample_shape: Optional `int` `Tensor` shape of samples to draw.
seed: `int` `Tensor` of shape `[2]` controlling stateless sampling.
Returns:
level: Float `Tensor` resampled latent level, of shape
`[..., num_timesteps]`, where `...` concatenates the sample shape
with any batch shape from `observed_time_series`.
"""
num_timesteps = prefer_static.shape(observed_residuals)[-1]
ssm = sts.LocalLevelStateSpaceModel(
num_timesteps=num_timesteps,
initial_state_prior=initial_state_prior,
observation_noise_scale=observation_noise_scale,
level_scale=level_scale)
return ssm.posterior_sample(observed_residuals[..., tf.newaxis],
sample_shape=sample_shape,
mask=is_missing,
seed=seed)[..., 0]
return resample_level
def _resample_scale(prior_concentration, prior_scale,
observed_residuals, is_missing=None, seed=None):
"""Samples a scale parameter from its conditional posterior.
We assume the conjugate InverseGamma->Normal model:
```
scale ~ Sqrt(InverseGamma(prior_concentration, prior_scale))
for i in [1, ..., num_observations]:
x[i] ~ Normal(loc, scale)
```
in which `loc` is known, and return a sample from `p(scale | x)`.
Args:
prior_concentration: Float `Tensor` concentration parameter of the
InverseGamma prior distribution.
prior_scale: Float `Tensor` scale parameter of the InverseGamma prior
distribution.
observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
specifying the centered observations `(x - loc)`.
is_missing: Optional `bool` `Tensor` of shape `[..., num_observations]`. A
`True` value indicates that the corresponding observation is missing.
seed: Optional `Python` `int` seed controlling the sampled value.
Returns:
sampled_scale: A `Tensor` sample from the posterior `p(scale | x)`.
"""
if is_missing is not None:
num_missing = tf.reduce_sum(tf.cast(is_missing, observed_residuals.dtype),
axis=-1)
num_observations = prefer_static.shape(observed_residuals)[-1]
if is_missing is not None:
observed_residuals = tf.where(is_missing,
tf.zeros_like(observed_residuals),
observed_residuals)
num_observations -= num_missing
variance_posterior = tfd.InverseGamma(
concentration=prior_concentration + num_observations / 2.,
scale=prior_scale + tf.reduce_sum(
tf.square(observed_residuals), axis=-1) / 2.)
return tf.sqrt(variance_posterior.sample(seed=seed))
def _build_sampler_loop_body(model,
observed_time_series,
is_missing=None,
compile_steps_with_xla=False,
seed=None):
"""Builds a Gibbs sampler for the given model and observed data.
Args:
model: A `tf.sts.StructuralTimeSeries` model instance. This must be of the
form constructed by `build_model_for_gibbs_sampling`.
observed_time_series: Float `Tensor` time series of shape
`[..., num_timesteps]`.
is_missing: Optional `bool` `Tensor` of shape `[..., num_timesteps]`. A
`True` value indicates that the observation for that timestep is missing.
compile_steps_with_xla: Optional Python `bool`. If `True`, XLA compilation
is used to accelerate sampling steps when supported.
seed: Optional `Python` `int` seed controlling the sampled values.
Returns:
sampler_loop_body: Python callable that performs a single cycle of Gibbs
sampling. Its first argument is a `GibbsSamplerState`, and it returns a
new `GibbsSamplerState`. The second argument (passed by `tf.scan`) is
ignored.
"""
# Require that the model has exactly the parameters expected by
# `GibbsSamplerState`.
observation_noise_param, level_scale_param, weights_param = model.parameters
if (('observation_noise' not in observation_noise_param.name) or
('level_scale' not in level_scale_param.name) or
('weights' not in weights_param.name)):
raise ValueError('Model parameters {} do not match the expected sampler '
'state.'.format(model.parameters))
level_component = model.components[0]
if not isinstance(level_component, sts.LocalLevel):
raise ValueError('Expected the first model component to be an instance of '
'`tfp.sts.LocalLevel`; instead saw {}'.format(
level_component))
if is_missing is not None: # Ensure series does not contain NaNs.
observed_time_series = tf.where(is_missing,
tf.zeros_like(observed_time_series),
observed_time_series)
num_observed_steps = prefer_static.shape(observed_time_series)[-1]
design_matrix = _get_design_matrix(model).to_dense()[:num_observed_steps]
# Compile the functions that sample from Gibbs conditional posteriors.
# In principle, we should XLA-compile the entire loop body or even the entire
# `fit_with_gibbs_sampling` loop. However, XLA can't currently compile the
# gamma sampling op inside `_resample_scale` (b/141253568), so for now we
# leave that method uncompiled but compile the other two sampling steps.
# Empirically, the vast majority of sampling time is spent in
# `resample_level`, so compiling it gives us most of the wins.
# TODO(davmre): Wrap the entire sampling loop in `tf.function` while still
# XLA-compiling these pieces as appropriate.
# TODO(b/141253568): XLA-compile the entire sampling loop.
compiled_resample_level = _build_resample_level_fn(
initial_state_prior=level_component.initial_state_prior,
is_missing=is_missing,
compile_with_xla=compile_steps_with_xla)
compiled_resample_weights = tf.function(
_resample_weights, autograph=False,
experimental_compile=compile_steps_with_xla)
compiled_resample_scale = tf.function(
_resample_scale, autograph=False,
experimental_compile=False)
# Untransform scale priors -> variance priors by reaching thru Sqrt bijector.
level_scale_variance_prior = level_scale_param.prior.distribution
observation_noise_variance_prior = observation_noise_param.prior.distribution
# InverseGamma samplers are currently stateful, so we only need (and want)
# a single seed for each, shared across loop iterations.
strm = tfp_util.SeedStream(seed, salt='_sampler_loop_body')
observation_noise_scale_seed = strm()
level_scale_seed = strm()
def sampler_loop_body(previous_sample, _):
"""Runs one sampler iteration, resampling all model variables."""
(weights_seed,
level_seed,
loop_seed) = samplers.split_seed(
previous_sample.seed, n=3, salt='sampler_loop_body')
# We encourage a reasonable initialization by sampling the weights first,
# so at the first step they are regressed directly against the observed
# time series. If we instead sampled the level first it might 'explain away'
# some observed variation that we would ultimately prefer to explain through
# the regression weights, because the level can represent arbitrary
# variation, while the weights are limited to representing variation in the
# subspace given by the design matrix.
weights = compiled_resample_weights(
design_matrix=design_matrix,
target_residuals=(observed_time_series - previous_sample.level),
observation_noise_scale=previous_sample.observation_noise_scale,
weights_prior_scale=weights_param.prior.distribution.scale,
is_missing=is_missing,
seed=weights_seed)
regression_residuals = observed_time_series - tf.linalg.matvec(
design_matrix, weights)
level = compiled_resample_level(
observed_residuals=regression_residuals,
level_scale=previous_sample.level_scale,
observation_noise_scale=previous_sample.observation_noise_scale,
seed=level_seed)
# Estimate level scale from the empirical changes in level.
level_scale = compiled_resample_scale(
prior_scale=level_scale_variance_prior.scale,
prior_concentration=level_scale_variance_prior.concentration,
observed_residuals=level[..., 1:] - level[..., :-1],
is_missing=None, seed=level_scale_seed)
# Estimate noise scale from the residuals.
observation_noise_scale = compiled_resample_scale(
prior_scale=observation_noise_variance_prior.scale,
prior_concentration=observation_noise_variance_prior.concentration,
observed_residuals=regression_residuals - level,
is_missing=is_missing, seed=observation_noise_scale_seed)
return GibbsSamplerState(
observation_noise_scale=observation_noise_scale,
level_scale=level_scale,
weights=weights,
level=level,
seed=loop_seed)
return sampler_loop_body
|
py | 1a3fb3645e528ff08dda1cb0635270dc9972a0b2 | # *******************************************************************************************
# *******************************************************************************************
#
# File: gentest.py
# Date: 18th November 2020
# Purpose: Generates test code.
# Author: Paul Robson ([email protected])
#
# *******************************************************************************************
# *******************************************************************************************
import random
class GenerateTestCode(object):
def __init__(self,isFast,seed = 42,varCount = 10,fileName = "test.amo"):
if seed is None:
seed = random.randint(0,99999)
random.seed(seed)
print("Test using "+str(seed))
self.h = open(fileName,"w")
self.createAssert()
self.h.write("fast\n" if isFast else "slow\n")
self.h.write("proc main() {\n")
self.variables = {}
for i in range(0,varCount):
self.createVariable()
#
# Create variable, add to hash, output initialisation code.
#
def createVariable(self):
vName = ""
while vName == "" or vName in self.variables:
vName = "".join([chr(random.randint(0,25)+65) for x in range(0,random.randint(1,5))]).upper()
value = self.getRandom()
self.variables[vName] = value
self.h.write("\tvar {0} {1} !{0}\n".format(vName,value))
#
# Get one constant or variable
#
def pick(self):
if random.randint(0,3) == 0:
varNameList = [x for x in self.variables.keys()]
varName = varNameList[random.randint(0,len(varNameList)-1)]
return [varName,self.variables[varName]]
n = self.getRandom()
return [str(n),n]
#
# Get randomly ranged number
#
def getRandom(self):
return random.randint(0,255) if random.randint(0,1) == 0 else random.randint(0,65535)
#
# End the test code - check the variables and quit
#
def close(self):
for v in self.variables.keys():
self.createTest(v,str(self.variables[v]))
self.h.write("\texit.emulator()\n")
self.h.write("}\n")
self.h.close()
self.h = None
#
# Create assert procedure
#
def createAssert(self):
self.h.write("proc assert(n1,n2,s) {\n")
self.h.write("\tif (n1-n2 <> 0) { print.string(s);print.crlf();halt.program(); }\n")
self.h.write("}\n\n")
#
# Create one test
#
def createTest(self,n1,n2):
self.h.write('\tassert({0},{1},"{2}")\n'.format(n1,n2,n1+"="+n2))
#
# Check that assignments work.
#
def checkAssignment(self,n = 20):
for i in range(0,n):
varNameList = [x for x in self.variables.keys()]
varName = varNameList[random.randint(0,len(varNameList)-1)]
#
newValue = self.pick()
self.h.write("\t{0} !{1}\n".format(newValue[0],varName))
self.variables[varName] = newValue[1]
#
# Check Binary Arithmetic
#
def checkBinary(self,n = 20,opList = None):
allOps = "+-*/%&|^"
opList = opList if opList is not None else allOps
for i in range(0,n):
n1 = self.pick()
n2 = self.pick()
op = opList[random.randint(0,len(opList)-1)]
if (op != "/" and op != "%") or n2[1] != 0:
if op == "+":
r = (n1[1] + n2[1]) & 0xFFFF
elif op == "-":
r = (n1[1] - n2[1]) & 0xFFFF
elif op == "&":
r = (n1[1] & n2[1]) & 0xFFFF
elif op == "|":
r = (n1[1] | n2[1]) & 0xFFFF
elif op == "^":
r = (n1[1] ^ n2[1]) & 0xFFFF
elif op == "*":
r = (n1[1] * n2[1]) & 0xFFFF
elif op == "/":
r = int(n1[1] / n2[1]) & 0xFFFF
elif op == "%":
r = int(n1[1] % n2[1]) & 0xFFFF
else:
assert False
self.createTest(n1[0]+" "+op+" "+n2[0],str(r))
#
# Check unary arithmetic
#
def checkUnary(self,n = 20,opList = None):
allOps = "+-<>"
opList = opList if opList is not None else allOps
for i in range(0,n):
n1 = self.pick()
op = opList[random.randint(0,len(opList)-1)]
if op == "+":
r = (n1[1] + 1) & 0xFFFF
elif op == "-":
r = (n1[1] - 1) & 0xFFFF
elif op == "<":
r = (n1[1] << 1) & 0xFFFF
elif op == ">":
r = (n1[1] >> 1) & 0xFFFF
else:
assert False
self.createTest(n1[0]+" "+op+op,str(r))
if __name__ == "__main__":
gen = GenerateTestCode(True,None,20)
gen.checkAssignment(20)
gen.checkBinary(200)
gen.checkUnary(200)
gen.close() |
py | 1a3fb36bd7d0184209b49b44c4b40625ece9338c | from ._trackmate import trackmate_peak_import
from ._version import __version__
__all__ = ('trackmate_peak_import', '__version__')
|
py | 1a3fb44a66702fb31f8892a18503775fbcf0184c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import *
from django.http import QueryDict
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import str_prefix
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.safestring import mark_safe
from django.utils import six
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class PersonNew(Form):
first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
last_name = CharField()
birthday = DateField()
class FormsTestCase(TestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
# Pass a dictionary to a Form's __init__().
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertHTMLEqual(p.errors.as_ul(), '')
self.assertEqual(p.errors.as_text(), '')
self.assertEqual(p.cleaned_data["first_name"], 'John')
self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />')
try:
p['nonexistentfield']
self.fail('Attempts to access non-existent fields should fail.')
except KeyError:
pass
form_output = []
for boundfield in p:
form_output.append(str(boundfield))
self.assertHTMLEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""")
form_output = []
for boundfield in p:
form_output.append([boundfield.label, boundfield.data])
self.assertEqual(form_output, [
['First name', 'John'],
['Last name', 'Lennon'],
['Birthday', '1940-10-9']
])
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""")
def test_empty_dict(self):
# Empty dictionaries are valid, too.
p = Person({})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {})
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unicode_values(self):
# Unicode values are handled properly.
p = Person({'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
self.assertHTMLEqual(p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>')
self.assertHTMLEqual(p.as_ul(), '<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>')
self.assertHTMLEqual(p.as_p(), '<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>')
p = Person({'last_name': 'Lennon'})
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertDictEqual(p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']})
self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
self.assertEqual(p['first_name'].errors, ['This field is required.'])
self.assertHTMLEqual(p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')
p = Person()
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
# Form, even if you pass extra data when you define the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_optional_data(self):
# cleaned_data will include a key and value for *all* fields defined in the Form,
# even if the Form's data didn't include a value for fields that are not
# required. In this example, the data dictionary doesn't include a value for the
# "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
# empty string.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
nick_name = CharField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['nick_name'], '')
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
# For DateFields, it's set to None.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
birth_date = DateField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['birth_date'], None)
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
def test_auto_id(self):
# "auto_id" tells the Form to add an "id" attribute to each form element.
# If it's a string that contains '%s', Django will use that as a format string
# into which the field's name will be inserted. It will also put a <label> around
# the human-readable labels for a field.
p = Person(auto_id='%s_id')
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""")
def test_auto_id_true(self):
# If auto_id is any True value whose str() does not contain '%s', the "id"
# attribute will be the name of the field.
p = Person(auto_id=True)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_auto_id_false(self):
# If auto_id is any False value, an "id" attribute won't be output unless it
# was manually entered.
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_id_on_field(self):
# In this example, auto_id is False, but the "id" attribute for the "first_name"
# field is given. Also note that field gets a <label>, while the others don't.
p = PersonNew(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_auto_id_on_form_and_field(self):
# If the "id" attribute is specified in the Form and auto_id is True, the "id"
# attribute in the Form gets precedence.
p = PersonNew(auto_id=True)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_various_boolean_values(self):
class SignupForm(Form):
email = EmailField()
get_spam = BooleanField()
f = SignupForm(auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />')
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="[email protected]" />')
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# 'True' or 'true' should be rendered without a value attribute
f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# A value of 'False' or 'false' should be rendered unchecked
f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
# A value of '0' should be interpreted as a True value (#16820)
f = SignupForm({'email': '[email protected]', 'get_spam': '0'})
self.assertTrue(f.is_valid())
self.assertTrue(f.cleaned_data.get('get_spam'))
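# Illustrative doctest-style sketch (not part of the test) of the coercion rule
# asserted just above: for BooleanField, the string '0' is a non-empty value and
# therefore cleans to True.
# >>> f = SignupForm({'email': '[email protected]', 'get_spam': '0'})
# >>> f.is_valid()
# True
# >>> f.cleaned_data['get_spam']
# True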
def test_widget_output(self):
# Any Field can have a Widget class passed to its constructor:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea)
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />')
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>')
# as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
# widget type:
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />')
# The 'widget' parameter to a Field can also be an instance:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>')
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and
# as_hidden():
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject">Hello</textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />')
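# Illustrative doctest-style sketch (not run by the test) of the as_* shortcuts
# exercised above: they re-render a bound field with a different widget type
# without changing the widget configured on the form itself (attribute order in
# the output may vary).
# >>> f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
# >>> f['message'].as_text()
# '<input type="text" name="message" value="I love you." />'
# >>> f['message'].as_hidden()
# '<input type="hidden" name="message" value="I love you." />'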
def test_forms_with_choices(self):
# For a form with a <select>, use ChoiceField:
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# A subtlety: if the value of one of the choices is the empty string and the form is
# unbound, then the <option> for the empty-string choice will get selected="selected".
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
# You can specify widget attributes in the Widget constructor.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# When passing a custom widget instance to ChoiceField, note that setting
# 'choices' on the widget is meaningless. The widget will use the choices
# defined on the Field, not the ones defined on the Widget.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# You can set a ChoiceField's choices after the fact.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField()
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
def test_forms_with_radio(self):
# Add widget=RadioSelect to use that widget with a ChoiceField.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
# Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
# gets a distinct ID, formed by appending an underscore plus the button's
# zero-based index.
f = FrameworkForm(auto_id='id_%s')
self.assertHTMLEqual(str(f['language']), """<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>""")
# When RadioSelect is used with auto_id, and the whole form is printed using
# either as_table() or as_ul(), the label for the RadioSelect will point to the
# ID of the *first* radio button.
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>""")
self.assertHTMLEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>""")
def test_form_with_iterable_boundfield(self):
class BeatleForm(Form):
name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect)
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join([str(bf) for bf in f['name']]), """<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>""")
self.assertHTMLEqual('\n'.join(['<div>%s</div>' % bf for bf in f['name']]), """<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""")
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join([str(bf) for bf in f['name']]), '<input type="text" name="name" />')
def test_forms_with_multiple_choice(self):
# MultipleChoiceField is a special case, as its data is required to be a list:
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField()
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
def test_hidden_data(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
# MultipleChoiceField rendered as_hidden() is a special case. Because it can
# have multiple values, its as_hidden() renders multiple <input type="hidden">
# tags.
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
# SplitDateTimeField rendered as_hidden() is special too
class MessageForm(Form):
when = SplitDateTimeField()
f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
self.assertTrue(f.is_valid())
self.assertHTMLEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />')
self.assertHTMLEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />')
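# Illustrative doctest-style sketch (not run here) of the multi-value hidden
# rendering asserted above: two selected composers produce two hidden inputs,
# and the SplitDateTimeField keeps both of its sub-widgets when hidden.
# >>> f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
# >>> f['composers'].as_hidden().count('type="hidden"')
# 2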
def test_multiple_choice_checkbox(self):
# MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J', 'P']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id='%s_id')
self.assertHTMLEqual(str(f['composers']), """<ul id="composers_id">
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>""")
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict, MultiValueDict and
# MergeDict (when created as a merge of MultiValueDicts) conveniently work with
# this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MergeDict(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])))
f = SongForm(data)
self.assertEqual(f.errors, {})
def test_multiple_hidden(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
# The MultipleHiddenInput widget renders multiple values as hidden fields.
class SongFormHidden(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>""")
# When using CheckboxSelectMultiple, the framework expects a list of values as
# input and returns a list of values in cleaned_data.
f = SongForm({'name': 'Yesterday'}, auto_id=False)
self.assertEqual(f.errors['composers'], ['This field is required.'])
f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J', 'P'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
def test_escaping(self):
# Validation errors are HTML-escaped when output as HTML.
class EscapingForm(Form):
special_name = CharField(label="<em>Special</em> Field")
special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
def clean_special_name(self):
raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
def clean_special_safe_name(self):
raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']))
f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""")
f = EscapingForm({
'special_name': "Should escape < & > and <script>alert('xss')</script>",
'special_safe_name': "<i>Do not escape</i>"
}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Should escape < & > and <script>alert('xss')</script>'</li></ul><input type="text" name="special_name" value="Should escape < & > and <script>alert('xss')</script>" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>""")
def test_validating_multiple_fields(self):
# There are a couple of ways to do multiple-field validation. If you want the
# validation message to be associated with a particular field, implement the
# clean_XXX() method on the Form, where XXX is the field name. As in
# Field.clean(), the clean_XXX() method should return the cleaned value. In the
# clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
# of all the data that has been cleaned *so far*, in order by the fields,
# including the current field (e.g., the field XXX if you're in clean_XXX()).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean_password2(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data['password2']
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
# Another way of doing multiple-field validation is by implementing the
# Form's clean() method. If you do this, any ValidationError raised by that
# method will not be associated with a particular field; it will have a
# special-case association with the field named '__all__'.
# Note that in Form.clean(), you have access to self.cleaned_data, a dictionary of
# all the fields/values that have *not* raised a ValidationError. Also note
# Form.clean() is required to return a dictionary of all clean data.
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""")
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.'])
self.assertHTMLEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>""")
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
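# Illustrative doctest-style sketch (not run here) of where the two validation
# styles above put their messages: clean_password2() attaches the error to the
# 'password2' field, whereas Form.clean() attaches it to '__all__', which is
# what non_field_errors() exposes.
# >>> f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
# >>> f.errors['__all__'] == ['Please make sure your passwords match.']
# True
# >>> f.non_field_errors() == f.errors['__all__']
# True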
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""")
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
f = Person(names_required=True)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (True, True))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({'class': 'required'}, {'class': 'required'}))
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
f = Person(name_max_length=20)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (20, 20))
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
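# Illustrative note with a doctest-style sketch (not run here): the per-instance
# mutations above are safe because Form.__init__() deep-copies the class's
# base_fields into self.fields, so one instance's changes never leak into another.
# >>> a = Person(allow_unspec_gender=True)
# >>> b = Person()
# >>> len(a.fields['gender'].choices), len(b.fields['gender'].choices)
# (3, 2)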
def test_validators_independence(self):
""" Test that we are able to modify a form field validators list without polluting
other forms """
from django.core.validators import MaxValueValidator
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields['myfield'].validators[0] = MaxValueValidator(12)
self.assertFalse(f1.fields['myfield'].validators[0] == f2.fields['myfield'].validators[0])
def test_hidden_widget(self):
# HiddenInput widgets are displayed differently in the as_table(), as_ul()
# and as_p() output of a Form -- their verbose names are not displayed, and a
# separate row is not displayed. They're displayed in the last row of the
# form, directly after that row's form element.
class Person(Form):
first_name = CharField()
last_name = CharField()
hidden_text = CharField(widget=HiddenInput)
birthday = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""")
# With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
p = Person(auto_id='id_%s')
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""")
# If a field with a HiddenInput has errors, the as_table() and as_ul() output
# will include the error message(s) with the text "(Hidden field [fieldname]) "
# prepended. This message is displayed at the top of the output, regardless of
# its field's order in the form.
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""")
# A corner case: It's possible for a form to have only HiddenInputs.
class TestForm(Form):
foo = CharField(widget=HiddenInput)
bar = CharField(widget=HiddenInput)
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
def test_field_order(self):
# A Form's fields are displayed in the same order in which they were defined.
class TestForm(Form):
field1 = CharField()
field2 = CharField()
field3 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field7 = CharField()
field8 = CharField()
field9 = CharField()
field10 = CharField()
field11 = CharField()
field12 = CharField()
field13 = CharField()
field14 = CharField()
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
# If you specify a custom "attrs" that includes the "maxlength" attribute,
# the Field's max_length attribute will override whatever "maxlength" you specify
# in "attrs".
class UserRegistration(Form):
username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
def test_specifying_labels(self):
# You can specify the label for a field by using the 'label' argument to a Field
# class. If you don't specify 'label', Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
class UserRegistration(Form):
username = CharField(max_length=10, label='Your username')
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""")
# Labels for as_* methods will only end in a colon if they don't end in other
# punctuation already.
class Questions(Form):
q1 = CharField(label='The first question')
q2 = CharField(label='What is your name?')
q3 = CharField(label='The answer to life is:')
q4 = CharField(label='Answer this question!')
q5 = CharField(label='The last question. Period.')
self.assertHTMLEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>""")
self.assertHTMLEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""")
# If a label is set to the empty string for a field, that field won't get a label.
class UserRegistration(Form):
username = CharField(max_length=10, label='')
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
# If label is None, Django will auto-create the label from the field name. This
# is the default behavior.
class UserRegistration(Form):
username = CharField(max_length=10, label=None)
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify the
# punctuation symbol used at the end of a label. By default, the colon (:) is
# used, and is only appended to the label if the label doesn't already end with a
# punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
# be appended regardless of the last character of the label.
class FavoriteForm(Form):
color = CharField(label='Favorite color?')
animal = CharField(label='Favorite animal')
f = FavoriteForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='?')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='\u2192')
self.assertHTMLEqual(f.as_ul(), '<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>')
def test_initial_data(self):
# You can specify initial data for a field by using the 'initial' argument to a
# Field class. This initial data is displayed when a Form is rendered with *no*
# data. It is not displayed when a Form is rendered with any data (including an
# empty dictionary). Also, the initial value is *not* used if data for a
# particular required field isn't provided.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# Here, we're submitting data, so the initial value will *not* be displayed.
p = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': ''}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# An 'initial' value is *not* used as a fallback if data is not provided. In this
# example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
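# Illustrative doctest-style sketch (not run here) of the rule the assertions
# above establish: 'initial' only affects how an unbound form is rendered and is
# never used as a substitute for missing submitted data.
# >>> 'value="django"' in UserRegistration(auto_id=False).as_ul()
# True
# >>> p = UserRegistration({'password': 'secret'})
# >>> p.is_valid()
# False
# >>> p.errors['username'] == ['This field is required.']
# True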
def test_dynamic_initial_data(self):
# The previous technique dealt with "hard-coded" initial data, but it's also
# possible to specify initial data after you've already created the Form class
# (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
# should be a dictionary containing initial values for one or more fields in the
# form, keyed by field name.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# A dynamic 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
def test_callable_initial_data(self):
# The previous technique dealt with raw values as initial data, but it's also
# possible to specify callables as initial data.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f','foo'),('b','bar'),('w','whiz')])
# We need to define functions that get called later.
def initial_django():
return 'django'
def initial_stephane():
return 'stephane'
def initial_options():
return ['f','b']
def initial_other_options():
return ['b','w']
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': 'foo', 'options':['f','b']}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# A callable 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial=initial_django)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f','foo'),('b','bar'),('w','whiz')], initial=initial_other_options)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial='Hans')
last_name = CharField(initial='Greatel')
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16'})
self.assertTrue(p.is_valid())
self.assertNotIn('first_name', p.changed_data)
self.assertIn('last_name', p.changed_data)
self.assertNotIn('birthday', p.changed_data)
# Test that field raising ValidationError is always in changed_data
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError('Whatever')
class Person2(Person):
pedantic = PedanticField(initial='whatever', show_hidden_initial=True)
p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
self.assertFalse(p.is_valid())
self.assertIn('pedantic', p.changed_data)
def test_boundfield_values(self):
# It's possible to get to the value which would be used for rendering
# the widget for a field by using the BoundField's value method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
def test_help_text(self):
        # You can specify descriptive text for a field by using the 'help_text' argument.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
self.assertHTMLEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></p>
<p>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""")
self.assertHTMLEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., [email protected]</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""")
# The help text is displayed whether or not data is provided for the form.
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
# help_text is not displayed for hidden fields. It can be used for documentation
# purposes, though.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput)
next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""")
def test_subclassing_forms(self):
# You can subclass a Form to add fields. The resulting form subclass will have
# all of the fields of the parent Form, plus whichever fields you define in the
# subclass.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Musician(Person):
instrument = CharField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
m = Musician(auto_id=False)
self.assertHTMLEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>""")
# Yes, you can subclass multiple forms. The fields are added in the order in
# which the parent classes are listed.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Instrument(Form):
instrument = CharField()
class Beatle(Person, Instrument):
haircut_type = CharField()
b = Beatle(auto_id=False)
self.assertHTMLEqual(b.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
def test_forms_with_prefixes(self):
# Sometimes it's necessary to have multiple forms display on the same HTML page,
# or multiple copies of the same form. We can accomplish this with form prefixes.
# Pass the keyword argument 'prefix' to the Form constructor to use this feature.
# This value will be prepended to each HTML form field name. One way to think
# about this is "namespaces for HTML forms". Notice that in the data argument,
# each field's key has the prefix, in this case 'person1', prepended to the
# actual field name.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""")
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />')
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
# Let's try submitting some bad data to make sure form.errors and field.errors
# work as expected.
data = {
'person1-first_name': '',
'person1-last_name': '',
'person1-birthday': ''
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertEqual(p['first_name'].errors, ['This field is required.'])
try:
p['person1-first_name'].errors
self.fail('Attempts to access non-existent fields should fail.')
except KeyError:
pass
# In this example, the data doesn't have a prefix, but the form requires it, so
# the form doesn't "see" the fields.
data = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
# With prefixes, a single data dictionary can hold data for multiple instances
# of the same form.
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9',
'person2-first_name': 'Jim',
'person2-last_name': 'Morrison',
'person2-birthday': '1943-12-8'
}
p1 = Person(data, prefix='person1')
self.assertTrue(p1.is_valid())
self.assertEqual(p1.cleaned_data['first_name'], 'John')
self.assertEqual(p1.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
p2 = Person(data, prefix='person2')
self.assertTrue(p2.is_valid())
self.assertEqual(p2.cleaned_data['first_name'], 'Jim')
self.assertEqual(p2.cleaned_data['last_name'], 'Morrison')
self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
# By default, forms append a hyphen between the prefix and the field name, but a
# form can alter that behavior by implementing the add_prefix() method. This
# method takes a field name and returns the prefixed field, according to
# self.prefix.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def add_prefix(self, field_name):
return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name
p = Person(prefix='foo')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
data = {
'foo-prefix-first_name': 'John',
'foo-prefix-last_name': 'Lennon',
'foo-prefix-birthday': '1940-10-9'
}
p = Person(data, prefix='foo')
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_forms_with_null_boolean(self):
# NullBooleanField is a bit of a special case because its presentation (widget)
# is different than its data. This is handled transparently, though.
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
p = Person({'name': 'Joe'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
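        # A hedged sketch of the data side of that transparency (not asserted in this
        # test): with the default NullBooleanSelect widget, '2' and '3' clean back to
        # Python booleans, while '1' or a missing value cleans to None.
        #     p = Person({'name': 'Joe', 'is_cool': '2'})
        #     p.is_valid()                        # True
        #     p.cleaned_data['is_cool'] is True   # expected under this assumption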
def test_forms_with_file_fields(self):
# FileFields are a special case because they take their data from the request.FILES,
# not request.POST.
class FileForm(Form):
file1 = FileField()
f = FileForm(auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
self.assertTrue(f.is_valid())
f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
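        # A hedged sketch of the same binding inside a real view; 'request' and the
        # handle() helper are assumptions, not part of this test:
        #     def upload(request):
        #         form = FileForm(request.POST, request.FILES)
        #         if form.is_valid():
        #             handle(form.cleaned_data['file1'])  # hypothetical helper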
def test_basic_processing_in_view(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
def my_function(method, post_data):
if method == 'POST':
form = UserRegistration(post_data, auto_id=False)
else:
form = UserRegistration(auto_id=False)
if form.is_valid():
return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
return t.render(Context({'form': form}))
        # Case 1: GET (an empty form, with no errors).
self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 2: POST with erroneous data (a redisplayed form, with errors).
self.assertHTMLEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 3: POST with valid data (the success message).
self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
str_prefix("VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]"))
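        # The same GET/POST flow in a real Django view would read roughly like the
        # hedged sketch below (view name, template name and redirect target are assumed):
        #     from django.shortcuts import redirect, render
        #     def register(request):
        #         if request.method == 'POST':
        #             form = UserRegistration(request.POST)
        #             if form.is_valid():
        #                 ...  # use form.cleaned_data, then
        #                 return redirect('/thanks/')
        #         else:
        #             form = UserRegistration()
        #         return render(request, 'register.html', {'form': form})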
def test_templates_with_forms(self):
class UserRegistration(Form):
username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
# You have full flexibility in displaying form fields in a template. Just pass a
# Form instance to the template, and use "dot" access to refer to individual
# fields. Note, however, that this flexibility comes with the responsibility of
# displaying all the errors, including any that might not be associated with a
# particular field.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
# Use form.[field].label to output a field's label. You can specify the label for
# a field by using the 'label' argument to a Field class. If you don't specify
# 'label', Django will use the field name with underscores converted to spaces,
# and the initial letter capitalized.
t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        # Use form.[field].label_tag to output a field's label with a <label> tag
# wrapped around it, but *only* if the given field has an "id" attribute.
# Recall from above that passing the "auto_id" argument to a Form gives each
# field an "id" attribute.
t = Template('''<form action="">
<p>{{ form.username.label_tag }}: {{ form.username }}</p>
<p>{{ form.password1.label_tag }}: {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }}: {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username</label>: <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1</label>: <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2</label>: <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
        # Use form.[field].help_text to output a field's help text. If the given field
# does not have help text, nothing will be output.
t = Template('''<form action="">
<p>{{ form.username.label_tag }}: {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }}: {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }}: {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '')
# To display the errors that aren't associated with a particular field -- e.g.,
# the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
# template. If used on its own, it is displayed as a <ul> (or an empty string, if
# the list of errors is empty). You can also use it in {% if %} statements.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<ul class="errorlist"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
def test_empty_permitted(self):
# Sometimes (pretty much in formsets) we want to allow a form to pass validation
# if it is completely empty. We can accomplish this by using the empty_permitted
        # argument to a form constructor.
class SongForm(Form):
artist = CharField()
name = CharField()
        # First let's show what happens if empty_permitted=False (the default):
data = {'artist': '', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
self.assertEqual(form.cleaned_data, {})
# Now let's show what happens when empty_permitted=True and the form is empty.
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {})
# But if we fill in data for one of the fields, the form is no longer empty and
# the whole thing must pass validation.
data = {'artist': 'The Doors', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.']})
self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})
        # If a field is not given in the data then None is returned for its data. Let's
        # make sure that None is treated accordingly when checking for empty_permitted.
data = {'artist': None, 'song': ''}
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
        # However, we *really* need to check for None specifically, because any value in
        # initial that evaluates to False in a boolean context must still be treated literally.
class PriceForm(Form):
amount = FloatField()
qty = IntegerField()
data = {'amount': '0.0', 'qty': ''}
form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
self.assertTrue(form.is_valid())
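        # In practice this flag is mostly set for you by formsets; a hedged sketch
        # (formset_factory is the standard helper, the extra count is arbitrary):
        #     from django.forms.formsets import formset_factory
        #     SongFormSet = formset_factory(SongForm, extra=2)
        #     # the extra, unfilled forms are constructed with empty_permitted=True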
def test_extracting_hidden_and_visible(self):
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ['token'])
self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name'])
def test_hidden_initial_gets_id(self):
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertHTMLEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>')
def test_error_html_required_html_classes(self):
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
email = EmailField(required=False)
age = IntegerField()
p = Person({})
p.error_css_class = 'error'
p.required_css_class = 'required'
self.assertHTMLEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p class="required"><label for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></p>""")
self.assertHTMLEqual(p.as_table(), """<tr class="required error"><th><label for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td><input type="email" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="number" name="age" id="id_age" /></td></tr>""")
def test_label_split_datetime_not_displayed(self):
class EventForm(Form):
happened_at = SplitDateTimeField(widget=widgets.SplitHiddenDateTimeWidget)
form = EventForm()
self.assertHTMLEqual(form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />')
def test_multivalue_field_validation(self):
def bad_names(value):
if value == 'bad value':
raise ValidationError('bad value not allowed')
class NameField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (CharField(label='First name', max_length=10),
CharField(label='Last name', max_length=10))
super(NameField, self).__init__(fields=fields, *args, **kwargs)
def compress(self, data_list):
return ' '.join(data_list)
class NameForm(Form):
name = NameField(validators=[bad_names])
form = NameForm(data={'name' : ['bad', 'value']})
form.full_clean()
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['bad value not allowed']})
form = NameForm(data={'name' : ['should be overly', 'long for the field names']})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).',
'Ensure this value has at most 10 characters (it has 24).']})
form = NameForm(data={'name' : ['fname', 'lname']})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'name' : 'fname lname'})
def test_custom_empty_values(self):
"""
Test that form fields can customize what is considered as an empty value
for themselves (#19997).
"""
class CustomJSONField(CharField):
empty_values = [None, '']
def to_python(self, value):
# Fake json.loads
if value == '{}':
return {}
return super(CustomJSONField, self).to_python(value)
class JSONForm(forms.Form):
json = CustomJSONField()
        form = JSONForm(data={'json': '{}'})
form.full_clean()
self.assertEqual(form.cleaned_data, {'json' : {}})
def test_boundfield_label_tag(self):
class SomeForm(Form):
field = CharField()
boundfield = SomeForm()['field']
testcases = [ # (args, kwargs, expected)
# without anything: just print the <label>
((), {}, '<label for="id_field">Field</label>'),
# passing just one argument: overrides the field's label
(('custom',), {}, '<label for="id_field">custom</label>'),
            # the overridden label is escaped
(('custom&',), {}, '<label for="id_field">custom&</label>'),
((mark_safe('custom&'),), {}, '<label for="id_field">custom&</label>'),
# Passing attrs to add extra attributes on the <label>
((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field</label>')
]
for args, kwargs, expected in testcases:
self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected)
def test_boundfield_label_tag_no_id(self):
"""
If a widget has no id, label_tag just returns the text with no
surrounding <label>.
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(auto_id='')['field']
self.assertHTMLEqual(boundfield.label_tag(), 'Field')
self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&')
def test_boundfield_label_tag_custom_widget_id_for_label(self):
class CustomIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return 'custom_' + id
class EmptyIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return None
class SomeForm(Form):
custom = CharField(widget=CustomIdForLabelTextInput)
empty = CharField(widget=EmptyIdForLabelTextInput)
form = SomeForm()
self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom</label>')
self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty</label>')
|
py | 1a3fb46398f28a46ca45f7d04f20a5adba683431 | #!/usr/bin/env python2
# Apache 2.0
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exclude', '-v', dest='exclude', action='store_true', help='exclude filter words')
parser.add_argument('filt', type=str, help='filter list')
parser.add_argument('infile', type=str, help='input file')
args = parser.parse_args()
vocab=set()
with open(args.filt) as vocabfile:
for line in vocabfile:
vocab.add(unicode(line, 'utf_8').strip())
with open(args.infile) as textfile:
for line in textfile:
if args.exclude:
print " ".join(map(lambda word: word if not word in vocab else '', unicode(line, 'utf_8').strip().split())).encode('utf_8')
else:
print " ".join(map(lambda word: word if word in vocab else '<UNK>', unicode(line, 'utf_8').strip().split())).encode('utf_8')
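# Example usage (a hedged sketch; the script and file names are assumptions):
#   python2 filter_words.py vocab.txt text.txt > text.unk.txt            # map out-of-vocabulary words to <UNK>
#   python2 filter_words.py --exclude vocab.txt text.txt > text.cut.txt  # blank out words that are in the list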
|
py | 1a3fb4fce395626861c358cda8877da1298aef45 | import requests
cookies = {
}
headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="98", "Google Chrome";v="98"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'Origin': 'https://www.ajihuo.com',
'Upgrade-Insecure-Requests': '1',
'DNT': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Referer': 'https://www.ajihuo.com/pycharm/4197.html',
'Accept-Language': 'zh,en;q=0.9,zh-CN;q=0.8',
# Requests sorts cookies= alphabetically
# 'Cookie': 'Hm_lvt_30e155ae2d2fcd1332fa3746413210ee=1644235401,1644636578; session_prefix=821409ea4390560811ae93f9c050dd51; Hm_lvt_bc4856baaeecfd9234b878603add3a71=1647510147; Hm_lpvt_bc4856baaeecfd9234b878603add3a71=1647754175',
}
data = {
'secret_key': '199992',
'Submit': '\u9605\u8BFB\u5168\u6587',
}
response = requests.post('https://www.ajihuo.com/pycharm/4197.html', headers=headers, cookies=cookies, data=data)
print(response.text) |
bzl | 1a3fb597c1c25a6f6dd1ff57516a7f1c327f251c | # Validation of the content in this file is done in the bazel/repositories.bzl file so that this file stays free of bazel
# constructs. This allows this file to be loaded into Python-based build and maintenance tools.
# Envoy dependencies may be annotated with the following attributes:
DEPENDENCY_ANNOTATIONS = [
# List of the categories describing how the dependency is being used. This attribute is used
# for automatic tracking of security posture of Envoy's dependencies.
# Possible values are documented in the USE_CATEGORIES list below.
    # This attribute is mandatory for each dependency.
"use_category",
# Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID
    # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is preferred. See
    # https://nvd.nist.gov/products/cpe for CPE format. Use a single wildcard '*' for version and vector elements,
    # e.g. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned.
# This attribute is optional for components with use categories listed in the
# USE_CATEGORIES_WITH_CPE_OPTIONAL
"cpe",
]
# NOTE: If a dependency's use case is either dataplane or controlplane, the other uses do not need
# to be declared.
USE_CATEGORIES = [
# This dependency is used in build process.
"build",
# This dependency is used for unit tests.
"test",
# This dependency is used in API protos.
"api",
# This dependency is used in processing downstream or upstream requests.
"dataplane",
# This dependency is used to process xDS requests.
"controlplane",
    # This dependency is used for logging, metrics or tracing. It may process untrusted input.
"observability",
# This dependency does not handle untrusted data and is used for various utility purposes.
"other",
]
# Components with these use categories are not required to specify the 'cpe' annotation.
USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "test", "other"]
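# A hedged sketch (the file name and checks below are assumptions, not part of this file)
# of how a Python-based maintenance tool could consume these annotations:
#
#   import runpy
#   symbols = runpy.run_path("repository_locations.bzl")
#   for name, info in symbols["DEPENDENCY_REPOSITORIES"].items():
#       assert "use_category" in info, name
#       if not set(info["use_category"]) & set(symbols["USE_CATEGORIES_WITH_CPE_OPTIONAL"]):
#           assert "cpe" in info, name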
DEPENDENCY_REPOSITORIES = dict(
bazel_compdb = dict(
sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4",
strip_prefix = "bazel-compilation-database-0.4.5",
urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.5.tar.gz"],
use_category = ["build"],
),
bazel_gazelle = dict(
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz"],
use_category = ["build"],
),
bazel_toolchains = dict(
sha256 = "882fecfc88d3dc528f5c5681d95d730e213e39099abff2e637688a91a9619395",
strip_prefix = "bazel-toolchains-3.4.0",
urls = [
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.4.0/bazel-toolchains-3.4.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.4.0.tar.gz",
],
use_category = ["build"],
),
build_bazel_rules_apple = dict(
sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.19.0/rules_apple.0.19.0.tar.gz"],
use_category = ["build"],
),
envoy_build_tools = dict(
sha256 = "88e58fdb42021e64a0b35ae3554a82e92f5c37f630a4dab08a132fc77f8db4b7",
strip_prefix = "envoy-build-tools-1d6573e60207efaae6436b25ecc594360294f63a",
# 2020-07-18
urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/1d6573e60207efaae6436b25ecc594360294f63a.tar.gz"],
use_category = ["build"],
),
boringssl = dict(
sha256 = "07f1524766b9ed1543674b48e7fce7e3569b6e2b6c0c43ec124dedee9b60f641",
strip_prefix = "boringssl-a0899df79b3a63e606448c72d63a090d86bdb75b",
# To update BoringSSL, which tracks Chromium releases:
# 1. Open https://omahaproxy.appspot.com/ and note <current_version> of linux/stable release.
# 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags/<current_version>/DEPS and note <boringssl_revision>.
# 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges <boringssl_revision>.
#
# chromium-84.0.4147.45(beta)
# 2020-05-14
urls = ["https://github.com/google/boringssl/archive/a0899df79b3a63e606448c72d63a090d86bdb75b.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
boringssl_fips = dict(
sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8",
# fips-20190808
urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_google_absl = dict(
sha256 = "ec8ef47335310cc3382bdc0d0cc1097a001e67dc83fcba807845aa5696e7e1e4",
strip_prefix = "abseil-cpp-302b250e1d917ede77b5ff00a6fd9f28430f1563",
# 2020-07-13
urls = ["https://github.com/abseil/abseil-cpp/archive/302b250e1d917ede77b5ff00a6fd9f28430f1563.tar.gz"],
use_category = ["dataplane", "controlplane"],
cpe = "N/A",
),
com_github_apache_thrift = dict(
sha256 = "7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b",
strip_prefix = "thrift-0.11.0",
urls = ["https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:apache:thrift:*",
),
com_github_c_ares_c_ares = dict(
sha256 = "d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce",
strip_prefix = "c-ares-1.16.1",
urls = ["https://github.com/c-ares/c-ares/releases/download/cares-1_16_1/c-ares-1.16.1.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:c-ares_project:c-ares:*",
),
com_github_circonus_labs_libcircllhist = dict(
sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c",
strip_prefix = "libcircllhist-63a16dd6f2fc7bc841bb17ff92be8318df60e2e1",
# 2019-02-11
urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_cyan4973_xxhash = dict(
sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7",
strip_prefix = "xxHash-0.7.3",
urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.3.tar.gz"],
use_category = ["dataplane", "controlplane"],
cpe = "N/A",
),
com_github_envoyproxy_sqlparser = dict(
sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71",
strip_prefix = "sql-parser-3b40ba2d106587bdf053a292f7e3bb17e818a57f",
# 2020-06-10
urls = ["https://github.com/envoyproxy/sql-parser/archive/3b40ba2d106587bdf053a292f7e3bb17e818a57f.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_mirror_tclap = dict(
sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f",
strip_prefix = "tclap-tclap-1-2-1-release-final",
urls = ["https://github.com/mirror/tclap/archive/tclap-1-2-1-release-final.tar.gz"],
use_category = ["other"],
),
com_github_fmtlib_fmt = dict(
sha256 = "5014aacf55285bf79654539791de0d6925063fddf4dfdd597ef76b53eb994f86",
strip_prefix = "fmt-e2ff910675c7800e5c4e28e1509ca6a50bdceafa",
# 2020-04-29
urls = ["https://github.com/fmtlib/fmt/archive/e2ff910675c7800e5c4e28e1509ca6a50bdceafa.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_gabime_spdlog = dict(
sha256 = "378a040d91f787aec96d269b0c39189f58a6b852e4cbf9150ccfacbe85ebbbfc",
strip_prefix = "spdlog-1.6.1",
urls = ["https://github.com/gabime/spdlog/archive/v1.6.1.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_google_libprotobuf_mutator = dict(
sha256 = "d51365191580c4bf5e9ff104eebcfe34f7ff5f471006d7a460c15dcb3657501c",
strip_prefix = "libprotobuf-mutator-7a2ed51a6b682a83e345ff49fc4cfd7ca47550db",
# 2020-06-25
urls = ["https://github.com/google/libprotobuf-mutator/archive/7a2ed51a6b682a83e345ff49fc4cfd7ca47550db.tar.gz"],
use_category = ["test"],
),
com_github_gperftools_gperftools = dict(
sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e",
strip_prefix = "gperftools-2.8",
urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-2.8/gperftools-2.8.tar.gz"],
use_category = ["test"],
),
com_github_grpc_grpc = dict(
# TODO(JimmyCYJ): Bump to release 1.27
# This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options.
sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123",
strip_prefix = "grpc-d8f4928fa779f6005a7fe55a176bdb373b0f910f",
# 2020-02-11
urls = ["https://github.com/grpc/grpc/archive/d8f4928fa779f6005a7fe55a176bdb373b0f910f.tar.gz"],
use_category = ["dataplane", "controlplane"],
cpe = "cpe:2.3:a:grpc:grpc:*",
),
com_github_luajit_luajit = dict(
sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8",
strip_prefix = "LuaJIT-2.1.0-beta3",
urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_moonjit_moonjit = dict(
sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6",
strip_prefix = "moonjit-2.2.0",
urls = ["https://github.com/moonjit/moonjit/archive/2.2.0.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_nghttp2_nghttp2 = dict(
sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8",
strip_prefix = "nghttp2-1.41.0",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.41.0/nghttp2-1.41.0.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
),
io_opentracing_cpp = dict(
sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301",
strip_prefix = "opentracing-cpp-1.5.1",
urls = ["https://github.com/opentracing/opentracing-cpp/archive/v1.5.1.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_lightstep_tracer_cpp = dict(
sha256 = "0e99716598c010e56bc427ea3482be5ad2c534be8b039d172564deec1264a213",
strip_prefix = "lightstep-tracer-cpp-3efe2372ee3d7c2138d6b26e542d757494a7938d",
# 2020-03-24
urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/3efe2372ee3d7c2138d6b26e542d757494a7938d.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_datadog_dd_opentracing_cpp = dict(
sha256 = "b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924",
strip_prefix = "dd-opentracing-cpp-1.1.5",
urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.5.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_google_benchmark = dict(
sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2",
strip_prefix = "benchmark-1.5.1",
urls = ["https://github.com/google/benchmark/archive/v1.5.1.tar.gz"],
use_category = ["test"],
),
com_github_libevent_libevent = dict(
sha256 = "c64156c24602ab7a5c66937d774cc55868911d5bbbf1650792f5877744b1c2d9",
# This SHA includes the new "prepare" and "check" watchers, used for event loop performance
# stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition
# in the watchers (see https://github.com/libevent/libevent/pull/802).
# This also includes the fixes for https://github.com/libevent/libevent/issues/806
# and https://github.com/lyft/envoy-mobile/issues/215.
# This also include the fixes for Phantom events with EV_ET (see
# https://github.com/libevent/libevent/issues/984).
# TODO(adip): Update to v2.2 when it is released.
strip_prefix = "libevent-06a11929511bebaaf40c52aaf91de397b1782ba2",
# 2020-05-08
urls = ["https://github.com/libevent/libevent/archive/06a11929511bebaaf40c52aaf91de397b1782ba2.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:libevent_project:libevent:*",
),
net_zlib = dict(
# Use the dev branch of zlib to resolve fuzz bugs and out of bound
# errors resulting in crashes in zlib 1.2.11.
# TODO(asraa): Remove when zlib > 1.2.11 is released.
sha256 = "155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e",
strip_prefix = "zlib-79baebe50e4d6b73ae1f8b603f0ef41300110aa3",
# 2019-04-14 development branch
urls = ["https://github.com/madler/zlib/archive/79baebe50e4d6b73ae1f8b603f0ef41300110aa3.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:gnu:zlib:*",
),
com_github_jbeder_yaml_cpp = dict(
sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f",
strip_prefix = "yaml-cpp-98acc5a8874faab28b82c28936f4b400b389f5d6",
# 2020-07-28
urls = ["https://github.com/greenhouse-org/yaml-cpp/archive/98acc5a8874faab28b82c28936f4b400b389f5d6.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_msgpack_msgpack_c = dict(
sha256 = "433cbcd741e1813db9ae4b2e192b83ac7b1d2dd7968a3e11470eacc6f4ab58d2",
strip_prefix = "msgpack-3.2.1",
urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-3.2.1/msgpack-3.2.1.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_google_jwt_verify = dict(
sha256 = "f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1",
strip_prefix = "jwt_verify_lib-7276a339af8426724b744216f619c99152f8c141",
# 2020-07-09
urls = ["https://github.com/google/jwt_verify_lib/archive/7276a339af8426724b744216f619c99152f8c141.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_nodejs_http_parser = dict(
sha256 = "8fa0ab8770fd8425a9b431fdbf91623c4d7a9cdb842b9339289bd2b0b01b0d3d",
strip_prefix = "http-parser-2.9.3",
urls = ["https://github.com/nodejs/http-parser/archive/v2.9.3.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:nodejs:node.js:*",
),
com_github_pallets_jinja = dict(
sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4",
strip_prefix = "jinja-2.10.3",
urls = ["https://github.com/pallets/jinja/archive/2.10.3.tar.gz"],
use_category = ["build"],
),
com_github_pallets_markupsafe = dict(
sha256 = "222a10e3237d92a9cd45ed5ea882626bc72bc5e0264d3ed0f2c9129fa69fc167",
strip_prefix = "markupsafe-1.1.1/src",
urls = ["https://github.com/pallets/markupsafe/archive/1.1.1.tar.gz"],
use_category = ["build"],
),
com_github_tencent_rapidjson = dict(
sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b",
strip_prefix = "rapidjson-dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1",
# Changes through 2019-12-02
urls = ["https://github.com/Tencent/rapidjson/archive/dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:tencent:rapidjson:*",
),
com_github_twitter_common_lang = dict(
sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1",
strip_prefix = "twitter.common.lang-0.3.9/src",
urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-0.3.9.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_twitter_common_rpc = dict(
sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514",
strip_prefix = "twitter.common.rpc-0.3.9/src",
urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-0.3.9.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_twitter_common_finagle_thrift = dict(
sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a",
strip_prefix = "twitter.common.finagle-thrift-0.3.9/src",
urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-0.3.9.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_google_googletest = dict(
sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb",
strip_prefix = "googletest-release-1.10.0",
urls = ["https://github.com/google/googletest/archive/release-1.10.0.tar.gz"],
use_category = ["test"],
),
com_google_protobuf = dict(
sha256 = "d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e",
strip_prefix = "protobuf-3.10.1",
urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protobuf-all-3.10.1.tar.gz"],
use_category = ["dataplane", "controlplane"],
cpe = "N/A",
),
grpc_httpjson_transcoding = dict(
sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5",
strip_prefix = "grpc-httpjson-transcoding-faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6",
# 2020-03-02
urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
io_bazel_rules_go = dict(
sha256 = "a8d6b1b354d371a646d2f7927319974e0f9e52f73a2452d2b3877118169eb6bb",
urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.23.3/rules_go-v0.23.3.tar.gz"],
use_category = ["build"],
),
rules_cc = dict(
sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0",
strip_prefix = "rules_cc-818289e5613731ae410efb54218a4077fb9dbb03",
# 2020-05-13
        # TODO(lizan): pin to a point release when there's a released version.
urls = ["https://github.com/bazelbuild/rules_cc/archive/818289e5613731ae410efb54218a4077fb9dbb03.tar.gz"],
use_category = ["build"],
),
rules_foreign_cc = dict(
sha256 = "7ca49ac5b0bc8f5a2c9a7e87b7f86aca604bda197259c9b96f8b7f0a4f38b57b",
strip_prefix = "rules_foreign_cc-f54b7ae56dcf1b81bcafed3a08d58fc08ac095a7",
# 2020-06-09
urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/f54b7ae56dcf1b81bcafed3a08d58fc08ac095a7.tar.gz"],
use_category = ["build"],
),
rules_python = dict(
sha256 = "76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5",
strip_prefix = "rules_python-a0fbf98d4e3a232144df4d0d80b577c7a693b570",
# 2020-04-09
        # TODO(htuch): revert back to a point release when pip3_import appears.
urls = ["https://github.com/bazelbuild/rules_python/archive/a0fbf98d4e3a232144df4d0d80b577c7a693b570.tar.gz"],
use_category = ["build"],
),
six = dict(
sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73",
urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-1.12.0.tar.gz"],
use_category = ["other"],
),
io_opencensus_cpp = dict(
sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212",
strip_prefix = "opencensus-cpp-7877337633466358ed680f9b26967da5b310d7aa",
# 2020-06-01
urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/7877337633466358ed680f9b26967da5b310d7aa.tar.gz"],
use_category = ["observability"],
cpe = "N/A",
),
com_github_curl = dict(
sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98",
strip_prefix = "curl-7.69.1",
urls = ["https://github.com/curl/curl/releases/download/curl-7_69_1/curl-7.69.1.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_googlesource_chromium_v8 = dict(
# This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh
# and contains complete checkout of V8 with all dependencies necessary to build wee8.
sha256 = "cc6f5357cd10922bfcf667bd882624ad313e21b009b919ce00f322f390012476",
urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.3.110.9.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_googlesource_quiche = dict(
# Static snapshot of https://quiche.googlesource.com/quiche/+archive/b2b8ff25f5a565324b93411ca29c3403ccbca969.tar.gz
sha256 = "792924bbf27203bb0d1d08c99597a30793ef8f4cfa2df99792aea7200f1b27e3",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/b2b8ff25f5a565324b93411ca29c3403ccbca969.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_googlesource_googleurl = dict(
# Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_6dafefa72cba2ab2ba4922d17a30618e9617c7cf.tar.gz
sha256 = "f1ab73ddd1a7db4e08a9e4db6c2e98e5a0a7bbaca08f5fee0d73adb02c24e44a",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_6dafefa72cba2ab2ba4922d17a30618e9617c7cf.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_google_cel_cpp = dict(
sha256 = "cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e",
strip_prefix = "cel-cpp-b9453a09b28a1531c4917e8792b3ea61f6b1a447",
# 2020-07-14
urls = ["https://github.com/google/cel-cpp/archive/b9453a09b28a1531c4917e8792b3ea61f6b1a447.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_github_google_flatbuffers = dict(
sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a",
strip_prefix = "flatbuffers-a83caf5910644ba1c421c002ef68e42f21c15f9f",
urls = ["https://github.com/google/flatbuffers/archive/a83caf5910644ba1c421c002ef68e42f21c15f9f.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
com_googlesource_code_re2 = dict(
sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f",
strip_prefix = "re2-2020-07-06",
# 2020-07-06
urls = ["https://github.com/google/re2/archive/2020-07-06.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
# Included to access FuzzedDataProvider.h. This is compiler agnostic but
# provided as part of the compiler-rt source distribution. We can't use the
# Clang variant as we are not a Clang-LLVM only shop today.
org_llvm_releases_compiler_rt = dict(
sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75",
# Only allow peeking at fuzzer related files for now.
strip_prefix = "compiler-rt-10.0.0.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/compiler-rt-10.0.0.src.tar.xz"],
use_category = ["test"],
),
upb = dict(
sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47",
strip_prefix = "upb-8a3ae1ef3e3e3f26b45dec735c5776737fc7247f",
# 2019-11-19
urls = ["https://github.com/protocolbuffers/upb/archive/8a3ae1ef3e3e3f26b45dec735c5776737fc7247f.tar.gz"],
use_category = ["dataplane", "controlplane"],
cpe = "N/A",
),
kafka_source = dict(
sha256 = "e7b748a62e432b5770db6dbb3b034c68c0ea212812cb51603ee7f3a8a35f06be",
strip_prefix = "kafka-2.4.0/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/2.4.0.zip"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:apache:kafka:*",
),
kafka_server_binary = dict(
sha256 = "b9582bab0c3e8d131953b1afa72d6885ca1caae0061c2623071e7f396f2ccfee",
strip_prefix = "kafka_2.12-2.4.0",
urls = ["http://us.mirrors.quenda.co/apache/kafka/2.4.0/kafka_2.12-2.4.0.tgz"],
use_category = ["test"],
),
kafka_python_client = dict(
sha256 = "454bf3aafef9348017192417b7f0828a347ec2eaf3efba59336f3a3b68f10094",
strip_prefix = "kafka-python-2.0.0",
urls = ["https://github.com/dpkp/kafka-python/archive/2.0.0.tar.gz"],
use_category = ["test"],
),
org_unicode_icuuc = dict(
strip_prefix = "icu-release-64-2",
sha256 = "524960ac99d086cdb6988d2a92fc163436fd3c6ec0a84c475c6382fbf989be05",
urls = ["https://github.com/unicode-org/icu/archive/release-64-2.tar.gz"],
use_category = ["dataplane"],
cpe = "cpe:2.3:a:icu-project:international_components_for_unicode",
),
proxy_wasm_cpp_sdk = dict(
sha256 = "7d9e1f2e299215ed3e5fa8c8149740872b1100cfe3230fc639f967d9dcfd812e",
strip_prefix = "proxy-wasm-cpp-sdk-5cec30b448975e1fd3f4117311f0957309df5cb0",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/5cec30b448975e1fd3f4117311f0957309df5cb0.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
proxy_wasm_cpp_host = dict(
sha256 = "494d3f81156b92bac640c26000497fbf3a7b1bc35f9789594280450c6e5d8129",
strip_prefix = "proxy-wasm-cpp-host-928db4d79ec7b90aea3ad13ea5df36dc60c9c31d",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/928db4d79ec7b90aea3ad13ea5df36dc60c9c31d.tar.gz"],
use_category = ["dataplane"],
cpe = "N/A",
),
emscripten_toolchain = dict(
sha256 = "2bdbee6947e32ad1e03cd075b48fda493ab16157b2b0225b445222cd528e1843",
patch_cmds = [
"./emsdk install 1.39.19-upstream",
"./emsdk activate --embedded 1.39.19-upstream",
],
strip_prefix = "emsdk-dec8a63594753fe5f4ad3b47850bf64d66c14a4e",
urls = ["https://github.com/emscripten-core/emsdk/archive/dec8a63594753fe5f4ad3b47850bf64d66c14a4e.tar.gz"],
use_category = ["build"],
),
rules_antlr = dict(
sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429",
strip_prefix = "rules_antlr-3cc2f9502a54ceb7b79b37383316b23c4da66f9a",
urls = ["https://github.com/marcohu/rules_antlr/archive/3cc2f9502a54ceb7b79b37383316b23c4da66f9a.tar.gz"],
use_category = ["build"],
),
antlr4_runtimes = dict(
sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574",
strip_prefix = "antlr4-4.7.1",
urls = ["https://github.com/antlr/antlr4/archive/4.7.1.tar.gz"],
use_category = ["build"],
),
)
|
py | 1a3fb612f67eb0ffb36fdaf43a8e0971cf20db77 | import os
from stat import S_IFBLK
from .size import ByteSize
class Device(object):
def __init__(self, path):
# strip all symbolic links and make path absolute (for /proc/mounts)
self.path = os.path.abspath(os.path.realpath(path))
self.st = os.lstat(self.path)
@property
def is_device(self):
return bool(self.st.st_mode & S_IFBLK)
@property
def major(self):
return os.major(self.st.st_rdev)
@property
def minor(self):
return os.minor(self.st.st_rdev)
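# sysfs exposes per-device attributes under /sys/dev/block/<major>:<minor>;
# the helpers below read plain-text values (removable, size, ro, device/model) from that tree.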
@property
def sys_fs_path(self):
return os.path.join('/sys/dev/block', '{0.major}:{0.minor}'
.format(self))
def _lookup_sys(self, name):
return open(
os.path.join(self.sys_fs_path, name), 'rb').read().rstrip(b'\n')
def _lookup_sys_bool(self, name):
return int(self._lookup_sys(name)) == 1
@property
def removable(self):
return self._lookup_sys_bool('removable')
@property
def size(self):
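# The sysfs 'size' attribute is reported in 512-byte sectors, independent of the device's logical block size.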
return ByteSize(int(self._lookup_sys('size')) * 512)
@property
def model(self):
return self._lookup_sys('device/model').decode('utf8').strip()
@property
def read_only(self):
return self._lookup_sys_bool('ro')
def open(self, mode='r'):
return open(self.path, mode)
def __repr__(self):
return '{0.__class__.__name__}({0.path})'.format(self)
@classmethod
def iter_block_devices(cls, base_path='/dev'):
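# Only whole SATA/SCSI-style disks (three-character names such as sda, sdb) are yielded; partition nodes like sda1 are skipped.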
for name in os.listdir(base_path):
if name.startswith('sd') and len(name) == 3:
yield cls(os.path.join(base_path, name))
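# Example usage (a sketch, assuming a Python 3 interpreter with read access to /sys):
#     for dev in Device.iter_block_devices():
#         if dev.is_device and not dev.removable:
#             print(dev, dev.model, dev.size)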
|
py | 1a3fb6ad93cd554220022d8d1de50123d5101430 | #!/usr/bin/env python
import sys
from setuptools import setup
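# Bootstrap setuptools-rust via pip if it is missing, then retry the import so the RustExtension declared below can be built.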
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call([sys.executable, "-m", "pip", "install", "setuptools-rust"])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
setup_requires = ["setuptools-rust>=0.10.1", "wheel"]
install_requires = []
setup(
name="py-rustlib",
version="0.1.0",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Rust",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
],
packages=["src"],
rust_extensions=[RustExtension("py_rustlib.py_rustlib")],
install_requires=install_requires,
setup_requires=setup_requires,
include_package_data=True,
zip_safe=False,
)
|
py | 1a3fb6c567f516a42fb4ddbc4270b59042b19bce | import logging
import unittest
from cuda_checker.check import (
get_pytorch_properties,
get_tensorflow_properties,
)
_LOGGING_FORMAT = "%(asctime)s %(levelname)s %(pathname)s %(message)s"
logging.basicConfig(
format=_LOGGING_FORMAT,
datefmt="%Y-%m-%d %H:%M:%S%z",
handlers=[logging.StreamHandler()],
level=logging.DEBUG,
)
class Test(unittest.TestCase):
def test_check(self) -> None:
# Logging is already configured at module level above; the test only needs
# to exercise the two property probes.
get_pytorch_properties()
get_tensorflow_properties()
if __name__ == "__main__":
unittest.main()
|
py | 1a3fb8172efb315fc9f98b14af301dc406ffd6de | import datetime
import os
import random
import re
import string
import sys
import unittest2
from mock import patch, Mock
import stripe
NOW = datetime.datetime.now()
DUMMY_CARD = {
'number': '4242424242424242',
'exp_month': NOW.month,
'exp_year': NOW.year + 4
}
DUMMY_DEBIT_CARD = {
'number': '4000056655665556',
'exp_month': NOW.month,
'exp_year': NOW.year + 4
}
DUMMY_CHARGE = {
'amount': 100,
'currency': 'usd',
'card': DUMMY_CARD
}
DUMMY_DISPUTE = {
'status': 'needs_response',
'currency': 'usd',
'metadata': {}
}
DUMMY_PLAN = {
'amount': 2000,
'interval': 'month',
'name': 'Amazing Gold Plan',
'currency': 'usd',
'id': ('stripe-test-gold-' +
''.join(random.choice(string.ascii_lowercase) for x in range(10)))
}
DUMMY_COUPON = {
'percent_off': 25,
'duration': 'repeating',
'duration_in_months': 5,
'metadata': {}
}
DUMMY_RECIPIENT = {
'name': 'John Doe',
'type': 'individual'
}
DUMMY_TRANSFER = {
'amount': 400,
'currency': 'usd',
'recipient': 'self'
}
DUMMY_APPLE_PAY_DOMAIN = {
'domain_name': 'test.com',
}
DUMMY_INVOICE_ITEM = {
'amount': 456,
'currency': 'usd',
}
SAMPLE_INVOICE = stripe.util.json.loads("""
{
"amount_due": 1305,
"attempt_count": 0,
"attempted": true,
"charge": "ch_wajkQ5aDTzFs5v",
"closed": true,
"customer": "cus_osllUe2f1BzrRT",
"date": 1338238728,
"discount": null,
"ending_balance": 0,
"id": "in_t9mHb2hpK7mml1",
"livemode": false,
"next_payment_attempt": null,
"object": "invoice",
"paid": true,
"period_end": 1338238728,
"period_start": 1338238716,
"starting_balance": -8695,
"subtotal": 10000,
"total": 10000,
"lines": {
"invoiceitems": [],
"prorations": [],
"subscriptions": [
{
"plan": {
"interval": "month",
"object": "plan",
"identifier": "expensive",
"currency": "usd",
"livemode": false,
"amount": 10000,
"name": "Expensive Plan",
"trial_period_days": null,
"id": "expensive"
},
"period": {
"end": 1340917128,
"start": 1338238728
},
"amount": 10000
}
]
}
}
""")
class StripeTestCase(unittest2.TestCase):
RESTORE_ATTRIBUTES = ('api_version', 'api_key')
def setUp(self):
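# Snapshot the module-level stripe attributes (api_version, api_key) so individual tests can override them; tearDown restores the saved values.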
super(StripeTestCase, self).setUp()
self._stripe_original_attributes = {}
for attr in self.RESTORE_ATTRIBUTES:
self._stripe_original_attributes[attr] = getattr(stripe, attr)
api_base = os.environ.get('STRIPE_API_BASE')
if api_base:
stripe.api_base = api_base
stripe.api_key = os.environ.get(
'STRIPE_API_KEY', 'tGN0bIwXnHdwOa85VABjPdSn8nWY7G7I')
def tearDown(self):
super(StripeTestCase, self).tearDown()
for attr in self.RESTORE_ATTRIBUTES:
setattr(stripe, attr, self._stripe_original_attributes[attr])
# Python < 2.7 compatibility
def assertRaisesRegexp(self, exception, regexp, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except exception as err:
if regexp is None:
return True
if isinstance(regexp, basestring):
regexp = re.compile(regexp)
if not regexp.search(str(err)):
raise self.failureException('"%s" does not match "%s"' %
(regexp.pattern, str(err)))
else:
raise self.failureException(
'%s was not raised' % (exception.__name__,))
class StripeUnitTestCase(StripeTestCase):
REQUEST_LIBRARIES = ['urlfetch', 'requests', 'pycurl']
if sys.version_info >= (3, 0):
REQUEST_LIBRARIES.append('urllib.request')
else:
REQUEST_LIBRARIES.append('urllib2')
def setUp(self):
super(StripeUnitTestCase, self).setUp()
self.request_patchers = {}
self.request_mocks = {}
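# Patch every supported HTTP client library so unit tests never perform real network I/O.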
for lib in self.REQUEST_LIBRARIES:
patcher = patch("stripe.http_client.%s" % (lib,))
self.request_mocks[lib] = patcher.start()
self.request_patchers[lib] = patcher
def tearDown(self):
super(StripeUnitTestCase, self).tearDown()
for patcher in self.request_patchers.itervalues():
patcher.stop()
class StripeApiTestCase(StripeTestCase):
def setUp(self):
super(StripeApiTestCase, self).setUp()
self.requestor_patcher = patch('stripe.api_requestor.APIRequestor')
requestor_class_mock = self.requestor_patcher.start()
self.requestor_mock = requestor_class_mock.return_value
def tearDown(self):
super(StripeApiTestCase, self).tearDown()
self.requestor_patcher.stop()
def mock_response(self, res):
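# Stub APIRequestor.request to return the canned payload together with a dummy API key.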
self.requestor_mock.request = Mock(return_value=(res, 'reskey'))
class StripeResourceTest(StripeApiTestCase):
def setUp(self):
super(StripeResourceTest, self).setUp()
self.mock_response({})
class MyResource(stripe.resource.APIResource):
pass
class MySingleton(stripe.resource.SingletonAPIResource):
pass
class MyListable(stripe.resource.ListableAPIResource):
pass
class MyCreatable(stripe.resource.CreateableAPIResource):
pass
class MyUpdateable(stripe.resource.UpdateableAPIResource):
pass
class MyDeletable(stripe.resource.DeletableAPIResource):
pass
class MyComposite(stripe.resource.ListableAPIResource,
stripe.resource.CreateableAPIResource,
stripe.resource.UpdateableAPIResource,
stripe.resource.DeletableAPIResource):
pass
|
py | 1a3fb84701958f9633541459939fa67afeffeebc | import os
import sys
VERBOSE = True
PACKTESTASSETS = False
# Assets
BAKED = '../Baked/'
TEXTURES = 'Textures/'
MESHES = 'Meshes/'
FONTS = 'Fonts/'
SHADERS = 'Shaders/'
CONFIG = 'Config/'
AUDIO = 'Audio/'
ROOMS = 'Rooms/'
MISC = 'Misc/'
BASEPACKFILE = 'eldritch-base.cpk'
TEXTURESPACKFILE = 'eldritch-textures.cpk'
MESHESPACKFILE = 'eldritch-meshes.cpk'
AUDIOPACKFILE = 'eldritch-audio.cpk'
WORLDPACKFILE = 'eldritch-world.cpk'
SYNCERPACKFILE = 'syncer.cpk'
# Map from folder names to package files
DLCPACKFILES = { 'DLC1' : 'dlc1.cpk',
'DLC2' : 'dlc2.cpk' }
# Tools folders
TOOLS_DIR = '../Tools/' # Relative to Baked directory
# Tools
FILE_PACKER = TOOLS_DIR + 'FilePacker.exe'
#-----------------------------------------------------
def pack():
packdir( CONFIG, True, '.ccf', BASEPACKFILE )
packdir( CONFIG, True, '.pcf', BASEPACKFILE )
packdir( FONTS, True, '.fnp', BASEPACKFILE )
packdir( MESHES, True, '.cms', MESHESPACKFILE )
packdir( SHADERS, True, '.cfx', BASEPACKFILE )
packdir( SHADERS, True, '.chv2', BASEPACKFILE )
packdir( SHADERS, True, '.chp2', BASEPACKFILE )
packdir( SHADERS, True, '.gv12', BASEPACKFILE )
packdir( SHADERS, True, '.gf12', BASEPACKFILE )
packdir( TEXTURES, True, '.dds', TEXTURESPACKFILE )
packdir( TEXTURES, True, '.bmp', TEXTURESPACKFILE )
packdir( TEXTURES, True, '.tga', TEXTURESPACKFILE )
packdir( ROOMS, True, '.eldritchroom', WORLDPACKFILE )
packdir( AUDIO, True, '.wav', AUDIOPACKFILE )
#packdir( AUDIO, True, '.ogg', AUDIOPACKFILE ) # Compress .ogg files because Audiere can't stream from pack file anyway.
packdir( AUDIO, False, '.ogg', AUDIOPACKFILE ) # Don't compress .ogg files because FMOD can stream from pack file.
packdir( MISC, True, '.bmp', BASEPACKFILE )
packdir( CONFIG, True, '.pcf', SYNCERPACKFILE )
#-----------------------------------------------------
def runtool( args ):
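# Echo the command when VERBOSE is set, then run the tool synchronously (os.P_WAIT blocks until it exits).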
if VERBOSE:
for arg in args:
print arg,
print
os.spawnv( os.P_WAIT, args[0], args )
#-----------------------------------------------------
# If ext is specified, only files matching that extension are baked
# If ext isn't specified, all files in the folder are baked
# This will recurse into any subfolders of the given path
def packdir( dir, compress, ext, packfile ):
for path, dirs, files in os.walk( dir ):
# Ignore source control and test content
if '.svn' in dirs:
dirs.remove( '.svn' )
# Ignore test content if we're not building a test package
if( ( not PACKTESTASSETS ) and ( 'Test' in dirs ) ):
dirs.remove( 'Test' )
usepackfile = packfile
# Override package file for DLC
for dlcdir, dlcpackfile in DLCPACKFILES.iteritems():
if dlcdir in path:
usepackfile = dlcpackfile
for file in files:
if( ( not ext ) or ( ext in file ) ):
infile = os.path.join( path, file )
compressflag = ''
if compress:
compressflag = '-c'
runtool( [ FILE_PACKER, infile, usepackfile, compressflag ] )
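# Hypothetical example: packdir( 'Scripts/', True, '.lua', BASEPACKFILE ) would recursively pack and
# compress every .lua file under Scripts/, honouring the DLC package overrides above.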
#-----------------------------------------------------
# Entry point
#-----------------------------------------------------
for arg in sys.argv:
if arg == '-t':
PACKTESTASSETS = True
print 'ALERT: Packing test assets!'
print 'Deleting pack files...'
os.chdir( BAKED )
if os.path.exists( BASEPACKFILE ):
os.remove( BASEPACKFILE )
if os.path.exists( TEXTURESPACKFILE ):
os.remove( TEXTURESPACKFILE )
if os.path.exists( MESHESPACKFILE ):
os.remove( MESHESPACKFILE )
if os.path.exists( AUDIOPACKFILE ):
os.remove( AUDIOPACKFILE )
if os.path.exists( WORLDPACKFILE ):
os.remove( WORLDPACKFILE )
if os.path.exists( SYNCERPACKFILE ):
os.remove( SYNCERPACKFILE )
for dlcdir, dlcpackfile in DLCPACKFILES.iteritems():
if os.path.exists( dlcpackfile ):
os.remove( dlcpackfile )
print 'Packing assets...'
try:
pack()
except:
sys.exit(1)
print 'Packing done!' |
py | 1a3fb87333813b91522bc6bcd551cc7da99c67ed | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scatterpolar.textfont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
py | 1a3fb9a0b1d0e9116060a850627b619b2b34f0c4 |
import urllib.request
import threading
import smtplib
import ssl
state = input("Type the state you are from, with the first letter capitalized, and press enter: ")
sender = input("Type the email that you would like to send emails FROM, and press enter: ")
password = input("Type the password for that email and press enter: ")
receiver = input("Type the email that you would like to send emails TO, and press enter: ")
# How often to refresh the page, in seconds
UPDATE = 60.0
# Port for SSL
port = 465
# Message in the email.
text = "Book an appointment at CVS! https://www.cvs.com/immunizations/covid-19-vaccine"
subject = "CVS Vaccine Appointment Available!"
message = 'Subject: {}\n\n{}'.format(subject, text)
# Create a secure SSL context
context = ssl.create_default_context()
# This function repeatedly reads the CVS website, and if any appointments are
# available in your state, it emails you.
def sendit():
# Initializes threading (repition / refreshing of website)
threading.Timer(UPDATE, sendit).start()
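# Note: the timer re-arms before the check runs, so the page is polled every UPDATE seconds
# and an email is sent on each cycle while appointments remain open.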
# Reads website into var 'html'
html = urllib.request.urlopen('https://www.cvs.com/immunizations/covid-19-vaccine').read()
# If not all appointments are booked...
lookforstring = f"At this time, all appointments in {state} are booked."
if lookforstring.encode() not in html:
# Login via STMP and send email
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(sender, password)
server.sendmail(sender, receiver, message)
sendit()
|
py | 1a3fba08ff5ea3eb170348300bf97c2580d64a96 | import argparse, os, pathlib
parser = argparse.ArgumentParser(description='Convert training data to PEPREC')
parser.add_argument('-f', '--files', nargs='+', help='files contaning peptides')
parser.add_argument('-s', '--suffix', default='peprec', help='suffix for the output file names')
parser.add_argument('-o', '--output', default='.', help='where to save the output files')
if __name__ == '__main__':
args = parser.parse_args()
for infile in args.files:
dirname, basename = os.path.split(infile)
fname, ext = os.path.splitext(basename)
outfname = f'{fname}_{args.suffix}{ext}'
outdir = args.output if os.path.isabs(args.output) else os.path.abspath(args.output)
pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
outfile = os.path.join(outdir, outfname)
print(f'Printing PEPREC to {outfile}')
with open(infile, 'r') as inf, open(outfile, 'w') as outf:
pepid = 0
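# PEPREC is written as space-separated columns: spec_id, modifications ('-' when none), peptide sequence and precursor charge.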
outf.write('spec_id modifications peptide charge\n')
for line in inf:
try:
tokens = line.rstrip('\r\n').split('\t')
charge, seq, score, y_ions, y_ints, b_ions, b_ints, y_frac = tokens
pepid = '_'.join([seq, charge])
outf.write(f'{pepid} - {seq} {charge}\n')
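# e.g. an input line with sequence PEPTIDE and charge 2 becomes the PEPREC row: 'PEPTIDE_2 - PEPTIDE 2'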
except ValueError as e:
print("Unexpected number of tokens found on line!")
e.args += (line,)
raise |
py | 1a3fbb072237600d1bb84279edc8ebd56aa13443 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import os
from azure.core.exceptions import ResourceNotFoundError
from azure.cli.testsdk.scenario_tests import AllowLargeResponse, live_only
from azure.cli.core.util import CLIError
from azure.cli.core.mock import DummyCli
from azure.cli.testsdk.base import execute
from azure.cli.testsdk.exceptions import CliTestError
from azure.cli.testsdk import (
JMESPathCheck,
JMESPathCheckExists,
JMESPathCheckGreaterThan,
NoneCheck,
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer,
KeyVaultPreparer,
LiveScenarioTest,
record_only)
from azure.cli.testsdk.preparers import (
AbstractPreparer,
SingleValueReplacer)
from azure.cli.command_modules.sql.custom import (
ClientAuthenticationType,
ClientType,
ComputeModelType,
ResourceIdType)
from datetime import datetime, timedelta
from time import sleep
# Constants
server_name_prefix = 'clitestserver'
server_name_max_length = 62
managed_instance_name_prefix = 'clitestmi'
instance_pool_name_prefix = 'clitestip'
managed_instance_name_max_length = 20
class SqlServerPreparer(AbstractPreparer, SingleValueReplacer):
def __init__(self, name_prefix=server_name_prefix, parameter_name='server', location='westus',
admin_user='admin123', admin_password='SecretPassword123',
resource_group_parameter_name='resource_group', skip_delete=True):
super(SqlServerPreparer, self).__init__(name_prefix, server_name_max_length)
self.location = location
self.parameter_name = parameter_name
self.admin_user = admin_user
self.admin_password = admin_password
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
def create_resource(self, name, **kwargs):
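# Provision a throw-away logical server through the CLI itself; the generated name is injected into the
# test via the parameter named by parameter_name (default 'server').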
group = self._get_resource_group(**kwargs)
template = 'az sql server create -l {} -g {} -n {} -u {} -p {}'
execute(DummyCli(), template.format(self.location, group, name, self.admin_user, self.admin_password))
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
group = self._get_resource_group(**kwargs)
execute(DummyCli(), 'az sql server delete -g {} -n {} --yes --no-wait'.format(group, name))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a sql server account a resource group is required. Please add ' \
'decorator @{} in front of this storage account preparer.'
raise CliTestError(template.format(ResourceGroupPreparer.__name__,
self.resource_group_parameter_name))
class ManagedInstancePreparer(AbstractPreparer, SingleValueReplacer):
subscription_id = '8313371e-0879-428e-b1da-6353575a9192'
group = 'CustomerExperienceTeam_RG'
location = 'westcentralus'
vnet_name = 'vnet-mi-tooling'
subnet_name = 'ManagedInstance'
subnet = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(subscription_id, group, vnet_name, subnet_name)
# For cross-subnet update SLO, we need a target subnet to move managed instance to.
target_vnet_name = 'vnet-mi-tooling'
target_subnet_name = 'ManagedInstance2'
target_subnet = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(subscription_id, group, target_vnet_name, target_subnet_name)
target_subnet_vcores = 4
collation = "Serbian_Cyrillic_100_CS_AS"
licence = 'LicenseIncluded'
v_core = 4
storage = 32
edition = 'GeneralPurpose'
family = 'Gen5'
proxy = 'Proxy'
fog_name = "fgtest2022a"
primary_name = 'mi-primary-wcus'
secondary_name = 'mi-mdcs-cx-secondary'
sec_group = 'mdcs-cx-secondary-vnet'
sec_location = 'centralus'
sec_subnet = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/vnet-sql-mi-secondary/subnets/default'.format(subscription_id, sec_group)
def __init__(self, name_prefix=managed_instance_name_prefix, parameter_name='mi', admin_user='admin123',
minimalTlsVersion='', user_assigned_identity_id='', identity_type='', pid='', otherParams='',
admin_password='SecretPassword123SecretPassword', public=True, tags='', is_geo_secondary=False,
skip_delete=False):
super(ManagedInstancePreparer, self).__init__(name_prefix, server_name_max_length)
self.parameter_name = parameter_name
self.admin_user = admin_user
self.admin_password = admin_password
self.public = public
self.skip_delete = skip_delete
self.tags = tags
self.minimalTlsVersion = minimalTlsVersion
self.identityType = identity_type
self.userAssignedIdentityId = user_assigned_identity_id
self.pid = pid
self.otherParams = otherParams
self.is_geo_secondary = is_geo_secondary
def create_resource(self, name, **kwargs):
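# Build the 'az sql mi create' command incrementally, appending optional flags (public endpoint,
# TLS version, identity, extra params) only when the preparer was configured with them.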
location = self.location
subnet = self.subnet
v_core = self.v_core
template = 'az sql mi create -g {} -n {} -l {} -u {} -p {} --subnet {} --license-type {}' \
' --collation {} --capacity {} --storage {} --edition {} --family {} --tags {}' \
' --proxy-override {} --bsr Geo'
if self.public:
template += ' --public-data-endpoint-enabled'
if self.minimalTlsVersion:
template += f" --minimal-tls-version {self.minimalTlsVersion}"
if self.identityType == ResourceIdType.system_assigned_user_assigned.value or self.identityType == ResourceIdType.user_assigned.value:
template += f" --assign-identity --user-assigned-identity-id {self.userAssignedIdentityId} --identity-type {self.identityType} --pid {self.pid}"
if self.identityType == ResourceIdType.system_assigned.value:
template += f" --assign-identity"
if self.otherParams:
template += f" {self.otherParams}"
if self.is_geo_secondary:
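# Geo-secondary instances are created in the secondary region and subnet configured above,
# so failover-group tests have a replication target.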
location = self.sec_location
subnet = self.sec_subnet
v_core = 4
execute(DummyCli(), template.format(
self.group, name, location,
self.admin_user, self.admin_password,
subnet, self.licence, self.collation,
v_core, self.storage, self.edition,
self.family, self.tags, self.proxy))
return {self.parameter_name: name, 'rg': self.group}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
try:
execute(DummyCli(), 'az sql mi delete -g {} -n {} --yes --no-wait'.format(self.group, name))
except ResourceNotFoundError:
pass
class SqlServerMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1', location='westeurope')
@ResourceGroupPreparer(parameter_name='resource_group_2', location='westeurope')
def test_sql_server_mgmt(self, resource_group_1, resource_group_2, resource_group_location):
server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_3 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456', 'SecretPassword789']
federated_client_id_1 = '748eaea0-6dbc-4be9-a50b-6a2d3dad00d4'
federated_client_id_2 = '17deee33-9da7-40ce-a33c-8a96f2f8f07d'
federated_client_id_3 = '00000000-0000-0000-0000-000000000000'
# test create sql server with minimal required parameters
server_1 = self.cmd('sql server create -g {} --name {} '
'--admin-user {} --admin-password {}'
.format(resource_group_1, server_name_1, admin_login, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('identity', None)]).get_output_in_json()
# test list sql server should be 1
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[JMESPathCheck('length(@)', 1)])
# test update sql server
self.cmd('sql server update -g {} --name {} --admin-password {} -i'
.format(resource_group_1, server_name_1, admin_passwords[1]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test update without identity parameter, validate identity still exists
# also use --id instead of -g/-n
self.cmd('sql server update --ids {} --admin-password {}'
.format(server_1['id'], admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test create another sql server, with identity this time
self.cmd('sql server create -g {} --name {} -l {} -i '
'--admin-user {} --admin-password {}'
.format(resource_group_2, server_name_2, resource_group_location, admin_login, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test list sql server in that group should be 1
self.cmd('sql server list -g {}'.format(resource_group_2), checks=[JMESPathCheck('length(@)', 1)])
# test list sql server in the subscription should be at least 2
self.cmd('sql server list', checks=[JMESPathCheckGreaterThan('length(@)', 1)])
# test show sql server
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name_1),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login)])
self.cmd('sql server show --id {}'
.format(server_1['id']),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login)])
self.cmd('sql server list-usages -g {} -n {}'
.format(resource_group_1, server_name_1),
checks=[JMESPathCheck('[0].resourceName', server_name_1)])
# test delete sql server
self.cmd('sql server delete --id {} --yes'
.format(server_1['id']), checks=NoneCheck())
self.cmd('sql server delete -g {} --name {} --yes'
.format(resource_group_2, server_name_2), checks=NoneCheck())
# test list sql server should be 0
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[NoneCheck()])
# test create third sql server, with identity and federated client id
self.cmd('sql server create -g {} --name {} -l {} -i '
'--admin-user {} --admin-password {} --federated-client-id {}'
.format(resource_group_1, server_name_3, resource_group_location, admin_login, admin_passwords[0], federated_client_id_1),
checks=[
JMESPathCheck('name', server_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('identity.type', 'SystemAssigned'),
JMESPathCheck('federatedClientId', federated_client_id_1)])
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name_3),
checks=[
JMESPathCheck('name', server_name_3),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('federatedClientId', federated_client_id_1)])
# test update sql server's federated client id
self.cmd('sql server update -g {} --name {} --admin-password {} --federated-client-id {} -i'
.format(resource_group_1, server_name_3, admin_passwords[2], federated_client_id_2),
checks=[
JMESPathCheck('name', server_name_3),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('federatedClientId', federated_client_id_2)])
# test update sql server's federated client id to empty guid
self.cmd('sql server update -g {} --name {} --admin-password {} --federated-client-id {} -i'
.format(resource_group_1, server_name_3, admin_passwords[2], federated_client_id_3),
checks=[
JMESPathCheck('name', server_name_3),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('federatedClientId', None)])
# delete sql server
self.cmd('sql server delete -g {} --name {} --yes'
.format(resource_group_1, server_name_3), checks=NoneCheck())
@ResourceGroupPreparer(parameter_name='resource_group_1', location='westeurope')
def test_sql_server_public_network_access_create_mgmt(self, resource_group_1, resource_group_location):
server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_3 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
# test create sql server with no enable-public-network passed in, verify publicNetworkAccess == Enabled
self.cmd('sql server create -g {} --name {} '
'--admin-user {} --admin-password {}'
.format(resource_group_1, server_name_1, admin_login, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('publicNetworkAccess', 'Enabled')])
# test create sql server with enable-public-network == true passed in, verify publicNetworkAccess == Enabled
self.cmd('sql server create -g {} --name {} '
'--admin-user {} --admin-password {} --enable-public-network {}'
.format(resource_group_1, server_name_2, admin_login, admin_passwords[0], 'true'),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('publicNetworkAccess', 'Enabled')])
# test create sql server with enable-public-network == false passed in, verify publicNetworkAccess == Disabled
# note: although server does not have private links, creating server with disabled public network is allowed
self.cmd('sql server create -g {} --name {} '
'--admin-user {} --admin-password {} -e {}'
.format(resource_group_1, server_name_3, admin_login, admin_passwords[0], 'false'),
checks=[
JMESPathCheck('name', server_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('publicNetworkAccess', 'Disabled')])
# test get sql server to verify publicNetworkAccess == 'Disabled' for the above server as expected
# note: although server does not have private links, creating server with disabled public network is allowed
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name_3),
checks=[
JMESPathCheck('name', server_name_3),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('publicNetworkAccess', 'Disabled')])
@ResourceGroupPreparer(parameter_name='resource_group', location='westeurope')
def test_sql_server_public_network_access_update_mgmt(self, resource_group, resource_group_location):
server_name = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
# test create sql server with no enable-public-network passed in, verify publicNetworkAccess == Enabled
self.cmd('sql server create -g {} --name {} --admin-user {} --admin-password {}'
.format(resource_group, server_name, admin_login, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('publicNetworkAccess', 'Enabled')])
# test update sql server with enable-public-network == false passed in, verify publicNetworkAccess == Disabled
# note: we test for exception thrown here since this server does not have private links so updating server
# to disable public network access will throw an error
try:
self.cmd('sql server update -g {} -n {} --enable-public-network {}'
.format(resource_group, server_name, 'false'))
except Exception as e:
expectedmessage = "Unable to set Deny Public Network Access to Yes since there is no private endpoint enabled to access the server"
if expectedmessage in str(e):
pass
# test create sql server with enable-public-network == false passed in, verify publicNetworkAccess == Disabled
# note: although server does not have private links, creating server with disabled public network is allowed
self.cmd('sql server create -g {} --name {} '
'--admin-user {} --admin-password {} -e {}'
.format(resource_group, server_name_2, admin_login, admin_passwords[0], 'false'),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('administratorLogin', admin_login),
JMESPathCheck('publicNetworkAccess', 'Disabled')])
# test update sql server with no enable-public-network passed in, verify publicNetworkAccess == Disabled
# note: we test for exception thrown here since this server does not have private links so updating server
# to disable public network access will throw an error
try:
self.cmd('sql server update -g {} -n {} -i'
.format(resource_group, server_name_2))
except Exception as e:
expectedmessage = "Unable to set Deny Public Network Access to Yes since there is no private endpoint enabled to access the server"
if expectedmessage in str(e):
pass
# test update sql server with enable-public-network == true passed in, verify publicNetworkAccess == Enabled
self.cmd('sql server update -g {} -n {} -e {}'
.format(resource_group, server_name_2, 'true'),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('publicNetworkAccess', 'Enabled')])
class SqlServerFirewallMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer(location='eastus')
def test_sql_firewall_mgmt(self, resource_group, resource_group_location, server):
firewall_rule_1 = 'rule1'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '255.255.255.255'
firewall_rule_2 = 'rule2'
start_ip_address_2 = '123.123.123.123'
end_ip_address_2 = '123.123.123.124'
# allow_all_azure_ips_rule = 'AllowAllAzureIPs'
# allow_all_azure_ips_address = '0.0.0.0'
# test sql server firewall-rule create
fw_rule_1 = self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
start_ip_address_1, end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)]).get_output_in_json()
# test sql server firewall-rule show by group/server/name
self.cmd('sql server firewall-rule show --name {} -g {} --server {}'
.format(firewall_rule_1, resource_group, server),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule show by id
self.cmd('sql server firewall-rule show --id {}'
.format(fw_rule_1['id']),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule update by group/server/name
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule update by id
self.cmd('sql server firewall-rule update --id {} '
'--start-ip-address {}'
.format(fw_rule_1['id'], start_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_2)])
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule create another rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_2, resource_group, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_2),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule list
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(resource_group, server), checks=[JMESPathCheck('length(@)', 2)])
# # test sql server firewall-rule create azure ip rule
# self.cmd('sql server firewall-rule allow-all-azure-ips -g {} --server {} '
# .format(resource_group, server), checks=[
# JMESPathCheck('name', allow_all_azure_ips_rule),
# JMESPathCheck('resourceGroup', resource_group),
# JMESPathCheck('startIpAddress', allow_all_azure_ips_address),
# JMESPathCheck('endIpAddress', allow_all_azure_ips_address)])
# # test sql server firewall-rule list
# self.cmd('sql server firewall-rule list -g {} --server {}'
# .format(resource_group, server), checks=[JMESPathCheck('length(@)', 3)])
# test sql server firewall-rule delete
self.cmd('sql server firewall-rule delete --id {}'
.format(fw_rule_1['id']), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(resource_group, server), checks=[JMESPathCheck('length(@)', 1)])
self.cmd('sql server firewall-rule delete --name {} -g {} --server {}'
.format(firewall_rule_2, resource_group, server), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(resource_group, server), checks=[NoneCheck()])
class SqlServerOutboundFirewallMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
@SqlServerPreparer(location='eastus')
def test_sql_outbound_firewall_mgmt(self, resource_group, resource_group_location, server):
outbound_firewall_rule_allowed_fqdn_1 = 'testOBFR1'
outbound_firewall_rule_allowed_fqdn_2 = 'testOBFR2'
# test sql server outbound-firewall-rule create
self.cmd('sql server outbound-firewall-rule create -g {} --server {} --outbound-rule-fqdn {}'
.format(resource_group, server, outbound_firewall_rule_allowed_fqdn_1),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', outbound_firewall_rule_allowed_fqdn_1)])
# test sql server outbound-firewall-rule show by group/server/name
self.cmd('sql server outbound-firewall-rule show -g {} --server {} --outbound-rule-fqdn {}'
.format(resource_group, server, outbound_firewall_rule_allowed_fqdn_1),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', outbound_firewall_rule_allowed_fqdn_1)])
# test sql server outbound-firewall-rule create another rule
self.cmd('sql server outbound-firewall-rule create -g {} --server {} --outbound-rule-fqdn {}'
.format(resource_group, server, outbound_firewall_rule_allowed_fqdn_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', outbound_firewall_rule_allowed_fqdn_2)])
# test sql server outbound-firewall-rule list
self.cmd('sql server outbound-firewall-rule list -g {} --server {}'
.format(resource_group, server), checks=[JMESPathCheck('length(@)', 2)])
# test sql server outbound-firewall-rule delete
self.cmd('sql server outbound-firewall-rule delete -g {} --server {} --outbound-rule-fqdn {}'
.format(resource_group, server, outbound_firewall_rule_allowed_fqdn_2), checks=NoneCheck())
self.cmd('sql server outbound-firewall-rule list -g {} --server {}'
.format(resource_group, server), checks=[JMESPathCheck('length(@)', 1)])
class SqlServerDbMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_db_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
database_name_2 = "cliautomationdb02"
database_name_3 = "cliautomationdb03"
update_service_objective = 'P1'
update_storage = '10GB'
update_storage_bytes = str(10 * 1024 * 1024 * 1024)
read_scale_disabled = 'Disabled'
read_scale_enabled = 'Enabled'
backup_storage_redundancy_local = 'local'
backup_storage_redundancy_zone = 'zone'
# test sql db commands
db1 = self.cmd('sql db create -g {} --server {} --name {} --read-scale {} --backup-storage-redundancy {} --yes'
.format(resource_group, server, database_name, read_scale_disabled,
backup_storage_redundancy_local),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('readScale', 'Disabled'),
JMESPathCheck('highAvailabilityReplicaCount', None),
JMESPathCheck('requestedBackupStorageRedundancy', 'Local')]).get_output_in_json()
self.cmd('sql db list -g {} --server {}'
.format(resource_group, server),
checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[1].resourceGroup', resource_group)])
self.cmd('sql db list-usages -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('[0].resourceGroup', resource_group)])
# Show by group/server/name
self.cmd('sql db show -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group)])
# Show by id
self.cmd('sql db show --id {}'
.format(db1['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group)])
# Update by group/server/name
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --max-size {} --read-scale {}'
' --set tags.key1=value1 --backup-storage-redundancy {}'
.format(resource_group, server, database_name,
update_service_objective, update_storage,
read_scale_enabled, backup_storage_redundancy_zone),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1'),
JMESPathCheck('readScale', 'Enabled'),
JMESPathCheck('highAvailabilityReplicaCount', None)])
# Update by id
self.cmd('sql db update --id {} --set tags.key2=value2'
.format(db1['id']),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key2', 'value2')])
# Rename by group/server/name
db2 = self.cmd('sql db rename -g {} -s {} -n {} --new-name {}'
.format(resource_group, server, database_name, database_name_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2)]).get_output_in_json()
# Rename by id
db3 = self.cmd('sql db rename --id {} --new-name {}'
.format(db2['id'], database_name_3),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_3)]).get_output_in_json()
# Delete by group/server/name
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(resource_group, server, database_name_3),
checks=[NoneCheck()])
# Delete by id
self.cmd('sql db delete --id {} --yes'
.format(db3['id']),
checks=[NoneCheck()])
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_db_vcore_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
# Create database with vcore edition
vcore_edition = 'GeneralPurpose'
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, database_name, vcore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition)])
# Update database to dtu edition
dtu_edition = 'Standard'
dtu_capacity = 10
self.cmd('sql db update -g {} --server {} --name {} --edition {} --capacity {} --max-size 250GB'
.format(resource_group, server, database_name, dtu_edition, dtu_capacity),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', dtu_edition),
JMESPathCheck('sku.tier', dtu_edition),
JMESPathCheck('sku.capacity', dtu_capacity)])
# Update database back to vcore edition
vcore_family = 'Gen5'
vcore_capacity = 4
self.cmd('sql db update -g {} --server {} --name {} -e {} -c {} -f {}'
.format(resource_group, server, database_name, vcore_edition,
vcore_capacity, vcore_family),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family)])
# Update only family
vcore_family_updated = 'Gen5'
# Update only capacity
vcore_capacity_updated = 8
self.cmd('sql db update -g {} -s {} -n {} --capacity {}'
.format(resource_group, server, database_name, vcore_capacity_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated)])
# Update only edition
vcore_edition_updated = 'BusinessCritical'
self.cmd('sql db update -g {} -s {} -n {} --tier {}'
.format(resource_group, server, database_name, vcore_edition_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated)])
# Create database with vcore edition and all sku properties specified
database_name_2 = 'cliautomationdb02'
vcore_edition = 'GeneralPurpose'
self.cmd('sql db create -g {} --server {} --name {} -e {} -c {} -f {}'
.format(resource_group, server, database_name_2,
vcore_edition_updated, vcore_capacity_updated,
vcore_family_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated)])
@ResourceGroupPreparer(name_prefix='clitest-sql', location='eastus2')
@SqlServerPreparer(name_prefix='clitest-sql', location='eastus2')
@AllowLargeResponse()
def test_sql_db_read_replica_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
# Create database with Hyperscale edition
edition = 'Hyperscale'
family = 'Gen5'
capacity = 2
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {} --ha-replicas {}'
.format(resource_group, server, database_name, edition, family, capacity, 4),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('readScale', 'Enabled'),
JMESPathCheck('highAvailabilityReplicaCount', '4')])
# Increase read replicas
self.cmd('sql db update -g {} --server {} --name {} --read-replicas {}'
.format(resource_group, server, database_name, 3),
checks=[
JMESPathCheck('readScale', 'Enabled'),
JMESPathCheck('highAvailabilityReplicaCount', '3')])
# Decrease read replicas
self.cmd('sql db update -g {} --server {} --name {} --read-replicas {}'
.format(resource_group, server, database_name, 0),
checks=[
JMESPathCheck('readScale', 'Disabled'),
JMESPathCheck('highAvailabilityReplicaCount', '0')])
# Alternate syntax
self.cmd('sql db update -g {} --server {} --name {} --ha-replicas {}'
.format(resource_group, server, database_name, 2),
checks=[
JMESPathCheck('readScale', 'Enabled'),
JMESPathCheck('highAvailabilityReplicaCount', '2')])
@ResourceGroupPreparer(location='westcentralus')
@SqlServerPreparer(location='westcentralus')
def test_sql_db_ledger(self, resource_group, resource_group_location, server):
database_name_one = "cliautomationdb01"
database_name_two = "cliautomationdb02"
# test sql db is created with ledger off by default
self.cmd('sql db create -g {} --server {} --name {} --yes'
.format(resource_group, server, database_name_one),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_one),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('ledgerOn', False)])
self.cmd('sql db show -g {} -s {} --name {}'
.format(resource_group, server, database_name_one),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_one),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('ledgerOn', False)])
# test sql db with ledger on
self.cmd('sql db create -g {} --server {} --name {} --ledger-on --yes'
.format(resource_group, server, database_name_two),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_two),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('ledgerOn', True)])
self.cmd('sql db show -g {} -s {} --name {}'
.format(resource_group, server, database_name_two),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_two),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('ledgerOn', True)])
class SqlServerServerlessDbMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_db_serverless_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
compute_model_serverless = ComputeModelType.serverless
compute_model_provisioned = ComputeModelType.provisioned
# Create database with vcore edition
vcore_edition = 'GeneralPurpose'
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, database_name, vcore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition)])
# Update database to serverless offering
self.cmd('sql db update -g {} --server {} --name {} --compute-model {}'
.format(resource_group, server, database_name, compute_model_serverless),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.name', 'GP_S_Gen5')])
# Update auto pause delay and min capacity
auto_pause_delay = 120
min_capacity = 1.0
self.cmd('sql db update -g {} -s {} -n {} --auto-pause-delay {} --min-capacity {}'
.format(resource_group, server, database_name, auto_pause_delay, min_capacity),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('autoPauseDelay', auto_pause_delay),
JMESPathCheck('minCapacity', min_capacity)])
# Update only vCores
vCores = 8
self.cmd('sql db update -g {} -s {} -n {} -c {}'
.format(resource_group, server, database_name, vCores),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vCores)])
# Update back to provisioned database offering
self.cmd('sql db update -g {} --server {} --name {} --compute-model {}'
.format(resource_group, server, database_name, compute_model_provisioned),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.name', 'GP_Gen5')])
# Create database with vcore edition, with everything specified for Serverless
database_name_2 = 'cliautomationdb02'
vcore_edition = 'GeneralPurpose'
vcore_family = 'Gen5'
vcore_capacity = 4
auto_pause_delay = 120
min_capacity = 1.0
self.cmd(
'sql db create -g {} --server {} --name {} -e {} -c {} -f {} --compute-model {} --auto-pause-delay {} --min-capacity {}'
.format(resource_group, server, database_name_2,
vcore_edition, vcore_capacity,
vcore_family, compute_model_serverless, auto_pause_delay, min_capacity),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family),
JMESPathCheck('sku.name', 'GP_S_Gen5'),
JMESPathCheck('autoPauseDelay', auto_pause_delay),
JMESPathCheck('minCapacity', min_capacity)])
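# Covers database operation management: listing in-flight operations and cancelling one.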
class SqlServerDbOperationMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
def test_sql_db_operation_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'GP_Gen5_8'
# Create db
self.cmd('sql db create -g {} -s {} -n {} --yes'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# Update DB with --no-wait
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --no-wait'
.format(resource_group, server, database_name, update_service_objective))
# List operations
ops = list(
self.cmd('sql db op list -g {} -s {} -d {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].databaseName', database_name)
])
.get_output_in_json())
# Cancel operation
self.cmd('sql db op cancel -g {} -s {} -d {} -n {}'
.format(resource_group, server, database_name, ops[0]['name']))
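# Covers short term retention policy (str-policy) set/show against a pre-existing live database.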
class SqlServerDbShortTermRetentionScenarioTest(ScenarioTest):
def test_sql_db_short_term_retention(self):
# Initial parameters. default_diffbackup_hours will be changed to 24 soon.
self.kwargs.update({
'resource_group': 'qiangdsrg',
'server_name': 'qiangdsmemberserver',
'database_name': 'hubdatabase',
'retention_days_v1': 7,
'diffbackup_hours_v1': 24,
'retention_days_v2': 6,
'diffbackup_hours_v2': 12,
'retention_days_v3': 5
})
# Test UPDATE short term retention policy on live database, value updated to v1.
self.cmd(
'sql db str-policy set -g {resource_group} -s {server_name} -n {database_name} --retention-days {retention_days_v1} --diffbackup-hours {diffbackup_hours_v1}',
checks=[
self.check('resourceGroup', '{resource_group}'),
self.check('retentionDays', '{retention_days_v1}'),
self.check('diffBackupIntervalInHours', '{diffbackup_hours_v1}')])
# Test GET short term retention policy on live database, value equals to v1.
self.cmd(
'sql db str-policy show -g {resource_group} -s {server_name} -n {database_name}',
checks=[
self.check('resourceGroup', '{resource_group}'),
self.check('retentionDays', '{retention_days_v1}'),
self.check('diffBackupIntervalInHours', '{diffbackup_hours_v1}')])
# Test UPDATE short term retention policy on live database, value updated to v2.
self.cmd(
'sql db str-policy set -g {resource_group} -s {server_name} -n {database_name} --retention-days {retention_days_v2} --diffbackup-hours {diffbackup_hours_v2}',
checks=[
self.check('resourceGroup', '{resource_group}'),
self.check('retentionDays', '{retention_days_v2}'),
self.check('diffBackupIntervalInHours', '{diffbackup_hours_v2}')])
# Test UPDATE short term retention policy on live database, only update retention days value to v3.
self.cmd(
'sql db str-policy set -g {resource_group} -s {server_name} -n {database_name} --retention-days {retention_days_v3}',
checks=[
self.check('resourceGroup', '{resource_group}'),
self.check('retentionDays', '{retention_days_v3}'),
self.check('diffBackupIntervalInHours', '{diffbackup_hours_v2}')])
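# Covers long term retention: ltr-policy set/show and ltr-backup list/show/restore/delete
# against pre-existing live resources.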
class SqlServerDbLongTermRetentionScenarioTest(ScenarioTest):
def test_sql_db_long_term_retention(
self):
self.kwargs.update({
'rg': 'CustomerExperienceTeam_RG',
'loc': 'westcentralus',
'server_name': 'mi-tooling-server',
'database_name': 'a-reneamoso-qpi-testing',
'weekly_retention': 'P1W',
'monthly_retention': 'P1M',
'yearly_retention': 'P2M',
'week_of_year': 12
})
# test update long term retention on live database
self.cmd(
'sql db ltr-policy set -g {rg} -s {server_name} -n {database_name}'
' --weekly-retention {weekly_retention} --monthly-retention {monthly_retention}'
' --yearly-retention {yearly_retention} --week-of-year {week_of_year}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('weeklyRetention', '{weekly_retention}'),
self.check('monthlyRetention', '{monthly_retention}'),
self.check('yearlyRetention', '{yearly_retention}')])
# test get long term retention policy on live database
self.cmd(
'sql db ltr-policy show -g {rg} -s {server_name} -n {database_name}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('weeklyRetention', '{weekly_retention}'),
self.check('monthlyRetention', '{monthly_retention}'),
self.check('yearlyRetention', '{yearly_retention}')])
# test list long term retention backups for location
# with resource group
self.cmd(
'sql db ltr-backup list -l {loc} -g {rg}',
checks=[
self.greater_than('length(@)', 0)])
# without resource group
self.cmd(
'sql db ltr-backup list -l {loc}',
checks=[
self.greater_than('length(@)', 0)])
# test list long term retention backups for instance
# with resource group
self.cmd(
'sql db ltr-backup list -l {loc} -s {server_name} -g {rg}',
checks=[
self.greater_than('length(@)', 0)])
# without resource group
self.cmd(
'sql db ltr-backup list -l {loc} -s {server_name}',
checks=[
self.greater_than('length(@)', 0)])
# test list long term retention backups for database
# with resource group
self.cmd(
'sql db ltr-backup list -l {loc} -s {server_name} -d {database_name} -g {rg}',
checks=[
self.greater_than('length(@)', 0)])
# without resource group
self.cmd(
'sql db ltr-backup list -l {loc} -s {server_name} -d {database_name}',
checks=[
self.greater_than('length(@)', 0)])
# setup for test show long term retention backup
backup = self.cmd(
'sql db ltr-backup list -l {loc} -s {server_name} -d {database_name} --latest True').get_output_in_json()
self.kwargs.update({
'backup_name': backup[0]['name'],
'backup_id': backup[0]['id']
})
# test show long term retention backup
self.cmd(
'sql db ltr-backup show -l {loc} -s {server_name} -d {database_name} -n {backup_name}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('serverName', '{server_name}'),
self.check('databaseName', '{database_name}'),
self.check('name', '{backup_name}')])
# test restore database from LTR backup
self.kwargs.update({
'dest_database_name': 'cli-restore-ltr'
})
self.cmd(
'sql db ltr-backup restore --backup-id \'{backup_id}\' --dest-database {dest_database_name}'
' --dest-server {server_name} --dest-resource-group {rg}',
checks=[
self.check('name', '{dest_database_name}')])
# test delete long term retention backup
self.cmd(
'sql db ltr-backup delete -l {loc} -s {server_name} -d {database_name} -n \'{backup_name}\' --yes',
checks=[NoneCheck()])
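# Covers managed instance operation management: listing operations and cancelling an in-progress update.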
class SqlManagedInstanceOperationMgmtScenarioTest(ScenarioTest):
@ManagedInstancePreparer()
def test_sql_mi_operation_mgmt(self, mi, rg):
managed_instance_name = mi
resource_group = rg
edition_updated = 'BusinessCritical'
v_core_update = 4
# Managed instance becomes ready before the operation is completed. For that reason, we should wait
# for the operation to complete in order to proceed with testing.
if self.is_live:
sleep(120)
print('Updating MI...\n')
# Update sql managed_instance
self.cmd('sql mi update -g {} -n {} --edition {} --capacity {} --no-wait'
.format(resource_group, managed_instance_name, edition_updated, v_core_update))
print('Listing all operations...\n')
# List operations
ops = list(
self.cmd('sql mi op list -g {} --mi {}'
.format(resource_group, managed_instance_name),
checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].managedInstanceName', managed_instance_name)
])
.get_output_in_json())
print('Canceling operation...\n')
# Cancel operation
self.cmd('sql mi op cancel -g {} --mi {} -n {}'
.format(resource_group, managed_instance_name, ops[1]['name']))
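# Covers server connection policy show/update, cycling through Proxy, Default and Redirect.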
class SqlServerConnectionPolicyScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer(location='eastus')
def test_sql_server_connection_policy(self, resource_group, resource_group_location, server):
# Show
self.cmd('sql server conn-policy show -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('connectionType', 'Default')])
# Update
for type in ('Proxy', 'Default', 'Redirect'):
self.cmd('sql server conn-policy update -g {} -s {} -t {}'
.format(resource_group, server, type),
checks=[JMESPathCheck('connectionType', type)])
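# Covers Azure AD administrator create/list/update/delete on a server.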
class AzureActiveDirectoryAdministratorScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
def test_aad_admin(self, resource_group, server):
self.kwargs.update({
'rg': resource_group,
'sn': server,
'administrator_name': "ActiveDirectory",
'oid': '5e90ef3b-9b42-4777-819b-25c36961ea4d',
'oid2': 'e4d43337-d52c-4a0c-b581-09055e0359a0',
'user': 'DSEngAll',
'user2': 'TestUser'
})
print('Arguments are updated with login and sid data')
self.cmd('sql server ad-admin create -s {sn} -g {rg} -i {oid} -u {user}',
checks=[
self.check('login', '{user}'),
self.check('sid', '{oid}')])
self.cmd('sql server ad-admin list -s {sn} -g {rg}',
checks=[
self.check('[0].login', '{user}'),
self.check('[0].sid', '{oid}')])
self.cmd('sql server ad-admin update -s {sn} -g {rg}'
' -u {user2} -i {oid2}',
checks=[
self.check('login', '{user2}'),
self.check('sid', '{oid2}')])
self.cmd('sql server ad-admin delete -s {sn} -g {rg}')
self.cmd('sql server ad-admin list -s {sn} -g {rg}',
checks=[
self.check('[0].login', None),
self.check('[0].sid', None)])
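# Covers Azure AD-only authentication: enable, disable and get after creating an AD admin.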
class SqlServerADOnlyAuthScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
def test_aadonly(self, resource_group, server):
print('\n********************')
print("Server: {}".format(server))
print('********************\n')
user = 'DSEngAll'
oid = '5e90ef3b-9b42-4777-819b-25c36961ea4d'
self.cmd('sql server ad-admin create -s {} -g {} -u {} -i {}'.format(server, resource_group, user, oid),
checks=[])
self.cmd('sql server ad-only-auth enable -n {} -g {}'.format(server, resource_group), checks=[])
self.cmd('sql server ad-only-auth disable -n {} -g {}'.format(server, resource_group), checks=[])
self.cmd('sql server ad-only-auth get -n {} -g {}'.format(server, resource_group), checks=[])
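# Covers 'sql db copy': same-server copies (with and without a service objective), a copy with
# a backup storage redundancy override, and a cross-resource-group copy into an elastic pool.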
class SqlServerDbCopyScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1', location='westeurope')
@ResourceGroupPreparer(parameter_name='resource_group_2', location='westeurope')
@SqlServerPreparer(parameter_name='server1', resource_group_parameter_name='resource_group_1',
location='westeurope')
@SqlServerPreparer(parameter_name='server2', resource_group_parameter_name='resource_group_2',
location='westeurope')
@AllowLargeResponse()
def test_sql_db_copy(self, resource_group_1, resource_group_2,
resource_group_location,
server1, server2):
database_name = "cliautomationdb01"
database_copy_name = "cliautomationdb02"
service_objective = 'GP_Gen5_8'
# create database
self.cmd('sql db create -g {} --server {} --name {} --yes'
.format(resource_group_1, server1, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_name),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
# copy database to same server (min parameters)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {}'
.format(resource_group_1, server1, database_name, database_copy_name),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_copy_name)
])
# copy database to same server (min parameters, plus service_objective)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --service-objective {}'
.format(resource_group_1, server1, database_name, database_copy_name, service_objective),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_copy_name),
JMESPathCheck('requestedServiceObjectiveName', service_objective),
])
# copy database to same server, specifying backup storage redundancy
bsr_database = "bsr_database"
backup_storage_redundancy = 'local'
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --backup-storage-redundancy {}'
.format(resource_group_1, server1, database_name, bsr_database, backup_storage_redundancy),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', bsr_database),
JMESPathCheck('requestedBackupStorageRedundancy', 'Local')
])
# copy database to elastic pool in other server (max parameters, other than
# service_objective)
pool_name = 'pool1'
pool_edition = 'GeneralPurpose'
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
' --edition {}'
.format(resource_group_2, server2, pool_name, pool_edition))
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --dest-resource-group {} --dest-server {} '
'--elastic-pool {}'
.format(resource_group_1, server1, database_name, database_copy_name,
resource_group_2, server2, pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('name', database_copy_name),
JMESPathCheck('elasticPoolName', pool_name)
])
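# Module-level helpers shared by the restore tests below: parse timestamps from database JSON
# and wait until a newly created database has its first backup.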
def _get_earliest_restore_date(db):
return datetime.strptime(db['earliestRestoreDate'], "%Y-%m-%dT%H:%M:%S+00:00")
def _get_earliest_restore_date_for_deleted_db(deleted_db):
return datetime.strptime(deleted_db['earliestRestoreDate'], "%Y-%m-%dT%H:%M:%S+00:00")
def _get_deleted_date(deleted_db):
return datetime.strptime(deleted_db['deletionDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
def _create_db_wait_for_first_backup(test, resource_group, server, database_name):
# create db
db = test.cmd('sql db create -g {} --server {} --name {} -y'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Wait until earliestRestoreDate is in the past. When run live, this will take at least
# 10 minutes. Unfortunately there's no way to speed this up.
while db['earliestRestoreDate'] is None:
sleep(60)
db = test.cmd('sql db show -g {} -s {} -n {}'
.format(resource_group, server, database_name)).get_output_in_json()
earliest_restore_date = _get_earliest_restore_date(db)
if datetime.utcnow() <= earliest_restore_date:
print('Waiting until earliest restore date', earliest_restore_date)
while datetime.utcnow() <= earliest_restore_date:
sleep(10) # seconds
return db
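# Polls a managed instance database until its earliest restore point is populated.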
def _wait_until_first_backup_midb(self):
earliest_restore_date_string = None
while earliest_restore_date_string is None:
db = self.cmd('sql midb show -g {rg} --mi {managed_instance_name} -n {database_name}',
checks=[self.greater_than('length(@)', 0)])
earliest_restore_date_string = db.json_value['earliestRestorePoint']
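# Covers point-in-time restore: to a standalone database, into an elastic pool, and with a
# backup storage redundancy override.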
class SqlServerDbRestoreScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_db_restore(self, resource_group, resource_group_location, server):
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_standalone_database_name = 'cliautomationdb01restore1'
restore_pool_database_name = 'cliautomationdb01restore2'
elastic_pool = 'cliautomationpool1'
# create elastic pool
self.cmd('sql elastic-pool create -g {} -s {} -n {}'
.format(resource_group, server, elastic_pool))
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, resource_group, server, database_name)
# Restore to standalone db
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --service-objective {} --edition {}'
.format(resource_group, server, database_name, datetime.utcnow().isoformat(),
restore_standalone_database_name, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_standalone_database_name),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore db into pool. Note that 'elasticPoolName' is populated
# by the transform func, which only runs after `show`/`list` commands.
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --elastic-pool {}'
.format(resource_group, server, database_name, datetime.utcnow().isoformat(),
restore_pool_database_name, elastic_pool),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_pool_database_name),
JMESPathCheck('status', 'Online')])
self.cmd('sql db show -g {} -s {} -n {}'
.format(resource_group, server, restore_pool_database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_pool_database_name),
JMESPathCheck('status', 'Online'),
JMESPathCheck('elasticPoolName', elastic_pool)])
# restore db with backup storage redundancy parameter
bsr_database = 'bsr_database'
backup_storage_redundancy = 'geo'
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {} --backup-storage-redundancy {}'
.format(resource_group, server, database_name, datetime.utcnow().isoformat(),
bsr_database, backup_storage_redundancy),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', bsr_database),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo')])
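# Covers restoring a deleted database, both to its deletion time and to an earlier point in time.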
class SqlServerDbRestoreDeletedScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_db_restore_deleted(self, resource_group, resource_group_location, server):
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_database_name1 = 'cliautomationdb01restore1'
restore_database_name2 = 'cliautomationdb01restore2'
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, resource_group, server, database_name)
# Delete database
self.cmd('sql db delete -g {} -s {} -n {} --yes'.format(resource_group, server, database_name))
# Wait for deleted database to become visible. When run live, this will take around
# 5-10 minutes. Unfortunately there's no way to speed this up. Use timeout to ensure
# test doesn't loop forever if there's a bug.
start_time = datetime.now()
timeout = timedelta(0, 15 * 60) # 15 minutes timeout
while True:
deleted_dbs = list(
self.cmd('sql db list-deleted -g {} -s {}'.format(resource_group, server)).get_output_in_json())
if deleted_dbs:
# Deleted db found, stop polling
break
# Deleted db not found, sleep (if running live) and then poll again.
if self.is_live:
self.assertTrue(datetime.now() < start_time + timeout, 'Deleted db not found before timeout expired.')
sleep(10) # seconds
deleted_db = deleted_dbs[0]
# Restore deleted to latest point in time
self.cmd('sql db restore -g {} -s {} -n {} --deleted-time {} --dest-name {}'
' --service-objective {} --edition {}'
.format(resource_group, server, database_name, _get_deleted_date(deleted_db).isoformat(),
restore_database_name1, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_database_name1),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore deleted to earlier point in time
self.cmd('sql db restore -g {} -s {} -n {} -t {} --deleted-time {} --dest-name {}'
.format(resource_group, server, database_name,
_get_earliest_restore_date_for_deleted_db(deleted_db).isoformat(),
_get_deleted_date(deleted_db).isoformat(), restore_database_name2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_database_name2),
JMESPathCheck('status', 'Online')])
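# Covers database-level auditing (blob storage, Log Analytics and Event Hub targets) and the
# threat detection policy.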
class SqlServerDbSecurityScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer(location='westeurope')
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer(location='westeurope')
@StorageAccountPreparer(location='westus')
@StorageAccountPreparer(parameter_name='storage_account_2',
resource_group_parameter_name='resource_group_2')
def test_sql_db_security_mgmt(self, resource_group, resource_group_2,
resource_group_location, server,
storage_account, storage_account_2):
database_name = "cliautomationdb01"
state_enabled = 'Enabled'
state_disabled = 'Disabled'
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# get audit policy
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('blobStorageTargetState', state_disabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# update audit policy - enable
retention_days = 30
audit_actions_input = 'DATABASE_LOGOUT_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP'
audit_actions_expected = ['DATABASE_LOGOUT_GROUP',
'DATABASE_ROLE_MEMBER_CHANGE_GROUP']
self.cmd('sql db audit-policy update -g {} -s {} -n {}'
' --state {} --blob-storage-target-state {} --storage-key {} --storage-endpoint={}'
' --retention-days={} --actions {}'
.format(resource_group, server, database_name, state_enabled, state_enabled, key,
storage_endpoint, retention_days, audit_actions_input),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# update audit policy - specify storage account and resource group. use secondary key
storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
self.cmd('sql db audit-policy update -g {} -s {} -n {} --blob-storage-target-state {} --storage-account {}'
.format(resource_group, server, database_name, state_enabled, storage_account_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - disable
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get threat detection policy
self.cmd('sql db threat-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update threat detection policy - enable
disabled_alerts_input = 'Sql_Injection_Vulnerability Access_Anomaly'
disabled_alerts_expected = ['Sql_Injection_Vulnerability', 'Access_Anomaly']
email_addresses_input = '[email protected] [email protected]'
email_addresses_expected = ['[email protected]', '[email protected]']
email_account_admins = True
self.cmd('sql db threat-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint {}'
' --retention-days {} --email-addresses {} --disabled-alerts {}'
' --email-account-admins {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, email_addresses_input,
disabled_alerts_input, email_account_admins),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# update threat policy - specify storage account and resource group. use secondary key
key_2 = self._get_storage_key(storage_account_2, resource_group_2)
self.cmd('sql db threat-policy update -g {} -s {} -n {}'
' --storage-account {}'
.format(resource_group, server, database_name, storage_account_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# create log analytics workspace
log_analytics_workspace_name = self.create_random_name("laws", 20)
log_analytics_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'
.format(resource_group, log_analytics_workspace_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', log_analytics_workspace_name),
JMESPathCheck('provisioningState',
'Succeeded')]).get_output_in_json()['id']
# update audit policy - enable log analytics target
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
' --log-analytics-target-state {} --log-analytics-workspace-resource-id {}'
.format(resource_group, server, database_name, state_enabled,
state_enabled, log_analytics_workspace_id),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify logAnalyticsTargetState is enabled and isAzureMonitorTargetEnabled is true
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_enabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', True)])
# update audit policy - disable log analytics target
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {} --log-analytics-target-state {}'
.format(resource_group, server, database_name, state_enabled, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify logAnalyticsTargetState is disabled and isAzureMonitorTargetEnabled is false
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# create event hub namespace
eventhub_namespace = 'cliehnamespacedb01'
self.cmd('eventhubs namespace create -g {} -n {}'
.format(resource_group, eventhub_namespace),
checks=[
JMESPathCheck('provisioningState', 'Succeeded')])
# create event hub
eventhub_name = 'cliehdb01'
self.cmd('eventhubs eventhub create -g {} -n {} --namespace-name {}'
.format(resource_group, eventhub_name, eventhub_namespace),
checks=[
JMESPathCheck('status', 'Active')])
# create event hub authorization rule
eventhub_auth_rule = 'cliehauthruledb01'
eventhub_auth_rule_id = self.cmd(
'eventhubs namespace authorization-rule create -g {} -n {} --namespace-name {} --rights Listen Manage Send'
.format(resource_group, eventhub_auth_rule, eventhub_namespace)).get_output_in_json()['id']
# update audit policy - enable event hub target
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {} --event-hub-target-state {}'
' --event-hub-authorization-rule-id {} --event-hub {}'
.format(resource_group, server, database_name, state_enabled, state_enabled,
eventhub_auth_rule_id, eventhub_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify eventHubTargetState is enabled and isAzureMonitorTargetEnabled is true
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_enabled),
JMESPathCheck('isAzureMonitorTargetEnabled', True)])
# update audit policy - disable event hub target
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {} --event-hub-target-state {}'
.format(resource_group, server, database_name, state_enabled, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify eventHubTargetState is disabled and isAzureMonitorTargetEnabled is false
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
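# Covers server-level auditing with blob storage, Log Analytics and Event Hub targets.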
class SqlServerSecurityScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer(location='westeurope')
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer(location='westeurope')
@StorageAccountPreparer(location='westus')
@StorageAccountPreparer(parameter_name='storage_account_2',
resource_group_parameter_name='resource_group_2')
def test_sql_server_security_mgmt(self, resource_group, resource_group_2,
resource_group_location, server,
storage_account, storage_account_2):
state_enabled = 'Enabled'
state_disabled = 'Disabled'
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# get audit policy
self.cmd('sql server audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('blobStorageTargetState', state_disabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# update audit policy - enable
retention_days = 30
audit_actions_input = 'DATABASE_LOGOUT_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP'
audit_actions_expected = ['DATABASE_LOGOUT_GROUP',
'DATABASE_ROLE_MEMBER_CHANGE_GROUP']
self.cmd('sql server audit-policy update -g {} -n {}'
' --state {} --blob-storage-target-state {} --storage-key {} --storage-endpoint={}'
' --retention-days={} --actions {}'
.format(resource_group, server, state_enabled, state_enabled, key,
storage_endpoint, retention_days, audit_actions_input),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy
self.cmd('sql server audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# update audit policy - specify storage account and resource group. use secondary key
storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
self.cmd('sql server audit-policy update -g {} -n {}'
' --blob-storage-target-state {} --storage-account {}'
.format(resource_group, server, state_enabled, storage_account_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - disable
self.cmd('sql server audit-policy update -g {} -n {}'
' --state {}'
.format(resource_group, server, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# create log analytics workspace
log_analytics_workspace_name = self.create_random_name("laws", 20)
log_analytics_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'
.format(resource_group, log_analytics_workspace_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', log_analytics_workspace_name),
JMESPathCheck('provisioningState',
'Succeeded')]).get_output_in_json()['id']
# update audit policy - enable log analytics target
self.cmd('sql server audit-policy update -g {} -n {}'
' --state {}'
' --log-analytics-target-state {} --log-analytics-workspace-resource-id {}'
.format(resource_group, server, state_enabled, state_enabled, log_analytics_workspace_id),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify logAnalyticsTargetState is enabled and isAzureMonitorTargetEnabled is true
self.cmd('sql server audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_enabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', True)])
# update audit policy - disable log analytics target
self.cmd('sql server audit-policy update -g {} -n {}'
' --state {} --log-analytics-target-state {}'
.format(resource_group, server, state_enabled, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify logAnalyticsTargetState is disabled and isAzureMonitorTargetEnabled is false
self.cmd('sql server audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# create event hub namespace
eventhub_namespace = 'cliehnamespacedb01'
self.cmd('eventhubs namespace create -g {} -n {}'
.format(resource_group, eventhub_namespace),
checks=[
JMESPathCheck('provisioningState', 'Succeeded')])
# create event hub
eventhub_name = 'cliehsrv01'
self.cmd('eventhubs eventhub create -g {} -n {} --namespace-name {}'
.format(resource_group, eventhub_name, eventhub_namespace),
checks=[
JMESPathCheck('status', 'Active')])
# create event hub authorization rule
eventhub_auth_rule = 'cliehauthruledb01'
eventhub_auth_rule_id = self.cmd(
'eventhubs namespace authorization-rule create -g {} -n {} --namespace-name {} --rights Listen Manage Send'
.format(resource_group, eventhub_auth_rule, eventhub_namespace)).get_output_in_json()['id']
# update audit policy - enable event hub target
self.cmd('sql server audit-policy update -g {} -n {}'
' --state {} --event-hub-target-state {}'
' --event-hub-authorization-rule-id {} --event-hub {}'
.format(resource_group, server, state_enabled, state_enabled,
eventhub_auth_rule_id, eventhub_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify eventHubTargetState is enabled and isAzureMonitorTargetEnabled is true
self.cmd('sql server audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_enabled),
JMESPathCheck('isAzureMonitorTargetEnabled', True)])
# update audit policy - disable event hub target
self.cmd('sql server audit-policy update -g {} -n {}'
' --state {} --event-hub-target-state {}'
.format(resource_group, server, state_enabled, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get audit policy - verify eventHubTargetState is disabled and isAzureMonitorTargetEnabled is false
self.cmd('sql server audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
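# Covers the Microsoft support operations (ms-support) audit policy with blob storage,
# Log Analytics and Event Hub targets.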
class SqlServerMSSupportScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer(location='westeurope')
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer(location='westeurope')
@StorageAccountPreparer(location='westus')
@StorageAccountPreparer(parameter_name='storage_account_2',
resource_group_parameter_name='resource_group_2')
def test_sql_server_ms_support_mgmt(self, resource_group, resource_group_2,
resource_group_location, server,
storage_account, storage_account_2):
state_enabled = 'Enabled'
state_disabled = 'Disabled'
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# get MS support audit policy
self.cmd('sql server ms-support audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('blobStorageTargetState', state_disabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# update MS support audit policy - enable
self.cmd('sql server ms-support audit-policy update -g {} -n {}'
' --state {} --blob-storage-target-state {} --storage-key {} --storage-endpoint={}'
.format(resource_group, server, state_enabled, state_enabled, key, storage_endpoint),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageEndpoint', storage_endpoint)])
# get MS support audit policy
self.cmd('sql server ms-support audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# update MS support audit policy - specify storage account and resource group. use secondary key
storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
self.cmd(
'sql server ms-support audit-policy update -g {} -n {} --blob-storage-target-state {} --storage-account {}'
.format(resource_group, server, state_enabled, storage_account_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageEndpoint', storage_endpoint_2)])
# update MS support audit policy - disable
self.cmd('sql server ms-support audit-policy update -g {} -n {} --state {}'
.format(resource_group, server, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled)])
# create log analytics workspace
log_analytics_workspace_name = "clilaworkspacems04"
log_analytics_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'
.format(resource_group, log_analytics_workspace_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', log_analytics_workspace_name),
JMESPathCheck('provisioningState',
'Succeeded')]).get_output_in_json()['id']
# update MS support audit policy - enable log analytics target
self.cmd('sql server ms-support audit-policy update -g {} -n {} --state {}'
' --log-analytics-target-state {} --log-analytics-workspace-resource-id {}'
.format(resource_group, server, state_enabled, state_enabled, log_analytics_workspace_id),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled)])
# get MS support audit policy - verify logAnalyticsTargetState is enabled and isAzureMonitorTargetEnabled is true
self.cmd('sql server ms-support audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_enabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', True)])
# update MS support audit policy - disable log analytics target
self.cmd('sql server ms-support audit-policy update -g {} -n {} --state {} --log-analytics-target-state {}'
.format(resource_group, server, state_enabled, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled)])
# get MS support audit policy - verify logAnalyticsTargetState is disabled and isAzureMonitorTargetEnabled is false
self.cmd('sql server ms-support audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
# create event hub namespace
eventhub_namespace = 'cliehnamespacems02'
self.cmd('eventhubs namespace create -g {} -n {}'
.format(resource_group, eventhub_namespace),
checks=[
JMESPathCheck('provisioningState', 'Succeeded')])
# create event hub
eventhub_name = 'cliehms02'
self.cmd('eventhubs eventhub create -g {} -n {} --namespace-name {}'
.format(resource_group, eventhub_name, eventhub_namespace),
checks=[
JMESPathCheck('status', 'Active')])
# create event hub authorization rule
eventhub_auth_rule = 'cliehauthrulems02'
eventhub_auth_rule_id = self.cmd(
'eventhubs namespace authorization-rule create -g {} -n {} --namespace-name {} --rights Listen Manage Send'
.format(resource_group, eventhub_auth_rule, eventhub_namespace)).get_output_in_json()['id']
# update MS support audit policy - enable event hub target
self.cmd('sql server ms-support audit-policy update -g {} -n {} --state {} --event-hub-target-state {}'
' --event-hub-authorization-rule-id {} --event-hub {}'
.format(resource_group, server, state_enabled, state_enabled,
eventhub_auth_rule_id, eventhub_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled)])
# get MS support audit policy - verify eventHubTargetState is enabled and isAzureMonitorTargetEnabled is true
self.cmd('sql server ms-support audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_enabled),
JMESPathCheck('isAzureMonitorTargetEnabled', True)])
# update MS support audit policy - disable event hub target
self.cmd('sql server ms-support audit-policy update -g {} -n {} --state {} --event-hub-target-state {}'
.format(resource_group, server, state_enabled, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled)])
# get MS support audit policy - verify eventHubTargetState is disabled and isAzureMonitorTargetEnabled is false
self.cmd('sql server ms-support audit-policy show -g {} -n {}'
.format(resource_group, server),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('blobStorageTargetState', state_enabled),
JMESPathCheck('logAnalyticsTargetState', state_disabled),
JMESPathCheck('eventHubTargetState', state_disabled),
JMESPathCheck('isAzureMonitorTargetEnabled', False)])
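# Covers 'sql dw' management: create, list, pause/resume, update max size and service objective, delete.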
class SqlServerDwMgmtScenarioTest(ScenarioTest):
# pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_dw_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'DW200c'
update_storage = '20TB'
update_storage_bytes = str(20 * 1024 * 1024 * 1024 * 1024)
# test sql db commands
dw = self.cmd('sql dw create -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('edition', 'DataWarehouse'),
JMESPathCheck('sku.tier', 'DataWarehouse'),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Sanity check that the default max size is not equal to the size that we will update to
# later. That way we know that update is actually updating the size.
self.assertNotEqual(dw['maxSizeBytes'], update_storage_bytes,
'Initial max size in bytes is equal to the value we want to update to later,'
' so we will not be able to verify that update max size is actually updating.')
# DataWarehouse is a little quirky: it is both a database and its own separate kind of
# resource. (It has the same REST endpoint as a regular database, so it must be a database,
# but it supports only a subset of operations, so to make clear which operations apply to dw
# we group them under `sql dw`.) As a result the dw shows up under both `db list` and `dw list`.
self.cmd('sql db list -g {} --server {}'
.format(resource_group, server),
checks=[
JMESPathCheck('length(@)', 2), # includes dw and master
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[1].resourceGroup', resource_group)])
self.cmd('sql dw list -g {} --server {}'
.format(resource_group, server),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].resourceGroup', resource_group)])
self.cmd('sql db show -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group)])
# pause/resume
self.cmd('sql dw pause -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show --id {}'
.format(dw['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('status', 'Paused')])
self.cmd('sql dw resume -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show --id {}'
.format(dw['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('status', 'Online')])
# Update DW storage
self.cmd('sql dw update -g {} -s {} -n {} --max-size {}'
' --set tags.key1=value1'
.format(resource_group, server, database_name, update_storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update DW service objective
self.cmd('sql dw update --id {} --service-objective {}'
.format(dw['id'], update_service_objective),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Delete DW
self.cmd('sql dw delete -g {} --server {} --name {} --yes'
.format(resource_group, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw delete --id {} --yes'
.format(dw['id']),
checks=[NoneCheck()])
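# Covers server DNS alias create/set/list/delete, including repointing an alias between servers
# and across resource groups.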
class SqlServerDnsAliasMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1",
location='eastus')
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2",
location='eastus')
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1",
location='eastus')
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1",
location='eastus')
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2",
location='eastus')
def test_sql_server_dns_alias_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
alias_name = 'alias1'
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# Create server dns alias
self.cmd('sql server dns-alias create -n {} -s {} -g {}'
.format(alias_name, s1.name, s1.group),
checks=[
JMESPathCheck('name', alias_name),
JMESPathCheck('resourceGroup', s1.group)
])
# Check that alias is created on a right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the server within the same resource group
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[])
# List the aliases on old server to check if alias is not pointing there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the same server (to check that operation is idempotent)
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[])
# Check if alias is pointing to the right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to a server in a different resource group
self.cmd('sql server dns-alias set -n {} --original-server {} --original-resource-group {} -s {} -g {}'
.format(alias_name, s2.name, s2.group, s3.name, s3.group),
checks=[])
# List the aliases on old server to check if alias is not pointing there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Drop alias
self.cmd('sql server dns-alias delete -n {} -s {} -g {}'
.format(alias_name, s3.name, s3.group),
checks=[])
# Verify that alias got dropped correctly
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 0)
])
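# Covers geo-replica and named-replica management: creating replicas (including into an elastic pool),
# failing over between servers, listing replication links, and deleting links.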
class SqlServerDbReplicaMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1",
location='westeurope')
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2",
location='westeurope')
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1",
location='westeurope')
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1",
location='westeurope')
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2",
location='westeurope')
@AllowLargeResponse()
def test_sql_db_replica_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
database_name = "cliautomationdb01"
target_database_name = "cliautomationdb02"
hs_database_name = "cliautomationhs03"
hs_target_database_name = "cliautomationnr04"
service_objective = 'GP_Gen5_8'
hs_service_objective = 'HS_Gen5_8'
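# Service objective names encode tier_hardware_vCores: GP_Gen5_8 = General Purpose, Gen5 hardware,
# 8 vCores; HS_Gen5_8 = Hyperscale, Gen5 hardware, 8 vCores.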
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# create db in first server
self.cmd('sql db create -g {} -s {} -n {} --yes'
.format(s1.group, s1.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s1.group)])
# create hs db in first server
self.cmd('sql db create -g {} -s {} -n {} --service-objective {} --yes'
.format(s1.group, s1.name, hs_database_name, hs_service_objective),
checks=[
JMESPathCheck('name', hs_database_name),
JMESPathCheck('resourceGroup', s1.group)])
# create replica in second server with min params
# partner resource group unspecified because s1.group == s2.group
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
.format(s1.group, s1.name, database_name,
s2.name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# create replica in second server with backup storage redundancy
backup_storage_redundancy = "zone"
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {} --backup-storage-redundancy {}'
.format(s1.group, s1.name, database_name,
s2.name, backup_storage_redundancy),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone')])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s2.group, s2.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# Delete replica in second server and recreate with explicit service objective and name
self.cmd('sql db delete -g {} -s {} -n {} --yes'
.format(s2.group, s2.name, database_name))
secondary_type = "Geo"
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {} '
' --service-objective {} --partner-database {} --secondary-type {}'
.format(s1.group, s1.name, database_name,
s2.name, service_objective, target_database_name, secondary_type),
checks=[
JMESPathCheck('name', target_database_name),
JMESPathCheck('resourceGroup', s2.group),
JMESPathCheck('requestedServiceObjectiveName', service_objective),
JMESPathCheck('secondaryType', secondary_type)])
# Create a named replica
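# (named replicas are only supported for Hyperscale databases, hence the HS database and service objective)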
secondary_type = "Named"
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {} '
' --service-objective {} --partner-resource-group {} --partner-database {} --secondary-type {} --ha-replicas {}'
.format(s1.group, s1.name, hs_database_name,
s1.name, hs_service_objective, s1.group, hs_target_database_name, secondary_type, 2),
checks=[
JMESPathCheck('name', hs_target_database_name),
JMESPathCheck('resourceGroup', s1.group),
JMESPathCheck('requestedServiceObjectiveName', hs_service_objective),
JMESPathCheck('secondaryType', secondary_type),
JMESPathCheck('highAvailabilityReplicaCount', 2)])
# Create replica in pool in third server with max params (except service objective)
pool_name = 'pool1'
pool_edition = 'GeneralPurpose'
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
' --edition {}'
.format(s3.group, s3.name, pool_name, pool_edition))
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
' --partner-resource-group {} --elastic-pool {}'
.format(s1.group, s1.name, database_name,
s3.name, s3.group, pool_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group),
JMESPathCheck('elasticPoolName', pool_name)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group)])
# list replica links on s1 - it should link to s2 and s3
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# list replica links on s3 - it should link only to s1
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Secondary'),
JMESPathCheck('[0].partnerRole', 'Primary')])
# Failover to s3.
self.cmd('sql db replica set-primary -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# list replica links on s3 - it should link to s1 and s2
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# Stop replication from s3 to s2 twice. Second time should be no-op.
for _ in range(2):
# Delete link
self.cmd('sql db replica delete-link -g {} -s {} -n {} --partner-resource-group {}'
' --partner-server {} --yes'
.format(s3.group, s3.name, database_name, s2.group, s2.name),
checks=[NoneCheck()])
# Verify link was deleted. s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Failover to s3 again (should be no-op, it's already primary)
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Force failover back to s1
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s1.group, s1.name, database_name),
checks=[NoneCheck()])
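# Covers the elastic pool lifecycle (create/show/list/update/delete) and moving a database
# into, between, and out of pools.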
class SqlElasticPoolsMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolsMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "cliautomationpool01"
def verify_activities(self, activities, resource_group, server):
if not isinstance(activities, list):
raise AssertionError("Actual value '{}' expected to be a list."
.format(activities))
for activity in activities:
if not isinstance(activity, dict):
raise AssertionError("Actual value '{}' expected to be a dict."
.format(activity))
if activity['resourceGroup'] != resource_group:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['resourceGroup'], resource_group))
elif activity['serverName'] != server:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['serverName'], server))
elif activity['currentElasticPoolName'] != self.pool_name:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['currentElasticPoolName'], self.pool_name))
return True
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
@AllowLargeResponse()
def test_sql_elastic_pools_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb02"
pool_name2 = "cliautomationpool02"
edition = 'Standard'
dtu = 1200
db_dtu_min = 10
db_dtu_max = 50
storage = '1200GB'
storage_mb = 1228800
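# storage_mb is 1200 GB expressed in MB (1200 * 1024)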
updated_dtu = 50
updated_db_dtu_min = 10
updated_db_dtu_max = 50
updated_storage = '50GB'
updated_storage_mb = 51200
db_service_objective = 'S1'
# test sql elastic-pool commands
elastic_pool_1 = self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} '
'--storage {}'
.format(resource_group, server, self.pool_name, dtu,
edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('sku.capacity', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition)]).get_output_in_json()
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('zoneRedundant', False)])
self.cmd('sql elastic-pool show --id {}'
.format(elastic_pool_1['id']),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb)])
self.cmd('sql elastic-pool list -g {} --server {}'
.format(resource_group, server),
checks=[
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].name', self.pool_name),
JMESPathCheck('[0].state', 'Ready'),
JMESPathCheck('[0].databaseDtuMin', db_dtu_min),
JMESPathCheck('[0].databaseDtuMax', db_dtu_max),
JMESPathCheck('[0].edition', edition),
JMESPathCheck('[0].storageMb', storage_mb)])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --storage {} --set tags.key1=value1'
.format(resource_group, server, self.pool_name,
updated_dtu, updated_storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', updated_dtu),
JMESPathCheck('sku.capacity', updated_dtu),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max),
JMESPathCheck('storageMb', updated_storage_mb),
JMESPathCheck('maxSizeBytes', updated_storage_mb * 1024 * 1024),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update --id {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(elastic_pool_1['id'], dtu,
updated_db_dtu_min, updated_db_dtu_max,
storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('sku.capacity', dtu),
JMESPathCheck('databaseDtuMin', updated_db_dtu_min),
JMESPathCheck('databaseDtuMax', updated_db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', updated_db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', updated_db_dtu_max),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('maxSizeBytes', storage_mb * 1024 * 1024),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--remove tags.key1'
.format(resource_group, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('tags', {})])
# create a second pool with minimal params
elastic_pool_2 = self.cmd('sql elastic-pool create -g {} --server {} --name {} '
.format(resource_group, server, pool_name2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('state', 'Ready')]).get_output_in_json()
self.cmd('sql elastic-pool list -g {} -s {}'.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 2)])
# Create a database directly in an Azure sql elastic pool.
# Note that 'elasticPoolName' is populated in transform
# func which only runs after `show`/`list` commands.
self.cmd('sql db create -g {} --server {} --name {} '
'--elastic-pool {}'
.format(resource_group, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', elastic_pool_1['id']),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
self.cmd('sql db show -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('elasticPoolName', self.pool_name)])
if self.is_live:
sleep(120)
# Move database to second pool by specifying pool name.
# Also specify service objective just for fun.
# Note that 'elasticPoolName' is populated in transform
# func which only runs after `show`/`list` commands.
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(resource_group, server, database_name, pool_name2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', elastic_pool_2['id']),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
self.cmd('sql db show -g {} --server {} --name {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('elasticPoolName', pool_name2)])
if self.is_live:
sleep(60)
# Remove database from pool
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(resource_group, server, database_name, db_service_objective),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('requestedServiceObjectiveName', db_service_objective),
JMESPathCheck('status', 'Online')])
if self.is_live:
sleep(60)
# Move database back into pool by specifying pool id.
# Note that 'elasticPoolName' is populated in transform
# func which only runs after `show`/`list` commands.
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(resource_group, server, database_name, elastic_pool_1['id']),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', elastic_pool_1['id']),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
self.cmd('sql db show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', elastic_pool_1['id']),
JMESPathCheck('elasticPoolName', self.pool_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# List databases in a pool
self.cmd('sql elastic-pool list-dbs -g {} -s {} -n {}'
.format(resource_group, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# List databases in a pool - alternative command
self.cmd('sql db list -g {} -s {} --elastic-pool {}'
.format(resource_group, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# delete sql server database
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(resource_group, server, database_name),
checks=[NoneCheck()])
# delete sql elastic pool
self.cmd('sql elastic-pool delete -g {} --server {} --name {}'
.format(resource_group, server, self.pool_name),
checks=[NoneCheck()])
# delete sql elastic pool by id
self.cmd('sql elastic-pool delete --id {}'
.format(elastic_pool_1['id']),
checks=[NoneCheck()])
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_elastic_pools_vcore_mgmt(self, resource_group, resource_group_location, server):
pool_name = "cliautomationpool1"
# Create pool with vcore edition
vcore_edition = 'GeneralPurpose'
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, pool_name, vcore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition)])
# Update pool to dtu edition
dtu_edition = 'Standard'
dtu_capacity = 100
db_dtu_max = 10
self.cmd('sql elastic-pool update -g {} --server {} --name {} --edition {} --capacity {} --max-size 250GB '
'--db-max-dtu {}'
.format(resource_group, server, pool_name, dtu_edition, dtu_capacity, db_dtu_max),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', dtu_edition),
JMESPathCheck('sku.tier', dtu_edition),
JMESPathCheck('dtu', dtu_capacity),
JMESPathCheck('sku.capacity', dtu_capacity),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max)])
# Update pool back to vcore edition
vcore_family = 'Gen5'
vcore_family_updated = 'Gen5'
vcore_capacity = 4
self.cmd('sql elastic-pool update -g {} --server {} --name {} -e {} -c {} -f {} '
'--db-max-capacity 2'
.format(resource_group, server, pool_name, vcore_edition,
vcore_capacity, vcore_family),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only capacity
vcore_capacity_updated = 8
self.cmd('sql elastic-pool update -g {} -s {} -n {} --capacity {}'
.format(resource_group, server, pool_name, vcore_capacity_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only edition
vcore_edition_updated = 'BusinessCritical'
self.cmd('sql elastic-pool update -g {} -s {} -n {} --tier {}'
.format(resource_group, server, pool_name, vcore_edition_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only db min & max cap
db_min_capacity_updated = 0.5
db_max_capacity_updated = 1
self.cmd('sql elastic-pool update -g {} -s {} -n {} --db-max-capacity {} --db-min-capacity {}'
.format(resource_group, server, pool_name, db_max_capacity_updated, db_min_capacity_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.minCapacity', db_min_capacity_updated),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_max_capacity_updated)])
# Create pool with vcore edition and all sku properties specified
pool_name_2 = 'cliautomationpool2'
vcore_edition = 'GeneralPurpose'
self.cmd('sql elastic-pool create -g {} --server {} --name {} -e {} -c {} -f {}'
.format(resource_group, server, pool_name_2,
vcore_edition_updated, vcore_capacity_updated,
vcore_family_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None)])
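# Covers listing in-flight management operations on an elastic pool and attempting to cancel one.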
class SqlElasticPoolOperationMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolOperationMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "operationtestep1"
@ResourceGroupPreparer(location='westeurope')
@SqlServerPreparer(location='westeurope')
@AllowLargeResponse()
def test_sql_elastic_pool_operation_mgmt(self, resource_group, resource_group_location, server):
edition = 'Premium'
dtu = 125
db_dtu_min = 0
db_dtu_max = 50
storage = '50GB'
storage_mb = 51200
update_dtu = 250
update_db_dtu_min = 50
update_db_dtu_max = 250
# Create elastic pool
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(resource_group, server, self.pool_name, dtu, edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('sku.capacity', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('maxSizeBytes', storage_mb * 1024 * 1024)])
# Update elastic pool
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {}'
.format(resource_group, server, self.pool_name, update_dtu, update_db_dtu_min, update_db_dtu_max))
# List operations on the elastic pool
ops = list(self.cmd('sql elastic-pool op list -g {} --server {} --elastic-pool {}'
.format(resource_group, server, self.pool_name)).get_output_in_json())
# Cancel operation
try:
self.cmd('sql elastic-pool op cancel -g {} --server {} --elastic-pool {} --name {}'
.format(resource_group, server, self.pool_name, ops[0]['name']))
except Exception as e:
# Cancel is rejected once the update has already completed; any other error should fail the test.
expected_message = "Cannot cancel management operation {} in current state.".format(ops[0]['name'])
if expected_message not in str(e):
raise
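# Covers the capability queries `sql db list-editions` and `sql elastic-pool list-editions`
# with the various --edition/--dtu/--vcore/--show-details filters.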
class SqlServerCapabilityScenarioTest(ScenarioTest):
@AllowLargeResponse()
def test_sql_capabilities(self):
location = 'westeurope'
# New capabilities are added quite frequently and the state of each capability depends
# on your subscription. So it's not a good idea to make strict checks against exactly
# which capabilities are returned. The idea is to just check the overall structure.
db_max_size_length_jmespath = 'length([].supportedServiceLevelObjectives[].supportedMaxSizes[])'
# Get all db capabilities
self.cmd('sql db list-editions -l {}'.format(location),
checks=[
# At least system, standard, and premium edition exist
JMESPathCheckExists("[?name == 'System']"),
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheckExists("[?name == 'Premium']"),
# At least s0 and p1 service objectives exist
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'P1']"),
# Max size data is omitted
JMESPathCheck(db_max_size_length_jmespath, 0)])
# Get all available db capabilities
self.cmd('sql db list-editions -l {} --available'.format(location),
checks=[
# System edition is not available
JMESPathCheck("length([?name == 'System'])", 0),
# At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheckExists("[?name == 'Premium']"),
# At least s0 and p1 service objectives exist
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'P1']"),
# Max size data is omitted
JMESPathCheck(db_max_size_length_jmespath, 0)])
# Get all db capabilities with size data
self.cmd('sql db list-editions -l {} --show-details max-size'.format(location),
checks=[
# Max size data is included
JMESPathCheckGreaterThan(db_max_size_length_jmespath, 0)])
# Search for db edition - note that it's case insensitive
self.cmd('sql db list-editions -l {} --edition standard'.format(location),
checks=[
# Standard edition exists, other editions don't
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for dtus
self.cmd('sql db list-editions -l {} --dtu 100'.format(location),
checks=[
# All results have 100 dtu
JMESPathCheckGreaterThan(
'length([].supportedServiceLevelObjectives[?performanceLevel.value == `100`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.value != `100`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.unit != `DTU`][])', 0)])
# Search for vcores
self.cmd('sql db list-editions -l {} --vcore 2'.format(location),
checks=[
# All results have 2 vcores
JMESPathCheckGreaterThan(
'length([].supportedServiceLevelObjectives[?performanceLevel.value == `2`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.value != `2`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.unit != `VCores`][])',
0)])
# Search for db service objective - note that it's case insensitive
# Checked items:
# * Standard edition exists, other editions don't
# * S0 service objective exists, others don't exist
self.cmd('sql db list-editions -l {} --edition standard --service-objective s0'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheck("length([].supportedServiceLevelObjectives[] | [?name != 'S0'])", 0)])
pool_max_size_length_jmespath = 'length([].supportedElasticPoolPerformanceLevels[].supportedMaxSizes[])'
pool_db_max_dtu_length_jmespath = 'length([].supportedElasticPoolPerformanceLevels[].supportedPerDatabaseMaxPerformanceLevels[])'
pool_db_min_dtu_length_jmespath = (
'length([].supportedElasticPoolPerformanceLevels[].supportedPerDatabaseMaxPerformanceLevels[]'
'.supportedPerDatabaseMinPerformanceLevels[])')
pool_db_max_size_length_jmespath = 'length([].supportedElasticPoolPerformanceLevels[].supportedPerDatabaseMaxSizes[])'
# Get all elastic pool capabilities
self.cmd('sql elastic-pool list-editions -l {}'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Premium']"),
JMESPathCheck(pool_max_size_length_jmespath, 0), # Optional details are omitted
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Search for elastic pool edition - note that it's case insensitive
self.cmd('sql elastic-pool list-editions -l {} --edition standard'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # Standard edition exists, other editions don't
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for dtus
self.cmd('sql elastic-pool list-editions -l {} --dtu 100'.format(location),
checks=[
# All results have 100 dtu
JMESPathCheckGreaterThan(
'length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value == `100`][])', 0),
JMESPathCheck(
'length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value != `100`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.unit != `DTU`][])', 0)])
# Search for vcores
self.cmd('sql elastic-pool list-editions -l {} --vcore 2'.format(location),
checks=[
# All results have 2 vcores
JMESPathCheckGreaterThan(
'length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value == `2`][])', 0),
JMESPathCheck('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value != `2`][])',
0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.unit != `VCores`][])',
0)])
# Get all db capabilities with pool max size
self.cmd('sql elastic-pool list-editions -l {} --show-details max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max size
self.cmd('sql elastic-pool list-editions -l {} --show-details db-max-size'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max dtu
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-max-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db min dtu (which is nested under per db max dtu)
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with everything
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu db-max-dtu '
'db-max-size max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
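# Covers bacpac export/import against blob storage, using both a storage access key and a SAS key.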
class SqlServerImportExportMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
@SqlServerPreparer(location='eastus')
@StorageAccountPreparer(location='eastus')
@AllowLargeResponse()
def test_sql_db_import_export_mgmt(self, resource_group, resource_group_location, server, storage_account):
location_long_name = 'eastus'
admin_login = 'admin123'
admin_password = 'SecretPassword123'
db_name = 'cliautomationdb01'
db_name2 = 'cliautomationdb02'
db_name3 = 'cliautomationdb03'
blob = 'testbacpac.bacpac'
blob2 = 'testbacpac2.bacpac'
container = 'bacpacs'
firewall_rule_1 = 'allowAllIps'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '0.0.0.0'
# create server firewall rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
start_ip_address_1, end_ip_address_1),
checks=[JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# create dbs
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name2),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name2),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name3),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name3),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
# get storage account endpoint
storage_endpoint = self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
bacpacUri = '{}{}/{}'.format(storage_endpoint, container, blob)
bacpacUri2 = '{}{}/{}'.format(storage_endpoint, container, blob2)
# get storage account key
storageKey = self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
# Set Expiry
expiryString = '9999-12-25T00:00:00Z'
# Get sas key
sasKey = self.cmd('storage blob generate-sas --account-name {} -c {} -n {} --permissions rw --expiry {}'.format(
storage_account, container, blob2, expiryString)).get_output_in_json()
# create storage account blob container
self.cmd('storage container create -n {} --account-name {} --account-key {} '
.format(container, storage_account, storageKey),
checks=[JMESPathCheck('created', True)])
# export database to blob container using both keys
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[
JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'ExportDatabase'),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[
JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'ExportDatabase'),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to second database using Storage Key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name2, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[
JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name2),
JMESPathCheck('requestType', 'ImportToExistingDatabase'),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to third database using SAS key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name3, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[
JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name3),
JMESPathCheck('requestType', 'ImportToExistingDatabase'),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
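# Covers `sql db show-connection-string` output for each client type (ado.net, sqlcmd, jdbc,
# php_pdo, php, odbc) and authentication mode.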
class SqlServerConnectionStringScenarioTest(ScenarioTest):
def test_sql_db_conn_str(self):
# ADO.NET, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net').get_output_in_json()
self.assertEqual(conn_str,
'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;')
# ADO.NET, ADPassword
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c ado.net -a ADPassword').get_output_in_json()
self.assertEqual(conn_str,
'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Password"')
# ADO.NET, ADIntegrated
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c ado.net -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str,
'Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Integrated"')
# SqlCmd, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd').get_output_in_json()
self.assertEqual(conn_str,
'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -N -l 30')
# SqlCmd, ADPassword
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADPassword').get_output_in_json()
self.assertEqual(conn_str,
'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -G -N -l 30')
# SqlCmd, ADIntegrated
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -G -N -l 30')
# JDBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc').get_output_in_json()
self.assertEqual(conn_str,
'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>@myserver;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30')
# JDBC, ADPassword
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c jdbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str,
'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryPassword')
# JDBC, ADIntegrated
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c jdbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str,
'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryIntegrated')
# PHP PDO, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo').get_output_in_json()
self.assertEqual(conn_str,
'$conn = new PDO("sqlsrv:server = tcp:myserver.database.windows.net,1433; Database = mydb; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");')
# PHP PDO, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADPassword', expect_failure=True)
# PHP PDO, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADIntegrated', expect_failure=True)
# PHP, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php').get_output_in_json()
self.assertEqual(conn_str,
'$connectionOptions = array("UID"=>"<username>@myserver", "PWD"=>"<password>", "Database"=>mydb, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:myserver.database.windows.net,1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);')
# PHP, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADPassword', expect_failure=True)
# PHP, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADIntegrated', expect_failure=True)
# ODBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc').get_output_in_json()
self.assertEqual(conn_str,
'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;')
# ODBC, ADPassword
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c odbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str,
'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryPassword')
# ODBC, ADIntegrated
conn_str = self.cmd(
'sql db show-connection-string -s myserver -n mydb -c odbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str,
'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryIntegrated')
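# Covers transparent data encryption: toggling TDE on a database and switching the server
# encryption protector between service-managed and customer-managed (Azure Key Vault) keys.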
class SqlTransparentDataEncryptionScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer(location='eastus')
def test_sql_tde(self, resource_group, server):
sn = server
db_name = self.create_random_name("sqltdedb", 20)
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, sn, db_name))
# validate encryption is on by default
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(resource_group, sn, db_name),
checks=[JMESPathCheck('state', 'Enabled')])
# disable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Disabled'
.format(resource_group, sn, db_name))
sleep(5)
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(resource_group, sn, db_name),
checks=[JMESPathCheck('state', 'Disabled')])
# enable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Enabled'
.format(resource_group, sn, db_name))
sleep(5)
# validate encryption is enabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(resource_group, sn, db_name),
checks=[JMESPathCheck('state', 'Enabled')])
@ResourceGroupPreparer(location='eastus')
@SqlServerPreparer(location='eastus')
@KeyVaultPreparer(location='eastus', name_prefix='sqltdebyok')
def test_sql_tdebyok(self, resource_group, server, key_vault):
resource_prefix = 'sqltdebyok'
# add identity to server
server_resp = self.cmd('sql server update -g {} -n {} -i'
.format(resource_group, server)).get_output_in_json()
server_identity = server_resp['identity']['principalId']
# create db
db_name = self.create_random_name(resource_prefix, 20)
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name))
# create vault and acl server identity
self.cmd('keyvault set-policy -g {} -n {} --object-id {} --key-permissions wrapKey unwrapKey get list'
.format(resource_group, key_vault, server_identity))
# create key
key_name = self.create_random_name(resource_prefix, 32)
key_resp = self.cmd('keyvault key create -n {} -p software --vault-name {}'
.format(key_name, key_vault)).get_output_in_json()
kid = key_resp['key']['kid']
# add server key
server_key_resp = self.cmd('sql server key create -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault')])
server_key_name = server_key_resp.get_output_in_json()['name']
# validate show key
self.cmd('sql server key show -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('name', server_key_name)])
# validate list key (should return 2 items)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 2)])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# update encryption protector to akv key
self.cmd('sql server tde-key set -g {} -s {} -t AzureKeyVault -k {} --auto-rotation-enabled'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# JMESPathCheck('autoRotationEnabled', True) - property is removed from backend
# validate encryption protector is akv via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# update encryption protector to service managed
self.cmd('sql server tde-key set -g {} -s {} -t ServiceManaged'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# delete server key
self.cmd('sql server key delete -g {} -s {} -k {}'
.format(resource_group, server, kid))
# wait for key to be deleted
time.sleep(10)
# validate deleted server key via list (should return 1 item)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 1)])
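# Covers creating a server with both system-assigned and user-assigned managed identities.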
class SqlServerIdentityTest(ScenarioTest):
@AllowLargeResponse()
def test_sql_server_identity(self):
server_name_test = 'umitest'
server_name = self.create_random_name(server_name_test, managed_instance_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
families = ['Gen5']
subnet = '/subscriptions/e64f3e8e-ab91-4a65-8cdd-5cd2f47d00b4/resourceGroups/alswansotest3-rg/providers/Microsoft.Network/virtualNetworks/vnet-alswansotestmi/subnets/ManagedInstance'
license_type = 'LicenseIncluded'
loc = 'eastus2euap'
v_cores = 4
storage_size_in_gb = '32'
edition = 'GeneralPurpose'
resource_group_1 = "alswansotest3-rg"
collation = "SQL_Latin1_General_CP1_CI_AS"
proxy_override = "Proxy"
test_umi = '/subscriptions/e64f3e8e-ab91-4a65-8cdd-5cd2f47d00b4/resourceGroups/viparek/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testumi'
umi_list = '/subscriptions/e64f3e8e-ab91-4a65-8cdd-5cd2f47d00b4/resourceGroups/viparek/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testumi'
identity_type = ResourceIdType.system_assigned_user_assigned.value
user = admin_login
self.cmd('sql server create -g {} -n {} -l {} -i '
'--admin-user {} --admin-password {} --user-assigned-identity-id {} --identity-type {} --pid {}'
.format(resource_group_1, server_name, loc, user, admin_passwords[0], umi_list, identity_type,
test_umi),
checks=[
JMESPathCheck('name', server_name),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned, UserAssigned')])
# test show sql server
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name),
checks=[
JMESPathCheck('name', server_name),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', admin_login)])
self.cmd('sql server delete -g {} -n {} --yes'
.format(resource_group_1, server_name), checks=NoneCheck())
# verify that showing the sql server now fails because it was deleted
self.cmd('sql server show -g {} -n {}'
.format(resource_group_1, server_name),
expect_failure=True)
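# Covers virtual network rules: create (by subnet name or id), show, update, list, and delete.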
class SqlServerVnetMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
@SqlServerPreparer(location='eastus')
def test_sql_vnet_mgmt(self, resource_group, resource_group_location, server):
vnet_rule_1 = 'rule1'
vnet_rule_2 = 'rule2'
# Create vnet's - vnet1 and vnet2
vnetName1 = 'vnet1'
vnetName2 = 'vnet2'
subnetName = 'subnet1'
addressPrefix = '10.0.1.0/24'
endpoint = 'Microsoft.Sql'
# Vnet 1 without service endpoints to test ignore-missing-vnet-service-endpoint feature
self.cmd('network vnet create -g {} -n {}'.format(resource_group, vnetName1))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {}'
.format(resource_group, vnetName1, subnetName, addressPrefix))
vnet1 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName1, resource_group)).get_output_in_json()
vnet_id_1 = vnet1['id']
# Vnet 2
self.cmd('network vnet create -g {} -n {}'.format(resource_group, vnetName2))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {} --service-endpoints {}'
.format(resource_group, vnetName2, subnetName, addressPrefix, endpoint),
checks=JMESPathCheck('serviceEndpoints[0].service', 'Microsoft.Sql'))
vnet2 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName2, resource_group)).get_output_in_json()
vnet_id_2 = vnet2['id']
# test sql server vnet-rule create using subnet name and vnet name and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {} --vnet-name {} -i'
.format(vnet_rule_1, resource_group, server, subnetName, vnetName1))
# test sql server vnet-rule show rule 1
self.cmd('sql server vnet-rule show --name {} -g {} --server {}'
.format(vnet_rule_1, resource_group, server),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule create using subnet id
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_2, resource_group, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 1 with vnet 2
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_1, resource_group, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 2 with vnet 1 and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {} -i'
.format(vnet_rule_2, resource_group, server, vnet_id_1),
checks=[JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_1),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule list
self.cmd('sql server vnet-rule list -g {} --server {}'.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 2)])
# test sql server vnet-rule delete rule 1
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_1, resource_group, server),
checks=NoneCheck())
# test sql server vnet-rule delete rule 2
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_2, resource_group, server),
checks=NoneCheck())
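# Covers subscription-level usage queries: `sql list-usages` and `sql show-usage`.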
class SqlSubscriptionUsagesScenarioTest(ScenarioTest):
def test_sql_subscription_usages(self):
self.cmd('sql list-usages -l westus',
checks=[JMESPathCheckGreaterThan('length(@)', 0)])
self.cmd('sql show-usage -l westus -u ServerQuota',
checks=[
JMESPathCheck('name', 'ServerQuota'),
JMESPathCheckGreaterThan('limit', 0)])
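# Covers zone-redundancy behavior for databases and elastic pools at create and update time.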
class SqlZoneResilienceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
@SqlServerPreparer(location='eastus')
@AllowLargeResponse()
def test_sql_zone_resilient_database(self, resource_group, resource_group_location, server):
database_name = "createUnzonedUpdateToZonedDb"
database_name_2 = "createZonedUpdateToUnzonedDb"
database_name_3 = "updateNoParamForUnzonedDb"
database_name_4 = "updateNoParamForZonedDb"
# Test creating database with zone resilience set to false. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant {}'
.format(resource_group, server, database_name, "Premium", False),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with zone resilience set to true. Expect zone resilience to update to true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --zone-redundant'
.format(resource_group, server, database_name, 'P1'),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', True)])
# Test creating database with zone resilience set to true. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --z'
.format(resource_group, server, database_name_2, "Premium"),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --z {}'
.format(resource_group, server, database_name_2, 'P1', False),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', False)])
# Create database with no zone resilience set. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, database_name_3, "Premium"),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with no zone resilience set. Expect zone resilience to stay false.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(resource_group, server, database_name_3, 'P2'),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_3),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', False)])
# Create database with zone resilience set. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(resource_group, server, database_name_4, "Premium"),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_4),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with no zone resilience set. Expect zone resilience to stay true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(resource_group, server, database_name_4, 'P2'),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_4),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(location='eastus')
@SqlServerPreparer(location='eastus')
@AllowLargeResponse()
def test_sql_zone_resilient_pool(self, resource_group, resource_group_location, server):
pool_name = "createUnzonedUpdateToZonedPool"
pool_name_2 = "createZonedUpdateToUnzonedPool"
pool_name_3 = "updateNoParamForUnzonedPool"
pool_name_4 = "updateNoParamForZonedPool"
# Test creating pool with zone resilience set to false. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --z {}'
.format(resource_group, server, pool_name, "Premium", False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with zone resilience set to true. Expect zone resilience to update to true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --z'
.format(resource_group, server, pool_name))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('zoneRedundant', True)])
# Test creating pool with zone resilience set to true. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(resource_group, server, pool_name_2, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --zone-redundant {}'
.format(resource_group, server, pool_name_2, False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('zoneRedundant', False)])
# Create pool with no zone resilience set. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, pool_name_3, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with no zone resilience set. Expect zone resilience to stay false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
.format(resource_group, server, pool_name_3, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', False)])
# Create pool with zone resilience set. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(resource_group, server, pool_name_4, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with no zone resilience set. Expect zone resilience to stay true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
.format(resource_group, server, pool_name_4, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(resource_group, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(location='eastus2euap')
@SqlServerPreparer(location='eastus2euap')
@AllowLargeResponse()
def test_sql_zone_resilient_copy_hyperscale_database(self, resource_group, server):
# Set db names
source_non_zr_db_name = "sourceNonZrDb"
source_zr_db_name = "sourceZrDb"
copy_source_non_zr_true_param_db_name = "copySourceNonZrTrueParamDb"
copy_source_zr_false_param_db_name = "copySourceZrFalseParamDb"
copy_source_non_zr_no_param_db_name = "copySourceNonZrNoParamDb"
copy_source_zr_no_param_db_name = "copySourceZrNoParamDb"
# Create non zone redundant source vldb
# Verify created vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Geo)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {}'
.format(resource_group, server, source_non_zr_db_name, "Hyperscale", 'Gen5', 2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', source_non_zr_db_name),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Create zone redundant source vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify created vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {} --backup-storage-redundancy {} --zone-redundant {}'
.format(resource_group, server, source_zr_db_name, "Hyperscale", 'Gen5', 2, 'zone', True),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', source_zr_db_name),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Copy non zone redundant source vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify copied vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db copy -g {} --server {} --name {} --dest-name {} --backup-storage-redundancy {} --z'
.format(resource_group, server, source_non_zr_db_name, copy_source_non_zr_true_param_db_name, 'zone'),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', copy_source_non_zr_true_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Copy zone redundant source vldb with zone redundancy == false
# Verify copied vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Zone)
self.cmd('sql db copy -g {} --server {} --name {} --dest-name {} --zone-redundant {}'
.format(resource_group, server, source_zr_db_name, copy_source_zr_false_param_db_name, False),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', copy_source_zr_false_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', False)])
# Copy non zone redundant source vldb with no parameters passed in
# Verify copied vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Geo)
self.cmd('sql db copy -g {} --server {} --name {} --dest-name {}'
.format(resource_group, server, source_non_zr_db_name, copy_source_non_zr_no_param_db_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', copy_source_non_zr_no_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Copy zone redundant source vldb with no parameters passed in
# Verify copied vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db copy -g {} --server {} --name {} --dest-name {}'
.format(resource_group, server, source_zr_db_name, copy_source_zr_no_param_db_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', copy_source_zr_no_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(parameter_name="resource_group_pri", location='eastus2euap')
@SqlServerPreparer(parameter_name="server_name_pri", resource_group_parameter_name="resource_group_pri",location='eastus2euap')
@ResourceGroupPreparer(parameter_name="resource_group_sec", location='eastus2euap')
@SqlServerPreparer(parameter_name="server_name_sec", resource_group_parameter_name="resource_group_sec",location='eastus2euap')
@AllowLargeResponse()
def test_sql_zone_resilient_replica_hyperscale_database(self, resource_group_pri, server_name_pri, resource_group_sec, server_name_sec):
# Set db names
non_zr_db_name_1 = "nonZrDb1"
zr_db_name_1 = "zrDb1"
non_zr_db_name_2 = "nonZrDb2"
zr_db_name_2 = "zrDb2"
pri_non_zr_true_param_db_name = "priNonZrTrueParamDb"
pri_zr_false_param_db_name = "priZrFalseParamDb"
pri_non_zr_no_param_db_name = "priNonZrNoParamDb"
pri_zr_no_param_db_name = "priZrNoParamDb"
# Create non zone redundant primary vldb
# Verify created vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Geo)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {}'
.format(resource_group_pri, server_name_pri, non_zr_db_name_1, "Hyperscale", 'Gen5', 2),
checks=[
JMESPathCheck('resourceGroup', resource_group_pri),
JMESPathCheck('name', non_zr_db_name_1),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Create secondary vldb replica from non zone redundant primary vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify created secondary vldb replica has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db replica create -g {} -s {} -n {} --partner-resource-group {} --partner-server {} '
'--partner-database {} --backup-storage-redundancy {} --z'
.format(resource_group_pri, server_name_pri, non_zr_db_name_1,
resource_group_sec, server_name_sec, pri_non_zr_true_param_db_name, 'zone'),
checks=[
JMESPathCheck('name', pri_non_zr_true_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Create zone redundant primary vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify created vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {} --backup-storage-redundancy {} --zone-redundant {}'
.format(resource_group_pri, server_name_pri, zr_db_name_1, "Hyperscale", 'Gen5', 2, 'zone', True),
checks=[
JMESPathCheck('resourceGroup', resource_group_pri),
JMESPathCheck('name', zr_db_name_1),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Create secondary vldb replica from zone redundant primary vldb with zone redundancy == false
# Verify created secondary vldb replica has correct values (specifically zone redundancy == false and backup storage redundancy == Zone)
self.cmd('sql db replica create -g {} -s {} -n {} --partner-resource-group {} --partner-server {} '
'--partner-database {} --z {}'
.format(resource_group_pri, server_name_pri, zr_db_name_1,
resource_group_sec, server_name_sec, pri_zr_false_param_db_name, False),
checks=[
JMESPathCheck('name', pri_zr_false_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', False)])
# Create non zone redundant primary vldb
# Verify created vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Geo)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {}'
.format(resource_group_pri, server_name_pri, non_zr_db_name_2, "Hyperscale", 'Gen5', 2),
checks=[
JMESPathCheck('resourceGroup', resource_group_pri),
JMESPathCheck('name', non_zr_db_name_2),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Create secondary vldb replica from non zone redundant primary vldb with no parameters passed in
# Verify created secondary vldb replica has correct values (specifically zone redundancy == false and backup storage redundancy == geo)
self.cmd('sql db replica create -g {} -s {} -n {} --partner-resource-group {} --partner-server {} --partner-database {}'
.format(resource_group_pri, server_name_pri, non_zr_db_name_2,
resource_group_sec, server_name_sec, pri_non_zr_no_param_db_name),
checks=[
JMESPathCheck('name', pri_non_zr_no_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Create zone redundant primary vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify created vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {} --backup-storage-redundancy {} --zone-redundant'
.format(resource_group_pri, server_name_pri, zr_db_name_2, "Hyperscale", 'Gen5', 2, 'zone'),
checks=[
JMESPathCheck('resourceGroup', resource_group_pri),
JMESPathCheck('name', zr_db_name_2),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Create secondary vldb replica from zone redundant primary vldb with no parameters passed in
# Verify created secondary vldb replica has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db replica create -g {} -s {} -n {} --partner-resource-group {} --partner-server {} --partner-database {}'
.format(resource_group_pri, server_name_pri, zr_db_name_2,
resource_group_sec, server_name_sec, pri_zr_no_param_db_name),
checks=[
JMESPathCheck('name', pri_zr_no_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(location='eastus2euap')
@SqlServerPreparer(location='eastus2euap')
@AllowLargeResponse()
def test_sql_zone_resilient_restore_hyperscale_database(self, resource_group, server):
# Set db names
source_non_zr_db_name = "sourceNonZrDb"
source_zr_db_name = "sourceZrDb"
restore_source_non_zr_true_param_db_name = "restoreSourceNonZrTrueParamDb"
restore_source_zr_false_param_db_name = "restoreSourceZrFalseParamDb"
restore_source_non_zr_no_param_db_name = "restoreSourceNonZrNoParamDb"
restore_source_zr_no_param_db_name = "restoreSourceZrNoParamDb"
# Create non zone redundant source vldb
# Verify created vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Geo)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {}'
.format(resource_group, server, source_non_zr_db_name, "Hyperscale", 'Gen5', 2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', source_non_zr_db_name),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Create zone redundant source vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify created vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --family {} --capacity {} --backup-storage-redundancy {} --zone-redundant {}'
.format(resource_group, server, source_zr_db_name, "Hyperscale", 'Gen5', 2, 'zone', True),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', source_zr_db_name),
JMESPathCheck('edition', 'Hyperscale'),
JMESPathCheck('sku.tier', 'Hyperscale'),
JMESPathCheck('sku.family', 'Gen5'),
JMESPathCheck('sku.capacity', 2),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Restore non zone redundant source vldb with zone redundancy == true and backup storage redundancy == Zone
# Verify restored vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db restore -g {} --server {} --name {} --dest-name {} --time {} '
'--edition {} --family {} --capacity {} --backup-storage-redundancy {} --z'
.format(resource_group, server, source_non_zr_db_name, restore_source_non_zr_true_param_db_name, datetime.utcnow().isoformat(),
"Hyperscale", 'Gen5', 2, 'zone'),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_source_non_zr_true_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
# Restore zone redundant source vldb with zone redundancy == false
# Verify restored vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Zone)
self.cmd('sql db restore -g {} --server {} --name {} --dest-name {} --time {} --z {}'
.format(resource_group, server, source_zr_db_name, restore_source_zr_false_param_db_name, datetime.utcnow().isoformat(), False),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_source_zr_false_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', False)])
# Restore non zone redundant source vldb with no parameters passed in
# Verify restored vldb has correct values (specifically zone redundancy == false and backup storage redundancy == Geo)
self.cmd('sql db restore -g {} --server {} --name {} --dest-name {} --time {}'
.format(resource_group, server, source_non_zr_db_name, restore_source_non_zr_no_param_db_name, datetime.utcnow().isoformat()),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_source_non_zr_no_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Geo'),
JMESPathCheck('zoneRedundant', False)])
# Restore zone redundant source vldb with no parameters passed in
# Verify restored vldb has correct values (specifically zone redundancy == true and backup storage redundancy == Zone)
self.cmd('sql db restore -g {} --server {} --name {} --dest-name {} --time {}'
.format(resource_group, server, source_zr_db_name, restore_source_zr_no_param_db_name, datetime.utcnow().isoformat()),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', restore_source_zr_no_param_db_name),
JMESPathCheck('requestedBackupStorageRedundancy', 'Zone'),
JMESPathCheck('zoneRedundant', True)])
class SqlDBMaintenanceScenarioTest(ScenarioTest):
DEFAULT_MC = "SQL_Default"
MDB1 = "SQL_EastUS2_DB_1"
MDB2 = "SQL_EastUS2_DB_2"
def _get_full_maintenance_id(self, name):
return "/subscriptions/{}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/{}".format(
self.get_subscription_id(), name)
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
@AllowLargeResponse()
def test_sql_db_maintenance(self, resource_group, resource_group_location, server):
database_name_1 = "createDb1maintenance"
database_name_2 = "createDb2maintenance"
database_name_3 = "updateEnrollAndSwitchDb1maintenance"
# Test creating database with maintenance set to DB_1
self.cmd('sql db create -g {} --server {} --name {} --edition {} --maint-config-id {}'
.format(resource_group, server, database_name_1, "Premium", self.MDB1),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_1),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB1))])
# Test creating database with maintenance set to DB_2 (full id)
self.cmd('sql db create -g {} --server {} --name {} --edition {} --capacity {} --maint-config-id {}'
.format(resource_group, server, database_name_2, "Standard", 50,
self._get_full_maintenance_id(self.MDB2)),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Standard'),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('sku.capacity', 50),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB2))])
# Test creating database with no maintenance specified
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, database_name_3, "Standard"),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Standard'),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.DEFAULT_MC))])
# Test enrolling into maintenance
self.cmd('sql db update -g {} --server {} --name {} --edition {} --capacity {} -m {}'
.format(resource_group, server, database_name_3, "Premium", 125, self.MDB2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('sku.capacity', 125),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB2))])
# Test switching maintenance and enrolling into zone redundancy
self.cmd('sql db update -g {} --server {} --name {} -m {} --zone-redundant'
.format(resource_group, server, database_name_3, self._get_full_maintenance_id(self.MDB1)),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', True),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB1))])
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
@AllowLargeResponse()
def test_sql_elastic_pool_maintenance(self, resource_group, resource_group_location, server):
pool_name_1 = "createDb1maintenance"
pool_name_2 = "createDb2maintenance"
pool_name_3 = "updateEnrollAndSwitchDb1maintenance"
# Test creating elastic pool with maintenance set to DB_1
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --maint-config-id {}'
.format(resource_group, server, pool_name_1, "Premium", self.MDB1),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_1),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB1))])
# Test creating elastic pool with maintenance set to DB_2 (full id)
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --capacity {} --maint-config-id {}'
.format(resource_group, server, pool_name_2, "Standard", 100,
self._get_full_maintenance_id(self.MDB2)),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('sku.capacity', 100),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB2))])
# Test creating elastic pool with no maintenance specified
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, pool_name_3, "Premium"),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.DEFAULT_MC))])
# Test enrolling into maintenance
self.cmd('sql elastic-pool update -g {} --server {} --name {} --edition {} -m {}'
.format(resource_group, server, pool_name_3, "Premium", self.MDB2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('sku.capacity', 125),
JMESPathCheck('zoneRedundant', False),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB2))])
# Test switching maintenance and enrolling into zone redundancy
self.cmd('sql elastic-pool update -g {} --server {} --name {} --maint-config-id {} --zone-redundant'
.format(resource_group, server, pool_name_3, self._get_full_maintenance_id(self.MDB1)),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', True),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(self.MDB1))])
class SqlServerTrustGroupsScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ManagedInstancePreparer(parameter_name="mi1")
@ManagedInstancePreparer(parameter_name="mi2")
def test_sql_server_trust_groups(self, mi1, rg, mi2):
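# Server trust group (STG) lifecycle: create a group across the two prepared managed instances,
# show it, list it by instance and by location, then delete it.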
self.kwargs.update({
'loc': ManagedInstancePreparer.location,
'rg': rg,
'managed_instance_name_1': mi1,
'managed_instance_name_2': mi2
})
# Get the sql managed instances created by the preparers
managed_instance_1 = self.cmd('sql mi show -g {rg} -n {managed_instance_name_1}').get_output_in_json()
managed_instance_2 = self.cmd('sql mi show -g {rg} -n {managed_instance_name_2}').get_output_in_json()
self.kwargs.update({
'stg_name': 'stg-test',
'trust_scope': 'GlobalTransactions',
'mi1': managed_instance_1['id'],
'mi2': managed_instance_2['id'],
})
stg = self.cmd(
'az sql stg create -g {rg} -l {loc} --trust-scope {trust_scope} -n {stg_name} -m {mi1} {mi2}').get_output_in_json()
assert stg['name'] == 'stg-test'
self.cmd('az sql stg show -g {rg} -l {loc} -n {stg_name}').get_output_in_json()
stg_list = self.cmd('az sql stg list -g {rg} --instance-name {managed_instance_name_1}').get_output_in_json()
assert len(stg_list) == 1
stg_list = self.cmd('az sql stg list -g {rg} -l {loc}').get_output_in_json()
assert len(stg_list) >= 1
self.cmd('az sql stg delete -g {rg} -l {loc} -n {stg_name} --yes')
class SqlManagedInstanceCustomMaintenanceWindow(ScenarioTest):
MMI1 = "SQL_WestCentralUS_MI_1"
def _get_full_maintenance_id(self, name):
return "/subscriptions/{}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/{}".format(
self.get_subscription_id(), name)
def test_sql_managed_instance_cmw(self):
# Values of existing resources in order to test this feature
loc = 'westcentralus'
resource_group = ManagedInstancePreparer.group
subnet = ManagedInstancePreparer.subnet
####
self.kwargs.update({
'loc': loc,
'rg': resource_group,
'subnet': subnet,
'managed_instance_name': self.create_random_name(managed_instance_name_prefix,
managed_instance_name_max_length),
'username': 'admin123',
'admin_password': 'SecretPassword123',
'timezone_id': 'Central European Standard Time',
'license_type': 'LicenseIncluded',
'v_cores': 8,
'storage_size_in_gb': '128',
'edition': 'GeneralPurpose',
'family': 'Gen5',
'collation': ManagedInstancePreparer.collation,
'proxy_override': "Proxy",
'maintenance_id': self._get_full_maintenance_id(self.MMI1)
})
# test create sql managed instance with a custom maintenance window (FMW)
managed_instance = self.cmd('sql mi create -g {rg} -n {managed_instance_name} -l {loc} '
'-u {username} -p {admin_password} --subnet {subnet} --license-type {license_type} --capacity {v_cores} '
'--storage {storage_size_in_gb} --edition {edition} --family {family} --collation {collation} '
'--proxy-override {proxy_override} --public-data-endpoint-enabled --timezone-id "{timezone_id}" --maint-config-id "{maintenance_id}"',
checks=[
self.check('name', '{managed_instance_name}'),
self.check('resourceGroup', '{rg}'),
self.check('administratorLogin', '{username}'),
self.check('vCores', '{v_cores}'),
self.check('storageSizeInGb', '{storage_size_in_gb}'),
self.check('licenseType', '{license_type}'),
self.check('sku.tier', '{edition}'),
self.check('sku.family', '{family}'),
self.check('sku.capacity', '{v_cores}'),
self.check('identity', None),
self.check('collation', '{collation}'),
self.check('proxyOverride', '{proxy_override}'),
self.check('publicDataEndpointEnabled', 'True'),
self.check('maintenanceConfigurationId',
self._get_full_maintenance_id(self.MMI1))]).get_output_in_json()
# test delete sql managed instance
self.cmd('sql mi delete --ids {} --yes'
.format(managed_instance['id']), checks=NoneCheck())
class SqlManagedInstanceMgmtScenarioTest(ScenarioTest):
DEFAULT_MC = "SQL_Default"
MMI1 = "SQL_WestEurope_MI_1"
tag1 = "tagName1=tagValue1"
tag2 = "tagName2=tagValue2"
backup_storage_redundancy = "Local"
def _get_full_maintenance_id(self, name):
return "/subscriptions/{}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/{}".format(
self.get_subscription_id(), name)
@AllowLargeResponse()
@ManagedInstancePreparer(
tags=f"{tag1} {tag2}",
minimalTlsVersion="1.2",
otherParams=f"--bsr {backup_storage_redundancy}")
def test_sql_managed_instance_mgmt(self, mi, rg):
managed_instance_name_1 = mi
resource_group_1 = rg
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
tls1_2 = "1.2"
tls1_1 = "1.1"
user = admin_login
# test show sql managed instance 1
subnet = ManagedInstancePreparer.subnet
target_subnet = ManagedInstancePreparer.target_subnet
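# Recordings scrub the subscription ID to all zeros, so outside of live/recording runs
# normalize the expected subnet IDs to match the recorded values.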
if not (self.in_recording or self.is_live):
subnet = subnet.replace(ManagedInstancePreparer.subscription_id, "00000000-0000-0000-0000-000000000000")
target_subnet = target_subnet.replace(ManagedInstancePreparer.subscription_id, "00000000-0000-0000-0000-000000000000")
managed_instance_1 = self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name_1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('subnetId', subnet),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('vCores', ManagedInstancePreparer.v_core),
JMESPathCheck('storageSizeInGb', ManagedInstancePreparer.storage),
JMESPathCheck('licenseType', ManagedInstancePreparer.licence),
JMESPathCheck('sku.tier', ManagedInstancePreparer.edition),
JMESPathCheck('sku.family', ManagedInstancePreparer.family),
JMESPathCheck('sku.capacity', ManagedInstancePreparer.v_core),
JMESPathCheck('collation', ManagedInstancePreparer.collation),
JMESPathCheck('identity', None),
JMESPathCheck('publicDataEndpointEnabled', 'True'),
JMESPathCheck('minimalTlsVersion', tls1_2),
JMESPathCheck('tags', "{'tagName1': 'tagValue1', 'tagName2': 'tagValue2'}"),
JMESPathCheck('currentBackupStorageRedundancy', self.backup_storage_redundancy),
JMESPathCheck('maintenanceConfigurationId', self._get_full_maintenance_id(
self.DEFAULT_MC))]).get_output_in_json()
# test show sql managed instance 1 using id
self.cmd('sql mi show --ids {}'
.format(managed_instance_1['id']),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
# Managed instance becomes ready before the operation is completed. For that reason, we should wait
# for the operation to complete in order to proceed with testing.
if self.is_live:
sleep(120)
# test update sql managed_instance 1
self.cmd('sql mi update -g {} -n {} --admin-password {} -i'
.format(resource_group_1, managed_instance_name_1, admin_passwords[1]),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
# identity.type check removed for now: there is a known issue and the fix is currently being deployed
# JMESPathCheck('identity.type', 'SystemAssigned')
JMESPathCheck('administratorLogin', user)])
# test update without identity parameter, validate identity still exists
# also use --ids instead of -g/-n
self.cmd('sql mi update --ids {} --admin-password {}'
.format(managed_instance_1['id'], admin_passwords[0]),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
# identity.type check removed for now: there is a known issue and the fix is currently being deployed
# JMESPathCheck('identity.type', 'SystemAssigned')
JMESPathCheck('administratorLogin', user)])
# test update proxyOverride and publicDataEndpointEnabled
# test is currently removed because of its long execution time while waiting for SqlAliasStateMachine to complete
# self.cmd('sql mi update -g {} -n {} --proxy-override {} --public-data-endpoint-enabled {}'
# .format(resource_group_1, managed_instance_name_1, proxy_override_update, public_data_endpoint_enabled_update),
# checks=[
# JMESPathCheck('name', managed_instance_name_1),
# JMESPathCheck('resourceGroup', resource_group_1),
# JMESPathCheck('proxyOverride', proxy_override_update),
# JMESPathCheck('publicDataEndpointEnabled', public_data_endpoint_enabled_update)])
# test update minimalTlsVersion
self.cmd('sql mi update -g {} -n {} --minimal-tls-version {}'
.format(resource_group_1, managed_instance_name_1, tls1_1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('minimalTlsVersion', tls1_1)])
# test update managed instance tags
tag3 = "tagName3=tagValue3"
self.cmd('sql mi update -g {} -n {} --set tags.{}'
.format(resource_group_1, managed_instance_name_1, tag3),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('tags',
"{'tagName1': 'tagValue1', 'tagName2': 'tagValue2', 'tagName3': 'tagValue3'}")])
# test remove managed instance tags
self.cmd('sql mi update -g {} -n {} --remove tags.tagName1'
.format(resource_group_1, managed_instance_name_1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('tags', "{'tagName2': 'tagValue2', 'tagName3': 'tagValue3'}")])
# test override managed instance tags
self.cmd('sql mi update -g {} -n {} --tags {}'
.format(resource_group_1, managed_instance_name_1, self.tag1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('tags', "{'tagName1': 'tagValue1'}")])
# test clear managed instance tags by passing ""
self.cmd('sql mi update -g {} -n {} --tags ""'
.format(resource_group_1, managed_instance_name_1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('tags', {})])
# test cross-subnet update SLO with the subnet resource id
self.cmd('sql mi update -g {} -n {} --subnet {} --capacity {}'
.format(resource_group_1, managed_instance_name_1, target_subnet, ManagedInstancePreparer.target_subnet_vcores),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('subnetId', target_subnet)])
# test cross-subnet update SLO with subnet and vNet names
self.cmd('sql mi update -g {} -n {} --subnet {} --vnet-name {}'
.format(resource_group_1, managed_instance_name_1, ManagedInstancePreparer.subnet_name, ManagedInstancePreparer.vnet_name),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('subnetId', subnet)])
# test that listing sql managed instances in the subscription returns at least 1
self.cmd('sql mi list', checks=[JMESPathCheckGreaterThan('length(@)', 0)])
class SqlManagedInstanceBackupStorageRedundancyTest(ScenarioTest):
bsr_geo = "Geo"
@AllowLargeResponse()
@ManagedInstancePreparer(
otherParams=f"--bsr {bsr_geo}")
def test_sql_managed_instance_bsr(self, mi, rg):
managed_instance_name_1 = mi
resource_group_1 = rg
# test show sql managed instance 1
self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name_1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('currentBackupStorageRedundancy', self.bsr_geo),
JMESPathCheck('requestedBackupStorageRedundancy', self.bsr_geo)])
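# The instance can report ready before the previous operation fully completes; in live runs
# wait before requesting the next backup storage redundancy update.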
if self.is_live:
sleep(120)
bsr_local = "Local"
# Test update bsr to Local
self.cmd('sql mi update -g {} -n {} --bsr {} --yes'
.format(resource_group_1, managed_instance_name_1, bsr_local),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('currentBackupStorageRedundancy', bsr_local),
JMESPathCheck('requestedBackupStorageRedundancy', bsr_local)])
if self.is_live:
sleep(120)
# Test update bsr to Geo
self.cmd('sql mi update -g {} -n {} --bsr {} --yes'
.format(resource_group_1, managed_instance_name_1, self.bsr_geo),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('currentBackupStorageRedundancy', self.bsr_geo),
JMESPathCheck('requestedBackupStorageRedundancy', self.bsr_geo)])
class SqlManagedInstanceMgmtScenarioIdentityTest(ScenarioTest):
test_umi = '/subscriptions/{}/resourcegroups/{}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/mi-tooling-managed-identity'.format(ManagedInstancePreparer.subscription_id, ManagedInstancePreparer.group)
verify_umi_with_empty_uuid = '/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/{}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/mi-tooling-managed-identity'.format(ManagedInstancePreparer.group)
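# Expected value in playback, where recordings have the subscription ID scrubbed to all zeros.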
@AllowLargeResponse()
@ManagedInstancePreparer(
identity_type=ResourceIdType.system_assigned_user_assigned.value,
user_assigned_identity_id=test_umi,
pid=test_umi)
def test_sql_managed_instance_create_identity_mgmt(self, mi, rg):
managed_instance_name = mi
resource_group_1 = rg
# test show sql managed instance
self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name),
checks=[
JMESPathCheck('name', managed_instance_name),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck(
'primaryUserAssignedIdentityId',
self.test_umi if self.in_recording or self.is_live else self.verify_umi_with_empty_uuid
),
JMESPathCheck('identity.type', 'SystemAssigned,UserAssigned')]
)
class SqlManagedInstancePoolScenarioTest(ScenarioTest):
# Instance pool tests should be deprecated; they also take more than 5 hours to record.
@live_only()
@ManagedInstancePreparer()
def test_sql_instance_pool(self, mi, rg):
print("Starting instance pool tests")
instance_pool_name_1 = self.create_random_name(instance_pool_name_prefix, managed_instance_name_max_length)
instance_pool_name_2 = self.create_random_name(instance_pool_name_prefix, managed_instance_name_max_length)
license_type = ManagedInstancePreparer.licence
location = ManagedInstancePreparer.location
v_cores = ManagedInstancePreparer.v_core
edition = ManagedInstancePreparer.edition
family = ManagedInstancePreparer.family
resource_group = rg
subnet = ManagedInstancePreparer.subnet
num_pools = len(self.cmd('sql instance-pool list -g {}'.format(resource_group)).get_output_in_json())
# test create sql instance pool 1
self.cmd(
'sql instance-pool create -g {} -n {} -l {} '
'--subnet {} --license-type {} --capacity {} -e {} -f {}'.format(
resource_group, instance_pool_name_1, location, subnet, license_type, v_cores, edition, family),
checks=[
JMESPathCheck('name', instance_pool_name_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('vCores', v_cores),
JMESPathCheck('licenseType', license_type),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('sku.family', family)])
# test show sql instance pool
self.cmd('sql instance-pool show -g {} -n {}'
.format(resource_group, instance_pool_name_1),
checks=[
JMESPathCheck('name', instance_pool_name_1),
JMESPathCheck('resourceGroup', resource_group)])
# test updating tags of an instance pool
tag1 = "bar=foo"
tag2 = "foo=bar"
self.cmd('sql instance-pool update -g {} -n {} --tags {} {}'
.format(resource_group, instance_pool_name_1, tag1, tag2),
checks=[
JMESPathCheck('name', instance_pool_name_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('tags', "{'bar': 'foo', 'foo': 'bar'}")])
# test updating instance pool to clear tags by passing ""
self.cmd('sql instance-pool update -g {} -n {} --tags ""'
.format(resource_group, instance_pool_name_1),
checks=[
JMESPathCheck('name', instance_pool_name_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('tags', {})])
# Instance Pool 2
self.cmd(
'sql instance-pool create -g {} -n {} -l {} '
'--subnet {} --license-type {} --capacity {} -e {} -f {}'.format(
resource_group, instance_pool_name_2, location, subnet, license_type, v_cores, edition, family),
checks=[
JMESPathCheck('name', instance_pool_name_2),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('vCores', v_cores),
JMESPathCheck('licenseType', license_type),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('sku.family', family)])
# test show sql instance pool
self.cmd('sql instance-pool show -g {} -n {}'
.format(resource_group, instance_pool_name_2),
checks=[
JMESPathCheck('name', instance_pool_name_2),
JMESPathCheck('resourceGroup', resource_group)])
# test updating tags of an instance pool
tag1 = "bar=foo"
tag2 = "foo=bar"
self.cmd('sql instance-pool update -g {} -n {} --tags {} {}'
.format(resource_group, instance_pool_name_2, tag1, tag2),
checks=[
JMESPathCheck('name', instance_pool_name_2),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('tags', "{'bar': 'foo', 'foo': 'bar'}")])
# test updating instance pool to clear tags by passing ""
self.cmd('sql instance-pool update -g {} -n {} --tags ""'
.format(resource_group, instance_pool_name_2),
checks=[
JMESPathCheck('name', instance_pool_name_2),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('tags', {})])
self.cmd('sql instance-pool list -g {}'
.format(resource_group),
checks=[
JMESPathCheck('length(@)', num_pools + 2)])
# test delete sql instance pool 1
self.cmd('sql instance-pool delete -g {} -n {} --yes'
.format(resource_group, instance_pool_name_1), checks=NoneCheck())
# test show sql instance pool doesn't return anything
self.cmd('sql instance-pool show -g {} -n {}'
.format(resource_group, instance_pool_name_1),
expect_failure=True)
# test delete sql instance pool 2
self.cmd('sql instance-pool delete -g {} -n {} --yes --no-wait'
.format(resource_group, instance_pool_name_2), checks=NoneCheck())
# verify all instance pools created above have been deleted
self.cmd('sql instance-pool list -g {}'
.format(resource_group),
checks=[
JMESPathCheck('length(@)', num_pools)])
class SqlManagedInstanceTransparentDataEncryptionScenarioTest(ScenarioTest):
@ManagedInstancePreparer(
identity_type=ResourceIdType.system_assigned.value
)
def test_sql_mi_tdebyok(self, mi, rg):
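# TDE with a customer-managed key (BYOK): create a managed database, create a Key Vault and key,
# register the key on the instance, then rotate the encryption protector between ServiceManaged and AzureKeyVault.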
resource_prefix = 'sqltdebyok'
self.kwargs.update({
'loc': ManagedInstancePreparer.location,
'rg': rg,
'managed_instance_name': mi,
'database_name': self.create_random_name(resource_prefix, 20),
'collation': ManagedInstancePreparer.collation,
})
# get sql managed_instance
managed_instance = self.cmd('sql mi show -g {rg} -n {managed_instance_name}').get_output_in_json()
self.kwargs.update({
'mi_identity': managed_instance['identity']['principalId'],
'vault_name': self.create_random_name(resource_prefix, 24),
'key_name': self.create_random_name(resource_prefix, 32),
})
# create database
self.cmd('sql midb create -g {rg} --mi {managed_instance_name} -n {database_name} --collation {collation}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('name', '{database_name}'),
self.check('location', '{loc}'),
self.check('collation', '{collation}'),
self.check('status', 'Online')])
# create vault and acl server identity
self.cmd('keyvault create -g {rg} -n {vault_name} --enable-soft-delete true')
self.cmd(
'keyvault set-policy -g {rg} -n {vault_name} --object-id {mi_identity} --key-permissions wrapKey unwrapKey get list')
# create key
key_resp = self.cmd(
'keyvault key create -n {key_name} -p software --vault-name {vault_name}').get_output_in_json()
self.kwargs.update({
'kid': key_resp['key']['kid'],
})
# add server key
server_key_resp = self.cmd('sql mi key create -g {rg} --mi {managed_instance_name} -k {kid}',
checks=[
self.check('uri', '{kid}'),
self.check('serverKeyType', 'AzureKeyVault')])
self.kwargs.update({
'server_key_name': server_key_resp.get_output_in_json()['name'],
})
# validate show key
self.cmd('sql mi key show -g {rg} --mi {managed_instance_name} -k {kid}',
checks=[
self.check('uri', '{kid}'),
self.check('serverKeyType', 'AzureKeyVault'),
self.check('name', '{server_key_name}')])
# validate list key (should return 2 items)
self.cmd('sql mi key list -g {rg} --mi {managed_instance_name}',
checks=[JMESPathCheck('length(@)', 2)])
# validate encryption protector is service managed via show
self.cmd('sql mi tde-key show -g {rg} --mi {managed_instance_name}',
checks=[
self.check('serverKeyType', 'ServiceManaged'),
self.check('serverKeyName', 'ServiceManaged')])
# update encryption protector to akv key
self.cmd(
'sql mi tde-key set -g {rg} --mi {managed_instance_name} -t AzureKeyVault -k {kid}',
checks=[
self.check('serverKeyType', 'AzureKeyVault'),
self.check('serverKeyName', '{server_key_name}'),
self.check('uri', '{kid}')])
# validate encryption protector is akv via show
self.cmd('sql mi tde-key show -g {rg} --mi {managed_instance_name}',
checks=[
self.check('serverKeyType', 'AzureKeyVault'),
self.check('serverKeyName', '{server_key_name}'),
self.check('uri', '{kid}')])
# update encryption protector to service managed
self.cmd('sql mi tde-key set -g {rg} --mi {managed_instance_name} -t ServiceManaged',
checks=[
self.check('serverKeyType', 'ServiceManaged'),
self.check('serverKeyName', 'ServiceManaged')])
# validate encryption protector is service managed via show
self.cmd('sql mi tde-key show -g {rg} --mi {managed_instance_name}',
checks=[
self.check('serverKeyType', 'ServiceManaged'),
self.check('serverKeyName', 'ServiceManaged')])
class SqlManagedInstanceDbShortTermRetentionScenarioTest(ScenarioTest):
@ResourceGroupPreparer(random_name_length=17, name_prefix='clitest')
@ManagedInstancePreparer()
def test_sql_managed_db_short_retention(self, mi, rg):
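# Short-term retention policy: set and show on a live database, then on the same database after deletion via --deleted-time.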
resource_prefix = 'MIDBShortTermRetention'
self.kwargs.update({
'loc': ManagedInstancePreparer.location,
'managed_instance_name': mi,
'database_name': self.create_random_name(resource_prefix, 50),
'collation': ManagedInstancePreparer.collation,
'retention_days_inc': 14,
'retention_days_dec': 7,
'rg': rg
})
# create database
self.cmd('sql midb create -g {rg} --mi {managed_instance_name} -n {database_name} --collation {collation}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('name', '{database_name}'),
self.check('location', '{loc}'),
self.check('collation', '{collation}'),
self.check('status', 'Online')])
# test update short term retention on live database
self.cmd(
'sql midb short-term-retention-policy set -g {rg} --mi {managed_instance_name} -n {database_name} --retention-days {retention_days_inc}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('retentionDays', '{retention_days_inc}')])
# test get short term retention on live database
self.cmd('sql midb short-term-retention-policy show -g {rg} --mi {managed_instance_name} -n {database_name}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('retentionDays', '{retention_days_inc}')])
# Wait for first backup before dropping
_wait_until_first_backup_midb(self)
# Delete by group/server/name
self.cmd('sql midb delete -g {rg} --managed-instance {managed_instance_name} -n {database_name} --yes',
checks=[NoneCheck()])
# Get deleted database
deleted_databases = self.cmd('sql midb list-deleted -g {rg} --managed-instance {managed_instance_name}',
checks=[
self.greater_than('length(@)', 0)])
self.kwargs.update({
'deleted_time': _get_deleted_date(deleted_databases.json_value[0]).isoformat()
})
# test update short term retention on deleted database
self.cmd(
'sql midb short-term-retention-policy set -g {rg} --mi {managed_instance_name} -n {database_name} --retention-days {retention_days_dec} --deleted-time {deleted_time}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('retentionDays', '{retention_days_dec}')])
# test get short term retention on deleted database
self.cmd(
'sql midb short-term-retention-policy show -g {rg} --mi {managed_instance_name} -n {database_name} --deleted-time {deleted_time}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('retentionDays', '{retention_days_dec}')])
class SqlManagedInstanceDbLongTermRetentionScenarioTest(ScenarioTest):
@ManagedInstancePreparer()
def test_sql_managed_db_long_term_retention(self, mi, rg):
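# Long-term retention: set and show the LTR policy on a live database, then list LTR backups at location, instance, and database scope.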
resource_prefix = 'MIDBLongTermRetention'
self.kwargs.update({
'rg': rg,
'loc': ManagedInstancePreparer.location,
'managed_instance_name': mi,
'database_name': self.create_random_name(resource_prefix, 50),
'weekly_retention': 'P1W',
'monthly_retention': 'P1M',
'yearly_retention': 'P2M',
'week_of_year': 12,
'collation': ManagedInstancePreparer.collation
})
# create database
self.cmd('sql midb create -g {rg} --mi {managed_instance_name} -n {database_name} --collation {collation}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('name', '{database_name}'),
self.check('location', '{loc}'),
self.check('collation', '{collation}'),
self.check('status', 'Online')])
# test update long term retention on live database
self.cmd(
'sql midb ltr-policy set -g {rg} --mi {managed_instance_name} -n {database_name} --weekly-retention {weekly_retention} --monthly-retention {monthly_retention} --yearly-retention {yearly_retention} --week-of-year {week_of_year}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('weeklyRetention', '{weekly_retention}'),
self.check('monthlyRetention', '{monthly_retention}'),
self.check('yearlyRetention', '{yearly_retention}')])
# test get long term retention policy on live database
self.cmd(
'sql midb ltr-policy show -g {rg} --mi {managed_instance_name} -n {database_name}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('weeklyRetention', '{weekly_retention}'),
self.check('monthlyRetention', '{monthly_retention}'),
self.check('yearlyRetention', '{yearly_retention}')])
# test list long term retention backups for location
# with resource group
self.cmd(
'sql midb ltr-backup list -l {loc} -g {rg}',
checks=[
JMESPathCheckGreaterThan('length(@)', 0)])
# without resource group
self.cmd(
'sql midb ltr-backup list -l {loc}',
checks=[
JMESPathCheckGreaterThan('length(@)', 0)])
# test list long term retention backups for instance
# with resource group
self.cmd(
'sql midb ltr-backup list -l {loc} --mi {managed_instance_name} -g {rg}',
checks=[
self.check('length(@)', 0)])
# without resource group
self.cmd(
'sql midb ltr-backup list -l {loc} --mi {managed_instance_name}',
checks=[
self.check('length(@)', 0)])
# test list long term retention backups for database
# with resource group
self.cmd(
'sql midb ltr-backup list -l {loc} --mi {managed_instance_name} -d {database_name} -g {rg}',
checks=[
self.check('length(@)', 0)])
# without resource group
self.cmd(
'sql midb ltr-backup list -l {loc} --mi {managed_instance_name} -d {database_name}',
checks=[
self.check('length(@)', 0)])
# Milan: we need to think of a way to test restore with LTR, since this is not possible in live mode
# because some time has to pass after setting LTR before a backup shows up
#
# # setup for test show long term retention backup
# backup = self.cmd(
# 'sql midb ltr-backup list -l {loc} --mi {managed_instance_name} -d {database_name} --latest').get_output_in_json()
# self.kwargs.update({
# 'backup_name': backup[0]['name'],
# 'backup_id': backup[0]['id']
# })
# # test show long term retention backup
# self.cmd(
# 'sql midb ltr-backup show -l {loc} --mi {managed_instance_name} -d {database_name} -n {backup_name}',
# checks=[
# self.check('resourceGroup', '{rg}'),
# self.check('managedInstanceName', '{managed_instance_name}'),
# self.check('databaseName', '{database_name}'),
# self.check('name', '{backup_name}')])
# self.cmd(
# 'sql midb ltr-backup show --id {backup_id}',
# checks=[
# self.check('resourceGroup', '{rg}'),
# self.check('managedInstanceName', '{managed_instance_name}'),
# self.check('databaseName', '{database_name}'),
# self.check('name', '{backup_name}')])
# # test restore managed database from LTR backup
# self.kwargs.update({
# 'dest_database_name': 'cli-restore-ltr-backup-test2'
# })
# self.cmd(
# 'sql midb ltr-backup restore --backup-id \'{backup_id}\' --dest-database {dest_database_name} --dest-mi {managed_instance_name} --dest-resource-group {rg}',
# checks=[
# self.check('name', '{dest_database_name}')])
# # test delete long term retention backup
# self.cmd(
# 'sql midb ltr-backup delete -l {loc} --mi {managed_instance_name} -d {database_name} -n \'{backup_name}\' --yes',
# checks=[NoneCheck()])
class SqlManagedInstanceRestoreDeletedDbScenarioTest(ScenarioTest):
@ManagedInstancePreparer()
def test_sql_managed_deleted_db_restore(self, mi, rg):
resource_prefix = 'MIRestoreDeletedDB'
self.kwargs.update({
'loc': ManagedInstancePreparer.location,
'rg': rg,
'managed_instance_name': mi,
'database_name': self.create_random_name(resource_prefix, 50),
'restored_database_name': self.create_random_name(resource_prefix, 50),
'collation': ManagedInstancePreparer.collation
})
# create database
self.cmd('sql midb create -g {rg} --mi {managed_instance_name} -n {database_name} --collation {collation}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('name', '{database_name}'),
self.check('location', '{loc}'),
self.check('collation', '{collation}'),
self.check('status', 'Online')])
# Wait for first backup before dropping
_wait_until_first_backup_midb(self)
# Delete by group/server/name
self.cmd('sql midb delete -g {rg} --managed-instance {managed_instance_name} -n {database_name} --yes',
checks=[NoneCheck()])
# Get deleted database
deleted_databases = self.cmd('sql midb list-deleted -g {rg} --managed-instance {managed_instance_name}',
checks=[
self.greater_than('length(@)', 0)])
self.kwargs.update({
'deleted_time': _get_deleted_date(deleted_databases.json_value[0]).isoformat()
})
# test restore deleted database
self.cmd(
'sql midb restore -g {rg} --mi {managed_instance_name} -n {database_name} --dest-name {restored_database_name} --deleted-time {deleted_time} --time {deleted_time}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('name', '{restored_database_name}'),
self.check('status', 'Online')])
class SqlManagedInstanceDbMgmtScenarioTest(ScenarioTest):
@ManagedInstancePreparer()
def test_sql_managed_db_mgmt(self, mi, rg):
database_name = "cliautomationdb01"
database_name_restored = "restoredcliautomationdb01"
managed_instance_name_1 = mi
resource_group_1 = rg
loc = ManagedInstancePreparer.location
collation = ManagedInstancePreparer.collation
# test sql db commands
db1 = self.cmd('sql midb create -g {} --mi {} -n {} --collation {}'
.format(resource_group_1, managed_instance_name_1, database_name, collation),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc),
JMESPathCheck('collation', collation),
JMESPathCheck('status', 'Online')]).get_output_in_json()
time.sleep(
300) # Sleeping 5 minutes should be enough for the restore to be possible (Skipped under playback mode)
# test sql db restore command
db1 = self.cmd('sql midb restore -g {} --mi {} -n {} --dest-name {} --time {}'
.format(resource_group_1, managed_instance_name_1, database_name, database_name_restored,
datetime.utcnow().isoformat()),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_name_restored),
JMESPathCheck('location', loc),
JMESPathCheck('status', 'Online')]).get_output_in_json()
self.cmd('sql midb list -g {} --managed-instance {}'
.format(resource_group_1, managed_instance_name_1),
checks=[JMESPathCheck('length(@)', 2)])
# Show by group/managed_instance/database-name
self.cmd('sql midb show -g {} --managed-instance {} -n {}'
.format(resource_group_1, managed_instance_name_1, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('location', loc),
JMESPathCheck('collation', collation),
JMESPathCheck('status', 'Online')])
# Show by id
self.cmd('sql midb show --ids {}'
.format(db1['id']),
checks=[
JMESPathCheck('name', database_name_restored),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('location', loc),
JMESPathCheck('collation', collation),
JMESPathCheck('status', 'Online')])
# Delete by group/server/name
self.cmd('sql midb delete -g {} --managed-instance {} -n {} --yes'
.format(resource_group_1, managed_instance_name_1, database_name),
checks=[NoneCheck()])
# test show sql managed db doesn't return anything
self.cmd('sql midb show -g {} --managed-instance {} -n {}'
.format(resource_group_1, managed_instance_name_1, database_name),
expect_failure=True)
class SqlManagedInstanceAzureActiveDirectoryAdministratorScenarioTest(ScenarioTest):
    # This MI AAD test needs special AD setup; please contact the MI AAD team for a new recording.
def test_sql_mi_aad_admin(self):
print('Test is started...\n')
self.kwargs.update({
'oid': '03db4d3a-a1d3-42d1-8055-2452646dbc2a',
'oid2': '23716ccd-3bf5-4934-9773-20ce34909e2e',
'user': '[email protected]',
'user2': '[email protected]',
'managed_instance_name': "migrantpermissionstest",
'rg': "srbozovi_test"
})
print('Arguments are updated with login and sid data')
self.cmd('sql mi ad-admin create --mi {managed_instance_name} -g {rg} -i {oid} -u {user}',
checks=[
self.check('login', '{user}'),
self.check('sid', '{oid}')])
print('Aad admin is set...\n')
self.cmd('sql mi ad-admin list --mi {managed_instance_name} -g {rg}',
checks=[
self.check('[0].login', '{user}'),
self.check('[0].sid', '{oid}')])
print('Get aad admin...\n')
self.cmd('sql mi ad-admin update --mi {managed_instance_name} -g {rg} -u {user2} -i {oid2}',
checks=[
self.check('login', '{user2}'),
self.check('sid', '{oid2}')])
print('Aad admin is updated...\n')
self.cmd('sql mi ad-admin delete --mi {managed_instance_name} -g {rg}')
print('Aad admin is deleted...\n')
self.cmd('sql mi ad-admin list --mi {managed_instance_name} -g {rg}',
checks=[
self.check('login', None)])
print('Test is finished...\n')
class SqlManagedInstanceAzureADOnlyAuthenticationsScenarioTest(ScenarioTest):
    # This MI AAD test needs special AD setup; please contact the MI AAD team for a new recording.
def test_sql_mi_ad_only_auth(self):
print('Test is started...\n')
self.kwargs.update({
'oid': '03db4d3a-a1d3-42d1-8055-2452646dbc2a',
'user': '[email protected]',
'managed_instance_name': "migrantpermissionstest",
'rg': "srbozovi_test"
})
print('Arguments are updated with login and sid data')
self.cmd('sql mi ad-admin create --mi {managed_instance_name} -g {rg} -i {oid} -u {user}',
checks=[
self.check('login', '{user}'),
self.check('sid', '{oid}')])
self.cmd('sql mi ad-only-auth enable -n {managed_instance_name} -g {rg}', checks=[])
self.cmd('sql mi ad-only-auth disable -n {managed_instance_name} -g {rg}', checks=[])
self.cmd('sql mi ad-only-auth get -n {managed_instance_name} -g {rg}', checks=[])
class SqlFailoverGroupMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1",
location='westeurope')
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_2", location='eastus')
def test_sql_failover_group_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2):
# helper class so that it's clear which servers are in which groups
        class ServerInfo(object):  # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
from azure.cli.core.commands.client_factory import get_subscription_id
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_2, resource_group_location_2)
failover_group_name = "fgclitest16578-lulu"
database_name = "db1"
server2_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Sql/servers/{}".format(
get_subscription_id(self.cli_ctx),
resource_group_2,
server_name_2)
# Create database on primary server
self.cmd('sql db create -g {} --server {} --name {}'
.format(s1.group, s1.name, database_name),
checks=[
JMESPathCheck('resourceGroup', s1.group),
JMESPathCheck('name', database_name)
])
# Create Failover Group
self.cmd(
'sql failover-group create -n {} -g {} -s {} --partner-resource-group {} --partner-server {} --failover-policy Automatic --grace-period 2'
.format(failover_group_name, s1.group, s1.name, s2.group, s2.name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('resourceGroup', s1.group),
JMESPathCheck('partnerServers[0].id', server2_id),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 120),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 0)
])
# List of all failover groups on the primary server
self.cmd('sql failover-group list -g {} -s {}'
.format(s1.group, s1.name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', failover_group_name),
JMESPathCheck('[0].replicationRole', 'Primary')
])
# Get Failover Group on a partner server and check if role is secondary
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 120),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('replicationRole', 'Secondary'),
JMESPathCheck('length(databases)', 0)
])
if self.in_recording:
time.sleep(60)
# Update Failover Group
self.cmd('sql failover-group update -g {} -s {} -n {} --grace-period 3 --add-db {}'
.format(s1.group, s1.name, failover_group_name, database_name),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 1)
])
# Check if properties got propagated to secondary server
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('replicationRole', 'Secondary'),
JMESPathCheck('length(databases)', 1)
])
# Check if database is created on partner side
self.cmd('sql db list -g {} -s {}'
.format(s2.group, s2.name),
checks=[
JMESPathCheck('length(@)', 2)
])
if self.in_recording:
time.sleep(60)
# Update Failover Group failover policy to Manual
self.cmd('sql failover-group update -g {} -s {} -n {} --failover-policy Manual'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Manual'),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 1)
])
        # Fail over the Failover Group
self.cmd('sql failover-group set-primary -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name))
        # The failover operation is completed when the new primary is promoted to the primary role,
        # but there is an async part that makes the old primary a new secondary,
        # and we have to wait for this to complete if we are recording the test
if self.in_recording:
time.sleep(60)
# Check the roles of failover groups to confirm failover happened
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
# Fail back to original server
self.cmd('sql failover-group set-primary -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name))
        # The failover operation is completed when the new primary is promoted to the primary role,
        # but there is an async part that makes the old primary a new secondary,
        # and we have to wait for this to complete if we are recording the test
if self.in_recording:
time.sleep(60)
# Check the roles of failover groups to confirm failover happened
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
# Do no-op failover to the same server
self.cmd('sql failover-group set-primary -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name))
# Check the roles of failover groups to confirm failover didn't happen
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
# Remove database from failover group
self.cmd('sql failover-group update -g {} -s {} -n {} --remove-db {}'
.format(s1.group, s1.name, failover_group_name, database_name),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Manual'),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 0)
])
# Check if database got removed
self.cmd('sql db show -g {} -s {} -n {}'
.format(s2.group, s2.name, database_name),
checks=[
JMESPathCheck('[0].failoverGroupId', 'None')
])
# Drop failover group
self.cmd('sql failover-group delete -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name))
# Check if failover group really got dropped
self.cmd('sql failover-group list -g {} -s {}'
.format(s1.group, s1.name),
checks=[
JMESPathCheck('length(@)', 0)
])
self.cmd('sql failover-group list -g {} -s {}'
.format(s2.group, s2.name),
checks=[
JMESPathCheck('length(@)', 0)
])
class SqlVirtualClusterMgmtScenarioTest(ScenarioTest):
@ManagedInstancePreparer()
def test_sql_virtual_cluster_mgmt(self, mi, rg):
subnet = ManagedInstancePreparer.subnet
self.kwargs.update({
'loc': ManagedInstancePreparer.location,
'subnet_id': subnet,
'rg': rg
})
if not (self.in_recording or self.is_live):
self.kwargs.update({
'subnet_id': subnet.replace(ManagedInstancePreparer.subscription_id,
"00000000-0000-0000-0000-000000000000")
})
# test list sql virtual cluster in the subscription, should be at least 1
virtual_clusters = self.cmd('sql virtual-cluster list',
checks=[
self.greater_than('length(@)', 0),
self.greater_than('length([?subnetId == \'{subnet_id}\'])', 1),
self.check('[?subnetId == \'{subnet_id}\'].location | [0]', '{loc}'),
self.check('[?subnetId == \'{subnet_id}\'].resourceGroup | [0]', '{rg}')])
# test list sql virtual cluster in the resource group, should be at least 1
virtual_clusters = self.cmd('sql virtual-cluster list -g {rg}',
checks=[
self.greater_than('length(@)', 0),
self.greater_than('length([?subnetId == \'{subnet_id}\'])', 1),
self.check('[?subnetId == \'{subnet_id}\'].location | [0]', '{loc}'),
self.check('[?subnetId == \'{subnet_id}\'].resourceGroup | [0]',
'{rg}')]).get_output_in_json()
virtual_cluster = next(vc for vc in virtual_clusters if vc['subnetId'] == self._apply_kwargs('{subnet_id}'))
self.kwargs.update({
'vc_name': virtual_cluster['name']
})
# test show sql virtual cluster
self.cmd('sql virtual-cluster show -g {rg} -n {vc_name}',
checks=[
self.check('location', '{loc}'),
self.check('name', '{vc_name}'),
self.check('resourceGroup', '{rg}'),
self.check('subnetId', '{subnet_id}')])
class SqlInstanceFailoverGroupMgmtScenarioTest(ScenarioTest):
def test_sql_instance_failover_group_mgmt(self):
resource_group_name = ManagedInstancePreparer.group
primary_name = ManagedInstancePreparer.primary_name
secondary_name = ManagedInstancePreparer.secondary_name
secondary_group = ManagedInstancePreparer.sec_group
failover_group_name = ManagedInstancePreparer.fog_name
primary_location = ManagedInstancePreparer.location
secondary_location = ManagedInstancePreparer.sec_location
# Create Failover Group
self.cmd(
'sql instance-failover-group create -n {} -g {} --mi {} --partner-resource-group {} --partner-mi {} --failover-policy Automatic --grace-period 2'
.format(failover_group_name, resource_group_name, primary_name, secondary_group,
secondary_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('resourceGroup', resource_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 120)
])
# Get Instance Failover Group on a partner managed instance and check if role is secondary
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(secondary_group, secondary_location, failover_group_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 120),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('replicationRole', 'Secondary')
])
# Update Failover Group
self.cmd('sql instance-failover-group update -g {} -n {} -l {} --grace-period 3 '
.format(resource_group_name, failover_group_name, primary_location),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled')
])
# Check if properties got propagated to secondary server
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(secondary_group, secondary_location, failover_group_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('replicationRole', 'Secondary')
])
# Update Failover Group failover policy to Manual
self.cmd('sql instance-failover-group update -g {} -n {} -l {} --failover-policy Manual'
.format(resource_group_name, failover_group_name, primary_location),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Manual'),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled')
])
        # Fail over the Failover Group
self.cmd('sql instance-failover-group set-primary -g {} -n {} -l {} '
.format(secondary_group, failover_group_name, secondary_location))
        # The failover operation is completed when the new primary is promoted to the primary role,
        # but there is an async part that makes the old primary a new secondary,
        # and we have to wait for this to complete if we are recording the test
if self.in_recording:
time.sleep(30)
# Check the roles of failover groups to confirm failover happened
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(secondary_group, secondary_location, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(resource_group_name, primary_location, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
# Fail back to original server
self.cmd('sql instance-failover-group set-primary --allow-data-loss -g {} -n {} -l {}'
.format(resource_group_name, failover_group_name, primary_location))
        # The failover operation is completed when the new primary is promoted to the primary role,
        # but there is an async part that makes the old primary a new secondary,
        # and we have to wait for this to complete if we are recording the test
if self.in_recording:
time.sleep(30)
# Check the roles of failover groups to confirm failover happened
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(secondary_group, secondary_location, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(resource_group_name, primary_location, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
# Do no-op failover to the same server
self.cmd('sql instance-failover-group set-primary -g {} -n {} -l {}'
.format(resource_group_name, failover_group_name, primary_location))
# Check the roles of failover groups to confirm failover didn't happen
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(secondary_group, secondary_location, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
self.cmd('sql instance-failover-group show -g {} -l {} -n {}'
.format(resource_group_name, primary_location, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
# Drop failover group
self.cmd('sql instance-failover-group delete -g {} -l {} -n {}'
.format(resource_group_name, primary_location, failover_group_name),
checks=NoneCheck())
class SqlDbSensitivityClassificationsScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
@StorageAccountPreparer(location='eastus2')
def test_sql_db_sensitivity_classifications(self, resource_group, resource_group_location, server, storage_account):
from azure.mgmt.sql.models import SampleName
database_name = "sensitivityclassificationsdb01"
# create db
self.cmd('sql db create -g {} -s {} -n {} --sample-name {}'
.format(resource_group, server, database_name, SampleName.adventure_works_lt),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# list current sensitivity classifications
self.cmd('sql db classification list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', 0)]) # No classifications are set at the beginning
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# enable ADS - (required to use data classification)
disabled_alerts_input = 'Sql_Injection_Vulnerability Access_Anomaly'
disabled_alerts_expected = ['Sql_Injection_Vulnerability', 'Access_Anomaly']
email_addresses_input = '[email protected] [email protected]'
email_addresses_expected = ['[email protected]', '[email protected]']
email_account_admins = True
state_enabled = 'Enabled'
retention_days = 30
self.cmd('sql db threat-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint {}'
' --retention-days {} --email-addresses {} --disabled-alerts {}'
' --email-account-admins {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, email_addresses_input,
disabled_alerts_input, email_account_admins),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# list recommended sensitivity classifications
expected_recommended_sensitivityclassifications_count = 15
self.cmd('sql db classification recommendation list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', expected_recommended_sensitivityclassifications_count)])
schema_name = 'SalesLT'
table_name = 'Customer'
column_name = 'FirstName'
# disable the recommendation for SalesLT/Customer/FirstName
self.cmd('sql db classification recommendation disable -g {} -s {} -n {} --schema {} --table {} --column {}'
.format(resource_group, server, database_name, schema_name, table_name, column_name))
# list recommended sensitivity classifications
self.cmd('sql db classification recommendation list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', expected_recommended_sensitivityclassifications_count - 1)])
# re-enable the disabled recommendation
self.cmd('sql db classification recommendation enable -g {} -s {} -n {} --schema {} --table {} --column {}'
.format(resource_group, server, database_name, schema_name, table_name, column_name))
        # list recommended sensitivity classifications
self.cmd('sql db classification recommendation list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', expected_recommended_sensitivityclassifications_count)])
# update the sensitivity classification
information_type = 'Name'
label_name = 'Confidential - GDPR'
response = self.cmd(
'sql db classification update -g {} -s {} -n {} --schema {} --table {} --column {} --information-type {} --label "{}"'
.format(resource_group, server, database_name, schema_name, table_name, column_name, information_type,
label_name),
checks=[
JMESPathCheck('informationType', information_type),
JMESPathCheck('labelName', label_name)]).get_output_in_json()
information_type_id = response['informationTypeId']
label_id = response['labelId']
# get the classified column
self.cmd('sql db classification show -g {} -s {} -n {} --schema {} --table {} --column {}'
.format(resource_group, server, database_name, schema_name, table_name, column_name),
checks=[
JMESPathCheck('informationType', information_type),
JMESPathCheck('labelName', label_name),
JMESPathCheck('informationTypeId', information_type_id),
JMESPathCheck('labelId', label_id)])
# list recommended classifications
self.cmd('sql db classification recommendation list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', expected_recommended_sensitivityclassifications_count - 1)])
# list current classifications
self.cmd('sql db classification list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', 1)])
# delete the label
self.cmd('sql db classification delete -g {} -s {} -n {} --schema {} --table {} --column {}'
.format(resource_group, server, database_name, schema_name, table_name, column_name))
# list current labels
self.cmd('sql db classification list -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', 0)])
class SqlServerMinimalTlsVersionScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
def test_sql_server_minimal_tls_version(self, resource_group):
server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
resource_group_location = "eastus"
tls1_2 = "1.2"
tls1_1 = "1.1"
# test create sql server with minimal required parameters
self.cmd('sql server create -g {} --name {} '
'--admin-user {} --admin-password {} --minimal-tls-version {}'
.format(resource_group, server_name_1, admin_login, admin_passwords[0], tls1_2),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('location', resource_group_location),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('minimalTlsVersion', tls1_2)]).get_output_in_json()
# test update sql server
self.cmd('sql server update -g {} --name {} --minimal-tls-version {} -i'
.format(resource_group, server_name_1, tls1_1),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('minimalTlsVersion', tls1_1)])
class SqlManagedInstanceFailoverScenarionTest(ScenarioTest):
@ManagedInstancePreparer()
def test_sql_mi_failover_mgmt(self, mi, rg):
self.kwargs.update({
'resource_group': rg,
'managed_instance_name': mi
})
# Wait for 5 minutes so that first full backup is created
if self.in_recording or self.is_live:
sleep(5 * 60)
# Failover managed instance primary replica
self.cmd('sql mi failover -g {resource_group} -n {managed_instance_name}', checks=NoneCheck())
class SqlManagedDatabaseLogReplayScenarionTest(ScenarioTest):
@live_only()
@AllowLargeResponse()
@ManagedInstancePreparer()
def test_sql_midb_logreplay_mgmt(self, mi, rg):
managed_database_name = 'logReplayTestDb'
managed_database_name1 = 'logReplayTestDb1'
        # Uploading a .bak file to blob storage is restricted by the testing framework, so the only mitigation for now is to use hard-coded values
self.kwargs.update({
'storage_account': 'toolingsa',
'container_name': 'tools',
'resource_group': rg,
'managed_instance_name': mi,
'managed_database_name': managed_database_name,
'managed_database_name1': managed_database_name1,
'storage_uri': 'https://toolingsa.blob.core.windows.net/tools',
'last_backup_name': 'full.bak'
})
from datetime import datetime, timedelta
self.kwargs['expiry'] = (datetime.utcnow() + timedelta(hours=12)).strftime('%Y-%m-%dT%H:%MZ')
self.kwargs['storage_key'] = str(self.cmd(
'az storage account keys list -n {storage_account} -g {resource_group} --query "[0].value"').output)
self.kwargs['sas_token'] = self.cmd(
'storage container generate-sas --account-name {storage_account} --account-key {storage_key} --name {container_name} --permissions rl --expiry {expiry} -otsv').output.strip()
# Start Log Replay Service
self.cmd(
'sql midb log-replay start -g {resource_group} --mi {managed_instance_name} -n {managed_database_name} --ss {sas_token} --su {storage_uri} --no-wait',
checks=NoneCheck())
if self.in_recording or self.is_live:
sleep(10)
self.cmd(
'sql midb log-replay wait -g {resource_group} --mi {managed_instance_name} -n {managed_database_name} --exists')
# Complete log replay service
self.cmd(
'sql midb log-replay complete -g {resource_group} --mi {managed_instance_name} -n {managed_database_name} --last-bn {last_backup_name}',
checks=NoneCheck())
if self.in_recording or self.is_live:
sleep(60)
# Verify status is Online
self.cmd('sql midb show -g {resource_group} --mi {managed_instance_name} -n {managed_database_name}',
checks=[
JMESPathCheck('status', 'Online')])
# Cancel test for Log replay
# Start Log Replay Service
self.cmd(
'sql midb log-replay start -g {resource_group} --mi {managed_instance_name} -n {managed_database_name1} --ss {sas_token} --su {storage_uri} --no-wait',
checks=NoneCheck())
self.cmd(
'sql midb log-replay show -g {resource_group} --mi {managed_instance_name} -n {managed_database_name1}',
checks=[
JMESPathCheck('type', 'Microsoft.Sql/managedInstances/databases/restoreDetails'),
JMESPathCheck('resourceGroup', rg)])
# Wait a minute to start restoring
if self.in_recording or self.is_live:
sleep(60)
# Cancel log replay service
self.cmd(
'sql midb log-replay stop -g {resource_group} --mi {managed_instance_name} -n {managed_database_name1} --yes',
checks=NoneCheck())
class SqlLedgerDigestUploadsScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer()
@SqlServerPreparer(location='westcentralus')
def test_sql_ledger(self, resource_group, server):
db_name = self.create_random_name("sqlledgerdb", 20)
endpoint = "https://test.confidential-ledger.azure.com"
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name))
# validate ledger digest uploads is disabled by default
self.cmd('sql db ledger-digest-uploads show -g {} -s {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('state', 'Disabled')])
# enable uploads to ACL dummy instance
self.cmd('sql db ledger-digest-uploads enable -g {} -s {} --name {} --endpoint {}'
.format(resource_group, server, db_name, endpoint))
sleep(2)
# validate setting through show command
self.cmd('sql db ledger-digest-uploads show -g {} -s {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('state', 'Enabled'),
JMESPathCheck('digestStorageEndpoint', endpoint)])
# disable ledger digest uploads
self.cmd('sql db ledger-digest-uploads disable -g {} -s {} --name {}'
.format(resource_group, server, db_name))
sleep(2)
# validate setting through show command
self.cmd('sql db ledger-digest-uploads show -g {} -s {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('state', 'Disabled')])
|
py | 1a3fbb8caa1ad53eecb7c7bd2612c6cdda7638a2 | def async_migrations_ok() -> bool:
from posthog.async_migrations.runner import is_posthog_version_compatible
from posthog.models.async_migration import AsyncMigration, MigrationStatus
for migration in AsyncMigration.objects.all():
migration_completed_or_running = migration.status in [
MigrationStatus.CompletedSuccessfully,
MigrationStatus.Running,
]
migration_in_range = is_posthog_version_compatible(migration.posthog_min_version, migration.posthog_max_version)
if not migration_completed_or_running and migration_in_range:
return False
return True
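# A usage sketch (assumption: the surrounding PostHog preflight/health check is
# not shown here): an instance would only be reported healthy when every async
# migration required for this PostHog version has completed or is running.
#
#     if not async_migrations_ok():
#         ...  # e.g. fail the readiness/preflight check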
|
py | 1a3fbbd60bb61c28587afbbbc261453aac81375d | from experiment import Experiment
import logging
import time
from traitlets import Enum, Float, Int, Unicode
try:
from tqdm import trange
except ImportError:
trange = range
class Main(Experiment):
#
# Description of the experiment. Used in the help message.
#
description = Unicode("Basic experiment.")
#
# Overwrite results path format. Supported vars: base_path, script_name, git, date, time
#
results_path_format = Unicode("{base_path}/{script_name}/{date}_{time}")
#
# Parameters of experiment
#
epochs = Int(100, config=True, help="Number of epochs")
lr = Float(0.1, config=True, help="Learning rate of training")
loss_type = Enum(("mse", "l1"), config=True, default_value="mse", help="Loss type.")
def run(self):
"""Running the experiment"""
logging.info("Starting experiment")
logging.info("Using {} loss".format(self.loss_type))
loss = 100
for i in trange(self.epochs):
loss = loss * self.lr
time.sleep(.5)
logging.info("Experiment finished")
if __name__ == "__main__":
main = Main()
main.initialize()
main.start()
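# Hypothetical invocation (assumption: this Experiment base class follows the
# traitlets Application convention of exposing configurable traits as
# --ClassName.trait=value flags; the exact syntax depends on the `experiment`
# package in use):
#
#     python main.py --Main.epochs=20 --Main.lr=0.5 --Main.loss_type=l1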
|
py | 1a3fbc51116ce5027deac213bb25bc62235330d6 | #!/usr/bin/python
print("Hello World") |
py | 1a3fbc72f18323894c80e4ab7b9ff463e8743e60 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test a node with the -disablewallet option.
- Test that validateaddress RPC works when running with -disablewallet
- Test that it is not possible to mine to an invalid address.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-disablewallet"]]
def run_test (self):
# Make sure wallet is really disabled
assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo)
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mbTYaNZm7TaPt5Du65aPsL8FNTktufYydC')
assert(x['isvalid'] == True)
if __name__ == '__main__':
DisableWalletTest ().main ()
|
py | 1a3fbcf5890be26c57e5a049e162950b88ecff2d | import discord
from discord.ext import commands
class Pingmodule():
def __init__(self, bot):
self.bot = bot
async def on_message(self, message):
if self.bot.user in message.mentions:
await message.add_reaction(':ping:456793379808870401')
def setup(bot):
bot.add_cog(Pingmodule(bot))
|
py | 1a3fbd0432c1e65d7f1b91b5f68701296cf67383 | # SPDX-License-Identifier: BSD-3-Clause
from typing import ClassVar, Mapping, cast
from softfab.ControlPage import ControlPage
from softfab.Page import InvalidRequest, PageProcessor
from softfab.configlib import ConfigDB
from softfab.joblib import JobDB
from softfab.pageargs import DictArg, PageArgs, StrArg
from softfab.request import Request
from softfab.response import Response
from softfab.users import User, checkPrivilege
from softfab.xmlgen import xml
class LoadExecuteDefault_POST(ControlPage['LoadExecuteDefault_POST.Arguments',
'LoadExecuteDefault_POST.Processor']):
class Arguments(PageArgs):
config = StrArg()
prod = DictArg(StrArg())
local = DictArg(StrArg())
param = DictArg(StrArg())
comment = StrArg('')
class Processor(PageProcessor['LoadExecuteDefault_POST.Arguments']):
configDB: ClassVar[ConfigDB]
jobDB: ClassVar[JobDB]
async def process(self,
req: Request['LoadExecuteDefault_POST.Arguments'],
user: User
) -> None:
args = req.args
products = cast(Mapping[str, str], args.prod)
localAt = cast(Mapping[str, str], args.local)
params = cast(Mapping[str, str], args.param)
if 'notify' in params and ':' not in params['notify']:
raise InvalidRequest('Invalid value of \'notify\' parameter')
try:
jobConfig = self.configDB[args.config]
except KeyError:
raise InvalidRequest(
f'Configuration "{args.config}" does not exist'
)
else:
jobDB = self.jobDB
for job in jobConfig.createJobs(
user.name, None, products, params, localAt
):
job.comment += '\n' + args.comment
jobDB.add(job)
def checkAccess(self, user: User) -> None:
checkPrivilege(user, 'j/c', 'start jobs')
async def writeReply(self, response: Response, proc: Processor) -> None:
response.writeXML(xml.ok)
|
py | 1a3fbd0e8414b657698e6ace3b53785b9400efb7 | #!/usr/bin/env python3
from pathlib import Path
from textwrap import indent
import hashlib
import json
import urllib.request
CMAKE_SHA256_URL_TEMPLATE = "https://cmake.org/files/v{minor}/cmake-{full}-SHA-256.txt"
CMAKE_URL_TEMPLATE = "https://github.com/Kitware/CMake/releases/download/v{full}/{file}"
CMAKE_VERSIONS = [
"3.19.6",
"3.19.5",
"3.18.6",
"3.17.5",
"3.16.9",
"3.15.7",
"3.14.7",
]
CMAKE_TARGETS = {
"Darwin-x86_64": [
"@platforms//cpu:x86_64",
"@platforms//os:macos",
],
"Linux-aarch64": [
"@platforms//cpu:aarch64",
"@platforms//os:linux",
],
"Linux-x86_64": [
"@platforms//cpu:x86_64",
"@platforms//os:linux",
],
"macos-universal": [
"@platforms//os:macos",
],
"win32-x86": [
"@platforms//cpu:x86_32",
"@platforms//os:windows",
],
"win64-x64": [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
],
}
NINJA_URL_TEMPLATE = "https://github.com/ninja-build/ninja/releases/download/v{full}/ninja-{target}.zip"
NINJA_TARGETS = {
"linux": [
"@platforms//cpu:x86_64",
"@platforms//os:linux",
],
"mac": [
"@platforms//cpu:x86_64",
"@platforms//os:macos",
],
"win": [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
],
}
NINJA_VERSIONS = (
"1.10.2",
"1.10.1",
"1.10.0",
"1.9.0",
"1.8.2",
)
REPO_DEFINITION = """\
maybe(
http_archive,
name = "{name}",
urls = [
"{url}",
],
sha256 = "{sha256}",
strip_prefix = "{prefix}",
build_file_content = {template}.format(
bin = "{bin}",
),
)
"""
TOOLCHAIN_REPO_DEFINITION = """\
# buildifier: leave-alone
maybe(
prebuilt_toolchains_repository,
name = "{name}",
repos = {repos},
tool = "{tool}",
)
"""
REGISTER_TOOLCHAINS = """\
native.register_toolchains(
{toolchains}
)
"""
BZL_FILE_TEMPLATE = """\
\"\"\" A U T O G E N E R A T E D -- D O N O T M O D I F Y
@generated
This file is generated by prebuilt_toolchains.py
\"\"\"
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@rules_foreign_cc//toolchains:prebuilt_toolchains_repository.bzl", "prebuilt_toolchains_repository")
_CMAKE_BUILD_FILE = \"\"\"\\
load("@rules_foreign_cc//toolchains/native_tools:native_tools_toolchain.bzl", "native_tool_toolchain")
package(default_visibility = ["//visibility:public"])
filegroup(
name = "cmake_data",
srcs = glob(
[
"**",
],
exclude = [
"WORKSPACE",
"WORKSPACE.bazel",
"BUILD",
"BUILD.bazel",
],
),
)
native_tool_toolchain(
name = "cmake_tool",
path = "bin/{bin}",
target = ":cmake_data",
)
\"\"\"
_NINJA_BUILD_FILE = \"\"\"\\
load("@rules_foreign_cc//toolchains/native_tools:native_tools_toolchain.bzl", "native_tool_toolchain")
package(default_visibility = ["//visibility:public"])
filegroup(
name = "ninja_bin",
srcs = ["{{bin}}"],
)
native_tool_toolchain(
name = "ninja_tool",
path = "$(execpath :ninja_bin)",
target = ":ninja_bin",
)
\"\"\"
# buildifier: disable=unnamed-macro
def prebuilt_toolchains(cmake_version, ninja_version):
\"\"\"Register toolchains for pre-built cmake and ninja binaries
Args:
cmake_version (string): The target cmake version
ninja_version (string): The target ninja-build version
\"\"\"
_cmake_toolchains(cmake_version)
_ninja_toolchains(ninja_version)
_make_toolchains()
def _cmake_toolchains(version):
{cmake_definitions}
def _ninja_toolchains(version):
{ninja_definitions}
def _make_toolchains():
{make_definitions}
"""
def get_cmake_definitions() -> str:
"""Define a set of repositories and calls for registering `cmake` toolchains
Returns:
str: The Implementation of `_cmake_toolchains`
"""
archives = []
for version in CMAKE_VERSIONS:
major, minor, _patch = version.split(".")
version_archives = []
version_toolchains = {}
minor_version = "{}.{}".format(major, minor)
for line in urllib.request.urlopen(CMAKE_SHA256_URL_TEMPLATE.format(minor=minor_version, full=version)).readlines():
line = line.decode("utf-8").strip("\n ")
# Only take tar and zip files. The rest can't be easily decompressed.
if not line.endswith(".tar.gz") and not line.endswith(".zip"):
continue
# Only include the targets we care about.
plat_target = None
for target in CMAKE_TARGETS.keys():
if target in line:
plat_target = target
break
if not plat_target:
continue
sha256, file = line.split()
name = file.replace(".tar.gz", "").replace(".zip", "")
bin = "cmake.exe" if "win" in file.lower() else "cmake"
if "Darwin" in file or "macos" in file:
prefix = name + "/CMake.app/Contents"
else:
prefix = name
version_archives.append(
REPO_DEFINITION.format(
name=name,
sha256=sha256,
prefix=prefix,
url=CMAKE_URL_TEMPLATE.format(
full=version,
file=file
),
build="cmake",
template="_CMAKE_BUILD_FILE",
bin=bin,
)
)
version_toolchains.update({plat_target: name})
archives.append("\n".join(
[
" if \"{}\" == version:".format(version),
] + [indent(archive, " " * 8) for archive in version_archives])
)
toolchains_repos = {}
for target, name in version_toolchains.items():
toolchains_repos.update({name: CMAKE_TARGETS[target]})
archives.append(indent(TOOLCHAIN_REPO_DEFINITION.format(
name="cmake_{}_toolchains".format(version),
repos=indent(json.dumps(toolchains_repos, indent=4), " " * 4).lstrip(),
tool="cmake",
), " " * 8))
archives.append(indent(REGISTER_TOOLCHAINS.format(
toolchains="\n".join(
[indent("\"@cmake_{}_toolchains//:{}_toolchain\",".format(
version,
repo
), " " * 4) for repo in toolchains_repos])
), " " * 8))
archives.extend([
indent("return", " " * 8),
"",
])
archives.append(
indent("fail(\"Unsupported version: \" + str(version))", " " * 4))
return "\n".join([archive.rstrip(" ") for archive in archives])
def get_ninja_definitions() -> str:
"""Define a set of repositories and calls for registering `ninja` toolchains
Returns:
str: The Implementation of `_ninja_toolchains`
"""
archives = []
for version in NINJA_VERSIONS:
version_archives = []
version_toolchains = {}
for target in NINJA_TARGETS.keys():
url = NINJA_URL_TEMPLATE.format(
full=version,
target=target,
)
# Get sha256 (can be slow)
remote = urllib.request.urlopen(url)
total_read = 0
max_file_size = 100*1024*1024
hash = hashlib.sha256()
while True:
data = remote.read(4096)
total_read += 4096
if not data or total_read > max_file_size:
break
hash.update(data)
sha256 = hash.hexdigest()
name = "ninja_{}_{}".format(version, target)
version_archives.append(
REPO_DEFINITION.format(
name=name,
url=url,
sha256=sha256,
prefix="",
build="ninja",
template="_NINJA_BUILD_FILE",
bin="ninja.exe" if "win" in target else "ninja",
)
)
version_toolchains.update({target: name})
archives.append("\n".join(
[
" if \"{}\" == version:".format(version),
] + [indent(archive, " " * 8) for archive in version_archives])
)
toolchains_repos = {}
for target, name in version_toolchains.items():
toolchains_repos.update({name: NINJA_TARGETS[target]})
archives.append(indent(TOOLCHAIN_REPO_DEFINITION.format(
name="ninja_{}_toolchains".format(version),
repos=indent(json.dumps(toolchains_repos, indent=4), " " * 4).lstrip(),
tool="ninja",
), " " * 8))
archives.append(indent(REGISTER_TOOLCHAINS.format(
toolchains="\n".join(
[indent("\"@ninja_{}_toolchains//:{}_toolchain\",".format(
version,
repo
), " " * 4) for repo in toolchains_repos])
), " " * 8))
archives.extend([
indent("return", " " * 8),
"",
])
archives.append(
indent("fail(\"Unsupported version: \" + str(version))", " " * 4))
return "\n".join(archives)
def get_make_definitions() -> str:
"""Define a set of repositories and calls for registering `make` toolchains
Returns:
str: The Implementation of `_make_toolchains`
"""
return indent(
"# There are currently no prebuilt make binaries\npass",
" " * 4)
def main():
"""The main entrypoint of the toolchains generator"""
repos_bzl_file = Path(__file__).parent.absolute() / \
"prebuilt_toolchains.bzl"
repos_bzl_file.write_text(BZL_FILE_TEMPLATE.format(
cmake_definitions=get_cmake_definitions(),
ninja_definitions=get_ninja_definitions(),
make_definitions=get_make_definitions(),
))
if __name__ == "__main__":
main()
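# Regeneration note (a sketch of how this generator is meant to be run; the file
# name comes from the "generated by prebuilt_toolchains.py" banner in the
# template above, and network access is required because the CMake SHA-256 lists
# are downloaded and the Ninja archives are hashed on the fly):
#
#     python3 prebuilt_toolchains.py
#
# The script rewrites prebuilt_toolchains.bzl next to this file with repository
# and toolchain definitions for the CMAKE_VERSIONS and NINJA_VERSIONS above.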
|
py | 1a3fbdca3f3af15bd222916720ab02cb4f1e414a | from .lizard import displace, pspec_to_displacements, pspec_to_displacement_boost
from .gadget import gadget
from .uniform import read_uniform_distrib
from .log import VerboseTimingLog
|
py | 1a3fbe3af2173b5b952acd8a64ee00444a26af89 | a=1
b=1
c=1
print("Enter limiter\n")
n=int(input())
s=0
while(a<=10):
s=a+s
a=s
c=c+1
while(c<=n):
b=s
while(b!=0):
d=b%10
d=int(d)
s=s+d
b=b/10
b=int(b)
c=c+1
b=s
print(int(s))
t=input()
|
py | 1a3fbeed157fe31c92f2de1264b02270c168cafd | import random
MOZNOSTI_Z = 'ABCDEFGV'
MOZNOSTI_NA = 'ABCDEFGWXYZ'
NAPOVEDA = """
Příkazy:
? - Vypíše tuto nápovědu.
U - Otočí kartu balíčku (z U do V).
Nebo doplní balíček U, pokud je prázdný.
EC - Přemístí karty z E na C.
Za E dosaď odkud karty vzít: A-G nebo V.
Za C dosaď kam chceš karty dát: A-G nebo W-Z.
E2G - Přemístí 2 karty z E na C
Za E dosaď odkud kartu vzít: A-G nebo V.
Za 2 dosaď počet karet.
Za C dosaď kam chceš kartu dát: A-G nebo W-Z.
Ctrl+C - Ukončí hru
"""
def popis_karty(karta):
hodnota, barva, licem_nahoru = karta
if not licem_nahoru:
return '[???]'
if hodnota == 1:
znak_hodnoty = 'A'
elif hodnota == 10:
znak_hodnoty = 'X'
elif hodnota == 11:
znak_hodnoty = 'J'
elif hodnota == 12:
znak_hodnoty = 'Q'
elif hodnota == 13:
znak_hodnoty = 'K'
else:
znak_hodnoty = str(hodnota)
if barva == 'Pi':
znak_barvy = '♠ '
elif barva == 'Sr':
znak_barvy = ' ♥'
elif barva == 'Ka':
znak_barvy = ' ♦'
elif barva == 'Kr':
znak_barvy = '♣ '
return '[{}{}]'.format(znak_hodnoty, znak_barvy)
def popis_balicku(balicek):
if balicek:
return popis_karty(balicek[-1])
else:
return '[ ]'
def vypis_hru(hra):
balicky, cile, sloupce = hra
print()
print(' U V W X Y Z')
print('{} {} {} {} {} {}'.format(
popis_balicku(balicky[0]),
popis_balicku(balicky[1]),
popis_balicku(cile[0]),
popis_balicku(cile[1]),
popis_balicku(cile[2]),
popis_balicku(cile[3]),
))
print()
print(' A B C D E F G')
max_delka = 0
for sloupec in sloupce:
if max_delka < len(sloupec):
max_delka = len(sloupec)
for i in range(max_delka):
for sloupec in sloupce:
if i < len(sloupec):
print(popis_karty(sloupec[i]), end=' ')
else:
print(' ', end=' ')
print()
print()
def otoc_kartu(karta, nove_otoceni):
hodnota, barva, licem_nahoru = karta
return hodnota, barva, nove_otoceni
def udelej_hru():
balicek = []
for hodnota in range(1, 14):
for barva in 'Pi', 'Sr', 'Ka', 'Kr':
balicek.append((hodnota, barva, False))
random.shuffle(balicek)
sloupce = []
for cislo_sloupce in range(7):
novy_sloupec = []
sloupce.append(novy_sloupec)
for i in range(cislo_sloupce):
karta = balicek.pop()
novy_sloupec.append(karta)
karta = balicek.pop()
novy_sloupec.append(otoc_kartu(karta, True))
balicky = balicek, []
cile = [], [], [], []
sloupce = tuple(sloupce)
return balicky, cile, sloupce
def hrac_vyhral(hra):
balicky, cile, sloupce = hra
for balicek in balicky:
if balicek:
return False
for sloupec in sloupce:
if sloupec:
return False
return True
def nacti_tah():
"""Zeptá se uživatele, co dělat
Stará se o výpis nápovědy.
Může vrátit buď řetězec 'U' ("lízni z balíčku"), nebo trojici
(z, pocet, na), kde:
- `z` je číslo místa, ze kterého karty vezmou (A-G: 0-6; V: 7)
- `pocet` je počet karet, které se přemisťují
- `na` je číslo místa, kam se karty mají dát (A-G: 0-6, W-Z: 7-10)
Zadá-li uživatel špatný vstup, zeptá se znova.
"""
while True:
retezec = input('Zadej tah: ')
retezec = retezec.upper()
if retezec.startswith('?'):
print(NAPOVEDA)
elif retezec == 'U':
return 'U'
elif len(retezec) < 2:
print('Nerozumím tahu')
elif retezec[0] in MOZNOSTI_Z and retezec[-1] in MOZNOSTI_NA:
if len(retezec) == 2:
pocet = 1
else:
try:
pocet = int(retezec[1:-1])
except ValueError:
print('"{}" není číslo'.format(retezec[1:-1]))
continue
tah = (MOZNOSTI_Z.index(retezec[0]), pocet,
MOZNOSTI_NA.index(retezec[-1]))
print(popis_tahu(tah))
return tah
else:
print('Nerozumím tahu')
def popis_tahu(tah):
if tah == 'U':
return 'Balíček'
else:
z, pocet, na = tah
return '{} karet z {} na {}'.format(
pocet, MOZNOSTI_Z[z], MOZNOSTI_NA[na])
def priprav_tah(hra, tah):
"""Zkontroluje, že je tah podle pravidel
Jako argument bere hru, a tah získaný z funkce `nacti_tah`.
Vrací buď řetězec 'U' ("lízni z balíčku"), nebo trojici
(zdrojovy_balicek, pocet, cilovy_balicek), kde `*_balicek` jsou přímo
seznamy, ze kterých/na které se budou karty přemisťovat, a `pocet` je počet
karet k přemístění.
Není-li tah podle pravidel, vynkce vyvolá výjimku `ValueError` s nějakou
rozumnou chybovou hláškou.
"""
balicky, cile, sloupce = hra
if tah == 'U':
return 'U'
else:
z, pocet, na = tah
if z == 7:
if pocet != 1:
raise ValueError('Z balíčku se nedá brát víc karet najednou')
zdrojovy_balicek = balicky[1]
else:
zdrojovy_balicek = sloupce[z]
if len(zdrojovy_balicek) < pocet:
raise ValueError('Na to není v {} dost karet!'.format(MOZNOSTI_Z[z]))
karty = zdrojovy_balicek[-pocet:]
for hodnota, barva, licem_nahoru in karty:
if not licem_nahoru:
raise ValueError('Nemůžeš přesouvat karty, které jsou rubem nahoru!')
if na < 7:
cilovy_balicek = sloupce[na]
if cilovy_balicek:
zkontroluj_postupku([cilovy_balicek[-1]] + karty)
else:
if karty[0][0] != 13:
raise ValueError('Do prázdného sloupečku smí jen král, {} nesedí!'.format(
popis_karty(karty[0])))
zkontroluj_postupku(karty)
else:
if pocet != 1:
raise ValueError('Do cíle se nedá dávat víc karet najednou')
hodnota, barva, otoceni = karty[0]
cilovy_balicek = cile[na - 7]
if cilovy_balicek:
hodnota_p, barva_p, otoceni_p = cilovy_balicek[-1]
if barva != barva_p:
raise ValueError('Cílová hromádka musí mít jednu barvu; {} na {} nesedí'.format(
popis_karty(karty[0]), popis_karty(cilovy_balicek[-1])))
if hodnota != hodnota_p + 1:
raise ValueError('Do cíle musíš skládat karty postupně od nejnižších; {} na {} nejde'.format(
popis_karty(karty[0]), popis_karty(cilovy_balicek[-1])))
else:
if hodnota != 1:
raise ValueError('Do prázdného cíle smí jen eso!')
return zdrojovy_balicek, pocet, cilovy_balicek
def udelej_tah(hra, info):
balicky, cile, sloupce = hra
if info == 'U':
if balicky[0]:
karta = balicky[0].pop()
karta = otoc_kartu(karta, True)
print('Karta z balíčku:', popis_karty(karta))
balicky[1].append(karta)
else:
print('Otáčím balíček')
while balicky[1]:
karta = balicky[1].pop()
karta = otoc_kartu(karta, False)
balicky[0].append(karta)
else:
zdrojovy_balicek, pocet, cilovy_balicek = info
karty = zdrojovy_balicek[-pocet:]
print('Přesouvám:', end=' ')
for karta in karty:
print(popis_karty(karta), end=' ')
print()
del zdrojovy_balicek[-len(karty):]
cilovy_balicek.extend(karty)
if zdrojovy_balicek and not zdrojovy_balicek[-1][2]:
karta = zdrojovy_balicek.pop()
karta = otoc_kartu(karta, True)
print('Otočená karta:', popis_karty(karta))
zdrojovy_balicek.append(karta)
def druh_barvy(barva):
if barva == 'Pi':
return 'černá'
elif barva == 'Sr':
return 'červená'
elif barva == 'Ka':
return 'červená'
elif barva == 'Kr':
return 'černá'
def zkontroluj_postupku(karty):
for karta_a, karta_b in zip(karty[1:], karty):
hodnota_a, barva_a, lic_a = karta_a
hodnota_b, barva_b, lic_b = karta_b
if hodnota_a != hodnota_b - 1:
raise ValueError('Musíš dělat sestupné postupky; {} a {} nesedí'.format(
popis_karty(karta_a), popis_karty(karta_b)))
if druh_barvy(barva_a) == druh_barvy(barva_b):
raise ValueError('Musíš střídat barvy; {} je {} a {} taky'.format(
popis_karty(karta_a), druh_barvy(barva_a), popis_karty(karta_b)))
|
py | 1a3fbfae8174887f18ae981bcbe46893dc2eb623 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cinderclient import client
from cinderclient import api_versions
from cinderclient.v2 import availability_zones
from cinderclient.v2 import cgsnapshots
from cinderclient.v2 import consistencygroups
from cinderclient.v2 import capabilities
from cinderclient.v2 import limits
from cinderclient.v2 import pools
from cinderclient.v2 import qos_specs
from cinderclient.v2 import quota_classes
from cinderclient.v2 import quotas
from cinderclient.v2 import services
from cinderclient.v2 import volumes
from cinderclient.v2 import volume_snapshots
from cinderclient.v2 import volume_types
from cinderclient.v2 import volume_type_access
from cinderclient.v2 import volume_encryption_types
from cinderclient.v2 import volume_backups
from cinderclient.v2 import volume_backups_restore
from cinderclient.v2 import volume_transfers
class Client(object):
"""Top-level object to access the OpenStack Volume API.
Create an instance with your creds::
>>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
Then call methods on its managers::
>>> client.volumes.list()
...
"""
def __init__(self, username=None, api_key=None, project_id=None,
auth_url='', insecure=False, timeout=None, tenant_id=None,
proxy_tenant_id=None, proxy_token=None, region_name=None,
endpoint_type='publicURL', extensions=None,
service_type='volumev2', service_name=None,
volume_service_name=None, bypass_url=None, retries=0,
http_log_debug=False, cacert=None, auth_system='keystone',
auth_plugin=None, session=None, api_version=None,
logger=None, **kwargs):
# FIXME(comstud): Rename the api_key argument above when we
# know it's not being used as keyword argument
password = api_key
self.version = '2.0'
self.limits = limits.LimitsManager(self)
# extensions
self.volumes = volumes.VolumeManager(self)
self.volume_snapshots = volume_snapshots.SnapshotManager(self)
self.volume_types = volume_types.VolumeTypeManager(self)
self.volume_type_access = \
volume_type_access.VolumeTypeAccessManager(self)
self.volume_encryption_types = \
volume_encryption_types.VolumeEncryptionTypeManager(self)
self.qos_specs = qos_specs.QoSSpecsManager(self)
self.quota_classes = quota_classes.QuotaClassSetManager(self)
self.quotas = quotas.QuotaSetManager(self)
self.backups = volume_backups.VolumeBackupManager(self)
self.restores = volume_backups_restore.VolumeBackupRestoreManager(self)
self.transfers = volume_transfers.VolumeTransferManager(self)
self.services = services.ServiceManager(self)
self.consistencygroups = consistencygroups.\
ConsistencygroupManager(self)
self.cgsnapshots = cgsnapshots.CgsnapshotManager(self)
self.availability_zones = \
availability_zones.AvailabilityZoneManager(self)
self.pools = pools.PoolManager(self)
self.capabilities = capabilities.CapabilitiesManager(self)
self.api_version = api_version or api_versions.APIVersion(self.version)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
if not logger:
logger = logging.getLogger(__name__)
self.client = client._construct_http_client(
username=username,
password=password,
project_id=project_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
tenant_id=tenant_id,
proxy_tenant_id=tenant_id,
proxy_token=proxy_token,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
bypass_url=bypass_url,
retries=retries,
http_log_debug=http_log_debug,
cacert=cacert,
auth_system=auth_system,
auth_plugin=auth_plugin,
session=session,
api_version=self.api_version,
logger=logger,
**kwargs)
def authenticate(self):
"""Authenticate against the server.
Normally this is called automatically when you first access the API,
but you can call this method to force authentication right now.
Returns on success; raises :exc:`exceptions.Unauthorized` if the
credentials are wrong.
"""
self.client.authenticate()
def get_volume_api_version_from_endpoint(self):
return self.client.get_volume_api_version_from_endpoint()
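# A session-based usage sketch (assumption: `sess` is a keystoneauth1 Session
# created elsewhere; the constructor above accepts it via the `session` keyword):
#
#     client = Client(session=sess, region_name='RegionOne')
#     client.volumes.list()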
|
py | 1a3fbfd3ad0c183732bcd7cb561dda620d375d6c | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated. Please use `airflow.providers.sftp.operators.sftp`.
"""
import warnings
# pylint: disable=unused-import
from airflow.providers.sftp.operators.sftp import SFTPOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.sftp.operators.sftp`.",
DeprecationWarning,
stacklevel=2,
)
|
py | 1a3fc10d94b1e2f55ac0e2b31e0f1b7b1463bb04 | from typing import List
import networkx as nx
from pycid.analyze.requisite_graph import requisite_graph
from pycid.core.cid import CID
def admits_voi(cid: CID, decision: str, node: str) -> bool:
r"""Return True if cid admits value of information for node.
- A CID admits value of information for a node X if:
i) X is not a descendant of the decision node, D.
ii) X is d-connected to U given Fa_D \ {X}, where U ∈ U ∩ Desc(D)
("Agent Incentives: a Causal Perspective" by Everitt, Carey, Langlois, Ortega, and Legg, 2020)
"""
if len(cid.agents) > 1:
raise ValueError(
f"This CID has {len(cid.agents)} agents. This incentive is currently only valid for CIDs with one agent."
)
if node not in cid.nodes:
raise KeyError(f"{node} is not present in the cid")
if decision not in cid.nodes:
raise KeyError(f"{decision} is not present in the cid")
if not cid.sufficient_recall():
raise ValueError("Voi only implemented graphs with sufficient recall")
if node in nx.descendants(cid, decision) or node == decision:
return False
cid2 = cid.copy_without_cpds()
cid2.add_edge(node, decision)
req_graph = requisite_graph(cid2)
return node in req_graph.get_parents(decision)
def admits_voi_list(cid: CID, decision: str) -> List[str]:
"""
Return the list of nodes with possible value of information for decision.
"""
non_descendants = set(cid.nodes) - set(nx.descendants(cid, decision)) - {decision}
return [x for x in non_descendants if admits_voi(cid, decision, x)]
def quantitative_voi(cid: CID, decision: str, node: str) -> float:
r"""
Returns the quantitative value of information (voi) of a variable corresponding to a node in a parameterised CID.
A node X ∈ V \ Desc(D) in a single-decision CID has quantitative voi equal to
EU_max[M(X->D)] - EU_max[M(X \ ->D)]
ie the maximum utility attainable in M(X->D) minus the maximum utility attainable in M(X \ ->D) where
- M(X->D) is the CID that contains the directed edge X -> D
- M(X \ ->D) is the CID without the directed edge X -> D.
("Agent Incentives: a Causal Perspective" by Everitt, Carey, Langlois, Ortega, and Legg, 2020)
"""
if node not in cid.nodes:
raise KeyError(f"{node} is not present in the cid")
if node in {decision}.union(set(nx.descendants(cid, decision))):
raise ValueError(
f"{node} is a decision node or is a descendent of the decision node. \
VOI only applies to nodes which are not descendents of the decision node."
)
new_cid = cid.copy()
new_cid.add_edge(node, decision)
new_cid.impute_optimal_policy()
ev1: float = new_cid.expected_utility({})
new_cid.remove_all_decision_rules()
new_cid.remove_edge(node, decision)
new_cid.impute_optimal_policy()
ev2: float = new_cid.expected_utility({})
return ev1 - ev2
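# Hedged usage sketch (not in the original module): the three-node CID below is
# illustrative, and the CID(...) constructor signature is assumed from pycid's
# public examples; quantitative_voi additionally needs CPDs to be attached
# before it can be evaluated, so only the structural check is shown here.
def _voi_demo() -> None:
    cid = CID(
        [("S", "D"), ("S", "U"), ("D", "U")],
        decisions=["D"],
        utilities=["U"],
    )
    print(admits_voi_list(cid, "D"))  # expected to include "S"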
|
py | 1a3fc1f0d386e073dc50b8753ebf57e4273fdb1d | # Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import tempfile
import shutil
import zipfile
import io
import itertools
import struct
from abc import ABC, abstractmethod
from contextlib import closing
import av
import numpy as np
from pyunpack import Archive
from PIL import Image, ImageFile
import open3d as o3d
import pydicom
from cvat.apps.engine.utils import rotate_image
from cvat.apps.engine.models import DimensionType
# fixes: "OSError:broken data stream" when executing line 72 while loading images downloaded from the web
# see: https://stackoverflow.com/questions/42462431/oserror-broken-data-stream-when-reading-image-file
ImageFile.LOAD_TRUNCATED_IMAGES = True
from cvat.apps.engine.mime_types import mimetypes
from utils.dataset_manifest import VideoManifestManager, ImageManifestManager
def get_mime(name):
for type_name, type_def in MEDIA_TYPES.items():
if type_def['has_mime_type'](name):
return type_name
return 'unknown'
def create_tmp_dir():
return tempfile.mkdtemp(prefix='cvat-', suffix='.data')
def delete_tmp_dir(tmp_dir):
if tmp_dir:
shutil.rmtree(tmp_dir)
def files_to_ignore(directory):
ignore_files = ('__MSOSX', '._.DS_Store', '__MACOSX', '.DS_Store')
if not any(ignore_file in directory for ignore_file in ignore_files):
return True
return False
class IMediaReader(ABC):
def __init__(self, source_path, step, start, stop, dimension):
self._source_path = sorted(source_path)
self._step = step
self._start = start
self._stop = stop
self._dimension = dimension
@abstractmethod
def __iter__(self):
pass
@abstractmethod
def get_preview(self):
pass
@abstractmethod
def get_progress(self, pos):
pass
@staticmethod
def _get_preview(obj):
PREVIEW_SIZE = (256, 256)
if isinstance(obj, io.IOBase):
preview = Image.open(obj)
else:
preview = obj
preview.thumbnail(PREVIEW_SIZE)
return preview.convert('RGB')
@abstractmethod
def get_image_size(self, i):
pass
def __len__(self):
return len(self.frame_range)
@property
def frame_range(self):
return range(self._start, self._stop, self._step)
class ImageListReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No image found')
if stop is None:
stop = len(source_path)
else:
stop = min(len(source_path), stop + 1)
step = max(step, 1)
assert stop > start
super().__init__(
source_path=source_path,
step=step,
start=start,
stop=stop,
dimension=dimension
)
def __iter__(self):
for i in range(self._start, self._stop, self._step):
yield (self.get_image(i), self.get_path(i), i)
def filter(self, callback):
source_path = list(filter(callback, self._source_path))
ImageListReader.__init__(
self,
source_path,
step=self._step,
start=self._start,
stop=self._stop,
dimension=self._dimension
)
def get_path(self, i):
return self._source_path[i]
def get_image(self, i):
return self._source_path[i]
def get_progress(self, pos):
return (pos - self._start + 1) / (self._stop - self._start)
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
else:
fp = open(self._source_path[0], "rb")
return self._get_preview(fp)
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(self._source_path[i])
return img.width, img.height
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
# FIXME
ImageListReader.__init__(self,
source_path=source_files,
step=step,
start=start,
stop=stop
)
self._dimension = dimension
@property
def absolute_source_paths(self):
return [self.get_path(idx) for idx, _ in enumerate(self._source_path)]
class DirectoryReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
image_paths = []
for source in source_path:
for root, _, files in os.walk(source):
paths = [os.path.join(root, f) for f in files]
paths = filter(lambda x: get_mime(x) == 'image', paths)
image_paths.extend(paths)
super().__init__(
source_path=image_paths,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ArchiveReader(DirectoryReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._archive_source = source_path[0]
extract_dir = source_path[1] if len(source_path) > 1 else os.path.dirname(source_path[0])
Archive(self._archive_source).extractall(extract_dir)
if extract_dir == os.path.dirname(source_path[0]):
os.remove(self._archive_source)
super().__init__(
source_path=[extract_dir],
step=step,
start=start,
stop=stop,
dimension=dimension
)
class PdfReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No PDF found')
self._pdf_source = source_path[0]
_basename = os.path.splitext(os.path.basename(self._pdf_source))[0]
_counter = itertools.count()
def _make_name():
for page_num in _counter:
yield '{}{:09d}.jpeg'.format(_basename, page_num)
from pdf2image import convert_from_path
self._tmp_dir = os.path.dirname(source_path[0])
os.makedirs(self._tmp_dir, exist_ok=True)
# Avoid OOM: https://github.com/openvinotoolkit/cvat/issues/940
paths = convert_from_path(self._pdf_source,
last_page=stop, paths_only=True,
output_folder=self._tmp_dir, fmt="jpeg", output_file=_make_name())
os.remove(source_path[0])
super().__init__(
source_path=paths,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ZipReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._zip_source = zipfile.ZipFile(source_path[0], mode='r')
self.extract_dir = source_path[1] if len(source_path) > 1 else None
file_list = [f for f in self._zip_source.namelist() if files_to_ignore(f) and get_mime(f) == 'image']
super().__init__(file_list, step=step, start=start, stop=stop, dimension=dimension)
def __del__(self):
self._zip_source.close()
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
# TODO
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
return self._get_preview(fp)
io_image = io.BytesIO(self._zip_source.read(self._source_path[0]))
return self._get_preview(io_image)
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[i])))
return img.width, img.height
def get_image(self, i):
if self._dimension == DimensionType.DIM_3D:
return self.get_path(i)
return io.BytesIO(self._zip_source.read(self._source_path[i]))
def get_zip_filename(self):
return self._zip_source.filename
def get_path(self, i):
if self._zip_source.filename:
return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \
if not self.extract_dir else os.path.join(self.extract_dir, self._source_path[i])
else: # necessary for mime_type definition
return self._source_path[i]
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().reconcile(
source_files=source_files,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
def extract(self):
self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename))
if not self.extract_dir:
os.remove(self._zip_source.filename)
class VideoReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().__init__(
source_path=source_path,
step=step,
start=start,
stop=stop + 1 if stop is not None else stop,
dimension=dimension,
)
def _has_frame(self, i):
if i >= self._start:
if (i - self._start) % self._step == 0:
if self._stop is None or i < self._stop:
return True
return False
def _decode(self, container):
frame_num = 0
for packet in container.demux():
if packet.stream.type == 'video':
for image in packet.decode():
frame_num += 1
if self._has_frame(frame_num - 1):
if packet.stream.metadata.get('rotate'):
old_image = image
image = av.VideoFrame().from_ndarray(
rotate_image(
image.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
image.pts = old_image.pts
yield (image, self._source_path[0], image.pts)
def __iter__(self):
container = self._get_av_container()
source_video_stream = container.streams.video[0]
source_video_stream.thread_type = 'AUTO'
return self._decode(container)
def get_progress(self, pos):
duration = self._get_duration()
return pos / duration if duration else None
def _get_av_container(self):
if isinstance(self._source_path[0], io.BytesIO):
self._source_path[0].seek(0) # required for re-reading
return av.open(self._source_path[0])
def _get_duration(self):
container = self._get_av_container()
stream = container.streams.video[0]
duration = None
if stream.duration:
duration = stream.duration
else:
# may have a DURATION in format like "01:16:45.935000000"
duration_str = stream.metadata.get("DURATION", None)
tb_denominator = stream.time_base.denominator
if duration_str and tb_denominator:
_hour, _min, _sec = duration_str.split(':')
duration_sec = 60*60*float(_hour) + 60*float(_min) + float(_sec)
duration = duration_sec * tb_denominator
return duration
def get_preview(self):
container = self._get_av_container()
stream = container.streams.video[0]
preview = next(container.decode(stream))
return self._get_preview(preview.to_image() if not stream.metadata.get('rotate') \
else av.VideoFrame().from_ndarray(
rotate_image(
preview.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
).to_image()
)
def get_image_size(self, i):
image = (next(iter(self)))[0]
return image.width, image.height
class FragmentMediaReader:
def __init__(self, chunk_number, chunk_size, start, stop, step=1):
self._start = start
self._stop = stop + 1 # up to the last inclusive
self._step = step
self._chunk_number = chunk_number
self._chunk_size = chunk_size
self._start_chunk_frame_number = \
self._start + self._chunk_number * self._chunk_size * self._step
self._end_chunk_frame_number = min(self._start_chunk_frame_number \
+ (self._chunk_size - 1) * self._step + 1, self._stop)
self._frame_range = self._get_frame_range()
@property
def frame_range(self):
return self._frame_range
def _get_frame_range(self):
frame_range = []
for idx in range(self._start, self._stop, self._step):
if idx < self._start_chunk_frame_number:
continue
elif idx < self._end_chunk_frame_number and \
not ((idx - self._start_chunk_frame_number) % self._step):
frame_range.append(idx)
elif (idx - self._start_chunk_frame_number) % self._step:
continue
else:
break
return frame_range
class ImageDatasetManifestReader(FragmentMediaReader):
def __init__(self, manifest_path, **kwargs):
super().__init__(**kwargs)
self._manifest = ImageManifestManager(manifest_path)
self._manifest.init_index()
def __iter__(self):
for idx in self._frame_range:
yield self._manifest[idx]
class VideoDatasetManifestReader(FragmentMediaReader):
def __init__(self, manifest_path, **kwargs):
self.source_path = kwargs.pop('source_path')
super().__init__(**kwargs)
self._manifest = VideoManifestManager(manifest_path)
self._manifest.init_index()
def _get_nearest_left_key_frame(self):
if self._start_chunk_frame_number >= \
self._manifest[len(self._manifest) - 1].get('number'):
left_border = len(self._manifest) - 1
else:
left_border = 0
delta = len(self._manifest)
while delta:
step = delta // 2
cur_position = left_border + step
if self._manifest[cur_position].get('number') < self._start_chunk_frame_number:
cur_position += 1
left_border = cur_position
delta -= step + 1
else:
delta = step
if self._manifest[cur_position].get('number') > self._start_chunk_frame_number:
left_border -= 1
frame_number = self._manifest[left_border].get('number')
timestamp = self._manifest[left_border].get('pts')
return frame_number, timestamp
def __iter__(self):
start_decode_frame_number, start_decode_timestamp = self._get_nearest_left_key_frame()
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = next(stream for stream in container.streams if stream.type == 'video')
video_stream.thread_type = 'AUTO'
container.seek(offset=start_decode_timestamp, stream=video_stream)
frame_number = start_decode_frame_number - 1
for packet in container.demux(video_stream):
for frame in packet.decode():
frame_number += 1
if frame_number in self._frame_range:
if video_stream.metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
yield frame
elif frame_number < self._frame_range[-1]:
continue
else:
return
class IChunkWriter(ABC):
def __init__(self, quality, dimension=DimensionType.DIM_2D):
self._image_quality = quality
self._dimension = dimension
@staticmethod
def _compress_image(image_path, quality):
image = image_path.to_image() if isinstance(image_path, av.VideoFrame) else Image.open(image_path)
# Ensure image data fits into 8bit per pixel before RGB conversion as PIL clips values on conversion
if image.mode == "I":
# Image mode is 32bit integer pixels.
# Autoscale pixels by factor 2**8 / im_data.max() to fit into 8bit
im_data = np.array(image)
im_data = im_data * (2**8 / im_data.max())
image = Image.fromarray(im_data.astype(np.int32))
converted_image = image.convert('RGB')
image.close()
buf = io.BytesIO()
converted_image.save(buf, format='JPEG', quality=quality, optimize=True)
buf.seek(0)
width, height = converted_image.size
converted_image.close()
return width, height, buf
@abstractmethod
def save_as_chunk(self, images, chunk_path):
pass
class ZipChunkWriter(IChunkWriter):
def save_as_chunk(self, images, chunk_path):
with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:
for idx, (image, path, _) in enumerate(images):
arcname = '{:06d}{}'.format(idx, os.path.splitext(path)[1])
if isinstance(image, io.BytesIO):
zip_chunk.writestr(arcname, image.getvalue())
else:
zip_chunk.write(filename=image, arcname=arcname)
# return empty list because ZipChunkWriter write files as is
# and does not decode it to know img size.
return []
class ZipCompressedChunkWriter(IChunkWriter):
def save_as_chunk(self, images, chunk_path):
image_sizes = []
with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:
for idx, (image, _, _) in enumerate(images):
if self._dimension == DimensionType.DIM_2D:
w, h, image_buf = self._compress_image(image, self._image_quality)
extension = "jpeg"
else:
image_buf = open(image, "rb") if isinstance(image, str) else image
properties = ValidateDimension.get_pcd_properties(image_buf)
w, h = int(properties["WIDTH"]), int(properties["HEIGHT"])
extension = "pcd"
image_buf.seek(0, 0)
image_buf = io.BytesIO(image_buf.read())
image_sizes.append((w, h))
arcname = '{:06d}.{}'.format(idx, extension)
zip_chunk.writestr(arcname, image_buf.getvalue())
return image_sizes
class Mpeg4ChunkWriter(IChunkWriter):
def __init__(self, quality=67):
        # translate the inverted quality range [1:100] to [0:51]
quality = round(51 * (100 - quality) / 99)
super().__init__(quality)
self._output_fps = 25
try:
codec = av.codec.Codec('libopenh264', 'w')
self._codec_name = codec.name
self._codec_opts = {
'profile': 'constrained_baseline',
'qmin': str(self._image_quality),
'qmax': str(self._image_quality),
'rc_mode': 'buffer',
}
except av.codec.codec.UnknownCodecError:
codec = av.codec.Codec('libx264', 'w')
self._codec_name = codec.name
self._codec_opts = {
"crf": str(self._image_quality),
"preset": "ultrafast",
}
def _create_av_container(self, path, w, h, rate, options, f='mp4'):
# x264 requires width and height must be divisible by 2 for yuv420p
if h % 2:
h += 1
if w % 2:
w += 1
container = av.open(path, 'w',format=f)
video_stream = container.add_stream(self._codec_name, rate=rate)
video_stream.pix_fmt = "yuv420p"
video_stream.width = w
video_stream.height = h
video_stream.options = options
return container, video_stream
def save_as_chunk(self, images, chunk_path):
if not images:
raise Exception('no images to save')
input_w = images[0][0].width
input_h = images[0][0].height
output_container, output_v_stream = self._create_av_container(
path=chunk_path,
w=input_w,
h=input_h,
rate=self._output_fps,
options=self._codec_opts,
)
self._encode_images(images, output_container, output_v_stream)
output_container.close()
return [(input_w, input_h)]
@staticmethod
def _encode_images(images, container, stream):
for frame, _, _ in images:
# let libav set the correct pts and time_base
frame.pts = None
frame.time_base = None
for packet in stream.encode(frame):
container.mux(packet)
# Flush streams
for packet in stream.encode():
container.mux(packet)
class Mpeg4CompressedChunkWriter(Mpeg4ChunkWriter):
def __init__(self, quality):
super().__init__(quality)
if self._codec_name == 'libx264':
self._codec_opts = {
'profile': 'baseline',
'coder': '0',
'crf': str(self._image_quality),
'wpredp': '0',
'flags': '-loop',
}
def save_as_chunk(self, images, chunk_path):
if not images:
raise Exception('no images to save')
input_w = images[0][0].width
input_h = images[0][0].height
downscale_factor = 1
while input_h / downscale_factor >= 1080:
downscale_factor *= 2
output_h = input_h // downscale_factor
output_w = input_w // downscale_factor
output_container, output_v_stream = self._create_av_container(
path=chunk_path,
w=output_w,
h=output_h,
rate=self._output_fps,
options=self._codec_opts,
)
self._encode_images(images, output_container, output_v_stream)
output_container.close()
return [(input_w, input_h)]
class DicomListExtractor(ImageListReader):
def __init__(self, source_path, dest_path, image_quality, step=1, start=0, stop=0):
if not source_path:
raise Exception('No Dicom found')
import pydicom
super().__init__(
source_path=sorted(source_path),
dest_path=dest_path,
image_quality=image_quality,
step=1,
start=0,
stop=0,
)
self._dimensions = []
series = dict()
self._jpeg_source_paths = []
for i, source in enumerate(self._source_path):
dcm = pydicom.read_file(source)
series_time = dcm.get("SeriesTime", "")
if series_time not in series:
series[series_time] = Series(i, dcm.get("SeriesDescription", ""))
else:
series[series_time].stop_frame = i
img = _normalize_image(dcm.pixel_array)
pilImg = Image.fromarray(img)
self._dimensions.append(pilImg.size)
jpeg_source_path = os.path.splitext(source)[0] + '.jpg'
pilImg.save(jpeg_source_path, 'JPEG')
self._jpeg_source_paths.append(jpeg_source_path)
        # Get the list of Series sorted in ascending order by SeriesTime
self._series = [v for _, v in sorted(series.items())]
...
def _normalize_image(img, min_percent = 0, max_percent = 99, gamma = 1.2):
vmin = np.percentile(img, min_percent)
vmax = np.percentile(img, max_percent)
img = ((img - vmin) / (vmax - vmin))
img[img < 0] = 0
img = pow(img, gamma) * 255
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def _is_archive(path):
mime = mimetypes.guess_type(path)
mime_type = mime[0]
encoding = mime[1]
supportedArchives = ['application/x-rar-compressed',
'application/x-tar', 'application/x-7z-compressed', 'application/x-cpio',
'gzip', 'bzip2']
return mime_type in supportedArchives or encoding in supportedArchives
def _is_video(path):
mime = mimetypes.guess_type(path)
return mime[0] is not None and mime[0].startswith('video')
def _is_image(path):
mime = mimetypes.guess_type(path)
# Exclude vector graphic images because Pillow cannot work with them
return mime[0] is not None and mime[0].startswith('image') and \
not mime[0].startswith('image/svg')
def _is_dir(path):
return os.path.isdir(path)
def _is_pdf(path):
mime = mimetypes.guess_type(path)
return mime[0] == 'application/pdf'
def _is_zip(path):
mime = mimetypes.guess_type(path)
mime_type = mime[0]
encoding = mime[1]
supportedArchives = ['application/zip']
return mime_type in supportedArchives or encoding in supportedArchives
# 'has_mime_type': function receives 1 argument - path to file.
# Should return True if file has specified media type.
# 'extractor': class that extracts images from specified media.
# 'mode': 'annotation' or 'interpolation' - mode of task that should be created.
# 'unique': True or False - describes how the type can be combined with other.
# True - only one item of this type and no other is allowed
# False - this media types can be combined with other which have unique == False
MEDIA_TYPES = {
'image': {
'has_mime_type': _is_image,
'extractor': ImageListReader,
'mode': 'annotation',
'unique': False,
},
'video': {
'has_mime_type': _is_video,
'extractor': VideoReader,
'mode': 'interpolation',
'unique': True,
},
'archive': {
'has_mime_type': _is_archive,
'extractor': ArchiveReader,
'mode': 'annotation',
'unique': True,
},
'directory': {
'has_mime_type': _is_dir,
'extractor': DirectoryReader,
'mode': 'annotation',
'unique': False,
},
'pdf': {
'has_mime_type': _is_pdf,
'extractor': PdfReader,
'mode': 'annotation',
'unique': True,
},
'zip': {
'has_mime_type': _is_zip,
'extractor': ZipReader,
'mode': 'annotation',
'unique': True,
}
}
class ValidateDimension:
def __init__(self, path=None):
self.dimension = DimensionType.DIM_2D
self.path = path
self.related_files = {}
self.image_files = {}
self.converted_files = []
@staticmethod
def get_pcd_properties(fp, verify_version=False):
kv = {}
pcd_version = ["0.7", "0.6", "0.5", "0.4", "0.3", "0.2", "0.1",
".7", ".6", ".5", ".4", ".3", ".2", ".1"]
try:
for line in fp:
line = line.decode("utf-8")
if line.startswith("#"):
continue
k, v = line.split(" ", maxsplit=1)
kv[k] = v.strip()
if "DATA" in line:
break
if verify_version:
if "VERSION" in kv and kv["VERSION"] in pcd_version:
return True
return None
return kv
except AttributeError:
return None
@staticmethod
def convert_bin_to_pcd(path, delete_source=True):
list_pcd = []
with open(path, "rb") as f:
size_float = 4
byte = f.read(size_float * 4)
while byte:
x, y, z, _ = struct.unpack("ffff", byte)
list_pcd.append([x, y, z])
byte = f.read(size_float * 4)
np_pcd = np.asarray(list_pcd)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np_pcd)
pcd_filename = path.replace(".bin", ".pcd")
o3d.io.write_point_cloud(pcd_filename, pcd)
if delete_source:
os.remove(path)
return pcd_filename
def set_path(self, path):
self.path = path
def bin_operation(self, file_path, actual_path):
pcd_path = ValidateDimension.convert_bin_to_pcd(file_path)
self.converted_files.append(pcd_path)
return pcd_path.split(actual_path)[-1][1:]
@staticmethod
def pcd_operation(file_path, actual_path):
with open(file_path, "rb") as file:
is_pcd = ValidateDimension.get_pcd_properties(file, verify_version=True)
return file_path.split(actual_path)[-1][1:] if is_pcd else file_path
def process_files(self, root, actual_path, files):
pcd_files = {}
for file in files:
file_name, file_extension = os.path.splitext(file)
file_path = os.path.abspath(os.path.join(root, file))
if file_extension == ".bin":
path = self.bin_operation(file_path, actual_path)
pcd_files[file_name] = path
self.related_files[path] = []
elif file_extension == ".pcd":
path = ValidateDimension.pcd_operation(file_path, actual_path)
if path == file_path:
self.image_files[file_name] = file_path
else:
pcd_files[file_name] = path
self.related_files[path] = []
else:
if _is_image(file_path):
self.image_files[file_name] = file_path
return pcd_files
def validate(self):
"""
        Validate the directory structure for KITTI and point cloud formats.
"""
if not self.path:
return
actual_path = self.path
for root, _, files in os.walk(actual_path):
if not files_to_ignore(root):
continue
self.process_files(root, actual_path, files)
if len(self.related_files.keys()):
self.dimension = DimensionType.DIM_3D
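# Hedged usage sketch (not part of the original module): it only exercises
# helpers defined above and never opens the listed files, so the paths are
# placeholders; running it still requires the CVAT imports at the top of
# this module to be available.
if __name__ == "__main__":
    demo_paths = ["frames/000001.jpg", "frames/000002.jpg"]
    print([get_mime(p) for p in demo_paths])  # expected: ['image', 'image']
    demo_reader = MEDIA_TYPES["image"]["extractor"](demo_paths, step=1, start=0)
    for _, path, idx in demo_reader:
        print(idx, path)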
|
py | 1a3fc2e0602e80eae0ea751463c52d82c0df0164 | # Generated by Django 2.2 on 2020-05-25 10:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0006_auto_20200511_1802'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='posts/'),
),
]
|
py | 1a3fc30a3a0009949333d9b309a15feaa02c0d3e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import AppConfigurationManagementClientConfiguration
from .operations import ConfigurationStoresOperations
from .operations import Operations
from . import models
class AppConfigurationManagementClient(SDKClient):
"""AppConfigurationManagementClient
:ivar config: Configuration for client.
:vartype config: AppConfigurationManagementClientConfiguration
:ivar configuration_stores: ConfigurationStores operations
:vartype configuration_stores: azure.mgmt.appconfiguration.operations.ConfigurationStoresOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appconfiguration.operations.Operations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = AppConfigurationManagementClientConfiguration(credentials, subscription_id, base_url)
super(AppConfigurationManagementClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2019-10-01'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.configuration_stores = ConfigurationStoresOperations(
self._client, self.config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self.config, self._serialize, self._deserialize)
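# Hedged usage sketch (not part of the generated client): `credentials` must be
# a msrestazure credentials object and `subscription_id` a real subscription
# id, both placeholders here; the list() call on configuration_stores is an
# assumption about the service API rather than something shown in this file.
def _list_configuration_stores(credentials, subscription_id):
    demo_client = AppConfigurationManagementClient(credentials, subscription_id)
    for store in demo_client.configuration_stores.list():
        print(store.name)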
|
py | 1a3fc39afc76385e2592146bdc42eb4ce4a3fc34 | import numpy, pygame
from cell import *
class Board(object):
def __init__(self, display_surface, board_dimension, background_color, lines_color):
# Board properties
self.BOARD_W, self.BOARD_H = board_dimension
# Colors
self.WHITE = (255, 255, 255)
self.BLACK = (0, 0, 0)
# Pygame window properties
self.background_color = background_color
self.DSURFACE = display_surface
self.DSURFACE.fill(self.background_color)
self.WIDTH, self.HEIGHT = self.DSURFACE.get_width(), self.DSURFACE.get_height()
# Registered events list
self.registered_events = [
{"on_cell_lmb_down": []},
{"on_cell_rmb_down": []},
{"on_space_key_pressed": []}
]
# Grid properties
self.lines_color = lines_color
self.highlight = False
self.highlight_color = self.BLACK
self.highlight_color_alpha = 100 # Ranging from 0-255
# Sprite vars and properties
self.sprite_group = pygame.sprite.Group()
self.scale_cell_sprite = True
# Current selected cell
self.current_cell_selected = None
#self.init_board()
def init(self):
#print "Initializing a ({}, {}) board.".format(self.BOARD_W, self.BOARD_H)
#self.board = [["0" for x in range(self.BOARD_W)] for y in range(self.BOARD_H)]
self.board = [[Cell((x,y, True),None) for y in range(self.BOARD_H)] for x in range(self.BOARD_W)]
def restart(self):
# Clean the 2d array representing the grid
self.board = [[Cell((x,y, True), None) for y in range(self.BOARD_H)] for x in range(self.BOARD_W)]
# Remove all sprites from sprite_group to remove them from the screen
self.sprite_group.empty()
self.DSURFACE.fill(self.background_color)
self.__draw_grid(self.lines_color, 1)
def insert(self, sprite, cell_position, overwrite=False):
cell_x, cell_y = cell_position
cell = self.board[cell_x][cell_y]
if cell.overwrite or cell.isEmpty():
cell.overwrite = overwrite
self.sprite_group.remove(cell.sprite)
cell.empty()
cell.id = sprite.id
cell.sprite = sprite
# Instead of 0 I should be using the starting drawing position
x = int(numpy.interp(cell_y, [0, self.BOARD_H], [0, self.WIDTH]))
y = int(numpy.interp(cell_x, [0, self.BOARD_W], [0, self.HEIGHT]))
# Set the position of the sprite to the interpolated values x,y (Top left corner of current cell)
cell.sprite.rect = x, y
# Scale the image to fit the board size
if self.scale_cell_sprite:
cell.sprite.image= pygame.transform.scale(cell.sprite.image, (self.WIDTH/self.BOARD_H, self.HEIGHT/self.BOARD_H))
# Add sprite to sprite groups and the draw it
self.sprite_group.add(cell.sprite)
return True
else:
return False
def __draw_grid(self, color, lines_width):
### Draws a 3x3 grid on the DSURFACE
# Draw vertical lines
cell_width = self.WIDTH/self.BOARD_W
for width in range(cell_width, self.WIDTH, cell_width):
pygame.draw.line(self.DSURFACE, color, [width, 0], [width, self.HEIGHT], lines_width)
# Draw horizontal lines
cell_height = self.HEIGHT/self.BOARD_H
for height in range(cell_height, self.HEIGHT, cell_height):
pygame.draw.line(self.DSURFACE, color, [0, height], [self.WIDTH, height], lines_width)
def get_mouse_pos(self):
mouse_pos = pygame.mouse.get_pos()
return mouse_pos
def register_event(self, function, event_type):
event = {event_type: function}
for e in self.registered_events:
for key, value in e.iteritems():
if (key == event_type):
value.append(function)
def get_registered_events(self, event_name):
for e in self.registered_events:
for key, value in e.iteritems():
if (key == event_name):
return value
def on_cell_lmb_down(self, mouse_pos):
registered_events = self.get_registered_events("on_cell_lmb_down")
for fn in registered_events:
fn(mouse_pos)
def on_cell_rmb_down(self, mouse_pos):
registered_events = self.get_registered_events("on_cell_rmb_down")
for fn in registered_events:
fn(mouse_pos)
def on_space_key_pressed(self):
registered_events = self.get_registered_events("on_space_key_pressed")
for fn in registered_events:
fn()
def event_handling(self, events):
# Starts event handling for the board grid
running = True
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
mouse_pos = pygame.mouse.get_pos()
cell_center_x = int(numpy.interp(mouse_pos[1], [0, self.WIDTH], [0, self.BOARD_W]))
cell_center_y = int(numpy.interp(mouse_pos[0], [0, self.HEIGHT], [0, self.BOARD_H]))
# Call every function registered as lmb_down button , with current mouse pos
# as argument
self.on_cell_lmb_down((cell_center_x, cell_center_y))
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
mouse_pos = pygame.mouse.get_pos()
cell_center_x = int(numpy.interp(mouse_pos[1], [0, self.WIDTH], [0, self.BOARD_W]))
cell_center_y = int(numpy.interp(mouse_pos[0], [0, self.HEIGHT], [0, self.BOARD_H]))
# Call every function registered as rmb_down button , with current mouse pos
# as argument
self.on_cell_rmb_down((cell_center_x, cell_center_y))
elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
self.on_space_key_pressed()
def select_cell(self, cell_position):
self.current_cell_selected = cell_position
print "board:select_cell: Cell selected: {}".format(self.current_cell_selected)
def highlight_cell(self, cell_position):
cell_x, cell_y = cell_position
x = int(numpy.interp(cell_y, [0, self.BOARD_H], [0, self.WIDTH]))
y = int(numpy.interp(cell_x, [0, self.BOARD_W], [0, self.HEIGHT]))
overlay = pygame.Surface((self.WIDTH/self.BOARD_W, self.HEIGHT/self.BOARD_H))
overlay.set_alpha(self.highlight_color_alpha)
overlay.fill(self.highlight_color)
self.DSURFACE.blit(overlay, (x,y))
# Check if a given value is in the board
def inBoard(self, value):
for cell_row in self.board:
for cell in cell_row:
if cell.id == value:
return True
return False
def filled(self):
for arr in self.board:
for cell in arr:
if cell.isEmpty():
return False
return True
def update(self, events):
self.DSURFACE.fill(self.background_color)
self.__draw_grid(self.lines_color, 1)
self.event_handling(events)
### Grid Update ###
# Highlight the current selected cell, if any
if self.current_cell_selected != None and self.highlight == True:
self.highlight_cell(self.current_cell_selected)
# Draws all the sprites on sprite_group to the surface
self.sprite_group.draw(self.DSURFACE)
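# Hedged usage sketch (not in the original module): the file is written for
# Python 2 (print statements, iteritems, integer division), so the demo keeps
# to that dialect; it assumes the cell module imported above is available and
# only shows how callbacks are registered and the board updated each frame.
if __name__ == "__main__":
    pygame.init()
    demo_surface = pygame.display.set_mode((300, 300))
    demo_board = Board(demo_surface, (3, 3), (255, 255, 255), (120, 120, 120))
    demo_board.init()
    demo_board.register_event(demo_board.select_cell, "on_cell_lmb_down")
    running = True
    while running:
        frame_events = pygame.event.get()
        for frame_event in frame_events:
            if frame_event.type == pygame.QUIT:
                running = False
        demo_board.update(frame_events)
        pygame.display.flip()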
|
py | 1a3fc4075de06fd665906fc8874967ac17ea6a1e | from favourites_list import *
from positional_list import *
class FavouritesListMTF(FavouritesList):
"""List of elements odered with move-to-front heuristic."""
# we override _move_up provide move to front semantics.
def _move_up(self,p):
"""Move accesses item at Position p to frony of the list."""
if p != self._data.first():
self._data.add_first(self._data.delete(p)) # delete/insert
# we overide top because list is no longer sorted
def top(self,k):
"""Generate sequence of top k elements in terms of access count."""
if not 1<=k<=len(self):
raise ValueError("Illegal value for k")
# we begin to make a copy of original list
temp = PositionalList()
for item in self._data: # positional list supports iteration
temp.add_last(item)
# we repeatedly find, report and remove element with largest count
for j in range(k):
# find and report next highest from temp
highPos = temp.first()
walk = temp.after(highPos)
while walk is not None:
if walk.element()._count > highPos.element()._count:
highPos = walk
walk = temp.after(walk)
# we have found the element with highest count
yield highPos.element()._value # report element to user
            temp.delete(highPos) # remove from temp list
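# Hedged usage sketch (not in the original module): it assumes the
# FavouritesList base class imported above follows the textbook interface and
# records accesses through access(e); top(k) is the override defined in this
# file and yields the k most accessed elements, most popular first.
if __name__ == "__main__":
    favourites = FavouritesListMTF()
    for page in ("home", "news", "home", "sport", "home", "news"):
        favourites.access(page)
    for page in favourites.top(2):
        print(page)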
|
py | 1a3fc477d504332b521056697457e88493106037 | from setuptools import setup, find_packages
import os
version = '0.2.1'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('README.txt')
+ '\n' +
read('js', 'gridster', 'test_gridster.txt')
+ '\n' +
read('CHANGES.txt'))
setup(
name='js.gridster',
version=version,
description="Fanstatic packaging of gridster",
long_description=long_description,
classifiers=[],
keywords='fanstatic jquery gridster',
author='Marco Scheidhuber',
author_email='[email protected]',
url='https://github.com/j23d/js.gridster',
license='BSD',
packages=find_packages(),
namespace_packages=['js'],
include_package_data=True,
zip_safe=False,
install_requires=[
'fanstatic',
'setuptools',
],
entry_points={
'fanstatic.libraries': [
'gridster = js.gridster:library',
],
},
)
|
py | 1a3fc4ed50c336e27206a614ad3202e8285c5d37 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import abstractmethod
from collections import Hashable
from functools import wraps
from aif360.datasets import Dataset
from aif360.decorating_metaclass import ApplyDecorator
def _make_key(args, kwargs, unhashable, kwd_mark=(object(),)):
"""Simplified version of functools."""
key = args
if kwargs:
key += kwd_mark
for item in kwargs.items():
if not isinstance(item[1], Hashable):
return unhashable
key += item
return key
def memoize(func):
"""Based off functools.lru_cache (not available in Python 2).
A little inefficient but we're just storing floats.
"""
sentinal = object()
unhashable = object()
cache = {}
@wraps(func)
def wrapper(*args, **kwargs):
key = _make_key(args, kwargs, unhashable)
if key is unhashable:
return func(*args, **kwargs)
result = cache.get(key, sentinal)
if result is not sentinal:
return result
result = func(*args, **kwargs)
cache[key] = result
return result
return wrapper
BaseClass = ApplyDecorator(memoize)
class Metric(BaseClass):
"""Base class for metrics."""
@abstractmethod
def __init__(self, dataset):
"""Initialize a `Metrics` object.
Args:
dataset (Dataset): Dataset on which to evaluate metrics.
"""
if isinstance(dataset, Dataset):
self.dataset = dataset
else:
raise TypeError("dataset must be of Dataset class")
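# Hedged illustration (not in the original module): memoize above can also be
# applied directly as a decorator; calls with hashable arguments are cached,
# while unhashable arguments fall through to the wrapped function unchanged.
@memoize
def _slow_add(a, b):
    print("computing", a, b)
    return a + b
# _slow_add(1, 2)  # computes, prints, and caches 3
# _slow_add(1, 2)  # returns the cached 3 without printing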
|
py | 1a3fc4fe8bd0552e327fb7b7c849954a92666129 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
from google.cloud.aiplatform_v1beta1.types import io
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={"Dataset", "ImportDataConfig", "ExportDataConfig",},
)
class Dataset(proto.Message):
r"""A collection of DataItems and Annotations on them.
Attributes:
name (str):
Output only. The resource name of the
Dataset.
display_name (str):
Required. The user-defined name of the
Dataset. The name can be up to 128 characters
long and can be consist of any UTF-8 characters.
description (str):
Optional. The description of the Dataset.
metadata_schema_uri (str):
Required. Points to a YAML file stored on
Google Cloud Storage describing additional
information about the Dataset. The schema is
defined as an OpenAPI 3.0.2 Schema Object. The
schema files that can be used here are found in
gs://google-cloud-
aiplatform/schema/dataset/metadata/.
metadata (google.protobuf.struct_pb2.Value):
Required. Additional information about the
Dataset.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Dataset was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Dataset was
last updated.
etag (str):
Used to perform consistent read-modify-write
updates. If not set, a blind "overwrite" update
happens.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset.LabelsEntry]):
The labels with user-defined metadata to organize your
Datasets.
Label keys and values can be no longer than 64 characters
(Unicode codepoints), can only contain lowercase letters,
numeric characters, underscores and dashes. International
characters are allowed. No more than 64 user labels can be
associated with one Dataset (System labels are excluded).
See https://goo.gl/xmQnxf for more information and examples
of labels. System reserved label keys are prefixed with
"aiplatform.googleapis.com/" and are immutable. Following
system labels exist for each Dataset:
- "aiplatform.googleapis.com/dataset_metadata_schema":
output only, its value is the
[metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]
title.
encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
Customer-managed encryption key spec for a
Dataset. If set, this Dataset and all sub-
resources of this Dataset will be secured by
this key.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=16,)
metadata_schema_uri = proto.Field(proto.STRING, number=3,)
metadata = proto.Field(proto.MESSAGE, number=8, message=struct_pb2.Value,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
etag = proto.Field(proto.STRING, number=6,)
labels = proto.MapField(proto.STRING, proto.STRING, number=7,)
encryption_spec = proto.Field(
proto.MESSAGE, number=11, message=gca_encryption_spec.EncryptionSpec,
)
class ImportDataConfig(proto.Message):
r"""Describes the location from where we import data into a
Dataset, together with the labels that will be applied to the
DataItems and the Annotations.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource):
The Google Cloud Storage location for the
input content.
This field is a member of `oneof`_ ``source``.
data_item_labels (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig.DataItemLabelsEntry]):
Labels that will be applied to newly imported DataItems. If
an identical DataItem as one being imported already exists
in the Dataset, then these labels will be appended to these
of the already existing one, and if labels with identical
key is imported before, the old label value will be
overwritten. If two DataItems are identical in the same
import data operation, the labels will be combined and if
key collision happens in this case, one of the values will
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
labels specified inside index file referenced by
[import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri],
e.g. jsonl file.
import_schema_uri (str):
Required. Points to a YAML file stored on Google Cloud
Storage describing the import format. Validation will be
done against the schema. The schema is defined as an
`OpenAPI 3.0.2 Schema
Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
"""
gcs_source = proto.Field(
proto.MESSAGE, number=1, oneof="source", message=io.GcsSource,
)
data_item_labels = proto.MapField(proto.STRING, proto.STRING, number=2,)
import_schema_uri = proto.Field(proto.STRING, number=4,)
class ExportDataConfig(proto.Message):
r"""Describes what part of the Dataset is to be exported, the
destination of the export and how to export.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination):
The Google Cloud Storage location where the output is to be
written to. In the given directory a new directory will be
created with name:
``export-data-<dataset-display-name>-<timestamp-of-export-call>``
where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
format. All export output will be written into that
directory. Inside that directory, annotations with the same
schema will be grouped into sub directories which are named
with the corresponding annotations' schema title. Inside
these sub directories, a schema.yaml will be created to
describe the output format.
This field is a member of `oneof`_ ``destination``.
annotations_filter (str):
A filter on Annotations of the Dataset. Only Annotations on
to-be-exported DataItems(specified by [data_items_filter][])
that match this filter will be exported. The filter syntax
is the same as in
[ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
"""
gcs_destination = proto.Field(
proto.MESSAGE, number=1, oneof="destination", message=io.GcsDestination,
)
annotations_filter = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
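# Hedged construction sketch (not part of the generated module): the field
# names come from the message definitions above, while the GCS URIs are
# placeholders rather than real schema or bucket locations.
def _build_demo_messages():
    dataset = Dataset(
        display_name="demo-dataset",
        metadata_schema_uri="gs://example-bucket/metadata-schema.yaml",
    )
    import_config = ImportDataConfig(
        gcs_source=io.GcsSource(uris=["gs://example-bucket/import.jsonl"]),
        import_schema_uri="gs://example-bucket/import-schema.yaml",
    )
    return dataset, import_config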
|
py | 1a3fc532c55dd0592134520d0a7c896cebe965b5 | from flask import Flask, jsonify, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Connect to Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Cafe TABLE Configuration
class Cafe(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
map_url = db.Column(db.String(500), nullable=False)
img_url = db.Column(db.String(500), nullable=False)
location = db.Column(db.String(250), nullable=False)
seats = db.Column(db.String(250), nullable=False)
has_toilet = db.Column(db.Boolean, nullable=False)
has_wifi = db.Column(db.Boolean, nullable=False)
has_sockets = db.Column(db.Boolean, nullable=False)
can_take_calls = db.Column(db.Boolean, nullable=False)
coffee_price = db.Column(db.String(250), nullable=True)
def to_dict(self):
"""Convert a database table to a dictionary."""
return {column.name: getattr(self, column.name) for column in self.__table__.columns}
@app.route("/")
def home():
return render_template("index.html")
# HTTP GET - Read Records
@app.route("/random", methods=["GET"])
def get_random():
"""Return a random cafe from the database."""
random_cafe = db.session.query(Cafe).order_by(db.func.random()).first()
return jsonify(random_cafe.to_dict())
@app.route("/all", methods=["GET"])
def get_all():
"""Return all cafes from the database."""
return jsonify(cafes=[cafe.to_dict() for cafe in db.session.query(Cafe).all()])
@app.route("/search", methods=["GET"])
def search():
"""Search for a cafe by location."""
try:
location = request.args.get("loc").capitalize()
except AttributeError:
return jsonify(error={"No Search Terms": "Please provide a valid search term."}), 400
else:
results = db.session.query(Cafe).filter_by(location=location).all()
if results:
return jsonify(cafes=[cafe.to_dict() for cafe in results])
else:
return jsonify(error={"Not Found": "Sorry, we don't have a cafe at that location."})
# HTTP POST - Create Record
@app.route("/add", methods=["POST"])
def add_cafe():
"""Add a new cafe to the database."""
new_cafe = Cafe(
name=request.form.get("name"),
map_url=request.form.get("map_url"),
img_url=request.form.get("img_url"),
location=request.form.get("location"),
has_sockets=bool(request.form.get("sockets")),
has_toilet=bool(request.form.get("toilet")),
has_wifi=bool(request.form.get("wifi")),
can_take_calls=bool(request.form.get("calls")),
seats=request.form.get("seats"),
coffee_price=request.form.get("coffee_price"),
)
db.session.add(new_cafe)
db.session.commit()
return jsonify(response={"success": "Successfully added the new cafe."})
# HTTP PUT/PATCH - Update Record
@app.route("/update-price/<int:cafe_id>", methods=["PATCH"])
def update_price(cafe_id):
"""Update the coffee price of a cafe."""
try:
price = request.args.get("new_price")
except AttributeError:
return jsonify(error={"No Data": "Please provide a valid price."}), 400
else:
cafe = db.session.query(Cafe).filter_by(id=cafe_id).first()
if not cafe:
return jsonify(error={"Not Found": "Sorry, a cafe with that id does not exist."}), 404
else:
cafe.coffee_price = price
db.session.commit()
return jsonify(response={"success": "Successfully updated the price of the cafe."})
# HTTP DELETE - Delete Record
@app.route("/report-closed/<cafe_id>", methods=["DELETE"])
def report_closed(cafe_id):
"""Report a closed cafe and delete it from the database."""
try:
api_key = request.args.get("api_key")
except AttributeError:
return jsonify(error={"No Data": "Please provide a valid API key."}), 400
else:
if api_key != "secretkey":
return jsonify(error={"Invalid API Key": "Please provide a valid API Key."}), 403
else:
cafe = db.session.query(Cafe).filter_by(id=cafe_id).first()
if not cafe:
return jsonify(error={"Not Found": "Sorry, a cafe with that id does not exist."}), 404
else:
db.session.delete(cafe)
db.session.commit()
return jsonify(response={"success": "Successfully reported and deleted the cafe."})
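# Hedged usage sketch (not part of the original app): Flask's built-in test
# client exercises the routes above without a running server; it assumes
# cafes.db already exists and contains at least one cafe with id 1.
def _demo_requests():
    with app.test_client() as http:
        print(http.get("/random").get_json())
        print(http.get("/search?loc=Peckham").get_json())
        print(http.patch("/update-price/1?new_price=2.80").get_json())
        print(http.delete("/report-closed/1?api_key=secretkey").get_json())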
if __name__ == '__main__':
app.run(debug=True)
|
py | 1a3fc5d5d09fed1da40799afea97429a83a6dc4c | # Generated by Django 3.1 on 2020-08-26 03:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rosters', '0023_auto_20200821_0330'),
]
operations = [
migrations.AlterField(
model_name='shiftrule',
name='roles',
field=models.ManyToManyField(related_name='shift_rule_role_set', through='rosters.ShiftRuleRole', to='rosters.Role'),
),
]
|
py | 1a3fc5ed60ac9f2575ad0ae821ecb7694417a04f | # Generated by Django 3.0.5 on 2020-04-19 16:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0003_auto_20200419_1557'),
]
operations = [
migrations.AlterField(
model_name='person',
name='gif_search_type',
field=models.CharField(choices=[('RAW_URL', 'URL'), ('GIPHY_TOP_SEARCH', 'Giphy Top Search'), ('GIPHY_RANDOM_SEARCH', 'Giphy Random Search'), ('GIPHY_SHUFFLE_SEARCH', 'Giphy Shuffle Search')], default='RAW_URL', max_length=100),
),
]
|
py | 1a3fc8d44ddb39c673e1a3d41a2b5999f59a68bc | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Optional
import numpy as np
from parameterized import parameterized
from monai.apps.pathology.transforms import TileOnGrid
from tests.utils import TEST_NDARRAYS, assert_allclose
TEST_CASES = []
for tile_count in [16, 64]:
for tile_size in [8, 32]:
for filter_mode in ["min", "max", "random"]:
for background_val in [255, 0]:
TEST_CASES.append(
[
{
"tile_count": tile_count,
"tile_size": tile_size,
"filter_mode": filter_mode,
"random_offset": False,
"background_val": background_val,
}
]
)
for tile_size in [8, 16]:
for step in [4, 8]:
TEST_CASES.append([{"tile_count": 16, "step": step, "tile_size": tile_size}])
TESTS = []
for p in TEST_NDARRAYS:
for tc in TEST_CASES:
TESTS.append([p, *tc])
TEST_CASES2 = []
for tile_count in [16, 64]:
for tile_size in [8, 32]:
for filter_mode in ["min", "max", "random"]:
for background_val in [255, 0]:
TEST_CASES2.append(
[
{
"tile_count": tile_count,
"tile_size": tile_size,
"filter_mode": filter_mode,
"random_offset": True,
"background_val": background_val,
}
]
)
TESTS2 = []
for p in TEST_NDARRAYS:
for tc in TEST_CASES2:
TESTS2.append([p, *tc])
def make_image(
tile_count: int,
tile_size: int,
step: int = 0,
random_offset: bool = False,
filter_mode: Optional[str] = None,
seed=123,
**kwargs,
):
tile_count = int(np.sqrt(tile_count))
pad = 0
if random_offset:
pad = 3
if step == 0:
step = tile_size
image = np.random.randint(
200,
size=[3, (tile_count - 1) * step + tile_size + pad, (tile_count - 1) * step + tile_size + pad],
dtype=np.uint8,
)
imlarge = image
random_state = np.random.RandomState(seed)
if random_offset:
pad_h = image.shape[1] % tile_size
pad_w = image.shape[2] % tile_size
offset = (random_state.randint(pad_h) if pad_h > 0 else 0, random_state.randint(pad_w) if pad_w > 0 else 0)
image = image[:, offset[0] :, offset[1] :]
tiles_list = []
for x in range(tile_count):
for y in range(tile_count):
tiles_list.append(image[:, x * step : x * step + tile_size, y * step : y * step + tile_size])
tiles = np.stack(tiles_list, axis=0) # type: ignore
if (filter_mode == "min" or filter_mode == "max") and len(tiles) > tile_count ** 2:
tiles = tiles[np.argsort(tiles.sum(axis=(1, 2, 3)))]
return imlarge, tiles
class TestTileOnGrid(unittest.TestCase):
@parameterized.expand(TESTS)
def test_tile_patch_single_call(self, in_type, input_parameters):
img, tiles = make_image(**input_parameters)
input_img = in_type(img)
tiler = TileOnGrid(**input_parameters)
output = tiler(input_img)
assert_allclose(output, tiles, type_test=False)
@parameterized.expand(TESTS2)
def test_tile_patch_random_call(self, in_type, input_parameters):
img, tiles = make_image(**input_parameters, seed=123)
input_img = in_type(img)
tiler = TileOnGrid(**input_parameters)
tiler.set_random_state(seed=123)
output = tiler(input_img)
assert_allclose(output, tiles, type_test=False)
if __name__ == "__main__":
unittest.main()
|
py | 1a3fc92e6ee9389c839a33a04ff53b1af1527868 | from keras import activations, layers, models
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
from tfreplknet.drop import DropPath
@register_keras_serializable(package='TFRepLKNet')
class FFN(layers.Layer):
def __init__(self, ratio, dropout, **kwargs):
super().__init__(**kwargs)
self.input_spec = layers.InputSpec(ndim=4)
self.ratio = ratio
self.dropout = dropout
@shape_type_conversion
def build(self, input_shape):
channels = input_shape[-1]
if channels is None:
raise ValueError('Channel dimension of the inputs should be defined. Found `None`.')
self.input_spec = layers.InputSpec(ndim=4, axes={-1: channels})
# noinspection PyAttributeOutsideInit
self.bn = layers.BatchNormalization(momentum=0.1, epsilon=1.001e-5, name='preffn_bn')
# noinspection PyAttributeOutsideInit
self.pw1 = models.Sequential([
layers.Conv2D(int(channels * self.ratio), 1, use_bias=False, name=f'{self.name}/pw1/conv'),
layers.BatchNormalization(momentum=0.1, epsilon=1.001e-5, name=f'{self.name}/pw1/bn')
], name='pw1')
# noinspection PyAttributeOutsideInit
self.pw2 = models.Sequential([
layers.Conv2D(channels, 1, use_bias=False, name=f'{self.name}/pw2/conv'),
layers.BatchNormalization(momentum=0.1, epsilon=1.001e-5, name=f'{self.name}/pw2/bn')
], name='pw2')
# noinspection PyAttributeOutsideInit
self.drop = DropPath(self.dropout)
super().build(input_shape)
def call(self, inputs, *args, **kwargs):
outputs = self.bn(inputs)
outputs = self.pw1(outputs)
outputs = activations.gelu(outputs)
outputs = self.pw2(outputs)
outputs = inputs + self.drop(outputs)
return outputs
@shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({
'ratio': self.ratio,
'dropout': self.dropout
})
return config
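# A minimal smoke test for the FFN block above (illustrative only; the input
# shape and hyper-parameters below are arbitrary assumptions, not values taken
# from TFRepLKNet itself).
if __name__ == '__main__':
    import tensorflow as tf
    ffn = FFN(ratio=4, dropout=0.1)
    inputs = tf.zeros([2, 7, 7, 64])  # NHWC tensor with a defined channel axis
    outputs = ffn(inputs)
    print(outputs.shape)  # the residual block preserves the shape: (2, 7, 7, 64)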
|
pyde | 1a3fc9440555b5d1f6548a705cb0d63650dc7ce1 | add_library('video')
add_library('opencv_processing')
video = None
opencv = None
def setup():
    global video, opencv  # assign the module-level handles used in draw()
    size(720, 480, P2D)
video = Movie(this, "street.mov")
opencv = OpenCV(this, 720, 480)
opencv.startBackgroundSubtraction(5, 3, 0.5)
video.loop()
video.play()
def draw():
image(video, 0, 0)
opencv.loadImage(video)
opencv.updateBackground()
opencv.dilate()
opencv.erode()
noFill()
stroke(255, 0, 0)
strokeWeight(3)
for contour in opencv.findContours():
contour.draw()
def movieEvent(m):
m.read()
|
py | 1a3fc947149d6e17a2bb4b3584638120788f8651 | '''
Skip-thought vectors
'''
import os
import warnings  # needed by load_params() below
import theano
import theano.tensor as tensor
import cPickle as pkl
import numpy
import copy
import nltk
from collections import OrderedDict, defaultdict
from scipy.linalg import norm
from nltk.tokenize import word_tokenize
profile = False
#-----------------------------------------------------------------------------#
# Specify model and table locations here
#-----------------------------------------------------------------------------#
path_to_models = 'model/'
path_to_tables = 'model/'
#-----------------------------------------------------------------------------#
path_to_umodel = path_to_models + 'uni_skip.npz'
path_to_bmodel = path_to_models + 'bi_skip.npz'
def load_model():
"""
Load the model with saved tables
"""
# Load model options
print 'Loading model parameters...'
with open('%s.pkl'%path_to_umodel, 'rb') as f:
uoptions = pkl.load(f)
with open('%s.pkl'%path_to_bmodel, 'rb') as f:
boptions = pkl.load(f)
# Load parameters
uparams = init_params(uoptions)
uparams = load_params(path_to_umodel, uparams)
utparams = init_tparams(uparams)
bparams = init_params_bi(boptions)
bparams = load_params(path_to_bmodel, bparams)
btparams = init_tparams(bparams)
# Extractor functions
print 'Compiling encoders...'
embedding, x_mask, ctxw2v = build_encoder(utparams, uoptions)
f_w2v = theano.function([embedding, x_mask], ctxw2v, name='f_w2v')
embedding, x_mask, ctxw2v = build_encoder_bi(btparams, boptions)
f_w2v2 = theano.function([embedding, x_mask], ctxw2v, name='f_w2v2')
# Tables
print 'Loading tables...'
utable, btable = load_tables()
# Store everything we need in a dictionary
print 'Packing up...'
model = {}
model['uoptions'] = uoptions
model['boptions'] = boptions
model['utable'] = utable
model['btable'] = btable
model['f_w2v'] = f_w2v
model['f_w2v2'] = f_w2v2
return model
def load_tables():
"""
Load the tables
"""
words = []
utable = numpy.load(path_to_tables + 'utable.npy')
btable = numpy.load(path_to_tables + 'btable.npy')
f = open(path_to_tables + 'dictionary.txt', 'rb')
for line in f:
words.append(line.decode('utf-8').strip())
f.close()
utable = OrderedDict(zip(words, utable))
btable = OrderedDict(zip(words, btable))
return utable, btable
class Encoder(object):
"""
Sentence encoder.
"""
def __init__(self, model):
self._model = model
def encode(self, X, use_norm=True, verbose=True, batch_size=128, use_eos=False):
"""
Encode sentences in the list X. Each entry will return a vector
"""
return encode(self._model, X, use_norm, verbose, batch_size, use_eos)
def encode(model, X, use_norm=True, verbose=True, batch_size=128, use_eos=False):
"""
Encode sentences in the list X. Each entry will return a vector
"""
# first, do preprocessing
X = preprocess(X)
# word dictionary and init
d = defaultdict(lambda : 0)
for w in model['utable'].keys():
d[w] = 1
ufeatures = numpy.zeros((len(X), model['uoptions']['dim']), dtype='float32')
bfeatures = numpy.zeros((len(X), 2 * model['boptions']['dim']), dtype='float32')
# length dictionary
ds = defaultdict(list)
captions = [s.split() for s in X]
for i,s in enumerate(captions):
ds[len(s)].append(i)
# Get features. This encodes by length, in order to avoid wasting computation
for k in ds.keys():
if verbose:
print k
numbatches = len(ds[k]) / batch_size + 1
for minibatch in range(numbatches):
caps = ds[k][minibatch::numbatches]
if use_eos:
uembedding = numpy.zeros((k+1, len(caps), model['uoptions']['dim_word']), dtype='float32')
bembedding = numpy.zeros((k+1, len(caps), model['boptions']['dim_word']), dtype='float32')
else:
uembedding = numpy.zeros((k, len(caps), model['uoptions']['dim_word']), dtype='float32')
bembedding = numpy.zeros((k, len(caps), model['boptions']['dim_word']), dtype='float32')
for ind, c in enumerate(caps):
caption = captions[c]
for j in range(len(caption)):
if d[caption[j]] > 0:
uembedding[j,ind] = model['utable'][caption[j]]
bembedding[j,ind] = model['btable'][caption[j]]
else:
uembedding[j,ind] = model['utable']['UNK']
bembedding[j,ind] = model['btable']['UNK']
if use_eos:
uembedding[-1,ind] = model['utable']['<eos>']
bembedding[-1,ind] = model['btable']['<eos>']
if use_eos:
uff = model['f_w2v'](uembedding, numpy.ones((len(caption)+1,len(caps)), dtype='float32'))
bff = model['f_w2v2'](bembedding, numpy.ones((len(caption)+1,len(caps)), dtype='float32'))
else:
uff = model['f_w2v'](uembedding, numpy.ones((len(caption),len(caps)), dtype='float32'))
bff = model['f_w2v2'](bembedding, numpy.ones((len(caption),len(caps)), dtype='float32'))
if use_norm:
for j in range(len(uff)):
uff[j] /= norm(uff[j])
bff[j] /= norm(bff[j])
for ind, c in enumerate(caps):
ufeatures[c] = uff[ind]
bfeatures[c] = bff[ind]
features = numpy.c_[ufeatures, bfeatures]
return features
def preprocess(text):
"""
Preprocess text for encoder
"""
X = []
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
for t in text:
sents = sent_detector.tokenize(t)
result = ''
for s in sents:
tokens = word_tokenize(s)
result += ' ' + ' '.join(tokens)
X.append(result)
return X
def nn(model, text, vectors, query, k=5):
"""
Return the nearest neighbour sentences to query
text: list of sentences
vectors: the corresponding representations for text
query: a string to search
"""
qf = encode(model, [query])
qf /= norm(qf)
scores = numpy.dot(qf, vectors.T).flatten()
sorted_args = numpy.argsort(scores)[::-1]
sentences = [text[a] for a in sorted_args[:k]]
print 'QUERY: ' + query
print 'NEAREST: '
for i, s in enumerate(sentences):
print s, sorted_args[i]
def word_features(table):
"""
Extract word features into a normalized matrix
"""
features = numpy.zeros((len(table), 620), dtype='float32')
keys = table.keys()
for i in range(len(table)):
f = table[keys[i]]
features[i] = f / norm(f)
return features
def nn_words(table, wordvecs, query, k=10):
"""
Get the nearest neighbour words
"""
keys = table.keys()
qf = table[query]
scores = numpy.dot(qf, wordvecs.T).flatten()
sorted_args = numpy.argsort(scores)[::-1]
words = [keys[a] for a in sorted_args[:k]]
print 'QUERY: ' + query
print 'NEAREST: '
for i, w in enumerate(words):
print w
def _p(pp, name):
"""
make prefix-appended name
"""
return '%s_%s'%(pp, name)
def init_tparams(params):
"""
initialize Theano shared variables according to the initial parameters
"""
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def load_params(path, params):
"""
load parameters
"""
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'gru': ('param_init_gru', 'gru_layer')}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
def init_params(options):
"""
initialize all parameters needed for the encoder
"""
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
# encoder: GRU
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'], dim=options['dim'])
return params
def init_params_bi(options):
"""
    initialize all parameters needed for the bidirectional encoder
"""
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
# encoder: GRU
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'], dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'], dim=options['dim'])
return params
def build_encoder(tparams, options):
"""
build an encoder, given pre-computed word embeddings
"""
# word embedding (source)
embedding = tensor.tensor3('embedding', dtype='float32')
x_mask = tensor.matrix('x_mask', dtype='float32')
# encoder
proj = get_layer(options['encoder'])[1](tparams, embedding, options,
prefix='encoder',
mask=x_mask)
ctx = proj[0][-1]
return embedding, x_mask, ctx
def build_encoder_bi(tparams, options):
"""
build bidirectional encoder, given pre-computed word embeddings
"""
# word embedding (source)
embedding = tensor.tensor3('embedding', dtype='float32')
embeddingr = embedding[::-1]
x_mask = tensor.matrix('x_mask', dtype='float32')
xr_mask = x_mask[::-1]
# encoder
proj = get_layer(options['encoder'])[1](tparams, embedding, options,
prefix='encoder',
mask=x_mask)
projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)
return embedding, x_mask, ctx
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.1, ortho=True):
if nout == None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = numpy.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype('float32')
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
"""
parameter init for GRU
"""
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
"""
Forward pass through GRU layer
"""
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
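#-----------------------------------------------------------------------------#
# Example usage (illustrative sketch): requires the pre-trained skip-thoughts
# model and table files referenced by path_to_models / path_to_tables above.
#-----------------------------------------------------------------------------#
if __name__ == '__main__':
    model = load_model()
    encoder = Encoder(model)
    vectors = encoder.encode(['A man is playing a guitar.',
                              'A child is riding a bicycle.'])
    # One row per sentence: uni-skip features concatenated with bi-skip features.
    print vectors.shape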
|
py | 1a3fca0c73744ab00751e83fcea217a07c8f3d0d | import unittest
from keras_wc_embd import get_batch_input
class TestGetBatchInput(unittest.TestCase):
def test_shape(self):
sentences = [
['All', 'work', 'and', 'no', 'play'],
['makes', 'Jack', 'a', 'dull', 'boy', '.'],
]
word_embd_input, char_embd_input = get_batch_input(
sentences,
max_word_len=5,
word_dict={},
char_dict={},
)
self.assertEqual(word_embd_input.shape, (2, 6))
self.assertEqual(char_embd_input.shape, (2, 6, 5))
for sentence_index in range(2):
for word_index in range(6):
if word_index < len(sentences[sentence_index]):
self.assertEqual(
1,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
for char_index in range(5):
if char_index < len(sentences[sentence_index][word_index]):
self.assertEqual(
1,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
0,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
0,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
def test_mapping(self):
sentences = [
['All', 'work', 'and', 'no', 'play'],
['makes', 'Jack', 'a', 'dull', 'boy', '.'],
]
word_embd_input, char_embd_input = get_batch_input(
sentences,
max_word_len=5,
word_dict={'All': 2, 'Work': 0},
char_dict={'a': 3},
word_ignore_case=False,
char_ignore_case=False,
)
self.assertEqual(word_embd_input.shape, (2, 6))
self.assertEqual(char_embd_input.shape, (2, 6, 5))
for sentence_index in range(2):
for word_index in range(6):
if word_index < len(sentences[sentence_index]):
if sentences[sentence_index][word_index] == 'All':
self.assertEqual(
2,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
else:
self.assertEqual(
1,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
for char_index in range(5):
if char_index < len(sentences[sentence_index][word_index]):
if sentences[sentence_index][word_index][char_index] == 'a':
self.assertEqual(
3,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
1,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
0,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
0,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
def test_ignore_case(self):
sentences = [
['All', 'work', 'and', 'no', 'play'],
['makes', 'Jack', 'a', 'dull', 'boy', '.'],
]
word_embd_input, char_embd_input = get_batch_input(
sentences,
max_word_len=5,
word_dict={'all': 2, 'Work': 0},
char_dict={'a': 3},
word_ignore_case=True,
char_ignore_case=True,
)
self.assertEqual(word_embd_input.shape, (2, 6))
self.assertEqual(char_embd_input.shape, (2, 6, 5))
for sentence_index in range(2):
for word_index in range(6):
if word_index < len(sentences[sentence_index]):
if sentences[sentence_index][word_index] == 'All':
self.assertEqual(
2,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
else:
self.assertEqual(
1,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
for char_index in range(5):
if char_index < len(sentences[sentence_index][word_index]):
if sentences[sentence_index][word_index][char_index].lower() == 'a':
self.assertEqual(
3,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
1,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
0,
char_embd_input[sentence_index, word_index, char_index].tolist(),
(sentence_index, word_index),
)
else:
self.assertEqual(
0,
word_embd_input[sentence_index, word_index],
(sentence_index, word_index),
)
def test_exceed_len(self):
sentences = [
['All', 'work', 'and', 'no', 'play'],
['makes', 'Jack', 'a', 'dull', 'boy', '.'],
]
word_embd_input, char_embd_input = get_batch_input(
sentences,
max_word_len=2,
word_dict={},
char_dict={},
)
self.assertEqual(word_embd_input.shape, (2, 6))
self.assertEqual(char_embd_input.shape, (2, 6, 2))
|
py | 1a3fca7698ce440863d4b1805f9c635081c38f37 | # Help packages
from sklearn.datasets import load_breast_cancer
import numpy as np
import matplotlib.pyplot as plt
# Classifiers
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
# Parallel gridsearch
from paragrid import paragrid
if __name__ == "__main__":
# spaces
space_gpdt = {'learning_rate': [0.1,0.3,0.4, 0.6,0.8, 1],
'n_estimators': [200, 400, 600, 800, 1000],
'max_depth': [2]}
# Classification
breast_cancer = load_breast_cancer()
X, y = breast_cancer.data, breast_cancer.target
reg_cls_gpdt = GradientBoostingClassifier()
# xbg_cls = XGBClassifier()
lgbm_cls = LGBMClassifier()
params = paragrid(model=reg_cls_gpdt, space=space_gpdt, func_type='ML', own_trial=True)
param, results = params.gridsearch(optimize=True,X=X, y=y, target = 'max', order=False, niter=5)
print(params.score())
best_param = params.score()
|
py | 1a3fcc4dc9f976c7bca2466b825b1a8f3fb47887 | # This file is a demo for the 'Isothermal_Monolith_Simulator' object
import sys
sys.path.append('../..')
from catalyst.isothermal_monolith_catalysis import *
# Read in the data (data is now a dictionary containing the data we want)
data = naively_read_data_file("inputfiles/SCR_all-ages_300C.txt",factor=5)
# Testing
sim = Isothermal_Monolith_Simulator()
sim.add_axial_dim(0,5)
sim.add_axial_dataset(5) # Location of observations (in cm)
sim.add_temporal_dim(0,137)
sim.add_temporal_dataset(data["time"]) #Temporal observations (in s)
sim.add_age_set(["Unaged"])
sim.add_data_age_set(["Unaged"]) # Data observations can be a sub-set
sim.add_temperature_set(["300C"])
sim.add_data_temperature_set(["300C"]) # Data observations can be a sub-set
sim.add_gas_species(["NH3","H2O","O2","NO","NO2","N2O","N2"])
sim.add_data_gas_species(["NH3","NO","NO2","N2O"]) # Data observations can be a sub-set
sim.set_data_values_for("NH3","Unaged","300C",5,data["time"],data["NH3_Unaged"])
sim.set_data_values_for("NO","Unaged","300C",5,data["time"],data["NO_Unaged"])
sim.set_data_values_for("NO2","Unaged","300C",5,data["time"],data["NO2_Unaged"])
sim.set_data_values_for("N2O","Unaged","300C",5,data["time"],data["N2O_Unaged"])
#Clear up memory space after we don't need the dictionary anymore
data.clear()
sim.add_surface_species(["q1","q2a","q2b","q3a","q3b","q3c","q4a","q4b"])
sim.add_surface_sites(["S1","S2","S3a","S3b","S3c"])
sim.add_reactions({"r1": ReactionType.EquilibriumArrhenius,
"r2a": ReactionType.EquilibriumArrhenius,
"r2b": ReactionType.EquilibriumArrhenius,
"r3a": ReactionType.EquilibriumArrhenius,
"r3b": ReactionType.EquilibriumArrhenius,
"r3c": ReactionType.EquilibriumArrhenius,
"r4a": ReactionType.EquilibriumArrhenius,
"r4b": ReactionType.EquilibriumArrhenius,
"r5": ReactionType.Arrhenius,
"r6": ReactionType.Arrhenius,
"r7f": ReactionType.Arrhenius,
"r7r": ReactionType.Arrhenius,
"r8": ReactionType.Arrhenius,
"r9": ReactionType.Arrhenius,
"r10": ReactionType.Arrhenius,
"r11": ReactionType.Arrhenius,
"r12": ReactionType.Arrhenius,
"r13a": ReactionType.Arrhenius,
"r14a": ReactionType.Arrhenius,
"r15af": ReactionType.Arrhenius,
"r15ar": ReactionType.Arrhenius,
"r16a": ReactionType.Arrhenius,
"r17a": ReactionType.Arrhenius,
"r18a": ReactionType.Arrhenius,
"r19a": ReactionType.Arrhenius,
"r20a": ReactionType.Arrhenius,
"r13b": ReactionType.Arrhenius,
"r14b": ReactionType.Arrhenius,
"r15bf": ReactionType.Arrhenius,
"r15br": ReactionType.Arrhenius,
"r16b": ReactionType.Arrhenius,
"r17b": ReactionType.Arrhenius,
"r18b": ReactionType.Arrhenius,
"r19b": ReactionType.Arrhenius,
"r20b": ReactionType.Arrhenius,
"r21": ReactionType.Arrhenius,
"r22": ReactionType.Arrhenius,
"r23f": ReactionType.Arrhenius,
"r23r": ReactionType.Arrhenius,
"r24": ReactionType.Arrhenius,
"r25": ReactionType.Arrhenius,
"r26": ReactionType.Arrhenius,
"r27": ReactionType.Arrhenius,
"r28": ReactionType.Arrhenius,
"r29": ReactionType.Arrhenius,
"r30": ReactionType.Arrhenius,
"r31f": ReactionType.Arrhenius,
"r31r": ReactionType.Arrhenius,
"r32": ReactionType.Arrhenius,
"r33": ReactionType.Arrhenius,
"r34": ReactionType.Arrhenius,
"r35": ReactionType.Arrhenius,
"r36": ReactionType.Arrhenius,
"r37": ReactionType.Arrhenius,
"r38": ReactionType.Arrhenius,
"r39f": ReactionType.Arrhenius,
"r39r": ReactionType.Arrhenius,
"r40": ReactionType.Arrhenius,
"r41": ReactionType.Arrhenius,
"r42": ReactionType.Arrhenius,
"r43": ReactionType.Arrhenius,
"r44": ReactionType.Arrhenius
})
sim.set_bulk_porosity(0.3309)
sim.set_washcoat_porosity(0.4)
sim.set_reactor_radius(1)
sim.set_space_velocity_all_runs(1000) #volumes/min
sim.set_cell_density(62) # 62 cells per cm^2 (~400 cpsi)
# Setting up site balances using dicts
s1_data = {"mol_occupancy": {"q1": 1, "q4a": 1}}
s2_data = {"mol_occupancy": {"q2a": 1, "q2b": 1, "q4b": 1}}
s3a_data = {"mol_occupancy": {"q3a": 1}}
s3b_data = {"mol_occupancy": {"q3b": 1}}
s3c_data = {"mol_occupancy": {"q3c": 1}}
sim.set_site_balance("S1",s1_data)
sim.set_site_balance("S2",s2_data)
sim.set_site_balance("S3a",s3a_data)
sim.set_site_balance("S3b",s3b_data)
sim.set_site_balance("S3c",s3c_data)
# Reaction specification information (must correspond to correct reaction type)
# EquilibriumArrhenius
r1_equ = {"parameters": {"A": 250000, "E": 0, "dH": -54547.9, "dS": -29.9943},
"mol_reactants": {"S1": 1, "NH3": 1},
"mol_products": {"q1": 1},
"rxn_orders": {"S1": 1, "NH3": 1, "q1": 1}
}
r2a_equ = {"parameters": {"A": 300000, "E": 0, "dH": -78073.843, "dS": -35.311574},
"mol_reactants": {"S2": 1, "NH3": 1},
"mol_products": {"q2a": 1},
"rxn_orders": {"S2": 1, "NH3": 1, "q2a": 1}
}
r2b_equ = {"parameters": {"A": 150000, "E": 0, "dH": -78064.167, "dS": -46.821878},
"mol_reactants": {"q2a": 1, "NH3": 1},
"mol_products": {"q2b": 1},
"rxn_orders": {"q2a": 1, "NH3": 1, "q2b": 1}
}
r3a_equ = {"parameters": {"A": 2500000, "E": 0, "dH": -91860.8, "dS": -28.9292},
"mol_reactants": {"S3a": 1, "NH3": 1},
"mol_products": {"q3a": 1},
"rxn_orders": {"S3a": 1, "NH3": 1, "q3a": 1}
}
r3b_equ = {"parameters": {"A": 2500000, "E": 0, "dH": -91860.8, "dS": -28.9292},
"mol_reactants": {"S3b": 1, "NH3": 1},
"mol_products": {"q3b": 1},
"rxn_orders": {"S3b": 1, "NH3": 1, "q3b": 1}
}
r3c_equ = {"parameters": {"A": 2500000, "E": 0, "dH": -91860.8, "dS": -28.9292},
"mol_reactants": {"S3c": 1, "NH3": 1},
"mol_products": {"q3c": 1},
"rxn_orders": {"S3c": 1, "NH3": 1, "q3c": 1}
}
r4a_equ = {"parameters": {"A": 44000, "E": 0, "dH": -32099.1, "dS": -24.2494},
"mol_reactants": {"S1": 1, "H2O": 1},
"mol_products": {"q4a": 1},
"rxn_orders": {"S1": 1, "H2O": 1, "q4a": 1}
}
r4b_equ = {"parameters": {"A": 70000, "E": 0, "dH": -28889.23, "dS": -26.674},
"mol_reactants": {"S2": 1, "H2O": 1},
"mol_products": {"q4b": 1},
"rxn_orders": {"S2": 1, "H2O": 1, "q4b": 1}
}
# Arrhenius Reactions
# ---------- q1 reactions ------------
r5 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "O2": 0.75},
"mol_products": {"S1": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q1": 1, "O2": 1}
}
r6 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "O2": 1.25},
"mol_products": {"S1": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "O2": 1}
}
r7f = {"parameters": {"A": 3122.066, "E": 0},
"mol_reactants": {"S1": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S1": 1, "NO2": 1},
"rxn_orders": {"S1": 1, "NO": 1, "O2": 1}
}
r7r = {"parameters": {"A": 0.328075, "E": 0},
"mol_reactants": {"S1": 1, "NO2": 1},
"mol_products": {"S1": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S1": 1, "NO2": 1}
}
r8 = {"parameters": {"A": 16782330, "E": 0},
"mol_reactants": {"q1": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S1": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO": 1, "O2": 1}
}
r9 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO2": 1},
"mol_products": {"S1": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q1": 1, "NO2": 1}
}
r10 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S1": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO2": 1, "O2": 1}
}
r11 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S1": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO": 1, "O2": 1}
}
r12 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q1": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S1": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q1": 1, "NO": 1, "NO2": 1}
}
# ---------- q2a reactions ------------
r13a = {"parameters": {"A": 17.98625, "E": 0},
"mol_reactants": {"q2a": 1, "O2": 0.75},
"mol_products": {"S2": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "O2": 1}
}
r14a = {"parameters": {"A": 12.1689, "E": 0},
"mol_reactants": {"q2a": 1, "O2": 1.25},
"mol_products": {"S2": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "O2": 1}
}
r15af = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S2": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S2": 1, "NO2": 1},
"rxn_orders": {"S2": 1, "NO": 1, "O2": 1}
}
r15ar = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S2": 1, "NO2": 1},
"mol_products": {"S2": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S2": 1, "NO2": 1}
}
r16a = {"parameters": {"A": 1.33E8, "E": 0},
"mol_reactants": {"q2a": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S2": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO": 1, "O2": 1}
}
r17a = {"parameters": {"A": 4465644, "E": 0},
"mol_reactants": {"q2a": 1, "NO2": 1},
"mol_products": {"S2": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q2a": 1, "NO2": 1}
}
r18a = {"parameters": {"A": 1.86E8, "E": 0},
"mol_reactants": {"q2a": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S2": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO2": 1, "O2": 1}
}
r19a = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2a": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S2": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO": 1, "O2": 1}
}
r20a = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2a": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S2": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2a": 1, "NO": 1, "NO2": 1}
}
# ---------- q2b reactions ------------
r13b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "O2": 0.75},
"mol_products": {"q2a": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "O2": 1}
}
r14b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "O2": 1.25},
"mol_products": {"q2a": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "O2": 1}
}
r15bf = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 1, "O2": 0.5},
"mol_products": {"q2a": 1, "NO2": 1},
"rxn_orders": {"q2b": 1, "NO": 1, "O2": 1}
}
r15br = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2a": 1, "NO2": 1},
"mol_products": {"q2b": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"q2b": 1, "NO2": 1}
}
r16b = {"parameters": {"A": 3.27E8, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 1, "O2": 0.25},
"mol_products": {"q2a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO": 1, "O2": 1}
}
r17b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "NO2": 1},
"mol_products": {"q2a": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q2b": 1, "NO2": 1}
}
r18b = {"parameters": {"A": 4.14E9, "E": 0},
"mol_reactants": {"q2b": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"q2a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO2": 1, "O2": 1}
}
r19b = {"parameters": {"A": 5395255, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 1, "O2": 0.75},
"mol_products": {"q2a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO": 1, "O2": 1}
}
r20b = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q2b": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"q2a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q2b": 1, "NO": 1, "NO2": 1}
}
# ---------- q3a reactions ------------
r21 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "O2": 0.75},
"mol_products": {"S3a": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "O2": 1}
}
r22 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "O2": 1.25},
"mol_products": {"S3a": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "O2": 1}
}
r23f = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S3a": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S3a": 1, "NO2": 1},
"rxn_orders": {"S3a": 1, "NO": 1, "O2": 1}
}
r23r = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"S3a": 1, "NO2": 1},
"mol_products": {"S3a": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S3a": 1, "NO2": 1}
}
r24 = {"parameters": {"A": 3.26E8, "E": 0},
"mol_reactants": {"q3a": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S3a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO": 1, "O2": 1}
}
r25 = {"parameters": {"A": 2911397, "E": 0},
"mol_reactants": {"q3a": 1, "NO2": 1},
"mol_products": {"S3a": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q3a": 1, "NO2": 1}
}
r26 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S3a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO2": 1, "O2": 1}
}
r27 = {"parameters": {"A": 6312962, "E": 0},
"mol_reactants": {"q3a": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S3a": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO": 1, "O2": 1}
}
r28 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3a": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S3a": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3a": 1, "NO": 1, "NO2": 1}
}
# ---------- q3b reactions ------------
r29 = {"parameters": {"A": 105.2508, "E": 0},
"mol_reactants": {"q3b": 1, "O2": 0.75},
"mol_products": {"S3b": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "O2": 1}
}
r30 = {"parameters": {"A": 98.4407, "E": 0},
"mol_reactants": {"q3b": 1, "O2": 1.25},
"mol_products": {"S3b": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "O2": 1}
}
r31f = {"parameters": {"A": 3053293, "E": 0},
"mol_reactants": {"S3b": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S3b": 1, "NO2": 1},
"rxn_orders": {"S3b": 1, "NO": 1, "O2": 1}
}
r31r = {"parameters": {"A": 3825.781, "E": 0},
"mol_reactants": {"S3b": 1, "NO2": 1},
"mol_products": {"S3b": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S3b": 1, "NO2": 1}
}
r32 = {"parameters": {"A": 6.24E9, "E": 0},
"mol_reactants": {"q3b": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S3b": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO": 1, "O2": 1}
}
r33 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3b": 1, "NO2": 1},
"mol_products": {"S3b": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q3b": 1, "NO2": 1}
}
r34 = {"parameters": {"A": 1.22E9, "E": 0},
"mol_reactants": {"q3b": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S3b": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO2": 1, "O2": 1}
}
r35 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3b": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S3b": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO": 1, "O2": 1}
}
r36 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3b": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S3b": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3b": 1, "NO": 1, "NO2": 1}
}
# ---------- q3c reactions ------------
r37 = {"parameters": {"A": 0.238904073, "E": 0},
"mol_reactants": {"q3c": 1, "O2": 0.75},
"mol_products": {"S3c": 1, "N2": 0.5, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "O2": 1}
}
r38 = {"parameters": {"A": 0.54633, "E": 0},
"mol_reactants": {"q3c": 1, "O2": 1.25},
"mol_products": {"S3c": 1, "NO": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "O2": 1}
}
r39f = {"parameters": {"A": 3670639, "E": 0},
"mol_reactants": {"S3c": 1, "NO": 1, "O2": 0.5},
"mol_products": {"S3c": 1, "NO2": 1},
"rxn_orders": {"S3c": 1, "NO": 1, "O2": 1}
}
r39r = {"parameters": {"A": 2244.256, "E": 0},
"mol_reactants": {"S3c": 1, "NO2": 1},
"mol_products": {"S3c": 1, "NO": 1, "O2": 0.5},
"rxn_orders": {"S3c": 1, "NO2": 1}
}
r40 = {"parameters": {"A": 8.82E8, "E": 0},
"mol_reactants": {"q3c": 1, "NO": 1, "O2": 0.25},
"mol_products": {"S3c": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO": 1, "O2": 1}
}
r41 = {"parameters": {"A": 2548900, "E": 0},
"mol_reactants": {"q3c": 1, "NO2": 1},
"mol_products": {"S3c": 1, "N2": 1, "H2O": 1.5, "O2": 0.25},
"rxn_orders": {"q3c": 1, "NO2": 1}
}
r42 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3c": 1, "NO2": 1, "O2": 0.25},
"mol_products": {"S3c": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO2": 1, "O2": 1}
}
r43 = {"parameters": {"A": 17096289, "E": 0},
"mol_reactants": {"q3c": 1, "NO": 1, "O2": 0.75},
"mol_products": {"S3c": 1, "N2O": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO": 1, "O2": 1}
}
r44 = {"parameters": {"A": 0, "E": 0},
"mol_reactants": {"q3c": 1, "NO": 0.5, "NO2": 0.5},
"mol_products": {"S3c": 1, "N2": 1, "H2O": 1.5},
"rxn_orders": {"q3c": 1, "NO": 1, "NO2": 1}
}
sim.set_reaction_info("r1", r1_equ)
sim.set_reaction_info("r2a", r2a_equ)
sim.set_reaction_info("r2b", r2b_equ)
sim.set_reaction_info("r3a", r3a_equ)
sim.set_reaction_info("r3b", r3b_equ)
sim.set_reaction_info("r3c", r3c_equ)
sim.set_reaction_info("r4a", r4a_equ)
sim.set_reaction_info("r4b", r4b_equ)
sim.set_reaction_info("r5", r5)
sim.set_reaction_info("r6", r6)
sim.set_reaction_info("r7f", r7f)
sim.set_reaction_info("r7r", r7r)
sim.set_reaction_info("r8", r8)
sim.set_reaction_info("r9", r9)
sim.set_reaction_info("r10", r10)
sim.set_reaction_info("r11", r11)
sim.set_reaction_info("r12", r12)
sim.set_reaction_info("r13a", r13a)
sim.set_reaction_info("r14a", r14a)
sim.set_reaction_info("r15af", r15af)
sim.set_reaction_info("r15ar", r15ar)
sim.set_reaction_info("r16a", r16a)
sim.set_reaction_info("r17a", r17a)
sim.set_reaction_info("r18a", r18a)
sim.set_reaction_info("r19a", r19a)
sim.set_reaction_info("r20a", r20a)
sim.set_reaction_info("r13b", r13b)
sim.set_reaction_info("r14b", r14b)
sim.set_reaction_info("r15bf", r15bf)
sim.set_reaction_info("r15br", r15br)
sim.set_reaction_info("r16b", r16b)
sim.set_reaction_info("r17b", r17b)
sim.set_reaction_info("r18b", r18b)
sim.set_reaction_info("r19b", r19b)
sim.set_reaction_info("r20b", r20b)
sim.set_reaction_info("r21", r21)
sim.set_reaction_info("r22", r22)
sim.set_reaction_info("r23f", r23f)
sim.set_reaction_info("r23r", r23r)
sim.set_reaction_info("r24", r24)
sim.set_reaction_info("r25", r25)
sim.set_reaction_info("r26", r26)
sim.set_reaction_info("r27", r27)
sim.set_reaction_info("r28", r28)
sim.set_reaction_info("r29", r29)
sim.set_reaction_info("r30", r30)
sim.set_reaction_info("r31f", r31f)
sim.set_reaction_info("r31r", r31r)
sim.set_reaction_info("r32", r32)
sim.set_reaction_info("r33", r33)
sim.set_reaction_info("r34", r34)
sim.set_reaction_info("r35", r35)
sim.set_reaction_info("r36", r36)
sim.set_reaction_info("r37", r37)
sim.set_reaction_info("r38", r38)
sim.set_reaction_info("r39f", r39f)
sim.set_reaction_info("r39r", r39r)
sim.set_reaction_info("r40", r40)
sim.set_reaction_info("r41", r41)
sim.set_reaction_info("r42", r42)
sim.set_reaction_info("r43", r43)
sim.set_reaction_info("r44", r44)
# ----------------- Unaged Site Densities -----------
sim.set_site_density("S1","Unaged",0.052619016)
sim.set_site_density("S2","Unaged",0.023125746)
sim.set_site_density("S3a","Unaged",0.01632)
sim.set_site_density("S3b","Unaged",0.003233)
sim.set_site_density("S3c","Unaged",0.006699)
sim.set_isothermal_temp("Unaged","300C",300+273.15)
# Build the constraints then discretize
sim.build_constraints()
sim.discretize_model(method=DiscretizationMethod.FiniteDifference,
tstep=137,elems=5,colpoints=2)
# Initial conditions and Boundary Conditions should be set AFTER discretization
# ---------------- Unaged ICs ------------------
sim.set_const_IC("O2","Unaged","300C",0.002126764)
sim.set_const_IC("H2O","Unaged","300C",0.001074836)
sim.set_const_IC("NH3","Unaged","300C",0)
sim.set_const_IC("NO","Unaged","300C",0)
sim.set_const_IC("NO2","Unaged","300C",0)
sim.set_const_IC("N2O","Unaged","300C",0)
sim.set_const_IC("N2","Unaged","300C",0.0184)
sim.set_const_IC("q1","Unaged","300C",0)
sim.set_const_IC("q2a","Unaged","300C",0)
sim.set_const_IC("q2b","Unaged","300C",0)
sim.set_const_IC("q3a","Unaged","300C",0)
sim.set_const_IC("q3b","Unaged","300C",0)
sim.set_const_IC("q3c","Unaged","300C",0)
sim.set_const_IC("q4a","Unaged","300C",0)
sim.set_const_IC("q4b","Unaged","300C",0)
# ---------------- Unaged BCs ------------------
sim.set_time_dependent_BC("O2","Unaged","300C",
time_value_pairs=[(2.258,4.253E-5),
(20.925,0.002126764)],
initial_value=0.002126764)
sim.set_time_dependent_BC("H2O","Unaged","300C",
time_value_pairs=[(4.25,0.001056024),
(21.758,0.001044021)],
initial_value=0.001074836)
sim.set_time_dependent_BC("NH3","Unaged","300C",
time_value_pairs=[(2.258,6.33114E-6),
(37.591,0),
(49.758,6.33114E-6),
(76.925,0),
(99.258,6.33114E-6),
(120.258,0)],
initial_value=0)
sim.set_time_dependent_BC("NO","Unaged","300C",
time_value_pairs=[(37.591, 6.33114E-6),
(86.758,3.1426E-6),
(129.425,6.33114E-6)],
initial_value=0)
sim.set_time_dependent_BC("NO2","Unaged","300C",
time_value_pairs=[(86.758,3.1426E-6),
(129.425,0)],
initial_value=0)
sim.set_const_BC("N2O","Unaged","300C",0)
sim.set_const_BC("N2","Unaged","300C",0.0184)
# Fix the kinetics to only run a simulation
sim.fix_reaction("r1")
sim.fix_reaction("r2a")
sim.fix_reaction("r2b")
sim.fix_reaction("r3a")
sim.fix_reaction("r3b")
sim.fix_reaction("r3c")
sim.fix_reaction("r4a")
sim.fix_reaction("r4b")
# Fix all reactions for simulation mode only
sim.fix_all_reactions()
sim.unfix_reaction("r13a")
sim.unfix_reaction("r14a")
sim.unfix_reaction("r29")
sim.unfix_reaction("r30")
sim.unfix_reaction("r37")
sim.unfix_reaction("r38")
sim.unfix_reaction("r1")
sim.unfix_reaction("r2a")
sim.unfix_reaction("r2b")
sim.unfix_reaction("r3a")
sim.unfix_reaction("r3b")
sim.unfix_reaction("r3c")
sim.unfix_reaction("r4a")
sim.unfix_reaction("r4b")
'''
sim.set_reaction_param_bounds("r1","dH",factor=0)
sim.set_reaction_param_bounds("r1","dS",factor=0)
sim.set_reaction_param_bounds("r2a","dH",factor=0)
sim.set_reaction_param_bounds("r2a","dS",factor=0)
sim.set_reaction_param_bounds("r2b","dH",factor=0)
sim.set_reaction_param_bounds("r2b","dS",factor=0)
sim.set_reaction_param_bounds("r3a","dH",factor=0)
sim.set_reaction_param_bounds("r3a","dS",factor=0)
sim.set_reaction_param_bounds("r3b","dH",factor=0)
sim.set_reaction_param_bounds("r3b","dS",factor=0)
sim.set_reaction_param_bounds("r3c","dH",factor=0)
sim.set_reaction_param_bounds("r3c","dS",factor=0)
sim.set_reaction_param_bounds("r4a","dH",factor=0)
sim.set_reaction_param_bounds("r4a","dS",factor=0)
sim.set_reaction_param_bounds("r4b","dH",factor=0)
sim.set_reaction_param_bounds("r4b","dS",factor=0)
'''
#sim.set_reaction_param_bounds("r13a","A",factor=5)
#sim.set_reaction_param_bounds("r14a","A",factor=5)
#sim.set_reaction_param_bounds("r29","A",factor=5)
#sim.set_reaction_param_bounds("r30","A",factor=5)
#sim.set_reaction_param_bounds("r37","A",factor=5)
#sim.set_reaction_param_bounds("r38","A",factor=5)
sim.initialize_auto_scaling()
sim.initialize_simulator()
sim.finalize_auto_scaling()
sim.run_solver()
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"Unaged", "300C", file_name="Unaged_SCR_300C_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"Unaged", "300C", 0, file_name="Unaged_SCR_300C_bypass.txt")
sim.print_results_of_integral_average(["q1","q2a","q2b","q3a","q3b","q3c"],
"Unaged", "300C", file_name="Unaged_SCR_300C_average_ads.txt")
sim.print_kinetic_parameter_info(file_name="300C_opt_params.txt")
sim.save_model_state(file_name="300C_model.json")
|
py | 1a3fccde1e6fbabd74097c4f1ed22ea40de07a9e | from keras.applications.resnet50 import ResNet50 # pylint: disable=import-error
ResNet50(weights='imagenet', include_top=False) |
py | 1a3fce3b7ac89992b5db2bbfc14a2488160f13ab | """Test seligimus.exception."""
from seligimus.exception import SeligimusException
def test_inheritance() -> None:
"""Test seligimus.exception.SeligimusException inheritance."""
assert issubclass(SeligimusException, Exception)
|
py | 1a3fcf13afbf002f51e4043e95da3a30bfbd2658 | from django.urls import path
from .views import (
ContatoCreateView,
ContatoDeleteView,
ContatoDetailView,
ContatoListView,
ContatoUpdateView,
)
app_name = 'contato'
urlpatterns = [
path('', ContatoListView.as_view(), name='contato-list'),
path('create/', ContatoCreateView.as_view(), name='contato-create'),
path('<int:pk>/', ContatoDetailView.as_view(), name='contato-detail'),
path('<int:pk>/update/', ContatoUpdateView.as_view(), name='contato-update'),
path('<int:pk>/delete/', ContatoDeleteView.as_view(), name='contato-delete'),
]
|
py | 1a3fcf1496e81e5a369795cb838d08c96c37c1a5 | """
Train the ESIM model on the preprocessed Quora dataset.
"""
# Aurelien Coet, 2018.
from utils.utils_top_transformer import train, validate
from vaa.droped import TransformerESIM as ESIM
# from vaa.model_esim import ESIM
from vaa.model_transformer_top import TOP
# from vaa.model_bert_transformer import ESIM
import torch.nn as nn
import matplotlib.pyplot as plt
import os
import sys
import argparse
import json
import numpy as np
import pickle
import torch
import matplotlib
import itertools
matplotlib.use('Agg')
def transform_batch_data(data, batch_size=64, shuffle=True):
data_batch = dict()
data_batch['premises'] = dict()
data_batch['hypotheses'] = dict()
data_batch['labels'] = dict()
index = np.arange(len(data['labels']))
if shuffle:
np.random.shuffle(index)
idx = -1
for i in range(len(index)):
if i % batch_size == 0:
idx += 1
data_batch['premises'][idx] = []
data_batch['hypotheses'][idx] = []
data_batch['labels'][idx] = []
data_batch['premises'][idx].append(data['premises'][index[i]])
data_batch['hypotheses'][idx].append(data['hypotheses'][index[i]])
data_batch['labels'][idx].append(int(data['labels'][index[i]]))
return data_batch
def main(train_file,
valid_file,
test_file,
target_dir,
embedding_size=512,
hidden_size=512,
dropout=0.5,
num_classes=3,
epochs=64,
batch_size=32,
lr=0.0004,
patience=5,
max_grad_norm=10.0,
checkpoint_model0=None,
checkpoint_model1=None,
finetuning=False):
"""
Train the ESIM model on the Quora dataset.
Args:
train_file: A path to some preprocessed data that must be used
to train the model.
valid_file: A path to some preprocessed data that must be used
to validate the model.
        test_file: A path to some preprocessed data that must be used
            to test the model.
        target_dir: The path to a directory where the trained model must
            be saved.
        embedding_size: The dimension of the input embeddings. Defaults
            to 512.
        hidden_size: The size of the hidden layers in the model. Defaults
            to 512.
dropout: The dropout rate to use in the model. Defaults to 0.5.
num_classes: The number of classes in the output of the model.
Defaults to 3.
epochs: The maximum number of epochs for training. Defaults to 64.
batch_size: The size of the batches for training. Defaults to 32.
lr: The learning rate for the optimizer. Defaults to 0.0004.
patience: The patience to use for early stopping. Defaults to 5.
        max_grad_norm: The maximum norm used for gradient clipping.
            Defaults to 10.0.
        checkpoint_model0, checkpoint_model1: Checkpoints from which to
            continue training. If None, training starts from scratch.
            Defaults to None.
        finetuning: Whether to also optimize the parameters of the first
            (pretrained) model. Defaults to False.
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(20 * "=", " Preparing for training ", 20 * "=")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# -------------------- Data loading ------------------- #
print("\t* Loading training data...")
with open(train_file, "rb") as pkl:
train_data = pickle.load(pkl)
print("\t* Loading validation data...")
with open(valid_file, "rb") as pkl:
valid_data = pickle.load(pkl)
valid_dataloader = transform_batch_data(valid_data, batch_size=batch_size, shuffle=False)
print("\t* Loading test data...")
with open(test_file, "rb") as pkl:
test_data = pickle.load(pkl)
test_dataloader = transform_batch_data(test_data, batch_size=batch_size, shuffle=False)
# -------------------- Model definition ------------------- #
print("\t* Building model...")
model = []
model1 = ESIM(embedding_size,
hidden_size,
dropout=0,
num_classes=num_classes,
device=device).to(device)
model2 = TOP(embedding_size,
hidden_size,
dropout=dropout,
num_classes=num_classes,
device=device).to(device)
model.append(model1)
model.append(model2)
# -------------------- Preparation for training ------------------- #
criterion = nn.CrossEntropyLoss()
if finetuning:
optimizer = torch.optim.Adam(itertools.chain(model[0].parameters(), model[1].parameters()), lr=lr)
else:
optimizer = torch.optim.Adam(model[1].parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode="max",
factor=0.5,
patience=0)
best_score = 0.0
start_epoch = 1
# Data for loss curves plot.
epochs_count = []
train_losses = []
valid_losses = []
# Continuing training from a checkpoint if one was given as argument.
if checkpoint_model0:
checkpoint = torch.load(checkpoint_model0)
start_epoch = checkpoint["epoch"] + 1
best_score = checkpoint["best_score"]
print("\t* Training will continue on existing model from epoch {}..."
.format(start_epoch))
model[0].load_state_dict(checkpoint["model"])
# optimizer.load_state_dict(checkpoint["optimizer"])
# epochs_count = checkpoint["epochs_count"]
# train_losses = checkpoint["train_losses"]
# valid_losses = checkpoint["valid_losses"]
if checkpoint_model1:
checkpoint = torch.load(checkpoint_model1)
start_epoch = checkpoint["epoch"] + 1
best_score = checkpoint["best_score"]
print("\t* Training will continue on existing model from epoch {}..."
.format(start_epoch))
model[1].load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
epochs_count = checkpoint["epochs_count"]
train_losses = checkpoint["train_losses"]
valid_losses = checkpoint["valid_losses"]
# Compute loss and accuracy before starting (or resuming) training.
_, valid_loss, valid_accuracy = validate(model,
valid_dataloader,
criterion)
print("\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%"
.format(valid_loss, (valid_accuracy*100)))
_, test_loss, test_accuracy = validate(model,
test_dataloader,
criterion)
print("\t* test loss before training: {:.4f}, accuracy: {:.4f}%"
.format(test_loss, (test_accuracy*100)))
# -------------------- Training epochs ------------------- #
print("\n",
20 * "=",
"Training ESIM model on device: {}".format(device),
20 * "=")
patience_counter = 0
for epoch in range(start_epoch, epochs+1):
train_dataloader = transform_batch_data(train_data, batch_size=batch_size, shuffle=True)
epochs_count.append(epoch)
print("* Training epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy = train(model,
train_dataloader,
optimizer,
criterion,
epoch,
max_grad_norm)
train_losses.append(epoch_loss)
print("-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%"
.format(epoch_time, epoch_loss, (epoch_accuracy*100)))
print("* Validation for epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy = validate(model,
valid_dataloader,
criterion)
valid_losses.append(epoch_loss)
print("-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n"
.format(epoch_time, epoch_loss, (epoch_accuracy*100)))
print("* Test for epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy = validate(model,
test_dataloader,
criterion)
print("-> Test. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n"
.format(epoch_time, epoch_loss, (epoch_accuracy*100)))
        sys.stdout.flush()  # flush the output buffer
# Update the optimizer's learning rate with the scheduler.
scheduler.step(epoch_accuracy)
# Early stopping on validation accuracy.
if epoch_accuracy < best_score:
patience_counter += 1
else:
best_score = epoch_accuracy
patience_counter = 0
# Save the best model. The optimizer is not saved to avoid having
# a checkpoint file that is too heavy to be shared. To resume
# training from the best model, use the 'esim_*.pth.tar'
# checkpoints instead.
torch.save({"epoch": epoch,
"model": model[0].state_dict(),
"best_score": best_score,
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "best_model0.pth.tar"))
torch.save({"epoch": epoch,
"model": model[1].state_dict(),
"best_score": best_score,
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "best_model1.pth.tar"))
# Save the model at each epoch.
torch.save({"epoch": epoch,
"model": model[0].state_dict(),
"best_score": best_score,
"optimizer": optimizer.state_dict(),
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "esim_model0{}.pth.tar".format(epoch)))
torch.save({"epoch": epoch,
"model": model[1].state_dict(),
"best_score": best_score,
"optimizer": optimizer.state_dict(),
"epochs_count": epochs_count,
"train_losses": train_losses,
"valid_losses": valid_losses},
os.path.join(target_dir, "esim_model1{}.pth.tar".format(epoch)))
if patience_counter >= patience:
print("-> Early stopping: patience limit reached, stopping...")
break
# Plotting of the loss curves for the train and validation sets.
fig = plt.figure()
plt.plot(epochs_count, train_losses, "-r")
plt.plot(epochs_count, valid_losses, "-b")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend(["Training loss", "Validation loss"])
plt.title("Cross entropy loss")
fig.savefig('quora_loss.png')
if __name__ == "__main__":
default_config = "../../config/training/quora_training_transformer.json"
parser = argparse.ArgumentParser(
description="Train the ESIM model on quora")
parser.add_argument("--config",
default=default_config,
help="Path to a json configuration file")
script_dir = os.path.dirname(os.path.realpath(__file__))
script_dir = script_dir + '/scripts/training'
parser.add_argument("--checkpoint_model0",
default=os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/transformer/' +"best.pth.tar",
help="Path to a checkpoint file to resume training")
parser.add_argument("--checkpoint_model1",
default=None,#os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/bert/' +"esim_model1{}.pth.tar".format(2),
help="Path to a checkpoint file to resume training")
args = parser.parse_args()
if args.config == default_config:
config_path = os.path.join(script_dir, args.config)
else:
config_path = args.config
with open(os.path.normpath(config_path), 'r') as config_file:
config = json.load(config_file)
main(os.path.normpath(os.path.join(script_dir, config["train_data"])),
os.path.normpath(os.path.join(script_dir, config["valid_data"])),
os.path.normpath(os.path.join(script_dir, config["test_data"])),
os.path.normpath(os.path.join(script_dir, config["target_dir"])),
config["embedding_size"],
config["hidden_size"],
config["dropout"],
config["num_classes"],
config["epochs"],
config["batch_size"],
config["lr"],
config["patience"],
config["max_gradient_norm"],
args.checkpoint_model0,
args.checkpoint_model1,
finetuning=False)
|
py | 1a3fd17065ad09733def43902eecb53a01582617 | __version__ = '1.2.3.dev0'
from .core import ( # noqa: F401
load_kconf,
load_configuration,
Configuration,
CfgProfile,
CfgInclude,
PROFILES_FILENAME,
defconfig_for_target,
defconfig_merge,
defconfig_split,
)
|
py | 1a3fd1b0ed8c0e952daaa663f3b9e6e0f49b5c45 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import abc
import pandas as pd
from ..log import get_module_logger
class Expression(abc.ABC):
"""
Expression base class
    Expression is designed to handle the calculation of data with the format below:
    data with two dimensions for each instrument,
    - feature
    - time: it could be observation time or period time.
        - period time is designed for point-in-time databases. For example, the period time may be 2014Q4, and its value can be observed multiple times (different values may be observed at different times due to amendments).
"""
def __str__(self):
return type(self).__name__
def __repr__(self):
return str(self)
def __gt__(self, other):
from .ops import Gt # pylint: disable=C0415
return Gt(self, other)
def __ge__(self, other):
from .ops import Ge # pylint: disable=C0415
return Ge(self, other)
def __lt__(self, other):
from .ops import Lt # pylint: disable=C0415
return Lt(self, other)
def __le__(self, other):
from .ops import Le # pylint: disable=C0415
return Le(self, other)
def __eq__(self, other):
from .ops import Eq # pylint: disable=C0415
return Eq(self, other)
def __ne__(self, other):
from .ops import Ne # pylint: disable=C0415
return Ne(self, other)
def __add__(self, other):
from .ops import Add # pylint: disable=C0415
return Add(self, other)
def __radd__(self, other):
from .ops import Add # pylint: disable=C0415
return Add(other, self)
def __sub__(self, other):
from .ops import Sub # pylint: disable=C0415
return Sub(self, other)
def __rsub__(self, other):
from .ops import Sub # pylint: disable=C0415
return Sub(other, self)
def __mul__(self, other):
from .ops import Mul # pylint: disable=C0415
return Mul(self, other)
def __rmul__(self, other):
from .ops import Mul # pylint: disable=C0415
return Mul(self, other)
def __div__(self, other):
from .ops import Div # pylint: disable=C0415
return Div(self, other)
def __rdiv__(self, other):
from .ops import Div # pylint: disable=C0415
return Div(other, self)
def __truediv__(self, other):
from .ops import Div # pylint: disable=C0415
return Div(self, other)
def __rtruediv__(self, other):
from .ops import Div # pylint: disable=C0415
return Div(other, self)
def __pow__(self, other):
from .ops import Power # pylint: disable=C0415
return Power(self, other)
def __and__(self, other):
from .ops import And # pylint: disable=C0415
return And(self, other)
def __rand__(self, other):
from .ops import And # pylint: disable=C0415
return And(other, self)
def __or__(self, other):
from .ops import Or # pylint: disable=C0415
return Or(self, other)
def __ror__(self, other):
from .ops import Or # pylint: disable=C0415
return Or(other, self)
def load(self, instrument, start_index, end_index, *args):
"""load feature
This function is responsible for loading feature/expression based on the expression engine.
The concrete implementation will be separated into two parts:
        1) caching data and handling errors.
- This part is shared by all the expressions and implemented in Expression
2) processing and calculating data based on the specific expression.
- This part is different in each expression and implemented in each expression
Expression Engine is shared by different data.
Different data will have different extra information for `args`.
Parameters
----------
instrument : str
instrument code.
start_index : str
feature start index [in calendar].
end_index : str
feature end index [in calendar].
*args may contain following information:
1) if it is used in basic expression engine data, it contains following arguments
freq: str
feature frequency.
        2) if it is used in PIT data, it contains the following arguments
cur_pit:
it is designed for the point-in-time data.
Returns
----------
pd.Series
feature series: The index of the series is the calendar index
"""
from .cache import H # pylint: disable=C0415
# cache
cache_key = str(self), instrument, start_index, end_index, *args
if cache_key in H["f"]:
return H["f"][cache_key]
if start_index is not None and end_index is not None and start_index > end_index:
raise ValueError("Invalid index range: {} {}".format(start_index, end_index))
try:
series = self._load_internal(instrument, start_index, end_index, *args)
except Exception as e:
get_module_logger("data").debug(
f"Loading data error: instrument={instrument}, expression={str(self)}, "
f"start_index={start_index}, end_index={end_index}, args={args}. "
f"error info: {str(e)}"
)
raise
series.name = str(self)
H["f"][cache_key] = series
return series
@abc.abstractmethod
def _load_internal(self, instrument, start_index, end_index, *args) -> pd.Series:
raise NotImplementedError("This function must be implemented in your newly defined feature")
@abc.abstractmethod
def get_longest_back_rolling(self):
"""Get the longest length of historical data the feature has accessed
        This is designed for getting the range of data needed to calculate
        the features in a specific range up front. However, situations like
        Ref(Ref($close, -1), 1) cannot be handled correctly.
        So this is only used for detecting the length of historical data needed.
"""
# TODO: forward operator like Ref($close, -1) is not supported yet.
raise NotImplementedError("This function must be implemented in your newly defined feature")
@abc.abstractmethod
def get_extended_window_size(self):
"""get_extend_window_size
        To calculate this operator in the range [start_index, end_index],
        we have to get the *leaf feature* in the
        range [start_index - lft_etd, end_index + rght_etd].
Returns
----------
(int, int)
lft_etd, rght_etd
"""
raise NotImplementedError("This function must be implemented in your newly defined feature")
class Feature(Expression):
"""Static Expression
This kind of feature will load data from provider
"""
def __init__(self, name=None):
if name:
self._name = name
else:
self._name = type(self).__name__
def __str__(self):
return "$" + self._name
def _load_internal(self, instrument, start_index, end_index, freq):
# load
from .data import FeatureD # pylint: disable=C0415
return FeatureD.feature(instrument, str(self), start_index, end_index, freq)
def get_longest_back_rolling(self):
return 0
def get_extended_window_size(self):
return 0, 0
class PFeature(Feature):
def __str__(self):
return "$$" + self._name
def _load_internal(self, instrument, start_index, end_index, cur_time):
from .data import PITD # pylint: disable=C0415
return PITD.period_feature(instrument, str(self), start_index, end_index, cur_time)
class ExpressionOps(Expression):
"""Operator Expression
This kind of feature will use operator for feature
construction on the fly.
"""
|
py | 1a3fd1b79c113f66ee8f043289e72a1fe033813d | #!/usr/bin/python3
import pandas as pd
from os.path import join as oj
import os
def load_google_mobility(data_dir='.'):
''' Load in Google Community Mobility Reports
Parameters
----------
data_dir : str; path to the data directory containing 'google_mobility.csv'
Returns
-------
data frame
'''
# download directly from source to get daily updates
cur_dir = os.getcwd()
os.chdir(data_dir)
os.system("wget https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv -O google_mobility.csv")
raw = pd.read_csv('google_mobility.csv')
os.chdir(cur_dir)
return raw
if __name__ == '__main__':
raw = load_google_mobility()
print('loaded google_mobility successfully.')
|
py | 1a3fd3fa6a7b17c1a192fefeed62e7fe8e073cbb | # Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import torch
from condensa.util import EventTimer
class DC(object):
"""Condensa direct compression optimizer."""
def compress(self,
w,
pi,
delta,
trainloader,
testloader,
valloader,
criterion):
"""
Performs model compression using direct optimization.
:param w: PyTorch model.
:type w: `torch.nn.Module`
:param pi: Compression function.
:param delta: Decompression function.
:param trainloader: Training dataloader.
:param testloader: Test dataloader.
:param valloader: Validation dataloader.
:param criterion: Loss criterion.
"""
statistics = dict()
timer_dc = EventTimer()
with torch.no_grad():
compressed = deepcopy(w)
pi(compressed)
statistics['total_elapsed'] = timer_dc.elapsed_seconds
return compressed, statistics
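# --- Illustrative usage sketch (assumptions noted) ---------------------------
# DC.compress() applies the compression function ``pi`` to a deep copy of the
# model and records the elapsed time; no fine-tuning is performed. The toy
# model and the magnitude-pruning ``pi`` below are placeholders written for
# this sketch, not part of Condensa's schemes API.
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Linear(8, 4)

    def toy_pi(module):
        # Toy compression function: zero out small weights in-place.
        with torch.no_grad():
            for p in module.parameters():
                p[p.abs() < 1e-2] = 0.0

    dc = DC()
    compressed, stats = dc.compress(model, toy_pi, delta=None,
                                    trainloader=None, testloader=None,
                                    valloader=None, criterion=None)
    print('elapsed seconds:', stats['total_elapsed'])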
|
py | 1a3fd44fe0aa242b5842c429447d7f5107298513 | """
Provides reporter classes that are triggered on particular events.
A reporter may present information to the user, perform another action such as
checkpointing, or both.
"""
from __future__ import division, print_function
import time
from neat.math_util import mean, stdev
from neat.six_util import itervalues, iterkeys
# TODO: Add a curses-based reporter.
class ReporterSet(object):
"""
Keeps track of the set of reporters
and gives methods to dispatch them at appropriate points.
"""
def __init__(self):
self.reporters = []
def add(self, reporter):
self.reporters.append(reporter)
def remove(self, reporter):
self.reporters.remove(reporter)
def start_generation(self, gen):
for r in self.reporters:
r.start_generation(gen)
def end_generation(self, config, population, species_set):
for r in self.reporters:
r.end_generation(config, population, species_set)
def post_evaluate(self, config, population, species, best_genome):
for r in self.reporters:
r.post_evaluate(config, population, species, best_genome)
def post_reproduction(self, config, population, species):
for r in self.reporters:
r.post_reproduction(config, population, species)
def complete_extinction(self):
for r in self.reporters:
r.complete_extinction()
def found_solution(self, config, generation, best):
for r in self.reporters:
r.found_solution(config, generation, best)
def species_stagnant(self, sid, species):
for r in self.reporters:
r.species_stagnant(sid, species)
def info(self, msg):
for r in self.reporters:
r.info(msg)
class BaseReporter(object):
"""Definition of the reporter interface expected by ReporterSet."""
def start_generation(self, generation):
pass
def end_generation(self, config, population, species_set):
pass
def post_evaluate(self, config, population, species, best_genome):
pass
def post_reproduction(self, config, population, species):
pass
def complete_extinction(self):
pass
def found_solution(self, config, generation, best):
pass
def species_stagnant(self, sid, species):
pass
def info(self, msg):
pass
class StdOutReporter(BaseReporter):
"""Uses `print` to output information about the run; an example reporter class."""
bestFitness = 0.0
def __init__(self, show_species_detail):
self.show_species_detail = show_species_detail
self.generation = None
self.generation_start_time = None
self.generation_times = []
self.num_extinctions = 0
def start_generation(self, generation):
self.generation = generation
print('\n ****** Running generation {0} ****** \n'.format(generation))
self.generation_start_time = time.time()
def end_generation(self, config, population, species_set):
ng = len(population)
ns = len(species_set.species)
if self.show_species_detail:
print('Population of {0:d} members in {1:d} species:'.format(ng, ns))
sids = list(iterkeys(species_set.species))
sids.sort()
print(" ID age size fitness adj fit stag")
print(" ==== === ==== ======= ======= ====")
for sid in sids:
s = species_set.species[sid]
a = self.generation - s.created
n = len(s.members)
f = "--" if s.fitness is None else "{:.1f}".format(s.fitness)
af = "--" if s.adjusted_fitness is None else "{:.3f}".format(s.adjusted_fitness)
st = self.generation - s.last_improved
print(
" {: >4} {: >3} {: >4} {: >7} {: >7} {: >4}".format(sid, a, n, f, af, st))
else:
print('Population of {0:d} members in {1:d} species'.format(ng, ns))
elapsed = time.time() - self.generation_start_time
self.generation_times.append(elapsed)
self.generation_times = self.generation_times[-10:]
average = sum(self.generation_times) / len(self.generation_times)
print('Total extinctions: {0:d}'.format(self.num_extinctions))
if len(self.generation_times) > 1:
print("Generation time: {0:.3f} sec ({1:.3f} average)".format(elapsed, average))
else:
print("Generation time: {0:.3f} sec".format(elapsed))
def post_evaluate(self, config, population, species, best_genome):
# pylint: disable=no-self-use
fitnesses = [c.fitness for c in itervalues(population)]
fit_mean = mean(fitnesses)
fit_std = stdev(fitnesses)
best_species_id = species.get_species_id(best_genome.key)
print('Population\'s average fitness: {0:3.5f} stdev: {1:3.5f}'.format(fit_mean, fit_std))
print(
'Best fitness: {0:3.5f} - size: {1!r} - species {2} - id {3}'.format(best_genome.fitness,
best_genome.size(),
best_species_id,
best_genome.key))
res = open("result.csv", "a")
res.write('{0:3.5f},{1:3.5f} \n'.format(best_genome.fitness, fit_mean))
res.close()
# andrew add
if (best_genome.fitness > self.bestFitness):
self.bestFitness = best_genome.fitness
best = open("best.txt", "a")
best.write('\nBest genome:\n{!s}'.format(best_genome))
best.close()
# andrew end
def complete_extinction(self):
self.num_extinctions += 1
print('All species extinct.')
def found_solution(self, config, generation, best):
print('\nBest individual in generation {0} meets fitness threshold - complexity: {1!r}'.format(
self.generation, best.size()))
def species_stagnant(self, sid, species):
if self.show_species_detail:
print("\nSpecies {0} with {1} members is stagnated: removing it".format(sid, len(species.members)))
def info(self, msg):
print(msg)
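# --- Illustrative sketch (not part of neat-python) ---------------------------
# Any object implementing the BaseReporter interface can be attached to a
# ReporterSet; only the hooks you care about need real bodies. The class below
# is a hypothetical example that appends the best fitness of each generation
# to a CSV file; the file name is an assumption made for illustration.
class BestFitnessLogReporter(BaseReporter):
    def __init__(self, path="best_fitness_log.csv"):
        self.path = path
        self.generation = None

    def start_generation(self, generation):
        self.generation = generation

    def post_evaluate(self, config, population, species, best_genome):
        with open(self.path, "a") as fh:
            fh.write("{},{:.5f}\n".format(self.generation, best_genome.fitness))

# Typical wiring (sketch):
#     reporters = ReporterSet()
#     reporters.add(StdOutReporter(show_species_detail=True))
#     reporters.add(BestFitnessLogReporter())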
|
py | 1a3fd4cc54466166743bcf40d7967f4507160c6e | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.base import Base
from cuml.common.handle import Handle
import cuml.common.cuda as cuda
from cuml.cluster.dbscan import DBSCAN
from cuml.cluster.kmeans import KMeans
from cuml.cluster.agglomerative import AgglomerativeClustering
from cuml.cluster.hdbscan import HDBSCAN
from cuml.datasets.arima import make_arima
from cuml.datasets.blobs import make_blobs
from cuml.datasets.regression import make_regression
from cuml.datasets.classification import make_classification
from cuml.decomposition.pca import PCA
from cuml.decomposition.tsvd import TruncatedSVD
from cuml.decomposition.incremental_pca import IncrementalPCA
from cuml.fil.fil import ForestInference
from cuml.ensemble.randomforestclassifier import RandomForestClassifier
from cuml.ensemble.randomforestregressor import RandomForestRegressor
from cuml.explainer.kernel_shap import KernelExplainer
from cuml.explainer.permutation_shap import PermutationExplainer
from cuml.fil import fil
from cuml.internals.global_settings import (
GlobalSettings, _global_settings_data)
from cuml.kernel_ridge.kernel_ridge import KernelRidge
from cuml.linear_model.elastic_net import ElasticNet
from cuml.linear_model.lasso import Lasso
from cuml.linear_model.linear_regression import LinearRegression
from cuml.linear_model.logistic_regression import LogisticRegression
from cuml.linear_model.mbsgd_classifier import MBSGDClassifier
from cuml.linear_model.mbsgd_regressor import MBSGDRegressor
from cuml.linear_model.ridge import Ridge
from cuml.manifold.t_sne import TSNE
from cuml.manifold.umap import UMAP
from cuml.metrics.accuracy import accuracy_score
from cuml.metrics.cluster.adjusted_rand_index import adjusted_rand_score
from cuml.metrics.regression import r2_score
from cuml.model_selection import train_test_split
from cuml.naive_bayes.naive_bayes import MultinomialNB
from cuml.neighbors.nearest_neighbors import NearestNeighbors
from cuml.neighbors.kneighbors_classifier import KNeighborsClassifier
from cuml.neighbors.kneighbors_regressor import KNeighborsRegressor
from cuml.preprocessing.LabelEncoder import LabelEncoder
from cuml.random_projection.random_projection import GaussianRandomProjection
from cuml.random_projection.random_projection import SparseRandomProjection
from cuml.random_projection.random_projection import \
johnson_lindenstrauss_min_dim
from cuml.solvers.cd import CD
from cuml.solvers.sgd import SGD
from cuml.solvers.qn import QN
from cuml.svm import SVC
from cuml.svm import SVR
from cuml.svm import LinearSVC
from cuml.svm import LinearSVR
from cuml.tsa import stationarity
from cuml.tsa.arima import ARIMA
from cuml.tsa.auto_arima import AutoARIMA
from cuml.tsa.holtwinters import ExponentialSmoothing
from cuml.common.pointer_utils import device_of_gpu_matrix
from cuml.common.memory_utils import set_global_output_type, using_output_type
# Import version; `get_versions` is deleted at the end of this file.
from ._version import get_versions
# Version configuration
__version__ = get_versions()['version']
del get_versions
def __getattr__(name):
if name == 'global_settings':
try:
return _global_settings_data.settings
except AttributeError:
_global_settings_data.settings = GlobalSettings()
return _global_settings_data.settings
raise AttributeError(f"module {__name__} has no attribute {name}")
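# --- Illustrative usage sketch (kept as comments so nothing runs at import) --
# The imports above re-export the public estimators at the package root, so
# user code normally only touches ``cuml.*``. A minimal example, assuming a
# CUDA-capable device is available:
#
#     import cuml
#     cuml.set_global_output_type('numpy')          # estimators return NumPy arrays
#     X, y = cuml.make_blobs(n_samples=1000, n_features=8, centers=3)
#     labels = cuml.KMeans(n_clusters=3).fit_predict(X)
#     with cuml.using_output_type('cudf'):          # temporarily switch output type
#         df_labels = cuml.KMeans(n_clusters=3).fit_predict(X)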
|
py | 1a3fd5305dd14608e6756d0809d9166d71eb45c0 | from .exception import *
from .bitoperations import *
from copy import deepcopy
# Finding range sum [i,j] in a flat array
class FenwickTree():
"""Fenwick Tree is Binary Indexed tree.
"""
def __init__(self, values):
self.size = len(values)
self.values = values
self.tree = [0]
self.tree.extend(deepcopy(values))
#one-based array
for i in range(1,self.size):
parent = i + least_significan_bit(i)
if parent <= self.size:
self.tree[parent] += self.tree[i]
def prefix_sum(self, i):
sum = 0
while i > 0 :
sum += self.tree[i]
i &= ~least_significan_bit(i)
return sum
def sum(self, i, j):
if j < i:
raise ValueError("Make sure j >= i")
return self.prefix_sum(j) - self.prefix_sum(i-1)
def point_update(self, i, x):
while i <= self.size:
self.tree[i] = self.tree[i] + x
i += least_significan_bit(i)
return self.tree
def get(self, i):
return self.sum(i,i)
def set(self, i, val):
return self.point_update(i , (val - self.sum(i,i))) |
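# --- Illustrative usage sketch (assumes least_significan_bit(i) == i & -i) ---
# The tree is one-indexed: values[0] corresponds to index 1. prefix_sum(i)
# returns the sum of the first i values, sum(i, j) the inclusive range sum,
# and point_update(i, x) adds x to position i.
#
#     ft = FenwickTree([3, 2, -1, 6, 5, 4, -3, 3])
#     ft.sum(1, 4)            # 3 + 2 - 1 + 6 = 10
#     ft.point_update(3, 4)   # position 3 becomes -1 + 4 = 3
#     ft.sum(1, 4)            # now 14
#     ft.get(6)               # single element -> 4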
py | 1a3fd61afd1e642a8c48225d17b9633cdb9b575d | #!/usr/bin/env python
# Simple checker for whether valgrind found errors
import sys
import xml.etree.ElementTree as ElementTree
e = ElementTree.parse(sys.argv[1])
states = [x.find('state').text for x in e.findall('status')]
errors = [x.find('kind').text for x in e.findall('error')]
if "RUNNING" not in states or "FINISHED" not in states:
raise Exception("Valgrind didn't run successfully, states seen: %s" % str(states))
if errors:
raise Exception("Valgrind found some errors: %s" % str(errors))
sys.exit(0)
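# --- Illustrative usage sketch -----------------------------------------------
# The script expects the XML report produced by Valgrind's --xml output; the
# script file name below is hypothetical. A typical memcheck invocation:
#
#     valgrind --tool=memcheck --xml=yes --xml-file=report.xml ./my_program
#     python check_valgrind.py report.xml
#
# The exit status is 0 only when both the RUNNING and FINISHED states were
# recorded and no <error> elements were found in the report.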
|
py | 1a3fd6cd55b4fd3662a003215760d113c163e75e | import json
from typing import Optional
from unittest.mock import patch
from allauth.account.signals import user_logged_in
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django_allauth_webauthn.models import WebauthnData
class BaseTests:
class TestWebauthn(TestCase):
# Mocked random challenges during registration and login
REGISTRATION_CHALLENGE: Optional[str] = None
LOGIN_CHALLENGE: Optional[str] = None
# Expected data to be read from get requests or written by post requests
REGISTRATION_GET_DATA: Optional[dict] = None
REGISTRATION_POST_DATA: Optional[dict] = None
LOGIN_GET_DATA: Optional[dict] = None
LOGIN_POST_DATA: Optional[dict] = None
# The WebauthnData device that matches the above test data
DEVICE: Optional[dict] = None
# Another, unrelated WebauthnData device used to test login failure with the above data,
# i.e. to test that valid authentication data for the wrong device fails
DEVICE_2 = {
"credential_id": "yd3MdXVR-YYXXLvn8PthHCTNztwsJq41i_JDHo8Z3Ks",
"public_key": "pQECAyYgASFYIAyjRD_Dgdb6odM44rDRlcrKIeR_X_HMr6mgp4xsjyuVIlggZDGtL3LWBx3TJrbs1y72FYaF4q-GapajZrx4faRdGTE", # noqa
"sign_counter": 5,
"name": "Device #2",
}
def setUp(self):
# Track the signals sent via allauth.
self.user_logged_in_count = 0
user_logged_in.connect(self._login_callback)
def _login_callback(self, sender, **kwargs):
self.user_logged_in_count += 1
def setup_testcase(
self,
username="test",
password="testpa$$w0rD",
id=None,
login=False,
set_session_user_id=False,
set_session_challenge=None,
devices=None,
):
createargs = {"username": username}
if id:
createargs["id"] = id
user = get_user_model().objects.create(**createargs)
user.set_password(password)
user.save()
if devices:
for device in devices:
device = WebauthnData.objects.create(user=user, **device)
if set_session_user_id or set_session_challenge:
session = self.client.session
if set_session_user_id:
session["allauth_webauthn_user_id"] = user.id
if set_session_challenge:
session["allauth_webauthn_challenge"] = set_session_challenge
session.save()
if login:
self.client.force_login(user)
return user
def test_account_login_without_webauthn_enabled(self):
user = self.setup_testcase()
# Manually login user
response = self.client.post(
reverse("account_login"),
{"login": user.username, "password": "testpa$$w0rD"},
)
self.assertRedirects(
response,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
self.assertEqual(self.user_logged_in_count, 1)
# Try to access a protected page
response = self.client.get(reverse("protected"))
self.assertEqual(response.content, b"secret content")
def test_account_login_with_webauthn_enabled(self):
user = self.setup_testcase(devices=[self.DEVICE])
# Manually login user
response = self.client.post(
reverse("account_login"),
{"login": user.username, "password": "testpa$$w0rD"},
)
self.assertRedirects(response, reverse("webauthn-login"), fetch_redirect_response=False)
self.assertEqual(self.user_logged_in_count, 0)
def test_account_login_with_webauthn_enabled_and_next_page(self):
user = self.setup_testcase(devices=[self.DEVICE])
# Manually login user
redirect_next = "?next=" + reverse("protected")
response = self.client.post(
reverse("account_login") + redirect_next,
{"login": user.username, "password": "testpa$$w0rD"},
)
self.assertRedirects(
response,
reverse("webauthn-login") + redirect_next,
fetch_redirect_response=False,
)
self.assertEqual(self.user_logged_in_count, 0)
def test_webauthn_register_get_without_valid_session(self):
response = self.client.get(reverse("webauthn-register"))
self.assertRedirects(
response,
reverse("account_login") + "?next=" + reverse("webauthn-register"),
fetch_redirect_response=False,
)
@patch("django_allauth_webauthn.views.random_numbers_letters")
def test_webauthn_register_get(self, mock_challenge):
mock_challenge.return_value = self.REGISTRATION_CHALLENGE
self.setup_testcase(id=2, login=True, set_session_user_id=True)
response = self.client.get(reverse("webauthn-register"))
self.assertEqual(response.status_code, 200)
registration_data = json.loads(response.content)
self.assertEqual(registration_data, self.REGISTRATION_GET_DATA)
def test_webauthn_register_post_without_logged_in_user_fails(self):
response = self.client.post(reverse("webauthn-register"))
self.assertRedirects(
response,
reverse("account_login") + "?next=" + reverse("webauthn-register"),
fetch_redirect_response=False,
)
def test_webauthn_register_post_without_session_challenge_fails(self):
self.setup_testcase(id=2, login=True)
response = self.client.post(reverse("webauthn-register"))
self.assertEqual(response.status_code, 422)
def test_webauthn_register_post_without_data_fails(self):
user = self.setup_testcase(
id=2,
login=True,
set_session_user_id=True,
set_session_challenge=self.REGISTRATION_CHALLENGE,
)
response = self.client.post(reverse("webauthn-register"))
self.assertRedirects(
response,
settings.DJANGO_ALLAUTH_WEBAUTHN_REGISTRATION_ERROR_URL,
fetch_redirect_response=False,
)
self.assertFalse(WebauthnData.objects.filter(user=user).exists())
def test_webauthn_register_post_with_token_from_some_account_fails(self):
# Register the device to another account
self.setup_testcase(username="other", devices=[self.DEVICE])
user = self.setup_testcase(
id=2,
login=True,
set_session_user_id=True,
set_session_challenge=self.REGISTRATION_CHALLENGE,
)
self.assertFalse(WebauthnData.objects.filter(user=user).exists())
response = self.client.post(reverse("webauthn-register"), self.REGISTRATION_POST_DATA)
self.assertRedirects(
response,
reverse("test-registration-error"),
fetch_redirect_response=False,
)
# Registering a known token did not work
self.assertFalse(WebauthnData.objects.filter(user=user).exists())
def test_webauthn_register_post(self):
user = self.setup_testcase(
id=2,
login=True,
set_session_user_id=True,
set_session_challenge=self.REGISTRATION_CHALLENGE,
)
self.assertFalse(WebauthnData.objects.filter(user=user).exists())
response = self.client.post(reverse("webauthn-register"), self.REGISTRATION_POST_DATA)
self.assertTrue(WebauthnData.objects.filter(user=user).exists())
device = WebauthnData.objects.filter(user=user).last()
self.assertEqual(device.credential_id, self.DEVICE["credential_id"])
self.assertEqual(device.public_key, self.DEVICE["public_key"])
self.assertEqual(device.sign_counter, self.DEVICE["sign_counter"])
self.assertRedirects(
response,
settings.LOGIN_REDIRECT_URL,
fetch_redirect_response=False,
)
def test_login_view_without_session_user_id_fails(self):
self.setup_testcase(id=2, devices=[self.DEVICE])
response = self.client.get(reverse("webauthn-login"))
self.assertRedirects(response, reverse("account_login"), fetch_redirect_response=False)
def test_login_view(self):
self.setup_testcase(id=2, devices=[self.DEVICE], set_session_user_id=True)
self.client.get(reverse("webauthn-login"))
self.assertTemplateUsed("django_allauth_webauthn/login.html")
@patch("django_allauth_webauthn.views.random_numbers_letters")
def test_verify_get(self, mock_challenge):
mock_challenge.return_value = self.LOGIN_CHALLENGE
self.setup_testcase(id=2, devices=[self.DEVICE], set_session_user_id=True)
response = self.client.get(reverse("webauthn-verify"))
self.assertEqual(response.status_code, 200)
login_data = json.loads(response.content)
self.assertEqual(login_data, self.LOGIN_GET_DATA)
# Ensure that the challenge is set correctly to the session, too.
session = self.client.session
self.assertEqual(session["allauth_webauthn_challenge"], self.LOGIN_CHALLENGE)
def test_verify_post_without_session_user_id_fails(self):
self.setup_testcase(id=2, devices=[self.DEVICE], set_session_challenge=self.LOGIN_CHALLENGE)
response = self.client.post(reverse("webauthn-verify"), self.LOGIN_POST_DATA)
self.assertRedirects(response, reverse("test-login-error"))
def test_verify_post_without_session_challenge_fails(self):
self.setup_testcase(id=2, devices=[self.DEVICE], set_session_user_id=True)
response = self.client.post(reverse("webauthn-verify"), self.LOGIN_POST_DATA)
self.assertRedirects(response, reverse("test-login-error"))
def test_verify_post_without_correct_device_fails(self):
self.setup_testcase(
id=2,
devices=[self.DEVICE_2],
set_session_user_id=True,
set_session_challenge=self.LOGIN_CHALLENGE,
)
response = self.client.post(reverse("webauthn-verify"), self.LOGIN_POST_DATA)
self.assertRedirects(response, reverse("test-login-error"))
def test_verify_post_with_replayed_data_fails(self):
"""Do a basic test with a replay attack in terms of an invalid sign counter (less or equal the actual one)"""
user = self.setup_testcase(
id=2,
devices=[self.DEVICE],
set_session_user_id=True,
set_session_challenge=self.LOGIN_CHALLENGE,
)
device = user.webauthndata_set.first()
device.sign_counter = 2
device.save()
self.assertEqual(self.user_logged_in_count, 0)
response = self.client.post(reverse("webauthn-verify"), self.LOGIN_POST_DATA)
# User not logged in?
self.assertEqual(self.user_logged_in_count, 0)
self.assertRedirects(response, reverse("test-login-error"))
def test_verify_post(self):
user = self.setup_testcase(
id=2,
devices=[self.DEVICE_2, self.DEVICE],
set_session_user_id=True,
set_session_challenge=self.LOGIN_CHALLENGE,
)
device = user.webauthndata_set.get(credential_id=self.DEVICE["credential_id"])
initial_sign_counter = device.sign_counter
self.assertEqual(self.user_logged_in_count, 0)
self.client.post(reverse("webauthn-verify"), self.LOGIN_POST_DATA)
# User logged in?
self.assertEqual(self.user_logged_in_count, 1)
# Sign counter increased?
device.refresh_from_db()
self.assertGreater(device.sign_counter, initial_sign_counter)
# Session variables removed / sanitized?
session = self.client.session
self.assertNotIn("allauth_webauthn_user_id", session)
self.assertNotIn("allauth_webauthn_challenge", session)
def test_verify_post_with_redirect(self):
user = self.setup_testcase(
id=2,
devices=[self.DEVICE_2, self.DEVICE],
set_session_user_id=True,
set_session_challenge=self.LOGIN_CHALLENGE,
)
user.webauthndata_set.get(credential_id=self.DEVICE["credential_id"])
response = self.client.post(
reverse("webauthn-verify") + "?next=" + reverse("protected"),
self.LOGIN_POST_DATA,
)
# User logged in?
self.assertEqual(self.user_logged_in_count, 1)
# Redirected to target page?
self.assertRedirects(response, reverse("protected"))
def test_account_login_with_webauthn_enabled_fails_without_token(self):
user = self.setup_testcase(id=2, devices=[self.DEVICE])
response = self.client.post(
reverse("account_login"),
{"login": user.username, "password": "testpa$$w0rD"},
)
self.assertRedirects(response, reverse("webauthn-login"), fetch_redirect_response=False)
self.assertEqual(self.user_logged_in_count, 0)
# Protected page should not render but redirect back to login
response = self.client.get(reverse("protected"))
self.assertRedirects(
response,
reverse("account_login") + "?next=/protected",
fetch_redirect_response=False,
)
def test_rename_post_for_not_owned_devices_fails(self):
other_user = self.setup_testcase(username="other", devices=[self.DEVICE_2])
self.setup_testcase(devices=[self.DEVICE], login=True)
other_user_device = other_user.webauthndata_set.get(credential_id=self.DEVICE_2["credential_id"])
self.assertEqual(other_user_device.name, "Device #2")
with self.assertRaises(WebauthnData.DoesNotExist):
self.client.post(
reverse("webauthn-rename", kwargs={"pk": other_user_device.pk}),
{"name": "My new name"},
)
other_user_device.refresh_from_db()
self.assertEqual(other_user_device.name, "Device #2")
def test_rename_post(self):
user = self.setup_testcase(devices=[self.DEVICE], login=True)
device = user.webauthndata_set.get(credential_id=self.DEVICE["credential_id"])
self.assertEqual(device.name, "Device #1")
response = self.client.post(
reverse("webauthn-rename", kwargs={"pk": device.pk}),
{"name": "My new name"},
)
self.assertRedirects(
response,
reverse("removed-renamed-success"),
fetch_redirect_response=False,
)
device.refresh_from_db()
self.assertEqual(device.name, "My new name")
def test_remove_post_for_not_owned_devices_fails(self):
other_user = self.setup_testcase(username="other", devices=[self.DEVICE_2])
self.setup_testcase(devices=[self.DEVICE], login=True)
other_user_device = other_user.webauthndata_set.get(credential_id=self.DEVICE_2["credential_id"])
self.assertTrue(other_user.webauthndata_set.filter(credential_id=self.DEVICE_2["credential_id"]).exists())
with self.assertRaises(WebauthnData.DoesNotExist):
self.client.post(reverse("webauthn-remove", kwargs={"pk": other_user_device.pk}))
self.assertTrue(other_user.webauthndata_set.filter(credential_id=self.DEVICE_2["credential_id"]).exists())
def test_remove_post(self):
user = self.setup_testcase(devices=[self.DEVICE], login=True)
device = user.webauthndata_set.get(credential_id=self.DEVICE["credential_id"])
self.assertTrue(user.webauthndata_set.filter(credential_id=self.DEVICE["credential_id"]).exists())
self.client.post(reverse("webauthn-remove", kwargs={"pk": device.pk}))
self.assertFalse(user.webauthndata_set.filter(credential_id=self.DEVICE["credential_id"]).exists())
@override_settings(DJANGO_ALLAUTH_WEBAUTHN_LOGIN_TEMPLATE="alternative_login.html")
def test_login_custom_template(self):
self.setup_testcase(id=2, devices=[self.DEVICE], set_session_user_id=True)
response = self.client.get(reverse("webauthn-login"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "alternative_login.html")
self.assertInHTML("Alternative Login Template", str(response.content))
class TestWebauthnCTAP2(BaseTests.TestWebauthn):
REGISTRATION_CHALLENGE = "gWr81DvWPCLkOPwBqyUOXDr7XnsMQcw1"
LOGIN_CHALLENGE = "vXUgAJyIOdouNr3dACKs7NW4WMh6nMRJ"
# The dicts are dumped and loaded with json to ensure equal json output with the test data.
# Otherwise some single-value tuples are compared against single-value lists...
REGISTRATION_GET_DATA = json.loads(
json.dumps(
{
"challenge": REGISTRATION_CHALLENGE,
"rp": {"name": "Webauthn Test", "id": "localhost"},
"user": {
"id": "Mg==",
"name": "test",
"displayName": ("Webauthn Test user: test",),
"icon": "https://localhost:8000/favicon.ico",
},
"pubKeyCredParams": [
{"alg": -7, "type": "public-key"},
{"alg": -257, "type": "public-key"},
{"alg": -37, "type": "public-key"},
],
"timeout": 60000,
"excludeCredentials": [],
"attestation": "direct",
"extensions": {"webauthn.loc": True},
}
)
)
REGISTRATION_POST_DATA = {
"id": "sDSeeqI7re2Qij2reZvgFtZ8YOpXYjkDX23pgtjllZA",
"rawId": "sDSeeqI7re2Qij2reZvgFtZ8YOpXYjkDX23pgtjllZA",
"type": "public-key",
"attObj": "o2NmbXRmcGFja2VkZ2F0dFN0bXSjY2FsZyZjc2lnWEgwRgIhAL21FJyx959Rs3nwo61SpNu8Gt3X1blxGnDnfjRRcKcNAiEA3M7PhbCUTChCRqNPIh1fbOA5Zto4RWdOY_OsTn81TlJjeDVjgVkB3jCCAdowggF9oAMCAQICAQEwDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMCVVMxETAPBgNVBAoMCENocm9taXVtMSIwIAYDVQQLDBlBdXRoZW50aWNhdG9yIEF0dGVzdGF0aW9uMRowGAYDVQQDDBFCYXRjaCBDZXJ0aWZpY2F0ZTAeFw0xNzA3MTQwMjQwMDBaFw00MTA5MTAxOTU4NTZaMGAxCzAJBgNVBAYTAlVTMREwDwYDVQQKDAhDaHJvbWl1bTEiMCAGA1UECwwZQXV0aGVudGljYXRvciBBdHRlc3RhdGlvbjEaMBgGA1UEAwwRQmF0Y2ggQ2VydGlmaWNhdGUwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASNYX5lyVCOZLzFZzrIKmeZ2jwURmgsJYxGP__fWN_S-j5sN4tT15XEpN_7QZnt14YvI6uvAgO0uJEboFaZlOEBoyUwIzATBgsrBgEEAYLlHAIBAQQEAwIFIDAMBgNVHRMBAf8EAjAAMA0GCSqGSIb3DQEBCwUAA0gAMEUCIQC9SGLply8pw6QIsW67rLNSUeUXoPaHbsh7SpsPrPNYtwIgZKEWn1CpRIh4p7h460VMOxe8EQ1_FlBA_bIqIsTzWEBoYXV0aERhdGFYpEmWDeWIDoxodDQXD2R2YFuP5K65ooYyx5lc87qDHZdjRQAAAAEBAgMEBQYHCAECAwQFBgcIACCwNJ56ojut7ZCKPat5m-AW1nxg6ldiOQNfbemC2OWVkKUBAgMmIAEhWCAs8n6Uv2IdoEdN3FqEmFhc22KRTR2vbjizTEi0zQgP2SJYIBOVdl7yJa_5GEENQMcAoXJvHav2qesgQQ6P3rTEJmsc", # noqa
"clientData": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiZ1dyODFEdldQQ0xrT1B3QnF5VU9YRHI3WG5zTVFjdzEiLCJvcmlnaW4iOiJodHRwczovL2xvY2FsaG9zdDo4MDAwIiwiY3Jvc3NPcmlnaW4iOmZhbHNlfQ", # noqa
"registrationClientExtensions": "{}",
}
LOGIN_GET_DATA = json.loads(
json.dumps(
{
"challenge": LOGIN_CHALLENGE,
"timeout": 60000,
"rpId": "localhost",
"allowCredentials": [
{
"id": "sDSeeqI7re2Qij2reZvgFtZ8YOpXYjkDX23pgtjllZA",
"type": "public-key",
}
],
"userVerification": "preferred",
}
)
)
LOGIN_POST_DATA = {
"id": "sDSeeqI7re2Qij2reZvgFtZ8YOpXYjkDX23pgtjllZA",
"rawId": "sDSeeqI7re2Qij2reZvgFtZ8YOpXYjkDX23pgtjllZA",
"type": "public-key",
"authData": "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MFAAAAAg==",
"clientData": "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoidlhVZ0FKeUlPZG91TnIzZEFDS3M3Tlc0V01oNm5NUkoiLCJvcmlnaW4iOiJodHRwczovL2xvY2FsaG9zdDo4MDAwIiwiY3Jvc3NPcmlnaW4iOmZhbHNlfQ==", # noqa
"signature": "3045022100809f264357de167540f67f398bce9b23fc70269d9f4d6e45214723dfc051b22202204922922be6649bb01af1a96d794b03e652b74e1e8ff17fbedca9bdcc2e6853a4", # noqa
"assertionClientExtensions": "{}",
}
DEVICE = {
"credential_id": "sDSeeqI7re2Qij2reZvgFtZ8YOpXYjkDX23pgtjllZA",
"public_key": "pQECAyYgASFYICzyfpS_Yh2gR03cWoSYWFzbYpFNHa9uOLNMSLTNCA_ZIlggE5V2XvIlr_kYQQ1AxwChcm8dq_ap6yBBDo_etMQmaxw",
"sign_counter": 1,
"name": "Device #1",
}
class TestWebauthnU2F(BaseTests.TestWebauthn):
REGISTRATION_CHALLENGE = "W5TDQtAUGXS7TqwIMlI9TJLvc8Zixwfy"
LOGIN_CHALLENGE = "9owV5Nr47k5ImAxNOEr1bGPosTiulIYO"
# The dicts are dumped and loaded with json to ensure equal json output with the test data.
# Otherwise some single-value tuples are compared against single-value lists...
REGISTRATION_GET_DATA = json.loads(
json.dumps(
{
"challenge": REGISTRATION_CHALLENGE,
"rp": {"name": "Webauthn Test", "id": "localhost"},
"user": {
"id": "Mg==",
"name": "test",
"displayName": ("Webauthn Test user: test",),
"icon": "https://localhost:8000/favicon.ico",
},
"pubKeyCredParams": [
{"alg": -7, "type": "public-key"},
{"alg": -257, "type": "public-key"},
{"alg": -37, "type": "public-key"},
],
"timeout": 60000,
"excludeCredentials": [],
"attestation": "direct",
"extensions": {"webauthn.loc": True},
}
)
)
REGISTRATION_POST_DATA = {
"id": "aCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJY",
"rawId": "aCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJY",
"type": "public-key",
"attObj": "o2NmbXRoZmlkby11MmZnYXR0U3RtdKJjc2lnWEgwRgIhAPXJyJWQ4rMuJZMEyObKCQcrDkwGjLFqUzFZZpFwbx9NAiEAiIR4z-lcsXsaURPkF9rS5ePjDL3fZKfgonZOoWfjz-ZjeDVjgVkB3zCCAdswggF9oAMCAQICAQEwDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMCVVMxETAPBgNVBAoMCENocm9taXVtMSIwIAYDVQQLDBlBdXRoZW50aWNhdG9yIEF0dGVzdGF0aW9uMRowGAYDVQQDDBFCYXRjaCBDZXJ0aWZpY2F0ZTAeFw0xNzA3MTQwMjQwMDBaFw00MTA5MTAyMDA0NDZaMGAxCzAJBgNVBAYTAlVTMREwDwYDVQQKDAhDaHJvbWl1bTEiMCAGA1UECwwZQXV0aGVudGljYXRvciBBdHRlc3RhdGlvbjEaMBgGA1UEAwwRQmF0Y2ggQ2VydGlmaWNhdGUwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASNYX5lyVCOZLzFZzrIKmeZ2jwURmgsJYxGP__fWN_S-j5sN4tT15XEpN_7QZnt14YvI6uvAgO0uJEboFaZlOEBoyUwIzATBgsrBgEEAYLlHAIBAQQEAwIFIDAMBgNVHRMBAf8EAjAAMA0GCSqGSIb3DQEBCwUAA0kAMEYCIQDCpbuX5xS4iJIsc3V9i_Vndw0OvEiPlfiOpuAoHGjZ-QIhAMhPya13X5hoWBKTUeAwE-Tfw9zc27JDCGOXGD4AQy6raGF1dGhEYXRhWKRJlg3liA6MaHQ0Fw9kdmBbj-SuuaKGMseZXPO6gx2XY0EAAAAAAAAAAAAAAAAAAAAAAAAAAAAgaCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJalAQIDJiABIVggwtSQRUx62PwYiNH1-8UlZuW8dHff4F0Wap0MOHik2gciWCDrFo_4N_dlSXQ5t4s92VKxHDTzl1AzVH3P4PFLOtbr7g", # noqa
"clientData": "eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiVzVURFF0QVVHWFM3VHF3SU1sSTlUSkx2YzhaaXh3ZnkiLCJvcmlnaW4iOiJodHRwczovL2xvY2FsaG9zdDo4MDAwIiwiY3Jvc3NPcmlnaW4iOmZhbHNlfQ", # noqa
"registrationClientExtensions": "{}",
}
LOGIN_GET_DATA = json.loads(
json.dumps(
{
"challenge": LOGIN_CHALLENGE,
"timeout": 60000,
"rpId": "localhost",
"allowCredentials": [
{
"id": "aCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJY",
"type": "public-key",
}
],
"userVerification": "preferred",
}
)
)
LOGIN_POST_DATA = {
"id": "aCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJY",
"rawId": "aCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJY",
"type": "public-key",
"authData": "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MBAAAAAg==",
"clientData": "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiOW93VjVOcjQ3azVJbUF4Tk9FcjFiR1Bvc1RpdWxJWU8iLCJvcmlnaW4iOiJodHRwczovL2xvY2FsaG9zdDo4MDAwIiwiY3Jvc3NPcmlnaW4iOmZhbHNlfQ==", # noqa
"signature": "3045022100c015418be3da081df47d399b58a03d2745ebf7ecbd2cd7acbcc1da650ed8139802201e2b98f7ca27112970463abfbd56f1aab32ae8613df76aa6882a8cb816c49749", # noqa
"assertionClientExtensions": "{}",
}
DEVICE = {
"credential_id": "aCh9CpDe1omGzbU_7zNebWWZtYrdgnchxQWKKFc6HJY",
"public_key": "pQECAyYgASFYIMLUkEVMetj8GIjR9fvFJWblvHR33-BdFmqdDDh4pNoHIlgg6xaP-Df3ZUl0ObeLPdlSsRw085dQM1R9z-DxSzrW6-4",
"sign_counter": 0,
"name": "Device #1",
}
|
py | 1a3fd6e4df68c43b0b0060f0a05db59557d9057a | # coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.fastmath.ops."""
import collections
from absl.testing import parameterized
import gin
import jax.numpy as jnp
import numpy as onp
from tensorflow import test
from trax import fastmath
_TestNamedtuple = collections.namedtuple('_TestNamedtuple', ['x'])
class BackendTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
gin.clear_config()
def override_gin(self, bindings):
gin.parse_config_files_and_bindings(None, bindings)
def test_backend_imports_correctly(self):
backend = fastmath.backend()
self.assertEqual(jnp, backend['np'])
self.assertNotEqual(onp, backend['np'])
self.override_gin("backend.name = 'numpy'")
backend = fastmath.backend()
self.assertNotEqual(jnp, backend['np'])
self.assertEqual(onp, backend['np'])
def test_backend_can_be_set(self):
self.assertEqual(fastmath.backend_name(), 'jax')
fastmath.set_backend('tensorflow-numpy')
self.assertEqual(fastmath.backend_name(), 'tensorflow-numpy')
fastmath.set_backend(None)
self.assertEqual(fastmath.backend_name(), 'jax')
def test_numpy_backend_delegation(self):
# Assert that we are getting JAX's numpy backend.
backend = fastmath.backend()
numpy = fastmath.numpy
self.assertEqual(jnp, backend['np'])
# Assert that `numpy` calls the appropriate gin configured functions and
# properties.
self.assertTrue(numpy.isinf(numpy.inf))
self.assertEqual(jnp.isinf, numpy.isinf)
self.assertEqual(jnp.inf, numpy.inf)
# Assert that we will now get the pure numpy backend.
self.override_gin("backend.name = 'numpy'")
backend = fastmath.backend()
numpy = fastmath.numpy
self.assertEqual(onp, backend['np'])
# Assert that `numpy` calls the appropriate gin configured functions and
# properties.
self.assertTrue(numpy.isinf(numpy.inf))
self.assertEqual(onp.isinf, numpy.isinf)
self.assertEqual(onp.inf, numpy.inf)
@parameterized.named_parameters(
('_' + b.value, b) for b in (fastmath.Backend.JAX, fastmath.Backend.TFNP))
def test_fori_loop(self, backend):
with fastmath.use_backend(backend):
res = fastmath.fori_loop(2, 5, lambda i, x: x + i, 1)
self.assertEqual(res, 1 + 2 + 3 + 4)
def test_nested_map(self):
inp = {'a': ([0, 1], 2), 'b': _TestNamedtuple(3)}
out = {'a': ([1, 2], 3), 'b': _TestNamedtuple(4)}
self.assertEqual(fastmath.nested_map(lambda x: x + 1, inp), out)
def test_nested_stack(self):
inp = [
{'a': ([0, 1], 2), 'b': _TestNamedtuple(3)},
{'a': ([1, 2], 3), 'b': _TestNamedtuple(4)},
]
out = {'a': ([[0, 1], [1, 2]], [2, 3]), 'b': _TestNamedtuple([3, 4])}
onp.testing.assert_equal(fastmath.nested_stack(inp), out)
def test_names_match(self):
# Names match up.
for backend_enum, backend_obj in fastmath.ops._backend_dict.items():
self.assertEqual(backend_enum.value, backend_obj['name'])
# Every backend appears in the dictionary.
for backend_enum in fastmath.ops.Backend:
self.assertIn(backend_enum, fastmath.ops._backend_dict)
def test_use_backend_str(self):
with fastmath.use_backend('tensorflow-numpy'):
self.assertEqual(fastmath.backend_name(), 'tensorflow-numpy')
def test_use_backend_enum(self):
with fastmath.use_backend(fastmath.Backend.NUMPY):
self.assertEqual(fastmath.backend_name(), 'numpy')
if __name__ == '__main__':
test.main()
|
py | 1a3fd6e4fc17abfd2852ddf2cea37092ce8b07fb | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0001_squashed_0012_auto_20170921_1332'),
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('slug', models.SlugField()),
('title', models.CharField(max_length=255)),
('text', models.TextField()),
('url', models.URLField(max_length=255)),
('sponsor', models.ForeignKey(to='sponsors.Sponsor', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
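# --- Illustrative sketch (not part of the migration) -------------------------
# The model definition this migration corresponds to would look roughly like
# the following in models.py; the field list is taken from the operations
# above, everything else is an assumption for illustration.
#
#     class Job(models.Model):
#         updated_at = models.DateTimeField(auto_now=True)
#         created_at = models.DateTimeField(auto_now_add=True)
#         slug = models.SlugField()
#         title = models.CharField(max_length=255)
#         text = models.TextField()
#         url = models.URLField(max_length=255)
#         sponsor = models.ForeignKey('sponsors.Sponsor', on_delete=models.CASCADE)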
|
py | 1a3fd73351a5eacd072168fc92a4ebf20a378922 | #!/usr/bin/env python3
header = '''
file {
name="/opt/rtcds/userapps/release/vis/common/medm/steppingmotor/OVERVIEW/STANDALONE_STEPPER_OVERVIEW.adl"
version=030107
}
display {
object {
x=1996
y=56
width=512
height=400
}
clr=14
bclr=11
cmap=""
gridSpacing=5
gridOn=0
snapToGrid=0
}
"color map" {
ncolors=65
colors {
ffffff,
ececec,
dadada,
c8c8c8,
bbbbbb,
aeaeae,
9e9e9e,
919191,
858585,
787878,
696969,
5a5a5a,
464646,
2d2d2d,
000000,
00d800,
1ebb00,
339900,
2d7f00,
216c00,
fd0000,
de1309,
be190b,
a01207,
820400,
5893ff,
597ee1,
4b6ec7,
3a5eab,
27548d,
fbf34a,
f9da3c,
eeb62b,
e19015,
cd6100,
ffb0ff,
d67fe2,
ae4ebc,
8b1a96,
610a75,
a4aaff,
8793e2,
6a73c1,
4d52a4,
343386,
c7bb6d,
b79d5c,
a47e3c,
7d5627,
58340f,
99ffff,
73dfff,
4ea5f9,
2a63e4,
0a00b8,
ebf1b5,
d4db9d,
bbc187,
a6a462,
8b8239,
73ff6b,
52da3b,
3cb420,
289315,
1a7309,
}
}
'''
channel_dict = {
'TEST_P0_GAS': 0,
'TEST_P1_GAS': 1,
'TEST_P2_GAS': 2,
'TEST_P3_GAS': 3,
'TEST_P4_GAS': 4,
'TEST_P5_GAS': 5
}
#common = '/opt/rtcds/userapps/release/vis/common'
common = './'
def top(x,y):
width = 300
height = 100
txt = '''
composite {{
object {{
x={x}
y={y}
width=300
height=30
}}
"composite name"=""
"composite file"="./OVERVIEW_TOP.adl"
}}
'''.format(common=common,x=x,y=y)
return txt,width,height
def mini(x,y,system,stage,dof,damp,bio,stepname,stepid,motor,label,mode='ERR'):
width = 480
height = 25
txt = '''
composite {{
object {{
x={x}
y={y}
width=550
height=30
}}
"composite name"=""
"composite file"="./OVERVIEW_MINI.adl;IFO=$(IFO),ifo=$(ifo),SYSTEM={system},STAGE={stage},DOF={dof},DAMP={damp},BIO={bio},STEPNAME={stepname},STEPID={stepid},MOTOR={motor},LABEL={label}"
}}
'''.format(common=common,x=x,y=y,system=system,stage=stage,dof=dof,damp=damp,bio=bio,stepname=stepname,stepid=stepid,label=label,motor=motor)
return txt,width,height
def head(x,y,system,mtype):
width = 300
height = 55
txt = '''
composite {{
object {{
x={x}
y={y}
width=300
height=55
}}
"composite name"=""
"composite file"="./HEAD_MINI.adl;IFO=$(IFO),ifo=$(ifo),SYSTEM={system},TYPE={mtype}"
}}
'''.format(common=common,x=x,y=y,system=system,mtype=mtype)
return txt,width,height
def foot(x,y,stepperid):
width = 300
height = 50
txt = '''
composite {{
object {{
x={x}
y={y}
width=300
height=30
}}
"composite name"=""
"composite file"="./FOOT_MINI.adl;IFO=$(IFO),ifo=$(ifo),STEPPERID={stepperid}"
}}
'''.format(common=common,x=x,y=y,stepperid=stepperid)
return txt,width,height
def mtype_is(system):
if 'TM' in system:
mtype = 'TM'
elif 'BS' == system:
mtype = 'BS'
elif 'SR' in system:
mtype = 'SR'
else:
mtype = None
return mtype
def damp_is(system,mode='ERR'):
if system in ['BS','SR2','SR3','SRM']:
damp = 'DCCTRL'
else:
damp = 'DAMP'
return damp
def bio_is(system):
if system in ['BS','SR2','SR3','SRM']:
bio = 'BIO'
else:
bio = 'BO'
return bio
def stepname_is(dof):
if dof == 'GAS':
return 'STEP_GAS'
else:
return 'STEP_IP'
def stepperid_is(system):
if system == 'PRM' or system == 'PR3':
return 'PR0'
else:
return system
def stepid_is(system,stage):
if stage == 'IP':
return system+'_IP'
else:
return stepperid_is(system)+'_GAS'
def motor_is(system,stage,dof):
if stage == 'IP':
return dof
else:
return channel_dict[system+'_'+stage+'_'+dof]
def label_is(stage,dof):
if stage == 'IP':
if dof == 'F0Y':
return 'F0_Y'
if dof == 'A':
return stage + '_H1'
if dof == 'B':
return stage + '_H2'
if dof == 'C':
return stage + '_H3'
return stage + '_' + dof
if __name__=='__main__':
systems = ['TEST'] # TEST
# ERROR mode
# TypeA
# K1:VIS-ITMY_IP_DAMP_L_INMON
# K1:VIS-ITMY_F0_DAMP_GAS_INMON
# TypeB
# K1:VIS-BS_IP_DCCTRL_L_INMON
# K1:VIS-BS_F0_DCCTRL_GAS_INMON
# TypeBp
# K1:VIS-PR2_BF_DAMP_GAS_INMON
#
# FB mode
# TypeA
# K1:VIS-ETMY_IP_SUMOUT_L_OUTMON
# K1:VIS-ETMY_F0_SUMOUT_GAS_OUTMON
# TypeB
# K1:VIS-BS_IP_DCCTRL_L_OUTMON
# K1:VIS-BS_F0_COILOUTF_GAS_OUTMON
# TypeBp
# K1:VIS-PR2_SF_DAMP_GAS_OUTMON
stages = {'TEST':['P0','P1','P2','P3','P4','P5']}
dofs = {'P0':['GAS'],
'P1':['GAS'],
'P2':['GAS'],
'P3':['GAS'],
'P4':['GAS'],
'P5':['GAS'],}
mode = 'ERR'
height = 10
width = 0
_h0 = height
_w0 = width
contents = header
_h = 0
_w = 0
with open('./STANDALONE_STEPPER_OVERVIEW.adl','w') as f:
txt,w0,h0 = top(width,height)
contents += txt
height += h0
_h0 = height
for num,system in enumerate(systems):
print('{0}'.format(system))
mtype = mtype_is(system)
stepperid = stepperid_is(system)
txt,w0,h0 = head(width,height,system,mtype)
contents += txt
_h = h0
for stage in stages[system]:
print(' - ',stage,dofs[stage])
for dof in dofs[stage]:
damp = damp_is(system)
bio = bio_is(system)
stepname = stepname_is(dof)
stepid = stepid_is(system,stage)
motor = motor_is(system,stage,dof)
label = label_is(stage, dof)
txt,w1,h1 = mini(width,height+_h,system,stage,dof,damp,bio,stepname,stepid,motor,label,mode=mode)
_h += h1
contents += txt
txt,w2,h2 = foot(width,height+_h,stepperid)
contents += txt
_h += h2
_w = max(w0,w1,w2) + 2
q,mod = divmod(num+1,4)
height = q*320 + _h0
width = mod*_w + _w0
f.write(contents)
|