# ===== id: 19759 =====
import rospy

MOVE_CYCLE_PERIOD = 0.01


def move_towards(target, current, step=1):
    """Move `current` one `step` towards `target`; return (new_value, reached)."""
    if abs(target - current) < step:
        return target, True
    elif target > current:
        return current + step, False
    else:
        return current - step, False


def move_leg(leg, coxa=None, femur=None, tibia=None, step=1.3):
    """Step each given joint of `leg` towards its target; return True when all are done."""
    coxa_done = True
    femur_done = True
    tibia_done = True
    if coxa is not None:
        leg.coxa, coxa_done = move_towards(coxa, leg.coxa, step)
    if femur is not None:
        leg.femur, femur_done = move_towards(femur, leg.femur, step)
    if tibia is not None:
        leg.tibia, tibia_done = move_towards(tibia, leg.tibia, step)
    return coxa_done and femur_done and tibia_done


def is_leg_close(leg, coxa=None, femur=None, tibia=None, tolerance=20):
    """Return True if every given joint target is within `tolerance` of the current angle."""
    coxa_close = True
    femur_close = True
    tibia_close = True
    if coxa is not None:
        coxa_close = leg.coxa + tolerance > coxa > leg.coxa - tolerance
    if femur is not None:
        femur_close = leg.femur + tolerance > femur > leg.femur - tolerance
    if tibia is not None:
        tibia_close = leg.tibia + tolerance > tibia > leg.tibia - tolerance
    return coxa_close and femur_close and tibia_close
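
# Hedged example (added): the helpers only need an object with coxa/femur/tibia
# attributes, so a SimpleNamespace is enough to illustrate one servo-loop tick:
#
#     from types import SimpleNamespace
#     leg = SimpleNamespace(coxa=100.0, femur=60.0, tibia=240.0)
#     done = move_leg(leg, coxa=150, step=1.3)    # nudges leg.coxa one step towards 150
#     is_leg_close(leg, coxa=150, tolerance=20)   # True once leg.coxa is within +/-20 of 150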
class FoldingManager(object):
    def __init__(self, body_controller):
        super(FoldingManager, self).__init__()
        self.body_controller = body_controller
        self.last_motor_position = None

    def position_femur_tibia(self):
        current_position = self.body_controller.read_hexapod_motor_positions()
        self.last_motor_position = current_position
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, None, 60, 240)
            lm = move_leg(self.last_motor_position.left_middle, None, 60, 240)
            lr = move_leg(self.last_motor_position.left_rear, None, 60, 240)
            rf = move_leg(self.last_motor_position.right_front, None, 240, 60)
            rm = move_leg(self.last_motor_position.right_middle, None, 240, 60)
            rr = move_leg(self.last_motor_position.right_rear, None, 240, 60)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lm and lr and rf and rm and rr:
                break
        rospy.sleep(0.05)

    def check_if_folded(self):
        current_position = self.body_controller.read_hexapod_motor_positions()
        self.last_motor_position = current_position
        lf = is_leg_close(self.last_motor_position.left_front, 240)
        lm = is_leg_close(self.last_motor_position.left_middle, 240) or is_leg_close(self.last_motor_position.left_middle, 60)
        lr = is_leg_close(self.last_motor_position.left_rear, 60)
        rf = is_leg_close(self.last_motor_position.right_front, 60)
        rm = is_leg_close(self.last_motor_position.right_middle, 60) or is_leg_close(self.last_motor_position.right_middle, 240)
        rr = is_leg_close(self.last_motor_position.right_rear, 240)
        return lf and lm and lr and rf and rm and rr
    def unfold(self):
        self.position_femur_tibia()
        current_position = self.body_controller.read_hexapod_motor_positions()
        self.last_motor_position = current_position
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = False
            lr = False
            rf = False
            rr = False
            if self.last_motor_position.left_middle.coxa > 120:
                lf = move_leg(self.last_motor_position.left_front, 150)
            lm = move_leg(self.last_motor_position.left_middle, 150)
            if self.last_motor_position.left_middle.coxa < 180:
                lr = move_leg(self.last_motor_position.left_rear, 150)
            if self.last_motor_position.right_middle.coxa < 180:
                rf = move_leg(self.last_motor_position.right_front, 150)
            rm = move_leg(self.last_motor_position.right_middle, 150)
            if self.last_motor_position.right_middle.coxa > 120:
                rr = move_leg(self.last_motor_position.right_rear, 150)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lm and lr and rf and rm and rr:
                break
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, tibia=210)
            lm = move_leg(self.last_motor_position.left_middle, tibia=210)
            lr = move_leg(self.last_motor_position.left_rear, tibia=210)
            rf = move_leg(self.last_motor_position.right_front, tibia=90)
            rm = move_leg(self.last_motor_position.right_middle, tibia=90)
            rr = move_leg(self.last_motor_position.right_rear, tibia=90)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lm and lr and rf and rm and rr:
                break
        rospy.sleep(0.2)
        self.body_controller.set_torque(False)
    def fold(self):
        self.position_femur_tibia()
        current_position = self.body_controller.read_hexapod_motor_positions()
        self.last_motor_position = current_position
        if not self.check_if_folded():
            while True:
                rospy.sleep(MOVE_CYCLE_PERIOD)
                lm = move_leg(self.last_motor_position.left_middle, 150)
                rm = move_leg(self.last_motor_position.right_middle, 150)
                self.body_controller.set_motors(self.last_motor_position)
                if lm and rm:
                    break
            while True:
                rospy.sleep(MOVE_CYCLE_PERIOD)
                lf = move_leg(self.last_motor_position.left_front, 240)
                lr = move_leg(self.last_motor_position.left_rear, 60)
                rf = move_leg(self.last_motor_position.right_front, 60)
                rr = move_leg(self.last_motor_position.right_rear, 240)
                self.body_controller.set_motors(self.last_motor_position)
                if lf and lr and rf and rr:
                    break
            while True:
                rospy.sleep(MOVE_CYCLE_PERIOD)
                lm = move_leg(self.last_motor_position.left_middle, 240)
                rm = move_leg(self.last_motor_position.right_middle, 60)
                self.body_controller.set_motors(self.last_motor_position)
                if lm and rm:
                    break
        rospy.sleep(0.2)
        self.body_controller.set_torque(False)
    def unfold_on_ground(self):
        self.position_femur_tibia()
        current_position = self.body_controller.read_hexapod_motor_positions()
        self.last_motor_position = current_position
        # lift middle legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, tibia=200)
            rm = move_leg(self.last_motor_position.right_middle, tibia=100)
            self.body_controller.set_motors(self.last_motor_position)
            if lm and rm:
                break
        # fold out middle legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, coxa=150)
            rm = move_leg(self.last_motor_position.right_middle, coxa=150)
            self.body_controller.set_motors(self.last_motor_position)
            if lm and rm:
                break
        # lower right leg
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            rm = move_leg(self.last_motor_position.right_middle, femur=170, tibia=100)
            self.body_controller.set_motors(self.last_motor_position)
            if rm:
                break
        # unfold right legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            rf = move_leg(self.last_motor_position.right_front, coxa=150)
            rr = move_leg(self.last_motor_position.right_rear, coxa=150)
            self.body_controller.set_motors(self.last_motor_position)
            if rf and rr:
                break
        # lift right legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            rf = move_leg(self.last_motor_position.right_front, tibia=90)
            rr = move_leg(self.last_motor_position.right_rear, tibia=90)
            self.body_controller.set_motors(self.last_motor_position)
            if rf and rr:
                break
        # switch lifted side
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, femur=130, tibia=200)
            rm = move_leg(self.last_motor_position.right_middle, femur=240, tibia=90)
            self.body_controller.set_motors(self.last_motor_position)
            if rm and lm:
                break
        # unfold left legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, coxa=150)
            lr = move_leg(self.last_motor_position.left_rear, coxa=150)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lr:
                break
        # lift left legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, tibia=210)
            lr = move_leg(self.last_motor_position.left_rear, tibia=210)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lr:
                break
        # lift middle left
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, femur=60, tibia=210)
            self.body_controller.set_motors(self.last_motor_position)
            if lm:
                break
        rospy.sleep(0.2)
        self.body_controller.set_torque(False)
    def fold_on_ground(self):
        current_position = self.body_controller.read_hexapod_motor_positions()
        self.last_motor_position = current_position
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, 150, femur=60, tibia=210)
            lm = move_leg(self.last_motor_position.left_middle, 150, femur=60, tibia=210)
            lr = move_leg(self.last_motor_position.left_rear, 150, femur=60, tibia=210)
            rf = move_leg(self.last_motor_position.right_front, 150, femur=240, tibia=90)
            rm = move_leg(self.last_motor_position.right_middle, 150, femur=240, tibia=90)
            rr = move_leg(self.last_motor_position.right_rear, 150, femur=240, tibia=90)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lm and lr and rf and rm and rr:
                break
        # lower right leg
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            rm = move_leg(self.last_motor_position.right_middle, femur=170, tibia=100)
            self.body_controller.set_motors(self.last_motor_position)
            if rm:
                break
        # compress right legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            rf = move_leg(self.last_motor_position.right_front, None, 240, 60)
            rr = move_leg(self.last_motor_position.right_rear, None, 240, 60)
            self.body_controller.set_motors(self.last_motor_position)
            if rf and rr:
                break
        # fold right legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            rf = move_leg(self.last_motor_position.right_front, 60)
            rr = move_leg(self.last_motor_position.right_rear, 240)
            self.body_controller.set_motors(self.last_motor_position)
            if rf and rr:
                break
        # switch lifted side
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, femur=130, tibia=200)
            rm = move_leg(self.last_motor_position.right_middle, femur=240, tibia=90)
            self.body_controller.set_motors(self.last_motor_position)
            if rm and lm:
                break
        # compress left legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, None, 60, 240)
            lr = move_leg(self.last_motor_position.left_rear, None, 60, 240)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lr:
                break
        # fold left legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lf = move_leg(self.last_motor_position.left_front, 240)
            lr = move_leg(self.last_motor_position.left_rear, 60)
            self.body_controller.set_motors(self.last_motor_position)
            if lf and lr:
                break
        # lift left middle leg
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, femur=60, tibia=210)
            self.body_controller.set_motors(self.last_motor_position)
            if lm:
                break
        # fold middle legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, 230)
            rm = move_leg(self.last_motor_position.right_middle, 70)
            self.body_controller.set_motors(self.last_motor_position)
            if lm and rm:
                break
        # compress middle legs
        while True:
            rospy.sleep(MOVE_CYCLE_PERIOD)
            lm = move_leg(self.last_motor_position.left_middle, None, 60, 240)
            rm = move_leg(self.last_motor_position.right_middle, None, 240, 60)
            self.body_controller.set_motors(self.last_motor_position)
            if lm and rm:
                break
        rospy.sleep(0.2)
        self.body_controller.set_torque(False)
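
# Hedged usage sketch (added): `body_controller` is expected to provide
# read_hexapod_motor_positions(), set_motors() and set_torque(); the
# `HexapodBodyController` name below is illustrative, not defined in this file.
#
#     rospy.init_node('folding_manager_demo')
#     controller = HexapodBodyController()
#     manager = FoldingManager(controller)
#     manager.unfold_on_ground()   # stand up from the folded pose
#     rospy.sleep(5.0)
#     manager.fold_on_ground()     # tuck the legs back in and release torque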
# ===== id: 19773 =====
import glob, os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge


class Flownet2:
    def __init__(self, bilinear_warping_module):
        self.weights = dict()
        for key, shape in self.all_variables():
            self.weights[key] = tf.get_variable(key, shape=shape)
        self.bilinear_warping_module = bilinear_warping_module

    def leaky_relu(self, x, s):
        assert s > 0 and s < 1, "Wrong s"
        return tf.maximum(x, s*x)

    def warp(self, x, flow):
        return self.bilinear_warping_module.bilinear_warping(x, tf.stack([flow[:,:,:,1], flow[:,:,:,0]], axis=3))
    # flip=True  -> channel 0 is the y axis (pointing down, as in matrix indexing)
    #               and channel 1 is the x axis.
    # flip=False -> channel 0 is x and channel 1 is y.
    def __call__(self, im0, im1, flip=True):
        f = self.get_blobs(im0, im1)['predict_flow_final']
        if flip:
            f = tf.stack([f[:,:,:,1], f[:,:,:,0]], axis=3)
        return f
    def get_optimizer(self, flow, target, learning_rate=1e-4):
        # flow = self.__call__(im0, im1)
        loss = tf.reduce_sum(flow * target)  # target holds the incoming gradients!
        opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95, beta2=0.99, epsilon=1e-8)
        opt = opt.minimize(loss, var_list=
            # [v for k,v in self.weights.iteritems() if (k.startswith('net3_') or k.startswith('netsd_') or k.startswith('fuse_'))])
            [v for k, v in self.weights.iteritems() if ((k.startswith('net3_') or k.startswith('netsd_') or k.startswith('fuse_')) and not ('upsample' in k or 'deconv' in k))])
        return opt, loss
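
    # Added note on the loss above: since d/d(flow) of sum(flow * target) equals
    # target, minimizing this "loss" back-propagates whatever per-pixel gradient
    # is fed through `target` into the selected weights. A caller can therefore
    # compute dL/d(flow) for some external objective L and inject it here.
    # Hedged sketch (placeholder names are illustrative, not from this file):
    #
    #     flow = net(im0_ph, im1_ph)
    #     grad_ph = tf.placeholder(tf.float32, [None, None, None, 2])
    #     train_op, _ = net.get_optimizer(flow, grad_ph)
    #     sess.run(train_op, {im0_ph: a, im1_ph: b, grad_ph: upstream_grad})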
    # If I run the network with large images (1024x2048) it crashes due to memory
    # constraints on a 12 GB Titan X.
    # See https://github.com/tensorflow/tensorflow/issues/5816#issuecomment-268710077
    # for a possible explanation. I fix it by adding run_after in the section with
    # the correlation layer so that 441 large tensors are not allocated at the same time.
    def run_after(self, a_tensor, b_tensor):
        """Force a_tensor to run after b_tensor by adding a control dependency."""
        ge.reroute.add_control_inputs(a_tensor.op, [b_tensor.op])
    # Without the epsilon I get NaN errors when I backpropagate.
    def l2_norm(self, x):
        return tf.sqrt(tf.maximum(1e-5, tf.reduce_sum(x**2, axis=3, keep_dims=True)))
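
    # Added note: the clamp matters because d/dz sqrt(z) = 1 / (2*sqrt(z)) diverges
    # as z -> 0, so a pixel with zero brightness error would otherwise push an
    # infinite/NaN gradient back through this branch; flooring z at 1e-5 bounds it.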
def get_blobs(self, im0, im1):
blobs = dict()
batch_size = tf.to_int32(tf.shape(im0)[0])
width = tf.to_int32(tf.shape(im0)[2])
height = tf.to_int32(tf.shape(im0)[1])
TARGET_WIDTH = width
TARGET_HEIGHT = height
divisor = 64.
ADAPTED_WIDTH = tf.to_int32(tf.ceil(tf.to_float(width)/divisor) * divisor)
ADAPTED_HEIGHT = tf.to_int32(tf.ceil(tf.to_float(height)/divisor) * divisor)
SCALE_WIDTH = tf.to_float(width) / tf.to_float(ADAPTED_WIDTH);
SCALE_HEIGHT = tf.to_float(height) / tf.to_float(ADAPTED_HEIGHT);
blobs['img0'] = im0
blobs['img1'] = im1
blobs['img0s'] = blobs['img0']*0.00392156862745098
blobs['img1s'] = blobs['img1']*0.00392156862745098
#mean = np.array([0.411451, 0.432060, 0.450141])
mean = np.array([0.37655231, 0.39534855, 0.40119368])
blobs['img0_nomean'] = blobs['img0s'] - mean
blobs['img1_nomean'] = blobs['img1s'] - mean
blobs['img0_nomean_resize'] = tf.image.resize_bilinear(blobs['img0_nomean'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['img1_nomean_resize'] = tf.image.resize_bilinear(blobs['img1_nomean'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['conv1a'] = tf.pad(blobs['img0_nomean_resize'], [[0,0], [3,3], [3,3], [0,0]])
blobs['conv1a'] = tf.nn.conv2d(blobs['conv1a'], self.weights['conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv1_b']
blobs['conv1a'] = self.leaky_relu(blobs['conv1a'], 0.1)
blobs['conv1b'] = tf.pad(blobs['img1_nomean_resize'], [[0,0], [3,3], [3,3], [0,0]])
blobs['conv1b'] = tf.nn.conv2d(blobs['conv1b'], self.weights['conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv1_b']
blobs['conv1b'] = self.leaky_relu(blobs['conv1b'], 0.1)
blobs['conv2a'] = tf.pad(blobs['conv1a'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv2a'] = tf.nn.conv2d(blobs['conv2a'], self.weights['conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv2_b']
blobs['conv2a'] = self.leaky_relu(blobs['conv2a'], 0.1)
blobs['conv2b'] = tf.pad(blobs['conv1b'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv2b'] = tf.nn.conv2d(blobs['conv2b'], self.weights['conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv2_b']
blobs['conv2b'] = self.leaky_relu(blobs['conv2b'], 0.1)
blobs['conv3a'] = tf.pad(blobs['conv2a'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv3a'] = tf.nn.conv2d(blobs['conv3a'], self.weights['conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv3_b']
blobs['conv3a'] = self.leaky_relu(blobs['conv3a'], 0.1)
blobs['conv3b'] = tf.pad(blobs['conv2b'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv3b'] = tf.nn.conv2d(blobs['conv3b'], self.weights['conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv3_b']
blobs['conv3b'] = self.leaky_relu(blobs['conv3b'], 0.1)
# this might be considered a bit hacky
tmp = []
x1_l = []
x2_l = []
for di in range(-20, 21, 2):
    for dj in range(-20, 21, 2):
        x1 = tf.pad(blobs['conv3a'], [[0,0], [20,20], [20,20], [0,0]])
        x2 = tf.pad(blobs['conv3b'], [[0,0], [20-di,20+di], [20-dj,20+dj], [0,0]])
        x1_l.append(x1)
        x2_l.append(x2)
        c = tf.nn.conv2d(x1*x2, tf.ones([1, 1, 256, 1])/256., strides=[1,1,1,1], padding='VALID')
        tmp.append(c[:,20:-20,20:-20,:])
for i in range(len(tmp)-1):
    #self.run_after(tmp[i], tmp[i+1])
    self.run_after(x1_l[i], tmp[i+1])
    self.run_after(x2_l[i], tmp[i+1])
blobs['corr'] = tf.concat(tmp, axis=3)
blobs['corr'] = self.leaky_relu(blobs['corr'], 0.1)
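# Added explanation: the loop above builds the FlowNet-C correlation layer.
# For each of the 21*21 = 441 displacements (di, dj) in [-20, 20] with stride 2,
# conv3b is shifted against conv3a via padding, the two are multiplied
# elementwise, and the 1x1 convolution with a constant ones/256 kernel averages
# over the 256 channels, i.e. it computes the normalized dot product between
# patches. run_after() then chains the intermediate tensors so the 441 padded
# copies are not all resident in memory at once (see the comment on run_after).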
blobs['conv_redir'] = tf.nn.conv2d(blobs['conv3a'], self.weights['conv_redir_w'], strides=[1,1,1,1], padding="VALID") + self.weights['conv_redir_b']
blobs['conv_redir'] = self.leaky_relu(blobs['conv_redir'], 0.1)
blobs['blob16'] = tf.concat([blobs['conv_redir'], blobs['corr']], axis=3)
blobs['conv3_1'] = tf.nn.conv2d(blobs['blob16'], self.weights['conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv3_1_b']
blobs['conv3_1'] = self.leaky_relu(blobs['conv3_1'], 0.1)
blobs['conv4'] = tf.pad(blobs['conv3_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv4'] = tf.nn.conv2d(blobs['conv4'], self.weights['conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv4_b']
blobs['conv4'] = self.leaky_relu(blobs['conv4'], 0.1)
blobs['conv4_1'] = tf.nn.conv2d(blobs['conv4'], self.weights['conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv4_1_b']
blobs['conv4_1'] = self.leaky_relu(blobs['conv4_1'], 0.1)
blobs['conv5'] = tf.pad(blobs['conv4_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv5'] = tf.nn.conv2d(blobs['conv5'], self.weights['conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv5_b']
blobs['conv5'] = self.leaky_relu(blobs['conv5'], 0.1)
blobs['conv5_1'] = tf.nn.conv2d(blobs['conv5'], self.weights['conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv5_1_b']
blobs['conv5_1'] = self.leaky_relu(blobs['conv5_1'], 0.1)
blobs['conv6'] = tf.pad(blobs['conv5_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv6'] = tf.nn.conv2d(blobs['conv6'], self.weights['conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv6_b']
blobs['conv6'] = self.leaky_relu(blobs['conv6'], 0.1)
blobs['conv6_1'] = tf.nn.conv2d(blobs['conv6'], self.weights['conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv6_1_b']
blobs['conv6_1'] = self.leaky_relu(blobs['conv6_1'], 0.1)
blobs['predict_flow6'] = tf.nn.conv2d(blobs['conv6_1'], self.weights['Convolution1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution1_b']
blobs['deconv5'] = tf.nn.conv2d_transpose(blobs['conv6_1'], self.weights['deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['deconv5_b']
blobs['deconv5'] = self.leaky_relu(blobs['deconv5'], 0.1)
blobs['upsampled_flow6_to_5'] = tf.nn.conv2d_transpose(blobs['predict_flow6'], self.weights['upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['upsample_flow6to5_b']
blobs['concat5'] = tf.concat([blobs['conv5_1'], blobs['deconv5'], blobs['upsampled_flow6_to_5']], axis=3)
blobs['predict_flow5'] = tf.pad(blobs['concat5'], [[0,0], [1,1], [1,1], [0,0]])
blobs['predict_flow5'] = tf.nn.conv2d(blobs['predict_flow5'], self.weights['Convolution2_w'], strides=[1,1,1,1], padding="VALID") + self.weights['Convolution2_b']
blobs['deconv4'] = tf.nn.conv2d_transpose(blobs['concat5'], self.weights['deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['deconv4_b']
blobs['deconv4'] = self.leaky_relu(blobs['deconv4'], 0.1)
blobs['upsampled_flow5_to_4'] = tf.nn.conv2d_transpose(blobs['predict_flow5'], self.weights['upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['upsample_flow5to4_b']
blobs['concat4'] = tf.concat([blobs['conv4_1'], blobs['deconv4'], blobs['upsampled_flow5_to_4']], axis=3)
blobs['predict_flow4'] = tf.nn.conv2d(blobs['concat4'], self.weights['Convolution3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution3_b']
blobs['deconv3'] = tf.nn.conv2d_transpose(blobs['concat4'], self.weights['deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['deconv3_b']
blobs['deconv3'] = self.leaky_relu(blobs['deconv3'], 0.1)
blobs['upsampled_flow4_to_3'] = tf.nn.conv2d_transpose(blobs['predict_flow4'], self.weights['upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['upsample_flow4to3_b']
blobs['concat3'] = tf.concat([blobs['conv3_1'], blobs['deconv3'], blobs['upsampled_flow4_to_3']], axis=3)
blobs['predict_flow3'] = tf.nn.conv2d(blobs['concat3'], self.weights['Convolution4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution4_b']
blobs['deconv2'] = tf.nn.conv2d_transpose(blobs['concat3'], self.weights['deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['deconv2_b']
blobs['deconv2'] = self.leaky_relu(blobs['deconv2'], 0.1)
blobs['upsampled_flow3_to_2'] = tf.nn.conv2d_transpose(blobs['predict_flow3'], self.weights['upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['upsample_flow3to2_b']
blobs['concat2'] = tf.concat([blobs['conv2a'], blobs['deconv2'], blobs['upsampled_flow3_to_2']], axis=3)
blobs['predict_flow2'] = tf.nn.conv2d(blobs['concat2'], self.weights['Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution5_b']
blobs['blob41'] = blobs['predict_flow2'] * 20.
blobs['blob42'] = tf.image.resize_bilinear(blobs['blob41'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['blob43'] = self.warp(blobs['img1_nomean_resize'], blobs['blob42'])
blobs['blob44'] = blobs['img0_nomean_resize'] - blobs['blob43']
#blobs['blob45'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob44']**2, axis=3, keep_dims=True))
blobs['blob45'] = self.l2_norm(blobs['blob44'])
blobs['blob46'] = 0.05*blobs['blob42']
blobs['blob47'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize'], blobs['blob43'], blobs['blob46'], blobs['blob45']], axis=3)
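# Added note: blob47 stacks, along the channel axis, the two mean-subtracted
# images (3+3 channels), the warped second image (3), the flow scaled by 0.05 (2)
# and the brightness-error magnitude (1) -- 12 channels in total, matching the
# (7, 7, 12, 64) shape of net2_conv1_w in all_variables() below.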
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE FIRST BRANCH #####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob48'] = tf.pad(blobs['blob47'], [[0,0], [3,3], [3,3], [0,0]])
blobs['blob48'] = tf.nn.conv2d(blobs['blob48'], self.weights['net2_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv1_b']
blobs['blob48'] = self.leaky_relu(blobs['blob48'], 0.1)
blobs['blob49'] = tf.pad(blobs['blob48'], [[0,0], [2,2], [2, 2], [0,0]])
blobs['blob49'] = tf.nn.conv2d(blobs['blob49'], self.weights['net2_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv2_b']
blobs['blob49'] = self.leaky_relu(blobs['blob49'], 0.1)
blobs['blob50'] = tf.pad(blobs['blob49'], [[0,0], [2,2], [2,2], [0,0]])
blobs['blob50'] = tf.nn.conv2d(blobs['blob50'], self.weights['net2_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv3_b']
blobs['blob50'] = self.leaky_relu(blobs['blob50'], 0.1)
blobs['blob51'] = tf.nn.conv2d(blobs['blob50'], self.weights['net2_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv3_1_b']
blobs['blob51'] = self.leaky_relu(blobs['blob51'], 0.1)
blobs['blob52'] = tf.pad(blobs['blob51'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob52'] = tf.nn.conv2d(blobs['blob52'], self.weights['net2_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv4_b']
blobs['blob52'] = self.leaky_relu(blobs['blob52'], 0.1)
blobs['blob53'] = tf.nn.conv2d(blobs['blob52'], self.weights['net2_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv4_1_b']
blobs['blob53'] = self.leaky_relu(blobs['blob53'], 0.1)
blobs['blob54'] = tf.pad(blobs['blob53'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob54'] = tf.nn.conv2d(blobs['blob54'], self.weights['net2_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv5_b']
blobs['blob54'] = self.leaky_relu(blobs['blob54'], 0.1)
blobs['blob55'] = tf.nn.conv2d(blobs['blob54'], self.weights['net2_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv5_1_b']
blobs['blob55'] = self.leaky_relu(blobs['blob55'], 0.1)
blobs['blob56'] = tf.pad(blobs['blob55'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob56'] = tf.nn.conv2d(blobs['blob56'], self.weights['net2_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv6_b']
blobs['blob56'] = self.leaky_relu(blobs['blob56'], 0.1)
blobs['blob57'] = tf.nn.conv2d(blobs['blob56'], self.weights['net2_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv6_1_b']
blobs['blob57'] = self.leaky_relu(blobs['blob57'], 0.1)
blobs['blob58'] = tf.nn.conv2d(blobs['blob57'], self.weights['net2_predict_conv6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv6_b']
blobs['blob59'] = tf.nn.conv2d_transpose(blobs['blob57'], self.weights['net2_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['net2_deconv5_b']
blobs['blob59'] = self.leaky_relu(blobs['blob59'], 0.1)
blobs['blob60'] = tf.nn.conv2d_transpose(blobs['predict_flow6'], self.weights['net2_net2_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow6to5_b']
blobs['blob61'] = tf.concat([blobs['blob55'], blobs['blob59'], blobs['blob60']], axis=3)
blobs['blob62'] = tf.nn.conv2d(blobs['blob61'], self.weights['net2_predict_conv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv5_b']
blobs['blob63'] = tf.nn.conv2d_transpose(blobs['blob61'], self.weights['net2_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['net2_deconv4_b']
blobs['blob63'] = self.leaky_relu(blobs['blob63'], 0.1)
blobs['blob64'] = tf.nn.conv2d_transpose(blobs['blob62'], self.weights['net2_net2_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow5to4_b']
blobs['blob65'] = tf.concat([blobs['blob53'], blobs['blob63'], blobs['blob64']], axis=3)
blobs['blob66'] = tf.nn.conv2d(blobs['blob65'], self.weights['net2_predict_conv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv4_b']
blobs['blob67'] = tf.nn.conv2d_transpose(blobs['blob65'], self.weights['net2_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['net2_deconv3_b']
blobs['blob67'] = self.leaky_relu(blobs['blob67'], 0.1)
blobs['blob68'] = tf.nn.conv2d_transpose(blobs['blob66'], self.weights['net2_net2_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow4to3_b']
blobs['blob69'] = tf.concat([blobs['blob51'], blobs['blob67'], blobs['blob68']], axis=3)
blobs['blob70'] = tf.nn.conv2d(blobs['blob69'], self.weights['net2_predict_conv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv3_b']
blobs['blob71'] = tf.nn.conv2d_transpose(blobs['blob69'], self.weights['net2_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['net2_deconv2_b']
blobs['blob71'] = self.leaky_relu(blobs['blob71'], 0.1)
blobs['blob72'] = tf.nn.conv2d_transpose(blobs['blob70'], self.weights['net2_net2_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow3to2_b']
blobs['blob73'] = tf.concat([blobs['blob49'], blobs['blob71'], blobs['blob72']], axis=3)
blobs['blob74'] = tf.nn.conv2d(blobs['blob73'], self.weights['net2_predict_conv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv2_b']
blobs['blob75'] = blobs['blob74'] * 20.
blobs['blob76'] = tf.image.resize_bilinear(blobs['blob75'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['blob77'] = self.warp(blobs['img1_nomean_resize'], blobs['blob76'])
blobs['blob78'] = blobs['img0_nomean_resize'] - blobs['blob77']
#blobs['blob79'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob78']**2, axis=3, keep_dims=True))
blobs['blob79'] = self.l2_norm(blobs['blob78'])
blobs['blob80'] = 0.05*blobs['blob76']
blobs['blob81'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize'], blobs['blob77'], blobs['blob80'], blobs['blob79']], axis=3)
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE SECOND BRANCH ####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob82'] = tf.pad(blobs['blob81'], [[0,0], [3,3], [3,3], [0,0]])
blobs['blob82'] = tf.nn.conv2d(blobs['blob82'], self.weights['net3_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv1_b']
blobs['blob82'] = self.leaky_relu(blobs['blob82'], 0.1)
blobs['blob83'] = tf.pad(blobs['blob82'], [[0,0], [2,2], [2, 2], [0,0]])
blobs['blob83'] = tf.nn.conv2d(blobs['blob83'], self.weights['net3_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv2_b']
blobs['blob83'] = self.leaky_relu(blobs['blob83'], 0.1)
blobs['blob84'] = tf.pad(blobs['blob83'], [[0,0], [2,2], [2,2], [0,0]])
blobs['blob84'] = tf.nn.conv2d(blobs['blob84'], self.weights['net3_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv3_b']
blobs['blob84'] = self.leaky_relu(blobs['blob84'], 0.1)
blobs['blob85'] = tf.nn.conv2d(blobs['blob84'], self.weights['net3_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv3_1_b']
blobs['blob85'] = self.leaky_relu(blobs['blob85'], 0.1)
blobs['blob86'] = tf.pad(blobs['blob85'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob86'] = tf.nn.conv2d(blobs['blob86'], self.weights['net3_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv4_b']
blobs['blob86'] = self.leaky_relu(blobs['blob86'], 0.1)
blobs['blob87'] = tf.nn.conv2d(blobs['blob86'], self.weights['net3_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv4_1_b']
blobs['blob87'] = self.leaky_relu(blobs['blob87'], 0.1)
blobs['blob88'] = tf.pad(blobs['blob87'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob88'] = tf.nn.conv2d(blobs['blob88'], self.weights['net3_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv5_b']
blobs['blob88'] = self.leaky_relu(blobs['blob88'], 0.1)
blobs['blob89'] = tf.nn.conv2d(blobs['blob88'], self.weights['net3_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv5_1_b']
blobs['blob89'] = self.leaky_relu(blobs['blob89'], 0.1)
blobs['blob90'] = tf.pad(blobs['blob89'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob90'] = tf.nn.conv2d(blobs['blob90'], self.weights['net3_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv6_b']
blobs['blob90'] = self.leaky_relu(blobs['blob90'], 0.1)
blobs['blob91'] = tf.nn.conv2d(blobs['blob90'], self.weights['net3_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv6_1_b']
blobs['blob91'] = self.leaky_relu(blobs['blob91'], 0.1)
blobs['blob92'] = tf.nn.conv2d(blobs['blob91'], self.weights['net3_predict_conv6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv6_b']
blobs['blob93'] = tf.nn.conv2d_transpose(blobs['blob91'], self.weights['net3_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['net3_deconv5_b']
blobs['blob93'] = self.leaky_relu(blobs['blob93'], 0.1)
blobs['blob94'] = tf.nn.conv2d_transpose(blobs['blob92'], self.weights['net3_net3_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow6to5_b']
blobs['blob95'] = tf.concat([blobs['blob89'], blobs['blob93'], blobs['blob94']], axis=3)
blobs['blob96'] = tf.nn.conv2d(blobs['blob95'], self.weights['net3_predict_conv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv5_b']
blobs['blob97'] = tf.nn.conv2d_transpose(blobs['blob95'], self.weights['net3_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['net3_deconv4_b']
blobs['blob97'] = self.leaky_relu(blobs['blob97'], 0.1)
blobs['blob98'] = tf.nn.conv2d_transpose(blobs['blob96'], self.weights['net3_net3_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow5to4_b']
blobs['blob99'] = tf.concat([blobs['blob87'], blobs['blob97'], blobs['blob98']], axis=3)
blobs['blob100'] = tf.nn.conv2d(blobs['blob99'], self.weights['net3_predict_conv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv4_b']
blobs['blob101'] = tf.nn.conv2d_transpose(blobs['blob99'], self.weights['net3_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['net3_deconv3_b']
blobs['blob101'] = self.leaky_relu(blobs['blob101'], 0.1)
blobs['blob102'] = tf.nn.conv2d_transpose(blobs['blob100'], self.weights['net3_net3_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow4to3_b']
blobs['blob103'] = tf.concat([blobs['blob85'], blobs['blob101'], blobs['blob102']], axis=3)
blobs['blob104'] = tf.nn.conv2d(blobs['blob103'], self.weights['net3_predict_conv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv3_b']
blobs['blob105'] = tf.nn.conv2d_transpose(blobs['blob103'], self.weights['net3_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['net3_deconv2_b']
blobs['blob105'] = self.leaky_relu(blobs['blob105'], 0.1)
blobs['blob106'] = tf.nn.conv2d_transpose(blobs['blob104'], self.weights['net3_net3_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow3to2_b']
blobs['blob107'] = tf.concat([blobs['blob83'], blobs['blob105'], blobs['blob106']], axis=3)
blobs['blob108'] = tf.nn.conv2d(blobs['blob107'], self.weights['net3_predict_conv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv2_b']
blobs['blob109'] = blobs['blob108'] * 20.
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE THIRD BRANCH ####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob110'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize']], axis=3)
#self.run_after(blobs['blob110'], blobs['blob109'])
blobs['blob111'] = tf.nn.conv2d(blobs['blob110'], self.weights['netsd_conv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv0_b']
blobs['blob111'] = self.leaky_relu(blobs['blob111'], 0.1)
blobs['blob112'] = tf.pad(blobs['blob111'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob112'] = tf.nn.conv2d(blobs['blob112'], self.weights['netsd_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv1_b']
blobs['blob112'] = self.leaky_relu(blobs['blob112'], 0.1)
blobs['blob113'] = tf.nn.conv2d(blobs['blob112'], self.weights['netsd_conv1_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv1_1_b']
blobs['blob113'] = self.leaky_relu(blobs['blob113'], 0.1)
blobs['blob114'] = tf.pad(blobs['blob113'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob114'] = tf.nn.conv2d(blobs['blob114'], self.weights['netsd_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv2_b']
blobs['blob114'] = self.leaky_relu(blobs['blob114'], 0.1)
blobs['blob115'] = tf.nn.conv2d(blobs['blob114'], self.weights['netsd_conv2_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv2_1_b']
blobs['blob115'] = self.leaky_relu(blobs['blob115'], 0.1)
blobs['blob116'] = tf.pad(blobs['blob115'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob116'] = tf.nn.conv2d(blobs['blob116'], self.weights['netsd_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv3_b']
blobs['blob116'] = self.leaky_relu(blobs['blob116'], 0.1)
blobs['blob117'] = tf.nn.conv2d(blobs['blob116'], self.weights['netsd_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv3_1_b']
blobs['blob117'] = self.leaky_relu(blobs['blob117'], 0.1)
blobs['blob118'] = tf.pad(blobs['blob117'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob118'] = tf.nn.conv2d(blobs['blob118'], self.weights['netsd_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv4_b']
blobs['blob118'] = self.leaky_relu(blobs['blob118'], 0.1)
blobs['blob119'] = tf.nn.conv2d(blobs['blob118'], self.weights['netsd_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv4_1_b']
blobs['blob119'] = self.leaky_relu(blobs['blob119'], 0.1)
blobs['blob120'] = tf.pad(blobs['blob119'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob120'] = tf.nn.conv2d(blobs['blob120'], self.weights['netsd_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv5_b']
blobs['blob120'] = self.leaky_relu(blobs['blob120'], 0.1)
blobs['blob121'] = tf.nn.conv2d(blobs['blob120'], self.weights['netsd_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv5_1_b']
blobs['blob121'] = self.leaky_relu(blobs['blob121'], 0.1)
blobs['blob122'] = tf.pad(blobs['blob121'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob122'] = tf.nn.conv2d(blobs['blob122'], self.weights['netsd_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv6_b']
blobs['blob122'] = self.leaky_relu(blobs['blob122'], 0.1)
blobs['blob123'] = tf.nn.conv2d(blobs['blob122'], self.weights['netsd_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv6_1_b']
blobs['blob123'] = self.leaky_relu(blobs['blob123'], 0.1)
blobs['blob124'] = tf.nn.conv2d(blobs['blob123'], self.weights['netsd_Convolution1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution1_b']
blobs['blob125'] = tf.nn.conv2d_transpose(blobs['blob123'], self.weights['netsd_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['netsd_deconv5_b']
blobs['blob125'] = self.leaky_relu(blobs['blob125'], 0.1)
blobs['blob126'] = tf.nn.conv2d_transpose(blobs['blob124'], self.weights['netsd_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow6to5_b']
blobs['blob127'] = tf.concat([blobs['blob121'], blobs['blob125'], blobs['blob126']], axis=3)
blobs['blob128'] = tf.nn.conv2d(blobs['blob127'], self.weights['netsd_interconv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv5_b']
blobs['blob129'] = tf.nn.conv2d(blobs['blob128'], self.weights['netsd_Convolution2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution2_b']
blobs['blob130'] = tf.nn.conv2d_transpose(blobs['blob127'], self.weights['netsd_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['netsd_deconv4_b']
blobs['blob130'] = self.leaky_relu(blobs['blob130'], 0.1)
blobs['blob131'] = tf.nn.conv2d_transpose(blobs['blob129'], self.weights['netsd_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow5to4_b']
blobs['blob132'] = tf.concat([blobs['blob119'], blobs['blob130'], blobs['blob131']], axis=3)
blobs['blob133'] = tf.nn.conv2d(blobs['blob132'], self.weights['netsd_interconv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv4_b']
blobs['blob134'] = tf.nn.conv2d(blobs['blob133'], self.weights['netsd_Convolution3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution3_b']
blobs['blob135'] = tf.nn.conv2d_transpose(blobs['blob132'], self.weights['netsd_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['netsd_deconv3_b']
blobs['blob135'] = self.leaky_relu(blobs['blob135'], 0.1)
blobs['blob136'] = tf.nn.conv2d_transpose(blobs['blob134'], self.weights['netsd_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow4to3_b']
blobs['blob137'] = tf.concat([blobs['blob117'], blobs['blob135'], blobs['blob136']], axis=3)
blobs['blob138'] = tf.nn.conv2d(blobs['blob137'], self.weights['netsd_interconv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv3_b']
blobs['blob139'] = tf.nn.conv2d(blobs['blob138'], self.weights['netsd_Convolution4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution4_b']
blobs['blob140'] = tf.nn.conv2d_transpose(blobs['blob137'], self.weights['netsd_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['netsd_deconv2_b']
blobs['blob140'] = self.leaky_relu(blobs['blob140'], 0.1)
blobs['blob141'] = tf.nn.conv2d_transpose(blobs['blob139'], self.weights['netsd_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow3to2_b']
blobs['blob142'] = tf.concat([blobs['blob115'], blobs['blob140'], blobs['blob141']], axis=3)
blobs['blob143'] = tf.nn.conv2d(blobs['blob142'], self.weights['netsd_interconv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv2_b']
blobs['blob144'] = tf.nn.conv2d(blobs['blob143'], self.weights['netsd_Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution5_b']
blobs['blob145'] = 0.05*blobs['blob144']
blobs['blob146'] = tf.image.resize_nearest_neighbor(blobs['blob145'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=False)
blobs['blob147'] = tf.image.resize_nearest_neighbor(blobs['blob109'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=False)
#blobs['blob148'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob146']**2, axis=3, keep_dims=True))
blobs['blob148'] = self.l2_norm(blobs['blob146'])
#blobs['blob149'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob147']**2, axis=3, keep_dims=True))
blobs['blob149'] = self.l2_norm(blobs['blob147'])
blobs['blob150'] = self.warp(blobs['img1_nomean_resize'], blobs['blob146'])
blobs['blob151'] = blobs['img0_nomean_resize'] - blobs['blob150']
#blobs['blob152'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob151']**2, axis=3, keep_dims=True))
blobs['blob152'] = self.l2_norm(blobs['blob151'])
blobs['blob153'] = self.warp(blobs['img1_nomean_resize'], blobs['blob147'])
blobs['blob154'] = blobs['img0_nomean_resize'] - blobs['blob153']
#blobs['blob155'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob154']**2, axis=3, keep_dims=True))
blobs['blob155'] = self.l2_norm(blobs['blob154'])
blobs['blob156'] = tf.concat([blobs['img0_nomean_resize'], blobs['blob146'], blobs['blob147'], blobs['blob148'], blobs['blob149'], blobs['blob152'], blobs['blob155']], axis=3)
blobs['blob157'] = tf.nn.conv2d(blobs['blob156'], self.weights['fuse_conv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv0_b']
blobs['blob157'] = self.leaky_relu(blobs['blob157'], 0.1)
blobs['blob158'] = tf.pad(blobs['blob157'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob158'] = tf.nn.conv2d(blobs['blob158'], self.weights['fuse_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['fuse_conv1_b']
blobs['blob158'] = self.leaky_relu(blobs['blob158'], 0.1)
blobs['blob159'] = tf.nn.conv2d(blobs['blob158'], self.weights['fuse_conv1_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv1_1_b']
blobs['blob159'] = self.leaky_relu(blobs['blob159'], 0.1)
blobs['blob160'] = tf.pad(blobs['blob159'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob160'] = tf.nn.conv2d(blobs['blob160'], self.weights['fuse_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['fuse_conv2_b']
blobs['blob160'] = self.leaky_relu(blobs['blob160'], 0.1)
blobs['blob161'] = tf.nn.conv2d(blobs['blob160'], self.weights['fuse_conv2_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv2_1_b']
blobs['blob161'] = self.leaky_relu(blobs['blob161'], 0.1)
blobs['blob162'] = tf.nn.conv2d(blobs['blob161'], self.weights['fuse__Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution5_b']
blobs['blob163'] = tf.nn.conv2d_transpose(blobs['blob161'], self.weights['fuse_deconv1_w'], output_shape=[batch_size, ADAPTED_HEIGHT/2, ADAPTED_WIDTH/2, 32], strides=[1,2,2,1]) + self.weights['fuse_deconv1_b']
blobs['blob163'] = self.leaky_relu(blobs['blob163'], 0.1)
blobs['blob164'] = tf.nn.conv2d_transpose(blobs['blob162'], self.weights['fuse_upsample_flow2to1_w'], output_shape=[batch_size, ADAPTED_HEIGHT/2, ADAPTED_WIDTH/2, 2], strides=[1,2,2,1]) + self.weights['fuse_upsample_flow2to1_b']
blobs['blob165'] = tf.concat([blobs['blob159'], blobs['blob163'], blobs['blob164']], axis=3)
blobs['blob166'] = tf.nn.conv2d(blobs['blob165'], self.weights['fuse_interconv1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_interconv1_b']
blobs['blob167'] = tf.nn.conv2d(blobs['blob166'], self.weights['fuse__Convolution6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution6_b']
blobs['blob168'] = tf.nn.conv2d_transpose(blobs['blob165'], self.weights['fuse_deconv0_w'], output_shape=[batch_size, ADAPTED_HEIGHT/1, ADAPTED_WIDTH/1, 16], strides=[1,2,2,1]) + self.weights['fuse_deconv0_b']
blobs['blob168'] = self.leaky_relu(blobs['blob168'], 0.1)
blobs['blob169'] = tf.nn.conv2d_transpose(blobs['blob167'], self.weights['fuse_upsample_flow1to0_w'], output_shape=[batch_size, ADAPTED_HEIGHT, ADAPTED_WIDTH, 2], strides=[1,2,2,1]) + self.weights['fuse_upsample_flow1to0_b']
blobs['blob170'] = tf.concat([blobs['blob157'], blobs['blob168'], blobs['blob169']], axis=3)
blobs['blob171'] = tf.nn.conv2d(blobs['blob170'], self.weights['fuse_interconv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_interconv0_b']
blobs['blob172'] = tf.nn.conv2d(blobs['blob171'], self.weights['fuse__Convolution7_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution7_b']
blobs['predict_flow_resize'] = tf.image.resize_bilinear(blobs['blob172'], size=[TARGET_HEIGHT, TARGET_WIDTH], align_corners=True)
scale = tf.stack([SCALE_WIDTH, SCALE_HEIGHT])
scale = tf.reshape(scale, [1,1,1,2])
blobs['predict_flow_final'] = scale*blobs['predict_flow_resize']
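# Added note: the network runs at the padded ADAPTED_* resolution (a multiple of
# 64); the flow is resized back to the requested size above and its u/v channels
# are multiplied by SCALE_WIDTH/SCALE_HEIGHT so displacements are expressed in
# pixels of the original resolution.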
self.blobs = blobs
return blobs
def all_variables(self):
return [('netsd_deconv5_w', (4, 4, 512, 1024)),
('netsd_conv1_b', (64,)),
('netsd_upsample_flow5to4_w', (4, 4, 2, 2)),
('conv2_b', (128,)),
('fuse__Convolution5_w', (3, 3, 128, 2)),
('netsd_conv4_1_w', (3, 3, 512, 512)),
('netsd_interconv3_w', (3, 3, 386, 128)),
('netsd_deconv4_w', (4, 4, 256, 1026)),
('deconv4_b', (256,)),
('fuse_interconv0_w', (3, 3, 82, 16)),
('netsd_Convolution2_b', (2,)),
('net3_conv4_b', (512,)),
('net3_conv3_b', (256,)),
('net3_predict_conv2_w', (3, 3, 194, 2)),
('net3_predict_conv3_b', (2,)),
('conv6_1_w', (3, 3, 1024, 1024)),
('fuse_upsample_flow2to1_b', (2,)),
('Convolution1_w', (3, 3, 1024, 2)),
('net3_deconv3_w', (4, 4, 128, 770)),
('net2_deconv3_b', (128,)),
('fuse_conv1_w', (3, 3, 64, 64)),
('conv5_w', (3, 3, 512, 512)),
('Convolution4_w', (3, 3, 386, 2)),
('fuse_conv0_b', (64,)),
('net2_conv3_w', (5, 5, 128, 256)),
('upsample_flow4to3_b', (2,)),
('netsd_conv4_1_b', (512,)),
('fuse_upsample_flow2to1_w', (4, 4, 2, 2)),
('netsd_conv4_b', (512,)),
('net2_net2_upsample_flow3to2_b', (2,)),
('net3_predict_conv4_b', (2,)),
('fuse_upsample_flow1to0_b', (2,)),
('conv4_1_w', (3, 3, 512, 512)),
('deconv2_b', (64,)),
('net2_conv4_1_w', (3, 3, 512, 512)),
('net3_deconv4_w', (4, 4, 256, 1026)),
('net2_deconv5_b', (512,)),
('netsd_deconv5_b', (512,)),
('net2_deconv2_b', (64,)),
('net3_conv2_b', (128,)),
('conv_redir_w', (1, 1, 256, 32)),
('fuse_conv1_1_b', (128,)),
('net2_deconv5_w', (4, 4, 512, 1024)),
('net2_conv5_b', (512,)),
('net2_conv4_w', (3, 3, 256, 512)),
('net2_predict_conv6_w', (3, 3, 1024, 2)),
('netsd_conv5_b', (512,)),
('deconv4_w', (4, 4, 256, 1026)),
('net2_net2_upsample_flow4to3_b', (2,)),
('fuse__Convolution6_w', (3, 3, 32, 2)),
('net3_deconv2_w', (4, 4, 64, 386)),
('net2_conv6_1_w', (3, 3, 1024, 1024)),
('netsd_conv0_b', (64,)),
('netsd_conv5_1_w', (3, 3, 512, 512)),
('net2_conv6_1_b', (1024,)),
('net3_conv2_w', (5, 5, 64, 128)),
('net3_predict_conv6_w', (3, 3, 1024, 2)),
('net3_conv4_1_b', (512,)),
('net3_net3_upsample_flow4to3_w', (4, 4, 2, 2)),
('net2_deconv2_w', (4, 4, 64, 386)),
('deconv3_b', (128,)),
('netsd_interconv5_b', (512,)),
('net2_conv3_1_w', (3, 3, 256, 256)),
('netsd_interconv4_w', (3, 3, 770, 256)),
('net3_deconv3_b', (128,)),
('fuse_conv0_w', (3, 3, 11, 64)),
('net3_predict_conv6_b', (2,)),
('fuse_upsample_flow1to0_w', (4, 4, 2, 2)),
('netsd_deconv3_b', (128,)),
('net3_predict_conv5_w', (3, 3, 1026, 2)),
('netsd_conv5_w', (3, 3, 512, 512)),
('netsd_interconv5_w', (3, 3, 1026, 512)),
('netsd_Convolution3_w', (3, 3, 256, 2)),
('net2_predict_conv4_w', (3, 3, 770, 2)),
('deconv2_w', (4, 4, 64, 386)),
('net3_predict_conv5_b', (2,)),
('fuse__Convolution5_b', (2,)),
('fuse__Convolution7_w', (3, 3, 16, 2)),
('net2_net2_upsample_flow6to5_w', (4, 4, 2, 2)),
('netsd_conv3_b', (256,)),
('net3_conv6_w', (3, 3, 512, 1024)),
('net3_conv1_b', (64,)),
('netsd_Convolution4_b', (2,)),
('net3_conv3_w', (5, 5, 128, 256)),
('netsd_conv0_w', (3, 3, 6, 64)),
('net2_conv4_b', (512,)),
('net2_predict_conv3_w', (3, 3, 386, 2)),
('net3_net3_upsample_flow3to2_w', (4, 4, 2, 2)),
('fuse_conv1_1_w', (3, 3, 64, 128)),
('deconv5_b', (512,)),
('fuse__Convolution7_b', (2,)),
('net3_conv6_1_w', (3, 3, 1024, 1024)),
('net3_net3_upsample_flow5to4_w', (4, 4, 2, 2)),
('net3_conv4_w', (3, 3, 256, 512)),
('upsample_flow5to4_w', (4, 4, 2, 2)),
('conv4_1_b', (512,)),
('img0s_aug_b', (320, 448, 3, 1)),
('conv5_1_b', (512,)),
('net3_conv4_1_w', (3, 3, 512, 512)),
('upsample_flow5to4_b', (2,)),
('net3_conv3_1_b', (256,)),
('Convolution1_b', (2,)),
('upsample_flow4to3_w', (4, 4, 2, 2)),
('conv5_1_w', (3, 3, 512, 512)),
('conv3_1_b', (256,)),
('conv3_w', (5, 5, 128, 256)),
('net2_conv2_b', (128,)),
('net3_net3_upsample_flow6to5_w', (4, 4, 2, 2)),
('upsample_flow3to2_b', (2,)),
('netsd_Convolution5_w', (3, 3, 64, 2)),
('netsd_interconv2_w', (3, 3, 194, 64)),
('net2_predict_conv6_b', (2,)),
('net2_deconv4_w', (4, 4, 256, 1026)),
('scale_conv1_b', (2,)),
('net2_net2_upsample_flow5to4_w', (4, 4, 2, 2)),
('netsd_conv2_b', (128,)),
('netsd_conv2_1_b', (128,)),
('netsd_upsample_flow6to5_w', (4, 4, 2, 2)),
('net2_predict_conv5_b', (2,)),
('net3_conv6_1_b', (1024,)),
('netsd_conv6_w', (3, 3, 512, 1024)),
('Convolution4_b', (2,)),
('net2_predict_conv4_b', (2,)),
('fuse_deconv1_b', (32,)),
('conv3_1_w', (3, 3, 473, 256)),
('net3_deconv2_b', (64,)),
('netsd_conv6_b', (1024,)),
('net2_conv5_1_w', (3, 3, 512, 512)),
('net3_conv5_1_w', (3, 3, 512, 512)),
('deconv5_w', (4, 4, 512, 1024)),
('fuse_conv2_b', (128,)),
('netsd_conv1_1_b', (128,)),
('netsd_upsample_flow6to5_b', (2,)),
('Convolution5_w', (3, 3, 194, 2)),
('scale_conv1_w', (1, 1, 2, 2)),
('net2_net2_upsample_flow5to4_b', (2,)),
('conv6_1_b', (1024,)),
('fuse_conv2_1_b', (128,)),
('netsd_Convolution5_b', (2,)),
('netsd_conv3_1_b', (256,)),
('conv2_w', (5, 5, 64, 128)),
('fuse_conv2_w', (3, 3, 128, 128)),
('net2_conv2_w', (5, 5, 64, 128)),
('conv3_b', (256,)),
('net3_deconv5_w', (4, 4, 512, 1024)),
('img1s_aug_w', (1, 1, 1, 1)),
('netsd_conv2_w', (3, 3, 128, 128)),
('conv6_w', (3, 3, 512, 1024)),
('netsd_conv4_w', (3, 3, 256, 512)),
('net2_conv1_w', (7, 7, 12, 64)),
('netsd_Convolution1_w', (3, 3, 1024, 2)),
('netsd_conv1_w', (3, 3, 64, 64)),
('netsd_deconv4_b', (256,)),
('conv4_w', (3, 3, 256, 512)),
('conv5_b', (512,)),
('net3_deconv5_b', (512,)),
('netsd_interconv3_b', (128,)),
('net3_conv3_1_w', (3, 3, 256, 256)),
('net2_predict_conv5_w', (3, 3, 1026, 2)),
('Convolution3_b', (2,)),
('netsd_conv5_1_b', (512,)),
('netsd_interconv4_b', (256,)),
('conv4_b', (512,)),
('net3_net3_upsample_flow6to5_b', (2,)),
('Convolution5_b', (2,)),
('fuse_conv2_1_w', (3, 3, 128, 128)),
('net3_net3_upsample_flow4to3_b', (2,)),
('conv1_w', (7, 7, 3, 64)),
('upsample_flow6to5_b', (2,)),
('conv6_b', (1024,)),
('netsd_upsample_flow3to2_w', (4, 4, 2, 2)),
('net2_deconv3_w', (4, 4, 128, 770)),
('netsd_conv2_1_w', (3, 3, 128, 128)),
('netsd_Convolution3_b', (2,)),
('netsd_upsample_flow4to3_w', (4, 4, 2, 2)),
('fuse_interconv1_w', (3, 3, 162, 32)),
('netsd_upsample_flow4to3_b', (2,)),
('netsd_conv3_1_w', (3, 3, 256, 256)),
('netsd_deconv3_w', (4, 4, 128, 770)),
('net3_conv5_b', (512,)),
('net3_conv5_1_b', (512,)),
('net2_net2_upsample_flow4to3_w', (4, 4, 2, 2)),
('net2_net2_upsample_flow3to2_w', (4, 4, 2, 2)),
('net2_conv3_b', (256,)),
('netsd_conv6_1_w', (3, 3, 1024, 1024)),
('fuse_deconv0_b', (16,)),
('net2_predict_conv2_w', (3, 3, 194, 2)),
('net2_conv1_b', (64,)),
('net2_conv6_b', (1024,)),
('net3_predict_conv2_b', (2,)),
('net2_conv4_1_b', (512,)),
('netsd_Convolution4_w', (3, 3, 128, 2)),
('deconv3_w', (4, 4, 128, 770)),
('fuse_deconv1_w', (4, 4, 32, 128)),
('netsd_Convolution2_w', (3, 3, 512, 2)),
('netsd_Convolution1_b', (2,)),
('net2_conv3_1_b', (256,)),
('fuse_conv1_b', (64,)),
('net2_deconv4_b', (256,)),
('net3_predict_conv4_w', (3, 3, 770, 2)),
('Convolution3_w', (3, 3, 770, 2)),
('netsd_upsample_flow3to2_b', (2,)),
('net3_net3_upsample_flow3to2_b', (2,)),
('fuse_interconv0_b', (16,)),
('Convolution2_w', (3, 3, 1026, 2)),
('net2_conv6_w', (3, 3, 512, 1024)),
('netsd_conv3_w', (3, 3, 128, 256)),
('netsd_upsample_flow5to4_b', (2,)),
('net3_predict_conv3_w', (3, 3, 386, 2)),
('conv_redir_b', (32,)),
('net2_conv5_1_b', (512,)),
('upsample_flow6to5_w', (4, 4, 2, 2)),
('net2_net2_upsample_flow6to5_b', (2,)),
('net3_conv6_b', (1024,)),
('fuse__Convolution6_b', (2,)),
('Convolution2_b', (2,)),
('upsample_flow3to2_w', (4, 4, 2, 2)),
('net3_conv1_w', (7, 7, 12, 64)),
('fuse_deconv0_w', (4, 4, 16, 162)),
('img0s_aug_w', (1, 1, 1, 1)),
('netsd_conv1_1_w', (3, 3, 64, 128)),
('netsd_deconv2_b', (64,)),
('net2_conv5_w', (3, 3, 512, 512)),
('fuse_interconv1_b', (32,)),
('netsd_conv6_1_b', (1024,)),
('netsd_interconv2_b', (64,)),
('img1s_aug_b', (320, 448, 3, 1)),
('netsd_deconv2_w', (4, 4, 64, 386)),
('net2_predict_conv3_b', (2,)),
('net2_predict_conv2_b', (2,)),
('net3_deconv4_b', (256,)),
('net3_net3_upsample_flow5to4_b', (2,)),
('conv1_b', (64,)),
('net3_conv5_w', (3, 3, 512, 512))]
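
# Hedged usage sketch (added): the constructor needs a module exposing the custom
# `bilinear_warping` op used in warp() above (a compiled CUDA op in the original
# project); the .so path below is illustrative, not taken from this file.
#
#     warping_module = tf.load_op_library('./ops/bilinear_warping.so')
#     net = Flownet2(warping_module)
#     im0 = tf.placeholder(tf.float32, [1, None, None, 3])
#     im1 = tf.placeholder(tf.float32, [1, None, None, 3])
#     flow = net(im0, im1)  # (y, x) flow channels by default, see __call__
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())  # or restore trained weights
#         result = sess.run(flow, feed_dict={im0: frame0[None], im1: frame1[None]})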
# ===== id: 19787 =====
import sys

import pytest
import aiohttp_mako
from aiohttp import web


@pytest.fixture
def app():
    app = web.Application()
    lookup = aiohttp_mako.setup(app, input_encoding='utf-8',
                                output_encoding='utf-8',
                                default_filters=['decode.utf8'])
    tplt = "<html><body><h1>${head}</h1>${text}</body></html>"
    lookup.put_string('tplt.html', tplt)
    return app
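
# Hedged example (added): one test that could accompany this fixture. It assumes
# pytest-aiohttp's `aiohttp_client` fixture is available and that aiohttp_mako
# provides a `template` decorator analogous to aiohttp_jinja2's.
async def test_render_template(app, aiohttp_client):
    @aiohttp_mako.template('tplt.html')
    async def handler(request):
        return {'head': 'HEAD', 'text': 'text'}

    app.router.add_get('/', handler)
    client = await aiohttp_client(app)
    resp = await client.get('/')
    assert resp.status == 200
    body = await resp.text()
    assert '<h1>HEAD</h1>' in body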
# ===== id: 19814 =====
from __future__ import print_function
import warnings
import numpy as np

C4 = 261.6  # Hz (middle C)
piano_max = 4186.01  # Hz (C8, highest piano key)
piano_min = 27.5000  # Hz (A0, lowest piano key; near the lower limit of hearing)

__all__ = ['cent_per_value', 'get_f_min', 'get_f_max', 'FrequencyScale']


def cent_per_value(f_min, f_max, v_min, v_max):
    """
    Take a frequency range and a y-value range and return the y-scale
    parameter in units of cents per y value.

    Cents are a logarithmic unit of tone intervals
    (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_min : float
        Minimum frequency.
    f_max : float
        Maximum frequency.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        A y-scale parameter in units of cents/y value.
    """
    step = 1200 * np.log2(f_max / f_min) / (v_max - v_min)
    return step
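
# Worked example (added): one octave (f_max = 2 * f_min) spans 1200 cents, so
# mapping it onto values 0..12 gives 100 cents per unit, i.e. one semitone per step:
#
#     cent_per_value(440.0, 880.0, 0, 12)  # -> 100.0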
def get_f_min(f_max, cents_per_value, v_min, v_max):
    """
    This function takes in a y value max and min, a maximum frequency and a
    y scale parameter in units of cents/y value, and returns the minimum
    frequency that fits to such a scale.

    Cents are a logarithmic unit of tone intervals
    (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_max : float
        Maximum frequency.
    cents_per_value : float
        A y scale parameter in units of cents/y value.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Minimum frequency.
    """
    f_min = f_max / (2 ** ((v_max - v_min) * cents_per_value / 1200))
    return f_min
def get_f_max(f_min, cents_per_value, v_min, v_max):
"""
    Given minimum and maximum y values, a minimum frequency, and a scale parameter in units of cents per y value, return the maximum frequency that fits such a scale.
Cents are a logarithmic unit of tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).
Parameters
----------
f_min : float
Minimum frequency.
cents_per_value : float
A y scale parameter in units of cents/y value.
v_min : float
Minimum y value.
v_max : float
Maximum y value.
Returns
-------
float
Maximum frequency.
"""
f_max = f_min * (2 ** ((v_max - v_min) * cents_per_value / 1200))
return f_max
class FrequencyScale(object):
"""
This class builds a frequency scale and populates the namespace of frequency objects based on the given inputs from the following combos:
- frequency_min, frequency_max, y value min and y value max
- frequency_max, cents_per_value, y value min and y value max
- frequency_min, cents_per_value, y value min and y value max
Cents are a logarithmic unit of tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).
Parameters
----------
frequency_min : float
Minimum frequency.
frequency_max : float
Maximum frequency.
cents_per_value : float
A y scale parameter in units of cents/y value.
    value_min : float
        Minimum y value.
    value_max : float
        Maximum y value.
verbose : bool
Flag to toggle printing functions.
"""
def __init__(self, value_min, value_max,
frequency_min=None, frequency_max=None, cents_per_value=None,
verbose=False):
if verbose:
print('initial vals (fmin, fmax, vmin, vmax):',
frequency_min, frequency_max, value_min, value_max)
# checking for which inputs were given
self.y_inputs = []
        if frequency_min is not None:
            self.y_inputs.append('frequency_min')
        if frequency_max is not None:
            self.y_inputs.append('frequency_max')
        if cents_per_value is not None:
            self.y_inputs.append('cents_per_value')
self.y_n_inputs = len(self.y_inputs)
# raising exception if anything other than two inputs were given
if self.y_n_inputs != 2:
            raise Exception('FrequencyScale takes exactly 2 of the frequency_min, frequency_max, and cents_per_value inputs. You provided {} input(s): {}.'.format(
                self.y_n_inputs, self.y_inputs))
# frequency_min and frequency_max input case
        if cents_per_value is None:
cents_per_value = cent_per_value(frequency_min, frequency_max,
value_min, value_max)
# cents_per_value and frequency_max input case
        if frequency_min is None:
frequency_min = get_f_min(frequency_max, cents_per_value,
value_min, value_max)
# cents_per_value and frequency_min input case
        if frequency_max is None:
frequency_max = get_f_max(frequency_min, cents_per_value,
value_min, value_max)
self.y_value_min = value_min
self.y_value_max = value_max
self.y_frequency_max = frequency_max
self.y_frequency_min = frequency_min
self.y_cents_per_value = cents_per_value
if self.y_frequency_max > piano_max:
            warnings.warn('Your maximum frequency of {} Hz is above the piano maximum of {} Hz.'.format(
np.round(self.y_frequency_max, 2), piano_max))
if self.y_frequency_min < piano_min:
            warnings.warn('Your minimum frequency of {} Hz is below the piano minimum of {} Hz.'.format(
np.round(self.y_frequency_min, 2), piano_min))
if self.y_value_min > self.y_value_max:
warnings.warn('Min y value is greater than max y value.')
if verbose:
            print('resolved vals (f_min, f_max, y_min, y_max):', self.y_frequency_min,
                  self.y_frequency_max, self.y_value_min, self.y_value_max)
def freq(v): return self.y_frequency_min * \
2 ** ((v - self.y_value_min) * self.y_cents_per_value / 1200)
self.y_freq_translate_to_range = lambda array: list(map(freq, array))
if verbose:
print('Frequency Scale Built')
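# Illustrative usage: map y values 0..10 onto one octave starting at C4.
# The numbers are arbitrary; only names defined above are used.
if __name__ == '__main__':
    scale = FrequencyScale(value_min=0.0, value_max=10.0,
                           frequency_min=C4, frequency_max=2 * C4)
    print(scale.y_cents_per_value)                       # 120.0 cents per y unit
    print(scale.y_freq_translate_to_range([0, 5, 10]))   # [C4, ~369.96, 2*C4]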
|
19872
|
import numpy as np
import h5py
import pyglib.basic.units as units
import pyglib.basic.splot as splot
'''
Equation of state.
'''
def Murnaghan(parameters, vol):
'''
Given a vector of parameters and volumes, return a vector of energies.
equation From PRB 28,5480 (1983)
'''
E0 = parameters[0]
B0 = parameters[1]
BP = parameters[2]
V0 = parameters[3]
return E0 + B0 * vol / BP * (((V0 / vol)**BP) / \
(BP - 1) + 1) - V0 * B0 / (BP - 1.0)
def Murnaghan_pv(parameters, vol):
'''
function P(V).
'''
B0 = parameters[1]
BP = parameters[2]
V0 = parameters[3]
return B0 / BP * ((V0 / vol)**BP - 1.0)
def eos_fit_fun(pars, y, x):
'''
The objective function that will be minimized.
'''
return y - Murnaghan(pars, x)
def get_ev_fit(v, e):
'''
    Fit the Murnaghan EOS to the data. v in Angstrom^3, e in eV.
Based on http://gilgamesh.cheme.cmu.edu/doc/software/jacapo/
appendices/appendix-eos.html
'''
    from scipy.optimize import leastsq
    # fit a parabola to the data: y = ax^2 + bx + c
    a, b, c = np.polyfit(v, e, 2)
'''The parabola does not fit the data very well, but we can use it to get
some analytical guesses for other parameters.
V0 = minimum energy volume, or where dE/dV=0
E = aV^2 + bV + c
dE/dV = 2aV + b = 0
V0 = -b/2a
E0 is the minimum energy, which is:
E0 = aV0^2 + bV0 + c
B is equal to V0*d^2E/dV^2, which is just 2a*V0
and from experience we know Bprime_0 is usually a small number like 4
'''
# now here are our initial guesses.
v0 = -b / (2 * a)
e0 = a * v0**2 + b * v0 + c
b0 = 2 * a * v0
bP = 4
# initial guesses in the same order used in the Murnaghan function
x0 = [e0, b0, bP, v0]
murnpars, ier = leastsq(eos_fit_fun, x0, args=(e, v))
return murnpars
def h5get_mfit_ev(nmesh_fac=10, fsave='results.h5', path='/lapw'):
    '''Calculate and save Murnaghan fitting results in fsave.
    Interpolated e-v and p-v data on a volume mesh nmesh_fac times denser
    than the original one are also stored.
'''
# Get e,v data.
with h5py.File(fsave, 'r') as f:
e_list = f[path+'/etot_list'][...]
v_list = f['/vol_list'][...]
# fitting
murnpars = get_ev_fit(v_list, e_list)
vh = np.linspace(v_list[0], v_list[-1], nmesh_fac * len(v_list) - 1)
eh = Murnaghan(murnpars, vh)
ph = Murnaghan_pv(murnpars, vh)*units.eVA_GPa
with h5py.File(fsave, 'a') as f:
if path+'/eosfit' in f:
del f[path+'/eosfit']
f[path+'/eosfit/e0'] = murnpars[0]
f[path+'/eosfit/b0'] = murnpars[1]
f[path+'/eosfit/bp'] = murnpars[2]
f[path+'/eosfit/v0'] = murnpars[3]
f[path+'/eosfit/v_list'] = vh
f[path+'/eosfit/e_list'] = eh
f[path+'/eosfit/p_list'] = ph
splot.xy2_plot([v_list, vh], [e_list, eh], ['o', '-'], ['raw', 'fitting'],
            xlabel=r'V ($\AA^3$/primitive cell)',
ylabel='E (eV/primitive cell)', fsave=path+'_evfit.pdf')
    splot.xy_plot(vh, ph, xlabel=r'V ($\AA^3$/primitive cell)',
ylabel='P (GPa)', fsave=path+'_pvfit.pdf')
def eos_spline(v, e, tol):
'''
Get volume, energy, pressure, and bulk modulus using spline, given
    v in Angstrom^3 and e in eV.
'''
from scipy.interpolate import UnivariateSpline
s = UnivariateSpline(v, e, k=3, s=tol)
vh = np.linspace(v[0], v[-1], 10 * len(v) - 1)
eh = [s.derivatives(i)[0] for i in vh]
ph = [-s.derivatives(i)[1] * units.eVA_GPa for i in vh]
    bh = [s.derivatives(i)[2] * i * units.eVA_GPa for i in vh]
return vh, eh, ph, bh
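# Illustrative check (arbitrary parameters): generate synthetic e-v data from
# known Murnaghan parameters and verify that get_ev_fit() recovers them.
if __name__ == '__main__':
    true_pars = [-10.0, 0.5, 4.0, 16.0]  # E0 (eV), B0 (eV/A^3), B', V0 (A^3)
    v = np.linspace(13.0, 19.0, 15)
    e = Murnaghan(true_pars, v)
    fit_pars = get_ev_fit(v, e)
    print('fitted (E0, B0, BP, V0):', fit_pars)  # should be close to true_pars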
|
19885
|
from dataclasses import dataclass, field
from typing import Any, Dict, List
from aiographql.client.error import GraphQLError
from aiographql.client.request import GraphQLRequestContainer
@dataclass(frozen=True)
class GraphQLBaseResponse(GraphQLRequestContainer):
json: Dict[str, Any] = field(default_factory=dict)
@dataclass(frozen=True)
class GraphQLResponse(GraphQLBaseResponse):
"""
    GraphQL Response object wrapping response data and any errors. This object also
    contains a copy of the :class:`GraphQLRequest` that produced this response.
"""
@property
def errors(self) -> List[GraphQLError]:
"""
A list of :class:`GraphQLError` objects if server responded with query errors.
"""
return [GraphQLError.load(error) for error in self.json.get("errors", list())]
@property
def data(self) -> Dict[str, Any]:
"""The data payload the server responded with."""
return self.json.get("data", dict())
@property
def query(self) -> str:
"""The query string used to produce this response."""
return self.request.query
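# Minimal usage sketch: build a response directly from a JSON payload and read
# the convenience properties above. This assumes GraphQLRequest(query=...) from
# aiographql.client.request is the request type carried by
# GraphQLRequestContainer.
if __name__ == "__main__":
    from aiographql.client.request import GraphQLRequest

    response = GraphQLResponse(
        request=GraphQLRequest(query="{ hello }"),
        json={"data": {"hello": "world"}},
    )
    assert response.data == {"hello": "world"}
    assert response.errors == []
    assert response.query == "{ hello }"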
|
19896
|
import time
from unittest import mock
import pytest
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.http import Http404
from django.test import RequestFactory, TestCase
from django.urls import reverse
from wagtail.admin.edit_handlers import ObjectList
from wagtail.core.blocks.stream_block import StreamBlockValidationError
from wagtail.core.models import Collection
from wagtail.images import get_image_model
from wagtail.images.tests.utils import get_test_image_file
from wagtail.tests.utils import WagtailPageTests, WagtailTestUtils
from wagtail_factories import ImageFactory
from core.mixins import AuthenticatedUserRequired
from core.models import (
AbstractObjectHash,
CaseStudyRelatedPages,
Country,
CuratedListPage,
DetailPage,
IndustryTag,
InterstitialPage,
LandingPage,
LessonPlaceholderPage,
ListPage,
MagnaPageChooserPanel,
Product,
Region,
Tag,
TopicPage,
case_study_body_validation,
)
from domestic.models import DomesticDashboard, DomesticHomePage, GreatDomesticHomePage
from tests.helpers import SetUpLocaleMixin, make_test_video
from tests.unit.core import factories
from .factories import (
CaseStudyFactory,
DetailPageFactory,
LessonPlaceholderPageFactory,
StructurePageFactory,
TopicPageFactory,
)
def test_object_hash():
mocked_file = mock.Mock()
mocked_file.read.return_value = b'foo'
hash = AbstractObjectHash.generate_content_hash(mocked_file)
assert hash == 'acbd18db4cc2f85cedef654fccc4a4d8'
@pytest.mark.django_db
def test_detail_page_can_mark_as_read(client, domestic_homepage, user, domestic_site, mock_get_user_profile):
# given the user has not read a lesson
client.force_login(user)
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=True)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page)
client.get(detail_page.url)
# then the progress is saved
read_hit = detail_page.page_views.get()
assert read_hit.sso_id == str(user.pk)
assert read_hit.list_page == list_page
@pytest.mark.django_db
def test_detail_page_cannot_mark_as_read(client, domestic_homepage, user, domestic_site, mock_get_user_profile):
# given the user has not read a lesson
client.force_login(user)
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page)
client.get(detail_page.url)
    # then the progress is not saved
assert detail_page.page_views.count() == 0
@pytest.mark.django_db
def test_detail_page_anon_user_not_marked_as_read(client, domestic_homepage, domestic_site, mock_get_user_profile):
# given the user has not read a lesson
clp = factories.CuratedListPageFactory(parent=domestic_homepage)
topic_page = factories.TopicPageFactory(parent=clp)
detail_page = factories.DetailPageFactory(parent=topic_page)
client.get(detail_page.url)
# then the progress is unaffected
assert detail_page.page_views.count() == 0
@pytest.mark.django_db
def test_curated_list_page_has_link_in_context_back_to_parent(
client,
domestic_homepage,
domestic_site,
mock_export_plan_detail_list,
patch_get_user_lesson_completed,
user,
mock_get_user_profile,
):
list_page = factories.ListPageFactory(
parent=domestic_homepage, record_read_progress=False, slug='example-learning-homepage'
)
curated_list_page = factories.CuratedListPageFactory(parent=list_page, slug='example-module')
expected_url = list_page.url
assert expected_url == '/example-learning-homepage/'
client.force_login(user) # because unauthed users get redirected
resp = client.get(curated_list_page.url)
# Make a more precise string to search for: one that's marked up as a
# hyperlink target, at least
expected_link_string = f'href="{expected_url}"'
assert expected_link_string.encode('utf-8') in resp.content
@pytest.mark.django_db
@pytest.mark.parametrize(
'querystring_to_add,expected_backlink_value',
(
('', None),
('?return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F', '/export-plan/1/about-your-business/'),
(
'?return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar',
'/export-plan/1/about-your-business/?foo=bar',
),
(
'?bam=baz&return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar',
'/export-plan/1/about-your-business/?foo=bar', # NB: bam=baz should not be here
),
('?bam=baz&return-link=example%2Fexport-plan%2Fpath%2F%3Ffoo%3Dbar', None),
(
(
'?bam=baz&return-link=https%3A%2F%2Fphishing.example.com'
'%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar'
),
None,
),
(
(
'?bam=baz&return-link=%3A%2F%2Fphishing.example.com'
'%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar'
),
None,
),
('?bam=baz', None),
(
'?bam=baz&return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar',
'/export-plan/1/about-your-business/?foo=bar',
),
),
ids=(
'no backlink querystring present',
'backlink querystring present without encoded querystring of its own',
'backlink querystring present WITH encoded querystring of its own',
'backlink querystring present WITH encoded querystring and other args',
'backlink querystring present WITH bad payload - path does not start with / ',
'backlink querystring present WITH bad payload - path is a full URL',
'backlink querystring present WITH bad payload - path is a URL with flexible proto',
'backlink querystring NOT present BUT another querystring is',
'backlink querystring present WITH OTHER QUERYSTRING TOO',
),
)
def test_detail_page_get_context_handles_backlink_querystring_appropriately(
rf, domestic_homepage, domestic_site, user, querystring_to_add, expected_backlink_value, export_plan_data
):
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page, template='learn/detail_page.html')
lesson_page_url = detail_page.url
if querystring_to_add:
lesson_page_url += querystring_to_add
request = rf.get(lesson_page_url)
request.user = user
context = detail_page.get_context(request)
if expected_backlink_value is None:
assert 'backlink' not in context
else:
assert context.get('backlink') == expected_backlink_value
@pytest.mark.django_db
@pytest.mark.parametrize(
'backlink_path,expected',
(
(None, None),
('', None),
('/export-plan/1/about-your-business/', 'About your business'),
('/export-plan/1/business-objectives/', 'Business objectives'),
('/export-plan/1/target-markets-research/', 'Target markets research'),
('/export-plan/1/adapting-your-product/', 'Adapting your product'),
('/export-plan/1/marketing-approach/', 'Marketing approach'),
('/export-plan/1/costs-and-pricing/', 'Costs and pricing'),
('/export-plan/1/funding-and-credit/', 'Funding and credit'),
('/export-plan/1/getting-paid/', 'Getting paid'),
('/export-plan/1/travel-plan/', 'Travel plan'),
('/export-plan/1/business-risk/', 'Business risk'),
('/export-plan/1/adapting-your-product/?foo=bar', 'Adapting your product'),
('/export-plan/', None),
('/path/that/will/not/match/anything/', None),
),
ids=(
'no backlink',
'empty string backlink',
'Seeking: About your business',
'Seeking: Business objectives',
'Seeking: Target markets research',
'Seeking: Adapting your product',
'Seeking: Marketing approach',
'Seeking: Costs and pricing',
        'Seeking: Funding and credit',
        'Seeking: Getting paid',
'Seeking: Travel plan',
'Seeking: Business risk',
'Valid backlink with querystring does not break name lookup',
'backlink for real page that is not an export plan step',
'backlink for a non-existent page',
),
)
def test_detail_page_get_context_gets_backlink_title_based_on_backlink(
backlink_path,
expected,
en_locale,
):
detail_page = factories.DetailPageFactory(template='learn/detail_page.html')
assert detail_page._get_backlink_title(backlink_path) == expected
@pytest.mark.django_db
def test_case_study__str_method():
case_study = CaseStudyFactory(title='', summary_context='Test Co')
assert f'{case_study}' == 'Test Co'
case_study = CaseStudyFactory(title='Alice and Bob export to every continent', summary_context='Test Co')
assert f'{case_study}' == 'Alice and Bob export to every continent'
@pytest.mark.django_db
def test_case_study__timestamps():
case_study = CaseStudyFactory(summary_context='Test Co')
created = case_study.created
modified = case_study.created
assert created == modified
time.sleep(1) # Forgive this - we need to have a real, later save
case_study.save()
case_study.refresh_from_db()
assert case_study.created == created
assert case_study.modified > modified
_case_study_top_level_error_message = (
'This block must contain one Media section (with one or two items in it) and one Text section.'
)
_case_study_one_video_only_error_message = 'Only one video may be used in a case study.'
_case_study_video_order_error_message = 'The video must come before a still image.'
@pytest.mark.django_db
@pytest.mark.parametrize(
'block_type_values,exception_message',
(
(['text'], _case_study_top_level_error_message),
([('media', ('video',))], _case_study_top_level_error_message),
([], None),
(['text', 'text'], _case_study_top_level_error_message),
(['text', ('media', ('video', 'image'))], _case_study_top_level_error_message),
([('media', ('video',)), ('media', ('video',))], _case_study_top_level_error_message),
(['text', ('media', ('video', 'image')), 'text'], _case_study_top_level_error_message),
([('media', ('video', 'image')), 'text', ('media', ('video', 'image'))], _case_study_top_level_error_message),
([('media', ('video', 'image')), 'text'], None),
([('media', ('video',)), 'text'], None),
([('media', ('image',)), 'text'], None),
([('media', ('image', 'image')), 'text'], None),
([('media', ('image', 'video')), 'text'], _case_study_video_order_error_message),
([('media', ('video', 'video')), 'text'], _case_study_one_video_only_error_message),
(['quote', ('media', ('video', 'image')), 'text'], None),
(['quote', 'quote', ('media', ('video', 'image')), 'text'], None),
),
ids=(
'1. Top-level check: text node only: not fine',
'2. Top-level check: media node only: not fine',
'3. Top-level check: no nodes: fine - requirement is done at a higher level',
'4. Top-level check: two text nodes: not fine',
'5. Top-level check: text before media: not fine',
'6. Top-level check: two media nodes: not fine',
'7. Top-level check: text, media, text: not fine',
'8. Top-level check: media, text, media: not fine',
'9. media node (video and image) and text node: fine',
'10. media node (video only) and text node: fine',
'11. media node (image only) and text node: fine',
'12. media node (two images) and text node: fine',
'13. media node (image before video) and text node: not fine',
'14. media node (two videos) and text node: not fine',
'15. quote node, media node (video and image) and text node: fine',
'16. 2 quote nodes, media node (video and image) and text node: fine',
),
)
def test_case_study_body_validation(block_type_values, exception_message):
def _create_block(block_type):
mock_block = mock.Mock()
mock_block.block_type = block_type
return mock_block
value = []
for block_spec in block_type_values:
if type(block_spec) == tuple:
parent_block = _create_block(block_spec[0])
children = []
for subblock_spec in block_spec[1]:
children.append(_create_block(subblock_spec))
parent_block.value = children
value.append(parent_block)
else:
value.append(_create_block(block_spec))
if exception_message:
        with pytest.raises(StreamBlockValidationError) as ctx:
            case_study_body_validation(value)
        assert exception_message in str(ctx.value.params)
else:
# should not blow up
case_study_body_validation(value)
class LandingPageTests(WagtailPageTests):
def test_can_be_created_under_homepage(self):
self.assertAllowedParentPageTypes(
LandingPage,
{
DomesticHomePage,
GreatDomesticHomePage,
},
)
def test_can_be_created_under_landing_page(self):
self.assertAllowedSubpageTypes(LandingPage, {ListPage, InterstitialPage, DomesticDashboard})
class ListPageTests(WagtailPageTests):
def test_can_be_created_under_landing_page(self):
self.assertAllowedParentPageTypes(ListPage, {LandingPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(ListPage, {CuratedListPage})
class CuratedListPageTests(WagtailPageTests):
def test_can_be_created_under_list_page(self):
self.assertAllowedParentPageTypes(CuratedListPage, {ListPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(CuratedListPage, {TopicPage})
@pytest.mark.django_db
def test_curatedlistpage_count_detail_pages(curated_list_pages_with_lessons):
data = curated_list_pages_with_lessons
clp_1 = data[0][0]
clp_2 = data[1][0]
assert clp_1.count_detail_pages == 2 # 2 pages, placeholder ignored
assert clp_2.count_detail_pages == 1 # 1 page only, no placeholders at all
class TopicPageTests(WagtailPageTests):
def test_parent_page_types(self):
self.assertAllowedParentPageTypes(TopicPage, {CuratedListPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(
TopicPage,
{
DetailPage,
LessonPlaceholderPage,
},
)
@pytest.mark.django_db
def test_topic_page_redirects_to_module(
rf,
domestic_homepage,
domestic_site,
):
# The topic pages should never render their own content - they are basically
# scaffolding to give us a sensible page tree. As such they shouldn't be
# rendered
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = TopicPageFactory(
parent=curated_list_page,
)
# Check that we have the page tree set up correctly, else this is None
assert curated_list_page.url is not None
for page_method in ('serve', 'serve_preview'):
request = rf.get(topic_page.url)
resp = getattr(topic_page, page_method)(request)
assert resp._headers['location'] == ('Location', curated_list_page.url)
class LessonPlaceholderPageTests(WagtailPageTests):
def test_parent_page_types(self):
self.assertAllowedParentPageTypes(LessonPlaceholderPage, {TopicPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(LessonPlaceholderPage, {})
@pytest.mark.django_db
def test_context_cms_generic_page(rf, domestic_homepage):
assert 'page' in domestic_homepage.get_context(rf)
@pytest.mark.django_db
def test_placeholder_page_redirects_to_module(
rf,
domestic_homepage,
domestic_site,
):
# The topic pages should never render their own content and instead redirect
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = TopicPageFactory(
parent=curated_list_page,
)
placeholder_page = LessonPlaceholderPageFactory(parent=topic_page)
# Check that we have the page tree set up correctly, else this is None
assert curated_list_page.url is not None
for page_method in ('serve', 'serve_preview'):
request = rf.get(placeholder_page.url)
resp = getattr(placeholder_page, page_method)(request)
assert resp._headers['location'] == ('Location', curated_list_page.url)
@pytest.mark.django_db
def test_structure_page_redirects_to_http404(
rf,
domestic_homepage,
domestic_site,
):
# The structure pages should never render their own content and instead return Http404
structure_page = StructurePageFactory(parent=domestic_homepage)
for page_method in ('serve', 'serve_preview'):
request = rf.get('/foo/')
with pytest.raises(Http404):
getattr(structure_page, page_method)(request)
class DetailPageTests(SetUpLocaleMixin, WagtailPageTests):
def test_parent_page_types(self):
self.assertAllowedParentPageTypes(DetailPage, {TopicPage})
    def test_detail_page_creation_for_single_hero_image(self):
        detail_page = DetailPageFactory(hero=[('Image', ImageFactory())])
        self.assertTrue(detail_page)
    def test_validation_kick_for_multiple_hero_image(self):
        with pytest.raises(ValidationError):
            DetailPageFactory(hero=[('Image', ImageFactory()), ('Image', ImageFactory())])
@pytest.mark.django_db
def test_redirection_for_unauthenticated_user(
client,
domestic_homepage,
domestic_site,
mock_export_plan_detail_list,
patch_get_user_lesson_completed,
user,
mock_get_user_profile,
):
landing_page = factories.LandingPageFactory(parent=domestic_homepage)
interstitial_page = factories.InterstitialPageFactory(parent=landing_page)
list_page = factories.ListPageFactory(parent=domestic_homepage)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page)
pages = [
landing_page,
interstitial_page,
list_page,
curated_list_page,
detail_page,
]
for page in pages:
assert isinstance(page, AuthenticatedUserRequired)
for page in pages:
response = client.get(page.url, follow=False)
assert response.status_code == 302
assert response._headers['location'] == ('Location', f'/signup/?next={page.url}')
# Show an authenticated user can still get in there
client.force_login(user)
for page in pages:
response = client.get(page.url, follow=False)
assert response.status_code == 200
class TestImageAltRendition(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
root_collection, _ = Collection.objects.get_or_create(name='Root', depth=0)
great_image_collection = root_collection.add_child(name='Great Images')
# Create an image with alt text
AltTextImage = get_image_model() # Noqa
self.image = AltTextImage.objects.create(
title='Test image', file=get_test_image_file(), alt_text='smart alt text', collection=great_image_collection
)
def test_image_alt_rendition(self):
rendition = self.image.get_rendition('width-100')
assert rendition.alt == 'smart alt text'
assert self.image.title != rendition.alt
class TestGreatMedia(TestCase):
def test_sources_mp4_with_no_transcript(self):
media = make_test_video()
self.assertEqual(
media.sources,
[
{
'src': '/media/movie.mp4',
'type': 'video/mp4',
'transcript': None,
}
],
)
def test_sources_mp4_with_transcript(self):
media = make_test_video(transcript='A test transcript text')
self.assertEqual(
media.sources,
[
{
'src': '/media/movie.mp4',
'type': 'video/mp4',
'transcript': 'A test transcript text',
}
],
)
def test_subtitles__present(self):
media = make_test_video()
media.subtitles_en = 'Dummy subtitles content'
media.save()
self.assertTrue(media.subtitles_en)
expected = [
{
'srclang': 'en',
'label': 'English',
'url': reverse('core:subtitles-serve', args=[media.id, 'en']),
'default': False,
},
]
self.assertEqual(media.subtitles, expected)
def test_subtitles__not_present(self):
media = make_test_video()
self.assertFalse(media.subtitles_en)
self.assertEqual(media.subtitles, [])
class TestSmallSnippets(TestCase):
# Most snippets are generally small models. Move them out of this test case
# into their own if/when they gain any custom methods beyond __str__
def test_region(self):
region = Region.objects.create(name='Test Region')
self.assertEqual(region.name, 'Test Region')
self.assertEqual(f'{region}', 'Test Region') # tests __str__
def test_country(self):
region = Region.objects.create(name='Test Region')
# NB: slugs are not automatically set.
        # The SlugField is about validation, not auto-population by default
country1 = Country.objects.create(
name='Test Country',
slug='test-country',
)
country2 = Country.objects.create(
name='Other Country',
slug='other-country',
region=region,
)
country_unicode = Country.objects.create(
name='Téßt Country',
slug='tt-country',
)
self.assertEqual(country1.name, 'Test Country')
self.assertEqual(country1.slug, 'test-country')
self.assertEqual(country1.region, None)
self.assertEqual(f'{country1}', 'Test Country') # tests __str__
self.assertEqual(country2.name, 'Other Country')
self.assertEqual(country2.slug, 'other-country')
self.assertEqual(country2.region, region)
self.assertEqual(country_unicode.name, 'Téßt Country')
# by default, ASCII only - https://docs.djangoproject.com/en/2.2/ref/utils/#django.utils.text.slugify
self.assertEqual(country_unicode.slug, 'tt-country')
self.assertEqual(country_unicode.region, None)
self.assertEqual(f'{country_unicode}', 'Téßt Country') # tests __str__
def test_country_sets_slug_on_save(self):
country = Country.objects.create(name='Test Country')
country.refresh_from_db()
self.assertEqual(country.slug, 'test-country')
# Slug is set only on first save, if not already set
country_2 = Country.objects.create(name='Another Country')
self.assertEqual(country_2.slug, 'another-country')
country_2.name = 'Changed country name'
country_2.save()
country_2.refresh_from_db()
self.assertEqual(
country_2.slug,
'another-country',
'Slug should not have changed',
)
# Can specify slug up-front
country_3 = Country.objects.create(
name='Country Three',
slug='somewhere',
)
country_3.refresh_from_db()
self.assertEqual(country_3.slug, 'somewhere')
# Can't reuse slug
with self.assertRaises(IntegrityError):
Country.objects.create(name='Test Country')
def test_product(self):
product = Product.objects.create(name='Test Product')
self.assertEqual(product.name, 'Test Product')
self.assertEqual(f'{product}', 'Test Product') # tests __str__
def test_tag(self):
tag = Tag.objects.create(name='Test Tag')
self.assertEqual(tag.name, 'Test Tag')
self.assertEqual(f'{tag}', 'Test Tag') # tests __str__
def test_industry_tag(self):
tag = IndustryTag.objects.create(name='Test IndustryTag')
self.assertEqual(tag.name, 'Test IndustryTag')
self.assertEqual(f'{tag}', 'Test IndustryTag') # tests __str__
class TestMagnaPageChooserPanel(SetUpLocaleMixin, TestCase):
def setUp(self):
self.request = RequestFactory().get('/')
user = AnonymousUser() # technically, Anonymous users cannot access the admin
self.request.user = user
model = CaseStudyRelatedPages # a model with a foreign key to Page which we want to render as a page chooser
# a MagnaPageChooserPanel class that works on CaseStudyRelatedPages's 'page' field
self.edit_handler = ObjectList(
[MagnaPageChooserPanel('page', [DetailPage, CuratedListPage, TopicPage])]
).bind_to(model=model, request=self.request)
self.my_page_chooser_panel = self.edit_handler.children[0]
# build a form class containing the fields that MyPageChooserPanel wants
self.PageChooserForm = self.edit_handler.get_form_class()
        # a test instance of CaseStudyRelatedPages, pointing to the DetailPage created just below
self.detail_page = DetailPageFactory(slug='detail-page')
self.test_instance = model.objects.create(page=self.detail_page)
self.form = self.PageChooserForm(instance=self.test_instance)
self.page_chooser_panel = self.my_page_chooser_panel.bind_to(instance=self.test_instance, form=self.form)
def test_magna_page_chooser_panel_target_models(self):
result = (
MagnaPageChooserPanel('page', [DetailPage, CuratedListPage, TopicPage])
.bind_to(model=MagnaPageChooserPanel)
.target_models()
)
self.assertEqual(result, [DetailPage, CuratedListPage, TopicPage])
def test_magna_page_chooser_panel_render_as_empty_field(self):
test_instance = CaseStudyRelatedPages()
form = self.PageChooserForm(instance=test_instance)
page_chooser_panel = self.my_page_chooser_panel.bind_to(instance=test_instance, form=form, request=self.request)
result = page_chooser_panel.render_as_field()
self.assertIn('<span class="title"></span>', result)
self.assertIn('Choose a page', result)
|
19918
|
import const
def corpora2idx(sents, ind2idx):
    # map each token to its index, falling back to const.UNK for OOV tokens
    return [[ind2idx.get(w, const.UNK) for w in s] for s in sents]
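# Quick illustration with a toy vocabulary (const.UNK is the integer id
# reserved for out-of-vocabulary tokens):
if __name__ == '__main__':
    ind2idx = {'the': 2, 'cat': 3, 'sat': 4}
    print(corpora2idx([['the', 'cat', 'sat'], ['the', 'dog']], ind2idx))
    # -> [[2, 3, 4], [2, const.UNK]]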
|
19927
|
from flask import Flask, render_template, session, redirect, url_for
app = Flask(__name__)
app.config['SECRET_KEY'] = '<PASSWORD>'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/set-background/<mode>')
def set_background(mode):
session['mode'] = mode
return redirect(url_for('index'))
@app.route('/drop-session')
def drop_session():
session.pop('mode', None)
return redirect(url_for('index'))
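# Behaviour sketch using Flask's test client; the two routes above only set or
# clear session['mode'] and redirect, so no template is rendered here.
if __name__ == '__main__':
    with app.test_client() as client:
        resp = client.get('/set-background/dark')
        assert resp.status_code == 302
        with client.session_transaction() as sess:
            assert sess['mode'] == 'dark'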
|
19947
|
import os
import salt.utils.platform
from tests.support.mock import patch
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_system as win_system
except Exception as exc: # pylint: disable=broad-except
win_system = exc
class WinSystemImportTestCase(TestCase):
"""
Simply importing should not raise an error, especially on Linux
"""
def test_import(self):
if isinstance(win_system, Exception):
raise Exception(
"Importing win_system caused traceback: {}".format(win_system)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
class WinSystemTestCase(TestCase):
"""
Test cases for salt.utils.win_system
"""
def test_get_computer_name(self):
"""
Should return the computer name
"""
with patch("win32api.GetComputerNameEx", return_value="FAKENAME"):
self.assertEqual(win_system.get_computer_name(), "FAKENAME")
def test_get_computer_name_fail(self):
"""
If it fails, it returns False
"""
with patch("win32api.GetComputerNameEx", return_value=None):
self.assertFalse(win_system.get_computer_name())
def test_get_pending_computer_name(self):
"""
Will return the pending computer name if one is pending
"""
expected = "PendingName"
patch_value = {"vdata": expected}
with patch("salt.utils.win_reg.read_value", return_value=patch_value):
result = win_system.get_pending_computer_name()
self.assertEqual(expected, result)
def test_get_pending_computer_name_none(self):
"""
        Will return None if the pending computer name is the same as the current name
"""
patch_value = {"vdata": os.environ.get("COMPUTERNAME")}
with patch("salt.utils.win_reg.read_value", return_value=patch_value):
self.assertIsNone(win_system.get_pending_computer_name())
def test_get_pending_computer_name_false(self):
"""
        Will return None if there is no pending computer name
"""
with patch("salt.utils.win_reg.read_value", return_value=False):
self.assertIsNone(win_system.get_pending_computer_name())
def test_get_pending_component_servicing(self):
"""
If none of the keys exist, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False):
self.assertFalse(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_1(self):
"""
If the RebootPending key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_2(self):
"""
If the RebootInProgress key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_3(self):
"""
If the PackagesPending key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, False, True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_domain_join(self):
"""
If none of the keys exist, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False):
self.assertFalse(win_system.get_pending_domain_join())
def test_get_pending_domain_join_true_1(self):
"""
If the AvoidSpnSet key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_domain_join())
def test_get_pending_domain_join_true_2(self):
"""
If the JoinDomain key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_domain_join())
def test_get_pending_file_rename_false_1(self):
"""
If none of the value names exist, should return False
"""
patched_return = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_file_rename())
def test_get_pending_file_rename_false_2(self):
"""
If one of the value names exists but is not set, should return False
"""
patched_return = {"success": True, "vdata": "(value not set)"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_file_rename())
def test_get_pending_file_rename_true_1(self):
"""
If one of the value names exists and is set, should return True
"""
patched_return = {"success": True, "vdata": "some value"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertTrue(win_system.get_pending_file_rename())
def test_get_pending_servermanager_false_1(self):
"""
If the CurrentRebootAttempts value name does not exist, should return
False
"""
patched_return = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_servermanager())
def test_get_pending_servermanager_false_2(self):
"""
If the CurrentRebootAttempts value name exists but is not an integer,
should return False
"""
patched_return = {"success": True, "vdata": "(value not set)"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertFalse(win_system.get_pending_servermanager())
def test_get_pending_servermanager_true(self):
"""
If the CurrentRebootAttempts value name exists and is an integer,
should return True
"""
patched_return = {"success": True, "vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertTrue(win_system.get_pending_servermanager())
def test_get_pending_dvd_reboot(self):
"""
If the DVDRebootSignal value name does not exist, should return False
"""
with patch("salt.utils.win_reg.value_exists", return_value=False):
self.assertFalse(win_system.get_pending_dvd_reboot())
def test_get_pending_dvd_reboot_true(self):
"""
If the DVDRebootSignal value name exists, should return True
"""
with patch("salt.utils.win_reg.value_exists", return_value=True):
self.assertTrue(win_system.get_pending_dvd_reboot())
def test_get_pending_update(self):
"""
        If none of the keys exist and there are no subkeys, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False), patch(
"salt.utils.win_reg.list_keys", return_value=[]
):
self.assertFalse(win_system.get_pending_update())
def test_get_pending_update_true_1(self):
"""
If the RebootRequired key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_update())
def test_get_pending_update_true_2(self):
"""
If the PostRebootReporting key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_update())
def test_get_reboot_required_witnessed_false_1(self):
"""
The ``Reboot Required`` value name does not exist, should return False
"""
patched_data = {"vdata": None}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_reboot_required_witnessed())
def test_get_reboot_required_witnessed_false_2(self):
"""
The ``Reboot required`` value name is set to 0, should return False
"""
patched_data = {"vdata": 0}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_reboot_required_witnessed())
def test_get_reboot_required_witnessed_true(self):
"""
The ``Reboot required`` value name is set to 1, should return True
"""
patched_data = {"vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertTrue(win_system.get_reboot_required_witnessed())
def test_set_reboot_required_witnessed(self):
"""
The call to ``set_value`` should return True and should be called with
the specified parameters
"""
with patch("salt.utils.win_reg.set_value", return_value=True) as sv:
self.assertTrue(win_system.set_reboot_required_witnessed())
sv.assert_called_once_with(
hive="HKLM",
key=win_system.MINION_VOLATILE_KEY,
volatile=True,
vname=win_system.REBOOT_REQUIRED_NAME,
vdata=1,
vtype="REG_DWORD",
)
def test_get_pending_update_exe_volatile_false_1(self):
"""
If UpdateExeVolatile value name is 0, should return False
"""
patched_data = {"success": True, "vdata": 0}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_pending_update_exe_volatile())
def test_get_pending_update_exe_volatile_false_2(self):
"""
If UpdateExeVolatile value name is not present, should return False
"""
patched_data = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_pending_update_exe_volatile())
def test_get_pending_update_exe_volatile_true_1(self):
"""
If UpdateExeVolatile value name is not 0, should return True
"""
patched_data = {"success": True, "vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertTrue(win_system.get_pending_update_exe_volatile())
def test_get_pending_reboot(self):
"""
If all functions return Falsy data, should return False
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=False
):
self.assertFalse(win_system.get_pending_reboot())
def test_get_pending_reboot_true_1(self):
"""
If any boolean returning functions return True, should return True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=True
):
self.assertTrue(win_system.get_pending_reboot())
def test_get_pending_reboot_true_2(self):
"""
If a computer name is returned, should return True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name",
return_value="pending name",
):
self.assertTrue(win_system.get_pending_reboot())
def test_get_pending_reboot_details(self):
"""
All items False should return a dictionary with all items False
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=False
):
expected = {
"Pending Component Servicing": False,
"Pending Computer Rename": False,
"Pending DVD Reboot": False,
"Pending File Rename": False,
"Pending Join Domain": False,
"Pending ServerManager": False,
"Pending Update": False,
"Pending Windows Update": False,
"Reboot Required Witnessed": False,
"Volatile Update Exe": False,
}
result = win_system.get_pending_reboot_details()
self.assertDictEqual(expected, result)
def test_get_pending_reboot_details_true(self):
"""
All items True should return a dictionary with all items True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=True
), patch("salt.utils.win_update.needs_reboot", return_value=True), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=True
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=True
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=True
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=True
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=True
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=True
), patch(
"salt.utils.win_system.get_pending_computer_name",
return_value="pending name",
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=True
):
expected = {
"Pending Component Servicing": True,
"Pending Computer Rename": True,
"Pending DVD Reboot": True,
"Pending File Rename": True,
"Pending Join Domain": True,
"Pending ServerManager": True,
"Pending Update": True,
"Pending Windows Update": True,
"Reboot Required Witnessed": True,
"Volatile Update Exe": True,
}
result = win_system.get_pending_reboot_details()
self.assertDictEqual(expected, result)
|
19957
|
import logging
import os
import re
import time
import urllib
from threading import Thread
import xmlrpclib
from Queue import Queue
from flask import current_app as app, render_template, request, redirect, abort, jsonify, json as json_mod, url_for, session, Blueprint
from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
from passlib.hash import bcrypt_sha256
from CTFd.utils import sha512, is_safe_url, authed, can_send_mail, sendmail, can_register, get_config, verify_email, validate_url
from CTFd.models import db, Teams, Pages
import CTFd.auth
import CTFd.views
def create_user_thread(q):
while True:
user_pair = q.get(block=True)
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
if user_pair[2] == "create":
shell.add_user(user_pair[0], user_pair[1])
elif user_pair[2] == "change":
shell.change_user(user_pair[0], user_pair[1])
def load(app):
shell = Blueprint('shell', __name__, template_folder='shell-templates')
app.register_blueprint(shell, url_prefix='/shell')
page = Pages('shell',""" """ )
auth = Blueprint('auth', __name__)
shellexists = Pages.query.filter_by(route='shell').first()
if not shellexists:
db.session.add(page)
db.session.commit()
@app.route('/shell', methods=['GET'])
def shell_view():
if not authed():
return redirect(url_for('auth.login', next=request.path))
return render_template('shell.html',root=request.script_root)
@app.route('/register', methods=['POST', 'GET'])
def register():
if not can_register():
return redirect(url_for('auth.login'))
if request.method == 'POST':
errors = []
name = request.form['name']
email = request.form['email']
password = request.form['password']
name_len = len(name) < 2
names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()
emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()
pass_short = len(password) == 0
pass_long = len(password) > 32
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", request.form['email'])
if not valid_email:
errors.append("That email doesn't look right")
if names:
errors.append('That team name is already taken')
if emails:
errors.append('That email has already been used')
if pass_short:
errors.append('Pick a longer password')
if pass_long:
errors.append('Pick a shorter password')
if name_len:
errors.append('Pick a longer team name')
if len(errors) > 0:
return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])
else:
with app.app_context():
team = Teams(name, email.lower(), password)
db.session.add(team)
db.session.commit()
db.session.flush()
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
shell.add_user(name, password)
session['username'] = team.name
session['id'] = team.id
session['admin'] = team.admin
session['nonce'] = sha512(os.urandom(10))
if can_send_mail() and get_config('verify_emails'): # Confirming users is enabled and we can send email.
db.session.close()
logger = logging.getLogger('regs')
logger.warn("[{0}] {1} registered (UNCONFIRMED) with {2}".format(time.strftime("%m/%d/%Y %X"),
request.form['name'].encode('utf-8'),
request.form['email'].encode('utf-8')))
return redirect(url_for('auth.confirm_user'))
else: # Don't care about confirming users
if can_send_mail(): # We want to notify the user that they have registered.
sendmail(request.form['email'], "You've successfully registered for {}".format(get_config('ctf_name')))
db.session.close()
logger = logging.getLogger('regs')
logger.warn("[{0}] {1} registered with {2}".format(time.strftime("%m/%d/%Y %X"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))
return redirect(url_for('challenges.challenges_view'))
else:
return render_template('register.html')
def reset_password(data=None):
if data is not None and request.method == "GET":
return render_template('reset_password.html', mode='set')
if data is not None and request.method == "POST":
try:
s = TimedSerializer(app.config['SECRET_KEY'])
name = s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)
except BadTimeSignature:
return render_template('reset_password.html', errors=['Your link has expired'])
except:
return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])
team = Teams.query.filter_by(name=name).first_or_404()
password = request.form['password'].strip()
name = team.name
pass_short = len(password) == 0
pass_long = len(password) > 32
#http://stackoverflow.com/questions/19605150/regex-for-password-must-be-contain-at-least-8-characters-least-1-number-and-bot
errors = []
if pass_short:
errors.append('Pick a longer password')
if pass_long:
errors.append('Pick a shorter password')
if len(errors) > 0:
return render_template('reset_password.html', errors=errors)
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
shell.change_user(name, password)
team.password = <PASSWORD>(password)
db.session.commit()
db.session.close()
return redirect(url_for('auth.login'))
if request.method == 'POST':
email = request.form['email'].strip()
team = Teams.query.filter_by(email=email).first()
if not team:
return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
s = TimedSerializer(app.config['SECRET_KEY'])
token = s.dumps(team.name)
text = """
Did you initiate a password reset?
{0}/{1}
""".format(url_for('auth.reset_password', _external=True), urllib.quote_plus(token.encode('base64')))
sendmail(email, text)
return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
return render_template('reset_password.html')
def profile():
if authed():
if request.method == "POST":
errors = []
name = request.form.get('name')
email = request.form.get('email')
website = request.form.get('website')
affiliation = request.form.get('affiliation')
country = request.form.get('country')
user = Teams.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
names = Teams.query.filter_by(name=name).first()
name_len = len(request.form['name']) < 2
emails = Teams.query.filter_by(email=email).first()
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
password = request.form['password'].strip()
pass_short = len(password) == 0
pass_long = len(password) > 32
if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
(not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
errors.append("Your old password doesn't match what we have.")
if not valid_email:
errors.append("That email doesn't look right")
if not get_config('prevent_name_change') and names and name != session['username']:
errors.append('That team name is already taken')
if emails and emails.id != session['id']:
errors.append('That email has already been used')
if not get_config('prevent_name_change') and name_len:
errors.append('Pick a longer team name')
if website.strip() and not validate_url(website):
errors.append("That doesn't look like a valid URL")
if pass_short:
errors.append('Pick a longer password')
if pass_long:
errors.append('Pick a shorter password')
if len(errors) > 0:
return render_template('profile.html', name=name, email=email, website=website,
affiliation=affiliation, country=country, errors=errors)
else:
team = Teams.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
team.name = name
if team.email != email.lower():
team.email = email.lower()
if get_config('verify_emails'):
team.verified = False
session['username'] = team.name
if 'password' in request.form.keys() and not len(request.form['password']) == 0:
team.password = <PASSWORD>(request.form.get('password'))
password = request.form['password'].strip()
team.website = website
team.affiliation = affiliation
team.country = country
name = team.name
if password:
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
shell.change_user(name, password)
db.session.commit()
db.session.close()
return redirect(url_for('views.profile'))
else:
user = Teams.query.filter_by(id=session['id']).first()
name = user.name
email = user.email
website = user.website
affiliation = user.affiliation
country = user.country
prevent_name_change = get_config('prevent_name_change')
confirm_email = get_config('verify_emails') and not user.verified
return render_template('profile.html', name=name, email=email, website=website, affiliation=affiliation,
country=country, prevent_name_change=prevent_name_change, confirm_email=confirm_email)
else:
return redirect(url_for('auth.login'))
app.view_functions['auth.reset_password'] = reset_password
app.view_functions['auth.register'] = register
app.view_functions['views.profile'] = profile
|
19965
|
expected_output = {
"ospf-statistics-information": {
"ospf-statistics": {
"dbds-retransmit": "203656",
"dbds-retransmit-5seconds": "0",
"flood-queue-depth": "0",
"lsas-acknowledged": "225554974",
"lsas-acknowledged-5seconds": "0",
"lsas-flooded": "66582263",
"lsas-flooded-5seconds": "0",
"lsas-high-prio-flooded": "375568998",
"lsas-high-prio-flooded-5seconds": "0",
"lsas-nbr-transmit": "3423982",
"lsas-nbr-transmit-5seconds": "0",
"lsas-requested": "3517",
"lsas-requested-5seconds": "0",
"lsas-retransmit": "8064643",
"lsas-retransmit-5seconds": "0",
"ospf-errors": {
"subnet-mismatch-error": "12"
},
"packet-statistics": [
{
"ospf-packet-type": "Hello",
"packets-received": "5703920",
"packets-received-5seconds": "3",
"packets-sent": "6202169",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "DbD",
"packets-received": "185459",
"packets-received-5seconds": "0",
"packets-sent": "212983",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSReq",
"packets-received": "208",
"packets-received-5seconds": "0",
"packets-sent": "214",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSUpdate",
"packets-received": "16742100",
"packets-received-5seconds": "0",
"packets-sent": "15671465",
"packets-sent-5seconds": "0"
},
{
"ospf-packet-type": "LSAck",
"packets-received": "2964236",
"packets-received-5seconds": "0",
"packets-sent": "5229203",
"packets-sent-5seconds": "0"
}
],
"total-database-summaries": "0",
"total-linkstate-request": "0",
"total-retransmits": "0"
}
}
}
|
19970
|
from .Layer import *
class Multiply(Layer):
def __init__(self, models, *args, **kwargs):
self.check_inputs(models, 2)
Layer.__init__(self, models, *args, **kwargs)
def reshape(self):
self.Y = np.zeros(self.X[0].shape)
def forward(self):
self.Y = np.multiply(self.X[0], self.X[1])
def backward(self):
self.dX = [np.multiply(self.dY, self.X[1]), np.multiply(self.dY, self.X[0])]
class MultiplyConstant(Layer):
def __init__(self, model, *args, **kwargs):
self.check_inputs(model, 1)
Layer.__init__(self, model, *args, **kwargs)
self.constant = kwargs["constant"]
def reshape(self):
self.Y = np.zeros(self.X.shape)
def forward(self):
self.Y = self.X * self.constant
def backward(self):
self.dX = self.dY * self.constant
Multiply.OP_L = MultiplyConstant
Multiply.OP_R = MultiplyConstant
|
19971
|
from project.settings import INSTALLED_APPS, ALLOWED_HOSTS, BASE_DIR
import os
INSTALLED_APPS.append('webpack_loader')
INSTALLED_APPS.append('app')
ALLOWED_HOSTS.append('*')
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static',)
os.path.join(BASE_DIR, 'app', 'vueapp','dist', 'static')
]
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'static/vueapp/',
'STATS_FILE': os.path.join(BASE_DIR, 'app', 'vueapp', 'webpack-stats.json')
}
}
INTERNAL_IPS = (
'0.0.0.0',
'127.0.0.1',
)
|
19982
|
import numpy as np
import itertools
from .contrib import compress_filter, smooth, residual_model
from .contrib import reduce_interferences
def expectation_maximization(y, x, iterations=2, verbose=0, eps=None):
r"""Expectation maximization algorithm, for refining source separation
estimates.
This algorithm allows to make source separation results better by
enforcing multichannel consistency for the estimates. This usually means
a better perceptual quality in terms of spatial artifacts.
The implementation follows the details presented in [1]_, taking
inspiration from the original EM algorithm proposed in [2]_ and its
weighted refinement proposed in [3]_, [4]_.
It works by iteratively:
* Re-estimate source parameters (power spectral densities and spatial
covariance matrices) through :func:`get_local_gaussian_model`.
* Separate again the mixture with the new parameters by first computing
the new modelled mixture covariance matrices with :func:`get_mix_model`,
      preparing the Wiener filters through :func:`wiener_gain` and applying
      them with :func:`apply_filter`.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [4] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [5] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
initial estimates for the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex STFT of the mixture signal
iterations: int [scalar]
number of iterations for the EM algorithm.
verbose: boolean
display some information if True
eps: float or None [scalar]
The epsilon value to use for regularization and filters.
If None, the default will use the epsilon of np.real(x) dtype.
Returns
-------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
estimated sources after iterations
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
estimated power spectral densities
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
estimated spatial covariance matrices
Note
-----
* You need an initial estimate for the sources to apply this
algorithm. This is precisely what the :func:`wiener` function does.
* This algorithm *is not* an implementation of the "exact" EM
      proposed in [1]_. In particular, it does *not* compute the posterior
      covariance matrices in the same (exact) way. Instead, it uses the
      simplified approximate scheme initially proposed in [5]_ and further
      refined in [3]_, [4]_, which boils down to just taking the empirical
      covariance of the recent source estimates, followed by a weighted
average for the update of the spatial covariance matrix. It has been
empirically demonstrated that this simplified algorithm is more
robust for music separation.
Warning
-------
It is *very* important to make sure `x.dtype` is `np.complex`
if you want double precision, because this function will **not**
do such conversion for you from `np.complex64`, in case you want the
smaller RAM usage on purpose.
    It is usually better in terms of quality to have double
precision, by e.g. calling :func:`expectation_maximization`
with ``x.astype(np.complex)``.
This is notably needed if you let common deep learning frameworks like
PyTorch or TensorFlow do the STFT, because this usually happens in
single precision.
"""
# to avoid dividing by zero
if eps is None:
eps = np.finfo(np.real(x[0]).dtype).eps
# dimensions
(nb_frames, nb_bins, nb_channels) = x.shape
nb_sources = y.shape[-1]
# allocate the spatial covariance matrices and PSD
R = np.zeros((nb_bins, nb_channels, nb_channels, nb_sources), x.dtype)
v = np.zeros((nb_frames, nb_bins, nb_sources))
if verbose:
print('Number of iterations: ', iterations)
regularization = np.sqrt(eps) * (
np.tile(np.eye(nb_channels, dtype=np.complex64),
(1, nb_bins, 1, 1)))
for it in range(iterations):
# constructing the mixture covariance matrix. Doing it with a loop
# to avoid storing anytime in RAM the whole 6D tensor
if verbose:
print('EM, iteration %d' % (it+1))
for j in range(nb_sources):
# update the spectrogram model for source j
v[..., j], R[..., j] = get_local_gaussian_model(
y[..., j],
eps)
for t in range(nb_frames):
Cxx = get_mix_model(v[None, t, ...], R)
Cxx += regularization
inv_Cxx = _invert(Cxx, eps)
# separate the sources
for j in range(nb_sources):
W_j = wiener_gain(v[None, t, ..., j], R[..., j], inv_Cxx)
y[t, ..., j] = apply_filter(x[None, t, ...], W_j)[0]
return y, v, R
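# --- Illustrative usage sketch (added for this write-up, not part of the
# original module): runs the EM refinement on made-up data just to show the
# expected shapes. All sizes and signals below are arbitrary assumptions.
def _demo_expectation_maximization():
    rng = np.random.RandomState(0)
    nb_frames, nb_bins, nb_channels, nb_sources = 10, 16, 2, 2
    # fake complex STFTs for the sources and their mixture
    sources = (rng.randn(nb_frames, nb_bins, nb_channels, nb_sources)
               + 1j * rng.randn(nb_frames, nb_bins, nb_channels, nb_sources))
    x = sources.sum(axis=-1)
    # start from a rough initial estimate (true sources plus noise)
    y0 = sources + 0.1 * rng.randn(*sources.shape)
    y, v, R = expectation_maximization(y0.copy(), x, iterations=2)
    assert y.shape == sources.shape
    assert v.shape == (nb_frames, nb_bins, nb_sources)
    assert R.shape == (nb_bins, nb_channels, nb_channels, nb_sources)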
def wiener(v, x, iterations=1, use_softmask=True, eps=None):
"""Wiener-based separation for multichannel audio.
The method uses the (possibly multichannel) spectrograms `v` of the
sources to separate the (complex) Short Term Fourier Transform `x` of the
mix. Separation is done in a sequential way by:
* Getting an initial estimate. This can be done in two ways: either by
directly using the spectrograms with the mixture phase, or
by using :func:`softmask`.
    * Refining these initial estimates through a call to
:func:`expectation_maximization`.
    This implementation also allows specifying the epsilon value used for
regularization. It is based on [1]_, [2]_, [3]_, [4]_.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [4] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, {1,nb_channels}, nb_sources)]
spectrograms of the sources. This is a nonnegative tensor that is
usually the output of the actual separation method of the user. The
spectrograms may be mono, but they need to be 4-dimensional in all
cases.
x: np.ndarray [complex, shape=(nb_frames, nb_bins, nb_channels)]
STFT of the mixture signal.
iterations: int [scalar]
number of iterations for the EM algorithm
use_softmask: boolean
* if `False`, then the mixture phase will directly be used with the
spectrogram as initial estimates.
* if `True`, a softmasking strategy will be used as described in
:func:`softmask`.
eps: {None, float}
Epsilon value to use for computing the separations. This is used
whenever division with a model energy is performed, i.e. when
softmasking and when iterating the EM.
It can be understood as the energy of the additional white noise
that is taken out when separating.
        If `None`, the default value is taken as `np.finfo(np.real(x[0]).dtype).eps`.
Returns
-------
y: np.ndarray
[complex, shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
STFT of estimated sources
Note
----
* Be careful that you need *magnitude spectrogram estimates* for the
case `softmask==False`.
    * We recommend using `softmask=False` only if your spectrogram model is
      quite good, e.g. the output of a deep neural net. If it is not so
      great, opt for an initial softmasking strategy.
* The epsilon value will have a huge impact on performance. If it's large,
only the parts of the signal with a significant energy will be kept in
the sources. This epsilon then directly controls the energy of the
reconstruction error.
Warning
-------
As in :func:`expectation_maximization`, we recommend converting the
mixture `x` to double precision `np.complex` *before* calling
:func:`wiener`.
"""
if use_softmask:
y = softmask(v, x, eps=eps)
else:
y = v * np.exp(1j*np.angle(x[..., None]))
if not iterations:
return y
# we need to refine the estimates. Scales down the estimates for
# numerical stability
max_abs = max(1, np.abs(x).max()/10.)
x /= max_abs
y = expectation_maximization(y/max_abs, x, iterations, eps=eps)[0]
return y*max_abs
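# --- Illustrative usage sketch (added for this write-up, not part of the
# original module): typical call with nonnegative spectrograms `v` and a
# complex mixture STFT `x`; all values are arbitrary assumptions.
def _demo_wiener():
    rng = np.random.RandomState(0)
    nb_frames, nb_bins, nb_channels, nb_sources = 10, 16, 2, 3
    v = np.abs(rng.randn(nb_frames, nb_bins, nb_channels, nb_sources))
    x = (rng.randn(nb_frames, nb_bins, nb_channels)
         + 1j * rng.randn(nb_frames, nb_bins, nb_channels))
    y = wiener(v, x, iterations=1, use_softmask=True)
    assert y.shape == (nb_frames, nb_bins, nb_channels, nb_sources)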
def softmask(v, x, logit=None, eps=None):
"""Separates a mixture with a ratio mask, using the provided sources
spectrograms estimates. Additionally allows compressing the mask with
a logit function for soft binarization.
The filter does *not* take multichannel correlations into account.
The masking strategy can be traced back to the work of <NAME> in the
case of *power* spectrograms [1]_. In the case of *fractional* spectrograms
like magnitude, this filter is often referred to a "ratio mask", and
has been shown to be the optimal separation procedure under alpha-stable
assumptions [2]_.
References
----------
.. [1] <NAME>,"Extrapolation, Inerpolation, and Smoothing of Stationary
Time Series." 1949.
.. [2] <NAME> and <NAME>. "Generalized Wiener filtering with
fractional power spectrograms." 2015 IEEE International Conference on
Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2015.
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
spectrograms of the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
mixture signal
logit: {None, float between 0 and 1}
enable a compression of the filter. If not None, it is the threshold
value for the logit function: a softmask above this threshold is
brought closer to 1, and a softmask below is brought closer to 0.
Returns
-------
ndarray, shape=(nb_frames, nb_bins, nb_channels, nb_sources)
estimated sources
"""
# to avoid dividing by zero
if eps is None:
eps = np.finfo(np.real(x[0]).dtype).eps
total_energy = np.sum(v, axis=-1, keepdims=True)
filter = v/(eps + total_energy.astype(x.dtype))
if logit is not None:
filter = compress_filter(filter, eps, thresh=logit, multichannel=False)
return filter * x[..., None]
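# --- Illustrative sketch (added for this write-up, not part of the original
# module): the ratio masks sum to ~1 over sources, so the estimates add back
# up to the mixture. Shapes and values are arbitrary assumptions.
def _demo_softmask():
    rng = np.random.RandomState(0)
    v = np.abs(rng.randn(10, 16, 2, 3))
    x = rng.randn(10, 16, 2) + 1j * rng.randn(10, 16, 2)
    y = softmask(v, x)
    assert np.allclose(y.sum(axis=-1), x, atol=1e-6)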
def _invert(M, eps):
"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
    Will generate errors if the matrices are singular: the user must handle
    this through their own regularization scheme.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
matrices to invert: must be square along the last two dimensions
eps: [scalar]
        regularization parameter, used only in the case of matrices
        bigger than 2x2
Returns
-------
invM: np.ndarray, [shape=M.shape]
inverses of M
"""
nb_channels = M.shape[-1]
if nb_channels == 1:
# scalar case
invM = 1.0/(M+eps)
elif nb_channels == 2:
# two channels case: analytical expression
det = (
M[..., 0, 0]*M[..., 1, 1] -
M[..., 0, 1]*M[..., 1, 0])
invDet = 1.0/(det)
invM = np.empty_like(M)
invM[..., 0, 0] = invDet*M[..., 1, 1]
invM[..., 1, 0] = -invDet*M[..., 1, 0]
invM[..., 0, 1] = -invDet*M[..., 0, 1]
invM[..., 1, 1] = invDet*M[..., 0, 0]
else:
# general case : no use of analytical expression (slow!)
invM = np.linalg.pinv(M, eps)
return invM
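# --- Illustrative sketch (added for this write-up, not part of the original
# module): the analytical 2x2 branch of `_invert` should agree with numpy's
# generic matrix inverse on random (almost surely non-singular) matrices.
def _demo_invert():
    rng = np.random.RandomState(0)
    M = rng.randn(5, 16, 2, 2) + 1j * rng.randn(5, 16, 2, 2)
    invM = _invert(M, eps=1e-10)
    assert np.allclose(invM, np.linalg.inv(M))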
def wiener_gain(v_j, R_j, inv_Cxx):
"""
Compute the wiener gain for separating one source, given all parameters.
It is the matrix applied to the mix to get the posterior mean of the source
as in [1]_
References
----------
.. [1] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
Parameters
----------
    v_j: np.ndarray [shape=(nb_frames, nb_bins)]
        power spectral density of the target source.
R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
spatial covariance matrix of the target source
inv_Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
inverse of the mixture covariance matrices
Returns
-------
G: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
wiener filtering matrices, to apply to the mix, e.g. through
:func:`apply_filter` to get the target source estimate.
"""
(_, nb_channels) = R_j.shape[:2]
# computes multichannel Wiener gain as v_j R_j inv_Cxx
G = np.zeros_like(inv_Cxx)
for (i1, i2, i3) in itertools.product(*(range(nb_channels),)*3):
G[..., i1, i2] += (R_j[None, :, i1, i3] * inv_Cxx[..., i3, i2])
G *= v_j[..., None, None]
return G
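# --- Illustrative sketch (added for this write-up, not part of the original
# module): the channel loop in `wiener_gain` computes v_j * R_j * inv_Cxx,
# so it should match a vectorized einsum. All inputs below are arbitrary.
def _demo_wiener_gain():
    rng = np.random.RandomState(0)
    nb_frames, nb_bins, nb_channels = 4, 8, 2
    v_j = np.abs(rng.randn(nb_frames, nb_bins))
    R_j = (rng.randn(nb_bins, nb_channels, nb_channels)
           + 1j * rng.randn(nb_bins, nb_channels, nb_channels))
    inv_Cxx = (rng.randn(nb_frames, nb_bins, nb_channels, nb_channels)
               + 1j * rng.randn(nb_frames, nb_bins, nb_channels, nb_channels))
    G = wiener_gain(v_j, R_j, inv_Cxx)
    ref = v_j[..., None, None] * np.einsum('bik,fbkj->fbij', R_j, inv_Cxx)
    assert np.allclose(G, ref)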
def apply_filter(x, W):
"""
Applies a filter on the mixture. Just corresponds to a matrix
multiplication.
Parameters
----------
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
STFT of the signal on which to apply the filter.
W: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
filtering matrices, as returned, e.g. by :func:`wiener_gain`
Returns
-------
y_hat: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
filtered signal
"""
nb_channels = W.shape[-1]
# apply the filter
y_hat = 0+0j
for i in range(nb_channels):
y_hat += W[..., i] * x[..., i, None]
return y_hat
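# --- Illustrative sketch (added for this write-up, not part of the original
# module): `apply_filter` is a per-TF-bin matrix-vector product, so it should
# agree with the equivalent einsum. All inputs below are arbitrary.
def _demo_apply_filter():
    rng = np.random.RandomState(0)
    x = rng.randn(4, 8, 2) + 1j * rng.randn(4, 8, 2)
    W = rng.randn(4, 8, 2, 2) + 1j * rng.randn(4, 8, 2, 2)
    y_hat = apply_filter(x, W)
    assert np.allclose(y_hat, np.einsum('...ij,...j->...i', W, x))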
def get_mix_model(v, R):
"""
Compute the model covariance of a mixture based on local Gaussian models.
    It simply adds up all the v[..., j] * R[..., j].
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
Power spectral densities for the sources
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
Spatial covariance matrices of each sources
Returns
-------
Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
Covariance matrix for the mixture
"""
nb_channels = R.shape[1]
(nb_frames, nb_bins, nb_sources) = v.shape
Cxx = np.zeros((nb_frames, nb_bins, nb_channels, nb_channels), R.dtype)
for j in range(nb_sources):
Cxx += v[..., j, None, None] * R[None, ..., j]
return Cxx
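# --- Illustrative sketch (added for this write-up, not part of the original
# module): the mixture covariance is the PSD-weighted sum of the per-source
# spatial covariances, i.e. an einsum over the source axis. Inputs are arbitrary.
def _demo_get_mix_model():
    rng = np.random.RandomState(0)
    nb_frames, nb_bins, nb_channels, nb_sources = 4, 8, 2, 3
    v = np.abs(rng.randn(nb_frames, nb_bins, nb_sources))
    R = (rng.randn(nb_bins, nb_channels, nb_channels, nb_sources)
         + 1j * rng.randn(nb_bins, nb_channels, nb_channels, nb_sources))
    Cxx = get_mix_model(v, R)
    assert np.allclose(Cxx, np.einsum('fbj,bklj->fbkl', v, R))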
def _covariance(y_j):
"""
Compute the empirical covariance for a source.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)].
complex stft of the source.
Returns
-------
Cj: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
just y_j * conj(y_j.T): empirical covariance for each TF bin.
"""
(nb_frames, nb_bins, nb_channels) = y_j.shape
Cj = np.zeros((nb_frames, nb_bins, nb_channels, nb_channels),
y_j.dtype)
for (i1, i2) in itertools.product(*(range(nb_channels),)*2):
Cj[..., i1, i2] += y_j[..., i1] * np.conj(y_j[..., i2])
return Cj
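# --- Illustrative sketch (added for this write-up, not part of the original
# module): the empirical covariance is the outer product y_j y_j^H per TF bin.
def _demo_covariance():
    rng = np.random.RandomState(0)
    y_j = rng.randn(4, 8, 2) + 1j * rng.randn(4, 8, 2)
    Cj = _covariance(y_j)
    assert np.allclose(Cj, np.einsum('fbi,fbj->fbij', y_j, np.conj(y_j)))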
def get_local_gaussian_model(y_j, eps=1.):
r"""
Compute the local Gaussian model [1]_ for a source given the complex STFT.
First get the power spectral densities, and then the spatial covariance
matrix, as done in [1]_, [2]_
References
----------
.. [1] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [2] <NAME> and <NAME> and <NAME>. "Low bitrate informed
source separation of realistic mixtures." 2013 IEEE International
Conference on Acoustics, Speech and Signal Processing. IEEE, 2013.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex stft of the source.
eps: float [scalar]
regularization term
Returns
-------
v_j: np.ndarray [shape=(nb_frames, nb_bins)]
power spectral density of the source
    R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
Spatial covariance matrix of the source
"""
v_j = np.mean(np.abs(y_j)**2, axis=2)
# updates the spatial covariance matrix
nb_frames = y_j.shape[0]
R_j = 0
weight = eps
for t in range(nb_frames):
R_j += _covariance(y_j[None, t, ...])
weight += v_j[None, t, ...]
R_j /= weight[..., None, None]
return v_j, R_j
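# --- Illustrative sketch (added for this write-up, not part of the original
# module): shape check of the local Gaussian model of one made-up stereo
# source; the spatial covariance is Hermitian by construction.
def _demo_get_local_gaussian_model():
    rng = np.random.RandomState(0)
    y_j = rng.randn(10, 16, 2) + 1j * rng.randn(10, 16, 2)
    v_j, R_j = get_local_gaussian_model(y_j, eps=1.)
    assert v_j.shape == (10, 16)
    assert np.squeeze(R_j).shape == (16, 2, 2)
    assert np.allclose(R_j, np.conj(np.swapaxes(R_j, -1, -2)))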
|
19994
|
import os
import requests
from typing import Optional, List
from pydantic import Field, validator
from dbt_cloud.command.command import DbtCloudAccountCommand
from dbt_cloud.field import JOB_ID_FIELD
class DbtCloudJobRunCommand(DbtCloudAccountCommand):
"""Triggers a dbt Cloud job run and returns a status JSON response."""
job_id: int = JOB_ID_FIELD
cause: str = Field(
default="Triggered via API",
description="A text description of the reason for running this job",
)
git_sha: Optional[str] = Field(
description="The git sha to check out before running this job"
)
git_branch: Optional[str] = Field(
description="The git branch to check out before running this job"
)
schema_override: Optional[str] = Field(
description="Override the destination schema in the configured target for this job"
)
dbt_version_override: Optional[str] = Field(
description="Override the version of dbt used to run this job"
)
threads_override: Optional[int] = Field(
description="Override the number of threads used to run this job"
)
target_name_override: Optional[str] = Field(
description="Override the target.name context variable used when running this job"
)
generate_docs_override: Optional[bool] = Field(
description="Override whether or not this job generates docs (true=yes, false=no)"
)
timeout_seconds_override: Optional[int] = Field(
description="Override the timeout in seconds for this job"
)
steps_override: Optional[List[str]] = Field(
description="Override the list of steps for this job"
)
@validator("steps_override")
def check_steps_override_is_none_if_empty(cls, value):
return value or None
@property
def api_url(self) -> str:
return f"{super().api_url}/jobs/{self.job_id}/run/"
def execute(self) -> requests.Response:
response = requests.post(
url=self.api_url,
headers=self.request_headers,
json=self.get_payload(),
)
return response
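# --- Illustrative sketch (added for this write-up, not part of the original
# module): triggering a run. The `api_token` and `account_id` field names are
# assumed to come from DbtCloudAccountCommand and may differ in the real base
# class; the job id below is a made-up example value.
def _example_trigger_job_run():
    command = DbtCloudJobRunCommand(
        api_token=os.environ["DBT_CLOUD_API_TOKEN"],          # assumed field
        account_id=int(os.environ["DBT_CLOUD_ACCOUNT_ID"]),   # assumed field
        job_id=12345,
        cause="Triggered from an example script",
    )
    response = command.execute()
    response.raise_for_status()
    return response.json()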
|
20000
|
from eblib import libcollect
# Create a LibCollect object
lc = libcollect.LibCollect()
# Prepare arguments for do_collect
#
# Path to the script (can be absolute or relative)
scriptname = 'plotting_data_monitor.pyw'
# Ask the resulting distribution to be placed in
# directory distrib
targetdir = 'distrib'
# Specify which libraries to exclude from the
# distribution (because you know they're installed
# on the target machine)
excludes = ["PyQt4",
"numpy",
"serial",
"pywin",
"win32api",
"win32com"]
# This does the actual work
# See the documentation of LibCollect for more options
#
lc.do_collect( scriptname,
targetdir,
excludes,
verbose=True)
|
20038
|
from fastapi import HTTPException, Query, APIRouter
from starlette.requests import Request
from starlette.status import HTTP_404_NOT_FOUND
from .models import db, Metadata
mod = APIRouter()
@mod.get("/metadata")
async def search_metadata(
request: Request,
data: bool = Query(
False,
description="Switch to returning a list of GUIDs (false), "
"or GUIDs mapping to their metadata (true).",
),
limit: int = Query(
10, description="Maximum number of records returned. (max: 2000)"
),
offset: int = Query(0, description="Return results at this given offset."),
):
"""Search the metadata.
Without filters, this will return all data. Add filters as query strings like this:
GET /metadata?a=1&b=2
This will match all records that have metadata containing all of:
{"a": 1, "b": 2}
The values are always treated as strings for filtering. Nesting is supported:
GET /metadata?a.b.c=3
Matching records containing:
{"a": {"b": {"c": 3}}}
Providing the same key with more than one value filters records whose value of the
given key matches any of the given values. But values of different keys must all
match. For example:
GET /metadata?a.b.c=3&a.b.c=33&a.b.d=4
Matches these:
{"a": {"b": {"c": 3, "d": 4}}}
{"a": {"b": {"c": 33, "d": 4}}}
{"a": {"b": {"c": "3", "d": 4, "e": 5}}}
But won't match these:
{"a": {"b": {"c": 3}}}
{"a": {"b": {"c": 3, "d": 5}}}
{"a": {"b": {"d": 5}}}
{"a": {"b": {"c": "333", "d": 4}}}
"""
limit = min(limit, 2000)
queries = {}
for key, value in request.query_params.multi_items():
if key not in {"data", "limit", "offset"}:
queries.setdefault(key, []).append(value)
def add_filter(query):
for path, values in queries.items():
query = query.where(
db.or_(Metadata.data[list(path.split("."))].astext == v for v in values)
)
return query.offset(offset).limit(limit)
if data:
return {
metadata.guid: metadata.data
for metadata in await add_filter(Metadata.query).gino.all()
}
else:
return [
row[0]
for row in await add_filter(db.select([Metadata.guid]))
.gino.return_model(False)
.all()
]
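# --- Illustrative sketch (added for this write-up, not part of the original
# module): a hypothetical client call showing how the nested-key filters
# documented above translate into query parameters. The base URL and the use
# of `requests` are assumptions made only for this example.
def _example_metadata_query():
    import requests  # assumption: any HTTP client would do
    # matches records whose metadata contains a.b.c == "3" or "33",
    # returning GUID -> metadata mappings (data=true)
    response = requests.get(
        "http://localhost:8000/metadata",
        params=[("a.b.c", "3"), ("a.b.c", "33"), ("data", "true"), ("limit", "5")],
    )
    response.raise_for_status()
    return response.json()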
@mod.get("/metadata/{guid:path}")
async def get_metadata(guid):
"""Get the metadata of the GUID."""
metadata = await Metadata.get(guid)
if metadata:
return metadata.data
else:
raise HTTPException(HTTP_404_NOT_FOUND, f"Not found: {guid}")
def init_app(app):
app.include_router(mod, tags=["Query"])
|
20057
|
import numpy as np
import torch
from modules.frustum import get_box_corners_3d
from kitti_meters.util import get_box_iou_3d
__all__ = ['MeterFrustumKitti']
class MeterFrustumKitti:
def __init__(self, num_heading_angle_bins, num_size_templates, size_templates, class_name_to_class_id,
metric='iou_3d'):
super().__init__()
assert metric in ['iou_2d', 'iou_3d', 'accuracy', 'iou_3d_accuracy', 'iou_3d_class_accuracy']
self.metric = metric
self.num_heading_angle_bins = num_heading_angle_bins
self.num_size_templates = num_size_templates
self.size_templates = size_templates.view(self.num_size_templates, 3)
self.heading_angle_bin_centers = torch.arange(0, 2 * np.pi, 2 * np.pi / self.num_heading_angle_bins)
self.class_name_to_class_id = class_name_to_class_id
self.reset()
def reset(self):
self.total_seen_num = 0
self.total_correct_num = 0
        self.iou_3d_correct_num = 0
self.iou_2d_sum = 0
self.iou_3d_sum = 0
        self.iou_3d_correct_num_per_class = {cls: 0 for cls in self.class_name_to_class_id.keys()}
self.total_seen_num_per_class = {cls: 0 for cls in self.class_name_to_class_id.keys()}
def update(self, outputs, targets):
if self.metric == 'accuracy':
mask_logits = outputs['mask_logits']
mask_logits_target = targets['mask_logits']
self.total_seen_num += mask_logits_target.numel()
self.total_correct_num += torch.sum(mask_logits.argmax(dim=1) == mask_logits_target).item()
else:
center = outputs['center'] # (B, 3)
heading_scores = outputs['heading_scores'] # (B, NH)
heading_residuals = outputs['heading_residuals'] # (B, NH)
size_scores = outputs['size_scores'] # (B, NS)
size_residuals = outputs['size_residuals'] # (B, NS, 3)
center_target = targets['center'] # (B, 3)
heading_bin_id_target = targets['heading_bin_id'] # (B, )
heading_residual_target = targets['heading_residual'] # (B, )
size_template_id_target = targets['size_template_id'] # (B, )
size_residual_target = targets['size_residual'] # (B, 3)
class_id_target = targets['class_id'].cpu().numpy() # (B, )
batch_size = center.size(0)
batch_id = torch.arange(batch_size, device=center.device)
self.size_templates = self.size_templates.to(center.device)
self.heading_angle_bin_centers = self.heading_angle_bin_centers.to(center.device)
heading_bin_id = torch.argmax(heading_scores, dim=1)
heading = self.heading_angle_bin_centers[heading_bin_id] + heading_residuals[batch_id, heading_bin_id]
size_template_id = torch.argmax(size_scores, dim=1)
size = self.size_templates[size_template_id] + size_residuals[batch_id, size_template_id] # (B, 3)
corners = get_box_corners_3d(centers=center, headings=heading, sizes=size, with_flip=False) # (B, 8, 3)
heading_target = self.heading_angle_bin_centers[heading_bin_id_target] + heading_residual_target # (B, )
size_target = self.size_templates[size_template_id_target] + size_residual_target # (B, 3)
corners_target = get_box_corners_3d(centers=center_target, headings=heading_target,
sizes=size_target, with_flip=False) # (B, 8, 3)
iou_3d, iou_2d = get_box_iou_3d(corners.cpu().detach().numpy(), corners_target.cpu().detach().numpy())
self.iou_2d_sum += iou_2d.sum()
self.iou_3d_sum += iou_3d.sum()
            self.iou_3d_correct_num += np.sum(iou_3d >= 0.7)
self.total_seen_num += batch_size
for cls, cls_id in self.class_name_to_class_id.items():
mask = (class_id_target == cls_id)
                self.iou_3d_correct_num_per_class[cls] += np.sum(iou_3d[mask] >= (0.7 if cls == 'Car' else 0.5))
self.total_seen_num_per_class[cls] += np.sum(mask)
def compute(self):
if self.metric == 'iou_3d':
return self.iou_3d_sum / self.total_seen_num
elif self.metric == 'iou_2d':
return self.iou_2d_sum / self.total_seen_num
elif self.metric == 'accuracy':
return self.total_correct_num / self.total_seen_num
elif self.metric == 'iou_3d_accuracy':
            return self.iou_3d_correct_num / self.total_seen_num
elif self.metric == 'iou_3d_class_accuracy':
            return sum(self.iou_3d_correct_num_per_class[cls] / max(self.total_seen_num_per_class[cls], 1)
for cls in self.class_name_to_class_id.keys()) / len(self.class_name_to_class_id)
else:
raise KeyError
|
20080
|
def filter_dict(dictionary_to_filter):
return dict((k, v) for k, v in dictionary_to_filter.items() if v is not None)
|
20094
|
from appJar import gui
def press(btn):
if btn == "info": app.infoBox("Title Here", "Message here...")
if btn == "error": app.errorBox("Title Here", "Message here...")
if btn == "warning": app.warningBox("Title Here", "Message here...")
if btn == "yesno": app.yesNoBox("Title Here", "Message here...")
if btn == "question": app.questionBox("Title Here", "Message here...")
if btn == "ok": app.okBox("Title Here", "Message here...")
if btn == "retry": app.retryBox("Title Here", "Message here...")
if btn == "text": app.textBox("Title Here", "Message here...")
if btn == "number": app.numberBox("Title Here", "Message here...")
app=gui()
app.addButtons(["info", "error", "warning", "yesno", "question"], press)
app.addButtons(["ok", "retry", "text", "number"], press)
app.go()
|
20099
|
from custom_objects import FinanceCalculator
from tkinter import messagebox
class CalculationsPresenter(object):
def __init__(self, view):
self.view = view
def convert_price(self, price):
try:
converted_price = FinanceCalculator.decimal_to_treasury(price)
self.view.display_conversion(new_price=converted_price)
return None
except (ValueError, IndexError) as err:
pass
try:
converted_price = FinanceCalculator.treasury_to_decimal(price)
self.view.display_conversion(new_price=converted_price)
except (ValueError, IndexError) as err:
messagebox.showinfo(
message="An example of a valid price would be 108.50 or 108-16",
title="Invalid Price",
)
|
20111
|
import subprocess, re, sys
def get_coref_score(metric, path_to_scorer, gold=None, preds=None):
output=subprocess.check_output(["perl", path_to_scorer, metric, preds, gold]).decode("utf-8")
output=output.split("\n")[-3]
matcher=re.search("Coreference: Recall: \(.*?\) (.*?)% Precision: \(.*?\) (.*?)% F1: (.*?)%", output)
if matcher is not None:
recall=float(matcher.group(1))
precision=float(matcher.group(2))
f1=float(matcher.group(3))
return recall, precision, f1
def get_conll(path_to_scorer, gold=None, preds=None):
bcub_r, bcub_p, bcub_f=get_coref_score("bcub", path_to_scorer, gold, preds)
muc_r, muc_p, muc_f=get_coref_score("muc", path_to_scorer, gold, preds)
ceaf_r, ceaf_p, ceaf_f=get_coref_score("ceafe", path_to_scorer, gold, preds)
print("bcub:\t%.1f" % bcub_f)
print("muc:\t%.1f" % muc_f)
print("ceaf:\t%.1f" % ceaf_f)
avg=(bcub_f + muc_f + ceaf_f)/3.
print("Average F1: %.1f" % (avg))
# Generate Latex table
# print("%.1f&%.1f&%.1f&%.1f" % (bcub_f, muc_f, ceaf_f, avg))
return bcub_f, avg
if __name__ == "__main__":
goldFile=sys.argv[1]
predFile=sys.argv[2]
scorer=sys.argv[3]
bcub_f, avg=get_conll(scorer, gold=goldFile, preds=predFile)
|
20120
|
from string import ascii_letters
import textwrap
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.subset.svg import NAMESPACES, ranges
import pytest
etree = pytest.importorskip("lxml.etree")
@pytest.fixture
def empty_svg_font():
glyph_order = [".notdef"] + list(ascii_letters)
pen = TTGlyphPen(glyphSet=None)
pen.moveTo((0, 0))
pen.lineTo((0, 500))
pen.lineTo((500, 500))
pen.lineTo((500, 0))
pen.closePath()
glyph = pen.glyph()
glyphs = {g: glyph for g in glyph_order}
fb = FontBuilder(unitsPerEm=1024, isTTF=True)
fb.setupGlyphOrder(glyph_order)
fb.setupCharacterMap({ord(c): c for c in ascii_letters})
fb.setupGlyf(glyphs)
fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
fb.setupHorizontalHeader()
fb.setupOS2()
fb.setupPost()
fb.setupNameTable({"familyName": "TestSVG", "styleName": "Regular"})
svg_table = newTable("SVG ")
svg_table.docList = []
fb.font["SVG "] = svg_table
return fb.font
def new_svg(**attrs):
return etree.Element("svg", {"xmlns": NAMESPACES["svg"], **attrs})
def _lines(s):
return textwrap.dedent(s).splitlines()
@pytest.mark.parametrize(
"gids, retain_gids, expected_xml",
[
# keep four glyphs in total, don't retain gids, which thus get remapped
(
"2,4-6",
False,
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph1" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="3" startGlyphID="3">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph3" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
# same four glyphs, but we now retain gids
(
"2,4-6",
True,
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="5" startGlyphID="5">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph5" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="6" startGlyphID="6">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph6" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
],
)
def test_subset_single_glyph_per_svg(
empty_svg_font, tmp_path, gids, retain_gids, expected_xml
):
font = empty_svg_font
svg_docs = font["SVG "].docList
for i in range(1, 11):
svg = new_svg()
etree.SubElement(svg, "path", {"id": f"glyph{i}", "d": f"M{i},{i}"})
svg_docs.append((etree.tostring(svg).decode(), i, i))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={gids}",
"--retain_gids" if retain_gids else "--no-retain_gids",
]
)
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
# This contains a bunch of cross-references between glyphs, paths, gradients, etc.
# Note the path coordinates are completely made up and not meant to be rendered.
# We only care about the tree structure, not its visual content.
COMPLEX_SVG = """\
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
<path id="p1" d="M3,3"/>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph3">
<use xlink:href="#p1"/>
</g>
<use id="glyph4" xlink:href="#glyph1" x="10"/>
<use id="glyph5" xlink:href="#glyph2" y="-10"/>
<g id="glyph6">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
<g id="group1">
<g id="glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph7">
<path d="M4,4"/>
</g>
<g id="glyph8">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
<path id="M6,6"/>
</g>
<path d="M7,7"/>
</g>
<g id="glyph9">
<use xlink:href="#p2"/>
</g>
<g id="glyph10">
<use xlink:href="#p3"/>
</g>
</g>
<g id="glyph11">
<path d="M7,7" fill="url(#rg4)"/>
</g>
<g id="glyph12">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
"""
@pytest.mark.parametrize(
"subset_gids, expected_xml",
[
# we only keep gid=2, with 'glyph2' defined inside 'glyph1': 'glyph2'
# is renamed 'glyph1' to match the new subset indices, and the old 'glyph1'
# is kept (as it contains 'glyph2') but renamed '.glyph1' to avoid clash
(
"2",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id=".glyph1">
<g id="glyph1">
<path d="M0,0"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
# we keep both gid 1 and 2: the glyph elements' ids stay as they are (only the
# range endGlyphID change); a gradient is kept since it's referenced by glyph1
(
"1,2",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# both gid 3 and 6 refer (via <use xlink:href="#...") to path 'p1', which
# is thus kept in <defs>; the glyph ids and range start/end are renumbered.
"3,6",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path id="p1" d="M3,3"/>
</defs>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<g id="glyph2">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph4' uses the whole 'glyph1' element (translated); we keep the latter
# renamed to avoid clashes with new gids
"3-4",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<path id="p1" d="M3,3"/>
</defs>
<g id=".glyph1">
<g id=".glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<use id="glyph2" xlink:href="#.glyph1" x="10"/>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph9' uses a path 'p2' defined inside 'glyph7', the latter is excluded
# from our subset, thus gets renamed '.glyph7'; an unrelated element with
# same id=".glyph7" doesn't clash because it was dropped.
# Similarly 'glyph10' uses path 'p3' defined inside 'glyph8', also excluded
# from subset and prefixed with '.'. But since an id=".glyph8" is already
# used in the doc, we append a .{digit} suffix to disambiguate.
"9,10",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="group1">
<g id=".glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph8.1">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p2"/>
</g>
<g id="glyph2">
<use xlink:href="#p3"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph11' uses gradient 'rg4' which inherits from 'rg3', which inherits
# from 'rg2', etc.
"11",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
</defs>
<g id="glyph1">
<path d="M7,7" fill="url(#rg4)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph12' contains a style attribute with inline CSS declarations that
# contains references to a gradient fill and a clipPath: we keep those
"12",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
],
)
def test_subset_svg_with_references(
empty_svg_font, tmp_path, subset_gids, expected_xml
):
font = empty_svg_font
font["SVG "].docList.append((COMPLEX_SVG, 1, 12))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={subset_gids}",
"--pretty-svg",
]
)
subset_font = TTFont(subset_path)
if expected_xml is not None:
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
else:
assert "SVG " not in subset_font
def test_subset_svg_empty_table(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append((etree.tostring(svg).decode(), 1, 1))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# there's no gid=2 in SVG table, drop the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
def test_subset_svg_missing_glyph(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append(
(
etree.tostring(svg).decode(),
1,
# the range endGlyphID=2 declares two glyphs however our svg contains
# only one glyph element with id="glyph1", the "glyph2" one is absent.
            # Technically this would be invalid according to the OT-SVG spec.
2,
)
)
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# make sure we don't crash when we don't find the expected "glyph2" element
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=1"])
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == [
'<svgDoc endGlyphID="1" startGlyphID="1">',
' <![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><rect id="glyph1" x="1" y="2"/></svg>]]>',
"</svgDoc>",
]
# ignore the missing gid even if included in the subset; in this test case we
# end up with an empty svg document--which is dropped, along with the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
@pytest.mark.parametrize(
"ints, expected_ranges",
[
((), []),
((0,), [(0, 0)]),
((0, 1), [(0, 1)]),
((1, 1, 1, 1), [(1, 1)]),
((1, 3), [(1, 1), (3, 3)]),
((4, 2, 1, 3), [(1, 4)]),
((1, 2, 4, 5, 6, 9, 13, 14, 15), [(1, 2), (4, 6), (9, 9), (13, 15)]),
],
)
def test_ranges(ints, expected_ranges):
assert list(ranges(ints)) == expected_ranges
|
20176
|
from inqry.system_specs import win_physical_disk
UNIQUE_ID_OUTPUT = """
UniqueId
--------
{256a2559-ce63-5434-1bee-3ff629daa3a7}
{4069d186-f178-856e-cff3-ba250c28446d}
{4da19f06-2e28-2722-a0fb-33c02696abcd}
50014EE20D887D66
eui.0025384161B6798A
5000C5007A75E216
500A07510F1A545C
ATA LITEONIT LMT-256M6M mSATA 256GB TW0XXM305508532M0705
IDE\Diskpacker-virtualbox-iso-1421140659-disk1__F.R7BNPC\5&1944dbef&0&0.0.0:vagrant-2012-r2
"""
def test_creating_list_of_unique_disk_ids():
expected_physical_disks = {'{256a2559-ce63-5434-1bee-3ff629daa3a7}',
'{4069d186-f178-856e-cff3-ba250c28446d}',
'{4da19f06-2e28-2722-a0fb-33c02696abcd}',
'50014EE20D887D66',
'eui.0025384161B6798A',
'5000C5007A75E216',
'500A07510F1A545C',
'ATA LITEONIT LMT-256M6M mSATA 256GB TW0XXM305508532M0705',
"IDE\Diskpacker-virtualbox-iso-1421140659-disk1__F.R7BNPC\5&1944dbef&0&0.0.0:vagrant-2012-r2"}
assert expected_physical_disks == set(win_physical_disk.get_physical_disk_identifiers(UNIQUE_ID_OUTPUT))
|
20202
|
import os
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
# Code so automated tests will run properly
# Check for environment variable, if that fails, use getpass().
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
my_device = {
"device_type": "cisco_xe",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
}
with ConnectHandler(**my_device) as net_connect:
output = net_connect.send_command("show ip int brief", use_genie=True)
# output = net_connect.send_command("show ip arp", use_genie=True)
pprint(output)
|
20210
|
import pytest
from conflow.merge import merge_factory
from conflow.node import Node, NodeList, NodeMap
def test_merge_node_node(default_config):
base = Node('base', 'node_A')
other = Node('other', 'node_B')
assert merge_factory(base, other, default_config) == other
def test_merge_node_nodelist(default_config):
base = Node('base', 'node_A')
other = NodeList('other', [2])
assert merge_factory(base, other, default_config) == other
def test_merge_node_nodemap(default_config):
base = Node('base', 'node_A')
other = NodeMap('other', {
'db': {
'master': {
'host': 'other'
}
}
})
assert merge_factory(base, other, default_config) == other
def test_merge_nodelist_node(default_config):
base = NodeList('other', [2])
other = Node('base', 'node_A')
assert merge_factory(base, other, default_config) == other
def test_merge_nodelist_nodelist_override(default_config):
base = NodeList('base', [1])
other = NodeList('other', [2])
assert merge_factory(base, other, default_config) == other
def test_merge_nodelist_nodelist_extend(extend_list_config):
base = NodeList('base', [1])
other = NodeList('other', [2])
expected = NodeList('base', [1, 2])
assert merge_factory(base, other, extend_list_config) == expected
def test_merge_nodelist_nodemap(default_config):
base = NodeList('base', [1])
other = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
assert merge_factory(base, other, default_config) == other
def test_merge_nodemap_node(default_config):
base = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
other = Node('base', 'node_A')
assert merge_factory(base, other, default_config) == other
def test_merge_nodemap_nodelist(default_config):
base = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
other = NodeList('base', [1])
assert merge_factory(base, other, default_config) == other
def test_merge_nodemap_nodemap_override(default_config):
base = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
other = NodeMap('other', {
'db': {
'master': {
'host': 'other'
}
}
})
result = merge_factory(base, other, default_config)
assert result.db.master.host == 'other'
def test_merge_nodemap_nodemap_extend(default_config):
base = NodeMap('base', {
'master': {
'host': 'master'
}
})
other = NodeMap('other', {
'slave': {
'host': 'slave'
}
})
result = merge_factory(base, other, default_config)
assert 'master' in result
assert 'slave' in result
def test_merge_nodemap_nodemap_empty(default_config):
base = NodeMap('base', {})
other = NodeMap('other', {})
expected = NodeMap('expected', {})
assert merge_factory(base, other, default_config) == expected
def test_merge_different_types_strict(strict_config):
base = NodeMap('base', {'merged_key': {'a': 'b'}})
other = NodeMap('other', {'merged_key': 1})
with pytest.raises(RuntimeError) as error:
merge_factory(base, other, strict_config)
error_message = (
"Cannot merge `{'a': 'b'}` and `1` with key `merged_key`"
)
assert str(error.value) == error_message
|
20233
|
import unittest
import os.path
import requests_mock
import tableauserverclient as TSC
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml')
SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml')
SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml')
class AuthTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
self.baseurl = self.server.auth.baseurl
def test_sign_in(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_with_personal_access_tokens(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
personal_access_token='<PASSWORD>', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_impersonate(self):
with open(SIGN_IN_IMPERSONATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password',
user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgz<PASSWORD>', self.server.auth_token)
self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id)
def test_sign_in_error(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('testuser', '<PASSWORD>')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_invalid_token(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken', personal_access_token='invalid')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_without_auth(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('', '')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_out(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
m.post(self.baseurl + '/signout', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.sign_out()
self.assertIsNone(self.server._auth_token)
self.assertIsNone(self.server._site_id)
self.assertIsNone(self.server._user_id)
def test_switch_site(self):
self.server.version = '2.6'
baseurl = self.server.auth.baseurl
site_id, user_id, auth_token = list('<PASSWORD>')
self.server._set_auth(site_id, user_id, auth_token)
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/switchSite', text=response_xml)
site = TSC.SiteItem('Samples', 'Samples')
self.server.auth.switch_site(site)
self.assertEqual('eIX6mvFsq<PASSWORD>4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('<PASSWORD>-8120<PASSWORD>', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_revoke_all_server_admin_tokens(self):
self.server.version = "3.10"
baseurl = self.server.auth.baseurl
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/signin', text=response_xml)
m.post(baseurl + '/revokeAllServerAdminTokens', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.revoke_all_server_admin_tokens()
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('<PASSWORD>ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
|
20234
|
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.dynamic = False # if the agents are moving or not
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
# number states per agent
self.nx_system = 4
# numer of observations per agent
self.n_features = 6
# number of actions per agent
self.nu = 2
# problem parameters from file
self.n_agents = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.comm_radius2 = self.comm_radius * self.comm_radius
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
# intitialize state matrices
self.x = np.zeros((self.n_agents, self.nx_system))
self.u = np.zeros((self.n_agents, self.nu))
self.mean_vel = np.zeros((self.n_agents, self.nu))
self.init_vel = np.zeros((self.n_agents, self.nu))
self.a_net = np.zeros((self.n_agents, self.n_agents))
# TODO : what should the action space be? is [-1,1] OK?
self.max_accel = 1
self.gain = 10.0 # TODO - adjust if necessary - may help the NN performance
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
dtype=np.float32)
self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
dtype=np.float32)
self.fig = None
self.line1 = None
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
#u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
self.u = u
if self.dynamic:
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt
# x velocity
self.x[:, 2] = self.x[:, 2] + self.gain * self.u[:, 0] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
# y velocity
self.x[:, 3] = self.x[:, 3] + self.gain * self.u[:, 1] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
return self._get_obs(), self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
# TODO adjust to desired reward
# action_cost = -1.0 * np.sum(np.square(self.u))
#curr_variance = -1.0 * np.sum((np.var(self.x[:, 2:4], axis=0)))
versus_initial_vel = -1.0 * np.sum(np.sum(np.square(self.x[:, 2:4] - self.mean_vel), axis=1))
#return curr_variance + versus_initial_vel
return versus_initial_vel
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
degree = 0
min_dist = 0
min_dist_thresh = 0.1 # 0.25
# generate an initial configuration with all agents connected,
# and minimum distance between agents > min_dist_thresh
while degree < 2 or min_dist < min_dist_thresh:
# randomly initialize the location and velocity of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_agents,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_agents,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[1]
# compute distances between agents
a_net = self.dist2_mat(x)
# compute minimum distance between agents and degree of network to check if good initial configuration
min_dist = np.sqrt(np.min(np.min(a_net)))
a_net = a_net < self.comm_radius2
degree = np.min(np.sum(a_net.astype(int), axis=1))
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
self.a_net = self.get_connectivity(self.x)
return self._get_obs()
def _get_obs(self):
# state_values = self.x
state_values = np.hstack((self.x, self.init_vel)) # initial velocities are part of state to make system observable
if self.dynamic:
state_network = self.get_connectivity(self.x)
else:
state_network = self.a_net
return (state_values, state_network)
def dist2_mat(self, x):
"""
Compute squared euclidean distances between agents. Diagonal elements are infinity
Args:
x (): current state of all agents
Returns: symmetric matrix of size (n_agents, n_agents) with A_ij the distance between agents i and j
"""
x_loc = np.reshape(x[:, 0:2], (self.n_agents,2,1))
a_net = np.sum(np.square(np.transpose(x_loc, (0,2,1)) - np.transpose(x_loc, (2,0,1))), axis=2)
np.fill_diagonal(a_net, np.Inf)
return a_net
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current state of all agents
Returns: adjacency matrix of network
"""
a_net = self.dist2_mat(x)
a_net = (a_net < self.comm_radius2).astype(float)
if self.mean_pooling:
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(a_net, axis=1), (self.n_agents,1)) # TODO or axis=0? Is the mean in the correct direction?
n_neighbors[n_neighbors == 0] = 1
a_net = a_net / n_neighbors
return a_net
def controller(self):
"""
Consensus-based centralized flocking with no obstacle avoidance
Returns: the optimal action
"""
# TODO implement Tanner 2003?
u = np.mean(self.x[:,2:4], axis=0) - self.x[:,2:4]
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
return u
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
|
20237
|
import sys,os
from torch.autograd import Variable
import torch.optim as optim
from tensorboardX import SummaryWriter
import torch
import time
import shutil
from torch.utils.data import DataLoader
import csv
from samp_net import EMDLoss, AttributeLoss, SAMPNet
from config import Config
from cadb_dataset import CADBDataset
from test import evaluation_on_cadb
def calculate_accuracy(predict, target, threshold=2.6):
    assert target.shape == predict.shape, '{} vs. {}'.format(target.shape, predict.shape)
    bin_tar = target > threshold
    bin_pre = predict > threshold
correct = (bin_tar == bin_pre).sum()
acc = correct.float() / target.size(0)
return correct,acc
def build_dataloader(cfg):
trainset = CADBDataset('train', cfg)
trainloader = DataLoader(trainset,
batch_size=cfg.batch_size,
shuffle=True,
num_workers=cfg.num_workers,
drop_last=False)
return trainloader
class Trainer(object):
def __init__(self, model, cfg):
self.cfg = cfg
self.model = model
self.device = torch.device('cuda:{}'.format(self.cfg.gpu_id))
self.trainloader = build_dataloader(cfg)
self.optimizer = self.create_optimizer()
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, mode='min', patience=5)
self.epoch = 0
self.iters = 0
self.avg_mse = 0.
self.avg_emd = 0.
self.avg_acc = 0.
self.avg_att = 0.
self.smooth_coe = 0.4
self.smooth_mse = None
self.smooth_emd = None
self.smooth_acc = None
self.smooth_att = None
self.mse_loss = torch.nn.MSELoss()
self.emd_loss = EMDLoss()
self.test_acc = []
self.test_emd1 = []
self.test_emd2 = []
self.test_mse = []
self.test_srcc = []
self.test_lcc = []
if cfg.use_attribute:
self.att_loss = AttributeLoss(cfg.attribute_weight)
self.least_metric = 1.
self.writer = self.create_writer()
def create_optimizer(self):
# for param in self.model.backbone.parameters():
# param.requires_grad = False
bb_params = list(map(id, self.model.backbone.parameters()))
lr_params = filter(lambda p:id(p) not in bb_params, self.model.parameters())
params = [
{'params': lr_params, 'lr': self.cfg.lr},
{'params': self.model.backbone.parameters(), 'lr': self.cfg.lr * 0.01}
]
if self.cfg.optimizer == 'adam':
optimizer = optim.Adam(params,
weight_decay=self.cfg.weight_decay)
elif self.cfg.optimizer == 'sgd':
optimizer = optim.SGD(params,
momentum=self.cfg.momentum,
weight_decay=self.cfg.weight_decay)
else:
raise ValueError(f"not such optimizer {self.cfg.optimizer}")
return optimizer
def create_writer(self):
print('Create tensorboardX writer...', self.cfg.log_dir)
writer = SummaryWriter(log_dir=self.cfg.log_dir)
return writer
def run(self):
for epoch in range(self.cfg.max_epoch):
self.run_epoch()
self.epoch += 1
self.scheduler.step(metrics=self.least_metric)
self.writer.add_scalar('Train/lr', self.optimizer.param_groups[0]['lr'], self.epoch)
if self.epoch % self.cfg.save_epoch == 0:
checkpoint_path = os.path.join(self.cfg.checkpoint_dir, 'model-{epoch}.pth')
torch.save(self.model.state_dict(), checkpoint_path.format(epoch=self.epoch))
print('Save checkpoint...')
if self.epoch % self.cfg.test_epoch == 0:
test_emd = self.eval_training()
if test_emd < self.least_metric:
self.least_metric = test_emd
checkpoint_path = os.path.join(self.cfg.checkpoint_dir, 'model-best.pth')
torch.save(self.model.state_dict(), checkpoint_path)
print('Update best checkpoint...')
self.writer.add_scalar('Test/Least EMD', self.least_metric, self.epoch)
def eval_training(self):
avg_acc, avg_r1_emd, avg_r2_emd, avg_mse, SRCC, LCC = \
evaluation_on_cadb(self.model, self.cfg)
self.writer.add_scalar('Test/Average EMD(r=2)', avg_r2_emd, self.epoch)
self.writer.add_scalar('Test/Average EMD(r=1)', avg_r1_emd, self.epoch)
self.writer.add_scalar('Test/Average MSE', avg_mse, self.epoch)
self.writer.add_scalar('Test/Accuracy', avg_acc, self.epoch)
self.writer.add_scalar('Test/SRCC', SRCC, self.epoch)
self.writer.add_scalar('Test/LCC', LCC, self.epoch)
error = avg_r1_emd
self.test_acc.append(avg_acc)
self.test_emd1.append(avg_r1_emd)
self.test_emd2.append(avg_r2_emd)
self.test_mse.append(avg_mse)
self.test_srcc.append(SRCC)
self.test_lcc.append(LCC)
self.write2csv()
return error
def write2csv(self):
csv_path = os.path.join(self.cfg.exp_path, '..', '{}.csv'.format(self.cfg.exp_name))
header = ['epoch', 'Accuracy', 'EMD r=1', 'EMD r=2', 'MSE', 'SRCC', 'LCC']
epoches = list(range(len(self.test_acc)))
metrics = [epoches, self.test_acc, self.test_emd1, self.test_emd2,
self.test_mse, self.test_srcc, self.test_lcc]
rows = [header]
for i in range(len(epoches)):
row = [m[i] for m in metrics]
rows.append(row)
for name, m in zip(header, metrics):
if name == 'epoch':
continue
index = m.index(min(m))
if name in ['Accuracy', 'SRCC', 'LCC']:
index = m.index(max(m))
title = 'best {} (epoch-{})'.format(name, index)
row = [l[index] for l in metrics]
row[0] = title
rows.append(row)
with open(csv_path, 'w') as f:
cw = csv.writer(f)
cw.writerows(rows)
print('Save result to ', csv_path)
def dist2ave(self, pred_dist):
pred_score = torch.sum(pred_dist* torch.Tensor(range(1,6)).to(pred_dist.device), dim=-1, keepdim=True)
return pred_score
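        # Hedged worked example of dist2ave (illustrative numbers, not from the
        # original code): pred_dist = [0.0, 0.0, 0.5, 0.5, 0.0] over scores 1..5
        # gives pred_score = 3 * 0.5 + 4 * 0.5 = 3.5.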
def run_epoch(self):
self.model.train()
for batch, data in enumerate(self.trainloader):
self.iters += 1
image = data[0].to(self.device)
score = data[1].to(self.device)
score_dist = data[2].to(self.device)
saliency = data[3].to(self.device)
attributes = data[4].to(self.device)
weight = data[5].to(self.device)
pred_weight, pred_atts, pred_dist = self.model(image, saliency)
if self.cfg.use_weighted_loss:
dist_loss = self.emd_loss(score_dist, pred_dist, weight)
else:
dist_loss = self.emd_loss(score_dist, pred_dist)
if self.cfg.use_attribute:
att_loss = self.att_loss(attributes, pred_atts)
loss = dist_loss + att_loss
else:
loss = dist_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
            self.avg_emd += dist_loss.item()
            if self.cfg.use_attribute:
                self.avg_att += att_loss.item()
pred_score = self.dist2ave(pred_dist)
correct, accuracy = calculate_accuracy(pred_score, score)
self.avg_acc += accuracy.item()
if (self.iters+1) % self.cfg.display_steps == 0:
print('ground truth: average={}'.format(score.view(-1)))
print('prediction: average={}'.format(pred_score.view(-1)))
self.avg_emd = self.avg_emd / self.cfg.display_steps
self.avg_acc = self.avg_acc / self.cfg.display_steps
if self.cfg.use_attribute:
self.avg_att = self.avg_att / self.cfg.display_steps
                if self.smooth_emd is not None:
self.avg_emd = (1-self.smooth_coe) * self.avg_emd + self.smooth_coe * self.smooth_emd
self.avg_acc = (1-self.smooth_coe) * self.avg_acc + self.smooth_coe * self.smooth_acc
if self.cfg.use_attribute:
self.avg_att = (1-self.smooth_coe) * self.avg_att + self.smooth_coe * self.smooth_att
self.writer.add_scalar('Train/AttributeLoss', self.avg_att, self.iters)
self.writer.add_scalar('Train/EMD_Loss', self.avg_emd, self.iters)
self.writer.add_scalar('Train/Accuracy', self.avg_acc, self.iters)
if self.cfg.use_attribute:
                    print('Training Epoch:{}/{} Current Batch: {}/{} EMD_Loss:{:.4f} Attribute_Loss:{:.4f} ACC:{:.2%} lr:{:.6f} '.
format(
self.epoch, self.cfg.max_epoch,
batch, len(self.trainloader),
self.avg_emd, self.avg_att,
self.avg_acc,
self.optimizer.param_groups[0]['lr']))
else:
print(
                    'Training Epoch:{}/{} Current Batch: {}/{} EMD_Loss:{:.4f} ACC:{:.2%} lr:{:.6f} '.
format(
self.epoch, self.cfg.max_epoch,
batch, len(self.trainloader),
self.avg_emd, self.avg_acc,
self.optimizer.param_groups[0]['lr']))
self.smooth_emd = self.avg_emd
self.smooth_acc = self.avg_acc
self.avg_mse = 0.
self.avg_emd = 0.
self.avg_acc = 0.
if self.cfg.use_attribute:
self.smooth_att = self.avg_att
self.avg_att = 0.
print()
if __name__ == '__main__':
cfg = Config()
cfg.create_path()
device = torch.device('cuda:{}'.format(cfg.gpu_id))
# evaluate(cfg)
for file in os.listdir('./'):
if file.endswith('.py'):
shutil.copy(file, cfg.exp_path)
print('Backup ', file)
model = SAMPNet(cfg)
model = model.train().to(device)
trainer = Trainer(model, cfg)
trainer.run()
|
20239
|
import pygame
from pygame.mixer import music
from pystage.core.assets import SoundManager
from pystage.core._base_sprite import BaseSprite
import time
class _Sound(BaseSprite):
# Like for costumes and backdrops, we need a class structure here.
# Plus a global sound manager.
def __init__(self):
super().__init__()
self.sound_manager = SoundManager(self)
self.mixer = pygame.mixer
self.mixer.init(channels=2)
self.current_pan = 0
self.current_pitch = 0
self.current_volume = 100
def pystage_addsound(self, name):
self.sound_manager.add_sound(name)
def sound_play(self, name, loop=0):
channel = self.mixer.find_channel()
sound = self.sound_manager.get_sound(name)
if sound is not None:
channel.play(sound, loop)
return channel
def sound_playuntildone(self, name):
sound = self.sound_manager.get_sound(name)
if sound is not None:
self.mixer.find_channel().play(sound, 0)
# time.sleep(sound.get_length())
# This need to be done via wait time in code block
# TODO: Add this function to yield blocks.
self.code_manager.current_block.add_to_wait_time = sound.get_length()
def sound_stopallsounds(self):
self.mixer.stop()
def sound_changeeffectby_pitch(self, value):
# TODO: for pitching there is no ready to use code in pygame. To do so
# we must operate on the audio array itself.
# -360 to 360, 10 is a half-step, 120 an octave
# changes only the speed of the sound
pass
sound_changeeffectby_pitch.opcode = "sound_changeeffectby"
sound_changeeffectby_pitch.param = "EFFECT"
sound_changeeffectby_pitch.value = "PITCH"
sound_changeeffectby_pitch.translation = "sound_effects_pitch"
def sound_changeeffectby_pan(self, value):
# norm pan value from -100/100 to range 0/1
self.current_pan += value
self.current_pan = min(100, max(-100, self.current_pan))
self._apply()
sound_changeeffectby_pan.opcode = "sound_changeeffectby"
sound_changeeffectby_pan.param = "EFFECT"
sound_changeeffectby_pan.value = "PAN"
sound_changeeffectby_pan.translation = "sound_effects_pan"
def sound_seteffectto_pitch(self, value):
# TODO: for pitching there is no ready to use code in pygame. To do so
# we must operate on the audio array itself.
pass
sound_seteffectto_pitch.opcode = "sound_seteffectto"
sound_seteffectto_pitch.param = "EFFECT"
sound_seteffectto_pitch.value = "PITCH"
sound_seteffectto_pitch.translation = "sound_effects_pitch"
def sound_seteffectto_pan(self, value):
# Values from -100 (left) to 100 (right)
self.current_pan = value
self.current_pan = min(100, max(-100, self.current_pan))
self._apply()
sound_seteffectto_pan.opcode = "sound_seteffectto"
sound_seteffectto_pan.param = "EFFECT"
sound_seteffectto_pan.value = "PAN"
sound_seteffectto_pan.translation = "sound_effects_pan"
def sound_cleareffects(self):
self.current_pan = 0
self.current_pitch = 0
self._apply()
    def _apply(self):
        # Combine pan (-100 = left ... 100 = right) and volume (0-100) into
        # per-channel (left, right) volumes and apply them to all channels.
        # Pitch is not applied here, see the TODOs in the pitch effect methods.
        # e.g. pan=0, volume=100 -> (1.0, 1.0); pan=-100, volume=50 -> (0.5, 0.0)
        pgvolume = self.current_volume / 100
        left = pgvolume * min(1.0, (100 - self.current_pan) / 100)
        right = pgvolume * min(1.0, (100 + self.current_pan) / 100)
        for channel_id in range(self.mixer.get_num_channels()):
            self.mixer.Channel(channel_id).set_volume(left, right)
def sound_changevolumeby(self, value):
self.current_volume += value
self.current_volume = min(100, max(0, self.current_volume))
self._apply()
def sound_setvolumeto(self, value):
self.current_volume = value
self.current_volume = min(100, max(0, self.current_volume))
self._apply()
def sound_volume(self):
# as we hide the channel mechanic, we assume all channels are set to the same volume
return self.mixer.Channel(0).get_volume() * 100
|
20298
|
import os
import dgl
import time
import argparse
import numpy as np
import torch as th
import distutils.util
import torch.nn.functional as F
import utils
import models
import data_loader
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
dev = th.device('cuda' if th.cuda.is_available() else 'cpu')
if __name__ == '__main__':
argparser = argparse.ArgumentParser("training")
argparser.add_argument('--adj-path', type=str, default='../data/adj_matrix_formal_stage.pkl')
argparser.add_argument('--feat-path', type=str, default='../data/feature_formal_stage.npy')
argparser.add_argument('--label-path', type=str, default='../data/train_labels_formal_stage.npy')
argparser.add_argument('--output-dir', type=str, default='./saved_models/')
argparser.add_argument('--output-name', type=str, default='tagcn_128_3.pkl')
argparser.add_argument('--if-load-model', type=lambda x: bool(distutils.util.strtobool(x)), default=False)
argparser.add_argument('--model-dir', type=str, default='./saved_models/')
argparser.add_argument('--model-name', type=str, default='tagcn_128_3.pkl')
argparser.add_argument('--num-epochs', type=int, default=5000)
argparser.add_argument('--num-hidden', type=int, default=128)
argparser.add_argument('--num-layers', type=int, default=3)
argparser.add_argument('--lr', type=float, default=0.001)
argparser.add_argument('--dropout', type=float, default=0.1)
argparser.add_argument('--adj-norm', type=lambda x: bool(distutils.util.strtobool(x)), default=True)
argparser.add_argument('--feat-norm', type=str, default=None)
args = argparser.parse_args()
print(vars(args))
dataset = data_loader.KddDataset(args.adj_path, args.feat_path, args.label_path, indices)
adj = dataset.adj
features = dataset.features
labels = dataset.labels
train_mask = dataset.train_mask
val_mask = dataset.val_mask
test_mask = dataset.test_mask
size_raw = features.shape[0]
size_reduced = size_raw - 50000
graph = dgl.DGLGraph()
if args.adj_norm:
adj = utils.adj_preprocess(adj)
feat_norm_func = utils.feat_norm(args.feat_norm)
graph.from_scipy_sparse_matrix(adj)
features = th.FloatTensor(features).to(dev)
features[th.where(features < -1.0)[0]] = 0
features[th.where(features > 1.0)[0]] = 0
features = feat_norm_func(features)
labels = th.LongTensor(labels).to(dev)
graph.ndata['features'] = features
model = models.TAGCN(100, args.num_hidden, 20, args.num_layers, activation=F.leaky_relu, dropout=args.dropout)
if args.if_load_model:
model_states = th.load(os.path.join(args.model_dir, args.model_name), map_location=dev)
model.load_state_dict(model_states)
model = model.to(dev)
optimizer = th.optim.Adam(model.parameters(), lr=args.lr)
dur = []
for epoch in range(args.num_epochs):
t0 = time.time()
logits = model(graph, features).to(dev)
logp = F.log_softmax(logits, 1)[:size_reduced]
loss = F.nll_loss(logp[train_mask], labels[train_mask]).to(dev)
optimizer.zero_grad()
loss.backward()
optimizer.step()
dur.append(time.time() - t0)
if epoch % 10 == 0:
train_acc = utils.compute_acc(logp, labels, train_mask)
val_acc = utils.compute_acc(logp, labels, val_mask)
print('Epoch {:05d} | Loss {:.4f} | Train Acc {:.4f} | Val Acc {:.4f} '
'| Time(s) {:.4f} | GPU {:.1f} MiB'.format(
epoch, loss, train_acc, val_acc, np.mean(dur), th.cuda.max_memory_allocated() / 1000000))
th.save(model.state_dict(), os.path.join(args.output_dir, args.output_name))
|
20310
|
from collections import namedtuple
RGB = namedtuple("RGB", "red, green, blue")
COLORS = {
"red": RGB(255, 0, 0),
"orange-deep": RGB(255, 40, 0),
"orange": RGB(255, 120, 0),
"yellow": RGB(255, 200, 0),
"yellow-acid": RGB(160, 255, 0),
"green": RGB(0, 255, 0),
"green-forest": RGB(34, 139, 34),
"green-spring": RGB(0, 255, 127),
"green-teal": RGB(0, 128, 128),
"green-turquoise": RGB(0, 199, 140),
"green-coral": RGB(0, 255, 50),
"cyan": RGB(0, 255, 255),
"blue": RGB(0, 0, 255),
"blue-light": RGB(65, 105, 225),
"blue-navy": RGB(0, 0, 128),
"blue-aqua": RGB(0, 255, 255),
"purple": RGB(128, 0, 128),
"pink": RGB(255, 0, 178),
"magenta": RGB(255, 0, 255),
"black": RGB(0, 0, 0),
"white": RGB(255, 255, 255),
"brown": RGB(139, 69, 19),
"gold": RGB(255, 215, 0),
"hotpink": RGB(255, 105, 180),
"lightblue": RGB(173, 216, 230),
"lightgreen": RGB(152, 251, 152),
"lightpink": RGB(255, 182, 193),
"lightyellow": RGB(255, 255, 224),
"maroon": RGB(128, 0, 0),
"mint": RGB(189, 252, 201),
"olive": RGB(85, 107, 47),
"peach": RGB(255, 100, 100),
"plum": RGB(221, 160, 221),
"sepia": RGB(94, 38, 18),
"skyblue": RGB(135, 206, 235),
"steelblue": RGB(70, 130, 180),
"tan": RGB(210, 180, 140),
"violetred": RGB(208, 32, 144),
}
GRADIENTS = {
"Rainbow": {
"colors": [
"red",
"orange",
"yellow",
"green",
"green-turquoise",
"blue",
"purple",
"pink",
]
},
"Dancefloor": {"colors": ["red", "pink", "blue"]},
"Plasma": {"colors": ["blue", "purple", "red", "orange-deep", "yellow"]},
"Ocean": {"colors": ["blue-aqua", "blue"]},
"Viridis": {"colors": ["purple", "blue", "green-teal", "green", "yellow"]},
"Jungle": {"colors": ["green", "green-forest", "orange"]},
"Spring": {"colors": ["pink", "orange-deep", "yellow"]},
"Winter": {"colors": ["green-turquoise", "green-coral"]},
"Frost": {"colors": ["blue", "blue-aqua", "purple", "pink"]},
"Sunset": {"colors": ["blue-navy", "orange", "red"]},
"Borealis": {
"colors": [
"orange-deep",
"purple",
"green-turquoise",
"green",
]
},
"Rust": {"colors": ["orange-deep", "red"]},
"Christmas": {
"colors": [
"red",
"red",
"red",
"red",
"red",
"green",
"green",
"green",
"green",
"green",
],
"method": "repeat",
},
"Winamp": {
"colors": [
"green",
"yellow",
"orange",
"orange-deep",
"red",
]
},
}
|
20325
|
from menpofit.result import (ParametricIterativeResult,
MultiScaleParametricIterativeResult)
class LucasKanadeAlgorithmResult(ParametricIterativeResult):
r"""
Class for storing the iterative result of a Lucas-Kanade Image Alignment
optimization algorithm.
Parameters
----------
shapes : `list` of `menpo.shape.PointCloud`
The `list` of shapes per iteration. The first and last members
correspond to the initial and final shapes, respectively.
homogeneous_parameters : `list` of ``(n_parameters,)`` `ndarray`
The `list` of parameters of the homogeneous transform per iteration.
The first and last members correspond to the initial and final
shapes, respectively.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape from which the fitting process started. If
``None``, then no initial shape is assigned.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
costs : `list` of `float` or ``None``, optional
The `list` of cost per iteration. If ``None``, then it is assumed that
the cost function cannot be computed for the specific algorithm.
"""
def __init__(self, shapes, homogeneous_parameters, initial_shape=None,
image=None, gt_shape=None, costs=None):
super(LucasKanadeAlgorithmResult, self).__init__(
shapes=shapes, shape_parameters=homogeneous_parameters,
initial_shape=initial_shape, image=image, gt_shape=gt_shape,
costs=costs)
self._homogeneous_parameters = homogeneous_parameters
@property
def homogeneous_parameters(self):
r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._shape_parameters
class LucasKanadeResult(MultiScaleParametricIterativeResult):
r"""
    Class for storing the multi-scale iterative result of a Lucas-Kanade Image
    Alignment optimization. It holds the shapes, homogeneous transform
    parameters and costs per iteration.
Parameters
----------
    results : `list` of :map:`LucasKanadeAlgorithmResult`
The `list` of optimization results per scale.
scales : `list` or `tuple`
The `list` of scale values per scale (low to high).
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that transform the shapes into
the original image space.
scale_transforms : `list` of `menpo.shape.Scale`
The list of scaling transforms per scale.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""
def __init__(self, results, scales, affine_transforms, scale_transforms,
image=None, gt_shape=None):
super(LucasKanadeResult, self).__init__(
results=results, scales=scales, affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)
# Create parameters list
self._homogeneous_parameters = []
for r in results:
self._homogeneous_parameters += r.homogeneous_parameters
# Correct n_iters
self._n_iters -= len(scales)
@property
def homogeneous_parameters(self):
r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""
return self._homogeneous_parameters
@property
def shape_parameters(self):
# Use homogeneous_parameters instead.
raise AttributeError
|
20358
|
from setuptools import setup, find_packages
version = {}
with open("nltools/version.py") as f:
exec(f.read(), version)
with open("requirements.txt") as f:
requirements = f.read().splitlines()
extra_setuptools_args = dict(tests_require=["pytest"])
setup(
name="nltools",
version=version["__version__"],
author="<NAME>",
author_email="<EMAIL>",
url="https://cosanlab.github.io/nltools",
python_requires=">=3.6",
install_requires=requirements,
extras_require={"interactive_plots": ["ipywidgets>=5.2.2"]},
packages=find_packages(exclude=["nltools/tests"]),
package_data={"nltools": ["resources/*"]},
include_package_data=True,
license="LICENSE.txt",
description="A Python package to analyze neuroimaging data",
long_description="nltools is a collection of python tools to perform "
"preprocessing, univariate GLMs, and predictive "
"multivariate modeling of neuroimaging data. It is the "
"analysis engine powering www.neuro-learn.org.",
keywords=["neuroimaging", "preprocessing", "analysis", "machine-learning"],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
],
**extra_setuptools_args
)
|
20366
|
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
import numpy as np
import cv2
class Model:
def __init__(self,confidence_thresh=0.6):
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_thresh # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
self.model = DefaultPredictor(cfg)
    def get_seg_output(self, image: np.ndarray):
out = self.model(image)['instances']
outputs = [(out.pred_masks[i],out.pred_classes[i]) for i in range(len(out.pred_classes)) if out.pred_classes[i]==0]
return outputs
class Preprocessing:
def __init__(self,kernel,dilate_iter=5,erode_iter=1):
self.kernel = kernel
self.dilate_iter = dilate_iter
self.erode_iter = erode_iter
def get_target_mask(self,masks):
out = np.zeros(masks[0].shape)
for mask in masks:
out += mask
out = np.clip(out,0,1)
return out
def get_trimap(self,masks):
target_mask = self.get_target_mask(masks)
erode = cv2.erode(target_mask.astype('uint8'),self.kernel,iterations=self.erode_iter)
dilate = cv2.dilate(target_mask.astype('uint8'),self.kernel,iterations=self.dilate_iter)
h, w = target_mask.shape
trimap = np.zeros((h, w, 2))
trimap[erode == 1, 1] = 1
trimap[dilate == 0, 0] = 1
return trimap
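# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal end-to-end run, assuming a local image at "input.jpg" and a 3x3
# morphology kernel; both are illustrative choices, not taken from this code.
if __name__ == "__main__":
    image = cv2.imread("input.jpg")  # hypothetical input path
    model = Model(confidence_thresh=0.6)
    # get_seg_output returns (mask, class) pairs for persons only (class id 0)
    masks = [mask.cpu().numpy() for mask, _cls in model.get_seg_output(image)]
    if masks:
        pre = Preprocessing(kernel=np.ones((3, 3), np.uint8), dilate_iter=5, erode_iter=1)
        trimap = pre.get_trimap(masks)  # (H, W, 2): [..., 1] sure foreground, [..., 0] sure background
        print("trimap shape:", trimap.shape)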
|
20367
|
from setuptools import setup
setup(
name="example-advanced-package", version="0.0.0", packages=[],
)
|
20368
|
from distutils.core import setup
setup(
name='pyASA',
packages=['pyASA'],
version='0.1.0',
description='Wrapper for the Cisco ASA REST API',
author='xpac',
author_email='<EMAIL>',
url='https://github.com/xpac1985/pyASA',
download_url='https://github.com/xpac1985/pyASA/tarball/0.1.0',
keywords=['cisco', 'asa', 'rest-api', 'wrapper', 'alpha'],
classifiers=[],
)
|
20376
|
def _recipes_pil_prescript(plugins):
try:
import Image
have_PIL = False
except ImportError:
from PIL import Image
have_PIL = True
import sys
def init():
if Image._initialized >= 2:
return
if have_PIL:
try:
import PIL.JpegPresets
sys.modules["JpegPresets"] = PIL.JpegPresets
except ImportError:
pass
for plugin in plugins:
try:
if have_PIL:
try:
# First try absolute import through PIL (for
# Pillow support) only then try relative imports
m = __import__("PIL." + plugin, globals(), locals(), [])
m = getattr(m, plugin)
sys.modules[plugin] = m
continue
except ImportError:
pass
__import__(plugin, globals(), locals(), [])
except ImportError:
print("Image: failed to import")
if Image.OPEN or Image.SAVE:
Image._initialized = 2
return 1
Image.init = init
|
20397
|
import torch
import numpy as np
PAD_TOKEN_INDEX = 0
def pad_masking(x, target_len):
# x: (batch_size, seq_len)
batch_size, seq_len = x.size()
padded_positions = x == PAD_TOKEN_INDEX # (batch_size, seq_len)
pad_mask = padded_positions.unsqueeze(1).expand(batch_size, target_len, seq_len)
return pad_mask
def subsequent_masking(x):
# x: (batch_size, seq_len - 1)
batch_size, seq_len = x.size()
subsequent_mask = np.triu(np.ones(shape=(seq_len, seq_len)), k=1).astype('uint8')
subsequent_mask = torch.tensor(subsequent_mask).to(x.device)
subsequent_mask = subsequent_mask.unsqueeze(0).expand(batch_size, seq_len, seq_len)
return subsequent_mask
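# --- Hedged usage sketch (not part of the original file) ---------------------
# A quick shape check on a toy batch; the tensor values are illustrative only
# (0 is assumed to be the padding index, as defined by PAD_TOKEN_INDEX above).
if __name__ == "__main__":
    x = torch.tensor([[5, 7, 0, 0],
                      [3, 0, 0, 0]])      # (batch_size=2, seq_len=4)
    pad = pad_masking(x, target_len=4)    # (2, 4, 4), True where the key position is padding
    sub = subsequent_masking(x)           # (2, 4, 4), upper-triangular mask of future positions
    print(pad.shape, sub.shape)
    print(pad[0])
    print(sub[0])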
|
20441
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from common import MayanAppConfig
from .licenses import * # NOQA
class MIMETypesApp(MayanAppConfig):
name = 'mimetype'
verbose_name = _('MIME types')
def ready(self, *args, **kwargs):
super(MIMETypesApp, self).ready(*args, **kwargs)
|
20474
|
from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
class VOCDataset(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 21
self.palette = pallete.get_voc_pallete(self.num_classes)
super(VOCDataset, self).__init__(**kwargs)
def _set_files(self):
self.root = os.path.join(self.root, 'VOCdevkit/VOC2012')
if self.split == "val":
file_list = os.path.join("dataloaders/voc_splits", f"{self.split}" + ".txt")
elif self.split in ["train_supervised", "train_unsupervised"]:
file_list = os.path.join("dataloaders/voc_splits", f"{self.n_labeled_examples}_{self.split}" + ".txt")
else:
raise ValueError(f"Invalid split name {self.split}")
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
self.files, self.labels = list(zip(*file_list))
def _load_data(self, index):
image_path = os.path.join(self.root, self.files[index][1:])
image = np.asarray(Image.open(image_path), dtype=np.float32)
image_id = self.files[index].split("/")[-1].split(".")[0]
if self.use_weak_lables:
label_path = os.path.join(self.weak_labels_output, image_id+".png")
else:
label_path = os.path.join(self.root, self.labels[index][1:])
label = np.asarray(Image.open(label_path), dtype=np.int32)
return image, label, image_id
class VOC(BaseDataLoader):
def __init__(self, kwargs):
self.MEAN = [0.485, 0.456, 0.406]
self.STD = [0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
        shuffle = kwargs.pop('shuffle', False)
num_workers = kwargs.pop('num_workers')
self.dataset = VOCDataset(**kwargs)
super(VOC, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None)
|
20488
|
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
def binary_reg(x: torch.Tensor):
# forward: f(x) = (x>=0)
# backward: f(x) = sigmoid
a = torch.sigmoid(x)
b = a.detach()
c = (x.detach() >= 0).float()
return a - b + c
class HIN2vec(nn.Module):
def __init__(self, node_size, path_size, embed_dim, sigmoid_reg=False, r=True):
super().__init__()
self.reg = torch.sigmoid if sigmoid_reg else binary_reg
self.__initialize_model(node_size, path_size, embed_dim, r)
def __initialize_model(self, node_size, path_size, embed_dim, r):
self.start_embeds = nn.Embedding(node_size, embed_dim)
self.end_embeds = self.start_embeds if r else nn.Embedding(node_size, embed_dim)
self.path_embeds = nn.Embedding(path_size, embed_dim)
# self.classifier = nn.Sequential(
# nn.Linear(embed_dim, 1),
# nn.Sigmoid(),
# )
def forward(self, start_node: torch.LongTensor, end_node: torch.LongTensor, path: torch.LongTensor):
# assert start_node.dim() == 1 # shape = (batch_size,)
s = self.start_embeds(start_node) # (batch_size, embed_size)
e = self.end_embeds(end_node)
p = self.path_embeds(path)
p = self.reg(p)
agg = torch.mul(s, e)
agg = torch.mul(agg, p)
# agg = F.sigmoid(agg)
# output = self.classifier(agg)
output = torch.sigmoid(torch.sum(agg, axis=1))
return output
def train(log_interval, model, device, train_loader: DataLoader, optimizer, loss_function, epoch):
model.train()
for idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data[:, 0], data[:, 1], data[:, 2])
loss = loss_function(output.view(-1), target)
loss.backward()
optimizer.step()
if idx % log_interval == 0:
print(f'\rTrain Epoch: {epoch} '
f'[{idx * len(data)}/{len(train_loader.dataset)} ({100. * idx / len(train_loader):.3f}%)]\t'
f'Loss: {loss.item():.3f}\t\t',
# f'data = {data}\t target = {target}',
end='')
print()
class NSTrainSet(Dataset):
"""
完全随机的负采样 todo 改进一下?
"""
def __init__(self, sample, node_size, neg=5):
"""
        :param node_size: number of nodes
        :param neg: number of negative samples per positive sample
        :param sample: return value of HIN.sample(), tuples of (start_node, end_node, path_id)
"""
print('init training dataset...')
l = len(sample)
x = np.tile(sample, (neg + 1, 1))
y = np.zeros(l * (1 + neg))
y[:l] = 1
# x[l:, 2] = np.random.randint(0, path_size - 1, (l * neg,))
x[l:, 1] = np.random.randint(0, node_size - 1, (l * neg,))
self.x = torch.LongTensor(x)
self.y = torch.FloatTensor(y)
self.length = len(x)
print('finished')
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.length
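# Hedged worked example of NSTrainSet (illustrative numbers, not from this code):
#   sample = [[0, 1, 2], [3, 4, 5]] (two positive triples), node_size = 10, neg = 2
#   -> x has 6 rows (the 2 positives tiled neg + 1 times), y = [1, 1, 0, 0, 0, 0],
#      and the last 4 rows get a random end node drawn from [0, node_size - 2].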
if __name__ == '__main__':
## test binary_reg
print('sigmoid')
a = torch.tensor([-1.,0.,1.],requires_grad=True)
b = torch.sigmoid(a)
c = b.sum()
print(a)
print(b)
print(c)
c.backward()
print(c.grad)
print(b.grad)
print(a.grad)
print('binary')
a = torch.tensor([-1., 0., 1.], requires_grad=True)
b = binary_reg(a)
c = b.sum()
print(a)
print(b)
print(c)
c.backward()
print(c.grad)
print(b.grad)
print(a.grad)
|
20491
|
SECRET_KEY = None
DB_HOST = "localhost"
DB_NAME = "kido"
DB_USERNAME = "kido"
DB_PASSWORD = "<PASSWORD>"
COMPRESSOR_DEBUG = False
COMPRESSOR_OFFLINE_COMPRESS = True
|
20493
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_dice_score(
output: torch.Tensor, target: torch.Tensor, smooth: float = 0.0, eps: float = 1e-7, dims=None) -> torch.Tensor:
assert output.size() == target.size()
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
# print('cardinality', cardinality, 'intersection', intersection)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
dice_score = (2.0 * intersection + smooth) / (cardinality + smooth).clamp_min(eps)
# print('dice_score', dice_score)
return dice_score
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0, eps=1e-7, ignore_index=None, weight=None, mode='MULTICLASS_MODE'):
"""Implementation of Dice loss for image segmentation task.
https://github.com/qubvel/segmentation_models.pytorch
"""
super().__init__()
self.smooth = smooth
self.eps = eps
self.ignore_index = ignore_index
self.weight = weight
self.mode = mode
def forward(self, output, target):
bs = target.size(0)
num_classes = output.size(1)
dims = (0, 2)
# print(self.mode, self.ignore_index)
if self.mode == 'MULTICLASS_MODE':
output = output.log_softmax(dim=1).exp()
else:
output = F.logsigmoid(output).exp()
# output = output.log_softmax(dim=1).exp()
if self.mode == 'BINARY_MODE':
target = target.view(bs, 1, -1)
output = output.view(bs, 1, -1)
if self.ignore_index is not None:
mask = target != self.ignore_index
output = output * mask
target = target * mask
else:
target = target.view(bs, -1)
output = output.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = target != self.ignore_index
output = output * mask.unsqueeze(1)
target = F.one_hot((target * mask).to(torch.long), num_classes) # N,H*W -> N,H*W, C
target = target.permute(0, 2, 1) * mask.unsqueeze(1)
else:
target = F.one_hot(target, num_classes) # N,H*W -> N,H*W, C
                target = target.permute(0, 2, 1)  # N, C, H*W
scores = soft_dice_score(output, target.type_as(output), smooth=self.smooth, eps=self.eps, dims=dims)
loss = 1.0 - scores
mask = target.sum(dims) > 0
loss *= mask.to(loss.dtype)
return loss.mean()
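# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal multiclass example on random tensors; the shapes (N=2, C=3, 8x8)
# and the ignore_index value are illustrative assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(2, 3, 8, 8)            # (N, C, H, W) raw scores
    target = torch.randint(0, 3, (2, 8, 8))     # (N, H, W) class indices
    criterion = DiceLoss(smooth=1.0, ignore_index=255, mode='MULTICLASS_MODE')
    print(float(criterion(logits, target)))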
|
20494
|
import inspect
import re
from functools import update_wrapper
from typing import Optional
def is_interactive() -> bool:
try:
_ = get_ipython().__class__.__name__ # type: ignore
return True
except NameError:
return False
def get_attr_docstring(class_type, attr_name) -> Optional[str]:
if attr_name == 'get':
attr_name = '__call__'
attr = getattr(class_type, attr_name, None)
if attr and attr.__doc__:
return re.sub(r' {3,}', '', attr.__doc__)
return None
def default_attr_filter(x) -> bool: # pylint: disable=unused-argument
return True
def get_class_docstring(class_type, attr_filter=default_attr_filter, extended=False):
def format_attribute(x):
attr = getattr(class_type, x)
if isinstance(attr, property):
name = f'.{x}'
else:
if extended:
sig = str(inspect.signature(attr)).replace('self, ', '')
else:
sig = '()'
name = f'.{x}{sig}'
if extended:
doc = get_attr_docstring(class_type, x)
else:
doc = ''
return f'{name}{doc}'
def filter_attribute(x):
return all(
[
not x.startswith('_'),
attr_filter(x),
not isinstance(getattr(class_type, x), property),
]
)
return '\n'.join(
map(
format_attribute,
filter(
filter_attribute,
dir(class_type),
),
),
)
def inline_doc(method):
if not is_interactive():
return method
doc = [repr(method)]
if method.__doc__:
doc.append(re.sub(r' {3,}', '', method.__doc__))
class CustomReprDescriptor:
def __get__(self, instance, owner):
class MethodWrapper:
def __init__(self):
self.class_instance = instance
self.doc = '\n'.join(doc)
def __call__(self, *args, **kwargs):
return method(self.class_instance, *args, **kwargs)
def __repr__(self):
return self.doc
return update_wrapper(MethodWrapper(), method)
return CustomReprDescriptor()
class InlineDocstring(type):
def __new__(mcs, name, bases, attrs, **kwargs):
if is_interactive():
new_attrs = {}
for attr_name, attr in attrs.items():
if callable(attr) and attr.__doc__ and not attr_name.startswith('_'):
attr = inline_doc(attr)
new_attrs[attr_name] = attr
else:
new_attrs = attrs
return type.__new__(mcs, name, bases, new_attrs, **kwargs)
|
20498
|
import logging
import inspect
import re
from collections import OrderedDict
from gremlinpy.gremlin import Gremlin, Param, AS
from .entity import (_Entity, Vertex, Edge, GenericVertex, GenericEdge,
ENTITY_MAP)
from .exception import (AstronomerQueryException, AstronomerMapperException)
from .traversal import Traversal
from .util import (camel_to_underscore, GIZMO_ID, GIZMO_LABEL, GIZMO_TYPE,
GIZMO_ENTITY, GIZMO_VARIABLE, entity_name)
logger = logging.getLogger(__name__)
ENTITY_MAPPER_MAP = {}
GENERIC_MAPPER = 'generic.mapper'
_count = -1
_query_count = 0
_query_params = {}
def next_query_variable():
global _count
_count += 1
return '{}_{}'.format(GIZMO_VARIABLE, _count)
def get_entity_mapper(entity=None, name=GENERIC_MAPPER):
if isinstance(entity, _Entity):
name = get_qualified_instance_name(entity)
else:
name = get_qualified_name(entity)
if name not in ENTITY_MAPPER_MAP:
name = GENERIC_MAPPER
return ENTITY_MAPPER_MAP[name](self)
def next_param_name(param):
    param = re.sub(r'\W', '_', param)
if param not in _query_params:
_query_params[param] = -1
_query_params[param] += 1
return '{}_{}'.format(param, _query_params[param])
def next_param(param, value):
if isinstance(value, _Entity):
value = entity_name(value)
return Param(next_param_name(param), value)
def next_entity_param(entity, param, value):
name = entity_name(entity)
field = '{}_{}'.format(name, param)
return next_param(field, value)
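# Hedged examples of the parameter helpers above (illustrative values only):
#   next_param_name('user name') -> 'user_name_0', then 'user_name_1' on reuse
#   next_param('age', 30)        -> Param('age_0', 30)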
class Mapper:
def __init__(self, request, gremlin=None, auto_commit=True,
graph_instance_name=None):
if not gremlin:
gremlin = Gremlin()
self.request = request
self.gremlin = gremlin
self.auto_commit = auto_commit
self.graph_instance_name = graph_instance_name
if not self.auto_commit and not self.graph_instance_name:
error = ('If auto_commit is set, we need to know the'
' graph instance name')
logger.exception(error)
            raise ValueError(error)
self.reset()
def reset(self):
self.gremlin.reset()
global _query_count
global _count
global _query_params
_query_count = 0
_count = 0
_query_params = {}
self.queries = []
self.return_vars = []
self.entities = OrderedDict() # ensure FIFO for testing
self.del_entities = {}
self.params = {}
self.callbacks = {}
self._magic_method = None
def get_entity_variable(self, entity):
ret = None
for key, def_entity in self.entities.items():
if entity == def_entity:
return key
return ret
def get_mapper(self, entity=None, name=GENERIC_MAPPER):
if entity is not None:
name = entity_name(entity)
if name not in ENTITY_MAPPER_MAP:
name = GENERIC_MAPPER
return ENTITY_MAPPER_MAP[name](self)
def enqueue_mapper(self, mapper):
self.queries += mapper.queries
self.return_vars += mapper.return_vars
self.entities.update(mapper.entities)
self.params.update(mapper.params)
for entity, callbacks in mapper.callbacks.items():
            existing = self.callbacks.get(entity, [])
            self.callbacks[entity] = existing + callbacks
mapper.reset()
return self
def enqueue_script(self, gremlin=None, script=None, params=None):
if gremlin is not None:
script = [str(gremlin),]
params = gremlin.bound_params
gremlin.reset()
if script:
self.queries += script
if params:
self.params.update(params)
return self
def __getattr__(self, magic_method):
"""magic method that works in conjunction with __call__
method these two methods are used to shortcut the retrieval
of an entity's mapper and call a specific method against
this chain:
user = User()
user_mapper = mapper.get_mapper(user)
emails = user_mapper.get_emails(user)
can be shortened into:
user = User()
emails = mapper.get_emails(user)
"""
self._magic_method = magic_method
return self
def __call__(self, *args, **kwargs):
mapper = self.get_mapper(args[0])
return getattr(mapper, self._magic_method)(*args, **kwargs)
async def data(self, entity, *args):
"""utility method used to retrieve an entity's data. It
also allows for method chaining in order to augment the
resulting data.
class MyMapper(_GenericMapper):
async def add_two(self, entity, data):
data['two'] = 2
return data
async def add_three(self, entity, data):
data['three'] = 3
return data
entity = User()
data = await mapper.data(user, 'add_two', 'add_three')
the resulting data will have the data from the User class,
plus a two and a three member
"""
collection = isinstance(entity, Collection)
async def get_data(entity, data):
retrieved = data
for method in args:
mapper = self.get_mapper(entity)
async def wrapper(entity, data):
res = await getattr(mapper, method)(entity=entity,
data=data)
return res
retrieved = await wrapper(entity=entity,
data=retrieved)
return retrieved
if collection:
data = []
for coll_entity in entity:
mapper = self.get_mapper(coll_entity)
entity_data = await mapper.data(coll_entity)
res = await get_data(coll_entity, entity_data)
data.append(res)
else:
mapper = self.get_mapper(entity)
entity_data = await mapper.data(entity)
data = await get_data(entity, entity_data)
return data
def save(self, entity, bind_return=True, mapper=None,
callback=None, **kwargs):
if mapper is None:
mapper = self.get_mapper(entity)
        logger.debug(('Saving entity: {} with mapper:'
                      ' {}').format(repr(entity), mapper))
mapper.save(entity, bind_return, callback, **kwargs)
return self.enqueue_mapper(mapper)
def delete(self, entity, mapper=None, callback=None):
if mapper is None:
mapper = self.get_mapper(entity)
        logger.debug(('Deleting entity: {} with mapper:'
                      ' {}').format(repr(entity), mapper))
mapper.delete(entity, callback=callback)
# manually add the deleted entity to the self.entities
# collection for callbacks
from random import randrange
key = 'DELETED_%s_entity' % str(randrange(0, 999999999))
self.del_entities[key] = entity
return self.enqueue_mapper(mapper)
def create(self, data=None, entity=None, data_type='python'):
if data is None:
data = {}
if entity:
mapper = self.get_mapper(entity)
else:
name = data.get(GIZMO_ENTITY, GENERIC_MAPPER)
if isinstance(name, (list, tuple)):
name = name[0]['value']
mapper = self.get_mapper(name=name)
kwargs = {
'data': data,
'entity': entity,
'data_type': data_type,
}
return mapper.create(**kwargs)
def connect(self, out_v, in_v, label=None, data=None, edge_entity=None,
data_type='python'):
"""
method used to connect two vertices and create an Edge object
the resulting edge is not saved to to graph until it is passed to
save allowing further augmentation
"""
if not isinstance(out_v, Vertex):
if not isinstance(out_v, (str, int)):
err = 'The out_v needs to be either a Vertex or an id'
logger.exception(err)
raise AstronomerMapperException(err)
if not isinstance(in_v, Vertex):
if not isinstance(in_v, (str, int)):
err = 'The in_v needs to be either a Vertex or an id'
logger.exception(err)
raise AstronomerMapperException(err)
if data is None:
data = {}
data['outV'] = out_v
data['inV'] = in_v
data[GIZMO_TYPE] = 'edge'
data[GIZMO_LABEL[0]] = label
return self.create(data=data, entity=edge_entity, data_type=data_type)
def start(self, entity):
mapper = self.get_mapper(entity)
return mapper.start(entity)
def _build_queries(self):
if len(self.return_vars) > 0:
returns = []
for k in self.return_vars:
returns.append("'{}': {}".format(k, k))
ret = '[{}]'.format(', '.join(returns))
self.queries.append(ret)
return self
def get(self, entity):
mapper = self.get_mapper(entity)
return mapper.get(entity)
def apply_statement(self, statement):
self.gremlin.apply_statement(statement)
return self
async def send(self):
self._build_queries()
script = ";\n".join(self.queries)
params = self.params
entities = self.entities
callbacks = self.callbacks
entities.update(self.del_entities)
self.reset()
res = await self.query(script=script, params=params,
update_entities=entities, callbacks=callbacks)
return res
async def query(self, script=None, params=None, gremlin=None,
update_entities=None, callbacks=None, collection=None):
if gremlin is not None:
script = str(gremlin)
params = gremlin.bound_params
gremlin.reset()
if script is None:
script = ''
if params is None:
params = {}
if update_entities is None:
update_entities = {}
self.reset()
response = await self.request.send(script, params, update_entities)
for k, entity in update_entities.items():
cbs = callbacks.get(entity, [])
for c in cbs:
c(entity)
if not collection:
collection = Collection
return collection(self, response)
class _RootMapper(type):
"""
In the case of custom mappers, this metaclass will register the entity name
with the mapper object. This is done so that when entities are loaded by
name the associated mapper is used to CRUD it.
This only works when the Mapper.create method is used to
create the entity
"""
def __new__(cls, name, bases, attrs):
cls = super(_RootMapper, cls).__new__(cls, name, bases, attrs)
entity = attrs.pop('entity', None)
if entity:
map_name = entity_name(entity)
ENTITY_MAPPER_MAP[map_name] = cls
elif name == 'EntityMapper':
ENTITY_MAPPER_MAP[GENERIC_MAPPER] = cls
return cls
def __call__(cls, *args, **kwargs):
mapper = super(_RootMapper, cls).__call__(*args, **kwargs)
for field in dir(mapper):
if field.startswith('_'):
continue
val = getattr(mapper, field)
if inspect.isclass(val) and issubclass(val, EntityMapper):
if mapper.mapper:
instance = val(mapper.mapper)
setattr(mapper, field, instance)
return mapper
class EntityMapper(metaclass=_RootMapper):
VARIABLE = GIZMO_VARIABLE
unique = False
unique_fields = None
save_statements = None
def __init__(self, mapper=None):
self.mapper = mapper
self.gremlin = None
if self.mapper:
self.gremlin = mapper.gremlin
self.reset()
def reset(self):
self.queries = []
self.return_vars = []
self.entities = {}
self.params = {}
self.callbacks = {}
async def data(self, entity):
return entity.data
def get(self, entity):
trav = self.start(entity)
vertex = issubclass(self.entity, Vertex)
param_value = str(self.entity)
param_name = 'out_{}_{}'.format(entity.__class__.__name__, param_value)
entity_param = next_param(param_name, param_value)
if vertex:
trav.out().hasLabel(entity_param)
else:
trav.outE(entity_param)
return trav
def enqueue(self, query, bind_return=True):
for entry in query.queries:
script = entry['script']
if script in self.queries:
continue
if bind_return:
variable = next_query_variable()
script = '{} = {}'.format(variable, script)
if 'entity' in entry:
self.entities[variable] = entry['entity']
self.return_vars.append(variable)
self.queries.append(script)
self.params.update(entry['params'])
return self
def _enqueue_callback(self, entity, callback):
if callback:
listed = self.callbacks.get(entity, [])
if isinstance(callback, (list, tuple)):
listed += list(callback)
elif callback:
listed.append(callback)
self.callbacks[entity] = listed
return self
def on_create(self, entity):
pass
def on_update(self, entity):
pass
def on_delete(self, entity):
pass
def _build_save_statements(self, entity, query, **kwargs):
statement_query = Query(self.mapper)
query_gremlin = Gremlin(self.gremlin.gv)
for entry in query.queries:
query_gremlin.bind_params(entry['params'])
for statement in self.save_statements:
instance = statement(entity, self, query, **kwargs)
query_gremlin.apply_statement(instance)
statement_query._add_query(str(query_gremlin),
query_gremlin.bound_params, entity=entity)
return statement_query
def start(self, entity=None):
return Traversal(self.mapper, entity or self.entity)
def save(self, entity, bind_return=True, callback=None, *args, **kwargs):
"""callback and be a single callback or a list of them"""
method = '_save_edge' if entity[GIZMO_TYPE] == 'edge' else \
'_save_vertex'
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
if entity[GIZMO_ID]:
callback.insert(0, self.on_update)
else:
callback.insert(0, self.on_create)
self._enqueue_callback(entity, callback)
return getattr(self, method)(entity=entity, bind_return=bind_return)
def _save_vertex(self, entity, bind_return=True):
"""
method used to save a entity. IF both the unique_type and unique_fields
params are set, it will run a sub query to check to see if an entity
exists that matches those values
"""
query = Query(self.mapper)
ref = self.mapper.get_entity_variable(entity)
"""
check to see if the entity has been used already in the current script
execution.
If it has use the reference
if it hasnt, go through the process of saving it
"""
if ref:
query._add_query(ref, params=None, entity=entity)
return self.enqueue(query, bind_return)
query.save(entity)
if not entity[GIZMO_ID] and self.unique_fields:
from .statement import MapperUniqueVertex
if not self.save_statements:
self.save_statements = []
if MapperUniqueVertex not in self.save_statements:
self.save_statements.append(MapperUniqueVertex)
if self.save_statements and len(self.save_statements):
statement_query = self._build_save_statements(entity, query)
return self.enqueue(statement_query, bind_return)
else:
return self.enqueue(query, bind_return)
def _save_edge(self, entity, bind_return=True):
query = Query(self.mapper)
save = True
edge_ref = self.mapper.get_entity_variable(entity)
out_v = entity.out_v
out_v_id = out_v[GIZMO_ID] if isinstance(out_v, Vertex) else None
in_v = entity.in_v
in_v_id = in_v[GIZMO_ID] if isinstance(in_v, Vertex) else None
out_v_ref = self.mapper.get_entity_variable(out_v)
in_v_ref = self.mapper.get_entity_variable(in_v)
if edge_ref:
query._add_query(edge_ref, params=None, entity=entity)
return self.enqueue(query, bind_return)
"""
both out_v and in_v are checked to see if the entities stored in each
respective variable has been used.
If they have not and they are Vertex instances with an empty _id,
send them to be saved.
if they have been used, use the reference variable in the create edge
logic
"""
query.save(entity)
if not entity[GIZMO_ID] and self.unique and in_v_id and out_v_id:
from .statement import MapperUniqueEdge
if not self.save_statements:
self.save_statements = []
if MapperUniqueEdge not in self.save_statements:
self.save_statements.append(MapperUniqueEdge)
if self.save_statements and len(self.save_statements):
statement_query = self._build_save_statements(entity, query,
out_v_id=out_v_id, in_v_id=in_v_id,
label=entity[GIZMO_LABEL[0]], direction=self.unique)
return self.enqueue(statement_query, False)
else:
return self.enqueue(query, bind_return)
def delete(self, entity, lookup=True, callback=None):
query = Query(self.mapper)
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
query.delete(entity)
callback.insert(0, self.on_delete)
self._enqueue_callback(entity, callback)
return self.enqueue(query, False)
def create(self, data=None, entity=None, data_type='python'):
"""
Method used to create a new entity based on the data that is passed in.
If the kwarg entity is passed in, it will be used to create the
entity else if utils.GIZMO_ENTITY is in data, that will be used
finally, entity.GenericVertex or entity.GenericEdge will be used to
construct the entity
"""
check = True
if data is None:
data = {}
if entity is not None:
try:
label = data.get(GIZMO_LABEL[0], None)
entity = entity(data=data, data_type=data_type)
check = False
for f, r in entity._relationships.items():
r._mapper = self.mapper
r._entity = entity
except Exception as e:
pass
if check:
try:
if GIZMO_ENTITY in data:
name = data[GIZMO_ENTITY]
if isinstance(name, (list, tuple)):
name = name[0]['value']
entity = ENTITY_MAP[name](data=data, data_type=data_type)
for f, r in entity._relationships.items():
r._mapper = self.mapper
r._entity = entity
else:
raise
except Exception as e:
# all else fails create a GenericVertex unless _type is 'edge'
if data.get(GIZMO_TYPE, None) == 'edge':
entity = GenericEdge(data=data, data_type=data_type)
else:
entity = GenericVertex(data=data, data_type=data_type)
if GIZMO_ID in data:
entity[GIZMO_ID] = data[GIZMO_ID]
return entity
class Query:
def __init__(self, mapper):
self.mapper = mapper
self.gremlin = Gremlin(self.mapper.gremlin.gv)
self.queries = []
self.fields = []
self.reset()
def reset(self):
self.fields = []
return self
def _add_query(self, script, params=None, entity=None):
if params is None:
params = {}
self.queries.append({
'script': script,
'params': params,
'entity': entity,
})
return self
def _add_gremlin_query(self, entity=None):
script = str(self.gremlin)
params = self.gremlin.bound_params
self._add_query(script, params, entity)
return self.reset()
def _field_changes(self, gremlin, entity, ignore=None):
ignore = ignore or []
entity_name = str(entity)
entity_alias = '{}_alias'.format(entity_name)
entity_alias = next_param(entity_alias, entity_alias)
def add_field(field, data):
values = data.get('values', data.get('value', None))
if not isinstance(values, (list, tuple,)):
values = [values, ]
for i, value in enumerate(values):
name = '{}_{}_{}'.format(entity_name, field, i)
prop = "'{}'".format(field)
gremlin.property(prop, Param(name, value))
def add_property(field, value, properties=None, ignore=None):
ignore = ignore or []
if field.startswith('T.'):
val_param = next_param('{}_{}'.format(entity_name,
field), value)
gremlin.unbound('property', field, val_param)
return
field_name = '{}_{}'.format(entity_name, field)
prop = next_param(field_name, field)
value_name = '{}_value'.format(field_name)
value_param = next_param(value_name, value)
params = [prop, value_param]
if properties:
for key, val in properties.items():
prop_key = next_param('{}_{}'.format(prop.name,
key), key)
prop_val = next_param('{}_{}_val'.format(prop.name,
key), val)
params += [prop_key, prop_val]
gremlin.property(*params)
for field, changes in entity.changes.items():
if field in ignore:
continue
if changes['immutable']:
for val in changes['values']['values']:
add_property(field, val)
elif changes['deleted']:
prop = next_param('{}_{}'.format(entity_name, field), field)
remove = Gremlin('').it.get().func('remove')
gremlin.AS(entity_alias).properties(prop)
gremlin.sideEffect.close(remove)
gremlin.select(entity_alias)
else:
for action, value in changes['values'].items():
if action == 'added':
for val in value:
add_property(field, val['value'],
val['properties'])
def _add_vertex(self, entity, set_variable=None):
entity.data_type = 'graph'
gremlin = self.gremlin
label = None
ignore = ['T.label', 'label']
if entity['label']:
label = next_entity_param(entity, 'label', entity['label'])
gremlin.unbound('addV', 'T.label', label)
else:
gremlin.addV()
if set_variable:
gremlin.set_ret_variable(set_variable, ignore=[GIZMO_ID, ])
self._field_changes(gremlin, entity, ignore=ignore)
gremlin.func('next')
entity.data_type = 'python'
return self._add_gremlin_query(entity)
def _update_entity(self, entity, set_variable=None):
entity.data_type = 'graph'
gremlin = self.gremlin
entity_type, entity_id = entity.get_rep()
if not entity_id:
error = (('The entity {} scheduled to be updated does not have'
' an id').format(str(entity)))
logger.exception(error)
            raise AstronomerQueryException(error)
_id = next_param('{}_ID'.format(str(entity)), entity_id)
ignore = [GIZMO_ID, GIZMO_LABEL[1]]
alias = '{}_{}_updating'.format(entity_type, entity_id)
alias = next_param(alias, alias)
getattr(gremlin, entity_type.upper())(_id)
gremlin.AS(alias)
self._field_changes(gremlin, entity, ignore=ignore)
gremlin.select(alias).next()
entity.data_type = 'python'
return self._add_gremlin_query(entity)
def _add_edge(self, entity, set_variable=None):
if not entity[GIZMO_LABEL[0]]:
msg = 'A label is required in order to create an edge'
logger.exception(msg)
raise AstronomerQueryException(msg)
def get_or_create_ends():
"""this function will determine if the edge has both ends. If
either end is an _Entity object it will get the reference to
the object or save it and create a reference. Either the entity's
id or reference will be used when saving the edge.
"""
out_v = entity.out_v
out_v_ref = None
in_v = entity.in_v
in_v_ref = None
if out_v is None or in_v is None:
error = ('Both out and in vertices must be set before'
' saving the edge')
logger.exception(error)
raise AstronomerQueryException(error)
if isinstance(out_v, _Entity):
if out_v[GIZMO_ID]:
out_v = out_v[GIZMO_ID]
else:
out_v_ref = self.mapper.get_entity_variable(out_v)
if not out_v_ref:
self.mapper.save(out_v)
out_v_ref = self.mapper.get_entity_variable(out_v)
if out_v_ref:
out_v = out_v_ref
if isinstance(in_v, _Entity):
if in_v[GIZMO_ID]:
in_v = in_v[GIZMO_ID]
else:
in_v_ref = self.mapper.get_entity_variable(in_v)
if not in_v_ref:
self.mapper.save(in_v)
in_v_ref = self.mapper.get_entity_variable(in_v)
if in_v_ref:
in_v = in_v_ref
return {
'out': {
'is_ref': out_v_ref,
'v': out_v,
},
'in': {
'is_ref': in_v_ref,
'v': in_v,
},
}
ends = get_or_create_ends()
name = str(entity)
gremlin = self.gremlin
g = Gremlin(gremlin.gv)
label = next_param('{}_label'.format(name), entity[GIZMO_LABEL[0]])
"""
g.V($OUT_ID).next().addEdge($LABEL, g.V($IN_ID).next()).property(....)
"""
in_v = ends['in']
out_v = ends['out']
if in_v['is_ref']:
g.unbound('V', in_v['v'])
else:
in_id = next_param('{}_in'.format(name), in_v['v'])
g.V(in_id)
g.func('next')
if out_v['is_ref']:
gremlin.unbound('V', out_v['v'])
else:
out_id = next_param('{}_out'.format(name), out_v['v'])
gremlin.V(out_id)
ignore = [GIZMO_LABEL[0], GIZMO_LABEL[1], GIZMO_TYPE]
edge_args = [label, g]
        # edge properties only get one value and no meta-properties
for field, changes in entity.changes.items():
if field in ignore:
continue
try:
if changes['immutable']:
value = changes['values']['values'][-1]
else:
value = changes['values'][-1]
            except Exception:
continue
field_param = next_param('{}_{}'.format(name, field), field)
field_value = next_param('{}_value'.format(field_param.name),
value)
edge_args += [field_param, field_value]
gremlin.func('next').addEdge(*edge_args)
return self._add_gremlin_query(entity)
def save(self, entity, set_variable=None):
if not entity[GIZMO_TYPE]:
msg = 'The entity does not have a type defined'
logger.exception(msg)
raise AstronomerQueryException(msg)
entity_type = entity[GIZMO_TYPE]
if not entity[GIZMO_ID]:
if entity_type == 'vertex':
self._add_vertex(entity, set_variable)
else:
self._add_edge(entity, set_variable)
else:
self._update_entity(entity, set_variable)
def delete(self, entity):
entity_type, _id = entity.get_rep()
if not _id:
            msg = ('The entity does not have an id defined and'
                   ' cannot be deleted')
logger.exception(msg)
raise AstronomerQueryException(msg)
if not entity[GIZMO_TYPE]:
msg = 'The entity does not have a type defined'
logger.exception(msg)
raise AstronomerQueryException(msg)
delete = next_param('{}_ID'.format(str(entity)), _id)
getattr(self.gremlin, entity_type)(delete).next().func('remove')
return self._add_gremlin_query(entity)
class Collection(object):
def __init__(self, mapper, response=None):
self.mapper = mapper
if not response:
response = lambda: None
response.data = []
self.response = response
self._entities = {}
self._index = 0
self._data_type = 'python'
def first(self):
return self[0]
def last(self):
return self[-1]
def get_data(self):
return [x for x in self.response.data]
data = property(get_data)
@property
def entity_data(self):
"""
this will get the instance data instead of the
raw data. This will use the mapper to create each
entity. Which may have a custom data attribute
"""
return [x.data for x in self]
@property
async def mapper_data(self):
"""this will get the data from the entity's mapper if it has a
custom mapper
"""
data = []
if len(self):
mapper = self.mapper.get_mapper(self[0])
for entity in self:
data.append(await mapper.data(entity))
return data
def __len__(self):
return len(self.response.data)
def __getitem__(self, key):
entity = self._entities.get(key, None)
if entity is None:
try:
data = self.response[key]
if data is not None:
entity = self.mapper.create(data=data,
data_type=self._data_type)
entity.dirty = False
self._entities[key] = entity
else:
raise StopIteration()
            except Exception:
raise StopIteration()
return entity
def __setitem__(self, key, value):
self._entities[key] = value
def __delitem__(self, key):
if key in self._entities:
del self._entities[key]
def __iter__(self):
return self
def __next__(self):
entity = self[self._index]
self._index += 1
return entity
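# Hypothetical usage sketch: the mapper and response stand-ins below are invented
# for illustration (real ones come from the mapper layer above). It shows the lazy
# materialization in __getitem__: the mapper builds an entity from response[key]
# on first access and the result is cached in self._entities.
if __name__ == '__main__':
    import unittest.mock as mock
    response = mock.MagicMock()
    response.data = [{'id': 1}, {'id': 2}]
    response.__getitem__.side_effect = lambda key: response.data[key]
    mapper = mock.MagicMock()
    mapper.create.side_effect = lambda data, data_type: mock.MagicMock(data=data)
    collection = Collection(mapper, response)
    assert len(collection) == 2
    first = collection.first()            # built via mapper.create(...)
    assert first is collection[0]         # second access hits the cache
    # lazy iteration via __next__ stops when the response runs out of data
    assert [e.data for e in collection] == response.data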
|
20506
|
class InterpreterException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class SymbolNotFound(InterpreterException):
pass
class UnexpectedCharacter(InterpreterException):
pass
class ParserSyntaxError(InterpreterException):
pass
class DuplicateSymbol(InterpreterException):
pass
class InterpreterRuntimeError(InterpreterException):
pass
class InvalidParamCount(InterpreterRuntimeError):
pass
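# Minimal usage sketch: callers can catch the InterpreterException base class to
# handle any interpreter error uniformly (the example message is invented).
if __name__ == '__main__':
    try:
        raise SymbolNotFound('undefined symbol: foo')
    except InterpreterException as exc:
        print(exc)  # prints: undefined symbol: foo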
|
20520
|
import cv2
import ProcessWithCV2
img1 = cv2.imread("D:/py/chinese/7.png")
img2 = cv2.imread("D:/py/chinese/8.png")
a = ProcessWithCV2.dHash(img1, img2, 1)
print(a)
|
20540
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'popcorn_gallery.tutorials.views',
url(r'^(?P<slug>[\w-]+)/$', 'object_detail', name='object_detail'),
url(r'^$', 'object_list', name='object_list'),
)
|
20547
|
from distutils.spawn import find_executable
from os import path
import click
from .settings import (
BASE_DEVELOPMENT_REQUIREMENTS_FILENAME,
BASE_REQUIREMENTS_FILENAME,
DEVELOPMENT_REQUIREMENTS_FILENAME,
REQUIREMENTS_FILENAME,
)
from .util import print_and_run
def _ensure_pip_tools_installed():
if not find_executable('pip-sync'):
click.echo('Installing pip-tools')
print_and_run(('pip', 'install', 'pip-tools'))
@click.group()
def cli():
pass
@cli.command()
@click.option('--dev', is_flag=True, default=False)
def install(dev):
_ensure_pip_tools_installed()
requirements_file = (
DEVELOPMENT_REQUIREMENTS_FILENAME
if dev
else REQUIREMENTS_FILENAME
)
print_and_run(('pip-sync', requirements_file))
click.echo('Requirements setup complete!')
@cli.command()
def update():
_ensure_pip_tools_installed()
print_and_run((
'pip-compile',
'-q',
f'--output-file={path.relpath(REQUIREMENTS_FILENAME)}',
path.relpath(BASE_REQUIREMENTS_FILENAME),
))
    click.echo(f'Requirements file updated: {path.relpath(REQUIREMENTS_FILENAME)}')
@cli.command()
def update_dev():
_ensure_pip_tools_installed()
print_and_run((
'pip-compile',
'-q',
f'--output-file={path.relpath(DEVELOPMENT_REQUIREMENTS_FILENAME)}',
path.relpath(BASE_DEVELOPMENT_REQUIREMENTS_FILENAME),
))
click.echo(f'Development requirements file updated: {DEVELOPMENT_REQUIREMENTS_FILENAME}')
if __name__ == '__main__':
cli()
|
20601
|
import sys
sys.path.append("../../configs")
#../../configs
from path import EXP_PATH
import numpy as np
DECAY_PARAMS_DICT =\
{
'stair' :
{
128 :{
'a1': {'initial_lr' : 1e-5, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a2' : {'initial_lr' : 3e-4, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a3' : {'initial_lr' : 1e-3, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a4' : {'initial_lr' : 3e-3, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a5' : {'initial_lr' : 1e-2, 'decay_steps' : 50000, 'decay_rate' : 0.3}
}
},
'piecewise' :
{
128 : {
'a1' : {'boundaries' : [10000, 20000], 'values' : [1e-4, 3e-5, 1e-5]},
'a2' : {'boundaries' : [10000, 20000], 'values' : [3e-4, 1e-4, 3e-5]},
'a3' : {'boundaries' : [10000, 20000], 'values' : [1e-3, 3e-4, 1e-4]},
'a4' : {'boundaries' : [10000, 20000], 'values' : [3e-3, 1e-3, 3e-4]},
'a5' : {'boundaries' : [10000, 20000], 'values' : [1e-2, 3e-3, 1e-3]},
'b1' : {'boundaries' : [20000, 35000], 'values' : [1e-4, 3e-5, 1e-5]},
'b2' : {'boundaries' : [20000, 35000], 'values' : [3e-4, 1e-4, 3e-5]},
'b3' : {'boundaries' : [20000, 35000], 'values' : [1e-3, 3e-4, 1e-4]},
'b4' : {'boundaries' : [20000, 35000], 'values' : [3e-3, 1e-3, 3e-4]},
'b5' : {'boundaries' : [20000, 35000], 'values' : [1e-2, 3e-3, 1e-3]}
}
}
}
ACTIVATE_K_SET = np.arange(1, 5)
K_SET = [1,4,16]
RESULT_DIR = EXP_PATH+"cifar_exps/"
#========================PARAM============================#
DATASET= 'cifar'
GPU_ID = 0
BATCH_SIZE = 128
EPOCH = 300
NSCLASS = 16
# model
EMBED_M= 64
CONV_NAME = 'conv1'
# metric loss
LOSS_TYPE = 'triplet'
MARGIN_ALPHA = 0.3
LAMBDA = 0.003 # regularization for npair
# learning
DECAY_TYPE = 'stair'
DECAY_PARAM_TYPE = 'a3'
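# Illustrative lookup sketch: how the settings above typically resolve to a single
# decay configuration (the name `decay_param` is ours, not part of the config).
decay_param = DECAY_PARAMS_DICT[DECAY_TYPE][BATCH_SIZE][DECAY_PARAM_TYPE]
# -> {'initial_lr': 1e-3, 'decay_steps': 50000, 'decay_rate': 0.3}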
|
20613
|
from aiohttp.test_utils import TestClient
import pytest
import typing
import unittest.mock
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.model.character import MINIMUM_BEFORE_EXHAUSTED
from rolling.server.document.affinity import AffinityDirectionType
from rolling.server.document.affinity import AffinityJoinType
from rolling.server.document.affinity import CHIEF_STATUS
from rolling.server.document.affinity import MEMBER_STATUS
from rolling.server.document.build import BuildDocument
from rolling.server.document.build import DOOR_MODE_LABELS
from rolling.server.document.build import DOOR_MODE__CLOSED
from rolling.server.document.build import DOOR_MODE__CLOSED_EXCEPT_FOR
from rolling.server.document.build import DoorDocument
@pytest.fixture
def websocket_prepare_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
with unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.prepare") as mock_:
yield mock_
@pytest.fixture
def zone_event_manager_listen_mock() -> typing.Generator[
unittest.mock.AsyncMock, None, None
]:
with unittest.mock.patch(
"rolling.server.zone.websocket.ZoneEventsManager._listen"
) as mock_:
yield mock_
@pytest.fixture
def zone_event_manager_close_mock() -> typing.Generator[
unittest.mock.AsyncMock, None, None
]:
with unittest.mock.patch(
"rolling.server.zone.websocket.ZoneEventsManager.close_websocket"
) as mock_:
yield mock_
@pytest.fixture
def socket_send_str_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
with unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.send_str") as mock_:
yield mock_
class TestDoor:
def _place_door(self, kernel: Kernel) -> DoorDocument:
build = kernel.build_lib.place_build(
world_row_i=1,
world_col_i=1,
zone_row_i=10,
zone_col_i=10,
build_id="DOOR",
under_construction=False,
)
return build
def _create_rule(
self,
kernel: Kernel,
author: CharacterModel,
door: BuildDocument,
mode: str,
affinity_ids: typing.Optional[typing.List[int]],
) -> None:
kernel.door_lib.update(
character_id=author.id,
build_id=door.id,
new_mode=mode,
new_affinity_ids=affinity_ids,
)
def test_one_rule_lock__author_here__stranger_cant(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
def test_one_rule_lock_except__author_here__stranger_cant_but_member_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_franck_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
franck = worldmapc_franck_model
# Given
aff = kernel.affinity_lib.create(
name="aff1",
join_type=AffinityJoinType.ACCEPT_ALL,
direction_type=AffinityDirectionType.ONE_DIRECTOR,
)
kernel.affinity_lib.join(
character_id=xena.id,
affinity_id=aff.id,
accepted=True,
request=False,
status_id=CHIEF_STATUS[0],
)
kernel.affinity_lib.join(
character_id=franck.id,
affinity_id=aff.id,
accepted=True,
request=False,
status_id=MEMBER_STATUS[0],
)
door = self._place_door(kernel)
self._create_rule(
kernel,
author=xena,
door=door,
mode=DOOR_MODE__CLOSED_EXCEPT_FOR,
affinity_ids=[aff.id],
)
# When
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=franck.id
)
def test_two_rule_lock__author_here_and_first_can__stranger_second_cant(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
self._create_rule(
kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_two_rule_lock__author_first_travel__stranger_second_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
self._create_rule(
kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
await kernel.character_lib.move(
character=xena,
to_world_row=2,
to_world_col=2,
)
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
        # Given 3
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=1,
)
# When/Then 3
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_one_rule_lock__author_first_travel__stranger_second_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
await kernel.character_lib.move(
character=xena,
to_world_row=2,
to_world_col=2,
)
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
        # Given 3
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=1,
)
# When/Then 3
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_one_rule_lock__author_dead__stranger_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
kernel.character_lib.kill(character_id=xena.id)
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_one_rule_lock__author_vulnerable__stranger_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
xena_doc = kernel.character_lib.get_document(xena.id)
xena_doc.tiredness = MINIMUM_BEFORE_EXHAUSTED + 1
kernel.server_db_session.add(xena_doc)
kernel.server_db_session.commit()
xena = kernel.character_lib.get(id_=xena.id)
assert xena.vulnerable
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
@pytest.mark.usefixtures("websocket_prepare_mock")
@pytest.mark.usefixtures("zone_event_manager_listen_mock")
@pytest.mark.usefixtures("zone_event_manager_close_mock")
async def test_events_when_door_author_left_when_back_in_zone(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
socket_send_str_mock: unittest.mock.AsyncMock,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
request_mock = unittest.mock.AsyncMock()
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
_ = await kernel.server_zone_events_manager.get_new_socket(
request=request_mock,
row_i=1,
col_i=1,
character_id=arthur.id,
)
# When
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=2,
)
# Then
socket_send_str_mock.assert_awaited()
events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
assert any(['{"WALKING":true}' in event_str for event_str in events_str_list])
# When
socket_send_str_mock.reset_mock()
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=1,
)
# Then
socket_send_str_mock.assert_awaited()
events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
@pytest.mark.usefixtures("websocket_prepare_mock")
@pytest.mark.usefixtures("zone_event_manager_listen_mock")
@pytest.mark.usefixtures("zone_event_manager_close_mock")
async def test_events_when_door_author_update_rule(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
socket_send_str_mock: unittest.mock.AsyncMock,
worldmapc_web_app: TestClient,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
request_mock = unittest.mock.AsyncMock()
web = worldmapc_web_app
# Given
door = self._place_door(kernel)
_ = await kernel.server_zone_events_manager.get_new_socket(
request=request_mock,
row_i=1,
col_i=1,
character_id=arthur.id,
)
# When
response = await web.post(
f"/character/{xena.id}/door/{door.id}?mode={DOOR_MODE_LABELS[DOOR_MODE__CLOSED]}"
)
assert response.status == 200
# Then
socket_send_str_mock.assert_awaited()
events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
|
20623
|
from contextlib import contextmanager
import pytest
from sai import SaiObjType
@contextmanager
def config(npu):
topo_cfg = {
"lo_rif_oid": None,
"cpu_port_oid": None,
}
# Create Loopback RIF
lo_rif_oid = npu.create(SaiObjType.ROUTER_INTERFACE,
[
"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", npu.default_vrf_oid,
"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_LOOPBACK",
"SAI_ROUTER_INTERFACE_ATTR_MTU", "9100"
])
topo_cfg["lo_rif_oid"] = lo_rif_oid
# Get CPU port
cpu_port_oid = npu.get(npu.oid, ["SAI_SWITCH_ATTR_CPU_PORT", "oid:0x0"]).oid()
topo_cfg["cpu_port_oid"] = cpu_port_oid
# Get port HW lanes
for oid in npu.port_oids:
port_lanes = npu.get(oid, ["SAI_PORT_ATTR_HW_LANE_LIST", "8:0,0,0,0,0,0,0,0"]).to_list()
# Remove default VLAN members
vlan_mbr_oids = npu.get_list(npu.default_vlan_oid, "SAI_VLAN_ATTR_MEMBER_LIST", "oid:0x0")
for oid in vlan_mbr_oids:
npu.remove(oid)
# Remove default 1Q bridge members
dot1q_mbr_oids = npu.get_list(npu.dot1q_br_oid, "SAI_BRIDGE_ATTR_PORT_LIST", "oid:0x0")
for oid in dot1q_mbr_oids:
bp_type = npu.get(oid, ["SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT"]).value()
if bp_type == "SAI_BRIDGE_PORT_TYPE_PORT":
npu.remove(oid)
npu.dot1q_bp_oids.clear()
# Create default routes
npu.create_route("0.0.0.0/0", npu.default_vrf_oid, None,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"])
npu.create_route("::/0", npu.default_vrf_oid, None,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"])
# Create Loopback RIF routes
npu.create_route("fe80::5054:ff:fe12:3456/128", npu.default_vrf_oid, cpu_port_oid,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"])
npu.create_route("fe80::/10", npu.default_vrf_oid, cpu_port_oid,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"])
yield topo_cfg
# TODO: TEARDOWN
# Remove default routes
npu.remove_route("fe80::/10", npu.default_vrf_oid)
npu.remove_route("fe80::5054:ff:fe12:3456/128", npu.default_vrf_oid)
npu.remove_route("::/0", npu.default_vrf_oid)
npu.remove_route("0.0.0.0/0", npu.default_vrf_oid)
# Create default 1Q bridge members
for oid in npu.port_oids:
bp_oid = npu.create(SaiObjType.BRIDGE_PORT,
[
"SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT",
"SAI_BRIDGE_PORT_ATTR_PORT_ID", oid,
# "SAI_BRIDGE_PORT_ATTR_BRIDGE_ID", dot1q_br.oid(),
"SAI_BRIDGE_PORT_ATTR_ADMIN_STATE", "true"
])
npu.dot1q_bp_oids.append(bp_oid)
# Create default VLAN members and set PVID
for idx, oid in enumerate(npu.port_oids):
npu.create_vlan_member(npu.default_vlan_oid, npu.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
npu.set(oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", npu.default_vlan_id])
# Remove Loopback RIF
npu.remove(lo_rif_oid)
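# Hypothetical pytest usage sketch: the `npu` fixture is assumed to be supplied by
# the surrounding SAI test framework (it is not defined in this file).
def test_topo_config_smoke(npu):
    with config(npu) as topo:
        # Inside the block the topology is applied; the yielded dict exposes the
        # OIDs created/discovered during setup.
        assert topo["lo_rif_oid"] is not None
        assert topo["cpu_port_oid"] is not None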
|
20628
|
import time
import textwrap
import math
import binascii
from inkfish.create_discriminant import create_discriminant
from inkfish.classgroup import ClassGroup
from inkfish.iterate_squarings import iterate_squarings
from inkfish import proof_wesolowski
from inkfish.proof_of_time import (create_proof_of_time_nwesolowski,
check_proof_of_time_nwesolowski,
generate_r_value)
from inkfish import proof_pietrzak
from tests.int_mod_n import int_mod_n
start_t = 0
time_multiplier = 1000 # Use milliseconds
def start_bench():
global start_t
start_t = time.time() * time_multiplier
def end_bench(name, iterations):
global start_t
print("%-80s" % name, round(((time.time() * time_multiplier) - start_t)
/ (iterations), 2), "ms")
def bench_classgroup():
D = create_discriminant(b"seed", 512)
g = ClassGroup.from_ab_discriminant(2, 1, D)
while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
g = pow(g, 2)
g2 = pow(g, 2)
start_bench()
for _ in range(0, 10000):
g2 = g2.multiply(g)
end_bench("Classgroup 512 bit multiply", 10000)
start_bench()
for _ in range(0, 10000):
g2 = g2.square()
end_bench("Classgroup 512 bit square", 10000)
D = create_discriminant(b"seed", 1024)
g = ClassGroup.from_ab_discriminant(2, 1, D)
while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
g = pow(g, 2)
g2 = pow(g, 2)
start_bench()
for _ in range(0, 10000):
g2 = g2.multiply(g)
end_bench("Classgroup 1024 bit multiply", 10000)
start_bench()
for _ in range(0, 10000):
g2 = g2.square()
end_bench("Classgroup 1024 bit square", 10000)
D = create_discriminant(b"seed", 2048)
g = ClassGroup.from_ab_discriminant(2, 1, D)
while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
g = pow(g, 2)
g2 = pow(g, 2)
start_bench()
for _ in range(0, 10000):
g2 = g2.multiply(g)
end_bench("Classgroup 2048 bit multiply", 10000)
start_bench()
for _ in range(0, 10000):
g2 = g2.square()
end_bench("Classgroup 2048 bit square", 10000)
def bench_discriminant_generation():
start_bench()
for i in range(100):
create_discriminant(i.to_bytes(32, "big"), 512)
end_bench("Generate 512 bit discriminant", 100)
start_bench()
for i in range(100):
create_discriminant(i.to_bytes(32, "big"), 1024)
end_bench("Generate 1024 bit discriminant", 100)
start_bench()
for i in range(100):
create_discriminant(i.to_bytes(32, "big"), 2048)
end_bench("Generate 2048 bit discriminant", 100)
def bench_vdf_iterations():
D = create_discriminant(b"seed", 512)
g = ClassGroup.from_ab_discriminant(2, 1, D)
start_bench()
for _ in range(10):
iterate_squarings(g, [10000])
end_bench("VDF 10000 iterations, 512bit classgroup", 10)
D = create_discriminant(b"seed", 1024)
g = ClassGroup.from_ab_discriminant(2, 1, D)
start_bench()
for _ in range(2):
iterate_squarings(g, [10000])
end_bench("VDF 10000 iterations, 1024bit classgroup", 2)
D = create_discriminant(b"seed", 2048)
g = ClassGroup.from_ab_discriminant(2, 1, D)
start_bench()
for _ in range(2):
iterate_squarings(g, [10000])
end_bench("VDF 10000 iterations, 2048bit classgroup", 2)
# 2048 bit modulus
prime = int(''.join(textwrap.dedent("""
2634427397878110232503205795695468045251992992603340168049253044454387
1080897872360133472596339100961569230393163880927301060812730934043766
3646941725034559080490451986171041751558689035115943134790395616490035
9846986660803055891526943083539429058955074960014718229954545667371414
8029627597753998530121193913181474174423003742206534823264658175666814
0135440982296559552013264268674093709650866928458407571602481922443634
2306826340229149641664159565679297958087282612514993965471602016939198
7906354607787482381087158402527243744342654041944357821920600344804411
149211019651477131981627171025001255607692340155184929729""").split(
"\n")))
initial_x = int_mod_n(15619920774592561628351138998371642294622340518469892832433140464182509560910157, prime)
start_bench()
for _ in range(2):
iterate_squarings(initial_x, [10000])
end_bench("VDF 10000 iterations, 2048bit RSA modulus", 2)
# 4096 bit modulus
prime = int(''.join(textwrap.dedent("""
8466908771297228398108729385413406312941234872779790501232479567685076
4762372651919166693555570188656362906279057098994287649807661604067499
3053172889374223358861501556862285892231110003666671700028271837785598
2711897721600334848186874197010418494909265899320941516493102418008649
1453168421248338831347183727052419170386543046753155080120058844782449
2367606252473029574371603403502901208633055707823115620627698680602710
8443465519855901353485395338769455628849759950055397510380800451786140
7656499749760023191493764704430968335226478156774628814806959050849093
5035645687560103462845054697907307302184358040130405297282437884344166
7188530230135000709764482573583664708281017375197388209508666190855611
3020636147999796942848529907410787587958203267319164458728792653638371
7065019972034334447374200594285558460255762459285837794285154075321806
4811493971019446075650166775528463987738853022894781860563097254152754
1001763544907553312158598519824602240430350073539728131177239628816329
0179188493240741373702361870220590386302554494325819514615309801491107
2710093592877658471507118356670261129465668437063636041245619411937902
0658733974883998301959084381087966405508661151837877497650143949507846
1522640311670422105209760172585337397687461""").split("\n")))
initial_x = int_mod_n(15619920774592561628351138998371642294622340518469892832433140464182509560910157, prime)
start_bench()
for _ in range(2):
iterate_squarings(initial_x, [10000])
end_bench("VDF 10000 iterations, 4096bit RSA modulus", 2)
def bench_wesolowski():
iterations = 10000
discriminant_length = 512
discriminant = create_discriminant(b"seed", discriminant_length)
L, k, _ = proof_wesolowski.approximate_parameters(iterations)
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
powers_to_calculate = [i * k * L for i in range(0, math.ceil(iterations/(k*L)) + 1)]
powers_to_calculate += [iterations]
start_t = time.time() * time_multiplier
powers = iterate_squarings(x, powers_to_calculate)
vdf_time = round(time.time() * time_multiplier - start_t)
y = powers[iterations]
identity = ClassGroup.identity_for_discriminant(discriminant)
start_t = time.time() * time_multiplier
start_bench()
for _ in range(5):
proof = proof_wesolowski.generate_proof(identity, x, y, iterations, k, L, powers)
end_bench("Wesolowski " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, proof", 5)
proof_time = round((time.time() * time_multiplier - start_t) / 5)
print(" - Percentage of VDF time:", (proof_time / vdf_time) * 100, "%")
start_bench()
for _ in range(10):
assert(proof_wesolowski.verify_proof(x, y, proof, iterations))
end_bench("Wesolowski " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, verification", 10)
def bench_nwesolowski():
iterations = 10000
discriminant_length = 512
discriminant = create_discriminant(b"seed", discriminant_length)
L, k, _ = proof_wesolowski.approximate_parameters(iterations)
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
powers_to_calculate = [i * k * L for i in range(0, math.ceil(iterations/(k*L)) + 1)]
start_t = time.time() * time_multiplier
for _ in range(20):
iterate_squarings(x, powers_to_calculate)
vdf_time = round(time.time() * time_multiplier - start_t) / 20
start_t = time.time() * time_multiplier
start_bench()
for _ in range(20):
result, proof = create_proof_of_time_nwesolowski(discriminant, x, iterations,
discriminant_length, 2, depth=0)
end_bench("n-wesolowski depth 2 " + str(discriminant_length) + "b class group, "
+ str(iterations) + " iterations, proof", 20)
proof_time = round((time.time() * time_multiplier - start_t) / 20)
print(" - Percentage of VDF time:", (((proof_time - vdf_time) / vdf_time) * 100), "%")
start_bench()
for _ in range(20):
assert(check_proof_of_time_nwesolowski(discriminant, x, result + proof, iterations, discriminant_length))
end_bench("n-wesolowski depth 2 " + str(discriminant_length) + "b class group, "
+ str(iterations) + " iterations, verification", 20)
def bench_pietrzak():
iterations = 10000
discriminant_length = 512
discriminant = create_discriminant(b"seed", discriminant_length)
delta = 8
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
powers_to_calculate = proof_pietrzak.cache_indeces_for_count(iterations)
start_t = time.time() * time_multiplier
powers = iterate_squarings(x, powers_to_calculate)
vdf_time = round(time.time() * time_multiplier - start_t)
y = powers[iterations]
identity = ClassGroup.identity_for_discriminant(discriminant)
start_t = time.time() * time_multiplier
start_bench()
for _ in range(5):
proof = proof_pietrzak.generate_proof(x, iterations, delta, y, powers,
identity, generate_r_value, discriminant_length)
end_bench("Pietrzak " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, proof", 10)
proof_time = round((time.time() * time_multiplier - start_t) / 10)
print(" - Percentage of VDF time:", (proof_time / vdf_time) * 100, "%")
start_bench()
for _ in range(10):
assert(proof_pietrzak.verify_proof(x, y, proof, iterations, delta,
generate_r_value, discriminant_length))
end_bench("Pietrzak " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, verification", 10)
def bench_main():
bench_classgroup()
bench_discriminant_generation()
bench_vdf_iterations()
bench_wesolowski()
bench_nwesolowski()
bench_pietrzak()
if __name__ == '__main__':
bench_main()
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
20668
|
import openmoc
import openmc.openmoc_compatible
import openmc.mgxs
import numpy as np
import matplotlib
# Enable Matplotlib to work for headless nodes
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
opts = openmoc.options.Options()
openmoc.log.set_log_level('NORMAL')
###############################################################################
# Eigenvalue Calculation w/o SPH Factors
###############################################################################
# Initialize 2-group OpenMC multi-group cross section library for a pin cell
mgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='.')
# Create an OpenMOC Geometry from the OpenMC Geometry
openmoc_geometry = \
openmc.openmoc_compatible.get_openmoc_geometry(mgxs_lib.geometry)
# Load cross section data
openmoc_materials = \
openmoc.materialize.load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry)
# Initialize FSRs
openmoc_geometry.initializeFlatSourceRegions()
# Initialize an OpenMOC TrackGenerator
track_generator = openmoc.TrackGenerator(
openmoc_geometry, opts.num_azim, opts.azim_spacing)
track_generator.generateTracks()
# Initialize an OpenMOC Solver
solver = openmoc.CPUSolver(track_generator)
solver.setConvergenceThreshold(opts.tolerance)
solver.setNumThreads(opts.num_omp_threads)
# Run an eigenvalue calculation with the MGXS from OpenMC
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_no_sph = solver.getKeff()
# Extract the OpenMOC scalar fluxes
fluxes_no_sph = openmoc.process.get_scalar_fluxes(solver)
###############################################################################
# Eigenvalue Calculation with SPH Factors
###############################################################################
# Compute SPH factors
sph, sph_mgxs_lib, sph_indices = \
openmoc.materialize.compute_sph_factors(
mgxs_lib, azim_spacing=opts.azim_spacing,
num_azim=opts.num_azim, num_threads=opts.num_omp_threads)
# Load the SPH-corrected MGXS library data
materials = \
openmoc.materialize.load_openmc_mgxs_lib(sph_mgxs_lib, openmoc_geometry)
# Run an eigenvalue calculation with the SPH-corrected modified MGXS library
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_with_sph = solver.getKeff()
# Report the OpenMC and OpenMOC eigenvalues
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/o SPH: \t%1.5f', keff_no_sph)
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/ SPH: \t%1.5f', keff_with_sph)
openmoc.log.py_printf('RESULT', 'OpenMC keff: \t\t1.17574 +/- 0.00086')
###############################################################################
# Extracting Scalar Fluxes
###############################################################################
openmoc.log.py_printf('NORMAL', 'Plotting data...')
# Plot the cells
openmoc.plotter.plot_cells(openmoc_geometry)
# Extract the OpenMOC scalar fluxes
fluxes_sph = openmoc.process.get_scalar_fluxes(solver)
fluxes_sph *= sph
# Extract the OpenMC scalar fluxes
num_fsrs = openmoc_geometry.getNumFSRs()
num_groups = openmoc_geometry.getNumEnergyGroups()
openmc_fluxes = np.zeros((num_fsrs, num_groups), dtype=np.float64)
nufission_xs = np.zeros((num_fsrs, num_groups), dtype=np.float64)
# Get the OpenMC flux in each FSR
for fsr in range(num_fsrs):
# Find the OpenMOC cell and volume for this FSR
openmoc_cell = openmoc_geometry.findCellContainingFSR(fsr)
cell_id = openmoc_cell.getId()
fsr_volume = track_generator.getFSRVolume(fsr)
# Store the volume-averaged flux
mgxs = mgxs_lib.get_mgxs(cell_id, 'nu-fission')
flux = mgxs.tallies['flux'].mean.flatten()
flux = np.flipud(flux) / fsr_volume
openmc_fluxes[fsr, :] = flux
nufission_xs[fsr, :] = mgxs.get_xs(nuclide='all')
# Extract energy group edges
group_edges = mgxs_lib.energy_groups.group_edges
group_edges += 1e-3 # Adjust lower bound to 1e-3 eV (for loglog scaling)
# Compute difference in energy bounds for each group
group_edges = np.flipud(group_edges)
# Normalize fluxes with the fission source
openmc_fluxes /= np.sum(openmc_fluxes * nufission_xs)
fluxes_sph /= np.sum(fluxes_sph * nufission_xs)
fluxes_no_sph /= np.sum(fluxes_no_sph * nufission_xs)
###############################################################################
# Plot the OpenMC, OpenMOC Scalar Fluxes
###############################################################################
# Extend the mgxs values array for matplotlib's step plot of fluxes
openmc_fluxes = np.insert(openmc_fluxes, 0, openmc_fluxes[:,0], axis=1)
fluxes_no_sph = np.insert(fluxes_no_sph, 0, fluxes_no_sph[:,0], axis=1)
fluxes_sph = np.insert(fluxes_sph, 0, fluxes_sph[:,0], axis=1)
# Plot OpenMOC and OpenMC fluxes in each FSR
for fsr in range(num_fsrs):
# Get the OpenMOC cell and material for this FSR
cell = openmoc_geometry.findCellContainingFSR(fsr)
material_name = cell.getFillMaterial().getName()
# Create a step plot for the MGXS
fig = plt.figure()
plt.plot(group_edges, openmc_fluxes[fsr,:],
drawstyle='steps', color='r', linewidth=2)
plt.plot(group_edges, fluxes_no_sph[fsr,:],
drawstyle='steps', color='b', linewidth=2)
plt.plot(group_edges, fluxes_sph[fsr,:],
drawstyle='steps', color='g', linewidth=2)
plt.yscale('log')
plt.xscale('log')
plt.xlabel('Energy [eV]')
plt.ylabel('Flux')
plt.title('Normalized Flux ({0})'.format(material_name))
plt.xlim((min(group_edges), max(group_edges)))
plt.legend(['openmc', 'openmoc w/o sph', 'openmoc w/ sph'], loc='best')
plt.grid()
filename = 'plots/flux-{0}.png'.format(material_name.replace(' ', '-'))
plt.savefig(filename, bbox_inches='tight')
plt.close()
###############################################################################
# Plot OpenMC-to-OpenMOC Scalar Flux Errors
###############################################################################
# Compute the percent relative error in the flux
rel_err_no_sph = np.zeros(openmc_fluxes.shape)
rel_err_sph = np.zeros(openmc_fluxes.shape)
for fsr in range(num_fsrs):
delta_flux_no_sph = fluxes_no_sph[fsr,:] - openmc_fluxes[fsr,:]
delta_flux_sph = fluxes_sph[fsr,:] - openmc_fluxes[fsr,:]
rel_err_no_sph[fsr,:] = delta_flux_no_sph / openmc_fluxes[fsr,:] * 100.
rel_err_sph[fsr,:] = delta_flux_sph / openmc_fluxes[fsr,:] * 100.
# Plot OpenMOC relative flux errors in each FSR
for fsr in range(num_fsrs):
# Get the OpenMOC cell and material for this FSR
cell = openmoc_geometry.findCellContainingFSR(fsr)
material_name = cell.getFillMaterial().getName()
# Create a step plot for the MGXS
fig = plt.figure()
plt.plot(group_edges, rel_err_no_sph[fsr,:],
drawstyle='steps', color='r', linewidth=2)
plt.plot(group_edges, rel_err_sph[fsr,:],
drawstyle='steps', color='b', linewidth=2)
plt.xscale('log')
plt.xlabel('Energy [eV]')
plt.ylabel('Relative Error [%]')
plt.title('OpenMOC-to-OpenMC Flux Rel. Err. ({0})'.format(material_name))
plt.xlim((min(group_edges), max(group_edges)))
plt.legend(['openmoc w/o sph', 'openmoc w/ sph'], loc='best')
plt.grid()
filename = 'plots/rel-err-{0}.png'.format(material_name.replace(' ', '-'))
plt.savefig(filename, bbox_inches='tight')
plt.close()
|
20705
|
import os
import sys
import signal
import asyncio
import json
import time
import traceback
import typing
import socket
import re
import select
import websockets
if sys.platform != "win32":
import termios
import tty
else:
import msvcrt
import win32api
from .. import api
from ..shared import constants, log, types as t
from ..shared.constants import State
import conducto.internal.host_detection as hostdet
if sys.version_info < (3, 7):
# create_task is stdlib in 3.7, but we can declare it as a synonym for the
# 3.6 ensure_future
asyncio.create_task = asyncio.ensure_future
STATE_TO_COLOR = {
State.PENDING: log.Color.TRUEWHITE,
State.QUEUED: log.Color.GRAY,
State.RUNNING: log.Color.BLUE,
State.DONE: log.Color.GREEN,
State.ERROR: log.Color.RED,
State.WORKER_ERROR: log.Color.PURPLE,
}
class Listener(object):
def update_node(self, name, data):
pass
async def background_task(self, title):
pass
async def key_press(self, char):
# Listeners are passed the quit_func so that they can decide when to exit
pass
def render(self):
pass
def shutdown(self):
pass
def connect(token: t.Token, pipeline_id: t.PipelineId, starthelp: str):
pipeline = api.Pipeline().get(pipeline_id, token=token)
ui = ShellUI(token, pipeline, starthelp)
if sys.platform == "win32":
win32api.SetConsoleCtrlHandler(ui.ctrl_c, True)
try:
asyncio.get_event_loop().run_until_complete(ui.run())
except Exception:
ui.reset_stdin()
traceback.print_exc()
class ShellUI(object):
def __init__(self, token, pipeline: dict, starthelp: str):
self.pipeline = pipeline
self.quitting = False
self.loop = asyncio.get_event_loop()
self.gw_socket = None
self.start_func_complete = None
self.starthelp = starthelp
from . import one_line, full_screen
self.listeners: typing.List[Listener] = [one_line.OneLineDisplay(self)]
@property
def allow_sleep(self):
# TODO: This is an ugly time-out to avoid shutting down the shell UI
# because the NS cache still believes the pipeline is sleeping.
return self.start_func_complete and time.time() > self.start_func_complete + 3.0
async def view_loop(self):
"""
Every 0.25 seconds render the pipeline
"""
log.info("[view] starting")
while True:
await asyncio.sleep(0.25)
for listener in self.listeners:
listener.render()
def set_gw(self, gw_socket):
self.gw_socket = gw_socket
async def wait_gw(self):
while self.gw_socket is None:
await asyncio.sleep(0.1)
async def start_pipeline(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Manager().launch(pipeline_id)
await self.wait_gw()
payload = {"type": "SET_AUTORUN", "payload": {"value": True}}
await self.gw_socket.send(json.dumps(payload))
async def sleep_pipeline(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Pipeline().sleep_standby(pipeline_id)
else:
payload = {"type": "CLOSE_PROGRAM", "payload": None}
await self.gw_socket.send(json.dumps(payload))
async def reset(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Manager().launch(pipeline_id)
await self.wait_gw()
payload = {"type": "RESET", "payload": ["/"]}
await self.gw_socket.send(json.dumps(payload))
async def gw_socket_loop(self):
"""
Loop and listen for socket messages
"""
start_tasks = await self.run_start_func()
pl = constants.PipelineLifecycle
while True:
if (
self.pipeline is None
or self.pipeline.get("status", None) not in pl.active
):
await asyncio.sleep(0.5)
continue
if start_tasks is not None:
tasks = start_tasks
# we clear the start_tasks now since later reconnects should
# show reconnecting.
start_tasks = None
else:
msg = "Connection lost. Reconnecting"
pretasks = [xx.background_task(msg) for xx in self.listeners]
tasks = [asyncio.create_task(task) for task in pretasks]
try:
websocket = await api.connect_to_pipeline(self.pipeline["pipeline_id"])
except PermissionError:
print()
print("You are not permitted to connect to this pipeline.")
self.quit()
break
except ConnectionError:
self.quit()
break
for task in tasks:
task.cancel()
for listener in self.listeners:
listener.install_normal_key_mode()
self.set_gw(websocket)
was_slept = False
try:
await websocket.send(
json.dumps({"type": "RENDER_NODE", "payload": "/"})
)
log.info("[gw_socket_loop] starting")
async for msg_text in websocket:
msg = json.loads(msg_text)
if msg["type"] in ("NODES_STATE_UPDATE", "RENDER_NODE"):
log.debug(f"incoming gw message {msg['type']}")
for name, data in msg["payload"].items():
for listener in self.listeners:
listener.update_node(name, data)
elif msg["type"] == "SLEEP":
was_slept = True
# we are done here, do not try to reconnect.
break
except websockets.ConnectionClosedError as e:
log.debug(f"ConnectionClosedError {e.code} {e.reason}")
self.set_gw(None)
if was_slept:
break
def get_ns_url(self):
url = api.Config().get_url()
url = re.sub("^http", "ws", url) + "/ns/"
return url
async def reconnect_ns(self):
ns_url = self.get_ns_url()
log.debug("[run] Connecting to", ns_url)
header = {"Authorization": f"bearer {api.Config().get_token(refresh=False)}"}
# we retry connection for roughly 2 minutes
for i in range(45):
try:
websocket = await websockets.connect(ns_url, extra_headers=header)
break
except (
websockets.ConnectionClosedError,
websockets.InvalidStatusCode,
socket.gaierror,
):
log.debug(f"cannot connect to ns ... waiting {i}")
await asyncio.sleep(min(3.0, (2 ** i) / 8))
else:
self.quit()
return None
log.debug("[run] ns Connected")
return websocket
async def ns_socket_loop(self):
"""
Loop and listen for socket messages
"""
while True:
msg = "Connection lost. Reconnecting"
if self.start_func_complete is not None:
pretasks = [xx.background_task(msg) for xx in self.listeners]
else:
pretasks = []
tasks = [asyncio.create_task(task) for task in pretasks]
websocket = await self.reconnect_ns()
for task in tasks:
task.cancel()
if websocket is None:
if self.start_func_complete is not None:
for listener in self.listeners:
listener.install_disconnect_mode()
self.quit()
break
if self.start_func_complete is not None:
for listener in self.listeners:
listener.install_normal_key_mode()
subscribe = {
"type": "SUBSCRIBE",
"payload": {"pipeline_id": self.pipeline["pipeline_id"]},
}
await websocket.send(json.dumps(subscribe))
try:
log.info("[ns_socket_loop] starting")
async for msg_text in websocket:
msg = json.loads(msg_text)
if msg["type"] in ("FULL_INFO_UPDATE",):
log.debug(f"incoming ns message {msg['type']}")
progs = msg["payload"]["programIdToInfo"]
try:
self.pipeline = progs[self.pipeline["pipeline_id"]]
except KeyError:
# TODO: the NS cache may not yet have the pipeline,
# this is to allow for that.
if self.allow_sleep:
raise
else:
continue
if "state" not in self.pipeline["meta"]:
self.pipeline["meta"] = {
"state": "pending",
"stateCounts": {x: 0 for x in STATE_TO_COLOR.keys()},
}
pl = constants.PipelineLifecycle
if self.pipeline["status"] in pl.sleeping and self.allow_sleep:
self.quit()
elif self.pipeline["status"] not in pl.active:
for listener in self.listeners:
listener.update_node("/", self.pipeline["meta"])
except websockets.ConnectionClosedError:
pass
def ctrl_c(self, a, b=None):
# This is the windows control C handler
self.quit()
return True
async def key_loop(self):
"""
Loop and listen for key inputs
"""
log.info("[key_loop] starting")
if sys.platform != "win32":
self.old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
async for char in stream_as_char_generator(self.loop, sys.stdin):
if ord(char) in (3, 4):
# Ctrl+c (sigint) & Ctrl+d (eof) get captured as a non-printing
# characters with ASCII code 3 & 4 respectively. Quit
# gracefully.
self.quit()
elif ord(char) == 26:
# Ctrl+z gets captured as a non-printing character with ASCII
# code 26. Send SIGSTOP and reset the terminal.
self.reset_stdin()
os.kill(os.getpid(), signal.SIGSTOP)
if sys.platform != "win32":
self.old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
for listener in self.listeners:
await listener.key_press(char)
self.reset_stdin()
def reset_stdin(self):
if hasattr(self, "old_settings"):
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, self.old_settings)
async def run_start_func(self):
pretasks = [
xx.background_task(self.starthelp, immediate=True) for xx in self.listeners
]
tasks = [asyncio.create_task(task) for task in pretasks]
self.start_func_complete = time.time()
return tasks
async def run(self):
        # Start all the loops. The view and socket loops are nonblocking. The
        # key_loop needs to be run separately because it blocks on user input.
tasks = [
self.loop.create_task(self.view_loop()),
self.loop.create_task(self.gw_socket_loop()),
self.loop.create_task(self.ns_socket_loop()),
self.loop.create_task(self.key_loop()),
]
        # Wait on all of them. The gather_handle can be cancelled in quit()
        # (e.g. when the user presses Ctrl+c in key_loop()), which will cause
        # the other loops to be cancelled gracefully.
self.gather_handle = asyncio.gather(*tasks)
try:
await self.gather_handle
except asyncio.CancelledError:
return
except websockets.ConnectionClosedError:
self.reset_stdin()
return
else:
log.error("gather_handle returned but it shouldn't have!")
raise Exception("gather_handle returned but it shouldn't have!")
finally:
for listener in self.listeners:
listener.shutdown()
def disconnect(self):
self.quit()
def quit(self):
"""
Make all event loops quit
"""
self.reset_stdin()
self.quitting = True
self.gather_handle.cancel()
def stdin_data():
return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])
async def stream_as_char_generator(loop, stream):
if sys.platform != "win32":
has_key = stdin_data
read_key = lambda: stream.read(1)
else:
has_key = msvcrt.kbhit
read_key = lambda: msvcrt.getch().decode("ascii")
while True:
await asyncio.sleep(0.05)
if has_key():
char = read_key()
if not char: # EOF.
break
yield char
|
20717
|
from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl
class TD3Impl(DDPGImpl):
_target_smoothing_sigma: float
_target_smoothing_clip: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
target_smoothing_sigma: float,
target_smoothing_clip: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._target_smoothing_sigma = target_smoothing_sigma
self._target_smoothing_clip = target_smoothing_clip
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_policy is not None
assert self._targ_q_func is not None
with torch.no_grad():
action = self._targ_policy(batch.next_observations)
# smoothing target
noise = torch.randn(action.shape, device=batch.device)
scaled_noise = self._target_smoothing_sigma * noise
clipped_noise = scaled_noise.clamp(
-self._target_smoothing_clip, self._target_smoothing_clip
)
smoothed_action = action + clipped_noise
clipped_action = smoothed_action.clamp(-1.0, 1.0)
return self._targ_q_func.compute_target(
batch.next_observations,
clipped_action,
reduction=self._target_reduction_type,
)
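# Standalone sketch of the target-policy smoothing used in compute_target above,
# shown without the class plumbing (the function name and arguments are ours).
def _smoothed_target_action(action: torch.Tensor, sigma: float, clip: float) -> torch.Tensor:
    noise = torch.randn(action.shape, device=action.device)
    clipped_noise = (sigma * noise).clamp(-clip, clip)
    # Keep the perturbed action inside the valid [-1, 1] action range.
    return (action + clipped_noise).clamp(-1.0, 1.0)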
|
20769
|
import io
import zlib
import numpy as np
def maybe_compress(data, compress):
    return zlib.compress(data) if compress else data
def maybe_decompress(data, decompress):
    return zlib.decompress(data) if decompress else data
def serialize_numpy(arr: np.ndarray, compress: bool = False) -> bytes:
    """Serializes a numpy array to bytes with optional zlib compression.
    Args:
        arr (np.ndarray): Numpy array to serialize.
        compress (bool, optional): Whether to compress the resulting bytes with zlib.
            Defaults to False.
    Returns:
        bytes: serialized bytes
    """
    buf = io.BytesIO()
    assert isinstance(arr, np.ndarray)
    np.save(buf, arr)
    result = buf.getvalue()
    return maybe_compress(result, compress)
def deserialize_numpy(serialized_bytes: bytes, decompress: bool = False) -> np.ndarray:
    """Deserializes a numpy array from (optionally compressed) bytes.
    Args:
        serialized_bytes (bytes): Serialized numpy array
        decompress (bool, optional): Whether to decompress with zlib before loading.
            Defaults to False.
    Returns:
        np.ndarray: deserialized numpy array
    """
    data = maybe_decompress(serialized_bytes, decompress)
    buf = io.BytesIO(data)
    return np.load(buf)
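# Minimal round-trip sketch using only the helpers above: serialize with zlib
# compression, deserialize, and confirm the array is unchanged.
if __name__ == "__main__":
    original = np.arange(12, dtype=np.float32).reshape(3, 4)
    payload = serialize_numpy(original, compress=True)
    restored = deserialize_numpy(payload, decompress=True)
    assert np.array_equal(original, restored)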
|
20851
|
import numpy as np
from matplotlib import _api
from .axes_divider import make_axes_locatable, Size
from .mpl_axes import Axes
@_api.delete_parameter("3.3", "add_all")
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True, **kwargs):
"""
Parameters
----------
pad : float
Fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = pad * Size.AxesY(ax)
xsize = ((1-2*pad)/3) * Size.AxesX(ax)
ysize = ((1-2*pad)/3) * Size.AxesY(ax)
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = ax._axes_class
except AttributeError:
axes_class = type(ax)
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(), ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
@_api.deprecated("3.3", alternative="ax.imshow(np.dstack([r, g, b]))")
def imshow_rgb(ax, r, g, b, **kwargs):
return ax.imshow(np.dstack([r, g, b]), **kwargs)
class RGBAxes:
"""
4-panel imshow (RGB, R, G, B).
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Subclasses can override the ``_defaultAxesClass`` attribute.
Attributes
----------
RGB : ``_defaultAxesClass``
The axes object for the three-channel imshow.
R : ``_defaultAxesClass``
The axes object for the red channel imshow.
G : ``_defaultAxesClass``
The axes object for the green channel imshow.
B : ``_defaultAxesClass``
The axes object for the blue channel imshow.
"""
_defaultAxesClass = Axes
@_api.delete_parameter("3.3", "add_all")
def __init__(self, *args, pad=0, add_all=True, **kwargs):
"""
Parameters
----------
pad : float, default: 0
fraction of the axes height to put as padding.
add_all : bool, default: True
Whether to add the {rgb, r, g, b} axes to the figure.
This parameter is deprecated.
axes_class : matplotlib.axes.Axes
*args
Unpacked into axes_class() init for RGB
**kwargs
Unpacked into axes_class() init for RGB, R, G, B axes
"""
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
self.RGB = ax = axes_class(*args, **kwargs)
if add_all:
ax.get_figure().add_axes(ax)
else:
kwargs["add_all"] = add_all # only show deprecation in that case
self.R, self.G, self.B = make_rgb_axes(
ax, pad=pad, axes_class=axes_class, **kwargs)
# Set the line color and ticks for the axes.
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color("w")
ax1.axis[:].major_ticks.set_markeredgecolor("w")
@_api.deprecated("3.3")
def add_RGB_to_figure(self):
"""Add red, green and blue axes to the RGB composite's axes figure."""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""
Create the four images {rgb, r, g, b}.
Parameters
----------
r, g, b : array-like
The red, green, and blue arrays.
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images.
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:, :, 0] = r
G = np.zeros_like(RGB)
G[:, :, 1] = g
B = np.zeros_like(RGB)
B[:, :, 2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
@_api.deprecated("3.3", alternative="RGBAxes")
class RGBAxesBase(RGBAxes):
pass
|
20929
|
import numpy as np
import os,sys,time
import torch
import torch.nn.functional as torch_F
import collections
from easydict import EasyDict as edict
import util
class Pose():
def __call__(self,R=None,t=None):
assert(R is not None or t is not None)
if R is None:
if not isinstance(t,torch.Tensor): t = torch.tensor(t)
R = torch.eye(3,device=t.device).repeat(*t.shape[:-1],1,1)
elif t is None:
if not isinstance(R,torch.Tensor): R = torch.tensor(R)
t = torch.zeros(R.shape[:-1],device=R.device)
else:
if not isinstance(R,torch.Tensor): R = torch.tensor(R)
if not isinstance(t,torch.Tensor): t = torch.tensor(t)
assert(R.shape[:-1]==t.shape and R.shape[-2:]==(3,3))
R = R.float()
t = t.float()
pose = torch.cat([R,t[...,None]],dim=-1) # [...,3,4]
assert(pose.shape[-2:]==(3,4))
return pose
def invert(self,pose,use_inverse=False):
R,t = pose[...,:3],pose[...,3:]
R_inv = R.inverse() if use_inverse else R.transpose(-1,-2)
t_inv = (-R_inv@t)[...,0]
pose_inv = self(R=R_inv,t=t_inv)
return pose_inv
def compose(self,pose_list):
# pose_new(x) = poseN(...(pose2(pose1(x)))...)
pose_new = pose_list[0]
for pose in pose_list[1:]:
pose_new = self.compose_pair(pose_new,pose)
return pose_new
def compose_pair(self,pose_a,pose_b):
# pose_new(x) = pose_b(pose_a(x))
R_a,t_a = pose_a[...,:3],pose_a[...,3:]
R_b,t_b = pose_b[...,:3],pose_b[...,3:]
R_new = R_b@R_a
t_new = (R_b@t_a+t_b)[...,0]
pose_new = self(R=R_new,t=t_new)
return pose_new
pose = Pose()
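# A minimal sketch added for illustration (not part of the original module): composing a pose
# with its inverse recovers the identity transform up to numerical precision.
def _demo_pose_round_trip():
    R = torch.tensor([[0.0, -1.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0]])[None]  # [1,3,3], 90-degree rotation about z
    t = torch.tensor([[1.0, 2.0, 3.0]])  # [1,3]
    p = pose(R=R, t=t)  # [1,3,4]
    round_trip = pose.compose([p, pose.invert(p)])
    identity = pose(R=torch.eye(3)[None], t=torch.zeros(1, 3))
    assert torch.allclose(round_trip, identity, atol=1e-6)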
def to_hom(X):
X_hom = torch.cat([X,torch.ones_like(X[...,:1])],dim=-1)
return X_hom
def world2cam(X,pose): # [B,N,3]
X_hom = to_hom(X)
return [email protected](-1,-2)
def cam2img(X,cam_intr):
return X@cam_intr.transpose(-1,-2)
def img2cam(X,cam_intr):
return X@cam_intr.inverse().transpose(-1,-2)
def cam2world(X,pose):
X_hom = to_hom(X)
pose_inv = Pose().invert(pose)
return X_hom@pose_inv.transpose(-1,-2)
def angle_to_rotation_matrix(a,axis):
roll = dict(X=1,Y=2,Z=0)[axis]
O = torch.zeros_like(a)
I = torch.ones_like(a)
M = torch.stack([torch.stack([a.cos(),-a.sin(),O],dim=-1),
torch.stack([a.sin(),a.cos(),O],dim=-1),
torch.stack([O,O,I],dim=-1)],dim=-2)
M = M.roll((roll,roll),dims=(-2,-1))
return M
def get_camera_grid(opt,batch_size,intr=None):
# compute image coordinate grid
if opt.camera.model=="perspective":
y_range = torch.arange(opt.H,dtype=torch.float32,device=opt.device).add_(0.5)
x_range = torch.arange(opt.W,dtype=torch.float32,device=opt.device).add_(0.5)
Y,X = torch.meshgrid(y_range,x_range) # [H,W]
xy_grid = torch.stack([X,Y],dim=-1).view(-1,2) # [HW,2]
elif opt.camera.model=="orthographic":
assert(opt.H==opt.W)
y_range = torch.linspace(-1,1,opt.H,device=opt.device)
x_range = torch.linspace(-1,1,opt.W,device=opt.device)
Y,X = torch.meshgrid(y_range,x_range) # [H,W]
xy_grid = torch.stack([X,Y],dim=-1).view(-1,2) # [HW,2]
xy_grid = xy_grid.repeat(batch_size,1,1) # [B,HW,2]
if opt.camera.model=="perspective":
grid_3D = img2cam(to_hom(xy_grid),intr) # [B,HW,3]
elif opt.camera.model=="orthographic":
grid_3D = to_hom(xy_grid) # [B,HW,3]
return xy_grid,grid_3D
def get_center_and_ray(opt,pose,intr=None,offset=None): # [HW,2]
batch_size = len(pose)
xy_grid,grid_3D = get_camera_grid(opt,batch_size,intr=intr) # [B,HW,3]
# compute center and ray
if opt.camera.model=="perspective":
if offset is not None:
grid_3D[...,:2] += offset
center_3D = torch.zeros(batch_size,1,3,device=opt.device) # [B,1,3]
elif opt.camera.model=="orthographic":
center_3D = torch.cat([xy_grid,torch.zeros_like(xy_grid[...,:1])],dim=-1) # [B,HW,3]
# transform from camera to world coordinates
grid_3D = cam2world(grid_3D,pose) # [B,HW,3]
center_3D = cam2world(center_3D,pose) # [B,HW,3]
ray = grid_3D-center_3D # [B,HW,3]
return center_3D,ray
def get_3D_points_from_depth(opt,center,ray,depth,multi_samples=False):
if multi_samples: center,ray = center[:,:,None],ray[:,:,None]
# x = c+dv
points_3D = center+ray*depth # [B,HW,3]/[B,HW,N,3]/[N,3]
return points_3D
def get_depth_from_3D_points(opt,center,ray,points_3D):
# d = ||x-c||/||v|| (x-c and v should be in same direction)
depth = (points_3D-center).norm(dim=-1,keepdim=True)/ray.norm(dim=-1,keepdim=True) # [B,HW,1]
return depth
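# A minimal sketch added for illustration (not part of the original module): rays for a 2x2
# image seen from an identity camera pose, using an assumed pinhole intrinsic matrix.
def _demo_get_center_and_ray():
    opt = edict(camera=edict(model="perspective"), H=2, W=2, device="cpu")
    intr = torch.tensor([[[2.0, 0.0, 1.0],
                          [0.0, 2.0, 1.0],
                          [0.0, 0.0, 1.0]]])  # [B,3,3]
    identity_pose = pose(R=torch.eye(3)[None], t=torch.zeros(1, 3))  # [B,3,4]
    center, ray = get_center_and_ray(opt, identity_pose, intr=intr)
    return center, ray  # [B,1,3] camera centers, [B,HW,3] one ray per pixel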
|
20935
|
import logging, time, os
class Config:
def __init__(self, data_prefix):
# data_prefix = r'../data/'
self.data_prefix = data_prefix
self._multiwoz_damd_init()
def _multiwoz_damd_init(self):
self.vocab_path_train = self.data_prefix + '/multi-woz-processed/vocab'
self.data_path = self.data_prefix + '/multi-woz-processed/'
self.data_file = 'data_for_damd.json'
self.dev_list = self.data_prefix + '/multi-woz/valListFile.json'
self.test_list = self.data_prefix + '/multi-woz/testListFile.json'
self.dbs = {
'attraction': self.data_prefix + '/db/attraction_db_processed.json',
'hospital': self.data_prefix + '/db/hospital_db_processed.json',
'hotel': self.data_prefix + '/db/hotel_db_processed.json',
'police': self.data_prefix + '/db/police_db_processed.json',
'restaurant': self.data_prefix + '/db/restaurant_db_processed.json',
'taxi': self.data_prefix + '/db/taxi_db_processed.json',
'train': self.data_prefix + '/db/train_db_processed.json',
}
self.domain_file_path = self.data_prefix + '/multi-woz-processed/domain_files.json'
self.slot_value_set_path = self.data_prefix + '/db/value_set_processed.json'
self.exp_domains = ['all'] # hotel,train, attraction, restaurant, taxi
self.enable_aspn = True
self.use_pvaspn = False
self.enable_bspn = True
self.bspn_mode = 'bspn' # 'bspn' or 'bsdx'
self.enable_dspn = False # removed
self.enable_dst = False
self.max_context_length = 900
self.vocab_size = 3000
|
20957
|
try:
import pathlib
except ImportError as e:
try:
import pathlib2 as pathlib
except ImportError:
raise e
def name_to_asserted_group_path(name):
path = pathlib.PurePosixPath(name)
if path.is_absolute():
raise NotImplementedError(
"Absolute paths are currently not supported and unlikely to be implemented."
)
if len(path.parts) < 1 and str(name) != ".":
raise NotImplementedError(
"Getting an item on a group with path '" + name + "' " +
"is not supported and unlikely to be implemented."
)
return path
def remove_root(name):
path = pathlib.PurePosixPath(name)
if path.is_absolute():
path = path.relative_to(path.root)
return path
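# Minimal usage sketch (illustration, not part of the original module):
#   name_to_asserted_group_path("a/b")  -> PurePosixPath("a/b")
#   remove_root("/a/b")                 -> PurePosixPath("a/b")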
|
20959
|
def bubblesort(L):
keepgoing = True
while keepgoing:
keepgoing = False
for i in range(len(L)-1):
if L[i]>L[i+1]:
L[i], L[i+1] = L[i+1], L[i]
keepgoing = True
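# Minimal usage sketch: bubblesort sorts the list in place and returns None.
if __name__ == "__main__":
    data = [5, 2, 9, 1]
    bubblesort(data)
    assert data == [1, 2, 5, 9]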
|
20993
|
import numpy as np
import pytest
from arbol import aprint
from dexp.processing.utils.normalise import Normalise
from dexp.utils.backends import Backend
from dexp.utils.testing.testing import execute_both_backends
@execute_both_backends
@pytest.mark.parametrize(
"dexp_nuclei_background_data",
[dict(length_xy=128, dtype=np.float32)],
indirect=True,
)
def test_normalise(dexp_nuclei_background_data):
_, _, image = dexp_nuclei_background_data
image = image.astype(np.uint16) # required to convert afterwards
normalise = Normalise(image, low=-0.5, high=1, in_place=False, clip=True, dtype=np.float32)
image_normalised = normalise.forward(image)
image_denormalised = normalise.backward(image_normalised)
assert image_normalised.dtype == np.float32
assert image_denormalised.dtype == image.dtype
assert image_normalised.shape == image.shape
assert image_denormalised.shape == image.shape
assert image_normalised.min() >= -0.5
assert image_normalised.max() <= 1
assert image_normalised.max() - image_normalised.min() >= 1.5
assert image_denormalised.min() * (1 + 1e-3) >= image.min()
assert image_denormalised.max() <= (1 + 1e-3) * image.max()
assert (image_denormalised.max() - image_denormalised.min()) * (1 + 1e-3) >= image.max() - image.min()
xp = Backend.get_xp_module()
error = xp.median(xp.abs(image - image_denormalised)).item()
aprint(f"Error = {error}")
assert error < 1e-6
|
20996
|
from ..utils import AnalysisException
from .expressions import Expression
class Literal(Expression):
def __init__(self, value):
super().__init__()
self.value = value
def eval(self, row, schema):
return self.value
def __str__(self):
if self.value is True:
return "true"
if self.value is False:
return "false"
if self.value is None:
return "NULL"
return str(self.value)
def get_literal_value(self):
if hasattr(self.value, "expr") or isinstance(self.value, Expression):
raise AnalysisException("Value should not be a Column or an Expression,"
f" but got {type(self)}: {self}")
return self.value
def args(self):
return (self.value, )
__all__ = ["Literal"]
|
21014
|
from .layer_send import AxolotlSendLayer
from .layer_control import AxolotlControlLayer
from .layer_receive import AxolotlReceivelayer
|
21035
|
from .mem_bank import RGBMem, CMCMem
from .mem_moco import RGBMoCo, CMCMoCo
def build_mem(opt, n_data):
if opt.mem == 'bank':
mem_func = RGBMem if opt.modal == 'RGB' else CMCMem
memory = mem_func(opt.feat_dim, n_data,
opt.nce_k, opt.nce_t, opt.nce_m)
elif opt.mem == 'moco':
mem_func = RGBMoCo if opt.modal == 'RGB' else CMCMoCo
memory = mem_func(opt.feat_dim, opt.nce_k, opt.nce_t)
else:
raise NotImplementedError(
            'mem not supported: {}'.format(opt.mem))
return memory
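# Minimal usage sketch (illustration only; the attribute names mirror the options read above,
# while the concrete values are assumptions):
#   opt = argparse.Namespace(mem='moco', modal='RGB', feat_dim=128, nce_k=16384, nce_t=0.07)
#   memory = build_mem(opt, n_data=50000)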
|
21043
|
import pyblaze.nn.data.extensions
from .noise import NoiseDataset, LabeledNoiseDataset
from .zip import ZipDataLoader
from .transform import TransformDataset
|
21059
|
from functools import partial
from typing import Callable
from typing import TYPE_CHECKING
from ...config import Conf
from .menu import Menu, MenuEntry, MenuSeparator
if TYPE_CHECKING:
from ...ui.views.disassembly_view import DisassemblyView
class DisasmInsnContextMenu(Menu):
"""
    Disassembly Instruction's Context Menu Items and callback functions.
    It provides the context menu for disassembly instructions in the Disassembly View.
For adding items in plugins, use `Workspace.add_disasm_insn_ctx_menu_entry`
and `Workspace.remove_disasm_insn_ctx_menu_entry`.
"""
def __init__(self, disasm_view: 'DisassemblyView'):
super().__init__("", parent=disasm_view)
self.insn_addr = None
self.entries.extend([
MenuEntry('T&oggle selection', self._toggle_instruction_selection),
MenuSeparator(),
MenuEntry('&XRefs...', self._popup_xrefs),
MenuSeparator(),
])
if Conf.has_operation_mango:
self.entries.extend([
MenuEntry("&Depends on...", self._popup_dependson_dialog),
MenuSeparator(),
])
self.entries.extend([
MenuEntry('E&xecute symbolically...', self._popup_newstate_dialog),
MenuEntry('&Avoid in execution...', self._avoid_in_execution),
MenuEntry('&Find in execution...', self._find_in_execution),
MenuEntry('Add &hook...', self._add_hook),
MenuEntry('View function &documentation...', self._view_docs)
])
@property
def _disasm_view(self) -> 'DisassemblyView':
return self.parent
def _popup_newstate_dialog(self):
self._disasm_view.popup_newstate_dialog(async_=True)
def _popup_dependson_dialog(self):
self._disasm_view.popup_dependson_dialog(use_operand=True)
def _toggle_instruction_selection(self):
self._disasm_view.infodock.toggle_instruction_selection(self.insn_addr)
def _avoid_in_execution(self):
self._disasm_view.avoid_addr_in_exec(self.insn_addr)
self._disasm_view.refresh()
def _find_in_execution(self):
self._disasm_view.find_addr_in_exec(self.insn_addr)
self._disasm_view.refresh()
def _add_hook(self):
self._disasm_view.popup_hook_dialog(async_=True)
def _view_docs(self):
if self._disasm_view is None:
return
addr = self._disasm_view._address_in_selection()
if addr is not None:
self._disasm_view.popup_func_doc_dialog(addr)
def _popup_xrefs(self):
if self._disasm_view is None or self._disasm_view._flow_graph is None:
return
r = self._disasm_view._flow_graph.get_selected_operand_info()
if r is not None:
_, ins_addr, operand = r
self._disasm_view.parse_operand_and_popup_xref_dialog(ins_addr, operand, async_=True)
#
# Public Methods
#
def add_menu_entry(self, text, callback: Callable[['DisasmInsnContextMenu'], None], add_separator_first=True):
if add_separator_first:
self.entries.append(MenuSeparator())
self.entries.append(MenuEntry(text, partial(callback, self)))
def remove_menu_entry(self, text, remove_preceding_separator=True):
for idx, m in enumerate(self.entries):
if not isinstance(m, MenuEntry):
continue
if m.caption == text:
self.entries.remove(m)
if remove_preceding_separator:
self.entries.pop(idx-1)
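    # Hypothetical illustration (not part of this class): plugins normally reach these two
    # methods through the Workspace helpers named in the class docstring, e.g.
    #   workspace.add_disasm_insn_ctx_menu_entry('&Log address', lambda menu: print(hex(menu.insn_addr)))
    #   workspace.remove_disasm_insn_ctx_menu_entry('&Log address')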
|
21079
|
class AnalyticalModelStick(AnalyticalModel,IDisposable):
"""
    An element that represents a stick in the structural analytical model.
    Could be one of beam, brace or column type.
"""
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def GetAlignmentMethod(self,selector):
"""
GetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> AnalyticalAlignmentMethod
Gets the alignment method for a given selector.
selector: End of the analytical model.
Returns: The alignment method at a given end.
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetLocalCoordinateSystem(self,*__args):
"""
GetLocalCoordinateSystem(self: AnalyticalModelStick,point: XYZ) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified point.
point: The point on the analytical model stick element.
        Returns: Transformation matrix. x - longitudinal axis; y - transversal, section -
        horizontal, strong axis; z - transversal, section - vertical, weak axis; origin -
        base point of LCS.
GetLocalCoordinateSystem(self: AnalyticalModelStick,parameter: float) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified parameter value along a curve.
parameter: The parameter value along a curve that should be in the range [0,1],where 0
represents start and 1 represents end of the element.
        Returns: Transformation matrix. x - longitudinal axis; y - transversal, section -
        horizontal, strong axis; z - transversal, section - vertical, weak axis; origin -
        base point of LCS.
"""
pass
def GetMemberForces(self):
"""
GetMemberForces(self: AnalyticalModelStick) -> IList[MemberForces]
Gets the member forces associated with this element.
        Returns: Returns a collection of Member Forces associated with this element. Empty
        collection will be returned if element doesn't have any Member Forces.
        To find out with which end member forces are associated, use the
        Autodesk::Revit::DB::Structure::MemberForces::Position property to obtain a
        position of Member Forces on element.
"""
pass
def GetProjectionPlaneY(self,selector):
"""
GetProjectionPlaneY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
        Returns: Plane on to which analytical model is projected, or invalidElementId if
        not projected to a Plane.
"""
pass
def GetProjectionPlaneZ(self,selector):
"""
GetProjectionPlaneZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
        Returns: Plane on to which analytical model is projected, or invalidElementId if
        not projected to a Plane.
"""
pass
def GetProjectionY(self,selector):
"""
GetProjectionY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionY
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""
pass
def GetProjectionZ(self,selector):
"""
GetProjectionZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionZ
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""
pass
def GetReleases(self,start,fx,fy,fz,mx,my,mz):
"""
GetReleases(self: AnalyticalModelStick,start: bool) -> (bool,bool,bool,bool,bool,bool)
Gets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""
pass
def GetReleaseType(self,start):
"""
GetReleaseType(self: AnalyticalModelStick,start: bool) -> ReleaseType
Gets the release type.
start: The position on analytical model stick element. True for start,false for end.
Returns: The type of release.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def RemoveAllMemberForces(self):
"""
RemoveAllMemberForces(self: AnalyticalModelStick) -> bool
Removes all member forces associated with element.
Returns: True if any member forces were removed,false otherwise.
"""
pass
def RemoveMemberForces(self,start):
"""
RemoveMemberForces(self: AnalyticalModelStick,start: bool) -> bool
Removes member forces defined for given position.
start: Member Forces position on analytical model stick element. True for start,false
for end.
Returns: True if member forces for provided position were removed,false otherwise.
"""
pass
def SetAlignmentMethod(self,selector,method):
"""
SetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector,method: AnalyticalAlignmentMethod)
Sets the alignment method for a given selector.
selector: End of the analytical model.
method: The alignment method at a given end.
"""
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetMemberForces(self,*__args):
"""
SetMemberForces(self: AnalyticalModelStick,start: bool,force: XYZ,moment: XYZ)
Adds Member Forces to element.
start: Member Forces position on analytical model stick element. True for start,false
for end.
        force: The translational forces at specified position of the element.
        The x value of XYZ object represents force along x-axis of the analytical model
        coordinate system, y along y-axis, z along z-axis respectively.
        moment: The rotational forces at specified position of the element.
        The x value of XYZ object represents moment about x-axis of the analytical model
        coordinate system, y about y-axis, z about z-axis respectively.
        SetMemberForces(self: AnalyticalModelStick,memberForces: MemberForces)
        Sets Member Forces to element.
        memberForces: End to which member forces will be added is defined by setting the
        Autodesk::Revit::DB::Structure::MemberForces::Position property in the provided
        Member Forces object.
"""
pass
def SetProjection(self,selector,*__args):
"""
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
        planeIdY: Plane on to which analytical model may be projected in Y direction.
        Plane identifies a Level, a Grid, or a Ref Plane.
        projectionZ: Preset value for Analytical Model Stick projection Z.
        SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,projectionZ: StickElementProjectionZ)
        Sets the analytical model projection to a preset value.
        selector: End of the analytical model.
        projectionY: Preset value for Analytical Model Stick projection Y.
        projectionZ: Preset value for Analytical Model Stick projection Z.
        SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,planeIdZ: ElementId)
        Sets the analytical model projection to a preset value.
        selector: End of the analytical model.
        planeIdY: Plane on to which analytical model may be projected in Y direction.
        Plane identifies a Level, a Grid, or a Ref Plane.
        planeIdZ: Plane on to which analytical model may be projected in Z direction.
        Plane identifies a Level, a Grid, or a Ref Plane.
        SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,planeIdZ: ElementId)
        Sets the analytical model projection to a preset value.
        selector: End of the analytical model.
        projectionY: Preset value for Analytical Model Stick projection Y.
        planeIdZ: Plane on to which analytical model may be projected in Z direction.
        Plane identifies a Level, a Grid, or a Ref Plane.
"""
pass
def SetReleases(self,start,fx,fy,fz,mx,my,mz):
"""
SetReleases(self: AnalyticalModelStick,start: bool,fx: bool,fy: bool,fz: bool,mx: bool,my: bool,mz: bool)
Sets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""
pass
def SetReleaseType(self,start,releaseType):
"""
SetReleaseType(self: AnalyticalModelStick,start: bool,releaseType: ReleaseType)
Sets the release type.
start: The position on analytical model stick element. True for start,false for end.
releaseType: The type of release.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
21090
|
import doctest
from nose.tools import assert_equal, assert_true
from corehq.apps.fixtures.models import (
FieldList,
FixtureDataItem,
FixtureItemField,
)
from custom.abt.reports import fixture_utils
from custom.abt.reports.fixture_utils import (
dict_values_in,
fixture_data_item_to_dict,
)
def test_dict_values_in_param_none():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, None)
assert_true(result)
def test_dict_values_in_param_empty():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, {})
assert_true(result)
def test_dict_values_in_value_none():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, {'permutation': None})
assert_true(result)
def test_fixture_data_item_to_dict():
data_item = FixtureDataItem(
domain='test-domain',
data_type_id='123456',
fields={
'id': FieldList(
doc_type='FieldList',
field_list=[
FixtureItemField(
doc_type='FixtureItemField',
field_value='789abc',
properties={}
)
]
),
'name': FieldList(
doc_type='FieldList',
field_list=[
FixtureItemField(
doc_type='FixtureItemField',
field_value='John',
properties={'lang': 'en'}
),
FixtureItemField(
doc_type='FixtureItemField',
field_value='Jan',
properties={'lang': 'nld'}
),
FixtureItemField(
doc_type='FixtureItemField',
field_value='Jean',
properties={'lang': 'fra'}
),
]
)
}
)
dict_ = fixture_data_item_to_dict(data_item)
assert_equal(dict_, {
'id': '789abc',
'name': 'John'
})
def test_empty_fixture_data_item_to_dict():
data_item = FixtureDataItem(
domain='test-domain',
data_type_id='123456',
fields={
'id': FieldList(
doc_type='FieldList',
field_list=[]
),
'name': FieldList(
doc_type='FieldList',
field_list=[]
)
}
)
dict_ = fixture_data_item_to_dict(data_item)
assert_equal(dict_, {
'id': None,
'name': None,
})
def test_doctests():
results = doctest.testmod(fixture_utils)
assert results.failed == 0
|
21182
|
import os
import sys
from dataclasses import dataclass
import click
import numpy as np
import xgboost as xgb
from rich import print, traceback
WD = os.path.dirname(__file__)
@click.command()
@click.option('-i', '--input', required=True, type=str, help='Path to data file to predict.')
@click.option('-m', '--model', type=str, help='Path to an already trained XGBoost model. If not passed a default model will be loaded.')
@click.option('-c/-nc', '--cuda/--no-cuda', type=bool, default=False, help='Whether to enable cuda or not')
@click.option('-o', '--output', type=str, help='Path to write the output to')
def main(input: str, model: str, cuda: bool, output: str):
"""Command-line interface for {{ cookiecutter.project_name }}"""
print(r"""[bold blue]
{{ cookiecutter.project_name }}
""")
print('[bold blue]Run [green]{{ cookiecutter.project_name }} --help [blue]for an overview of all commands\n')
if not model:
model = get_xgboost_model(f'{WD}/models/xgboost_test_model.xgb')
else:
model = get_xgboost_model(model)
if cuda:
model.set_param({'predictor': 'gpu_predictor'})
print('[bold blue] Parsing data')
data_to_predict = parse_data_to_predict(input)
print('[bold blue] Performing predictions')
predictions = np.round(model.predict(data_to_predict.DM))
print(predictions)
if output:
print(f'[bold blue]Writing predictions to {output}')
write_results(predictions, output)
@dataclass
class Dataset:
X: np.ndarray
y: list
DM: xgb.DMatrix
gene_names: list
sample_names: list
def parse_data_to_predict(path_to_data_to_predict: str) -> Dataset:
"""
Parses the data to predict and returns a full Dataset include the DMatrix
:param path_to_data_to_predict: Path to the data on which predictions should be performed on
"""
X = []
y = []
gene_names = []
sample_names = []
with open(path_to_data_to_predict, "r") as file:
all_runs_info = next(file).split("\n")[0].split("\t")[2:]
for run_info in all_runs_info:
split_info = run_info.split("_")
y.append(int(split_info[0]))
sample_names.append(split_info[1])
for line in file:
split = line.split("\n")[0].split("\t")
X.append([float(x) for x in split[2:]])
gene_names.append(split[:2])
X = [list(i) for i in zip(*X)]
X_np = np.array(X)
DM = xgb.DMatrix(X_np, label=y)
return Dataset(X_np, y, DM, gene_names, sample_names)
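# Illustrative sketch of the tab-separated layout the parser above expects (the first two
# column names are assumptions; only the "<label>_<sample>" header cells and the numeric
# expression cells are actually used):
#   gene_id   gene_name   1_sampleA   0_sampleB
#   ENSG...   TP53        3.2         0.1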
def write_results(predictions: np.ndarray, path_to_write_to) -> None:
"""
Writes the predictions into a human readable file.
:param predictions: Predictions as a numpy array
:param path_to_write_to: Path to write the predictions to
"""
np.savetxt(path_to_write_to, predictions, delimiter=',')
def get_xgboost_model(path_to_xgboost_model: str):
"""
Fetches the model of choice and creates a booster from it.
    :param path_to_xgboost_model: Path to the xgboost model
"""
model = xgb.Booster()
model.load_model(os.path.abspath(path_to_xgboost_model))
return model
if __name__ == "__main__":
traceback.install()
sys.exit(main()) # pragma: no cover
|
21212
|
import unittest
import os
import json
import pandas as pd
import numpy as np
class TestingExercise2_07(unittest.TestCase):
def setUp(self) -> None:
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(ROOT_DIR, '..', 'dtypes.json'), 'r') as jsonfile:
self.dtyp = json.load(jsonfile)
self.data = pd.read_csv(os.path.join(ROOT_DIR, '..', 'Datasets', 'earthquake_data.csv'),
dtype = self.dtyp)
def test_object_vars(self):
        self.object_variables = self.data.select_dtypes(include = ['object']).nunique().sort_values()
self.assertEqual(max(self.object_variables), (3821))
if __name__ == '__main__':
unittest.main()
|
21227
|
from setuptools import setup
import src
setup(name='lsankidb',
version=src.__version__,
install_requires=['AnkiTools'],
description='"ls" for your local Anki database.',
#FIXME this duplicates README.md
long_description="""
.. image:: https://cdn.jsdelivr.net/gh/AurelienLourot/lsankidb@c9735756451d135f94601b816469128e0cdadba2/thirdparty/logo.png
:height: 64px
:width: 64px
:align: right
lsankidb
========
``ls`` for your local `Anki <https://apps.ankiweb.net/>`__ database.
Dump all your Anki terms in order to save them, search them, ``grep`` them or ``diff`` them.
::
$ lsankidb
Listing /home/me/.local/share/Anki2/User 1/collection.anki2 ...
Default
French
['Hello', 'Bonjour']
['How are you?', 'Comment ça va ?']
German
['Hello', 'Hallo']
['How are you?', "Wie geht's?"]
`See on GitHub. <https://github.com/AurelienLourot/lsankidb>`__
""",
keywords=['anki',
'terminal',
'cli',
'dump',
'ls',],
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/AurelienLourot/lsankidb',
download_url='https://github.com/AurelienLourot/lsankidb/tarball/'
+ src.__version__,
license='public domain',
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: Public Domain',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education',
'Topic :: Utilities'],
packages=['src'],
entry_points="""
[console_scripts]
lsankidb = src.lsankidb:main
""")
|
21232
|
from stix_shifter_utils.modules.base.stix_transmission.base_delete_connector import BaseDeleteConnector
class DeleteConnector(BaseDeleteConnector):
def __init__(self, api_client):
self.api_client = api_client
def delete_query_connection(self, search_id):
return {"success": True}
|
21413
|
import sys
# Merge several whitespace-separated expression tables into a single table.
# Usage: python <this_script> <output_file> <input_file_1> <input_file_2> ...
output = sys.argv[1]
input_list = sys.argv[2:]
EXP = {}     # row identifier (first column) -> values concatenated across all inputs
header = []  # header fields concatenated across all inputs
for input_file in input_list:
fi=open(input_file)
header=header+fi.readline().replace('"','').rstrip().split()
for line in fi:
seq=line.replace('"','').rstrip().split()
if seq[0] in EXP:
EXP[seq[0]]=EXP[seq[0]]+seq[1:]
else:
EXP[seq[0]]=seq[1:]
fi.close()
fo=open(output,'w')
fo.write('\t'.join(header)+'\n')
for gene in EXP:
if len(EXP[gene])==len(header):
fo.write(gene+'\t'+'\t'.join(EXP[gene])+'\n')
fo.close()
|
21454
|
from simcse import SimCSE
from esimcse import ESimCSE
from promptbert import PromptBERT
from sbert import SBERT
from cosent import CoSent
from config import Params
from log import logger
import torch
from transformers import AutoTokenizer
class SimCSERetrieval(object):
def __init__(self, pretrained_model_path, simcse_path, pool_type, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = SimCSE(Params.pretrained_model, pool_type, dropout)
self.checkpoint = torch.load(simcse_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class ESimCSERetrieval(object):
def __init__(self, pretrained_model_path, esimcse_path, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = ESimCSE(Params.pretrained_model, dropout)
self.checkpoint = torch.load(esimcse_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class PromptBertRetrieval(object):
def __init__(self, pretrained_model_path, promptbert_path, dropout):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
special_token_dict = {'additional_special_tokens': ['[X]']}
self.tokenizer.add_special_tokens(special_token_dict)
mask_id = self.tokenizer.convert_tokens_to_ids(Params.mask_token)
model = PromptBERT(pretrained_model_path, dropout, mask_id)
model.encoder.resize_token_embeddings(len(self.tokenizer))
checkpoint = torch.load(promptbert_path, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
self.checkpoint = checkpoint
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_mask_embedding(self, sentence):
device = "cpu"
prompt_sentence = Params.prompt_templates[0].replace("[X]", sentence)
prompt_encodings = self.tokenizer(prompt_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_mask_embedding = self.model.calculate_mask_embedding(prompt_encodings['input_ids'].to(device),
prompt_encodings['attention_mask'].to(device),
prompt_encodings['token_type_ids'].to(device))
return sentence_mask_embedding
def calculate_sentence_embedding(self, sentence):
device = "cpu"
prompt_sentence = Params.prompt_templates[0].replace("[X]", sentence)
sentence_num = len(self.tokenizer.tokenize(sentence))
template_sentence = Params.prompt_templates[0].replace("[X]", "[X]"*sentence_num)
prompt_encodings = self.tokenizer(prompt_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
template_encodings = self.tokenizer(template_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(prompt_input_ids=prompt_encodings['input_ids'].to(device),
prompt_attention_mask=prompt_encodings['attention_mask'].to(device),
prompt_token_type_ids=prompt_encodings['token_type_ids'].to(device),
template_input_ids=template_encodings['input_ids'].to(device),
template_attention_mask=template_encodings['attention_mask'].to(device),
template_token_type_ids=template_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
# sentence1_embedding = self.calculate_sentence_mask_embedding(sentence1)
# sentence2_embedding = self.calculate_sentence_mask_embedding(sentence2)
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class SBERTRetrieval(object):
def __init__(self, pretrained_model_path, sbert_path, pool_type, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = SBERT(Params.pretrained_model, pool_type, dropout)
self.checkpoint = torch.load(sbert_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['train_loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class CoSentRetrieval(object):
def __init__(self, pretrained_model_path, cosent_path):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = CoSent(Params.pretrained_model, Params.cosent_pool_type, Params.cosent_dropout)
self.checkpoint = torch.load(cosent_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
logger.info("simcse model loaded successfully!")
esimcse_repeat_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_repeat_model, Params.esimcse_repeat_dropout)
logger.info("esimcse repeat model loaded successfully!")
esimcse_same_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_same_model, Params.esimcse_same_dropout)
logger.info("esimcse same model loaded successfully!")
esimcse_multi_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_multi_model, Params.esimcse_multi_dropout)
logger.info("esimcse multi model loaded successfully!")
promptbert_retrieval = PromptBertRetrieval(Params.pretrained_model, Params.promptbert_model, Params.promptbert_dropout)
logger.info("promptbert model loaded successfully!")
sbert_retrieval = SBERTRetrieval(Params.pretrained_model, Params.sbert_model, Params.sbert_pool_type, Params.sbert_dropout)
logger.info("sbert model loaded successfully!")
cosent_retrieval = CoSentRetrieval(Params.pretrained_model, Params.cosent_model)
logger.info("cosent model loaded successfully!")
if __name__ == "__main__":
# model_path = "models/esimcse_0.32_0.15_160.pth"
# model_path = "models/esimcse_multi_0.15_64.pth"
# model_path = "models/esimcse_0.15_64.pth"
# simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
# model_info = simcse_retrieval.print_checkpoint_info()
# print(model_info)
model_info = sbert_retrieval.print_checkpoint_info()
print(model_info)
while True:
print("input your sentence1:")
sentence1 = input()
print("input your sentence2:")
sentence2 = input()
sbert_sentence_similarity = sbert_retrieval.calculate_sentence_similarity(sentence1, sentence2)
# promptbert_sentence_similarity = prom.calculate_sentence_similarity(sentence1, sentence2)
# print("simcse sim: {}, promptbert sim: {}".format(simcse_sentence_similarity, promptbert_sentence_similarity))
print("sbert similarity: {}".format(sbert_sentence_similarity))
|
21458
|
import random
import requests
import time
HOSTS = [
'us-east-1',
'us-west-1',
'eu-west-1',
]
VEHICLES = [
'bike',
'scooter',
'car',
]
if __name__ == "__main__":
print(f"starting load generator")
time.sleep(15)
print('done sleeping')
while True:
        host = random.choice(HOSTS)
        vehicle = random.choice(VEHICLES)
print(f"requesting {vehicle} from {host}")
resp = requests.get(f'http://web:8000/{vehicle}')
print(f"received {resp}")
time.sleep(random.uniform(0.2, 0.4))
|
21460
|
from django.contrib import admin
from . import models
class ReadOnlyAdminMixin():
def get_readonly_fields(self, request, obj=None):
return list(set(
[field.name for field in self.opts.local_fields] +
[field.name for field in self.opts.local_many_to_many]
))
class ReadOnlyAdmin(ReadOnlyAdminMixin, admin.ModelAdmin):
pass
class DerivationCodeAdmin(ReadOnlyAdmin):
list_display = ('code', 'description')
class FoodDescriptionAdmin(ReadOnlyAdmin):
list_display = ('ndb_no', 'food_group', 'short_desc')
class FoodGroupAdmin(ReadOnlyAdmin):
list_display = ('code', 'description')
class FootnoteAdmin(ReadOnlyAdmin):
list_display = ('pk', 'footnote_no', 'food_description', 'footnote_type')
class NutrientDefinitionAdmin(ReadOnlyAdmin):
list_display = ('nutrient_number', 'tagname', 'nutrient_description')
class SourceCodeAdmin(ReadOnlyAdmin):
list_display = ('source_code', 'description')
class WeightAdmin(ReadOnlyAdmin):
list_display = ('food_description', 'amount', 'measure_description')
admin.site.register(models.DerivationCode, DerivationCodeAdmin)
admin.site.register(models.FoodDescription, FoodDescriptionAdmin)
admin.site.register(models.FoodGroup, FoodGroupAdmin)
admin.site.register(models.Footnote, FootnoteAdmin)
admin.site.register(models.NutrientDefinition, NutrientDefinitionAdmin)
admin.site.register(models.SourceCode, SourceCodeAdmin)
admin.site.register(models.Weight, WeightAdmin)
|
21478
|
from pymp3decoder import Decoder
import contextlib
import os
import math
import pyaudio
CHUNK_SIZE = 4096
def take_chunk(content):
""" Split a buffer of data into chunks """
num_blocks = int(math.ceil(1.0*len(content)/CHUNK_SIZE))
for start in range(num_blocks):
yield content[CHUNK_SIZE*start:CHUNK_SIZE*(start+1)]
class TestPlayer:
@contextlib.contextmanager
def start(self):
try:
p = pyaudio.PyAudio()
self.decoder = Decoder(CHUNK_SIZE*20)
self.stream = p.open(format=p.get_format_from_width(2),
channels=2,
rate=44100,
output=True)
yield self.stream
finally:
self.stream.stop_stream()
self.stream.close()
p.terminate()
def test_file(self):
""" Open a file and decode it """
abs_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.mp3")
with open(abs_location, "rb") as in_file, self.start():
content = in_file.read()
for chunk in self.decoder.decode_iter(take_chunk(content)):
self.stream.write(chunk)
|
21519
|
import re
from . import tables
from .instr import Instruction
from .instr.nop import *
from .instr.alu import *
from .instr.bcd import *
from .instr.bit import *
from .instr.flag import *
from .instr.mov import *
from .instr.smov import *
from .instr.ld_st import *
from .instr.stack import *
from .instr.jmp import *
from .instr.call import *
from .instr.ctx import *
from .instr.trap import *
enumerations = {
'R': tables.rx_ax,
'I': tables.dsp8_dsp16_abs16,
'6': tables.dsp8_abs16,
'7': tables.r0x_r0y_dsp8_abs16,
'8': tables.r0x_dsp8_abs16,
'A': tables.reg16_dsp8_dsp16_dsp20_abs16,
'E': tables.reg8l_dsp8_dsp16_abs16,
'N': tables.reg8_dsp8_dsp16_abs16,
'C': tables.creg,
'J': tables.cnd_j3,
'K': tables.cnd_j4,
'M': tables.cnd_bm4,
}
encodings = {
'0111_011z_1111_dddd': AbsReg,
'0111_011z_0110_dddd': AdcImm,
'1011_000z_ssss_dddd': AdcReg,
'0111_011z_1110_dddd': Adcf,
'0111_011z_0100_dddd': AddImm,
'1100_100z_iiii_dddd': AddImm4,
'1000_0DDD;8': AddImm8,
'1010_000z_ssss_dddd': AddReg,
'0010_0DSS;7': AddReg8,
'0111_110z_1110_1011': AddImmSP,
'0111_1101_1011_iiii': AddImm4SP,
'1111_100z_iiii_dddd': Adjnz,
'0111_011z_0010_dddd': AndImm,
'1001_0DDD;8': AndImm8,
'1001_000z_ssss_dddd': AndReg,
'0001_0DSS;7': AndReg8,
'0111_1110_0100_ssss': Band,
'0111_1110_1000_dddd': Bclr,
'0100_0bbb': BclrSB,
'0111_1110_0010_dddd': Bmcnd,
'0111_1101_1101_CCCC;M': BmcndC,
'0111_1110_0101_ssss': Bnand,
'0111_1110_0111_ssss': Bnor,
'0111_1110_1010_dddd': Bnot,
'0101_0bbb': BnotSB,
'0111_1110_0011_ssss': Bntst,
'0111_1110_1101_ssss': Bnxor,
'0111_1110_0110_ssss': Bor,
'0111_1110_1001_dddd': Bset,
'0100_1bbb': BsetSB,
'0111_1110_1011_ssss': Btst,
'0101_1bbb': BtstSB,
'0111_1110_0000_dddd': Btstc,
'0111_1110_0001_dddd': Btsts,
'0111_1110_1100_ssss': Bxor,
'0000_0000': Brk,
'0111_011z_1000_dddd': CmpImm,
'1101_000z_iiii_dddd': CmpImm4,
'1110_0DDD;8': CmpImm8,
'1100_000z_ssss_dddd': CmpReg,
'0011_1DSS;7': CmpReg8,
'0111_1100_1110_1110': DadcImm8,
'0111_1101_1110_1110': DadcImm16,
'0111_1100_1110_0110': DadcReg8,
'0111_1101_1110_0110': DadcReg16,
'0111_1100_1110_1100': DaddImm8,
'0111_1101_1110_1100': DaddImm16,
'0111_1100_1110_0100': DaddReg8,
'0111_1101_1110_0100': DaddReg16,
'1010_1DDD;8': Dec,
'1111_d010': DecAdr,
'0111_110z_1110_0001': DivImm,
'0111_011z_1101_ssss': DivReg,
'0111_110z_1110_0000': DivuImm,
'0111_011z_1100_ssss': DivuReg,
'0111_110z_1110_0011': DivxImm,
'0111_011z_1001_ssss': DivxReg,
'0111_1100_1110_1111': DsbbImm8,
'0111_1101_1110_1111': DsbbImm16,
'0111_1100_1110_0111': DsbbReg8,
'0111_1101_1110_0111': DsbbReg16,
'0111_1100_1110_1101': DsubImm8,
'0111_1101_1110_1101': DsubImm16,
'0111_1100_1110_0101': DsubReg8,
'0111_1101_1110_0101': DsubReg16,
'0111_1100_1111_0010': Enter,
'0111_1101_1111_0010': Exitd,
'0111_1100_0110_DDDD;E': Exts,
'0111_1100_1111_0011': ExtsR0,
'1110_1011_0fff_0101': Fclr,
'1110_1011_0fff_0100': Fset,
'1010_0DDD;8': Inc,
'1011_d010': IncAdr,
'1110_1011_11ii_iiii': Int,
'1111_0110': Into,
'0110_1CCC;J': Jcnd1,
'0111_1101_1100_CCCC;K': Jcnd2,
'0110_0iii': Jmp3,
'1111_1110': Jmp8,
'1111_0100': Jmp16,
'1111_1100': JmpAbs,
'0111_1101_0010_ssss': Jmpi,
'0111_1101_0000_SSSS;A': JmpiAbs,
'1110_1110': Jmps,
'1111_0101': Jsr16,
'1111_1101': JsrAbs,
'0111_1101_0011_ssss': Jsri,
'0111_1101_0001_SSSS;A': JsriAbs,
'1110_1111': Jsrs,
'1110_1011_0DDD;C_0000': LdcImm,
'0111_1010_1DDD;C_ssss': LdcReg,
'0111_1100_1111_0000': Ldctx,
'0111_010z_1000_dddd': Lde,
'0111_010z_1001_dddd': LdeA0,
'0111_010z_1010_dddd': LdeA1A0,
'0111_1101_1010_0iii': Ldipl,
'0111_010z_1100_dddd': MovImmReg,
'1101_100z_iiii_dddd': MovImm4Reg,
'1100_0DDD;8': MovImm8Reg,
'1110_d010': MovImm8Adr,
'1010_d010': MovImm16Adr,
'1011_0DDD;8': MovZero8Reg,
'0111_001z_ssss_dddd': MovRegReg,
'0011_0dss': MovRegAdr,
'0000_0sDD;6': MovReg8Reg,
'0000_1DSS;7': MovRegReg8,
'0111_010z_1011_dddd': MovIndSPReg,
'0111_010z_0011_ssss': MovRegIndSP,
'1110_1011_0DDD;R_SSSS;I': Mova,
'0111_1100_10rr_DDDD;N': MovdirR0LReg,
'0111_1100_00rr_SSSS;N': MovdirRegR0L,
'0111_110z_0101_dddd': MulImm,
'0111_100z_ssss_dddd': MulReg,
'0111_110z_0100_dddd': MuluImm,
'0111_000z_ssss_dddd': MuluReg,
'0111_010z_0101_dddd': NegReg,
'0000_0100': Nop,
'0111_010z_0111_dddd': NotReg,
'1011_1DDD;8': NotReg8,
'0111_011z_0011_dddd': OrImm,
'1001_1DDD;8': OrImm8,
'1001_100z_ssss_dddd': OrReg,
'0001_1DSS;7': OrReg8,
'0111_010z_1101_dddd': Pop,
'1001_d010': PopReg8,
'1101_d010': PopAdr,
'1110_1011_0DDD;C_0011': Popc,
'1110_1101': Popm,
'0111_110z_1110_0010': PushImm,
'0111_010z_0100_ssss': Push,
'1000_s010': PushReg8,
'1100_s010': PushAdr,
'0111_1101_1001_SSSS;I': Pusha,
'1110_1011_0SSS;C_0010': Pushc,
'1110_1100': Pushm,
'1111_1011': Reit,
'0111_110z_1111_0001': Rmpa,
'1110_000z_iiii_dddd': RotImm4,
'0111_010z_0110_dddd': RotR1H,
'0111_011z_1010_dddd': Rolc,
'0111_011z_1011_dddd': Rorc,
'1111_0011': Rts,
'0111_011z_0111_dddd': SbbImm,
'1011_100z_ssss_dddd': SbbReg,
'1111_000z_iiii_dddd': ShaImm4,
'0111_010z_1111_dddd': ShaR1H,
'1110_1011_101d_iiii': Sha32Imm4,
'1110_1011_001d_0001': Sha32R1H,
'1110_100z_iiii_dddd': ShlImm4,
'0111_010z_1110_dddd': ShlR1H,
'1110_1011_100d_iiii': Shl32Imm4,
'1110_1011_000d_0001': Shl32R1H,
'0111_110z_1110_1001': Smovb,
'0111_110z_1110_1000': Smovf,
'0111_110z_1110_1010': Sstr,
'0111_1011_1SSS;C_dddd': StcReg,
'0111_1100_1100_DDDD;A': StcPc,
'0111_1101_1111_0000': Stctx,
'0111_010z_0000_ssss': Ste,
'0111_010z_0001_ssss': SteA0,
'0111_010z_0010_ssss': SteA1A0,
'1101_0DDD;8': Stnz,
'1100_1DDD;8': Stz,
'1101_1DDD;8': Stzx,
'0111_011z_0101_dddd': SubImm,
'1000_1DDD;8': SubImm8,
'1010_100z_ssss_dddd': SubReg,
'0010_1DSS;7': SubReg8,
'0111_011z_0000_dddd': TstImm,
'1000_000z_ssss_dddd': TstReg,
'1111_1111': Und,
'0111_1101_1111_0011': Wait,
'0111_101z_00ss_dddd': Xchg,
'0111_011z_0001_dddd': XorImm,
'1000_100z_ssss_dddd': XorReg,
}
def generate_tables():
for encoding, instr in encodings.items():
def expand_encoding(table, parts):
part, *parts = parts
if ';' in part:
part, enum = part.split(';', 2)
else:
enum = ''
assert len(part) == 4 and len(enum) <= 1
chunks = []
try:
chunks.append(int(part, 2))
except ValueError:
wildcard_part = re.sub(r'[A-Z]', '0', part)
instr_code = int(re.sub(r'[^01]', '0', wildcard_part), 2)
instr_mask = int(re.sub(r'[^01]', '0', wildcard_part.replace('0', '1')), 2)
operand_mask = int(re.sub(r'[^01]', '1', wildcard_part.replace('1', '0')), 2)
operand_code = 0
while True:
chunks.append(instr_code | operand_code)
if operand_code == operand_mask:
break
# The following line cleverly uses carries to make a counter only from the bits
# that are set in `operand_mask`. To understand it, consider that `instr_mask`
# is the inverse of `operand_mask`, and adding 1 to a 011...1 chunk changes it
# into a 100...0 chunk.
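                    # For example, for a 4-bit part with operand_mask = 0b0110 (and thus
                    # instr_mask = 0b1001), operand_code steps through 0b0000, 0b0010,
                    # 0b0100 and finally 0b0110, at which point the while loop stops.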
operand_code = ((operand_code | instr_mask) + 1) & operand_mask
if enum:
shift = 4 - re.search(r'[A-Z]+', part).end()
chunks, chunk_templates = [], chunks
for template in chunk_templates:
for legal_bits in enumerations[enum]:
chunks.append(template | (legal_bits << shift))
for chunk in chunks:
if parts:
try:
subtable = table[chunk]
except KeyError:
subtable = table[chunk] = dict()
assert isinstance(subtable, dict)
expand_encoding(subtable, parts)
else:
assert chunk not in table, "{} conflicts with {}".format(instr, table[chunk])
table[chunk] = instr
parts = encoding.split('_')
while re.match(r"^[a-z]+$", parts[-1]):
parts.pop()
expand_encoding(Instruction.opcodes, parts)
def print_assigned():
def contract_encoding(table, parts):
for part, entry in table.items():
if isinstance(entry, dict):
contract_encoding(entry, (*parts, part))
else:
encoding = '_'.join('{:04b}'.format(part) for part in (*parts, part))
mnemonic = entry().name()
print('{:20s} {}'.format(encoding, mnemonic))
contract_encoding(Instruction.opcodes, ())
def print_unassigned():
def contract_encoding(table, parts):
unassigned = set(range(16))
for part, entry in table.items():
unassigned.remove(part)
if isinstance(entry, dict):
contract_encoding(entry, (*parts, part))
for part in unassigned:
print('_'.join('{:04b}'.format(part) for part in (*parts, part)))
contract_encoding(Instruction.opcodes, ())
generate_tables()
# print_assigned()
# print_unassigned()
|
21524
|
from unittest import TestCase
from schemer import Schema, Array, ValidationException
from dusty.schemas.base_schema_class import DustySchema, DustySpecs
from ...testcases import DustyTestCase
class TestDustySchemaClass(TestCase):
def setUp(self):
self.base_schema = Schema({'street': {'type': basestring},
'house_number': {'type': int, 'default': 1}})
self.bigger_schema = Schema({'address': {'type': self.base_schema, 'default': {}},
'first_name': {'type': basestring, 'required': True},
'last_name': {'type': basestring, 'default': 'johnson'}})
def test_init_invalid_doc(self):
doc = {'street': 'dogstoon',
'house_number': '1'}
with self.assertRaises(ValidationException):
DustySchema(self.base_schema, doc)
def test_valid_doc(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(dusty_schema['street'], 'dogstoon')
self.assertEquals(dusty_schema['house_number'], 1)
def test_setting_defaults(self):
doc = {'street': 'dogstoon'}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(dusty_schema['street'], 'dogstoon')
self.assertEquals(dusty_schema['house_number'], 1)
def test_setting_defaults_more_complicated_1(self):
doc = {'first_name': 'dusty'}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertEquals(dusty_schema['first_name'], 'dusty')
self.assertEquals(dusty_schema['last_name'], 'johnson')
self.assertEquals(dusty_schema['address'], {'house_number': 1})
def test_setting_defaults_more_complicated_2(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertEquals(dusty_schema['address']['street'], 'dogstoon')
self.assertEquals(dusty_schema['address']['house_number'], 1)
def test_in_1(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertTrue('first_name' in dusty_schema)
def test_in_2(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertFalse('first_names' in dusty_schema)
def test_keys(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(set(['street', 'house_number']), set(dusty_schema.keys()))
def test_values(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(set(['dogstoon', 1]), set(dusty_schema.values()))
class TestDustySpecsClass(DustyTestCase):
def test_finds_app_or_lib(self):
specs = DustySpecs(self.temp_specs_path)
self.assertEquals(specs.get_app_or_lib('app-a'), specs['apps']['app-a'])
self.assertEquals(specs.get_app_or_lib('lib-a'), specs['libs']['lib-a'])
def test_raises_without_app_or_lib(self):
specs = DustySpecs(self.temp_specs_path)
with self.assertRaises(KeyError):
specs.get_app_or_lib('non-existant-thingy')
def test_get_app_or_service(self):
specs = DustySpecs(self.temp_specs_path)
self.assertEquals(specs.get_app_or_service('app-a'), specs['apps']['app-a'])
self.assertEquals(specs.get_app_or_service('service-a'), specs['services']['service-a'])
|
21546
|
import torch
from torch.autograd import Function
class Identity(Function):
@staticmethod
def forward(ctx, x, name):
ctx.name = name
return x.clone()
    @staticmethod
    def backward(ctx, grad):
import pydevd
pydevd.settrace(suspend=False, trace_only_current_thread=True)
grad_temp = grad.clone()
return grad_temp, None
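# Minimal usage sketch (illustration, not part of the original module); the backward pass only
# drops into a debugger when a pydevd debug server is attached:
#   x = torch.randn(3, requires_grad=True)
#   y = Identity.apply(x, "probe")
#   y.sum().backward()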
|
21568
|
import FWCore.ParameterSet.Config as cms
# This modifier replaces the default pattern recognition with mkFit for tobTecStep
trackingMkFitTobTecStep = cms.Modifier()
|
21590
|
from plugin.scrobbler.core import SessionEngine, SessionHandler
@SessionEngine.register
class PlayingHandler(SessionHandler):
__event__ = 'playing'
__src__ = ['create', 'pause', 'stop', 'start']
__dst__ = ['start', 'stop']
@classmethod
def process(cls, session, payload):
# Handle media change
if cls.has_media_changed(session, payload) and session.state in ['start', 'pause']:
yield 'stop', session.payload
# Handle current media
if cls.has_finished(session, payload):
if session.state in ['start', 'pause']:
yield 'stop', payload
elif session.state in ['create', 'pause', 'stop']:
yield 'start', payload
elif session.state == 'start':
yield None, payload
|
21625
|
import argparse
from os import listdir, path
import numpy as np
def convert(main_folder, output):
    ret = []
    # Assign an integer label to each class subfolder based on its position in the listing.
    for label, class_folder in enumerate(listdir(main_folder)):
        class_folder_path = path.join(main_folder, class_folder)
        for img_name in listdir(class_folder_path):
            image_path = path.join(class_folder, img_name)
            ret.append([image_path, str(label)])
    # Write one "<relative image path> <label>" pair per line.
    np.savetxt(output, ret, delimiter=" ", fmt="%s")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Folder with classes subfolders to a file to train."
)
parser.add_argument("--folder", "-f", help="Folder to convert.")
parser.add_argument("--output", "-o", help="Output file.")
args = parser.parse_args()
convert(args.folder, args.output)
|
21642
|
import os
from sys import platform
def say_beep(n: int):
    for _ in range(n):
if platform == "darwin":
os.system("say beep")
|
21653
|
import simplejson as json
from .telegram_field import TelegramField
class WTelegramHeader(object):
def __init__(self):
# self._startField = TelegramField()
self._lField = TelegramField()
self._cField = TelegramField()
# self._crcField = TelegramField()
# self._stopField = TelegramField()
self._headerLength = 2
# self._headerLengthCRCStop = 8
@property
def headerLength(self):
return self._headerLength
# @property
# def headerLengthCRCStop(self):
# return self._headerLengthCRCStop
@property
def startField(self):
return self._startField
@startField.setter
def startField(self, value):
self._startField = TelegramField(value)
@property
def lField(self):
return self._lField
@lField.setter
def lField(self, value):
self._lField = TelegramField(value)
@property
def cField(self):
return self._cField
@cField.setter
def cField(self, value):
self._cField = TelegramField(value)
@property
def interpreted(self):
return {
'length': hex(self.lField.parts[0]),
'c': hex(self.cField.parts[0]),
}
# @property
# def crcField(self):
# return self._crcField
# @crcField.setter
# def crcField(self, value):
# self._crcField = TelegramField(value)
# @property
# def stopField(self):
# return self._stopField
# @stopField.setter
# def stopField(self, value):
# self._stopField = TelegramField(value)
def load(self, hat):
header = hat
if isinstance(hat, str):
header = list(map(ord, hat))
# self.startField = header[0]
self.lField = header[0]
self.cField = header[1]
# self.crcField = header[-2]
# self.stopField = header[-1]
def to_JSON(self):
return json.dumps(self.interpreted, sort_keys=False,
indent=4, use_decimal=True)
|
21669
|
from box.parser import Parser
from box.generator import Generator
import os
class Importer:
def __init__(self, path):
# Path to directory containing function graphs to import
self.path = os.path.abspath(path)
# { "FunctionName": <Generator>, ... }
self.function_declarations = {}
# List of (Parser, Generator) objects,
# one for each function graph .box file
self.parser_generators = self._parse_box_files()
print(self.function_declarations)
def _parse_box_files(self):
# Result is a list of tuples [(parser, generator), ...]
result = []
for file in os.listdir(self.path):
if file.endswith(".box"):
path = os.path.join(self.path, file)
parser = Parser(path)
generator = Generator(parser)
code = generator.to_python([])
result.append((parser, generator))
self.function_declarations[generator.function_name] = generator
return result
|
21681
|
import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
from pathlib import Path
def main():
parser = ArgumentParser(description='Collect markdown files, and write JSON.',
formatter_class=ArgumentDefaultsHelpFormatter)
project_path = Path(__file__).parent.parent.parent.parent
parser.add_argument('--source',
type=Path,
default=project_path / 'html' / 'tutorials')
parser.add_argument('--target',
type=FileType('w'),
default=str(project_path / 'html' / 'src' /
'tutorials.json'))
args = parser.parse_args()
tutorials = {}
# source_file: Path
for source_file in args.source.rglob('*.md'):
name = str(source_file.relative_to(args.source).with_suffix(''))
if name == 'README':
continue
source = source_file.read_text()
tutorials[name] = source
json.dump(tutorials, args.target)
if __name__ == '__main__':
    main()
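# Hedged CLI sketch (the script name is an assumption; both defaults come from
# the argparse configuration above):
#   python collect_tutorials.py --source html/tutorials --target html/src/tutorials.json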
|
21704
|
import torch
import torch.nn as nn
class voxel_match_loss(nn.Module):
    """MSE computed separately over occupied (>0.2) and empty voxels, then averaged."""
    def __init__(self):
        super().__init__()
        self.criterion = nn.MSELoss()

    def forward(self, output, label):
        # mask of voxels whose label exceeds the 0.2 occupancy threshold
        positive_mask = torch.where(label > 0.2,
                                    torch.ones_like(label),
                                    torch.zeros_like(label))
        positive_loss = self.criterion(output * positive_mask, label * positive_mask)
        # complementary mask (label <= 0.2); masks are created on label's device
        negative_mask = 1.0 - positive_mask
        negative_loss = self.criterion(output * negative_mask, label * negative_mask)
        # average so that occupied and empty regions contribute equally
        return (positive_loss + negative_loss) / 2
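# Minimal usage sketch (shapes are illustrative): random volumes stand in for a
# predicted and a ground-truth occupancy grid; runs on CPU or GPU.
if __name__ == "__main__":
    criterion = voxel_match_loss()
    output = torch.rand(2, 1, 32, 32, 32)
    label = torch.rand(2, 1, 32, 32, 32)
    print(criterion(output, label).item())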
|
21736
|
import functools
from flask import Blueprint
from flask import render_template
from flask import g
from flask import redirect
from flask import url_for
from flask import flash
from mflac.vuln_app.db import get_db
bp = Blueprint("admin", __name__, url_prefix="/admin")
def admin_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None or not g.user['is_admin']:
flash("Forbidden. You haven't enough permissions")
return redirect(url_for("index.index"))
return view(**kwargs)
return wrapped_view
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for("auth.login"))
return view(**kwargs)
return wrapped_view
@bp.route("/users_list")
@login_required
@admin_required
def users_list():
db = get_db()
users = db.execute("SELECT id, username, is_admin FROM user").fetchall()
return render_template('admin/users_list.html', users=users)
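# Hedged wiring sketch (create_app and the other blueprints are assumptions,
# not part of this module): the routes above only take effect once the
# blueprint is registered on the application.
# def create_app():
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(bp)
#     return app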
|
21772
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from conversion_imagenet import TestModels
from conversion_imagenet import is_paddle_supported
def get_test_table():
return { 'paddle' : {
'resnet50' : [
TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
TestModels.coreml_emit,
TestModels.keras_emit,
TestModels.mxnet_emit,
TestModels.pytorch_emit,
TestModels.tensorflow_emit
],
'resnet101' : [
#TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
TestModels.coreml_emit,
TestModels.keras_emit,
TestModels.mxnet_emit,
TestModels.pytorch_emit,
TestModels.tensorflow_emit
],
'vgg16' : [
TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
#TestModels.coreml_emit,
#TestModels.keras_emit,
#TestModels.mxnet_emit,
#TestModels.pytorch_emit,
#TestModels.tensorflow_emit
],
}}
def test_paddle():
if not is_paddle_supported():
return
    # omitting the tensorflow import here leads to a crash
import tensorflow as tf
test_table = get_test_table()
tester = TestModels(test_table)
tester._test_function('paddle', tester.paddle_parse)
if __name__ == '__main__':
test_paddle()
|
21790
|
import numpy as np
from matplotlib import pyplot as plt
"""
https://stackoverflow.com/questions/42750910/convert-rgb-image-to-index-image/62980021#62980021
convert semantic labels from RGB coding to index coding
Steps:
1. define COLORS (see below)
2. hash colors
3. run rgb2index(segmentation_rgb)
see example below
TODO: apparently, using cv2.LUT is much simpler (and maybe faster?)
"""
COLORS = np.array([[0, 0, 0], [0, 0, 255], [255, 0, 0], [0, 255, 0]])
W = np.power(255, [0, 1, 2])
HASHES = np.sum(W * COLORS, axis=-1)
HASH2COLOR = {h: c for h, c in zip(HASHES, COLORS)}
HASH2IDX = {h: i for i, h in enumerate(HASHES)}
def rgb2index(segmentation_rgb):
"""
turn a 3 channel RGB color to 1 channel index color
"""
s_shape = segmentation_rgb.shape
s_hashes = np.sum(W * segmentation_rgb, axis=-1)
print(np.unique(segmentation_rgb.reshape((-1, 3)), axis=0))
func = lambda x: HASH2IDX[int(x)] # noqa
segmentation_idx = np.apply_along_axis(func, 0, s_hashes.reshape((1, -1)))
segmentation_idx = segmentation_idx.reshape(s_shape[:2])
return segmentation_idx
segmentation = np.array([[0, 0, 0], [0, 0, 255], [255, 0, 0]] * 3).reshape((3, 3, 3))
segmentation_idx = rgb2index(segmentation)
print(segmentation)
print(segmentation_idx)
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
axes[0].imshow(segmentation)
axes[0].set_title("Segmentation RGB")
axes[1].imshow(segmentation_idx)
axes[1].set_title("Segmentation IDX")
plt.show()
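# Hedged inverse-mapping sketch: HASH2COLOR is defined above but never used;
# combining it with HASH2IDX recovers the palette entry for each index.
IDX2COLOR = {HASH2IDX[h]: c for h, c in HASH2COLOR.items()}
print({i: tuple(c) for i, c in IDX2COLOR.items()})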
|
21827
|
from .model import KerasModel
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import BatchNormalization, Dropout, Conv2D, MaxPooling2D
import kapre
from kapre.utils import Normalization2D
from kapre.time_frequency import Spectrogram
class CNN_STFT(KerasModel):
def create_model(self, input_shape, dropout=0.5, print_summary=False):
# basis of the CNN_STFT is a Sequential network
model = Sequential()
# spectrogram creation using STFT
model.add(Spectrogram(n_dft = 128, n_hop = 16, input_shape = input_shape,
return_decibel_spectrogram = False, power_spectrogram = 2.0,
trainable_kernel = False, name = 'static_stft'))
model.add(Normalization2D(str_axis = 'freq'))
# Conv Block 1
        model.add(Conv2D(filters = 24, kernel_size = (12, 12),
                         strides = (1, 1), name = 'conv1',
                         padding = 'same'))
model.add(BatchNormalization(axis = 1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2), padding = 'valid',
data_format = 'channels_last'))
# Conv Block 2
        model.add(Conv2D(filters = 48, kernel_size = (8, 8),
                         name = 'conv2', padding = 'same'))
model.add(BatchNormalization(axis = 1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'valid',
data_format = 'channels_last'))
# Conv Block 3
        model.add(Conv2D(filters = 96, kernel_size = (4, 4),
                         name = 'conv3', padding = 'same'))
model.add(BatchNormalization(axis = 1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2),
padding = 'valid',
data_format = 'channels_last'))
model.add(Dropout(dropout))
# classificator
model.add(Flatten())
model.add(Dense(2)) # two classes only
model.add(Activation('softmax'))
if print_summary:
print(model.summary())
# compile the model
model.compile(loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy'])
# assign model and return
self.model = model
return model
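# Hedged usage sketch (the input shape is illustrative: 1 channel of 1024 raw
# samples; left as a comment because KerasModel's constructor is defined elsewhere):
# model = CNN_STFT().create_model(input_shape=(1, 1024), print_summary=True)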
|
21838
|
import heterocl as hcl
hcl.init()
target = hcl.Platform.xilinx_zc706
initiation_interval = 4
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.placeholder((10, 20), name="c")
d = hcl.placeholder((10, 20), name="d")
e = hcl.placeholder((10, 20), name="e")
def add_mul(a, b, c, d, e):
@hcl.def_([a.shape, b.shape, c.shape])
def ret_add(a, b, c):
with hcl.for_(0, a.shape[0]) as i:
with hcl.for_(0, a.shape[1]) as j:
c[i, j] = a[i, j] + b[i, j]
@hcl.def_([c.shape, d.shape, e.shape])
def ret_mul(c, d, e):
# hcl.update(c, lambda x, y: a[x, y] * b[x, y], 'c_mul')
with hcl.for_(0, c.shape[0]) as i:
with hcl.for_(0, c.shape[1]) as j:
e[i, j] = c[i, j] * d[i, j]
ret_add(a, b, c)
ret_mul(c, d, e)
# compute customization
s = hcl.create_schedule([a, b, c, d, e], add_mul)
# op1 = add_mul.ret_add.c
# op2 = add_mul.ret_mul.c
# s[op1].pipeline(op1.axis[0], initiation_interval)
# stream into modules / device
a0, b0 = s.to([a, b], target.xcel)
d0 = s.to(d, target.xcel)
#s.partition(b0, dim=2, factor=2)
s.to([a0, b0], s[add_mul.ret_add])
s.to(d0, s[add_mul.ret_mul])
# within device move producer to consumer
s.to(c, s[add_mul.ret_mul],
s[add_mul.ret_add], depth=10)
# return tensor for inter-device move
# e0 = s.stream_to(e, hcl.CPU('riscv'))
# print(add_mul.ret_mul._buf, c._buf)
print(hcl.lower(s))
code = hcl.build(s, target)
print(code)
#
# with open("example.cl", "w") as f:
# f.write(code)
# f.close()
|
21936
|
DILAMI_WEEKDAY_NAMES = {
0: "شمبه",
1: "یکشمبه",
2: "دۊشمبه",
3: "سۊشمبه",
4: "چارشمبه",
5: "پئنشمبه",
6: "جۊمه",
}
DILAMI_MONTH_NAMES = {
0: "پنجيک",
1: "نؤرۊز ما",
2: "کۊرچ ٚ ما",
3: "أرئه ما",
4: "تیر ما",
5: "مۊردال ما",
6: "شریرما",
7: "أمیر ما",
8: "آول ما",
9: "سیا ما",
10: "دیا ما",
11: "ورفن ٚ ما",
12: "اسفندار ما",
}
DILAMI_LEAP_YEARS = (
199,
203,
207,
211,
215,
220,
224,
228,
232,
236,
240,
244,
248,
253,
257,
261,
265,
269,
273,
277,
281,
286,
290,
294,
298,
302,
306,
310,
315,
319,
323,
327,
331,
335,
339,
343,
348,
352,
356,
360,
364,
368,
372,
376,
381,
385,
389,
393,
397,
401,
405,
409,
414,
418,
422,
426,
430,
434,
438,
443,
447,
451,
455,
459,
463,
467,
471,
476,
480,
484,
488,
492,
496,
500,
504,
509,
513,
517,
521,
525,
529,
533,
537,
542,
546,
550,
554,
558,
562,
566,
571,
575,
579,
583,
587,
591,
595,
599,
604,
608,
612,
616,
620,
624,
628,
632,
637,
641,
645,
649,
653,
657,
661,
665,
669,
674,
678,
682,
686,
690,
694,
698,
703,
707,
711,
715,
719,
723,
727,
731,
736,
740,
744,
748,
752,
756,
760,
764,
769,
773,
777,
781,
785,
789,
793,
797,
802,
806,
810,
814,
818,
822,
826,
831,
835,
839,
843,
847,
851,
855,
859,
864,
868,
872,
876,
880,
884,
888,
892,
897,
901,
905,
909,
913,
917,
921,
925,
930,
934,
938,
942,
946,
950,
954,
959,
963,
967,
971,
975,
979,
983,
987,
992,
996,
1000,
1004,
1008,
1012,
1016,
1020,
1025,
1029,
1033,
1037,
1041,
1045,
1049,
1053,
1058,
1062,
1066,
1070,
1074,
1078,
1082,
1087,
1091,
1095,
1099,
1103,
1107,
1111,
1115,
1120,
1124,
1128,
1132,
1136,
1140,
1144,
1148,
1153,
1157,
1161,
1165,
1169,
1173,
1177,
1181,
1186,
1190,
1194,
1198,
1202,
1206,
1210,
1215,
1219,
1223,
1227,
1231,
1235,
1239,
1243,
1248,
1252,
1256,
1260,
1264,
1268,
1272,
1276,
1281,
1285,
1289,
1293,
1297,
1301,
1305,
1309,
1314,
1318,
1322,
1326,
1330,
1334,
1338,
1343,
1347,
1351,
1355,
1359,
1363,
1367,
1371,
1376,
1380,
1384,
1388,
1392,
1396,
1400,
1404,
1409,
1413,
1417,
1421,
1425,
1429,
1433,
1437,
1442,
1446,
1450,
1454,
1458,
1462,
1466,
1471,
1475,
1479,
1483,
1487,
1491,
1495,
1499,
1504,
1508,
1512,
1516,
1520,
1524,
1528,
1532,
1537,
1541,
1545,
1549,
1553,
1557,
1561,
1565,
1570,
1574,
1578,
1582,
1586,
1590,
1594,
1599,
1603,
1607,
1611,
1615,
1619,
1623,
1627,
1632,
1636,
1640,
1644,
1648,
1652,
1656,
1660,
1665,
1669,
1673,
1677,
1681,
1685,
1689,
1693,
1698,
1702,
1706,
1710,
1714,
1718,
1722,
1727,
1731,
1735,
1739,
1743,
1747,
1751,
1755,
1760,
1764,
1768,
1772,
1776,
1780,
1784,
1788,
1793,
1797,
1801,
1805,
1809,
1813,
1817,
1821,
1826,
1830,
1834,
1838,
1842,
1846,
1850,
1855,
1859,
1863,
1867,
1871,
1875,
1879,
1883,
1888,
1892,
1896,
1900,
1904,
1908,
1912,
1916,
1921,
1925,
1929,
1933,
1937,
1941,
1945,
1949,
1954,
1958,
1962,
1966,
1970,
1974,
1978,
1983,
1987,
1991,
1995,
1999,
2003,
2007,
2011,
2016,
2020,
2024,
2028,
2032,
2036,
2040,
2044,
2049,
2053,
2057,
2061,
2065,
2069,
2073,
2077,
2082,
2086,
2090,
2094,
2098,
2102,
2106,
2111,
2115,
2119,
2123,
2127,
2131,
2135,
2139,
2144,
2148,
2152,
2156,
2160,
2164,
2168,
2172,
2177,
2181,
2185,
2189,
2193,
2197,
2201,
2205,
2210,
2214,
2218,
2222,
2226,
2230,
2234,
2239,
2243,
2247,
2251,
2255,
2259,
2263,
2267,
2272,
2276,
2280,
2284,
2288,
2292,
2296,
2300,
2305,
2309,
2313,
2317,
2321,
2325,
2329,
2333,
2338,
2342,
2346,
2350,
2354,
2358,
2362,
2367,
2371,
2375,
2379,
2383,
2387,
2391,
2395,
2400,
2404,
2408,
2412,
2416,
2420,
2424,
2428,
2433,
2437,
2441,
2445,
2449,
2453,
2457,
2461,
2466,
2470,
2474,
2478,
2482,
2486,
2490,
2495,
2499,
2503,
2507,
2511,
2515,
2519,
2523,
2528,
2532,
2536,
2540,
2544,
2548,
2552,
2556,
2561,
2565,
2569,
2573,
2577,
2581,
2585,
2589,
2594,
2598,
2602,
2606,
2610,
2614,
2618,
2623,
2627,
2631,
2635,
2639,
2643,
2647,
2651,
2656,
2660,
2664,
2668,
2672,
2676,
2680,
2684,
2689,
2693,
2697,
2701,
2705,
2709,
2713,
2717,
2722,
2726,
2730,
2734,
2738,
2742,
2746,
2751,
2755,
2759,
2763,
2767,
2771,
2775,
2779,
2784,
2788,
2792,
2796,
2800,
2804,
2808,
2812,
2817,
2821,
2825,
2829,
2833,
2837,
2841,
2845,
2850,
2854,
2858,
2862,
2866,
2870,
2874,
2879,
2883,
2887,
2891,
2895,
2899,
2903,
2907,
2912,
2916,
2920,
2924,
2928,
2932,
2936,
2940,
2945,
2949,
2953,
2957,
2961,
2965,
2969,
2973,
2978,
2982,
2986,
2990,
2994,
2998,
3002,
3007,
3011,
3015,
3019,
3023,
3027,
3031,
3035,
3040,
3044,
3048,
3052,
3056,
3060,
3064,
3068,
3073,
3077,
3081,
3085,
3089,
3093,
3097,
3101,
3106,
3110,
3114,
3118,
3122,
3126,
3130,
3135,
3139,
3143,
3147,
3151,
3155,
3159,
3163,
3168,
3172,
3176,
3180,
3184,
3188,
3192,
3196,
3201,
3205,
3209,
3213,
3217,
3221,
3225,
3229,
3234,
3238,
3242,
3246,
3250,
3254,
3258,
3263,
3267,
3271,
3275,
3279,
3283,
3287,
3291,
3296,
3300,
3304,
3308,
3312,
3316,
3320,
3324,
3329,
3333,
3337,
3341,
3345,
3349,
3353,
3357,
3362,
3366,
3370,
)
#: Minimum year supported by the library.
MINYEAR = 195
#: Maximum year supported by the library.
MAXYEAR = 3372
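# Hedged helper sketch (not part of the original constants): leap status is a
# plain membership test against DILAMI_LEAP_YEARS within the supported range.
def _is_dilami_leap(year):
    return MINYEAR <= year <= MAXYEAR and year in DILAMI_LEAP_YEARS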
|
22008
|
from __future__ import with_statement
from contextlib import contextmanager
from test import TemplateTest, eq_, raises, template_base, mock
import os
from mako.cmd import cmdline
class CmdTest(TemplateTest):
@contextmanager
def _capture_output_fixture(self, stream="stdout"):
with mock.patch("sys.%s" % stream) as stdout:
yield stdout
def test_stdin_success(self):
with self._capture_output_fixture() as stdout:
with mock.patch("sys.stdin", mock.Mock(
read=mock.Mock(return_value="hello world ${x}"))):
cmdline(["--var", "x=5", "-"])
eq_(stdout.write.mock_calls[0][1][0], "hello world 5")
def test_stdin_syntax_err(self):
with mock.patch("sys.stdin", mock.Mock(
read=mock.Mock(return_value="${x"))):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5", "-"])
assert "SyntaxException: Expected" in \
stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_stdin_rt_err(self):
with mock.patch("sys.stdin", mock.Mock(
read=mock.Mock(return_value="${q}"))):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5", "-"])
assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_success(self):
with self._capture_output_fixture() as stdout:
cmdline(["--var", "x=5",
os.path.join(template_base, "cmd_good.mako")])
eq_(stdout.write.mock_calls[0][1][0], "hello world 5")
def test_file_syntax_err(self):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5",
os.path.join(template_base, "cmd_syntax.mako")])
assert "SyntaxException: Expected" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_rt_err(self):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5",
os.path.join(template_base, "cmd_runtime.mako")])
assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_notfound(self):
with raises(SystemExit, "error: can't find fake.lalala"):
cmdline(["--var", "x=5", "fake.lalala"])
|
22010
|
from LightPipes import *
import matplotlib.pyplot as plt
def TheExample(N):
fig=plt.figure(figsize=(11,9.5))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
labda=1000*nm;
size=10*mm;
f1=10*m
f2=1.11111111*m
z=1.0*m
w=5*mm;
F=Begin(size,labda,N);
F=RectAperture(w,w,0,0,0,F);
#1) Using Lens and Fresnel:
F1=Lens(z,0,0,F)
F1=Fresnel(z,F1)
phi1=Phase(F1);phi1=PhaseUnwrap(phi1)
I1=Intensity(0,F1);
x1=[]
for i in range(N):
x1.append((-size/2+i*size/N)/mm)
#2) Using Lens + LensFresnel and Convert:
F2=Lens(f1,0,0,F);
F2=LensFresnel(f2,z,F2);
F2=Convert(F2);
phi2=Phase(F2);phi2=PhaseUnwrap(phi2)
I2=Intensity(0,F2);
x2=[]
newsize=size/10
for i in range(N):
x2.append((-newsize/2+i*newsize/N)/mm)
ax1.plot(x1,phi1[int(N/2)],'k--',label='Lens + Fresnel')
ax1.plot(x2,phi2[int(N/2)],'k',label='LensFresnel + Convert');
ax1.set_xlim(-newsize/2/mm,newsize/2/mm)
ax1.set_ylim(-2,4)
ax1.set_xlabel('x [mm]');
ax1.set_ylabel('phase [rad]');
ax1.set_title('phase, N = %d' %N)
legend = ax1.legend(loc='upper center', shadow=True)
ax2.plot(x1,I1[int(N/2)],'k--',label='Lens+Fresnel')
ax2.plot(x2,I2[int(N/2)], 'k',label='LensFresnel + Convert');
ax2.set_xlim(-newsize/2/mm,newsize/2/mm)
ax2.set_ylim(0,1000)
ax2.set_xlabel('x [mm]');
ax2.set_ylabel('Intensity [a.u.]');
ax2.set_title('intensity, N = %d' %N)
legend = ax2.legend(loc='upper center', shadow=True)
ax3.imshow(I1);ax3.axis('off');ax3.set_title('Intensity, Lens + Fresnel, N = %d' %N)
ax3.set_xlim(int(N/2)-N/20,int(N/2)+N/20)
ax3.set_ylim(int(N/2)-N/20,int(N/2)+N/20)
ax4.imshow(I2);ax4.axis('off');ax4.set_title('Intensity, LensFresnel + Convert, N = %d' %N)
plt.show()
TheExample(100) #100 x 100 grid
TheExample(1000) #1000 x 1000 grid
|