ext | sha | content
---|---|---
py | b402f574ebaf2e855db5e2338e309b05f83b3a69 | import argparse
import os


def file_lengthy(fname):
    # count lines by iterating the file; defaults to 0 so empty files don't crash
    count = 0
    with open(fname) as f:
        for count, _ in enumerate(f, 1):
            pass
    return count


def main(input_dir):
    for file in os.listdir(input_dir):
        # os.listdir yields bare file names, so join them with the directory path
        count = file_lengthy(os.path.join(input_dir, file))
        print(count)


def start_process():
    global parser, args
    parser = argparse.ArgumentParser(description='Parallel text processor')
    parser.add_argument('--input_dir', type=str, required=True)
    args = parser.parse_args()
    main(args.input_dir)


if __name__ == '__main__':
    # test_batch_execution('one_file', "E:/download/output_files")
    start_process()
    print("in main")
|
py | b402f637b6caa47d822df6245f0df65870f824c2 | #!/usr/bin/env python
from django.conf import settings
from localground.apps.lib.helpers.sqlparse.sql_djangoify import parser
from datetime import datetime


class QueryParser(object):
    error = False

    def __init__(self, model_class, query_text, debug=True):
        self.model_class = model_class
        self.query_text = query_text
        self.where_conditions = []
        self.filter_fields = []
        if debug:
            self.parse()
        else:
            try:
                self.parse()
            except Exception:
                self.error = True
                self.error_message = 'Invalid query "%s"' % self.query_text

    def __repr__(self):
        return 'Filter Text: %s\n%s' % (self.query_text, self.to_dict_list(debug=True))

    def __str__(self):
        return 'Filter Text: %s\n%s' % (self.query_text, self.to_dict_list(debug=True))

    def to_dict_list(self, debug=False):
        return [c.to_dict(debug=True) for c in self.where_conditions]

    def parse(self):
        if self.query_text is None:
            return
        self.where_conditions = parser.parse_sql(self.query_text, self.model_class)

    def extend_query(self, q):
        if self.where_conditions:
            return q.filter(self.where_conditions)
        else:
            return q

    def populate_filter_fields(self):
        '''
        Populates the UI filter fields with data, if applicable
        '''
        filter_fields = self.model_class.get_filter_fields().values()
        for filter_field in filter_fields:
            filter_field.update_from_sql(self.where_conditions)
        return filter_fields

    def to_dict_list(self, debug=False):
        # NOTE: this definition shadows the to_dict_list defined above; the debug
        # keyword is accepted (and ignored) so that __repr__/__str__ still work.
        '''
        Serializes the UI filter fields, if applicable
        '''
        fields = self.model_class.get_filter_fields().values()
        return [f.to_dict(col_name=True) for f in fields]
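
# A minimal usage sketch (not part of the original module); SomeLayer and the query
# string are hypothetical, and SomeLayer is assumed to implement get_filter_fields():
#
#   qp = QueryParser(SomeLayer, 'WHERE name = "trail"', debug=False)
#   if not qp.error:
#       queryset = qp.extend_query(SomeLayer.objects.all())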
|
py | b402f6453d7a96c6ed6e7b2738f729ee57d724c0 | # default PEARL experiment settings
# all experiments should modify these settings only as needed
default_config = dict(
    env_name='cheetah-dir',
    n_train_tasks=1,
    n_eval_tasks=1,
    latent_size=5, # dimension of the latent context vector
    net_size=300, # number of units per FC layer in each network
    path_to_weights=None, # path to pre-trained weights to load into networks
    env_params=dict(
        n_tasks=2, # number of distinct tasks in this domain, should equal sum of train and eval tasks
        randomize_tasks=True, # shuffle the tasks after creating them
    ),
    algo_params=dict(
        meta_batch=16, # number of tasks to average the gradient across
        num_iterations=500, # number of data sampling / training iterations
        num_initial_steps=2000, # number of transitions collected per task before training
        num_tasks_sample=5, # number of randomly sampled tasks to collect data for each iteration
        num_steps_prior=400, # number of transitions to collect per task with z ~ prior
        # each of the num_steps_* settings is the number of steps collected each time a task is sampled
        num_steps_posterior=0, # number of transitions to collect per task with z ~ posterior
        num_extra_rl_steps_posterior=400, # number of additional transitions to collect per task with z ~ posterior that are only used to train the policy and NOT the encoder
        num_train_steps_per_itr=2000, # number of meta-gradient steps taken per iteration
        num_evals=2, # number of independent evals
        num_steps_per_eval=600, # number of transitions to eval on
        batch_size=256, # number of transitions in the RL batch
        embedding_batch_size=64, # number of transitions in the context batch
        embedding_mini_batch_size=64, # number of context transitions to backprop through (should equal the arg above except in the recurrent encoder case)
        max_path_length=200, # max path length for this environment
        discount=0.99, # RL discount factor
        soft_target_tau=0.005, # for SAC target network update
        policy_lr=3E-4,
        qf_lr=3E-4,
        vf_lr=3E-4,
        context_lr=3e-4,
        reward_scale=5., # scale rewards before constructing Bellman update, effectively controls weight on the entropy of the policy
        sparse_rewards=False, # whether to sparsify rewards as determined in env
        kl_lambda=.1, # weight on KL divergence term in encoder loss
        use_information_bottleneck=True, # False makes latent context deterministic
        use_next_obs_in_context=False, # use next obs if it is useful in distinguishing tasks
        update_post_train=1, # how often to resample the context when collecting data during training (in trajectories)
        num_exp_traj_eval=1, # how many exploration trajs to collect before beginning posterior sampling at test time
        recurrent=False, # recurrent or permutation-invariant encoder
        dump_eval_paths=False, # whether to save evaluation trajectories
    ),
    util_params=dict(
        base_log_dir='output',
        use_gpu=False,
        gpu_id=0,
        debug=False, # debugging triggers printing and writes logs to debug directory
        docker=False, # TODO docker is not yet supported
    )
)
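
# A hedged sketch (not part of the original file) of how an experiment script might
# override pieces of default_config; deep_update below is a hypothetical helper and
# the override values are illustrative only.
import copy

def deep_update(base, overrides):
    # recursively merge overrides into base, preserving untouched defaults
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_update(base[key], value)
        else:
            base[key] = value
    return base

config = deep_update(copy.deepcopy(default_config), dict(
    algo_params=dict(num_iterations=50),
    util_params=dict(use_gpu=True, debug=True),
))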
|
py | b402f673adc26b87203e652e817d2447e4360bb9 | '''
VGG16
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 32, 32, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 32, 32, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 16, 16, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 16, 16, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 16, 16, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 8, 8, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 8, 8, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 8, 8, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 8, 8, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 4, 4, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 4, 4, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 4, 4, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 4, 4, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 2, 2, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 1, 1, 512) 0
_________________________________________________________________
sequential_1 (Sequential) (None, 10) 133898
=================================================================
Total params: 14,848,586
Trainable params: 7,473,482
Non-trainable params: 7,375,104
Cifar10
_________________________________________________________________
for layer in model.layers[7:15]: layer.trainable = False
1562/1562 [==============================] - 76s - loss: 4.5619e-04 - acc: 0.9999 - val_loss: 1.5312 - val_acc: 0.8400
for layer in model.layers[1:10]: layer.trainable = False
1562/1562 [==============================] - 78s - loss: 0.0024 - acc: 0.9994 - val_loss: 1.0931 - val_acc: 0.8953
i, ir= 110 8.589934592000007e-06
for layer in model.layers[1:1]: layer.trainable = False
1562/1562 [==============================] - 104s - loss: 0.0096 - acc: 0.9974 - val_loss: 0.7429 - val_acc: 0.9002
=================================================================
_________________________________________________________________
'''
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Reshape, Embedding
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Model
from keras.optimizers import Adam, SGD
#from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
#from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
#from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
import numpy as np
import os
import shutil
import random
import matplotlib.pyplot as plt
#from keras.utils.visualize_util import plot
from getDataSet import getDataSet
#import h5py
def save_history(history, result_file):
    loss = history.history['loss']
    acc = history.history['acc']
    val_loss = history.history['val_loss']
    val_acc = history.history['val_acc']
    nb_epoch = len(acc)
    with open(result_file, "w") as fp:
        fp.write("epoch\tloss\tacc\tval_loss\tval_acc\n")
        for i in range(nb_epoch):
            fp.write("%d\t%f\t%f\t%f\t%f\n" % (i, loss[i], acc[i], val_loss[i], val_acc[i]))
batch_size = 32
num_classes = 18
epochs = 1
data_augmentation = True #False
img_rows = 224
img_cols = 224
result_dir = "./history"
# The data, shuffled and split between train and test sets:
#(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, y_train, x_test, y_test = getDataSet(img_rows, img_cols)
# As loaded, the data cannot be used directly, so convert it to numpy arrays.
#x_train = np.array(x_train).astype(np.float32).reshape((len(x_train),3, 32, 32)) / 255
x_train = np.array(x_train) #/ 255
y_train = np.array(y_train).astype(np.int32)
x_test = np.array(x_test) #/ 255
y_test = np.array(y_test).astype(np.int32)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Load the VGG16 model with pre-trained weights.
# The fully-connected (FC) top layers are not needed, so include_top=False.
input_tensor = Input(shape=x_train.shape[1:]) #(img_rows, img_cols, 3))
#vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
#vgg19 = VGG19(include_top=False, weights='imagenet', input_tensor=input_tensor)
#InceptionV3 = InceptionV3(include_top=False, weights='imagenet', input_tensor=input_tensor)
ResNet50 = ResNet50(include_top=False, weights='imagenet', input_tensor=input_tensor)
# Build the FC classifier head
top_model = Sequential()
top_model.add(Flatten(input_shape=ResNet50.output_shape[1:])) #vgg16,vgg19,InceptionV3
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(num_classes, activation='softmax'))
# Connect VGG16 (or the chosen base network) and the FC head
#model = Model(input=vgg16.input, output=top_model(vgg16.output))
#model = Model(input=vgg19.input, output=top_model(vgg19.output))
#model = Model(input=InceptionV3.input, output=top_model(InceptionV3.output))
model = Model(input=ResNet50.input, output=top_model(ResNet50.output))
# Freeze the layers up to just before the last conv block
# Specify which layers to train: for VGG16 use e.g. 18, 15, 10, or 1; 20 freezes all layers
# Specify which layers to train: for VGG16 use e.g. 16, 11, 7, or 1; 21 freezes all layers
# Specify which layers to train: for InceptionV3, 310 freezes all layers
# Specify which layers to train: for ResNet50, 174 freezes all layers
for layer in model.layers[1:90]:
    layer.trainable = False
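# A small hedged check (not in the original script): list each layer's index and
# trainable flag to help pick the freeze boundary referenced in the comments above.
for idx, layer in enumerate(model.layers):
    print(idx, layer.name, layer.trainable)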
# For fine-tuning SGD is often recommended, but Adam worked better here
lr = 0.00001
opt = keras.optimizers.Adam(lr, beta_1=0.5, beta_2=0.999, epsilon=1e-08, decay=1e-6)
#opt = keras.optimizers.SGD(lr=1e-4, momentum=0.9)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# Print the model summary
model.summary()
#plot(model, show_shapes=True, to_file=os.path.join(result_dir,
#model.load_weights('params_model_VGG16L3_i_190.hdf5')
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
for i in range(epochs):
    epoch = 100
    if not data_augmentation:
        print('Not using data augmentation.')
        """
        history = model.fit(x_train, y_train,
                            batch_size=batch_size,
                            nb_epoch=epoch,
                            verbose=1,
                            validation_split=0.1)
        """
        # Plot the training history
        #plot_history(history, result_dir)
        history = model.fit(x_train, y_train,
                            batch_size=batch_size,
                            epochs=epoch,
                            validation_data=(x_test, y_test),
                            shuffle=True)
        # save weights every epoch
        model.save_weights('params_model_epoch_{0:03d}.hdf5'.format(i), True)
        save_history(history, os.path.join(result_dir, 'history_epoch_{0:03d}.txt'.format(i)))
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images horizontally
            vertical_flip=False)  # do not flip images vertically
        # Compute quantities required for feature-wise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)
        # Fit the model on the batches generated by datagen.flow().
        history = model.fit_generator(datagen.flow(x_train, y_train,
                                                   batch_size=batch_size),
                                      steps_per_epoch=x_train.shape[0] // batch_size,
                                      epochs=epoch,
                                      validation_data=(x_test, y_test))
        model.save_weights('params_model_epoch_{0:03d}.hdf5'.format(i), True)
        save_history(history, os.path.join(result_dir, 'history_epoch_{0:03d}.txt'.format(i)))
        if i % 10 == 0:
            print('i, lr= ', i, lr)
            # save weights every epoch
            model.save_weights('params_model_VGG16L3_i_{0:03d}.hdf5'.format(i), True)
            """
            lr = lr*0.8
            opt = keras.optimizers.Adam(lr, beta_1=0.5, beta_2=0.999, epsilon=1e-08, decay=1e-6)
            """
            # Let's train the model using Adam
            model.compile(loss='categorical_crossentropy',
                          optimizer=opt, metrics=['accuracy'])
        else:
            continue
save_history(history, os.path.join(result_dir, 'history.txt'))
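
# A hedged follow-up (not in the original script): after training, the held-out test
# accuracy could be reported with Keras' evaluate; shown only as an optional check.
score = model.evaluate(x_test, y_test, verbose=0)
print('test loss:', score[0])
print('test accuracy:', score[1])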
"""
model = Model(input=vgg19.input, output=top_model(vgg19.output))
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 3, 224, 224) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 64, 224, 224) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 64, 224, 224) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 64, 112, 112) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 128, 112, 112) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 128, 112, 112) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 128, 56, 56) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 256, 56, 56) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 256, 56, 56) 590080
_________________________________________________________________
10block3_conv3 (Conv2D) (None, 256, 56, 56) 590080
_________________________________________________________________
block3_conv4 (Conv2D) (None, 256, 56, 56) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 256, 28, 28) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 512, 28, 28) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 512, 28, 28) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 512, 28, 28) 2359808
_________________________________________________________________
block4_conv4 (Conv2D) (None, 512, 28, 28) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 512, 14, 14) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 512, 14, 14) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 512, 14, 14) 2359808
_________________________________________________________________
20block5_conv3 (Conv2D) (None, 512, 14, 14) 2359808
_________________________________________________________________
block5_conv4 (Conv2D) (None, 512, 14, 14) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 512, 7, 7) 0
_________________________________________________________________
sequential_1 (Sequential) (None, 10) 6425354
=================================================================
Total params: 26,449,738
Trainable params: 6,425,354
Non-trainable params: 20,024,384
_________________________________________________________________
model = Model(input=ResNet50.input, output=top_model(ResNet50.output))
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 3, 224, 224) 0
__________________________________________________________________________________________________
conv1 (Conv2D) (None, 64, 112, 112) 9472 input_1[0][0]
__________________________________________________________________________________________________
bn_conv1 (BatchNormalization) (None, 64, 112, 112) 256 conv1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 64, 112, 112) 0 bn_conv1[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 64, 55, 55) 0 activation_1[0][0]
__________________________________________________________________________________________________
res2a_branch2a (Conv2D) (None, 64, 55, 55) 4160 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
bn2a_branch2a (BatchNormalizati (None, 64, 55, 55) 256 res2a_branch2a[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 64, 55, 55) 0 bn2a_branch2a[0][0]
__________________________________________________________________________________________________
res2a_branch2b (Conv2D) (None, 64, 55, 55) 36928 activation_2[0][0]
__________________________________________________________________________________________________
10bn2a_branch2b (BatchNormalizati (None, 64, 55, 55) 256 res2a_branch2b[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 64, 55, 55) 0 bn2a_branch2b[0][0]
__________________________________________________________________________________________________
res2a_branch2c (Conv2D) (None, 256, 55, 55) 16640 activation_3[0][0]
__________________________________________________________________________________________________
res2a_branch1 (Conv2D) (None, 256, 55, 55) 16640 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
bn2a_branch2c (BatchNormalizati (None, 256, 55, 55) 1024 res2a_branch2c[0][0]
__________________________________________________________________________________________________
bn2a_branch1 (BatchNormalizatio (None, 256, 55, 55) 1024 res2a_branch1[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 256, 55, 55) 0 bn2a_branch2c[0][0]
bn2a_branch1[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 256, 55, 55) 0 add_1[0][0]
__________________________________________________________________________________________________
res2b_branch2a (Conv2D) (None, 64, 55, 55) 16448 activation_4[0][0]
__________________________________________________________________________________________________
bn2b_branch2a (BatchNormalizati (None, 64, 55, 55) 256 res2b_branch2a[0][0]
__________________________________________________________________________________________________
20activation_5 (Activation) (None, 64, 55, 55) 0 bn2b_branch2a[0][0]
__________________________________________________________________________________________________
res2b_branch2b (Conv2D) (None, 64, 55, 55) 36928 activation_5[0][0]
__________________________________________________________________________________________________
bn2b_branch2b (BatchNormalizati (None, 64, 55, 55) 256 res2b_branch2b[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 64, 55, 55) 0 bn2b_branch2b[0][0]
__________________________________________________________________________________________________
res2b_branch2c (Conv2D) (None, 256, 55, 55) 16640 activation_6[0][0]
__________________________________________________________________________________________________
bn2b_branch2c (BatchNormalizati (None, 256, 55, 55) 1024 res2b_branch2c[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, 256, 55, 55) 0 bn2b_branch2c[0][0]
activation_4[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 256, 55, 55) 0 add_2[0][0]
__________________________________________________________________________________________________
res2c_branch2a (Conv2D) (None, 64, 55, 55) 16448 activation_7[0][0]
__________________________________________________________________________________________________
bn2c_branch2a (BatchNormalizati (None, 64, 55, 55) 256 res2c_branch2a[0][0]
__________________________________________________________________________________________________
30activation_8 (Activation) (None, 64, 55, 55) 0 bn2c_branch2a[0][0]
__________________________________________________________________________________________________
res2c_branch2b (Conv2D) (None, 64, 55, 55) 36928 activation_8[0][0]
__________________________________________________________________________________________________
bn2c_branch2b (BatchNormalizati (None, 64, 55, 55) 256 res2c_branch2b[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 64, 55, 55) 0 bn2c_branch2b[0][0]
__________________________________________________________________________________________________
res2c_branch2c (Conv2D) (None, 256, 55, 55) 16640 activation_9[0][0]
__________________________________________________________________________________________________
bn2c_branch2c (BatchNormalizati (None, 256, 55, 55) 1024 res2c_branch2c[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, 256, 55, 55) 0 bn2c_branch2c[0][0]
activation_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 256, 55, 55) 0 add_3[0][0]
__________________________________________________________________________________________________
res3a_branch2a (Conv2D) (None, 128, 28, 28) 32896 activation_10[0][0]
__________________________________________________________________________________________________
bn3a_branch2a (BatchNormalizati (None, 128, 28, 28) 512 res3a_branch2a[0][0]
__________________________________________________________________________________________________
40activation_11 (Activation) (None, 128, 28, 28) 0 bn3a_branch2a[0][0]
__________________________________________________________________________________________________
res3a_branch2b (Conv2D) (None, 128, 28, 28) 147584 activation_11[0][0]
__________________________________________________________________________________________________
bn3a_branch2b (BatchNormalizati (None, 128, 28, 28) 512 res3a_branch2b[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 128, 28, 28) 0 bn3a_branch2b[0][0]
__________________________________________________________________________________________________
res3a_branch2c (Conv2D) (None, 512, 28, 28) 66048 activation_12[0][0]
__________________________________________________________________________________________________
res3a_branch1 (Conv2D) (None, 512, 28, 28) 131584 activation_10[0][0]
__________________________________________________________________________________________________
bn3a_branch2c (BatchNormalizati (None, 512, 28, 28) 2048 res3a_branch2c[0][0]
__________________________________________________________________________________________________
bn3a_branch1 (BatchNormalizatio (None, 512, 28, 28) 2048 res3a_branch1[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, 512, 28, 28) 0 bn3a_branch2c[0][0]
bn3a_branch1[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 512, 28, 28) 0 add_4[0][0]
__________________________________________________________________________________________________
50res3b_branch2a (Conv2D) (None, 128, 28, 28) 65664 activation_13[0][0]
__________________________________________________________________________________________________
bn3b_branch2a (BatchNormalizati (None, 128, 28, 28) 512 res3b_branch2a[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 128, 28, 28) 0 bn3b_branch2a[0][0]
__________________________________________________________________________________________________
res3b_branch2b (Conv2D) (None, 128, 28, 28) 147584 activation_14[0][0]
__________________________________________________________________________________________________
bn3b_branch2b (BatchNormalizati (None, 128, 28, 28) 512 res3b_branch2b[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 128, 28, 28) 0 bn3b_branch2b[0][0]
__________________________________________________________________________________________________
res3b_branch2c (Conv2D) (None, 512, 28, 28) 66048 activation_15[0][0]
__________________________________________________________________________________________________
bn3b_branch2c (BatchNormalizati (None, 512, 28, 28) 2048 res3b_branch2c[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, 512, 28, 28) 0 bn3b_branch2c[0][0]
activation_13[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 512, 28, 28) 0 add_5[0][0]
__________________________________________________________________________________________________
60res3c_branch2a (Conv2D) (None, 128, 28, 28) 65664 activation_16[0][0]
__________________________________________________________________________________________________
bn3c_branch2a (BatchNormalizati (None, 128, 28, 28) 512 res3c_branch2a[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 128, 28, 28) 0 bn3c_branch2a[0][0]
__________________________________________________________________________________________________
res3c_branch2b (Conv2D) (None, 128, 28, 28) 147584 activation_17[0][0]
__________________________________________________________________________________________________
bn3c_branch2b (BatchNormalizati (None, 128, 28, 28) 512 res3c_branch2b[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 128, 28, 28) 0 bn3c_branch2b[0][0]
__________________________________________________________________________________________________
res3c_branch2c (Conv2D) (None, 512, 28, 28) 66048 activation_18[0][0]
__________________________________________________________________________________________________
bn3c_branch2c (BatchNormalizati (None, 512, 28, 28) 2048 res3c_branch2c[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, 512, 28, 28) 0 bn3c_branch2c[0][0]
activation_16[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 512, 28, 28) 0 add_6[0][0]
__________________________________________________________________________________________________
70res3d_branch2a (Conv2D) (None, 128, 28, 28) 65664 activation_19[0][0]
__________________________________________________________________________________________________
bn3d_branch2a (BatchNormalizati (None, 128, 28, 28) 512 res3d_branch2a[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 128, 28, 28) 0 bn3d_branch2a[0][0]
__________________________________________________________________________________________________
res3d_branch2b (Conv2D) (None, 128, 28, 28) 147584 activation_20[0][0]
__________________________________________________________________________________________________
bn3d_branch2b (BatchNormalizati (None, 128, 28, 28) 512 res3d_branch2b[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 128, 28, 28) 0 bn3d_branch2b[0][0]
__________________________________________________________________________________________________
res3d_branch2c (Conv2D) (None, 512, 28, 28) 66048 activation_21[0][0]
__________________________________________________________________________________________________
bn3d_branch2c (BatchNormalizati (None, 512, 28, 28) 2048 res3d_branch2c[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, 512, 28, 28) 0 bn3d_branch2c[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 512, 28, 28) 0 add_7[0][0]
__________________________________________________________________________________________________
80res4a_branch2a (Conv2D) (None, 256, 14, 14) 131328 activation_22[0][0]
__________________________________________________________________________________________________
bn4a_branch2a (BatchNormalizati (None, 256, 14, 14) 1024 res4a_branch2a[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 256, 14, 14) 0 bn4a_branch2a[0][0]
__________________________________________________________________________________________________
res4a_branch2b (Conv2D) (None, 256, 14, 14) 590080 activation_23[0][0]
__________________________________________________________________________________________________
bn4a_branch2b (BatchNormalizati (None, 256, 14, 14) 1024 res4a_branch2b[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 256, 14, 14) 0 bn4a_branch2b[0][0]
__________________________________________________________________________________________________
res4a_branch2c (Conv2D) (None, 1024, 14, 14) 263168 activation_24[0][0]
__________________________________________________________________________________________________
res4a_branch1 (Conv2D) (None, 1024, 14, 14) 525312 activation_22[0][0]
__________________________________________________________________________________________________
bn4a_branch2c (BatchNormalizati (None, 1024, 14, 14) 4096 res4a_branch2c[0][0]
__________________________________________________________________________________________________
bn4a_branch1 (BatchNormalizatio (None, 1024, 14, 14) 4096 res4a_branch1[0][0]
__________________________________________________________________________________________________
90add_8 (Add) (None, 1024, 14, 14) 0 bn4a_branch2c[0][0]
bn4a_branch1[0][0]
__52sec________________________________________________________________________________________________
activation_25 (Activation) (None, 1024, 14, 14) 0 add_8[0][0]
__________________________________________________________________________________________________
res4b_branch2a (Conv2D) (None, 256, 14, 14) 262400 activation_25[0][0]
__________________________________________________________________________________________________
bn4b_branch2a (BatchNormalizati (None, 256, 14, 14) 1024 res4b_branch2a[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 256, 14, 14) 0 bn4b_branch2a[0][0]
__________________________________________________________________________________________________
res4b_branch2b (Conv2D) (None, 256, 14, 14) 590080 activation_26[0][0]
__________________________________________________________________________________________________
bn4b_branch2b (BatchNormalizati (None, 256, 14, 14) 1024 res4b_branch2b[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 256, 14, 14) 0 bn4b_branch2b[0][0]
__________________________________________________________________________________________________
res4b_branch2c (Conv2D) (None, 1024, 14, 14) 263168 activation_27[0][0]
__________________________________________________________________________________________________
bn4b_branch2c (BatchNormalizati (None, 1024, 14, 14) 4096 res4b_branch2c[0][0]
__________________________________________________________________________________________________
100add_9 (Add) (None, 1024, 14, 14) 0 bn4b_branch2c[0][0]
activation_25[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 1024, 14, 14) 0 add_9[0][0]
__________________________________________________________________________________________________
res4c_branch2a (Conv2D) (None, 256, 14, 14) 262400 activation_28[0][0]
__________________________________________________________________________________________________
bn4c_branch2a (BatchNormalizati (None, 256, 14, 14) 1024 res4c_branch2a[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 256, 14, 14) 0 bn4c_branch2a[0][0]
__________________________________________________________________________________________________
res4c_branch2b (Conv2D) (None, 256, 14, 14) 590080 activation_29[0][0]
__________________________________________________________________________________________________
bn4c_branch2b (BatchNormalizati (None, 256, 14, 14) 1024 res4c_branch2b[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 256, 14, 14) 0 bn4c_branch2b[0][0]
__________________________________________________________________________________________________
res4c_branch2c (Conv2D) (None, 1024, 14, 14) 263168 activation_30[0][0]
__________________________________________________________________________________________________
bn4c_branch2c (BatchNormalizati (None, 1024, 14, 14) 4096 res4c_branch2c[0][0]
__________________________________________________________________________________________________
110add_10 (Add) (None, 1024, 14, 14) 0 bn4c_branch2c[0][0]
activation_28[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 1024, 14, 14) 0 add_10[0][0]
__________________________________________________________________________________________________
res4d_branch2a (Conv2D) (None, 256, 14, 14) 262400 activation_31[0][0]
__________________________________________________________________________________________________
bn4d_branch2a (BatchNormalizati (None, 256, 14, 14) 1024 res4d_branch2a[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 256, 14, 14) 0 bn4d_branch2a[0][0]
__________________________________________________________________________________________________
res4d_branch2b (Conv2D) (None, 256, 14, 14) 590080 activation_32[0][0]
__________________________________________________________________________________________________
bn4d_branch2b (BatchNormalizati (None, 256, 14, 14) 1024 res4d_branch2b[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 256, 14, 14) 0 bn4d_branch2b[0][0]
__________________________________________________________________________________________________
res4d_branch2c (Conv2D) (None, 1024, 14, 14) 263168 activation_33[0][0]
__________________________________________________________________________________________________
bn4d_branch2c (BatchNormalizati (None, 1024, 14, 14) 4096 res4d_branch2c[0][0]
__________________________________________________________________________________________________
120add_11 (Add) (None, 1024, 14, 14) 0 bn4d_branch2c[0][0]
activation_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 1024, 14, 14) 0 add_11[0][0]
__________________________________________________________________________________________________
res4e_branch2a (Conv2D) (None, 256, 14, 14) 262400 activation_34[0][0]
__________________________________________________________________________________________________
bn4e_branch2a (BatchNormalizati (None, 256, 14, 14) 1024 res4e_branch2a[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 256, 14, 14) 0 bn4e_branch2a[0][0]
__________________________________________________________________________________________________
res4e_branch2b (Conv2D) (None, 256, 14, 14) 590080 activation_35[0][0]
__________________________________________________________________________________________________
bn4e_branch2b (BatchNormalizati (None, 256, 14, 14) 1024 res4e_branch2b[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 256, 14, 14) 0 bn4e_branch2b[0][0]
__________________________________________________________________________________________________
res4e_branch2c (Conv2D) (None, 1024, 14, 14) 263168 activation_36[0][0]
__________________________________________________________________________________________________
bn4e_branch2c (BatchNormalizati (None, 1024, 14, 14) 4096 res4e_branch2c[0][0]
__________________________________________________________________________________________________
130add_12 (Add) (None, 1024, 14, 14) 0 bn4e_branch2c[0][0]
activation_34[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 1024, 14, 14) 0 add_12[0][0]
__________________________________________________________________________________________________
res4f_branch2a (Conv2D) (None, 256, 14, 14) 262400 activation_37[0][0]
__________________________________________________________________________________________________
bn4f_branch2a (BatchNormalizati (None, 256, 14, 14) 1024 res4f_branch2a[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 256, 14, 14) 0 bn4f_branch2a[0][0]
__________________________________________________________________________________________________
res4f_branch2b (Conv2D) (None, 256, 14, 14) 590080 activation_38[0][0]
__________________________________________________________________________________________________
bn4f_branch2b (BatchNormalizati (None, 256, 14, 14) 1024 res4f_branch2b[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 256, 14, 14) 0 bn4f_branch2b[0][0]
__________________________________________________________________________________________________
res4f_branch2c (Conv2D) (None, 1024, 14, 14) 263168 activation_39[0][0]
__________________________________________________________________________________________________
bn4f_branch2c (BatchNormalizati (None, 1024, 14, 14) 4096 res4f_branch2c[0][0]
__________________________________________________________________________________________________
140add_13 (Add) (None, 1024, 14, 14) 0 bn4f_branch2c[0][0]
activation_37[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 1024, 14, 14) 0 add_13[0][0]
__________________________________________________________________________________________________
res5a_branch2a (Conv2D) (None, 512, 7, 7) 524800 activation_40[0][0]
__________________________________________________________________________________________________
bn5a_branch2a (BatchNormalizati (None, 512, 7, 7) 2048 res5a_branch2a[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 512, 7, 7) 0 bn5a_branch2a[0][0]
__________________________________________________________________________________________________
res5a_branch2b (Conv2D) (None, 512, 7, 7) 2359808 activation_41[0][0]
__________________________________________________________________________________________________
bn5a_branch2b (BatchNormalizati (None, 512, 7, 7) 2048 res5a_branch2b[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 512, 7, 7) 0 bn5a_branch2b[0][0]
__________________________________________________________________________________________________
res5a_branch2c (Conv2D) (None, 2048, 7, 7) 1050624 activation_42[0][0]
__________________________________________________________________________________________________
res5a_branch1 (Conv2D) (None, 2048, 7, 7) 2099200 activation_40[0][0]
__________________________________________________________________________________________________
150bn5a_branch2c (BatchNormalizati (None, 2048, 7, 7) 8192 res5a_branch2c[0][0]
__________________________________________________________________________________________________
bn5a_branch1 (BatchNormalizatio (None, 2048, 7, 7) 8192 res5a_branch1[0][0]
__________________________________________________________________________________________________
add_14 (Add) (None, 2048, 7, 7) 0 bn5a_branch2c[0][0]
bn5a_branch1[0][0]
____152____35-38sec__________________________________________________________________________________________
activation_43 (Activation) (None, 2048, 7, 7) 0 add_14[0][0]
__________________________________________________________________________________________________
res5b_branch2a (Conv2D) (None, 512, 7, 7) 1049088 activation_43[0][0]
__________________________________________________________________________________________________
bn5b_branch2a (BatchNormalizati (None, 512, 7, 7) 2048 res5b_branch2a[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 512, 7, 7) 0 bn5b_branch2a[0][0]
__________________________________________________________________________________________________
res5b_branch2b (Conv2D) (None, 512, 7, 7) 2359808 activation_44[0][0]
__________________________________________________________________________________________________
bn5b_branch2b (BatchNormalizati (None, 512, 7, 7) 2048 res5b_branch2b[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 512, 7, 7) 0 bn5b_branch2b[0][0]
__________________________________________________________________________________________________
160res5b_branch2c (Conv2D) (None, 2048, 7, 7) 1050624 activation_45[0][0]
__________________________________________________________________________________________________
bn5b_branch2c (BatchNormalizati (None, 2048, 7, 7) 8192 res5b_branch2c[0][0]
__________________________________________________________________________________________________
add_15 (Add) (None, 2048, 7, 7) 0 bn5b_branch2c[0][0]
activation_43[0][0]
___162_______________________________________________________________________________________________
activation_46 (Activation) (None, 2048, 7, 7) 0 add_15[0][0]
__________________________________________________________________________________________________
res5c_branch2a (Conv2D) (None, 512, 7, 7) 1049088 activation_46[0][0]
__________________________________________________________________________________________________
bn5c_branch2a (BatchNormalizati (None, 512, 7, 7) 2048 res5c_branch2a[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 512, 7, 7) 0 bn5c_branch2a[0][0]
__________________________________________________________________________________________________
res5c_branch2b (Conv2D) (None, 512, 7, 7) 2359808 activation_47[0][0]
__________________________________________________________________________________________________
bn5c_branch2b (BatchNormalizati (None, 512, 7, 7) 2048 res5c_branch2b[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 512, 7, 7) 0 bn5c_branch2b[0][0]
__________________________________________________________________________________________________
170res5c_branch2c (Conv2D) (None, 2048, 7, 7) 1050624 activation_48[0][0]
__________________________________________________________________________________________________
bn5c_branch2c (BatchNormalizati (None, 2048, 7, 7) 8192 res5c_branch2c[0][0]
__________________________________________________________________________________________________
add_16 (Add) (None, 2048, 7, 7) 0 bn5c_branch2c[0][0]
activation_46[0][0]
____172___32sec___________________________________________________________________________________________
activation_49 (Activation) (None, 2048, 7, 7) 0 add_16[0][0]
__________________________________________________________________________________________________
avg_pool (AveragePooling2D) (None, 2048, 1, 1) 0 activation_49[0][0]
__________________________________________________________________________________________________
175sequential_1 (Sequential) (None, 10) 527114 avg_pool[0][0]
==================================================================================================
for layer in model.layers[1:90]
==================================================================================================
Total params: 24,114,826
Trainable params: 21,096,714
Non-trainable params: 3,018,112
__________________________________________________________________________________________________
model = Model(input=InceptionV3.input, output=top_model(InceptionV3.output))
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 3, 224, 224) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 32, 111, 111) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 32, 111, 111) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 32, 111, 111) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 32, 109, 109) 9216 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 32, 109, 109) 96 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 32, 109, 109) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 64, 109, 109) 18432 activation_2[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 64, 109, 109) 192 conv2d_3[0][0]
__________________________________________________________________________________________________
10 activation_3 (Activation) (None, 64, 109, 109) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 64, 54, 54) 0 activation_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 80, 54, 54) 5120 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 80, 54, 54) 240 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 80, 54, 54) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 192, 52, 52) 138240 activation_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 192, 52, 52) 576 conv2d_5[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 192, 52, 52) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 192, 25, 25) 0 activation_5[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 64, 25, 25) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
20 batch_normalization_9 (BatchNor (None, 64, 25, 25) 192 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 64, 25, 25) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 48, 25, 25) 9216 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 96, 25, 25) 55296 activation_9[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 48, 25, 25) 144 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 96, 25, 25) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 48, 25, 25) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 96, 25, 25) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 192, 25, 25) 0 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 64, 25, 25) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
30 conv2d_8 (Conv2D) (None, 64, 25, 25) 76800 activation_7[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 96, 25, 25) 82944 activation_10[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 32, 25, 25) 6144 average_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 64, 25, 25) 192 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 64, 25, 25) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 96, 25, 25) 288 conv2d_11[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 32, 25, 25) 96 conv2d_12[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 64, 25, 25) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 64, 25, 25) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 96, 25, 25) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
40 activation_12 (Activation) (None, 32, 25, 25) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
mixed0 (Concatenate) (None, 256, 25, 25) 0 activation_6[0][0]
activation_8[0][0]
activation_11[0][0]
activation_12[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 64, 25, 25) 16384 mixed0[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 64, 25, 25) 192 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 64, 25, 25) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 48, 25, 25) 12288 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 96, 25, 25) 55296 activation_16[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 48, 25, 25) 144 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 96, 25, 25) 288 conv2d_17[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 48, 25, 25) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 96, 25, 25) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 256, 25, 25) 0 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 64, 25, 25) 16384 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 64, 25, 25) 76800 activation_14[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 96, 25, 25) 82944 activation_17[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 64, 25, 25) 16384 average_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 64, 25, 25) 192 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 64, 25, 25) 192 conv2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 96, 25, 25) 288 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 64, 25, 25) 192 conv2d_19[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 64, 25, 25) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 64, 25, 25) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 96, 25, 25) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 64, 25, 25) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
mixed1 (Concatenate) (None, 288, 25, 25) 0 activation_13[0][0]
activation_15[0][0]
activation_18[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 64, 25, 25) 18432 mixed1[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 64, 25, 25) 192 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 64, 25, 25) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 48, 25, 25) 13824 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 96, 25, 25) 55296 activation_23[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 48, 25, 25) 144 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 96, 25, 25) 288 conv2d_24[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 48, 25, 25) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 96, 25, 25) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 288, 25, 25) 0 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 64, 25, 25) 18432 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 64, 25, 25) 76800 activation_21[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 96, 25, 25) 82944 activation_24[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 64, 25, 25) 18432 average_pooling2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 64, 25, 25) 192 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 64, 25, 25) 192 conv2d_22[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 96, 25, 25) 288 conv2d_25[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 64, 25, 25) 192 conv2d_26[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 64, 25, 25) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 64, 25, 25) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 96, 25, 25) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 64, 25, 25) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
mixed2 (Concatenate) (None, 288, 25, 25) 0 activation_20[0][0]
activation_22[0][0]
activation_25[0][0]
activation_26[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 64, 25, 25) 18432 mixed2[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 64, 25, 25) 192 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 64, 25, 25) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 96, 25, 25) 55296 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 96, 25, 25) 288 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 96, 25, 25) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 384, 12, 12) 995328 mixed2[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 96, 12, 12) 82944 activation_29[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 384, 12, 12) 1152 conv2d_27[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 96, 12, 12) 288 conv2d_30[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 384, 12, 12) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 96, 12, 12) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 288, 12, 12) 0 mixed2[0][0]
__________________________________________________________________________________________________
mixed3 (Concatenate) (None, 768, 12, 12) 0 activation_27[0][0]
activation_30[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 128, 12, 12) 98304 mixed3[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 128, 12, 12) 384 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 128, 12, 12) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 128, 12, 12) 114688 activation_35[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 128, 12, 12) 384 conv2d_36[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 128, 12, 12) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 128, 12, 12) 98304 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 128, 12, 12) 114688 activation_36[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 128, 12, 12) 384 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 128, 12, 12) 384 conv2d_37[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 128, 12, 12) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 128, 12, 12) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 128, 12, 12) 114688 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 128, 12, 12) 114688 activation_37[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 128, 12, 12) 384 conv2d_33[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 128, 12, 12) 384 conv2d_38[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 128, 12, 12) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 128, 12, 12) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 768, 12, 12) 0 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 192, 12, 12) 147456 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 192, 12, 12) 172032 activation_33[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 192, 12, 12) 172032 activation_38[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 192, 12, 12) 147456 average_pooling2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 192, 12, 12) 576 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 192, 12, 12) 576 conv2d_34[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 192, 12, 12) 576 conv2d_39[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 192, 12, 12) 576 conv2d_40[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 192, 12, 12) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 192, 12, 12) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 192, 12, 12) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 192, 12, 12) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
mixed4 (Concatenate) (None, 768, 12, 12) 0 activation_31[0][0]
activation_34[0][0]
activation_39[0][0]
activation_40[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 160, 12, 12) 122880 mixed4[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 160, 12, 12) 480 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 160, 12, 12) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 160, 12, 12) 179200 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 160, 12, 12) 480 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 160, 12, 12) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 160, 12, 12) 122880 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 160, 12, 12) 179200 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 160, 12, 12) 480 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 160, 12, 12) 480 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 160, 12, 12) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 160, 12, 12) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 160, 12, 12) 179200 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 160, 12, 12) 179200 activation_47[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 160, 12, 12) 480 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 160, 12, 12) 480 conv2d_48[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 160, 12, 12) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 160, 12, 12) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 768, 12, 12) 0 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 192, 12, 12) 147456 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 192, 12, 12) 215040 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 192, 12, 12) 215040 activation_48[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 192, 12, 12) 147456 average_pooling2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 192, 12, 12) 576 conv2d_41[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 192, 12, 12) 576 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 192, 12, 12) 576 conv2d_49[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 192, 12, 12) 576 conv2d_50[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 192, 12, 12) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 192, 12, 12) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 192, 12, 12) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 192, 12, 12) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
mixed5 (Concatenate) (None, 768, 12, 12) 0 activation_41[0][0]
activation_44[0][0]
activation_49[0][0]
activation_50[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 160, 12, 12) 122880 mixed5[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 160, 12, 12) 480 conv2d_55[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 160, 12, 12) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 160, 12, 12) 179200 activation_55[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 160, 12, 12) 480 conv2d_56[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 160, 12, 12) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 160, 12, 12) 122880 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 160, 12, 12) 179200 activation_56[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 160, 12, 12) 480 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 160, 12, 12) 480 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 160, 12, 12) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 160, 12, 12) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 160, 12, 12) 179200 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 160, 12, 12) 179200 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 160, 12, 12) 480 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 160, 12, 12) 480 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 160, 12, 12) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 160, 12, 12) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 768, 12, 12) 0 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 192, 12, 12) 147456 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 192, 12, 12) 215040 activation_53[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 192, 12, 12) 215040 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 192, 12, 12) 147456 average_pooling2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 192, 12, 12) 576 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 192, 12, 12) 576 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 192, 12, 12) 576 conv2d_59[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 192, 12, 12) 576 conv2d_60[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 192, 12, 12) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 192, 12, 12) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 192, 12, 12) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 192, 12, 12) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
mixed6 (Concatenate) (None, 768, 12, 12) 0 activation_51[0][0]
activation_54[0][0]
activation_59[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 192, 12, 12) 147456 mixed6[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 192, 12, 12) 576 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 192, 12, 12) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 192, 12, 12) 258048 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 192, 12, 12) 576 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 192, 12, 12) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 192, 12, 12) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 192, 12, 12) 258048 activation_66[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 192, 12, 12) 576 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 192, 12, 12) 576 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 192, 12, 12) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 192, 12, 12) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 192, 12, 12) 258048 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 192, 12, 12) 258048 activation_67[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 192, 12, 12) 576 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 192, 12, 12) 576 conv2d_68[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 192, 12, 12) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 192, 12, 12) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 768, 12, 12) 0 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 192, 12, 12) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 192, 12, 12) 258048 activation_63[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 192, 12, 12) 258048 activation_68[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 192, 12, 12) 147456 average_pooling2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 192, 12, 12) 576 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 192, 12, 12) 576 conv2d_64[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 192, 12, 12) 576 conv2d_69[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 192, 12, 12) 576 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 192, 12, 12) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 192, 12, 12) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 192, 12, 12) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 192, 12, 12) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
mixed7 (Concatenate) (None, 768, 12, 12) 0 activation_61[0][0]
activation_64[0][0]
activation_69[0][0]
activation_70[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 192, 12, 12) 147456 mixed7[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 192, 12, 12) 576 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 192, 12, 12) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 192, 12, 12) 258048 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 192, 12, 12) 576 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 192, 12, 12) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 192, 12, 12) 147456 mixed7[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 192, 12, 12) 258048 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 192, 12, 12) 576 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 192, 12, 12) 576 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 192, 12, 12) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 192, 12, 12) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 320, 5, 5) 552960 activation_71[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 192, 5, 5) 331776 activation_75[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 320, 5, 5) 960 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 192, 5, 5) 576 conv2d_76[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 320, 5, 5) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 192, 5, 5) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 768, 5, 5) 0 mixed7[0][0]
__________________________________________________________________________________________________
mixed8 (Concatenate) (None, 1280, 5, 5) 0 activation_72[0][0]
activation_76[0][0]
max_pooling2d_4[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 448, 5, 5) 573440 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 448, 5, 5) 1344 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 448, 5, 5) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 384, 5, 5) 491520 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 384, 5, 5) 1548288 activation_81[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 384, 5, 5) 1152 conv2d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 384, 5, 5) 1152 conv2d_82[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 384, 5, 5) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 384, 5, 5) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 384, 5, 5) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 384, 5, 5) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 384, 5, 5) 442368 activation_82[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 384, 5, 5) 442368 activation_82[0][0]
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 1280, 5, 5) 0 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 320, 5, 5) 409600 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 384, 5, 5) 1152 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 384, 5, 5) 1152 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 384, 5, 5) 1152 conv2d_83[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 384, 5, 5) 1152 conv2d_84[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 192, 5, 5) 245760 average_pooling2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 320, 5, 5) 960 conv2d_77[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 384, 5, 5) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 384, 5, 5) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 384, 5, 5) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 384, 5, 5) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 192, 5, 5) 576 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 320, 5, 5) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
mixed9_0 (Concatenate) (None, 768, 5, 5) 0 activation_79[0][0]
activation_80[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 768, 5, 5) 0 activation_83[0][0]
activation_84[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 192, 5, 5) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
mixed9 (Concatenate) (None, 2048, 5, 5) 0 activation_77[0][0]
mixed9_0[0][0]
concatenate_1[0][0]
activation_85[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 448, 5, 5) 917504 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 448, 5, 5) 1344 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 448, 5, 5) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 384, 5, 5) 786432 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 384, 5, 5) 1548288 activation_90[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 384, 5, 5) 1152 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 384, 5, 5) 1152 conv2d_91[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 384, 5, 5) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 384, 5, 5) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 384, 5, 5) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 384, 5, 5) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 384, 5, 5) 442368 activation_91[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 384, 5, 5) 442368 activation_91[0][0]
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 2048, 5, 5) 0 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 320, 5, 5) 655360 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 384, 5, 5) 1152 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 384, 5, 5) 1152 conv2d_89[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 384, 5, 5) 1152 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 384, 5, 5) 1152 conv2d_93[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 192, 5, 5) 393216 average_pooling2d_9[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 320, 5, 5) 960 conv2d_86[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 384, 5, 5) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 384, 5, 5) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 384, 5, 5) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 384, 5, 5) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 192, 5, 5) 576 conv2d_94[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 320, 5, 5) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 768, 5, 5) 0 activation_88[0][0]
activation_89[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 768, 5, 5) 0 activation_92[0][0]
activation_93[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 192, 5, 5) 0 batch_normalization_94[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 2048, 5, 5) 0 activation_86[0][0]
mixed9_1[0][0]
concatenate_2[0][0]
activation_94[0][0]
__________________________________________________________________________________________________
sequential_1 (Sequential) (None, 10) 13110026 mixed10[0][0]
==================================================================================================
Total params: 34,912,810
Trainable params: 32,819,818
Non-trainable params: 2,092,992
__________________________________________________________________________________________________
"""
|
py | b402f6b3bc9e468d6672f088132529836748356c | __author__ = 'jmeline'
|
py | b402f770b8ceda38e529f3b1f8efc239c529302c | # Distributed under the MIT License.
# See LICENSE.txt for details.
from spectre.DataStructures import (DataVector, ExtentsAndTensorVolumeData,
TensorComponent, ElementVolumeData)
from spectre.Spectral import Basis, Quadrature
import unittest
import numpy as np
import numpy.testing as npt
class TestTensorData(unittest.TestCase):
# Tests for TensorComponent functions
def test_tensor_component(self):
# Set up Tensor Component
tensor_component = TensorComponent("tensor component",
DataVector([1.5, 1.1]))
# Test name
self.assertEqual(tensor_component.name, "tensor component")
tensor_component.name = "new tensor component"
self.assertEqual(tensor_component.name, "new tensor component")
# Test data
npt.assert_array_almost_equal(np.array(tensor_component.data),
np.array([1.5, 1.1]))
tensor_component.data = DataVector([6.7, 3.2])
npt.assert_array_almost_equal(np.array(tensor_component.data),
np.array([6.7, 3.2]))
# Test str, repr
self.assertEqual(str(tensor_component),
"(new tensor component, (6.7,3.2))")
self.assertEqual(repr(tensor_component),
"(new tensor component, (6.7,3.2))")
# Tests for ExtentsAndTensorVolumeData functions
def test_extents_and_tensor_volume_data(self):
# Set up Extents and Tensor Volume data
tensor_component_1 = TensorComponent("tensor component one",
DataVector([1.5, 1.1]))
tensor_component_2 = TensorComponent("tensor component two",
DataVector([7.1, 5]))
extents_and_data = ExtentsAndTensorVolumeData(
[1, 2, 3, 4], [tensor_component_1, tensor_component_2])
# Test extents
self.assertEqual(extents_and_data.extents, [1, 2, 3, 4])
extents_and_data.extents = [5, 6, 7, 8]
self.assertEqual(extents_and_data.extents, [5, 6, 7, 8])
# Test tensor components
self.assertEqual(extents_and_data.tensor_components,
[tensor_component_1, tensor_component_2])
extents_and_data.tensor_components = [
tensor_component_1, tensor_component_1
]
self.assertEqual(extents_and_data.tensor_components,
[tensor_component_1, tensor_component_1])
# Test str, repr
self.assertEqual(
str(extents_and_data),
"((5,6,7,8),((tensor component one, (1.5,1.1)"
"),(tensor component one, (1.5,1.1))))")
self.assertEqual(
repr(extents_and_data),
"((5,6,7,8),((tensor component one, (1.5,1.1)"
"),(tensor component one, (1.5,1.1))))")
# Tests for ExtentsAndTensorVolumeData functions
def test_element_volume_data(self):
# Set up Extents and Tensor Volume data
tensor_component_1 = TensorComponent("tensor component one",
DataVector([1.5, 1.1]))
tensor_component_2 = TensorComponent("tensor component two",
DataVector([7.1, 5]))
basis = Basis.Legendre
quad = Quadrature.Gauss
element_data = ElementVolumeData(
[1, 2, 3, 4], [tensor_component_1, tensor_component_2],
[basis, basis], [quad, quad])
# Test extents
self.assertEqual(element_data.extents, [1, 2, 3, 4])
element_data.extents = [5, 6, 7, 8]
self.assertEqual(element_data.extents, [5, 6, 7, 8])
# Test tensor components
self.assertEqual(element_data.tensor_components,
[tensor_component_1, tensor_component_2])
element_data.tensor_components = [
tensor_component_1, tensor_component_1
]
self.assertEqual(element_data.tensor_components,
[tensor_component_1, tensor_component_1])
# Test str, repr
self.assertEqual(
str(element_data), "((5,6,7,8),((tensor component one, (1.5,1.1)"
"),(tensor component one, (1.5,1.1))))")
self.assertEqual(
repr(element_data), "((5,6,7,8),((tensor component one, (1.5,1.1)"
"),(tensor component one, (1.5,1.1))))")
# Test basis and quadrature
self.assertEqual(element_data.basis, [basis, basis])
self.assertEqual(element_data.quadrature, [quad, quad])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | b402f82810faa9edafd5a66174c32a259f9534a5 | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Optional
from prometheus_client import Counter, Histogram
from synapse.logging.context import make_deferred_yieldable
from synapse.util import json_decoder, json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
set_counter = Counter(
"synapse_external_cache_set",
"Number of times we set a cache",
labelnames=["cache_name"],
)
get_counter = Counter(
"synapse_external_cache_get",
"Number of times we get a cache",
labelnames=["cache_name", "hit"],
)
response_timer = Histogram(
"synapse_external_cache_response_time_seconds",
"Time taken to get a response from Redis for a cache get/set request",
labelnames=["method"],
buckets=(
0.001,
0.002,
0.005,
0.01,
0.02,
0.05,
),
)
logger = logging.getLogger(__name__)
class ExternalCache:
"""A cache backed by an external Redis. Does nothing if no Redis is
configured.
"""
def __init__(self, hs: "HomeServer"):
self._redis_connection = hs.get_outbound_redis_connection()
def _get_redis_key(self, cache_name: str, key: str) -> str:
return "cache_v1:%s:%s" % (cache_name, key)
def is_enabled(self) -> bool:
"""Whether the external cache is used or not.
It's safe to use the cache when this returns false, the methods will
just no-op, but the function is useful to avoid doing unnecessary work.
"""
return self._redis_connection is not None
async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> None:
"""Add the key/value to the named cache, with the expiry time given."""
if self._redis_connection is None:
return
set_counter.labels(cache_name).inc()
# txredisapi requires the value to be string, bytes or numbers, so we
# encode stuff in JSON.
encoded_value = json_encoder.encode(value)
logger.debug("Caching %s %s: %r", cache_name, key, encoded_value)
with response_timer.labels("set").time():
return await make_deferred_yieldable(
self._redis_connection.set(
self._get_redis_key(cache_name, key),
encoded_value,
pexpire=expiry_ms,
)
)
async def get(self, cache_name: str, key: str) -> Optional[Any]:
"""Look up a key/value in the named cache."""
if self._redis_connection is None:
return None
with response_timer.labels("get").time():
result = await make_deferred_yieldable(
self._redis_connection.get(self._get_redis_key(cache_name, key))
)
logger.debug("Got cache result %s %s: %r", cache_name, key, result)
get_counter.labels(cache_name, result is not None).inc()
if not result:
return None
# For some reason integer values come back from Redis as ints rather than
# JSON strings, so pass them through unchanged.
if isinstance(result, int):
return result
return json_decoder.decode(result)
|
py | b402f967b6546055d3b217247b202c6f8d780377 | """
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019-2020 Abhishek Thakur(@abhiTronix) <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the necessary packages
import os
import cv2
import queue
import pytest
import logging as log
import platform
import tempfile
import subprocess
from mpegdash.parser import MPEGDASHParser
from vidgear.gears import CamGear, StreamGear
from vidgear.gears.helper import logger_handler
# define test logger
logger = log.getLogger("Test_Streamgear")
logger.propagate = False
logger.addHandler(logger_handler())
logger.setLevel(log.DEBUG)
# define machine os
_windows = True if os.name == "nt" else False
def return_testvideo_path(fmt="av"):
"""
returns Test video path
"""
supported_fmts = {
"av": "BigBuckBunny_4sec.mp4",
"vo": "BigBuckBunny_4sec_VO.mp4",
"ao": "BigBuckBunny_4sec_AO.aac",
}
req_fmt = fmt if (fmt in supported_fmts) else "av"
path = "{}/Downloads/Test_videos/{}".format(
tempfile.gettempdir(), supported_fmts[req_fmt]
)
return os.path.abspath(path)
def check_valid_mpd(file="", exp_reps=1):
"""
checks if given file is a valid MPD(MPEG-DASH Manifest file)
"""
if not file or not os.path.isfile(file):
return False
all_reprs = []
all_adapts = []
try:
mpd = MPEGDASHParser.parse(file)
for period in mpd.periods:
for adapt_set in period.adaptation_sets:
all_adapts.append(adapt_set)
for rep in adapt_set.representations:
all_reprs.append(rep)
except Exception as e:
logger.error(str(e))
return False
return (all_adapts, all_reprs) if (len(all_reprs) >= exp_reps) else False
def extract_meta_mpd(file):
"""
Extracts metadata from a valid MPD(MPEG-DASH Manifest file)
"""
adapts, reprs = check_valid_mpd(file)
if reprs:
metas = []
for rep in reprs:
meta = {}
meta["mime_type"] = rep.mime_type
if meta["mime_type"].startswith("audio"):
meta["audioSamplingRate"] = rep.audio_sampling_rate
else:
meta["width"] = rep.width
meta["height"] = rep.height
meta["framerate"] = (
rep.frame_rate
if not (rep.frame_rate is None)
else adapts[0].frame_rate
)
logger.debug("Found Meta: {}".format(meta))
metas.append(meta)
logger.debug("MetaData: {}".format(metas))
return metas
else:
return []
def return_mpd_path():
"""
returns MPD assets temp path
"""
return os.path.join(tempfile.gettempdir(), "temp_mpd")
def string_to_float(value):
"""
Converts fraction to float
"""
if value is None:
logger.error("Input value is None!")
return 0.0
extracted = value.strip().split("/")
cleaned = [float(x.strip()) for x in extracted]
return cleaned[0] / cleaned[1]
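# Illustrative example (assumption): MPD manifests typically report framerate
# as a fraction string, e.g. string_to_float("30000/1001") -> ~29.97, which the
# framerate assertions below round and compare against OpenCV's reported FPS.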
def extract_resolutions(source, streams):
"""
Extracts resolution value from dictionaries
"""
if not (source) or not (streams):
return {}
results = {}
assert os.path.isfile(source), "Not a valid source"
s_cv = cv2.VideoCapture(source)
results[int(s_cv.get(cv2.CAP_PROP_FRAME_WIDTH))] = int(
s_cv.get(cv2.CAP_PROP_FRAME_HEIGHT)
)
for stream in streams:
if "-resolution" in stream:
try:
res = stream["-resolution"].split("x")
assert len(res) == 2
width, height = (res[0].strip(), res[1].strip())
assert width.isnumeric() and height.isnumeric()
results[int(width)] = int(height)
except Exception as e:
logger.error(str(e))
continue
else:
continue
return results
def test_ss_stream():
"""
Testing Single-Source Mode
"""
mpd_file_path = os.path.join(return_mpd_path(), "dash_test.mpd")
try:
stream_params = {
"-video_source": return_testvideo_path(),
"-clear_prev_assets": True,
}
streamer = StreamGear(output=mpd_file_path, logging=True, **stream_params)
streamer.transcode_source()
streamer.terminate()
assert check_valid_mpd(mpd_file_path)
except Exception as e:
pytest.fail(str(e))
def test_ss_livestream():
"""
Testing Single-Source Mode with livestream.
"""
mpd_file_path = os.path.join(return_mpd_path(), "dash_test.mpd")
try:
stream_params = {
"-video_source": return_testvideo_path(),
"-livestream": True,
"-remove_at_exit": 1,
}
streamer = StreamGear(output=mpd_file_path, logging=True, **stream_params)
streamer.transcode_source()
streamer.terminate()
except Exception as e:
pytest.fail(str(e))
@pytest.mark.parametrize("conversion", [None, "COLOR_BGR2GRAY", "COLOR_BGR2BGRA"])
def test_rtf_stream(conversion):
"""
Testing Real-Time Frames Mode
"""
mpd_file_path = return_mpd_path()
try:
# Open stream
options = {"THREAD_TIMEOUT": 300}
stream = CamGear(
source=return_testvideo_path(), colorspace=conversion, **options
).start()
stream_params = {
"-clear_prev_assets": True,
"-input_framerate": "invalid",
}
streamer = StreamGear(output=mpd_file_path, **stream_params)
while True:
frame = stream.read()
# check if frame is None
if frame is None:
break
if conversion == "COLOR_BGR2RGBA":
streamer.stream(frame, rgb_mode=True)
else:
streamer.stream(frame)
stream.stop()
streamer.terminate()
mpd_file = [
os.path.join(mpd_file_path, f)
for f in os.listdir(mpd_file_path)
if f.endswith(".mpd")
]
assert len(mpd_file) == 1, "Failed to create MPD file!"
assert check_valid_mpd(mpd_file[0])
except Exception as e:
if not isinstance(e, queue.Empty):
pytest.fail(str(e))
def test_rtf_livestream():
"""
Testing Real-Time Frames Mode with livestream.
"""
mpd_file_path = return_mpd_path()
try:
# Open stream
options = {"THREAD_TIMEOUT": 300}
stream = CamGear(source=return_testvideo_path(), **options).start()
stream_params = {
"-livestream": True,
}
streamer = StreamGear(output=mpd_file_path, **stream_params)
while True:
frame = stream.read()
# check if frame is None
if frame is None:
break
streamer.stream(frame)
stream.stop()
streamer.terminate()
except Exception as e:
if not isinstance(e, queue.Empty):
pytest.fail(str(e))
def test_input_framerate_rtf():
"""
Testing "-input_framerate" parameter provided by StreamGear
"""
try:
mpd_file_path = os.path.join(return_mpd_path(), "dash_test.mpd")
stream = cv2.VideoCapture(return_testvideo_path()) # Open stream
test_framerate = stream.get(cv2.CAP_PROP_FPS)
stream_params = {
"-clear_prev_assets": True,
"-input_framerate": test_framerate,
}
streamer = StreamGear(output=mpd_file_path, logging=True, **stream_params)
while True:
(grabbed, frame) = stream.read()
if not grabbed:
break
streamer.stream(frame)
stream.release()
streamer.terminate()
meta_data = extract_meta_mpd(mpd_file_path)
assert meta_data and len(meta_data) > 0, "Test Failed!"
framerate_mpd = string_to_float(meta_data[0]["framerate"])
assert framerate_mpd > 0.0 and isinstance(framerate_mpd, float), "Test Failed!"
assert round(framerate_mpd) == round(test_framerate), "Test Failed!"
except Exception as e:
pytest.fail(str(e))
@pytest.mark.parametrize(
"stream_params",
[
{"-clear_prev_assets": True, "-bpp": 0.2000, "-gop": 125, "-vcodec": "libx265"},
{
"-clear_prev_assets": True,
"-bpp": "unknown",
"-gop": "unknown",
"-s:v:0": "unknown",
"-b:v:0": "unknown",
"-b:a:0": "unknown",
},
],
)
def test_params(stream_params):
"""
    Testing various encoding parameters provided by StreamGear
"""
try:
mpd_file_path = os.path.join(return_mpd_path(), "dash_test.mpd")
stream = cv2.VideoCapture(return_testvideo_path()) # Open stream
streamer = StreamGear(output=mpd_file_path, logging=True, **stream_params)
while True:
(grabbed, frame) = stream.read()
if not grabbed:
break
streamer.stream(frame)
stream.release()
streamer.terminate()
assert check_valid_mpd(mpd_file_path)
except Exception as e:
pytest.fail(str(e))
@pytest.mark.parametrize(
"stream_params",
[
{
"-clear_prev_assets": True,
"-video_source": return_testvideo_path(fmt="vo"),
"-audio": "https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/invalid.aac",
},
{
"-clear_prev_assets": True,
"-video_source": return_testvideo_path(fmt="vo"),
"-audio": return_testvideo_path(fmt="ao"),
},
{
"-clear_prev_assets": True,
"-video_source": return_testvideo_path(fmt="vo"),
"-audio": "https://raw.githubusercontent.com/abhiTronix/Imbakup/master/Images/big_buck_bunny_720p_1mb_ao.aac",
},
],
)
def test_audio(stream_params):
"""
    Testing Single-Source Mode with audio input
"""
mpd_file_path = os.path.join(return_mpd_path(), "dash_test.mpd")
try:
streamer = StreamGear(output=mpd_file_path, logging=True, **stream_params)
streamer.transcode_source()
streamer.terminate()
assert check_valid_mpd(mpd_file_path)
except Exception as e:
pytest.fail(str(e))
@pytest.mark.parametrize(
"stream_params",
[
{
"-clear_prev_assets": True,
"-video_source": return_testvideo_path(fmt="vo"),
"-streams": [
{
"-video_bitrate": "unknown",
}, # Invalid Stream 1
{
"-resolution": "unxun",
}, # Invalid Stream 2
{
"-resolution": "640x480",
"-video_bitrate": "unknown",
}, # Invalid Stream 3
{
"-resolution": "640x480",
"-framerate": "unknown",
}, # Invalid Stream 4
{
"-resolution": "320x240",
"-framerate": 20.0,
}, # Stream: 320x240 at 20fps framerate
],
},
{
"-clear_prev_assets": True,
"-video_source": return_testvideo_path(fmt="vo"),
"-audio": return_testvideo_path(fmt="ao"),
"-streams": [
{
"-resolution": "640x480",
"-video_bitrate": "850k",
"-audio_bitrate": "128k",
}, # Stream1: 640x480 at 850kbps bitrate
{
"-resolution": "320x240",
"-framerate": 20.0,
}, # Stream2: 320x240 at 20fps framerate
],
},
{
"-clear_prev_assets": True,
"-video_source": return_testvideo_path(),
"-streams": [
{
"-resolution": "960x540",
"-video_bitrate": "1350k",
}, # Stream1: 960x540 at 1350kbps bitrate
],
},
],
)
def test_multistreams(stream_params):
"""
Testing Support for additional Secondary Streams of variable bitrates or spatial resolutions.
"""
mpd_file_path = os.path.join(return_mpd_path(), "dash_test.mpd")
results = extract_resolutions(
stream_params["-video_source"], stream_params["-streams"]
)
try:
streamer = StreamGear(output=mpd_file_path, logging=True, **stream_params)
streamer.transcode_source()
streamer.terminate()
metadata = extract_meta_mpd(mpd_file_path)
meta_videos = [x for x in metadata if x["mime_type"].startswith("video")]
assert meta_videos and (len(meta_videos) <= len(results)), "Test Failed!"
for s_v in meta_videos:
assert int(s_v["width"]) in results, "Width check failed!"
assert (
int(s_v["height"]) == results[int(s_v["width"])]
), "Height check failed!"
except Exception as e:
pytest.fail(str(e))
|
py | b402f99c9cb06f0c52ac3c4fca64bb52d045caba | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates the N-styles style transfer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
flags = tf.app.flags
flags.DEFINE_boolean('style_grid', False,
'Whether to generate the style grid.')
flags.DEFINE_boolean('style_crossover', False,
'Whether to do a style crossover in the style grid.')
flags.DEFINE_boolean('learning_curves', True,
'Whether to evaluate learning curves for all styles.')
flags.DEFINE_integer('batch_size', 16, 'Batch size')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('eval_interval_secs', 60,
'Frequency, in seconds, at which evaluation is run.')
flags.DEFINE_integer('num_evals', 32, 'Number of evaluations of the losses.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_float('alpha', 1.0, 'Width multiplier')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('eval_dir', None,
'Directory where the results are saved to.')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries')
flags.DEFINE_string('master', '',
'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS,
'Style weights')
FLAGS = flags.FLAGS
def main(_):
with tf.Graph().as_default():
# Create inputs in [0, 1], as expected by vgg_16.
inputs, _ = image_utils.imagenet_inputs(
FLAGS.batch_size, FLAGS.image_size)
evaluation_images = image_utils.load_evaluation_images(FLAGS.image_size)
# Process style and weight flags
if FLAGS.style_coefficients is None:
style_coefficients = [1.0 for _ in range(FLAGS.num_styles)]
else:
style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
if len(style_coefficients) != FLAGS.num_styles:
raise ValueError(
'number of style coefficients differs from number of styles')
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Load style images.
style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
os.path.expanduser(FLAGS.style_dataset_file),
batch_size=FLAGS.num_styles, image_size=FLAGS.image_size,
square_crop=True, shuffle=False)
labels = tf.unstack(labels)
def _create_normalizer_params(style_label):
"""Creates normalizer parameters from a style label."""
return {'labels': tf.expand_dims(style_label, 0),
'num_categories': FLAGS.num_styles,
'center': True,
'scale': True}
# Dummy call to simplify the reuse logic
model.transform(
inputs,
alpha=FLAGS.alpha,
reuse=False,
normalizer_params=_create_normalizer_params(labels[0]))
def _style_sweep(inputs):
"""Transfers all styles onto the input one at a time."""
inputs = tf.expand_dims(inputs, 0)
stylized_inputs = []
for _, style_label in enumerate(labels):
stylized_input = model.transform(
inputs,
alpha=FLAGS.alpha,
reuse=True,
normalizer_params=_create_normalizer_params(style_label))
stylized_inputs.append(stylized_input)
return tf.concat([inputs] + stylized_inputs, 0)
if FLAGS.style_grid:
style_row = tf.concat(
[tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]), style_images],
0)
stylized_training_example = _style_sweep(inputs[0])
stylized_evaluation_images = [
_style_sweep(image) for image in tf.unstack(evaluation_images)]
stylized_noise = _style_sweep(
tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
stylized_style_images = [
_style_sweep(image) for image in tf.unstack(style_images)]
if FLAGS.style_crossover:
grid = tf.concat(
[style_row, stylized_training_example, stylized_noise] +
stylized_evaluation_images + stylized_style_images,
0)
else:
grid = tf.concat(
[style_row, stylized_training_example, stylized_noise] +
stylized_evaluation_images,
0)
if FLAGS.style_crossover:
grid_shape = [
3 + evaluation_images.get_shape().as_list()[0] + FLAGS.num_styles,
1 + FLAGS.num_styles]
else:
grid_shape = [
3 + evaluation_images.get_shape().as_list()[0],
1 + FLAGS.num_styles]
tf.summary.image(
'Style Grid',
tf.cast(
image_utils.form_image_grid(
grid,
grid_shape,
[FLAGS.image_size, FLAGS.image_size],
3) * 255.0,
tf.uint8))
if FLAGS.learning_curves:
metrics = {}
for i, label in enumerate(labels):
gram_matrices = dict(
(key, value[i: i + 1])
for key, value in style_gram_matrices.items())
stylized_inputs = model.transform(
inputs,
alpha=FLAGS.alpha,
reuse=True,
normalizer_params=_create_normalizer_params(label))
_, loss_dict = learning.total_loss(
inputs, stylized_inputs, gram_matrices, content_weights,
style_weights, reuse=i > 0)
for key, value in loss_dict.items():
metrics['{}_style_{}'.format(key, i)] = slim.metrics.streaming_mean(
value)
names_values, names_updates = slim.metrics.aggregate_metric_map(metrics)
for name, value in names_values.items():
summary_op = tf.summary.scalar(name, value, [])
print_op = tf.Print(summary_op, [value], name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, print_op)
eval_op = names_updates.values()
num_evals = FLAGS.num_evals
else:
eval_op = None
num_evals = 1
slim.evaluation.evaluation_loop(
master=FLAGS.master,
checkpoint_dir=os.path.expanduser(FLAGS.train_dir),
logdir=os.path.expanduser(FLAGS.eval_dir),
eval_op=eval_op,
num_evals=num_evals,
eval_interval_secs=FLAGS.eval_interval_secs)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
|
py | b402fac1812d91e24521726a90ed76f549d93285 | from time import sleep
from threading import Thread
import os
import shutil
import argparse
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent))
from hpbandster.core.nameserver import NameServer
from hpbandster.optimizers.mobohb import MOBOHB
from mobohb_worker import MOBOHBWorker
def main_mobohb():
res = mobohb.run(n_iterations=10e20)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_MOBOHB(
experiment,
search_space,
num_initial_samples=10,
num_candidates=24,
gamma=0.10,
seed=0,
num_iterations=2000,
history_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'history', 'mobohb'),
init_method='random',
budget=25,
min_budget=5,
max_budget=25,
init=True,
):
NS = NameServer(run_id=str(seed), host='127.0.0.1', port=0)
ns_host, ns_port = NS.start()
w = MOBOHBWorker(experiment, search_space.as_uniform_space(), None, seed, run_id=str(seed), host='127.0.0.1', nameserver=ns_host, nameserver_port=ns_port)
w.run(background=True)
motpe_params = {
'init_method': init_method,
'num_initial_samples': num_initial_samples,
'num_candidates': num_candidates,
'gamma': gamma,
'budget': budget
}
mobohb = MOBOHB(configspace=search_space.as_uniform_space(), parameters=motpe_params, history_dir=history_dir, init=init,
run_id=str(seed), nameserver=ns_host, nameserver_port=ns_port,
min_budget=min_budget, max_budget=max_budget
)
main_mobohb = lambda : mobohb.run(n_iterations=num_iterations)
t = Thread(target=main_mobohb)
t.daemon = True
t.start()
snoozeiness = 24 * 3600
mobohb.is_write()
sleep(snoozeiness)
while mobohb.is_write():
sleep(2)
mobohb.shutdown(shutdown_workers=True)
NS.shutdown()
return experiment
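# Minimal usage sketch (illustrative only; the `experiment` and `search_space`
# objects are assumed to be provided by the surrounding benchmark harness):
#
#     experiment = get_MOBOHB(experiment, search_space, seed=42,
#                             num_iterations=100, min_budget=5, max_budget=25)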
|
py | b402fad0c2a7ffe515bdd65bb85ab5af767faf0b | from datasets.quordle.used_words import words as used_words
from datasets.wordle.allowed_words import words as words
from datasets.wordle.common_words import words as common_words
from word_game_helper import CharacterGuess, CharacterStatus, WordGameHelper
GRAY: CharacterStatus = CharacterStatus.GRAY
GREEN: CharacterStatus = CharacterStatus.GREEN
YELLOW: CharacterStatus = CharacterStatus.YELLOW
# common_words = set()
GAME1: WordGameHelper = WordGameHelper(words, common_words, set())
GAME2: WordGameHelper = WordGameHelper(words, common_words, set())
GAME3: WordGameHelper = WordGameHelper(words, common_words, set())
GAME4: WordGameHelper = WordGameHelper(words, common_words, set())
GAMES: list[WordGameHelper] = [GAME1, GAME2, GAME3, GAME4]
def guess(word: str, statuses: list[list[CharacterStatus]]) -> None:
if len(word) != 5 or len(statuses) != 4:
return
for index, status in enumerate(statuses):
if not status or len(status) != 5:
continue
GAMES[index].make_guess(
[
CharacterGuess(word[0].lower(), status[0]),
CharacterGuess(word[1].lower(), status[1]),
CharacterGuess(word[2].lower(), status[2]),
CharacterGuess(word[3].lower(), status[3]),
CharacterGuess(word[4].lower(), status[4]),
]
)
def main():
guess(
"CRANE",
[
[GRAY, GRAY, GRAY, GRAY, GRAY],
[GRAY, GRAY, GRAY, YELLOW, GRAY],
[GRAY, GREEN, GREEN, GRAY, GREEN],
[GRAY, GRAY, GRAY, GREEN, GREEN],
],
)
guess(
"SHONE",
[
[GREEN, GRAY, GREEN, GRAY, GRAY],
[GRAY, GRAY, GRAY, YELLOW, GRAY],
[GRAY, GRAY, GRAY, GRAY, GREEN],
[GRAY, GREEN, GRAY, GREEN, GREEN],
],
)
guess(
"WHINE",
[
[GRAY, GRAY, YELLOW, GRAY, GRAY],
[GRAY, GRAY, YELLOW, YELLOW, GRAY],
[GRAY, GRAY, GRAY, GRAY, GREEN],
[GREEN, GREEN, GREEN, GREEN, GREEN],
],
)
guess(
"SPOIL",
[
[GREEN, GREEN, GREEN, GREEN, GREEN],
[GRAY, GRAY, GRAY, GREEN, GRAY],
[GRAY, GRAY, GRAY, GRAY, GRAY],
None,
],
)
guess(
"GRADE",
[
None,
[GRAY, GRAY, GRAY, GRAY, GRAY],
[GREEN, GREEN, GREEN, GRAY, GREEN],
None,
],
)
for game in GAMES:
game.print_possible_answers()
if __name__ == "__main__":
main()
|
py | b402fad719b69f1abdca2c51e7c79b23efd564b2 | from .widget import Widget
from tkinter import ttk
from tkinter.constants import HORIZONTAL, VERTICAL
class Notebook(Widget):
    def __init__(self, master, **kwargs):
super().__init__(
tk=ttk.Notebook(
master=master), **kwargs)
def forget_children(self):
for child in self.tk.tabs():
self.tk.forget(child)
def place_children(self, changed_value=None):
self.forget_children()
for child in self.children:
if child and not child.hidden:
self.tk.add(child.tk, text=child.text)
child._node.placed() |
py | b402fb06806f58673af157f2cca873e0e63eec85 | """
Django settings for api_sensei project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wm1*bp6ww#g)03b9*f5_y+fx(k5#22-*u5crb38yb+^kbmt#%y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'api.apps.ApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_sensei.urls'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_sensei.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
py | b402fc5c70f3a8c0c1836c4ea6e33a5deb0ffbbe | """
Support for LaCrosse sensor components.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.lacrosse/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE, CONF_ID, CONF_NAME, CONF_SENSORS, CONF_TYPE,
EVENT_HOMEASSISTANT_STOP, TEMP_CELSIUS)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
REQUIREMENTS = ['pylacrosse==0.3.1']
_LOGGER = logging.getLogger(__name__)
CONF_BAUD = 'baud'
CONF_DATARATE = 'datarate'
CONF_EXPIRE_AFTER = 'expire_after'
CONF_FREQUENCY = 'frequency'
CONF_JEELINK_LED = 'led'
CONF_TOGGLE_INTERVAL = 'toggle_interval'
CONF_TOGGLE_MASK = 'toggle_mask'
DEFAULT_DEVICE = '/dev/ttyUSB0'
DEFAULT_BAUD = '57600'
DEFAULT_EXPIRE_AFTER = 300
TYPES = ['battery', 'humidity', 'temperature']
SENSOR_SCHEMA = vol.Schema({
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_TYPE): vol.In(TYPES),
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SENSORS): vol.Schema({cv.slug: SENSOR_SCHEMA}),
vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
vol.Optional(CONF_DATARATE): cv.positive_int,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
vol.Optional(CONF_FREQUENCY): cv.positive_int,
vol.Optional(CONF_JEELINK_LED): cv.boolean,
vol.Optional(CONF_TOGGLE_INTERVAL): cv.positive_int,
vol.Optional(CONF_TOGGLE_MASK): cv.positive_int,
})
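# Illustrative configuration.yaml snippet matching the schema above (the sensor
# name, id and expiry are placeholders, not values defined in this file):
#
# sensor:
#   - platform: lacrosse
#     device: /dev/ttyUSB0
#     baud: 57600
#     sensors:
#       living_room_temperature:
#         id: 42
#         type: temperature
#         expire_after: 600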
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the LaCrosse sensors."""
import pylacrosse
from serial import SerialException
usb_device = config.get(CONF_DEVICE)
baud = int(config.get(CONF_BAUD))
expire_after = config.get(CONF_EXPIRE_AFTER)
_LOGGER.debug("%s %s", usb_device, baud)
try:
lacrosse = pylacrosse.LaCrosse(usb_device, baud)
lacrosse.open()
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return False
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lacrosse.close)
if CONF_JEELINK_LED in config:
lacrosse.led_mode_state(config.get(CONF_JEELINK_LED))
if CONF_FREQUENCY in config:
lacrosse.set_frequency(config.get(CONF_FREQUENCY))
if CONF_DATARATE in config:
lacrosse.set_datarate(config.get(CONF_DATARATE))
if CONF_TOGGLE_INTERVAL in config:
lacrosse.set_toggle_interval(config.get(CONF_TOGGLE_INTERVAL))
if CONF_TOGGLE_MASK in config:
lacrosse.set_toggle_mask(config.get(CONF_TOGGLE_MASK))
lacrosse.start_scan()
sensors = []
for device, device_config in config[CONF_SENSORS].items():
_LOGGER.debug("%s %s", device, device_config)
typ = device_config.get(CONF_TYPE)
sensor_class = TYPE_CLASSES[typ]
name = device_config.get(CONF_NAME, device)
sensors.append(
sensor_class(
hass, lacrosse, device, name, expire_after, device_config
)
)
add_devices(sensors)
class LaCrosseSensor(Entity):
"""Implementation of a Lacrosse sensor."""
_temperature = None
_humidity = None
_low_battery = None
_new_battery = None
def __init__(self, hass, lacrosse, device_id, name, expire_after, config):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass)
self._config = config
self._name = name
self._value = None
self._expire_after = expire_after
self._expiration_trigger = None
lacrosse.register_callback(
int(self._config['id']), self._callback_lacrosse, None)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
def update(self, *args):
"""Get the latest data."""
pass
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {
'low_battery': self._low_battery,
'new_battery': self._new_battery,
}
return attributes
def _callback_lacrosse(self, lacrosse_sensor, user_data):
"""Handle a function that is called from pylacrosse with new values."""
if self._expire_after is not None and self._expire_after > 0:
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = (
dt_util.utcnow() + timedelta(seconds=self._expire_after))
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self.value_is_expired, expiration_at)
self._temperature = round(lacrosse_sensor.temperature * 2) / 2
self._humidity = lacrosse_sensor.humidity
self._low_battery = lacrosse_sensor.low_battery
self._new_battery = lacrosse_sensor.new_battery
@callback
def value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._value = None
self.async_schedule_update_ha_state()
class LaCrosseTemperature(LaCrosseSensor):
"""Implementation of a Lacrosse temperature sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def state(self):
"""Return the state of the sensor."""
return self._temperature
class LaCrosseHumidity(LaCrosseSensor):
"""Implementation of a Lacrosse humidity sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return '%'
@property
def state(self):
"""Return the state of the sensor."""
return self._humidity
@property
def icon(self):
"""Icon to use in the frontend."""
return 'mdi:water-percent'
class LaCrosseBattery(LaCrosseSensor):
"""Implementation of a Lacrosse battery sensor."""
@property
def state(self):
"""Return the state of the sensor."""
if self._low_battery is None:
state = None
elif self._low_battery is True:
state = 'low'
else:
state = 'ok'
return state
@property
def icon(self):
"""Icon to use in the frontend."""
if self._low_battery is None:
icon = 'mdi:battery-unknown'
elif self._low_battery is True:
icon = 'mdi:battery-alert'
else:
icon = 'mdi:battery'
return icon
TYPE_CLASSES = {
'temperature': LaCrosseTemperature,
'humidity': LaCrosseHumidity,
'battery': LaCrosseBattery
}
|
py | b402fcaa9110e0ea5147f813de4d5f2432bd8d7b | from collections import OrderedDict
import logging
from colorama import Fore, Style, init as colorama_init
from future.builtins import input, str
from iepy.data.db import CandidateEvidenceManager
from iepy.data.models import SegmentToTag
logger = logging.getLogger(__name__)
class Answers(object):
YES = u'y'
NO = u'n'
DONT_KNOW = u'd'
STOP = u'stop'
options = [YES, NO, DONT_KNOW, STOP]
values = {YES: 1.0, NO: 0.0, DONT_KNOW: 0.5}
QUESTION_TEMPLATE = str(u"""
Is the following text evidence of the Fact %(fact)s?
%(text)s
(%(keys)s): """)
class TerminalInterviewer(object):
"""
Capable of asking Human to validate evidence for some facts over a text terminal.
Questions is a list of tuples of (Evidence, score), that will be consumed in
the received order.
    Each time a piece of evidence is validated or rejected by the human, the corresponding
    boolean answer is stored by calling the provided callback.
Extra options can be defined (key, explanation) like this:
extra_options=[('stop', 'Stop algorithm')]
    when the user picks such an answer, control is returned to the caller,
leaving the internal state untouched, so it's possible to resume execution.
"""
# FIXME: this "options" shall be merged with the Answers class defined above.
YES = u'y'
NO = u'n'
DONT_KNOW = u'd'
RUN = u'run'
base_options = OrderedDict(
[(YES, u'Valid Evidence'),
(NO, u'Not valid Evidence'),
(DONT_KNOW, u'Discard, not sure'),
(RUN, u'Tired of answering for now. Run with what I gave you.')
])
template = QUESTION_TEMPLATE
def __init__(self, questions, store_answer_callback,
extra_options=None):
"""
Creates an object capable of asking Human to validate evidence for some facts.
Questions is a list of tuples of (Evidence, score), that will be consumed in
the received order.
        Each time a piece of evidence is validated or rejected by the human, the corresponding
        boolean answer is stored by calling the provided callback.
Extra options can be defined (key, explanation) like this:
extra_options=[('stop', 'Stop algorithm')]
        when the user picks such an answer, flow is returned to the caller,
        and the question is discarded (so it's possible to resume execution)
"""
self.questions = questions
self.raw_answers = [] # list of answers
self.store_answer_callback = store_answer_callback
self.extra_options = OrderedDict(extra_options or [])
if set(self.base_options).intersection(self.extra_options.keys()):
raise ValueError(u"Can't define extra answers with the builtin keys")
self.keys = list(self.base_options.keys()) + list(self.extra_options.keys())
self.formatter = TerminalEvidenceFormatter()
def explain(self):
"""Returns string that explains how to use the tool for the person
answering questions.
"""
        r = u"You'll be presented with pieces of text that have a good chance of being "
        r += u"evidence of the known facts. Please confirm or reject each.\n"
r += u"Pay attention to the colors.\n"
r += u"Possible answers are:\n"
options = list(self.base_options.items()) + list(self.extra_options.items())
r += u'\n'.join(' %s: %s' % (key, explanation) for key, explanation in options)
print(r)
def __call__(self):
"""For each available question prompts the Human if it's valid evidence or not.
        Returns None once all questions have been answered (or when the Human
        indicates that they're tired of answering).
        Each time the Human replies with a custom answer (not in the base list), that
        answer is returned immediately (and no further questions are shown
        unless the terminal is invoked again).
"""
colorama_init()
self.explain()
for evidence in self.questions[len(self.raw_answers):]:
answer = self.get_human_answer(evidence)
if answer in self.extra_options:
# Will not be handled here but in the caller.
return answer
elif answer == self.RUN:
# No more questions and answers for now. Use what is available.
return None
else:
self.raw_answers.append(answer)
if answer in [self.YES, self.NO]:
self.store_answer_callback(evidence, answer == self.YES)
def get_human_answer(self, evidence):
keys = u'/'.join(self.keys)
c_fact, c_text = self.formatter.colored_fact_and_text(evidence)
question = self.template % {
'keys': keys, 'fact': c_fact,
'text': c_text
}
answer = input(question)
while answer not in self.keys:
answer = input('Invalid answer. (%s): ' % keys)
return answer
def human_oracle(evidence, possible_answers):
"""Simple text interface to query a human for fact generation."""
colored_fact, colored_segment = evidence.colored_fact_and_text()
print(u'SEGMENT: %s' % colored_segment)
question = ' FACT: {0}? ({1}) '.format(colored_fact,
u'/'.join(possible_answers))
answer = input(question)
while answer not in possible_answers:
answer = input(question)
return answer
class TerminalEvidenceFormatter(object):
default_color_1 = Fore.RED
default_color_2 = Fore.GREEN
    def colored_text(self, ev, color_1=None, color_2=None):
        """Will return naively formatted text with the entities highlighted.
        Assumes that occurrences do not overlap.
"""
color_1 = color_1 or self.default_color_1
color_2 = color_2 or self.default_color_2
# right and left entity-occurrences. "Right" and "Left" are just ideas, but
# are not necessary their true position on the text
r_eo = ev.right_entity_occurrence
l_eo = ev.left_entity_occurrence
ev.segment.hydrate()
r_eo.hydrate_for_segment(ev.segment)
l_eo.hydrate_for_segment(ev.segment)
tkns = ev.segment.tokens[:]
if r_eo.segment_offset < l_eo.segment_offset:
tkns.insert(l_eo.segment_offset_end, Style.RESET_ALL)
tkns.insert(l_eo.segment_offset, color_2)
tkns.insert(r_eo.segment_offset_end, Style.RESET_ALL)
tkns.insert(r_eo.segment_offset, color_1)
else: # must be solved in the reverse order
tkns.insert(r_eo.segment_offset_end, Style.RESET_ALL)
tkns.insert(r_eo.segment_offset, color_1)
tkns.insert(l_eo.segment_offset_end, Style.RESET_ALL)
tkns.insert(l_eo.segment_offset, color_2)
return u' '.join(tkns)
def colored_fact(self, ev, color_1=None, color_2=None):
color_1 = color_1 or self.default_color_1
color_2 = color_2 or self.default_color_2
right_entity = ev.right_entity_occurrence.entity
left_entity = ev.left_entity_occurrence.entity
return u'(%s <%s>, %s, %s <%s>)' % (
color_1 + right_entity.key + Style.RESET_ALL,
right_entity.kind,
ev.relation.name,
color_2 + left_entity.key + Style.RESET_ALL,
left_entity.kind,
)
def colored_fact_and_text(self, ev, color_1=None, color_2=None):
color_1 = color_1 or self.default_color_1
color_2 = color_2 or self.default_color_2
return (
self.colored_fact(ev, color_1, color_2),
self.colored_text(ev, color_1, color_2)
)
class TerminalAdministration(object):
    """Terminal/Console interface for administering the run of an iepy extraction.
"""
REFRESH = u'refresh'
RUN = u'run'
base_options = OrderedDict(
[(REFRESH, u'Refresh - check how many new labels were created.'),
(RUN, u'Run Process - run the process again with the info obtained'),
])
def __init__(self, relation, extra_options):
self.relation = relation
self.extra_options = OrderedDict(extra_options or [])
if set(self.base_options).intersection(self.extra_options.keys()):
raise ValueError(u"Can't define extra options with the builtin keys")
self.keys = list(self.base_options.keys()) + list(self.extra_options.keys())
def update_candidate_evidences_to_label(self, evidence_candidates):
# Will let the UI know which are the segments that have evidence to label.
# Needs to respect the provided ordering, so the created SegmentToTag objects
# when sorted by date respect the evidence_candidates provided.
logger.info('Creating segments to tag')
segments_to_tag = []
for ev_c in evidence_candidates:
if ev_c.segment not in segments_to_tag:
segments_to_tag.append(ev_c.segment)
existent_stt = {stt.segment_id: stt for stt in SegmentToTag.objects.filter(
relation=self.relation, segment__in=segments_to_tag)}
for segment in segments_to_tag:
if segment.pk in existent_stt:
stt = existent_stt[segment.pk]
else:
stt, created = SegmentToTag.objects.get_or_create(
segment=segment,
relation=self.relation,
)
if not stt.done:
stt.save() # always saving, so modification_date is updated
logger.info('Done creating segments to tag')
def explain(self):
"""Returns string that explains how to use the tool for the person
administering the extraction.
"""
r = "Waiting for candidate evidences to be labeled. \n"
r += "Available commands are:\n"
options = list(self.base_options.items()) + list(self.extra_options.items())
r += u'\n'.join(' %s: %s' % (key, explanation) for key, explanation in options)
print(r)
def __call__(self):
self.explain()
while True:
# Forever loop until the administrator decides to stop it
cmd = self.get_command()
if cmd in self.extra_options or cmd == self.RUN:
return cmd
if cmd == self.REFRESH:
self.refresh_info()
def refresh_info(self):
c = CandidateEvidenceManager.value_labeled_candidates_count_for_relation(
self.relation)
        print('There are %s labels with yes/no answers' % c)
def get_command(self):
keys = u'/'.join(self.keys)
answer = input('Waiting... what to do: ')
while answer not in self.keys:
answer = input('"%s" is an invalid answer. (%s): ' % (answer, keys))
return answer
|
py | b402fcd97635e8fe37854b03a4468a43993b189a | def y(m, x, b):
y = m*x+b
return y
while True:
slope = float(input("Enter the slope: "))
y_int = float(input("Enter the y-int: "))
try:
x_value = float(input("Enter an x-value (enter a string to exit): "))
except ValueError:
break
print(f"The y value is {y(slope, x_value, y_int)}")
print("End of program") |
py | b402fcdf5712c995c6fd0cb6bf2a19bae8a423f3 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
project = "Spark NLP"
copyright = "2021, John Snow Labs"
author = "John Snow Labs"
# The full version, including alpha/beta/rc tags
release = "3.4.2"
pyspark_version = "3.0.3"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinx.ext.autosummary",
"numpydoc", # handle NumPy documentation formatted docstrings.
"sphinx-prompt",
"sphinx_toggleprompt",
# "sphinx_copybutton", # TODO
"sphinx_substitution_extensions",
"sphinx.ext.intersphinx",
]
intersphinx_mapping = {
"spark": ("https://spark.apache.org/docs/latest/api/python/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"
html_favicon = "_static/fav.ico"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
numpydoc_show_class_members = False # Or add Method section in doc strings? https://stackoverflow.com/questions/65198998/sphinx-warning-autosummary-stub-file-not-found-for-the-methods-of-the-class-c
# autoclass_content = "both" # use __init__ as doc as well
# -- More Configurations -----------------------------------------------------
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Global substitutions in the RST files.
rst_prolog = """
.. |release| replace:: {0}
.. |pyspark_version| replace:: {1}
""".format(
release, pyspark_version
) |
py | b402fdb885bcd53d7d8d84fc10ce2f1c6ad7b5dd | # flopy version file automatically created using...make-release.py
# created on...August 07, 2021 08:14:57
major = 3
minor = 3
micro = 5
__version__ = f"{major}.{minor}.{micro}"
__pakname__ = "flopy"
# edit author dictionary as necessary
# (in order of commits after Bakker and Post)
author_dict = {
"Mark Bakker": "[email protected]",
"Vincent Post": "[email protected]",
"Joseph D. Hughes": "[email protected]",
"Christian D. Langevin": "[email protected]",
"Jeremy T. White": "[email protected]",
"Andrew T. Leaf": "[email protected]",
"Scott R. Paulinski": "[email protected]",
"Jason C. Bellino": "[email protected]",
"Eric D. Morway": "[email protected]",
"Michael W. Toews": "[email protected]",
"Joshua D. Larsen": "[email protected]",
"Michael N. Fienen": "[email protected]",
"Jon Jeffrey Starn": "[email protected]",
"Davíd Brakenhoff": "[email protected]",
}
__author__ = ", ".join(author_dict.keys())
__author_email__ = ", ".join(s for _, s in author_dict.items())
|
py | b402fe9e802426ee79485614978cf8ca4ef5adaa | from greendoge.wallet.puzzles.load_clvm import load_clvm
CC_MOD = load_clvm("cc.clvm", package_or_requirement=__name__)
LOCK_INNER_PUZZLE = load_clvm("lock.inner.puzzle.clvm", package_or_requirement=__name__)
|
py | b402fec9aa2c982eb8a503fc6907e3b41340d6a2 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""The main config file for Superset
All configuration in this file can be overridden by providing a superset_config
in your PYTHONPATH as there is a ``from superset_config import *``
at the end of this file.
"""
from collections import OrderedDict
import imp
import json
import os
import sys
from celery.schedules import crontab
from dateutil import tz
from flask_appbuilder.security.manager import AUTH_DB
from superset.stats_logger import DummyStatsLogger
# Realtime stats logger, a StatsD implementation exists
STATS_LOGGER = DummyStatsLogger()
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
if 'SUPERSET_HOME' in os.environ:
DATA_DIR = os.environ['SUPERSET_HOME']
else:
DATA_DIR = os.path.join(os.path.expanduser('~'), '.superset')
# ---------------------------------------------------------
# Superset specific config
# ---------------------------------------------------------
PACKAGE_DIR = os.path.join(BASE_DIR, 'static', 'assets')
PACKAGE_FILE = os.path.join(PACKAGE_DIR, 'package.json')
with open(PACKAGE_FILE) as package_file:
VERSION_STRING = json.load(package_file)['version']
ROW_LIMIT = 50000
VIZ_ROW_LIMIT = 10000
# max rows retrieved by filter select auto complete
FILTER_SELECT_ROW_LIMIT = 10000
SUPERSET_WORKERS = 2 # deprecated
SUPERSET_CELERY_WORKERS = 32 # deprecated
SUPERSET_WEBSERVER_ADDRESS = '0.0.0.0'
SUPERSET_WEBSERVER_PORT = 8088
# This is an important setting, and should be lower than your
# [load balancer / proxy / envoy / kong / ...] timeout settings.
# You should also make sure to configure your WSGI server
# (gunicorn, nginx, apache, ...) timeout setting to be <= to this setting
SUPERSET_WEBSERVER_TIMEOUT = 60
SUPERSET_DASHBOARD_POSITION_DATA_LIMIT = 65535
EMAIL_NOTIFICATIONS = False
CUSTOM_SECURITY_MANAGER = None
SQLALCHEMY_TRACK_MODIFICATIONS = False
# ---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h' # noqa
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(DATA_DIR, 'superset.db')
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# In order to hook up a custom password store for all SQLACHEMY connections
# implement a function that takes a single argument of type 'sqla.engine.url',
# returns a password and set SQLALCHEMY_CUSTOM_PASSWORD_STORE.
#
# e.g.:
# def lookup_password(url):
# return 'secret'
# SQLALCHEMY_CUSTOM_PASSWORD_STORE = lookup_password
# The limit of queries fetched for query search
QUERY_SEARCH_LIMIT = 1000
# Flask-WTF flag for CSRF
WTF_CSRF_ENABLED = True
# Add endpoints that need to be exempt from CSRF protection
WTF_CSRF_EXEMPT_LIST = ['superset.views.core.log']
# Whether to run the web server in debug mode or not
DEBUG = os.environ.get('FLASK_ENV') == 'development'
FLASK_USE_RELOAD = True
# Whether to show the stacktrace on 500 error
SHOW_STACKTRACE = True
# Extract and use X-Forwarded-For/X-Forwarded-Proto headers?
ENABLE_PROXY_FIX = False
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = 'Superset'
# Uncomment to setup an App icon
APP_ICON = '/static/assets/images/[email protected]'
APP_ICON_WIDTH = 126
# Uncomment to specify where clicking the logo would take the user
# e.g. setting it to '/welcome' would take the user to '/superset/welcome'
LOGO_TARGET_PATH = None
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# tz.gettz('Asia/Shanghai') : Using the time zone with specific name
# [TimeZone List]
# See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# other tz can be overridden by providing a local_config
DRUID_IS_ACTIVE = True
DRUID_TZ = tz.tzutc()
DRUID_ANALYSIS_TYPES = ['cardinality']
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password)
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://open.login.yahoo.com/' },
# { 'name': 'Flickr', 'url': 'https://www.flickr.com/<username>' },
# ]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for the GAMMA role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE_GAMMA = False
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'superset/translations'
# The allowed translation for you app
LANGUAGES = {
'en': {'flag': 'us', 'name': 'English'},
'it': {'flag': 'it', 'name': 'Italian'},
'fr': {'flag': 'fr', 'name': 'French'},
'zh': {'flag': 'cn', 'name': 'Chinese'},
'ja': {'flag': 'jp', 'name': 'Japanese'},
'de': {'flag': 'de', 'name': 'German'},
'pt': {'flag': 'pt', 'name': 'Portuguese'},
'pt_BR': {'flag': 'br', 'name': 'Brazilian Portuguese'},
'ru': {'flag': 'ru', 'name': 'Russian'},
'ko': {'flag': 'kr', 'name': 'Korean'},
}
# ---------------------------------------------------
# Feature flags
# ---------------------------------------------------
# Feature flags that are set by default go here. Their values can be
# overwritten by those specified under FEATURE_FLAGS in super_config.py
# For example, DEFAULT_FEATURE_FLAGS = { 'FOO': True, 'BAR': False } here
# and FEATURE_FLAGS = { 'BAR': True, 'BAZ': True } in superset_config.py
# will result in combined feature flags of { 'FOO': True, 'BAR': True, 'BAZ': True }
DEFAULT_FEATURE_FLAGS = {}
# A function that receives a dict of all feature flags
# (DEFAULT_FEATURE_FLAGS merged with FEATURE_FLAGS)
# can alter it, and returns a similar dict. Note the dict of feature
# flags passed to the function is a deepcopy of the dict in the config,
# and can therefore be mutated without side-effect
#
# GET_FEATURE_FLAGS_FUNC can be used to implement progressive rollouts,
# role-based features, or a full on A/B testing framework.
#
# from flask import g, request
# def GET_FEATURE_FLAGS_FUNC(feature_flags_dict):
# feature_flags_dict['some_feature'] = g.user and g.user.id == 5
# return feature_flags_dict
GET_FEATURE_FLAGS_FUNC = None
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
CACHE_DEFAULT_TIMEOUT = 60 * 60 * 24
CACHE_CONFIG = {'CACHE_TYPE': 'null'}
TABLE_NAMES_CACHE_CONFIG = {'CACHE_TYPE': 'null'}
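# Illustrative example (not a default of this file): to back the cache with Redis,
# CACHE_CONFIG can be set to a Flask-Cache style dictionary, e.g.
# CACHE_CONFIG = {
#     'CACHE_TYPE': 'redis',
#     'CACHE_DEFAULT_TIMEOUT': 60 * 60 * 24,
#     'CACHE_KEY_PREFIX': 'superset_',
#     'CACHE_REDIS_URL': 'redis://localhost:6379/0',
# }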
# CORS Options
ENABLE_CORS = False
CORS_OPTIONS = {}
# Chrome allows up to 6 open connections per domain at a time. When there are more
# than 6 slices in a dashboard, fetch requests are often queued up waiting for the
# next available socket. PR #5039 is trying to allow domain sharding for Superset,
# and this feature will be enabled by configuration only (by default Superset
# doesn't allow cross-domain requests).
SUPERSET_WEBSERVER_DOMAINS = None
# Allowed format types for upload on Database view
# TODO: Add processing of other spreadsheet formats (xls, xlsx etc)
ALLOWED_EXTENSIONS = set(['csv'])
# CSV Options: key/value pairs that will be passed as argument to DataFrame.to_csv method
# note: index option should not be overridden
CSV_EXPORT = {
'encoding': 'utf-8',
}
# ---------------------------------------------------
# Time grain configurations
# ---------------------------------------------------
# List of time grains to disable in the application (see list of builtin
# time grains in superset/db_engine_specs.builtin_time_grains).
# For example: to disable 1 second time grain:
# TIME_GRAIN_BLACKLIST = ['PT1S']
TIME_GRAIN_BLACKLIST = []
# Additional time grains to be supported using similar definitions as in
# superset/db_engine_specs.builtin_time_grains.
# For example: To add a new 2 second time grain:
# TIME_GRAIN_ADDONS = {'PT2S': '2 second'}
TIME_GRAIN_ADDONS = {}
# Implementation of additional time grains per engine.
# For example: To implement 2 second time grain on clickhouse engine:
# TIME_GRAIN_ADDON_FUNCTIONS = {
# 'clickhouse': {
# 'PT2S': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 2)*2)'
# }
# }
TIME_GRAIN_ADDON_FUNCTIONS = {}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Blacklist pivot table and treemap:
# VIZ_TYPE_BLACKLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_BLACKLIST = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_BLACKLIST = []
# --------------------------------------------------
# Modules, datasources and middleware to be registered
# --------------------------------------------------
DEFAULT_MODULE_DS_MAP = OrderedDict([
('superset.connectors.sqla.models', ['SqlaTable']),
('superset.connectors.druid.models', ['DruidDatasource']),
])
ADDITIONAL_MODULE_DS_MAP = {}
ADDITIONAL_MIDDLEWARE = []
"""
1) https://docs.python-guide.org/writing/logging/
2) https://docs.python.org/2/library/logging.config.html
"""
# Console Log Settings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
LOG_LEVEL = 'DEBUG'
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = 'DEBUG'
FILENAME = os.path.join(DATA_DIR, 'superset.log')
ROLLOVER = 'midnight'
INTERVAL = 1
BACKUP_COUNT = 30
# Custom logger for auditing queries. This can be used to send ran queries to a
# structured immutable store for auditing purposes. The function is called for
# every query ran, in both SQL Lab and charts/dashboards.
# def QUERY_LOGGER(
# database,
# query,
# schema=None,
# user=None,
# client=None,
# security_manager=None,
# ):
# pass
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = os.environ.get('MAPBOX_API_KEY', '')
# Maximum number of rows returned from a database
# in async mode, no more than SQL_MAX_ROW will be returned and stored
# in the results backend. This also becomes the limit when exporting CSVs
SQL_MAX_ROW = 100000
# Default row limit for SQL Lab queries
DEFAULT_SQLLAB_LIMIT = 1000
# Maximum number of tables/views displayed in the dropdown window in SQL Lab.
MAX_TABLE_NAMES = 3000
# Adds a warning message on sqllab save query modal.
SQLLAB_SAVE_WARNING_MESSAGE = None
# If defined, shows this text in an alert-warning box in the navbar
# one example use case may be "STAGING" to make it clear that this is
# not the production version of the site.
WARNING_MSG = None
# Default celery config is to use SQLA as a broker, in a production setting
# you'll want to use a proper broker as specified here:
# http://docs.celeryproject.org/en/latest/getting-started/brokers/index.html
class CeleryConfig(object):
BROKER_URL = 'sqla+sqlite:///celerydb.sqlite'
CELERY_IMPORTS = (
'superset.sql_lab',
'superset.tasks',
)
CELERY_RESULT_BACKEND = 'db+sqlite:///celery_results.sqlite'
CELERYD_LOG_LEVEL = 'DEBUG'
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = True
CELERY_ANNOTATIONS = {
'sql_lab.get_sql_results': {
'rate_limit': '100/s',
},
'email_reports.send': {
'rate_limit': '1/s',
'time_limit': 120,
'soft_time_limit': 150,
'ignore_result': True,
},
}
CELERYBEAT_SCHEDULE = {
'email_reports.schedule_hourly': {
'task': 'email_reports.schedule_hourly',
'schedule': crontab(minute=1, hour='*'),
},
}
CELERY_CONFIG = CeleryConfig
"""
# Set celery config to None to disable all the above configuration
CELERY_CONFIG = None
"""
# static http headers to be served by your Superset server.
# This header prevents iFrames from other domains and
# "clickjacking" as a result
HTTP_HEADERS = {'X-Frame-Options': 'SAMEORIGIN'}
# If you need to allow iframes from other domains (and are
# aware of the risks), you can disable this header:
# HTTP_HEADERS = {}
# The db id here results in selecting this one as a default in SQL Lab
DEFAULT_DB_ID = None
# Timeout duration for SQL Lab synchronous queries
SQLLAB_TIMEOUT = 30
# SQLLAB_DEFAULT_DBID
SQLLAB_DEFAULT_DBID = None
# The MAX duration (in seconds) a query can run for before being killed
# by celery.
SQLLAB_ASYNC_TIME_LIMIT_SEC = 60 * 60 * 6
# An instantiated derivative of werkzeug.contrib.cache.BaseCache
# if enabled, it can be used to store the results of long-running queries
# in SQL Lab by using the "Run Async" button/feature
RESULTS_BACKEND = None
# The S3 bucket where you want to store your external hive tables created
# from CSV files. For example, 'companyname-superset'
CSV_TO_HIVE_UPLOAD_S3_BUCKET = None
# The directory within the bucket specified above that will
# contain all the external tables
CSV_TO_HIVE_UPLOAD_DIRECTORY = 'EXTERNAL_HIVE_TABLES/'
# The namespace within hive where the tables created from
# uploading CSVs will be stored.
UPLOADED_CSV_HIVE_NAMESPACE = None
# A dictionary of items that gets merged into the Jinja context for
# SQL Lab. The existing context gets updated with this dictionary,
# meaning values for existing keys get overwritten by the content of this
# dictionary.
JINJA_CONTEXT_ADDONS = {}
# Roles that are controlled by the API / Superset and should not be changed
# by humans.
ROBOT_PERMISSION_ROLES = ['Public', 'Gamma', 'Alpha', 'Admin', 'sql_lab']
CONFIG_PATH_ENV_VAR = 'SUPERSET_CONFIG_PATH'
# If a callable is specified, it will be called at app startup while passing
# a reference to the Flask app. This can be used to alter the Flask app
# in whatever way.
# example: FLASK_APP_MUTATOR = lambda x: x.before_request = f
FLASK_APP_MUTATOR = None
# Set this to false if you don't want users to be able to request/grant
# datasource access requests from/to other users.
ENABLE_ACCESS_REQUEST = False
# smtp server configuration
EMAIL_NOTIFICATIONS = False # all the emails are sent using dryrun
SMTP_HOST = 'localhost'
SMTP_STARTTLS = True
SMTP_SSL = False
SMTP_USER = 'superset'
SMTP_PORT = 25
SMTP_PASSWORD = 'superset'
SMTP_MAIL_FROM = '[email protected]'
if not CACHE_DEFAULT_TIMEOUT:
CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')
# Whether to bump the logging level to ERROR on the flask_appbuilder package
# Set to False if/when debugging FAB related issues like
# permission management
SILENCE_FAB = True
# The link to a page containing common errors and their resolutions
# It will be appended at the bottom of sql_lab errors.
TROUBLESHOOTING_LINK = ''
# CSRF token timeout, set to None for a token that never expires
WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 7
# This link should lead to a page with instructions on how to gain access to a
# Datasource. It will be placed at the bottom of permissions errors.
PERMISSION_INSTRUCTIONS_LINK = ''
# Integrate external Blueprints to the app by passing them to your
# configuration. These blueprints will get integrated in the app
BLUEPRINTS = []
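# Illustrative example only ('my_extension' and 'bp' are hypothetical names):
# from my_extension.views import bp
# BLUEPRINTS = [bp]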
# Provide a callable that receives a tracking_url and returns another
# URL. This is used to translate internal Hadoop job tracker URL
# into a proxied one
TRACKING_URL_TRANSFORMER = lambda x: x # noqa: E731
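# Illustrative example only (host names are hypothetical):
# TRACKING_URL_TRANSFORMER = (
#     lambda url: url.replace('internal-tracker.local', 'tracker-proxy.example.com'))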
# Interval between consecutive polls when using Hive Engine
HIVE_POLL_INTERVAL = 5
# Allow for javascript controls components
# this enables programmers to customize certain charts (like the
# geospatial ones) by inputting javascript in controls. This exposes
# an XSS security vulnerability
ENABLE_JAVASCRIPT_CONTROLS = False
# The id of a template dashboard that should be copied to every new user
DASHBOARD_TEMPLATE_ID = None
# A callable that allows altering the database connection URL and params
# on the fly, at runtime. This allows for things like impersonation or
# arbitrary logic. For instance you can wire different users to
# use different connection parameters, or pass their email address as the
# username. The function receives the connection uri object, connection
# params, the username, and returns the mutated uri and params objects.
# Example:
# def DB_CONNECTION_MUTATOR(uri, params, username, security_manager, source):
# user = security_manager.find_user(username=username)
# if user and user.email:
# uri.username = user.email
# return uri, params
#
# Note that the returned uri and params are passed directly to sqlalchemy's
# create_engine, as in `create_engine(url, **params)`
DB_CONNECTION_MUTATOR = None
# A function that intercepts the SQL to be executed and can alter it.
# A typical use case is adding a comment header with information such as
# the username and the worker node executing the query
#
# def SQL_QUERY_MUTATOR(sql, username, security_manager):
# dttm = datetime.now().isoformat()
# return f"-- [SQL LAB] {username} {dttm}\n{sql}"
SQL_QUERY_MUTATOR = None
# When not using gunicorn (nginx for instance), you may want to disable
# flask-compress
ENABLE_FLASK_COMPRESS = True
# Enable / disable scheduled email reports
ENABLE_SCHEDULED_EMAIL_REPORTS = False
# If enabled, certain features are run in debug mode
# Current list:
# * Emails are sent using dry-run mode (logging only)
SCHEDULED_EMAIL_DEBUG_MODE = False
# Email reports - minimum time resolution (in minutes) for the crontab
EMAIL_REPORTS_CRON_RESOLUTION = 15
# Email report configuration
# From address in emails
EMAIL_REPORT_FROM_ADDRESS = '[email protected]'
# Send bcc of all reports to this address. Set to None to disable.
# This is useful for maintaining an audit trail of all email deliveries.
EMAIL_REPORT_BCC_ADDRESS = None
# User credentials to use for generating reports
# This user should have permissions to browse all the dashboards and
# slices.
# TODO: In the future, login as the owner of the item to generate reports
EMAIL_REPORTS_USER = 'admin'
EMAIL_REPORTS_SUBJECT_PREFIX = '[Report] '
# The webdriver to use for generating reports. Use one of the following:
#   firefox:
# Requires: geckodriver and firefox installations
# Limitations: can be buggy at times
# chrome:
# Requires: headless chrome
# Limitations: unable to generate screenshots of elements
EMAIL_REPORTS_WEBDRIVER = 'firefox'
# Window size - this will impact the rendering of the data
WEBDRIVER_WINDOW = {
'dashboard': (1600, 2000),
'slice': (3000, 1200),
}
# Any config options to be passed as-is to the webdriver
WEBDRIVER_CONFIGURATION = {}
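# Illustrative example only; valid keys depend on the selenium webdriver
# constructor in use (the path below is hypothetical):
# WEBDRIVER_CONFIGURATION = {'executable_path': '/usr/local/bin/geckodriver'}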
# The base URL to query for accessing the user interface
WEBDRIVER_BASEURL = 'http://0.0.0.0:8080/'
# Send user to a link where they can report bugs
BUG_REPORT_URL = None
# Send user to a link where they can read more about Superset
DOCUMENTATION_URL = None
# What is the Last N days relative in the time selector to:
# 'today' means it is midnight (00:00:00) of today in the local timezone
# 'now' means it is relative to the query issue time
DEFAULT_RELATIVE_END_TIME = 'today'
# Should the epoch_s/epoch_ms datetime format be interpreted as UTC?
# If not, epoch_s/epoch_ms is assumed to be seconds since 1/1/1970 in
# localtime (in the tz where the superset webserver is running)
IS_EPOCH_S_TRULY_UTC = False
try:
if CONFIG_PATH_ENV_VAR in os.environ:
# Explicitly import config module that is not in pythonpath; useful
# for case where app is being executed via pex.
print('Loaded your LOCAL configuration at [{}]'.format(
os.environ[CONFIG_PATH_ENV_VAR]))
module = sys.modules[__name__]
override_conf = imp.load_source(
'superset_config',
os.environ[CONFIG_PATH_ENV_VAR])
for key in dir(override_conf):
if key.isupper():
setattr(module, key, getattr(override_conf, key))
else:
from superset_config import * # noqa
import superset_config
print('Loaded your LOCAL configuration at [{}]'.format(
superset_config.__file__))
except ImportError:
pass
|
py | b402ff323e53ed2093a9a8ee2cea9409789e70b3 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
PLUGIN_INFO = {
'NAME': _(u"Slideshow"),
'CATEGORY': 'picture',
'VERSION': "1.0",
    'SHORT_DESCRIPTION': _(u"Slideshow of pictures"),
    'DESCRIPTION': _(u"Easily add an animated slideshow of pictures to your page. Select a list of pictures from your file manager, define the correct order, and configure the animation as you want it to be."),
}
|
py | b402ff43dce5f8fca54b4669eb711141b74e233b | import os
from fnmatch import fnmatch
def matches(path, pattern):
"""
Matches rsync-like pattern
Currently should obey the same rules as rsync, apart from ** patterns
"""
path = os.path.normpath(path)
if pattern == "/":
return True
else:
pattern = pattern.rstrip("/")
if pattern.startswith("/"):
return _anchored_match(path, pattern.lstrip("/"))
else:
return _floating_match(path, pattern)
def matches_any_of(path, patterns):
for pattern in patterns:
if matches(path, pattern):
return True
return False
def _anchored_match(path, pattern):
path_components = _get_path_components(path)
pattern_components = _get_path_components(pattern)
return _anchored_match_helper(path_components, pattern_components)
def _floating_match(path, pattern):
path_components = _get_path_components(path)
pattern_components = _get_path_components(pattern)
for i in range(len(path_components)):
subpath_components = path_components[i:]
if _anchored_match_helper(subpath_components, pattern_components):
return True
return False
def _anchored_match_helper(path_components, pattern_components):
if len(pattern_components) > len(path_components):
return False
elif len(pattern_components) == 0:
return True
elif len(pattern_components) == 1:
return fnmatch(path_components[0], pattern_components[0])
else:
return fnmatch(
path_components[0], pattern_components[0]
) and _anchored_match_helper(
path_components[1:], pattern_components[1:]
)
def _get_path_components(path):
path_components = []
while path and path != "/":
path, tail = os.path.split(path)
path_components.append(tail)
path_components.reverse()
return path_components
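# Illustrative usage sketch (the paths and patterns below are hypothetical):
if __name__ == "__main__":
    # An anchored pattern (leading "/") must match from the root of the path.
    print(matches("etc/nginx/nginx.conf", "/etc/nginx"))        # True
    # A floating pattern may match starting at any path component.
    print(matches("home/user/logs/app.log", "*.log"))           # True
    print(matches_any_of("var/tmp/cache", ["/etc", "*.log"]))   # False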
|
py | b4030165416fe6e1b2cfba393f9bdd83b9a747cb | import gym
import logging
from d4rl.pointmaze import waypoint_controller
from d4rl.pointmaze import maze_model, maze_layouts
import numpy as np
import pickle
import gzip
import h5py
import os
import argparse
def reset_data():
return {'observations': [],
'actions': [],
'terminals': [],
'rewards': [],
'infos/goal': [],
'infos/qpos': [],
'infos/qvel': [],
}
def append_data(data, s, a, tgt, done, env_data):
data['observations'].append(s)
data['actions'].append(a)
data['rewards'].append(0.0)
data['terminals'].append(done)
data['infos/goal'].append(tgt)
data['infos/qpos'].append(env_data.qpos.ravel().copy())
data['infos/qvel'].append(env_data.qvel.ravel().copy())
def npify(data):
for k in data:
if k == 'terminals':
dtype = np.bool_
else:
dtype = np.float32
data[k] = np.array(data[k], dtype=dtype)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--render', action='store_true', help='Render trajectories')
parser.add_argument('--noisy', action='store_true', help='Noisy actions')
parser.add_argument('--env_name', type=str, default='maze2d-umaze-v1', help='Maze type')
parser.add_argument('--num_samples', type=int, default=int(1e6), help='Num samples to collect')
parser.add_argument('--data_dir', type=str, default='.', help='Base directory for dataset')
parser.add_argument('--batch_idx', type=int, default=int(-1), help='(Optional) Index of generated data batch')
args = parser.parse_args()
env = gym.make(args.env_name)
maze = env.str_maze_spec
max_episode_steps = env._max_episode_steps
controller = waypoint_controller.WaypointController(maze)
env = maze_model.MazeEnv(maze)
env.set_target()
s = env.reset()
act = env.action_space.sample()
done = False
data = reset_data()
ts = 0
for _ in range(args.num_samples):
position = s[0:2]
velocity = s[2:4]
act, done = controller.get_action(position, velocity, env._target)
if args.noisy:
act = act + np.random.randn(*act.shape)*0.5
act = np.clip(act, -1.0, 1.0)
if ts >= max_episode_steps:
done = True
append_data(data, s, act, env._target, done, env.sim.data)
ns, _, _, _ = env.step(act)
if len(data['observations']) % 1000 == 0:
print(len(data['observations']))
ts += 1
if done:
env.set_target()
done = False
ts = 0
else:
s = ns
if args.render:
env.render()
if args.batch_idx >= 0:
        dir_name = 'maze2d-%s-noisy' % args.env_name if args.noisy else 'maze2d-%s-sparse' % args.env_name
os.makedirs(os.path.join(args.data_dir, dir_name), exist_ok=True)
fname = os.path.join(args.data_dir, dir_name, "rollouts_batch_{}.h5".format(args.batch_idx))
else:
os.makedirs(args.data_dir, exist_ok=True)
        fname = 'maze2d-%s-noisy.hdf5' % args.env_name if args.noisy else 'maze2d-%s-sparse.hdf5' % args.env_name
fname = os.path.join(args.data_dir, fname)
dataset = h5py.File(fname, 'w')
npify(data)
for k in data:
dataset.create_dataset(k, data=data[k], compression='gzip')
if __name__ == "__main__":
main()
|
py | b40302d299ea3da6a334e2730590060faa85fe49 | #
# PySNMP MIB module NBS-CMMCENUM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NBS-CMMCENUM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:17:11 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
nbs, = mibBuilder.importSymbols("NBS-MIB", "nbs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, IpAddress, NotificationType, Unsigned32, MibIdentifier, Counter64, Counter32, Gauge32, ModuleIdentity, iso, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "IpAddress", "NotificationType", "Unsigned32", "MibIdentifier", "Counter64", "Counter32", "Gauge32", "ModuleIdentity", "iso", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
nbsCmmcEnumMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 629, 225))
if mibBuilder.loadTexts: nbsCmmcEnumMib.setLastUpdated('201503120000Z')
if mibBuilder.loadTexts: nbsCmmcEnumMib.setOrganization('NBS')
if mibBuilder.loadTexts: nbsCmmcEnumMib.setContactInfo('For technical support, please contact your service channel')
if mibBuilder.loadTexts: nbsCmmcEnumMib.setDescription('This MIB module defines some frequently updated lists for NBS-CMMC-MIB.')
class NbsCmmcEnumChassisType(TextualConvention, Integer32):
description = 'The type of Chassis.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39))
namedValues = NamedValues(("other", 1), ("bu16", 2), ("bu4", 3), ("bu1", 4), ("bu5", 5), ("bu3", 6), ("bu2", 7), ("fCpe", 8), ("bmc", 9), ("virtual16", 10), ("bu21", 11), ("bu42", 12), ("virtual1", 13), ("virtual2", 14), ("virtual3", 15), ("virtual4", 16), ("bu22", 17), ("bu82", 18), ("bu3v", 19), ("virtual3v", 20), ("bu12", 21), ("occ48", 22), ("occ96", 23), ("occ128", 24), ("occ320", 25), ("od48", 26), ("virtod48", 27), ("od12", 28), ("virtod12", 29), ("od16", 30), ("virtod16", 31), ("od32", 32), ("virtod32", 33), ("od16lc", 34), ("virtod16lc", 35), ("od6", 36), ("virtod6", 37), ("od4", 38), ("virtod4", 39))
class NbsCmmcEnumSlotOperationType(TextualConvention, Integer32):
description = 'Mode, or primary function, of card in slot'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45))
namedValues = NamedValues(("other", 1), ("management", 2), ("converter", 3), ("repeater", 4), ("switch", 5), ("splitterCombiner", 6), ("fastRepeater", 7), ("gigabitRepeater", 8), ("monitor", 9), ("opticSwitch", 10), ("remote", 11), ("redundant", 12), ("centralOffice", 13), ("customerPremise", 14), ("multiplexer", 15), ("deprecated16", 16), ("deprecated17", 17), ("deprecated18", 18), ("optAmpBoosterAGC", 19), ("optAmpBoosterAPC", 20), ("optAmpInlineAGC", 21), ("optAmpInlineAPC", 22), ("optAmpPreampAGC", 23), ("optAmpPreampAPC", 24), ("coDualActive", 25), ("coDualInactive", 26), ("physLayerSwitch", 27), ("packetMux", 28), ("optAmpVariableGain", 29), ("optAmpMidstageAGC", 30), ("optAmpMidstageAPC", 31), ("multiCO1g", 32), ("multiCO10g", 33), ("addDropMux", 34), ("multicast", 35), ("optAttenuator", 36), ("repeater40G", 37), ("multiplexer4x10G", 38), ("optAmpPreampAPPC", 39), ("optPassive", 40), ("transponder", 41), ("muxponder", 42), ("addWssDropSplitter", 43), ("dropWssAddCombiner", 44), ("dualAddWssDropSplitter", 45))
class NbsCmmcEnumSlotType(TextualConvention, Integer32):
description = "This data type is used as the syntax of the nbsCmmcSlotType object in the definition of NBS-CMMC-MIB's nbsCmmcSlotTable. This object is used internally by Manager, and is not useful to most end-users."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254), SingleValueConstraint(255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509), SingleValueConstraint(510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 
708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757))
namedValues = NamedValues(("empty0", 0), ("empty1", 1), ("empty2", 2), ("empty3", 3), ("em316gs1", 4), ("em316gs2", 5), ("em316gs3", 6), ("em316fms1", 7), ("em316fms2", 8), ("em316fms3", 9), ("em316as1", 10), ("em316as2", 11), ("em316as3", 12), ("em316fds1", 13), ("em316fds2", 14), ("em316fds3", 15), ("em316o3s1", 16), ("em316o3s2", 17), ("em316o3s3", 18), ("em316o12s1", 19), ("em316o12s2", 20), ("em316o12s3", 21), ("em316gsfs1", 22), ("em316gsfs2", 23), ("em316gsfs3", 24), ("em316fsfs1", 25), ("em316fsfs2", 26), ("em316fsfsx", 27), ("em316fsfsz", 28), ("em316fmsfs1", 29), ("em316fmsfs2", 30), ("em316fmsfs3", 31), ("em316asfs2", 32), ("em316asfs3", 33), ("em316fdsfs2", 34), ("em316fdsfs3", 35), ("em316o3sfs2", 36), ("em316o3sfs3", 37), ("em316o12sfs2", 38), ("em316o12sfs3", 39), ("em316em", 40), ("em316emx", 41), ("em316es", 42), ("em316esx", 43), ("em315esz", 44), ("em316fm", 45), ("em316fs1", 46), ("em316fs2", 47), ("em316fsx", 48), ("em315fsz", 49), ("em3162swm", 50), ("em3162sws1", 51), ("em3162sws2", 52), ("em3162sws3a", 53), ("em3162sws3b", 54), ("em3164wdm", 55), ("em316nm", 56), ("em3164sw", 57), ("em3164hub", 58), ("em316sc3m", 59), ("em316sc8m", 60), ("em316sc3s", 61), ("em316sc5s", 62), ("em316fr1", 63), ("em316fr2", 64), ("em316fr3", 65), ("em316gr1", 66), ("em316gr2", 67), ("em316gr3", 68), ("em316f21", 69), ("em316f22", 70), ("em316wdm4", 71), ("em316g", 72), ("em316gsf", 73), ("em316fn", 74), ("em316fsfn", 75), ("em316fmsn", 76), ("em316fmsfn", 77), ("em316asn", 78), ("em316asfsn", 79), ("em316fdsn", 80), ("em316fdsfsn", 81), ("em316o3sn", 82), ("em316o3sfsn", 83), ("em316o12sn", 84), ("em316o12sfsn", 85), ("em316emsn", 86), ("em316emsfsn", 87), ("em316ssn", 88), ("em316ssfsn", 89), ("em316tr", 90), ("em316t1", 91), ("em316t1sf", 92), ("nc3162bu", 93), ("em316wdm4o12", 94), ("em316wdm4o3", 95), ("em316grg", 96), ("em316mso12", 97), ("em316mso3", 98), ("em316e1", 99), ("em316e1sf", 100), ("wdmtrnk", 101), ("em316wdm43", 102), ("em316wdm44", 103), ("em104", 104), ("em105", 105), ("em106", 106), ("em316ds31", 107), ("em316ds32", 108), ("em3164sw1", 109), ("em3166sw1", 110), ("em3166sw2", 111), ("em316wfcs", 112), ("em316wfts", 113), ("em316e11", 114), ("em316e12", 115), ("nc316bu31", 116), ("nc316bu32", 117), ("em316od3", 118), ("nc316nw41", 119), ("nc316nw42", 120), ("em316em1", 121), ("em316e2", 122), ("em316fc", 123), ("em316fcsf", 124), ("nc316nw43", 125), ("nc316nw44", 126), ("em316o48", 127), ("em316o48sf", 128), ("ns129", 129), ("ns130", 130), ("ns131", 131), ("em3163sw", 132), ("em3163swsf", 133), ("em316o3c1", 134), ("em316o3csf", 135), ("nc316nw45", 136), ("nc316nw46", 137), ("em316wdm4f", 138), ("em316wdm4fc", 139), ("em316dpg", 140), ("em3162gsws", 141), ("ns142", 142), ("em316wgcs", 143), ("em316wgts", 144), ("em316wfccs", 145), ("em316wfcts", 146), ("em316wecs", 147), ("em316wets", 148), ("em316osw", 149), ("ns150", 150), ("ns151", 151), ("em316fe11l", 152), ("em316ft11l", 153), ("em316wdm81", 154), ("ns155", 155), ("wdm38", 156), ("ns157", 157), ("em316o3f1", 158), ("ns159", 159), ("em316wdm85", 160), ("em316wdmc3", 161), ("ns162", 162), ("em316fmsh", 163), ("ns164", 164), ("ns165", 165), ("ns166", 166), ("em316e31", 167), ("ns168", 168), ("em316fe12r", 169), ("ns170", 170), ("ns171", 171), ("ns172", 172), ("em316gc1", 173), ("em316gcsf", 174), ("ns175", 175), ("ns176", 176), ("em316ds3sh", 177), ("ns178", 178), ("em316nmhb1", 179), ("em316ds3r", 180), ("ns181", 181), ("em316fe11r", 182), ("em316ft11r", 183), ("ns184", 184), ("em316wdmc4", 185), ("em316adsl1", 
186), ("ns187", 187), ("ns188", 188), ("ns189", 189), ("ns190", 190), ("ns191", 191), ("ns192", 192), ("ns193", 193), ("ns194", 194), ("em316gccsf", 195), ("em316gctsf", 196), ("em316osh", 197), ("ns198", 198), ("ns199", 199), ("ns200", 200), ("ns201", 201), ("ns202", 202), ("ns203", 203), ("ns204", 204), ("ns205", 205), ("ns206", 206), ("ns207", 207), ("ns208", 208), ("ns209", 209), ("em316sadm1", 210), ("ns211", 211), ("ns212", 212), ("em316flm1", 213), ("em316flm2", 214), ("ns215", 215), ("ns216", 216), ("ns217", 217), ("ns218", 218), ("wdm24ctr", 219), ("ns220", 220), ("wdm24ctl", 221), ("em316frm1", 222), ("em316frm2", 223), ("wdm44sf", 224), ("em316swrfhp", 225), ("ns226", 226), ("em316swhp", 227), ("ns228", 228), ("em316f2rm1", 229), ("em316f2rm2", 230), ("ns231", 231), ("ns232", 232), ("ns233", 233), ("ns234", 234), ("ns235", 235), ("ns236", 236), ("ns237", 237), ("ns238", 238), ("em316wfrmc", 239), ("em316wfrmt", 240), ("em316t1mux1", 241), ("em316t1mux2", 242), ("em316e1mux4j", 243), ("em316e1x4sfj", 244), ("ns245", 245), ("em316efrm1", 246), ("em316efrm2", 247), ("ns248", 248), ("ns249", 249), ("ns250", 250), ("ns251", 251), ("ns252", 252), ("ns253", 253), ("ns254", 254)) + NamedValues(("ns255", 255), ("ns256", 256), ("ns257", 257), ("em316sc1021", 258), ("ns259", 259), ("ns260", 260), ("ns261", 261), ("em316edsc1", 262), ("em316edsc2", 263), ("em316wdmslot", 264), ("em316wdmc265", 265), ("empty266", 266), ("em316wp1", 267), ("em316wp2", 268), ("em316oa", 269), ("em316e1mux1", 270), ("em316e1mux2", 271), ("em3162tsfp", 272), ("em316dmr48", 273), ("ns3162sfpr", 274), ("ns316xp342r", 275), ("em316ef", 276), ("em316efsf", 277), ("em316padms", 278), ("ns279", 279), ("ns280", 280), ("ns281", 281), ("ns316f16csfp", 282), ("ns316sdi8", 283), ("ns284", 284), ("em316wdmpa4", 285), ("em316wdmpa4t", 286), ("ns287", 287), ("em3162gbicl", 288), ("em3162gbicr", 289), ("em316ge1sfl", 290), ("em316ge1sfr", 291), ("em316fchub", 292), ("em316fcr", 293), ("em316mr48", 294), ("ns295", 295), ("em316fe1xx", 296), ("em316ft1sf", 297), ("em316gbicsfp", 298), ("ns299", 299), ("ns300", 300), ("em316pamulc8n", 301), ("em316pamulc4n", 302), ("em316t1muxrrm", 303), ("em316e1muxrrm", 304), ("ns305", 305), ("em316wo3c", 306), ("ns307", 307), ("em316grmah", 308), ("em316grmahsf", 309), ("em316efrmah", 310), ("em316efrmahsf", 311), ("em316erm", 312), ("em316ermsf", 313), ("em316efan", 314), ("em316efansf", 315), ("ns316", 316), ("nc316Xp343r", 317), ("ns318", 318), ("em316pamulc8", 319), ("em316pamulc4", 320), ("cm316fFtth", 321), ("ns322", 322), ("ns323", 323), ("ns324", 324), ("ns325", 325), ("em316padm41mu", 326), ("ns327", 327), ("em316pamuscm4", 328), ("em316pamuscd4", 329), ("em316pamuscm8", 330), ("em316pamuscd8", 331), ("em316muxmusc16", 332), ("em316dmuxmusc16", 333), ("ns334", 334), ("em316dpadms", 335), ("ns336", 336), ("em316dwmux16", 337), ("em316dwdmx16", 338), ("ns339", 339), ("ns340", 340), ("em316fe1sf", 341), ("em316xt1", 342), ("em316fe1rj", 343), ("em316gt1sfv", 344), ("ns345", 345), ("ns346", 346), ("ns347", 347), ("ns348", 348), ("ns349", 349), ("nc316xp322", 350), ("nc316xp323", 351), ("em316wermc", 352), ("em316wermt", 353), ("ns354", 354), ("ns355", 355), ("ns356", 356), ("ns357", 357), ("em316ee1rmft", 358), ("em316xe1rmft", 359), ("em316lx2", 360), ("em316lxm", 361), ("em316dwmux32", 362), ("em316dwdmx32v", 363), ("em316dwmux32nv", 364), ("em316dwdmx32n", 365), ("ns366", 366), ("ns367", 367), ("em316fe1rmft", 368), ("em316efe1ah", 369), ("em316eft1ah", 370), ("em316efe1rj", 371), 
("ns372", 372), ("ns373", 373), ("ns374", 374), ("em316grmahsh", 375), ("em316ermahsh", 376), ("ns377", 377), ("ns378", 378), ("em316ermah", 379), ("ns380", 380), ("em3162sfpx", 381), ("ns382", 382), ("pmcwdm8sfp", 383), ("ns384", 384), ("ns385", 385), ("mccSfp36", 386), ("mccGRj36", 387), ("em316osc", 388), ("em316gemx2r", 389), ("em316gemx6r", 390), ("mccSfp72", 391), ("mccGRj72", 392), ("em316gcl", 393), ("em316gclsf", 394), ("em316wgclc", 395), ("em316wgclt", 396), ("ns397", 397), ("ns398", 398), ("ns399", 399), ("ns400", 400), ("ns401", 401), ("ns402", 402), ("ns403", 403), ("ns404", 404), ("ns405", 405), ("ns406", 406), ("ns407", 407), ("ns408", 408), ("ns409", 409), ("ns410", 410), ("ns411", 411), ("ns412", 412), ("ns413", 413), ("ns414", 414), ("ns415", 415), ("ns416", 416), ("em316xfpr", 417), ("oemntgrmah", 418), ("oemntermah", 419), ("oemntnm", 420), ("em316wds3c", 421), ("em316wds3t", 422), ("em316we3c", 423), ("em316we3t", 424), ("ns425", 425), ("ns426", 426), ("em316eft1mua4v", 427), ("em316efx1mub4", 428), ("em316efe1muc4v", 429), ("ns430", 430), ("ns431", 431), ("ns432", 432), ("em316t1mux4rm", 433), ("em316e1muxrjrm", 434), ("em316e1mux4rm", 435), ("em316dmr", 436), ("em316mr", 437), ("ns438", 438), ("ns439", 439), ("ns440", 440), ("em316ge1rjsf", 441), ("em316mr48q", 442), ("em316dmr48q", 443), ("em316mrmx2r", 444), ("ns445", 445), ("ns446", 446), ("ns447", 447), ("ns448", 448), ("ns449", 449), ("ns450", 450), ("mcc9xfp", 451), ("ns452", 452), ("em316cdadd2", 453), ("em316cdadd1", 454), ("ns455", 455), ("ns456", 456), ("em316nmlx12", 457), ("em316nmlx21", 458), ("em316nmlx", 459), ("ns460", 460), ("em316sw22", 461), ("em316sw12", 462), ("em316sw04", 463), ("em316sw13", 464), ("ns465", 465), ("ns466", 466), ("ns467", 467), ("ns468", 468), ("ns469", 469), ("ns470", 470), ("em3164swb", 471), ("ns472", 472), ("ns473", 473), ("ns474", 474), ("em316csadsxx", 475), ("em316csadsxxyy", 476), ("em316csaddxx", 477), ("em316csaddxxyy", 478), ("em3163swb", 479), ("em316ds3", 480), ("em316dt3e3", 481), ("ns482", 482), ("em316mux4xn", 483), ("em316dmx4xn", 484), ("em316mux4xbd", 485), ("em316dmx4xbd", 486), ("em316mux8nbd", 487), ("em316dmx8nbd", 488), ("em316mux8bd", 489), ("em316dmx8bd", 490), ("em316dpadxx", 491), ("em316dpadxxyy", 492), ("em316dpad4xx", 493), ("em316dpad8xx", 494), ("em316wt1c", 495), ("ns496", 496), ("em316gt1rm", 497), ("em316g6t1rm1", 498), ("em316g6t1rm2", 499), ("em316dsadsxx", 500), ("em316ddaddxx", 501), ("em316ddaddxxyy", 502), ("em316edfalv", 503), ("em316psc", 504), ("em316sos", 505), ("em316doscb", 506), ("em316padm8", 507), ("em316csads4", 508), ("ns509", 509)) + NamedValues(("ns510", 510), ("ns511", 511), ("ns512", 512), ("em316plc", 513), ("ns514", 514), ("ns515", 515), ("ns516", 516), ("ns517", 517), ("ns518", 518), ("em316dwmx8", 519), ("ns520", 520), ("em316genpasv", 521), ("em316ge1rm", 522), ("ns523", 523), ("ns524", 524), ("em316g6e1rms2", 525), ("ns526", 526), ("ns527", 527), ("ns528", 528), ("ns529", 529), ("mcc18t1e1", 530), ("ns531", 531), ("ns532", 532), ("mcc18dt3e3", 533), ("em316edfar", 534), ("ns535", 535), ("ns536", 536), ("ns537", 537), ("em316ossh", 538), ("em316sc3", 539), ("ns540", 540), ("em316fc400", 541), ("ns542", 542), ("ns543", 543), ("ns544", 544), ("em316eusmv", 545), ("ns546", 546), ("ns547", 547), ("em316dcm100r", 548), ("em316dcm100l", 549), ("ns550", 550), ("em316twoxfpet", 551), ("em316dwmux16be", 552), ("ns553", 553), ("ns554", 554), ("empmc8xfp", 555), ("ns556", 556), ("em316dwmx16bem", 557), ("ns558", 558), 
("em316e1t1xy", 559), ("dwmx32rbm", 560), ("ns561", 561), ("ns562", 562), ("ns563", 563), ("empmc36t1e1", 564), ("ns565", 565), ("em316palc8nl", 566), ("em316palc8nr", 567), ("em316gswxy", 568), ("em316dwd40m5713", 569), ("em316dwd40m5712", 570), ("em316dwd40m5711", 571), ("em316mux535531b", 572), ("ns573", 573), ("em31610gxy", 574), ("ns575", 575), ("ns576", 576), ("ns577", 577), ("ns578", 578), ("ns579", 579), ("ns580", 580), ("ns581", 581), ("ns582", 582), ("ns583", 583), ("ns584", 584), ("em316os2", 585), ("em316osa", 586), ("ns587", 587), ("ns588", 588), ("ns589", 589), ("ns590", 590), ("ns591", 591), ("ns592", 592), ("em316ea", 593), ("ns594", 594), ("em316eusm10gr", 595), ("em316eusm10gl", 596), ("em316dmdxa16b1", 597), ("em316dmdxa16b2", 598), ("em316dmdxa16b3", 599), ("em316dmdxa16b4", 600), ("em316dmdxa16b5", 601), ("em316dmdxa40m01", 602), ("em316dmdxa40m02", 603), ("em316dmdxa40m03", 604), ("em316dmdxa40m04", 605), ("em316dmdxa40m05", 606), ("em316dmdxa40m06", 607), ("em316dmdxa40m07", 608), ("em316dmdxa40m08", 609), ("em316dmdxa40m09", 610), ("em316dmdxa40m10", 611), ("em316dmdxa40m11", 612), ("em316dmdxa16ra", 613), ("em316dmdxa16rb", 614), ("em31620g1", 615), ("em31620g2", 616), ("em31640g3", 617), ("em31640g4", 618), ("em31640g5", 619), ("em316rpon", 620), ("ns621", 621), ("empmc36sas", 622), ("em316osw8", 623), ("ns624", 624), ("ns625", 625), ("em31610g8swxyr", 626), ("em31610g8swxym", 627), ("em31610g8swxyl", 628), ("ns629", 629), ("em316cmux831b", 630), ("ns631", 631), ("em316mdx46ma001", 632), ("em316mdx46ma002", 633), ("em316mdx46ma003", 634), ("em316mdx46ma004", 635), ("em316mdx46ma005", 636), ("em316mdx46ma006", 637), ("em316mdx46ma007", 638), ("em316mdx46ma008", 639), ("em316mdx46ma009", 640), ("em316mdx46ma010", 641), ("em316mdx46ma011", 642), ("em316mdx46ma012", 643), ("em316osw128a", 644), ("em316osw128b", 645), ("em316osw128c", 646), ("em316osw128d", 647), ("em316osw128e", 648), ("em316osw128f", 649), ("em316osw128g", 650), ("em316osw128h", 651), ("em316osw128i", 652), ("em316osw128j", 653), ("em316osw128k", 654), ("em316osw128l", 655), ("em316osw128m", 656), ("ns657", 657), ("em316dcmxx", 658), ("em316osshlc", 659), ("em316eavg2217", 660), ("em316dmr10g3r", 661), ("em316fdt1e1rm", 662), ("em316sw8fxr", 663), ("em316sw8fxlv", 664), ("em316mdx46mx002", 665), ("em316mdx46mb003", 666), ("em316mdx46mb002", 667), ("em316mdx46mc002", 668), ("em316eamlp2017v", 669), ("ns670", 670), ("em316gemx4rr", 671), ("em316gemx4rlv", 672), ("empmcqsfp36", 673), ("ns674", 674), ("ns675", 675), ("em3162qsfp40", 676), ("ns677", 677), ("ns678", 678), ("mcc36ic", 679), ("ns680", 680), ("em316voar", 681), ("em316voalv", 682), ("em316dvmdxa", 683), ("em316dvmdxbv", 684), ("em316cmdxm8al", 685), ("em316cmdxm8ar", 686), ("ns687", 687), ("ns688", 688), ("em316dvmdxav1", 689), ("em316dvmdxav2", 690), ("em316dvmdxav3", 691), ("em316dvmdxav4", 692), ("em316dvmdxav5", 693), ("em316dvmdxav6", 694), ("em316dvmdxav7", 695), ("em316dvmdxav8", 696), ("em316dvmdxav9", 697), ("ns698", 698), ("ns699", 699), ("ns700", 700), ("em316ra12r", 701), ("em316ra12lv", 702), ("ns703", 703), ("em316ra12mv", 704), ("ns705", 705), ("ns706", 706), ("em316dmr10gf", 707), ("ns708", 708), ("ns709", 709), ("ns710", 710), ("ns711", 711), ("ns712", 712), ("ns713", 713), ("ns714", 714), ("ns715", 715), ("ns716", 716), ("ns717", 717), ("ns718", 718), ("ns719", 719), ("oddmr10g3r", 720), ("oddmr10gf", 721), ("od2hwss4dws", 722), ("od2hmxp100g", 723), ("odtxp100gf2c", 724), ("ns725", 725), ("em316raf10", 726), ("ns727", 727), 
("odtxp100g2c", 728), ("ns729", 729), ("od2hwss4dcw", 730), ("ns731", 731), ("ns732", 732), ("odugc", 733), ("ns734", 734), ("ns735", 735), ("odfiller", 736), ("odtxp100g2cw1", 737), ("od2hwss4dww", 738), ("ns739", 739), ("ns740", 740), ("ns741", 741), ("ns742", 742), ("ns743", 743), ("ns744", 744), ("ns745", 745), ("ns746", 746), ("em316twoxfp16g", 747), ("od2hdwss4dws", 748), ("ns749", 749), ("ns750", 750), ("ns751", 751), ("ns752", 752), ("od2hdmx10g", 753), ("ns754", 754), ("ns755", 755), ("ns756", 756), ("odtxp100gf", 757))
class NbsCmmcEnumPortConnector(TextualConvention, Integer32):
description = 'The Port Connector.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))
namedValues = NamedValues(("unknown", 1), ("removed", 2), ("foDSC", 3), ("foSC", 4), ("cuRj45", 5), ("foLC", 6), ("coaxF", 7), ("coaxBNC", 8), ("coax2BNC", 9), ("cuRj45wLEDs", 10), ("cuRj11", 11), ("cuDb9", 12), ("cuHssdc", 13), ("coaxHeader", 14), ("foFiberJack", 15), ("foMtRj", 16), ("foMu", 17), ("sg", 18), ("foPigtail", 19), ("cuPigtail", 20), ("smb", 21), ("firewireA", 22), ("firewireB", 23), ("cuRj48", 24), ("fo1LC", 25), ("fo2ST", 26), ("sataDevicePlug", 27), ("sataHostPlug", 28), ("miniCoax", 29), ("mpo", 30), ("miniSAS4x", 31), ("reserved", 32), ("cxpCuPassive", 33), ("cxpCuActive", 34), ("cxpFoActive", 35), ("cxpFoConnect", 36), ("fc", 37), ("cuMicroUsbB", 38), ("rj45wUSBRJ45Active", 39), ("rj45wUSBUSBActive", 40))
class NbsCmmcChannelBand(TextualConvention, Integer32):
description = "The ITU grid labels DWDM channels with a letter 'band' and a numeric channel. Within this mib, the band is indicated by this object, and the channel number is shown in the object nbsOsaChannelNumber. Frequencies of at least 180100 GHz but less than 190100 GHz are considered the L spectrum, and frequencies of at least 190100 but less than 200100 GHz are considered the C spectrum. Frequencies evenly divisible by 100 GHz are designated with a 'C' or 'L' prepended to the channel number. Frequencies that are offset by 50 GHz are designated 'H' within the C spectrum, and 'Q' within the L spectrum."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))
namedValues = NamedValues(("notSupported", 0), ("cBand", 1), ("hBand", 2), ("lBand", 3), ("qBand", 4))
mibBuilder.exportSymbols("NBS-CMMCENUM-MIB", NbsCmmcChannelBand=NbsCmmcChannelBand, NbsCmmcEnumChassisType=NbsCmmcEnumChassisType, NbsCmmcEnumPortConnector=NbsCmmcEnumPortConnector, PYSNMP_MODULE_ID=nbsCmmcEnumMib, NbsCmmcEnumSlotType=NbsCmmcEnumSlotType, nbsCmmcEnumMib=nbsCmmcEnumMib, NbsCmmcEnumSlotOperationType=NbsCmmcEnumSlotOperationType)
|
py | b4030488ff835cb657aac7e3e30f21e99085a030 | """dbnd URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls')),
    path('', include('polls.urls')),
]
|
py | b40304ece777569c8c8d3fd9b9fa70e286aa28d4 | #!/usr/bin/env python
# David Prihoda
# Average predictions in a Domain CSV file by protein
import argparse
import pandas as pd
PROTEIN_GROUP_COLS = ['gene_start', 'gene_end', 'gene_strand', 'protein_id']
PROTEIN_EXTRA_COLS = ['contig_id', 'in_cluster']
def agg_concat(s):
return ';'.join(s)
def average_protein_prediction(domains, y=None, concat_domains=True):
"""
Average predictions into a 'prediction' column by protein using the 'protein_id' and other PROTEIN_GROUP_COLS.
:param domains: DataFrame from the Domain CSV file
:param y: Series of predictions to be averaged and written in the 'prediction' column
:param concat_domains: Whether to include a ';'-concatenated list of pfam_ids for each protein.
:return: DataFrame of proteins with averaged 'prediction' column
"""
extra_cols = [col for col in PROTEIN_EXTRA_COLS if col in domains.columns]
cols = extra_cols + PROTEIN_GROUP_COLS
if concat_domains:
cols.append('pfam_id')
copy = domains[cols].copy()
copy['prediction'] = y
per_gene = copy.groupby(extra_cols + PROTEIN_GROUP_COLS, sort=False)
if concat_domains:
return per_gene.agg({'pfam_id': agg_concat, 'prediction': 'mean'})\
.rename(columns={'pfam_id': 'pfam_ids'})\
.reset_index()
else:
return per_gene.mean().reset_index()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", required=True,
help="Path to domain prediction CSV.", metavar="FILE")
parser.add_argument("-c", "--column", dest="column", default='prediction',
help="Prediction column.", metavar="STRING")
parser.add_argument("-o", "--output", dest="output", required=True,
help="Output file path.", metavar="FILE")
options = parser.parse_args()
domains = pd.read_csv(options.input)
proteins = average_protein_prediction(domains, domains[options.column])
proteins.to_csv(options.output, index=False)
print('Saved protein predictions to: {}'.format(options.output)) |
py | b40304eed5846ea925756d7acd5ccc8bab05a77e | # -*- coding: utf-8 -*-
from pyramid.view import view_config
import pyramid.httpexceptions as exc
from infolica.exceptions.custom_error import CustomError
from infolica.models.constant import Constant
from infolica.models.models import Numero, NumeroRelation, VNumerosRelations
from infolica.scripts.utils import Utils
from infolica.scripts.authentication import check_connected
from sqlalchemy import and_
@view_config(route_name='numeros_relations', request_method='GET', renderer='json')
@view_config(route_name='numeros_relations_s', request_method='GET', renderer='json')
def numeros_relations_view(request):
"""
Return all numeros_relations
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
query = request.dbsession.query(VNumerosRelations).all()
return Utils.serialize_many(query)
@view_config(route_name='numeros_relation_by_affaire_id', request_method='GET', renderer='json')
def numeros_relation_by_affaire_id_view(request):
"""
Return Numeros_relations
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
affaire_id = request.matchdict['id']
# filter by conditions
query = request.dbsession.query(VNumerosRelations).filter(
VNumerosRelations.affaire_id == affaire_id).all()
return Utils.serialize_many(query)
# """ Return all numeros_relations based on numero_base_id having project or valid numbers defined on it"""
# @view_config(route_name='numeros_relations_by_numeroBase', request_method='POST', renderer='json')
# @view_config(route_name='numeros_relations_by_numeroBase_s', request_method='POST', renderer='json')
# def numeros_relations_by_numeroBase_view(request):
# # Check connected
# if not check_connected(request):
# raise exc.HTTPForbidden()
#     # Retrieve the ids of the "projet" and "vigueur" states from the config
# settings = request.registry.settings
# numero_etat_projet_id = int(settings['numero_projet_id'])
# numero_etat_vigueur_id = int(settings['numero_vigueur_id'])
#     # Retrieve the list of the affaire's base numeros
# numeros_base_id_list = request.params['numeros_base_id_list'] if 'numeros_base_id_list' in request.params else None
# numeros_base_id_list = json.loads(numeros_base_id_list)
# query = request.dbsession.query(models.VNumerosRelations).filter(
# and_(
# models.VNumerosRelations.numero_base_id.in_(numeros_base_id_list),
# models.VNumerosRelations.numero_associe_etat_id.in_([numero_etat_projet_id, numero_etat_vigueur_id])
# )).all()
# return Utils.serialize_many(query)
@view_config(route_name='numeros_relations', request_method='POST', renderer='json')
@view_config(route_name='numeros_relations_s', request_method='POST', renderer='json')
def numeros_relations_new_view(request, params=None):
"""
Add new numeros_relations
"""
if params is None:
params = request.params
# Check authorization
if not Utils.has_permission(request, request.registry.settings['affaire_numero_edition']):
raise exc.HTTPForbidden()
# check that relation does not exist yet
conditions = []
conditions.append(NumeroRelation.numero_id_base == params['numero_id_base'])
conditions.append(NumeroRelation.numero_id_associe == params['numero_id_associe'])
conditions.append(NumeroRelation.relation_type_id == params['relation_type_id'])
conditions.append(NumeroRelation.affaire_id == params['affaire_id'])
rel = request.dbsession.query(NumeroRelation).filter(*conditions).first()
    if rel is not None:
return None
# Get numeros_relations instance
model = Utils.set_model_record(NumeroRelation(), params)
request.dbsession.add(model)
return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(NumeroRelation.__tablename__))
@view_config(route_name='numeros_relations', request_method='PUT', renderer='json')
@view_config(route_name='numeros_relations_s', request_method='PUT', renderer='json')
def numeros_relations_update_view(request):
"""
Update numeros_relations
"""
# Check authorization
if not Utils.has_permission(request, request.registry.settings['affaire_numero_edition']):
raise exc.HTTPForbidden()
model = request.dbsession.query(NumeroRelation)
# get instance
if "id" in request.params:
numrel_id = request.params["id"]
model = model.filter(NumeroRelation.id == numrel_id)
else:
num_base_id = request.params["numero_id_base"]
num_associe_id = request.params["numero_id_associe"]
affaire_old_id = request.params["affaire_old_id"]
model = model.filter(and_(
NumeroRelation.numero_id_base == num_base_id,
NumeroRelation.numero_id_associe == num_associe_id,
NumeroRelation.affaire_id == affaire_old_id,
))
model = model.first()
# update instance
    if model is not None and "affaire_new_id" in request.params:
model.affaire_id = request.params["affaire_new_id"] if "affaire_new_id" in request.params else None
return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(NumeroRelation.__tablename__))
@view_config(route_name='numeros_relations', request_method='DELETE', renderer='json')
@view_config(route_name='numeros_relations_s', request_method='DELETE', renderer='json')
def numeros_relations_delete_view(request):
"""
Delete numeros_relations
"""
# Check authorization
if not Utils.has_permission(request, request.registry.settings['affaire_numero_edition']):
raise exc.HTTPForbidden()
model = None
numero_relation_id = None
if "numero_relation_id" in request.params:
numero_relation_id = request.params["numero_relation_id"]
model = request.dbsession.query(NumeroRelation).filter(NumeroRelation.id == numero_relation_id).first()
elif "numero_base_id" in request.params and "affaire_id" in request.params:
numero_base_id = int(request.params["numero_base_id"])
affaire_id = int(request.params["affaire_id"])
numero_relation_id = "numero_base_id=" + request.params["numero_base_id"] + " & affaire_id=" + request.params["affaire_id"]
model = request.dbsession.query(NumeroRelation).filter(and_(
NumeroRelation.numero_id_base == numero_base_id,
NumeroRelation.affaire_id == affaire_id
)).first()
else:
raise CustomError.INCOMPLETE_REQUEST
if not model:
raise CustomError(
CustomError.RECORD_WITH_ID_NOT_FOUND.format(NumeroRelation.__tablename__, numero_relation_id))
request.dbsession.delete(model)
return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(NumeroRelation.__tablename__))
|
py | b4030615e3250496033c8bc1c77c95fde20db487 | #__init__.py
from .ASCII_Proto_ETH import ASCII_Proto_ETH |
py | b403069c825308fe1c03d20c317d8e86f9c81f0f | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
# transactions
# [END_TAGS]
import wttest
from wtscenario import make_scenarios
# test_txn01.py
# Transactions: basic functionality
class test_txn01(wttest.WiredTigerTestCase):
nentries = 1000
scenarios = make_scenarios([
('col-f', dict(uri='file:text_txn01',key_format='r',value_format='S')),
('col-t', dict(uri='table:text_txn01',key_format='r',value_format='S')),
('fix-f', dict(uri='file:text_txn01',key_format='r',value_format='8t')),
('fix-t', dict(uri='table:text_txn01',key_format='r',value_format='8t')),
('row-f', dict(uri='file:text_txn01',key_format='S',value_format='S')),
('row-t', dict(uri='table:text_txn01',key_format='S',value_format='S')),
])
# Return the number of records visible to the cursor.
def cursor_count(self, cursor):
count = 0
# Column-store appends result in phantoms, ignore records unless they
# have our flag value.
for r in cursor:
if self.value_format == 'S' or cursor.get_value() == 0xab:
count += 1
return count
# Checkpoint the database and assert the number of records visible to the
# checkpoint matches the expected value.
def check_checkpoint(self, expected):
s = self.conn.open_session()
s.checkpoint("name=test")
cursor = s.open_cursor(self.uri, None, "checkpoint=test")
self.assertEqual(self.cursor_count(cursor), expected)
s.close()
# Open a cursor with snapshot isolation, and assert the number of records
# visible to the cursor matches the expected value.
def check_txn_cursor(self, level, expected):
s = self.conn.open_session()
cursor = s.open_cursor(self.uri, None)
s.begin_transaction(level)
self.assertEqual(self.cursor_count(cursor), expected)
s.close()
# Open a session with snapshot isolation, and assert the number of records
# visible to the cursor matches the expected value.
def check_txn_session(self, level, expected):
s = self.conn.open_session(level)
cursor = s.open_cursor(self.uri, None)
s.begin_transaction()
self.assertEqual(self.cursor_count(cursor), expected)
s.close()
def check(self, cursor, committed, total):
# The cursor itself should see all of the records.
if cursor != None:
cursor.reset()
self.assertEqual(self.cursor_count(cursor), total)
# Read-uncommitted should see all of the records.
# Snapshot and read-committed should see only committed records.
self.check_txn_cursor('isolation=read-uncommitted', total)
self.check_txn_session('isolation=read-uncommitted', total)
self.check_txn_cursor('isolation=snapshot', committed)
self.check_txn_session('isolation=snapshot', committed)
self.check_txn_cursor('isolation=read-committed', committed)
self.check_txn_session('isolation=read-committed', committed)
# Checkpoints should only write committed items.
self.check_checkpoint(committed)
# Loop through a set of inserts, periodically committing; before each
# commit, verify the number of visible records matches the expected value.
def test_visibility(self):
self.session.create(self.uri,
'key_format=' + self.key_format +
',value_format=' + self.value_format)
committed = 0
cursor = self.session.open_cursor(self.uri, None)
self.check(cursor, 0, 0)
self.session.begin_transaction()
for i in range(self.nentries):
if i > 0 and i % (self.nentries // 37) == 0:
self.check(cursor, committed, i)
self.session.commit_transaction()
committed = i
self.session.begin_transaction()
if self.key_format == 'S':
cursor.set_key("key: %06d" % i)
else:
cursor.set_key(i + 1)
if self.value_format == 'S':
cursor.set_value("value: %06d" % i)
else:
cursor.set_value(0xab)
cursor.insert()
self.check(cursor, committed, self.nentries)
self.session.commit_transaction()
self.check(cursor, self.nentries, self.nentries)
# Test that read-committed is the default isolation level.
class test_read_committed_default(wttest.WiredTigerTestCase):
uri = 'table:test_txn'
# Return the number of records visible to the cursor.
def cursor_count(self, cursor):
count = 0
for r in cursor:
count += 1
return count
def test_read_committed_default(self):
self.session.create(self.uri, 'key_format=S,value_format=S')
cursor = self.session.open_cursor(self.uri, None)
self.session.begin_transaction()
cursor['key: aaa'] = 'value: aaa'
self.session.commit_transaction()
self.session.begin_transaction()
cursor['key: bbb'] = 'value: bbb'
s = self.conn.open_session()
cursor = s.open_cursor(self.uri, None)
s.begin_transaction("isolation=read-committed")
self.assertEqual(self.cursor_count(cursor), 1)
s.commit_transaction()
s.begin_transaction(None)
self.assertEqual(self.cursor_count(cursor), 1)
s.commit_transaction()
s.close()
if __name__ == '__main__':
wttest.run()
|
py | b40306c03502145483bf13773f804480c7d5d6f5 | # Test akara.dist.setup()
import sys
import os
import tempfile
import subprocess
import shutil
from akara import dist
class SetupException(Exception):
pass
# Do a bit of extra work since nosetests might run in the top-level
# Akara directory or in test/ .
dirname = os.path.dirname(__file__)
setup_scripts_dir = os.path.join(dirname, "setup_scripts")
assert os.path.isdir(setup_scripts_dir), setup_scripts_dir
def call_setup(args):
p = subprocess.Popen([sys.executable] + args,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
cwd=setup_scripts_dir)
stdout = p.stdout.read()
p.wait()
if p.returncode != 0:
raise SetupException("setup.py failure %d: %s" % (p.returncode, stdout,))
# print here to help in case of failures;
# nosetests prints captured stdout
print stdout
def test_basic():
dirname = tempfile.mkdtemp(prefix="akara_setup_test_")
try:
call_setup(["setup_basic.py", "install",
"--root", dirname,
"--akara-modules-dir", dirname])
assert os.path.exists(os.path.join(dirname, "blah.py"))
finally:
shutil.rmtree(dirname)
def test_missing():
try:
call_setup(["setup_missing.py", "install",
"--root", "/../this/does/not/exist",
"--akara-modules-dir", "/../this/does/not/exist"])
raise AssertionError
except SetupException, err:
s = str(err)
assert "you need to include the 'akara_extensions' parameter" in s, s
def test_bad_ext():
try:
call_setup(["setup_bad_ext.py", "install",
"--root", "/../this/does/not/exist",
"--akara-modules-dir", "/../this/does/not/exist"])
raise AssertionError
except SetupException, err:
s = str(err)
assert "Akara extensions must end with '.py'" in s, s
def test_specifying_config():
dirname = tempfile.mkdtemp(prefix="akara_setup_test_")
config_filename = os.path.join(dirname, "akara.conf")
try:
f = open(config_filename, "w")
f.write("class Akara: ConfigRoot = %r + '/blather'\n" % dirname)
f.close()
call_setup(["setup_basic.py", "install",
"--root", dirname,
"--akara-config", config_filename])
assert os.path.exists(os.path.join(dirname, "blather", "modules", "blah.py"))
finally:
shutil.rmtree(dirname)
# dirname has priority
def test_specifying_config_and_dir():
dirname = tempfile.mkdtemp(prefix="akara_setup_test_")
try:
try:
call_setup(["setup_basic.py", "install",
"--root", dirname,
"--akara-config", "setup_akara.conf",
"--akara-modules-dir", dirname])
assert os.path.exists(os.path.join(dirname, "blah.py"))
except SetupException, err:
s = str(err)
assert "flapdoodle" in s, s
finally:
shutil.rmtree(dirname)
|
py | b40307f7b2efee6d13f2a672d50b1baaa61aca7c | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Param sets for deterministic basic next frame prediction model."""
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import modalities
from tensor2tensor.models.video import base
from tensor2tensor.utils import registry
@registry.register_hparams
def next_frame_basic_deterministic():
"""Basic 2-frame conv model."""
hparams = base.next_frame_base()
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 1
hparams.hidden_size = 64
hparams.batch_size = 4
hparams.num_hidden_layers = 2
hparams.optimizer = "Adafactor"
hparams.learning_rate_constant = 1.5
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate_schedule = "linear_warmup * constant * rsqrt_decay"
hparams.label_smoothing = 0.0
hparams.initializer = "uniform_unit_scaling"
hparams.initializer_gain = 1.3
hparams.weight_decay = 0.0
hparams.clip_grad_norm = 1.0
hparams.dropout = 0.1
hparams.add_hparam("residual_dropout", 0.5)
hparams.add_hparam("num_compress_steps", 6)
hparams.add_hparam("filter_double_steps", 2)
hparams.add_hparam("pixel_sampling_temperature", 0.0)
hparams.add_hparam("concat_internal_states", False)
hparams.add_hparam("do_autoregressive_rnn", False)
hparams.add_hparam("autoregressive_rnn_lookback", 8)
hparams.add_hparam("autoregressive_rnn_warmup_steps", 8000)
hparams.add_hparam("activation_fn", "belu")
hparams.bottom["inputs"] = modalities.video_identity_bottom
hparams.bottom["targets"] = modalities.video_identity_bottom
return hparams
@registry.register_hparams
def next_frame_pixel_noise():
"""Basic 2-frame conv model with pixel noise."""
hparams = next_frame_basic_deterministic()
hparams.add_hparam("video_modality_input_noise", 0.05)
hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom
hparams.top["inputs"] = modalities.video_top
return hparams
@registry.register_hparams
def next_frame_pixel_noise_long():
"""Long scheduled sampling setting."""
hparams = next_frame_pixel_noise()
hparams.batch_size = 2
hparams.video_num_target_frames = 16
return hparams
@registry.register_hparams
def next_frame_sampling():
"""Basic conv model with scheduled sampling."""
hparams = next_frame_basic_deterministic()
hparams.scheduled_sampling_mode = "prob_inverse_exp"
hparams.scheduled_sampling_max_prob = 1.0
hparams.scheduled_sampling_decay_steps = 10000
return hparams
@registry.register_hparams
def next_frame_tpu():
hparams = next_frame_basic_deterministic()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def next_frame_ae():
"""Conv autoencoder."""
hparams = next_frame_basic_deterministic()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.hidden_size = 256
hparams.batch_size = 8
hparams.num_hidden_layers = 4
hparams.num_compress_steps = 4
hparams.dropout = 0.4
return hparams
@registry.register_hparams
def next_frame_ae_tiny():
"""Conv autoencoder, tiny set for testing."""
hparams = next_frame_tiny()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.batch_size = 8
hparams.dropout = 0.4
return hparams
@registry.register_hparams
def next_frame_small():
"""Small conv model."""
hparams = next_frame_basic_deterministic()
hparams.hidden_size = 32
return hparams
@registry.register_hparams
def next_frame_tiny():
"""Tiny for testing."""
hparams = next_frame_basic_deterministic()
hparams.hidden_size = 32
hparams.num_hidden_layers = 1
hparams.num_compress_steps = 2
hparams.filter_double_steps = 1
return hparams
@registry.register_hparams
def next_frame_l1():
"""Basic conv model with L1 modality."""
hparams = next_frame_basic_deterministic()
hparams.loss["targets"] = modalities.video_l1_loss
hparams.top["targets"] = modalities.video_l1_top
hparams.video_modality_loss_cutoff = 2.4
return hparams
@registry.register_hparams
def next_frame_l2():
"""Basic conv model with L2 modality."""
hparams = next_frame_basic_deterministic()
hparams.loss["targets"] = modalities.video_l2_loss
hparams.top["targets"] = modalities.video_l1_top
hparams.video_modality_loss_cutoff = 2.4
return hparams
@registry.register_ranged_hparams
def next_frame_base_range(rhp):
"""Basic tuning grid."""
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_discrete("hidden_size", [64, 128, 256])
rhp.set_int("num_compress_steps", 5, 8)
rhp.set_discrete("batch_size", [4, 8, 16, 32])
rhp.set_int("num_hidden_layers", 1, 3)
rhp.set_int("filter_double_steps", 1, 6)
rhp.set_float("learning_rate_constant", 1., 4.)
rhp.set_int("learning_rate_warmup_steps", 500, 3000)
rhp.set_float("initializer_gain", 0.8, 1.8)
@registry.register_ranged_hparams
def next_frame_doubling_range(rhp):
"""Filter doubling and dropout tuning grid."""
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_int("filter_double_steps", 2, 5)
@registry.register_ranged_hparams
def next_frame_clipgrad_range(rhp):
"""Filter doubling and dropout tuning grid."""
rhp.set_float("dropout", 0.3, 0.4)
rhp.set_float("clip_grad_norm", 0.5, 10.0)
@registry.register_ranged_hparams
def next_frame_xent_cutoff_range(rhp):
"""Cross-entropy tuning grid."""
rhp.set_float("video_modality_loss_cutoff", 0.005, 0.05)
@registry.register_ranged_hparams
def next_frame_ae_range(rhp):
"""Autoencoder world model tuning grid."""
rhp.set_float("dropout", 0.3, 0.5)
rhp.set_int("num_compress_steps", 1, 3)
rhp.set_int("num_hidden_layers", 2, 6)
rhp.set_float("learning_rate_constant", 1., 2.)
rhp.set_float("initializer_gain", 0.8, 1.5)
rhp.set_int("filter_double_steps", 2, 3)
|
py | b403083cb06d1bc7380b683a550d9c1edae7e84a | # Generated by Django 1.9.13 on 2017-10-13 19:22
import django.db.models.deletion
from django.db import migrations, models
import cms.models.fields
import djangocms_attributes_field.fields
import filer.fields.image
from djangocms_picture.models import (
LINK_TARGET, PICTURE_ALIGNMENT, get_templates,
)
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
('filer', '0007_auto_20161016_1055'),
]
operations = [
migrations.CreateModel(
name='Bootstrap4Picture',
fields=[
('template', models.CharField(choices=get_templates(), default=get_templates()[0][0], max_length=255, verbose_name='Template')),
('external_picture', models.URLField(blank=True, help_text='If provided, overrides the embedded image. Certain options such as cropping are not applicable to external images.', max_length=255, verbose_name='External image')),
('width', models.PositiveIntegerField(blank=True, help_text='The image width as number in pixels. Example: "720" and not "720px".', null=True, verbose_name='Width')),
('height', models.PositiveIntegerField(blank=True, help_text='The image height as number in pixels. Example: "720" and not "720px".', null=True, verbose_name='Height')),
('alignment', models.CharField(blank=True, choices=PICTURE_ALIGNMENT, help_text='Aligns the image according to the selected option.', max_length=255, verbose_name='Alignment')),
('caption_text', models.TextField(blank=True, help_text='Provide a description, attribution, copyright or other information.', verbose_name='Caption text')),
('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')),
('link_url', models.URLField(blank=True, help_text='Wraps the image in a link to an external URL.', max_length=2040, verbose_name='External URL')),
('link_target', models.CharField(blank=True, choices=LINK_TARGET, max_length=255, verbose_name='Link target')),
('link_attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Link attributes')),
('use_automatic_scaling', models.BooleanField(default=True, help_text='Uses the placeholder dimensions to automatically calculate the size.', verbose_name='Automatic scaling')),
('use_no_cropping', models.BooleanField(default=False, help_text='Outputs the raw image without cropping.', verbose_name='Use original image')),
('use_crop', models.BooleanField(default=False, help_text='Crops the image according to the thumbnail settings provided in the template.', verbose_name='Crop image')),
('use_upscale', models.BooleanField(default=False, help_text='Upscales the image to the size of the thumbnail settings in the template.', verbose_name='Upscale image')),
('cmsplugin_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='bootstrap4_picture_bootstrap4picture', serialize=False, to='cms.CMSPlugin')),
('picture_fluid', models.BooleanField(default=True, help_text='Adds the .img-fluid class to make the image responsive.', verbose_name='Responsive')),
('picture_rounded', models.BooleanField(default=False, help_text='Adds the .rounded class for round corners.', verbose_name='Rounded')),
('picture_thumbnail', models.BooleanField(default=False, help_text='Adds the .img-thumbnail class.', verbose_name='Thumbnail')),
('link_page', cms.models.fields.PageField(blank=True, help_text='Wraps the image in a link to an internal (page) URL.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms.Page', verbose_name='Internal URL')),
('picture', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='filer.Image', verbose_name='Image')),
('thumbnail_options', models.ForeignKey(blank=True, help_text='Overrides width, height, and crop; scales up to the provided preset dimensions.', null=True, on_delete=django.db.models.deletion.CASCADE, to='filer.ThumbnailOption', verbose_name='Thumbnail options')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
py | b40308a61524e8c8ffe56686bbdb894364ff308f | """
Core views.
Including the main homepage, documentation and header rendering,
and server errors.
"""
import os
import logging
from urllib.parse import urlparse
from django.conf import settings
from django.http import HttpResponseRedirect, Http404, JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
from django.views.static import serve as static_serve
from readthedocs.builds.models import Version
from readthedocs.core.utils.general import wipe_version_via_slugs
from readthedocs.core.resolver import resolve_path
from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
from readthedocs.projects.constants import PRIVATE
from readthedocs.projects.models import HTMLFile, Project
from readthedocs.redirects.utils import (
get_redirect_response,
project_and_path_from_request,
language_and_version_from_path
)
log = logging.getLogger(__name__)
class NoProjectException(Exception):
pass
class HomepageView(TemplateView):
template_name = 'homepage.html'
def get_context_data(self, **kwargs):
"""Add latest builds and featured projects."""
context = super().get_context_data(**kwargs)
context['featured_list'] = Project.objects.filter(featured=True)
return context
class SupportView(TemplateView):
template_name = 'support.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
support_email = settings.SUPPORT_EMAIL
if not support_email:
support_email = 'support@{domain}'.format(
domain=settings.PRODUCTION_DOMAIN
)
context['support_email'] = support_email
return context
def wipe_version(request, project_slug, version_slug):
version = get_object_or_404(
Version.internal.all(),
project__slug=project_slug,
slug=version_slug,
)
# We need to check by ``for_admin_user`` here to allow members of the
# ``Admin`` team (which doesn't own the project) under the corporate site.
if version.project not in Project.objects.for_admin_user(user=request.user):
raise Http404('You must own this project to wipe it.')
if request.method == 'POST':
wipe_version_via_slugs(
version_slug=version_slug,
project_slug=project_slug,
)
return redirect('project_version_list', project_slug)
return render(
request,
'wipe_version.html',
{'version': version, 'project': version.project},
)
def server_error_500(request, template_name='500.html'):
"""A simple 500 handler so we get media."""
r = render(request, template_name)
r.status_code = 500
return r
def server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa
"""
A simple 404 handler so we get media.
.. note::
Marking exception as optional to make /404/ testing page to work.
"""
response = get_redirect_response(request, full_path=request.get_full_path())
# Return a redirect response if there is one
if response:
if response.url == request.build_absolute_uri():
# check that we do have a response and avoid infinite redirect
log.warning(
'Infinite Redirect: FROM URL is the same than TO URL. url=%s',
response.url,
)
else:
return response
# Try to serve custom 404 pages if it's a subdomain/cname
if getattr(request, 'subdomain', False) or getattr(request, 'cname', False):
return server_error_404_subdomain(request, template_name)
# Return the default 404 page generated by Read the Docs
r = render(request, template_name)
r.status_code = 404
return r
def server_error_404_subdomain(request, template_name='404.html'):
"""
Handler for 404 pages on subdomains.
Check if the project associated has a custom ``404.html`` and serve this
page. First search for a 404 page in the current version, then continues
with the default version and finally, if none of them are found, the Read
the Docs default page (Maze Found) is rendered by Django and served.
"""
def resolve_404_path(project, version_slug=None, language=None, filename='404.html'):
"""
Helper to resolve the path of ``404.html`` for project.
The resolution is based on ``project`` object, version slug and
language.
        :returns: tuple containing the (basepath, filename, fullpath)
:rtype: tuple
"""
filename = resolve_path(
project,
version_slug=version_slug,
language=language,
filename=filename,
subdomain=True, # subdomain will make it a "full" path without a URL prefix
)
# This breaks path joining, by ignoring the root when given an "absolute" path
if filename[0] == '/':
filename = filename[1:]
version = None
if version_slug:
version_qs = project.versions.filter(slug=version_slug)
if version_qs.exists():
version = version_qs.first()
private = any([
version and version.privacy_level == PRIVATE,
not version and project.privacy_level == PRIVATE,
])
if private:
symlink = PrivateSymlink(project)
else:
symlink = PublicSymlink(project)
basepath = symlink.project_root
fullpath = os.path.join(basepath, filename)
return (basepath, filename, fullpath)
project, full_path = project_and_path_from_request(request, request.get_full_path())
if project:
language = None
version_slug = None
schema, netloc, path, params, query, fragments = urlparse(full_path)
if not project.single_version:
language, version_slug, path = language_and_version_from_path(path)
# Firstly, attempt to serve the 404 of the current version (version_slug)
# Secondly, try to serve the 404 page for the default version
# (project.get_default_version())
for slug in (version_slug, project.get_default_version()):
for tryfile in ('404.html', '404/index.html'):
basepath, filename, fullpath = resolve_404_path(project, slug, language, tryfile)
if os.path.exists(fullpath):
log.debug(
'serving 404.html page current version: [project: %s] [version: %s]',
project.slug,
slug,
)
r = static_serve(request, filename, basepath)
r.status_code = 404
return r
# Finally, return the default 404 page generated by Read the Docs
r = render(request, template_name)
r.status_code = 404
return r
def do_not_track(request):
dnt_header = request.META.get('HTTP_DNT')
# https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation
return JsonResponse( # pylint: disable=redundant-content-type-for-json-response
{
'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',
'same-party': [
'readthedocs.org',
'readthedocs.com',
'readthedocs.io', # .org Documentation Sites
'readthedocs-hosted.com', # .com Documentation Sites
],
'tracking': 'N' if dnt_header == '1' else 'T',
}, content_type='application/tracking-status+json',
)
|
py | b40308b73c313b05062e454c95ac1370f80d1211 | from hc.accounts.models import Profile
class TeamAccessMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if not request.user.is_authenticated:
return self.get_response(request)
teams_q = Profile.objects.filter(member__user_id=request.user.id)
teams_q = teams_q.select_related("user")
request.get_teams = lambda: list(teams_q)
request.profile = Profile.objects.for_user(request.user)
request.team = request.profile.team()
return self.get_response(request)
|
py | b40309618d81da81ee595892eb3da732947b2b57 | from flask_rebar.swagger_generation.swagger_generator import ExternalDocumentation
from flask_rebar.swagger_generation.swagger_generator import SwaggerV2Generator
from flask_rebar.swagger_generation.swagger_generator import Tag
from flask_rebar.swagger_generation.marshmallow_to_swagger import sets_swagger_attr
from flask_rebar.swagger_generation.marshmallow_to_swagger import ConverterRegistry
|
py | b403096e6739ed286088a149dfe934c746e03c49 | # The MIT License.
# Copyright (C) 2017 The Future Shell , Antony Jr.
#
# @filename : __init__.py
# @description : The traditional python package __init__ file
import argparse
import os
import sys
from .InstagramPyCLI import InstagramPyCLI
from .InstagramPySession import InstagramPySession, DEFAULT_PATH
from .InstagramPyInstance import InstagramPyInstance
from .InstagramPyDumper import InstagramPyDumper
from .InstagramPyScript import InstagramPyScript
from .InstagramPyConfigurationCreator import InstagramPyConfigurationCreator
from datetime import datetime
from .AppInfo import appInfo as AppInformation
from .colors import *
__version__ = AppInformation['version']
'''
Arguments for instagram-py command-line tool
'''
cli_parser = argparse.ArgumentParser(
epilog=AppInformation['example']
)
# nargs='+' makes them positional arguments.
cli_parser.add_argument('--username', # parse username from command line
'-u',
type=str,
help='username for Instagram account'
)
cli_parser.add_argument('--password-list', # parse path to password list file
'-pl',
type=str,
help='password list file to try with the given username.'
)
cli_parser.add_argument('--script',
'-s',
type=str,
help='Instagram-Py Attack Script.'
)
cli_parser.add_argument('--inspect-username',
'-i',
type=str,
help='Username to inspect in the instagram-py dump.'
)
cli_parser.add_argument('--create-configuration',
'-cc',
action='count',
help='Create a Configuration file for Instagram-Py with ease.'
)
cli_parser.add_argument('--default-configuration',
'-dc',
action='count',
help='noconfirm for Instagram-Py Configuration Creator!'
)
cli_parser.add_argument('--countinue',
'-c',
action='count',
                        help='Continue the previous attack if found.'
)
cli_parser.add_argument('--verbose', # check if the user wants verbose mode enabled
'-v',
action='count',
help='Activate Verbose mode. ( Verbose level )'
)
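# Illustrative invocation (console-script name assumed; see AppInformation['example']
# for the project's own usage text):
#   instagram-py --username target_user --password-list words.txt -v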
def ExecuteInstagramPy():
Parsed = cli_parser.parse_args()
if Parsed.create_configuration is not None:
if Parsed.default_configuration is not None:
InstagramPyConfigurationCreator(os.path.expanduser(
'~') + "/instapy-config.json").create()
else:
InstagramPyConfigurationCreator(os.path.expanduser(
'~') + "/instapy-config.json").easy_create()
elif Parsed.inspect_username is not None:
InstagramPyDumper(Parsed.inspect_username).Dump()
elif Parsed.script is not None:
if not os.path.isfile(Parsed.script):
print("No Attack Script found at {}".format(Parsed.script))
sys.exit(-1)
InstagramPyScript(Parsed.script).run()
elif Parsed.username is not None and Parsed.password_list is not None:
cli = InstagramPyCLI(appinfo=AppInformation,
started=datetime.now(), verbose_level=Parsed.verbose, username=Parsed.username)
cli.PrintHeader()
cli.PrintDatetime()
session = InstagramPySession(
Parsed.username, Parsed.password_list, DEFAULT_PATH, DEFAULT_PATH, cli)
session.ReadSaveFile(Parsed.countinue)
instagrampy = InstagramPyInstance(cli, session)
while not instagrampy.PasswordFound():
instagrampy.TryPassword()
session.WriteDumpFile(
{
"id": Parsed.username,
"password": session.CurrentPassword(),
"started": str(cli.started)
}
)
else:
cli_parser.print_help()
print('\n{}Report bug, suggestions and new features at {}{}https://github.com/antony-jr/instagram-py{}'
.format(Fore.GREEN,
Style.RESET_ALL,
Style.BRIGHT,
Style.RESET_ALL
))
sys.exit(0)
|
py | b4030984e648c1b16f9294af0b6262a1af3111ca | def cart_prod(*sets):
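    # Builds the n-ary Cartesian product of the given sets and returns it as a set
    # of tuples, e.g. cart_prod({1}, {'a', 'b'}) == {(1, 'a'), (1, 'b')}.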
result = [[]]
set_list = list(sets)
for s in set_list:
result = [x+[y] for x in result for y in s]
if (len(set_list) > 0):
return {tuple(prod) for prod in result}
else:
return set(tuple())
A = {1}
B = {1, 2}
C = {1, 2, 3}
X = {'a'}
Y = {'a', 'b'}
Z = {'a', 'b', 'c'}
print(cart_prod(A, B, C))
print(cart_prod(X, Y, Z))
|
py | b40309e4fa541356a04438cbccead7cfa6de0000 | from .base import *
DEBUG = True
ALLOWED_HOSTS = ['localhost']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
|
py | b4030a24810e4493f9836800e1363dd91a6fd152 | from messages.object import Object
class Bearing(Object):
def __init__(self):
super(Bearing, self).__init__()
|
py | b4030ac50bb526c5646410425379b30342b89aa3 | import csv
from io import StringIO
def csv_processor(input, options=None):
options = options or {}
delimiter = options.get('delimiter', ';')
reader = csv.reader(StringIO(input.decode('utf-8')), delimiter=delimiter)
result = []
headers = []
header = options.get('header', False)
for parts in reader:
if header and not headers:
            headers = list(parts)
continue
if header:
row = {}
for i, part in enumerate(parts):
row[headers[i]] = part
result.append(row)
else:
result.append(list(map(lambda x: x, parts)))
return result
|
py | b4030cd52e837b6fd28d3e63ebefdccb2aa3d440 | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from database import Base
class Brand(Base):
__tablename__ = "brands"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True, index=True)
discounts = relationship("Discount", back_populates="owner")
class Discount(Base):
__tablename__ = "discounts"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, index=True)
description = Column(String, index=True)
is_active = Column(Boolean, default=False)
owner_id = Column(Integer, ForeignKey("brands.id"))
owner = relationship("Brand", back_populates="discounts")
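# Illustrative sketch (not part of the original module): with a session bound to
# these models, assigning `owner` populates the reverse `discounts` collection.
#   brand = Brand(name="acme")
#   discount = Discount(code="SAVE10", description="10% off", owner=brand)
#   session.add(brand)   # `session` is assumed to be a configured SQLAlchemy Session
#   assert discount in brand.discounts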
|
py | b4030d6b308d9936bc9855280ec9b77b6afd2d2e | n = int(input("Enter value of n: "))
str_list = [input() for _ in range(n)]
count = 0
for string in str_list:
if string.endswith(string[0]) and len(string) > 1:
count += 1
print("Count is: ", count)
[print(string) for string in str_list if len(string)%2 != 0] |
py | b4030da9dee5809f19e3321438d8638d554496cf | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis import literal
from ibis.tests.util import assert_equal
def test_lower_upper(table):
lresult = table.g.lower()
uresult = table.g.upper()
assert isinstance(lresult, ir.StringColumn)
assert isinstance(uresult, ir.StringColumn)
assert isinstance(lresult.op(), ops.Lowercase)
assert isinstance(uresult.op(), ops.Uppercase)
lit = literal('FoO')
lresult = lit.lower()
uresult = lit.upper()
assert isinstance(lresult, ir.StringScalar)
assert isinstance(uresult, ir.StringScalar)
def test_substr(table):
lit = literal('FoO')
result = table.g.substr(2, 4)
lit_result = lit.substr(0, 2)
assert isinstance(result, ir.StringColumn)
assert isinstance(lit_result, ir.StringScalar)
op = result.op()
assert isinstance(op, ops.Substring)
start, length = op.args[1:]
assert start.equals(literal(2))
assert length.equals(literal(4))
def test_left_right(table):
result = table.g.left(5)
expected = table.g.substr(0, 5)
assert result.equals(expected)
result = table.g.right(5)
op = result.op()
assert isinstance(op, ops.StrRight)
assert op.args[1].equals(literal(5))
def test_length(table):
lit = literal('FoO')
result = table.g.length()
lit_result = lit.length()
assert isinstance(result, ir.IntegerColumn)
assert isinstance(lit_result, ir.IntegerScalar)
assert isinstance(result.op(), ops.StringLength)
def test_join(table):
dash = literal('-')
expr = dash.join([table.f.cast('string'), table.g])
assert isinstance(expr, ir.StringColumn)
expr = dash.join([literal('ab'), literal('cd')])
assert isinstance(expr, ir.StringScalar)
def test_contains(table):
expr = table.g.contains('foo')
expected = table.g.find('foo') >= 0
assert_equal(expr, expected)
with pytest.raises(TypeError):
'foo' in table.g
@pytest.mark.parametrize(
('left_slice', 'right_start', 'right_stop'),
[(slice(None, 3), 0, 3), (slice(2, 6), 2, 4)],
)
def test_getitem_slice(table, left_slice, right_start, right_stop):
case = table.g[left_slice]
expected = table.g.substr(right_start, right_stop)
assert_equal(case, expected)
def test_add_radd(table, string_col):
string_col = table[string_col]
assert isinstance(literal('foo') + 'bar', ir.StringScalar)
assert isinstance('bar' + literal('foo'), ir.StringScalar)
assert isinstance(string_col + 'bar', ir.StringColumn)
assert isinstance('bar' + string_col, ir.StringColumn)
def test_startswith(table):
assert isinstance(table.g.startswith('foo'), ir.BooleanColumn)
assert isinstance(literal('bar').startswith('foo'), ir.BooleanScalar)
def test_endswith(table):
assert isinstance(table.g.endswith('foo'), ir.BooleanColumn)
assert isinstance(literal('bar').endswith('foo'), ir.BooleanScalar)
|
py | b4030e43c9be703f52f078831f511268a04aa2bd | #!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks HB selection logic."""
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal
class CompactBlocksConnectionTest(SyscoinTestFramework):
"""Test class for verifying selection of HB peer connections."""
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
def peer_info(self, from_node, to_node):
"""Query from_node for its getpeerinfo about to_node."""
for peerinfo in self.nodes[from_node].getpeerinfo():
if "(testnode%i)" % to_node in peerinfo['subver']:
return peerinfo
return None
def setup_network(self):
self.setup_nodes()
# Start network with everyone disconnected
self.sync_all()
def relay_block_through(self, peer):
"""Relay a new block through peer peer, and return HB status between 1 and [2,3,4,5]."""
self.connect_nodes(peer, 0)
self.generate(self.nodes[0], 1)
self.disconnect_nodes(peer, 0)
status_to = [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)]
status_from = [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)]
assert_equal(status_to, status_from)
return status_to
def run_test(self):
self.log.info("Testing reserved high-bandwidth mode slot for outbound peer...")
# Connect everyone to node 0, and mine some blocks to get all nodes out of IBD.
for i in range(1, 6):
self.connect_nodes(i, 0)
self.generate(self.nodes[0], 2)
for i in range(1, 6):
self.disconnect_nodes(i, 0)
# Construct network topology:
# - Node 0 is the block producer
# - Node 1 is the "target" node being tested
# - Nodes 2-5 are intermediaries.
# - Node 1 has an outbound connection to node 2
# - Node 1 has inbound connections from nodes 3-5
self.connect_nodes(3, 1)
self.connect_nodes(4, 1)
self.connect_nodes(5, 1)
self.connect_nodes(1, 2)
# Mine blocks subsequently relaying through nodes 3,4,5 (inbound to node 1)
for nodeid in range(3, 6):
status = self.relay_block_through(nodeid)
assert_equal(status, [False, nodeid >= 3, nodeid >= 4, nodeid >= 5])
# And again through each. This should not change HB status.
for nodeid in range(3, 6):
status = self.relay_block_through(nodeid)
assert_equal(status, [False, True, True, True])
# Now relay one block through peer 2 (outbound from node 1), so it should take HB status
# from one of the inbounds.
status = self.relay_block_through(2)
assert_equal(status[0], True)
assert_equal(sum(status), 3)
# Now relay again through nodes 3,4,5. Since 2 is outbound, it should remain HB.
for nodeid in range(3, 6):
status = self.relay_block_through(nodeid)
assert status[0]
assert status[nodeid - 2]
assert_equal(sum(status), 3)
# Reconnect peer 2, and retry. Now the three inbounds should be HB again.
self.disconnect_nodes(1, 2)
self.connect_nodes(1, 2)
for nodeid in range(3, 6):
status = self.relay_block_through(nodeid)
assert not status[0]
assert status[nodeid - 2]
assert_equal(status, [False, True, True, True])
if __name__ == '__main__':
CompactBlocksConnectionTest().main()
|
py | b4030f6d60ba3b82a5a7fff71d4a0ca8039ed924 | # Copyright 2017, Fabien Boucher
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from collections import OrderedDict
from pecan import expose
from pecan import conf
from repoxplorer import index
from repoxplorer.controllers import utils
from repoxplorer.index.commits import Commits
from repoxplorer.index.contributors import Contributors
xorkey = conf.get('xorkey') or 'default'
class SearchController(object):
@expose('json')
def search_authors(self, query=""):
ret_limit = 100
c = Commits(index.Connector())
ret = c.es.search(
index=c.index, doc_type=c.dbname,
q=query, df="author_name", size=10000,
default_operator="AND",
_source_include=["author_name", "author_email"])
ret = ret['hits']['hits']
if not len(ret):
return {}
idents = Contributors()
authors = dict([(d['_source']['author_email'],
d['_source']['author_name']) for d in ret])
result = {}
_idents = idents.get_idents_by_emails(list(authors.keys())[:ret_limit])
for iid, ident in _idents.items():
email = ident['default-email']
name = ident['name'] or authors[email]
result[utils.encrypt(xorkey, iid)] = {
'name': name,
'gravatar': hashlib.md5(
email.encode(errors='ignore')).hexdigest()}
result = OrderedDict(
sorted(list(result.items()), key=lambda t: t[1]['name']))
return result
|
py | b4030fa1bce9991b331611443fce5a3d6475f1f9 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRoutePort']
class ExpressRoutePort(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bandwidth_in_gbps: Optional[pulumi.Input[int]] = None,
encapsulation: Optional[pulumi.Input[str]] = None,
express_route_port_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
peering_location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
ExpressRoutePort resource definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] bandwidth_in_gbps: Bandwidth of procured ports in Gbps.
:param pulumi.Input[str] encapsulation: Encapsulation method on physical ports.
:param pulumi.Input[str] express_route_port_name: The name of the ExpressRoutePort resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The identity of ExpressRoutePort, if configured.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]] links: The set of physical links of the ExpressRoutePort resource.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] peering_location: The name of the peering location that the ExpressRoutePort is mapped to physically.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['bandwidth_in_gbps'] = bandwidth_in_gbps
__props__['encapsulation'] = encapsulation
if express_route_port_name is None:
raise TypeError("Missing required property 'express_route_port_name'")
__props__['express_route_port_name'] = express_route_port_name
__props__['id'] = id
__props__['identity'] = identity
__props__['links'] = links
__props__['location'] = location
__props__['peering_location'] = peering_location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['allocation_date'] = None
__props__['circuits'] = None
__props__['etag'] = None
__props__['ether_type'] = None
__props__['mtu'] = None
__props__['name'] = None
__props__['provisioned_bandwidth_in_gbps'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRoutePort")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRoutePort, __self__).__init__(
'azure-nextgen:network/v20200701:ExpressRoutePort',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRoutePort':
"""
Get an existing ExpressRoutePort resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRoutePort(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocationDate")
def allocation_date(self) -> pulumi.Output[str]:
"""
Date of the physical port allocation to be used in Letter of Authorization.
"""
return pulumi.get(self, "allocation_date")
@property
@pulumi.getter(name="bandwidthInGbps")
def bandwidth_in_gbps(self) -> pulumi.Output[Optional[int]]:
"""
Bandwidth of procured ports in Gbps.
"""
return pulumi.get(self, "bandwidth_in_gbps")
@property
@pulumi.getter
def circuits(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
"""
Reference the ExpressRoute circuit(s) that are provisioned on this ExpressRoutePort resource.
"""
return pulumi.get(self, "circuits")
@property
@pulumi.getter
def encapsulation(self) -> pulumi.Output[Optional[str]]:
"""
Encapsulation method on physical ports.
"""
return pulumi.get(self, "encapsulation")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="etherType")
def ether_type(self) -> pulumi.Output[str]:
"""
Ether type of the physical port.
"""
return pulumi.get(self, "ether_type")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
The identity of ExpressRoutePort, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def links(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteLinkResponse']]]:
"""
The set of physical links of the ExpressRoutePort resource.
"""
return pulumi.get(self, "links")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def mtu(self) -> pulumi.Output[str]:
"""
Maximum transmission unit of the physical port pair(s).
"""
return pulumi.get(self, "mtu")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> pulumi.Output[Optional[str]]:
"""
The name of the peering location that the ExpressRoutePort is mapped to physically.
"""
return pulumi.get(self, "peering_location")
@property
@pulumi.getter(name="provisionedBandwidthInGbps")
def provisioned_bandwidth_in_gbps(self) -> pulumi.Output[float]:
"""
Aggregate Gbps of associated circuit bandwidths.
"""
return pulumi.get(self, "provisioned_bandwidth_in_gbps")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the express route port resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the express route port resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b403104a45ede1110a9c5cca95878c43993fc086 | # Generated by Django 3.0.7 on 2020-11-25 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drip', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='querysetrule',
name='rule_type',
field=models.CharField(choices=[('or', 'Or'), ('and', 'And')], default='and', max_length=3),
),
]
|
py | b403104bc6d9ae81ee67adb693bb8bd037f7ca97 | def calcula_soma(lista):
    # Sums every element of the list and returns the total.
    soma_da_lista = 0
    for valor in lista:
        soma_da_lista += valor
    return soma_da_lista
def converte_entrada(texto):
    # Splits the raw input string on spaces into a list of tokens.
    lista = list(texto.split(" "))
    return lista
def processa_numeros(entrada):
    # Converts the tokens to numbers and returns a (sum, count) tuple,
    # which main() uses to compute the average.
    lista_de_numeros = [float(valor) for valor in entrada]
    soma = calcula_soma(lista_de_numeros)
    return (soma, len(lista_de_numeros))
def main():
    entrada = input('Digite números separados por espaço ')
    lista_com_numeros = converte_entrada(entrada)
    media = processa_numeros(lista_com_numeros)
    media = media[0] / media[1]
    print(f'A média desses números é {media:.2f}')
if __name__ == '__main__':
    main()
|
py | b403115407e4ae4122cf3ff24127c99dcc58047a | #!/usr/bin/env python
"""
Stupid RTMP server that does a correct handshake and then sends a predefined
sequence of RTMP frames to establish a connection with the client. Then data
is just thrown away.
http://wwwimages.adobe.com/content/dam/Adobe/en/devnet/rtmp/pdf/rtmp_specification_1.0.pdf
"""
import binascii
import logging
import random
import time
from optparse import OptionParser
import vstcp
VERBOSE = False
C0 = binascii.unhexlify("03")
S0 = C0
ZERO = int(time.time() * 1000)
ETIME = hex(int(time.time()))[2:]
RANDOM = ""
for _ in xrange(1536 - 8):
r = hex(random.randint(0, 255))[2:]
if len(r) == 1:
r = "0" + r
RANDOM += r
class RTMPPacket(object):
def __str__(self):
return self.header + self.body
def __add__(self, other):
return str(self) + str(other)
def __radd__(self, other):
return other + str(self)
@property
def body(self):
raise Exception("implement in subclass")
@property
def length(self):
return len(self.body) / 2
@property
def basic_header(self):
def first_byte(rest):
return "{0:#0{1}x}".format(self.hfmt * 2 ** 6 + rest,
1 * 2 + 2)[2:]
if self.chunk_streamid < 64:
return first_byte(self.chunk_streamid)
elif self.chunk_streamid < 320:
return first_byte(0) + "{0:#0{1}x}".format(self.chunk_streamid,
1 * 2 + 2)[2:]
elif self.chunk_streamid < 65600:
return first_byte(2 ** 6 - 1) + "{0:#0{1}x}".format(
self.chunk_streamid, 2 * 2 + 2)[2:]
else:
raise Exception("Stream ID not supported")
@property
def message_header(self):
if self.hfmt == 0:
# 11 bytes long
return "{0:#0{1}x}".format(
16777210 * 16 ** (8 * 2) +\
self.length * 16 ** (5 * 2) +\
self.typeid * 16 ** (4 * 2) +\
self.streamid,
11 * 2 + 2)[2:]
elif self.hfmt == 1:
# 7 bytes long
return "{0:#0{1}x}".format(
1 * 16 ** (4 * 2) +\
self.length * 16 ** (1 * 2) +\
self.typeid,
7 * 2 + 2)[2:]
elif self.hfmt == 2:
# 3 bytes long
return "{0:#0{1}x}".format(1, 3 * 2 + 2)[2:]
else:
raise Exception("unknown header format")
@property
def header(self):
return self.basic_header + self.message_header
class RTMPChunkSizePacket(RTMPPacket):
def __init__(self, size):
if size >= 2 ** 32:
raise Exception("chunk size too big")
self.size = size
self.hfmt = 0
self.chunk_streamid = 2
self.typeid = 1
self.streamid = 0
@property
def body(self):
return "{0:#0{1}x}".format(self.size, 4 * 2 + 2)[2:]
class RMTPGenericPacket(RTMPPacket):
def __init__(self, hfmt, chunk_streamid, body, streamid=1):
self.hfmt = hfmt
self.chunk_streamid = chunk_streamid
self.streamid = streamid
self.hard_body = body
self.typeid = 20
@property
def body(self):
return self.hard_body
class RTMPCommandResult(RMTPGenericPacket):
def __init__(self):
super(RTMPCommandResult, self).__init__(
hfmt=0,
chunk_streamid=3,
streamid=0,
body="0200075f726573756c74003ff0000000000000030006666d7356657202000e464d532f332c352c352c32303034000c6361706162696c697469657300403f00000000000000046d6f6465003ff00000000000000000090300056c6576656c0200067374617475730004636f646502001d4e6574436f6e6e656374696f6e2e436f6e6e6563742e53756363657373000b6465736372697074696f6e020015436f6e6e656374696f6e207375636365656465642e0008636c69656e746964004094e40000000000000e6f626a656374456e636f64696e67000000000000000000000009",
)
class RTMPOnPublish(RMTPGenericPacket):
def __init__(self):
super(RTMPOnPublish, self).__init__(
hfmt=1,
chunk_streamid=3,
body="02000b6f6e46435075626c69736800000000000000000005030004636f64650200174e657453747265616d2e5075626c6973682e5374617274000b6465736372697074696f6e020027506c6561736520666f6c6c6f7775702077697468207075626c69736820636f6d6d616e642e2e2e000009",
)
class RTMPCommandResult1(RMTPGenericPacket):
def __init__(self):
super(RTMPCommandResult1, self).__init__(
hfmt=1,
chunk_streamid=3,
body="0200075f726573756c7400401000000000000005003ff0000000000000",
)
class RTMPCommandResult2(RMTPGenericPacket):
def __init__(self):
super(RTMPCommandResult2, self).__init__(
hfmt=0,
chunk_streamid=3,
body="0200075f726573756c74004014000000000000050101",
streamid=1,
)
class RTMPWindowAcknowledgement(RTMPPacket):
def __init__(self, size):
self.size = size
self.hfmt = 1
self.chunk_streamid = 2
self.typeid = 5
@property
def body(self):
return "{0:#0{1}x}".format(self.size, 4 * 2 + 2)[2:]
class RTMPSetPeerBandwidth(RTMPPacket):
def __init__(self, size, limit_type):
self.size = size
self.ltype = limit_type
self.hfmt = 1
self.chunk_streamid = 2
self.typeid = 6
@property
def body(self):
return "{0:#0{1}x}".format(
self.size * 16 ** (1 * 2) + self.ltype, 5 * 2 + 2)[2:]
class RTMPUserControlMessageStreamBegin(RTMPPacket):
def __init__(self, hfmt=1):
self.hfmt = hfmt
self.chunk_streamid = 2
self.typeid = 4
@property
def body(self):
return "{0:#0{1}x}".format(1, 6 * 2 + 2)[2:]
def bin_str_to_int(string):
try:
return int(binascii.hexlify(string), 16)
except ValueError:
raise Exception("not bin data, someone is smoking")
def hexa_str_to_int(string):
try:
return int(string, 16)
except ValueError:
raise Exception("not hexa data, someone is smoking")
def split_n(n, string):
tmp = []
while string:
tmp.append(string[:n])
string = string[n:]
return tmp
def hexa_str_to_streamid(string):
tab = split_n(2, string)[::-1]
string = "".join(tab)
return hexa_str_to_int(string)
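# Illustrative example (not part of the original module): the 4-byte RTMP message
# stream ID is little-endian on the wire, so the hex string "01000000" is split
# into byte pairs, reversed to "00000001" and parsed as the integer 1.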
FMT_TO_LENGTH = {
0 : 11,
1 : 7,
2 : 3,
3 : 0,
}
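# Message-header length in bytes for each basic-header fmt value; fmt 3 chunks
# reuse the previous header and carry no message header of their own.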
CHUNK_SIZE = 4096
HARD = [
RTMPChunkSizePacket(CHUNK_SIZE),
RTMPWindowAcknowledgement(2500000) + \
RTMPSetPeerBandwidth(2500000, 0) + \
RTMPUserControlMessageStreamBegin() + \
RTMPCommandResult(),
RTMPOnPublish(),
RTMPCommandResult1() + \
RTMPUserControlMessageStreamBegin(2),
RTMPCommandResult2(),
RTMPUserControlMessageStreamBegin(2),
]
class RTMPServer(vstcp.VSTCPServer):
def __init__(self, host, port, timeout=1, logfile=None):
super(RTMPServer, self).__init__(host, port, timeout, logfile)
self.handshake = False
self.c1 = None
self.s1 = None
self.state = -1
self.video = 0
self.audio = 0
self.current_video = 0
self.current_audio = 0
self.current_bytes = 0
self.time_ref = 0
self.previous_length = 0
def mock(self):
self.state += 1
self.logger.debug("We are in state " + str(self.state))
self.conn.sendall(binascii.unhexlify(
str(HARD[self.state]).replace(" ", "")))
def c0(self):
self.c1 = None
self.s1 = None
self.handshake = False
self.state = -1
def c0_c1(self):
self.conn.sendall(S0)
self.c1 = self.data
self.s1 = binascii.unhexlify(ETIME + "00000000" + RANDOM)
self.conn.sendall(self.s1)
def handle(self):
#hanshake and all
if self.state < len(HARD) - 1:
self.data = self.conn.recv(1)
if self.handshake:
basic_header = bin_str_to_int(self.data)
fmt = basic_header >> 6
tmp = ""
if fmt == 0:
tmp = self.conn.recv(11)
elif fmt == 1:
tmp = self.conn.recv(7)
if not tmp:
return
message_header = binascii.hexlify(tmp)
message_length = hexa_str_to_int(message_header[3 * 2:6 * 2])
self.logger.debug("packet with body length {}".format(
message_length))
tmp = self.conn.recv(message_length)
self.mock()
if self.state in [0, 2]:
self.mock()
return
if self.data == '':
return
if self.data == C0:
self.logger.debug("received handshake C0")
self.c0()
self.data = self.conn.recv(1536)
if self.data[4:8] == binascii.unhexlify("00000000"):
self.logger.debug("received handshake C1")
self.c0_c1()
else:
if not self.handshake:
self.data += self.conn.recv(1536 - 1)
if self.data[4:8] == binascii.unhexlify("00000000"):
if self.data == self.s1:
self.logger.debug("received handshake C2")
self.handshake = True
self.conn.sendall(self.c1)
self.time_ref = time.time()
else:
# take the first byte
max_size = 128
first = ""
while len(first) == 0:
first = self.conn.recv(1)
basic_header = bin_str_to_int(first)
fmt = basic_header >> 6
csid = basic_header % 64
# look if the basic RTMP header is 1, 2 or 3 bytes long
if csid == 0:
tmp = self.conn.recv(1)
chunck_streamid = bin_str_to_int(tmp)
elif csid == 63:
tmp = self.conn.recv(2)
chunck_streamid = bin_str_to_int(tmp)
else:
tmp = ""
chunck_streamid = csid
self.logger.debug(
"received packet of format: {}, CSID: {}".format(
fmt, chunck_streamid))
message_length = 0
            # Done with the basic header, let's go through the message header
if fmt == 0:
tmp = self.conn.recv(11)
message_header = binascii.hexlify(tmp)
timestamp = hexa_str_to_int(message_header[:3 * 2])
message_length = hexa_str_to_int(message_header[3 * 2:6 * 2])
message_typeid = hexa_str_to_int(message_header[6 * 2:7 * 2])
if message_typeid == 9:
self.current_video += 1
self.video += 1
self.current_bytes += message_length
elif message_typeid == 8:
self.current_audio += 1
self.audio += 1
self.current_bytes += message_length
message_steamid = hexa_str_to_streamid(message_header[7 * 2:])
self.previous_length = message_length
self.logger.debug(
"packet with body length {}, type ID {} and stream ID {}".format(
message_length, message_typeid, message_steamid))
elif fmt == 1:
tmp = self.conn.recv(7)
message_header = binascii.hexlify(tmp)
timestamp = hexa_str_to_int(message_header[:3 * 2])
message_length = hexa_str_to_int(message_header[3 * 2:6 * 2])
message_typeid = hexa_str_to_int(message_header[6 * 2:7 * 2])
if message_typeid == 9:
self.current_video += 1
self.video += 1
self.current_bytes += message_length
elif message_typeid == 8:
self.current_audio += 1
self.audio += 1
self.current_bytes += message_length
self.previous_length = message_length
self.logger.debug(
"packet with body length {}, type ID {}".format(
message_length, message_typeid))
elif fmt == 2:
tmp = self.conn.recv(3)
message_header = binascii.hexlify(tmp)
timestamp = hexa_str_to_int(message_header[:3 * 2])
message_length = self.previous_length
self.current_bytes += message_length
self.logger.debug("packet type 2")
elif fmt == 3:
timestamp = 0
                self.logger.debug("Packets of format 3 don't have headers")
else:
raise Exception("Well, this is weird")
if timestamp == 16777215:
tmp = self.conn.recv(4)
timestamp = bin_str_to_int(tmp)
self.logger.debug("timestamp : {}".format(timestamp))
# header is done, let's analyse the body
body = ""
first = True
while message_length:
                # if the message is long, take the chunk maximum size
if message_length > max_size:
# one byte for the header
tmp = self.conn.recv(max_size + 1)
message_length -= max_size
                # else just read the whole chunk
else:
tmp = self.conn.recv(message_length)
message_length = 0
if first:
first = False
                    # after the first packet there will be a 1 byte header
# (probably C3)
else:
tmp = tmp[1:]
body += tmp
# let's be sure about our body length
rest = self.previous_length - len(body)
if rest:
tmp = self.conn.recv(rest)
body += tmp
now = time.time()
if now - self.time_ref >= 2:
self.time_ref = now
self.logger.info(
"received {} video and {} audio packets for {} bytes ({}KB/s)".format(
self.current_video,
self.current_audio,
self.current_bytes,
self.current_bytes/2000.,
))
self.current_video = 0
self.current_audio = 0
self.current_bytes = 0
if __name__ == '__main__':
PARSER = OptionParser()
PARSER.add_option(
'-a',
'--address',
dest='address',
default='127.0.0.1',
help="listening IP address. Default '127.0.0.1'",
)
PARSER.add_option(
'-p',
'--port',
dest='port',
default=1935,
type="int",
help='listening port number. Default 1935',
)
PARSER.add_option(
'-l',
'--logfile',
dest='logfile',
default='vsrtmp.log',
help="log file. Default ./vsrtmp.log",
)
(OPTIONS, _) = PARSER.parse_args()
SERVER = RTMPServer(OPTIONS.address, OPTIONS.port, logfile=OPTIONS.logfile)
SERVER.start()
|
py | b403117f54e21cb8c988fdbb8afc1a06a3b1c9e0 | '''
The server module takes care of os-level state. Targets POSIX compatibility, tested on
Linux/BSD.
'''
from __future__ import division, unicode_literals
from time import sleep
import six
from six.moves import shlex_quote
from pyinfra import logger
from pyinfra.api import operation
from . import files
from .util.files import chmod, sed_replace
@operation
def reboot(state, host, delay=10, interval=1, reboot_timeout=300):
'''
Reboot the server and wait for reconnection.
+ delay: number of seconds to wait before attempting reconnect
+ interval: interval (s) between reconnect attempts
+ reboot_timeout: total time before giving up reconnecting
Note: Probably want sudo enabled.
Example:
.. code:: python
server.reboot(
{'Reboot the server and wait to reconnect'},
delay=5,
timeout=30,
)
'''
logger.warning('The server.reboot operation is in beta!')
yield {
'command': 'reboot',
'success_exit_codes': [-1], # -1 being error/disconnected
}
def wait_and_reconnect(state, host): # pragma: no cover
sleep(delay)
max_retries = round(reboot_timeout / interval)
host.connection = None # remove the connection object
retries = 0
while True:
host.connect(state, show_errors=False)
if host.connection:
break
if retries > max_retries:
raise Exception((
'Server did not reboot in time (reboot_timeout={0}s)'
).format(reboot_timeout))
sleep(interval)
retries += 1
yield (wait_and_reconnect, (), {})
@operation
def wait(state, host, port=None):
'''
    Waits for a port to become active on the target machine. Requires netstat, checks every
second.
+ port: port number to wait for
Example:
.. code:: python
server.wait(
{'Wait for webserver to start'},
port=80,
)
'''
yield r'''
while ! (netstat -an | grep LISTEN | grep -e "\.{0}" -e ":{0}"); do
echo "waiting for port {0}..."
sleep 1
done
'''.format(port)
@operation
def shell(state, host, commands, chdir=None):
'''
Run raw shell code on server during a deploy. If the command would
modify data that would be in a fact, the fact would not be updated
since facts are only run at the start of a deploy.
+ commands: command or list of commands to execute on the remote server
+ chdir: directory to cd into before executing commands
Example:
.. code:: python
server.shell(
{'Run lxd auto init'},
'lxd init --auto',
)
'''
# Ensure we have a list
if isinstance(commands, six.string_types):
commands = [commands]
for command in commands:
if chdir:
yield 'cd {0} && ({1})'.format(chdir, command)
else:
yield command
@operation
def script(state, host, filename, chdir=None):
'''
Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script
Example:
.. code:: python
# Note: This assumes there is a file in files/hello.bash locally.
server.script(
{'Hello'},
'files/hello.bash',
)
'''
temp_file = state.get_temp_filename(filename)
yield files.put(state, host, filename, temp_file)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file
@operation
def script_template(state, host, template_filename, chdir=None, **data):
'''
Generate, upload and execute a local script template on the remote host.
+ template_filename: local script template filename
+ chdir: directory to cd into before executing the script
Example:
.. code:: python
# Example showing how to pass python variable to a script template file.
# The .j2 file can use `{{ some_var }}` to be interpolated.
# To see output need to run pyinfra with '-v'
# Note: This assumes there is a file in templates/hello2.bash.j2 locally.
some_var = 'blah blah blah '
server.script_template(
{'Hello from script'},
'templates/hello2.bash.j2',
some_var=some_var,
)
'''
temp_file = state.get_temp_filename('{0}{1}'.format(template_filename, data))
yield files.template(state, host, template_filename, temp_file, **data)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file
@operation
def modprobe(state, host, name, present=True, force=False):
'''
Load/unload kernel modules.
+ name: name of the module to manage
+ present: whether the module should be loaded or not
+ force: whether to force any add/remove modules
Example:
.. code:: python
server.modprobe(
{'Silly example for modprobe'},
'floppy',
)
'''
modules = host.fact.kernel_modules
is_present = name in modules
args = ''
if force:
args = ' -f'
# Module is loaded and we don't want it?
if not present and is_present:
yield 'modprobe{0} -r {1}'.format(args, name)
# Module isn't loaded and we want it?
elif present and not is_present:
yield 'modprobe{0} {1}'.format(args, name)
@operation
def mount(
state, host, name,
mounted=True, options=None,
# TODO: do we want to manage fstab here?
# update_fstab=False, device=None, fs_type=None,
):
'''
Manage mounted filesystems.
+ name: the path of the mounted filesystem
+ mounted: whether the filesystem should be mounted
+ options: the mount options
Options:
If the currently mounted filesystem does not have all of the provided
options it will be remounted with the options provided.
``/etc/fstab``:
This operation does not attempt to modify the on disk fstab file - for
that you should use the `files.line operation <./files.html#files-line>`_.
'''
options = options or []
options_string = ','.join(options)
mounts = host.fact.mounts
is_mounted = name in mounts
# Want mount but don't have?
if mounted and not is_mounted:
yield 'mount{0} {1}'.format(
' -o {0}'.format(options_string) if options_string else '',
name,
)
# Want no mount but mounted?
elif mounted is False and is_mounted:
yield 'umount {0}'.format(name)
# Want mount and is mounted! Check the options
elif is_mounted and mounted and options:
mounted_options = mounts[name]['options']
needed_options = set(options) - set(mounted_options)
if needed_options:
yield 'mount -o remount,{0} {1}'.format(options_string, name)
@operation
def hostname(state, host, hostname, hostname_file=None):
'''
Set the system hostname.
+ hostname: the hostname that should be set
+ hostname_file: the file that permanently sets the hostname
Hostname file:
By default pyinfra will auto detect this by targetting ``/etc/hostname``
on Linux and ``/etc/myname`` on OpenBSD.
To completely disable writing the hostname file, set ``hostname_file=False``.
Example:
.. code:: python
server.hostname(
{'Set the hostname'},
'server1.example.com',
)
'''
if hostname_file is None:
os = host.fact.os
if os == 'Linux':
hostname_file = '/etc/hostname'
elif os == 'OpenBSD':
hostname_file = '/etc/myname'
current_hostname = host.fact.hostname
if current_hostname != hostname:
yield 'hostname {0}'.format(hostname)
if hostname_file:
# Create a whole new hostname file
file = six.StringIO('{0}\n'.format(hostname))
# And ensure it exists
yield files.put(
state, host,
file, hostname_file,
)
@operation
def sysctl(
state, host, name, value,
persist=False, persist_file='/etc/sysctl.conf',
):
'''
Edit sysctl configuration.
+ name: name of the sysctl setting to ensure
+ value: the value, or list of values, the sysctl should be set to
+ persist: whether to write this sysctl to the config
+ persist_file: file to write the sysctl to persist on reboot
Example:
.. code:: python
server.sysctl(
{'Change the fs.file-max value'},
'fs.file-max',
'100000',
persist=True,
)
'''
string_value = (
' '.join(value)
if isinstance(value, list)
else value
)
existing_value = host.fact.sysctl.get(name)
if not existing_value or existing_value != value:
yield 'sysctl {0}={1}'.format(name, string_value)
if persist:
yield files.line(
state, host,
persist_file,
'{0}[[:space:]]*=[[:space:]]*{1}'.format(name, string_value),
replace='{0} = {1}'.format(name, string_value),
)
@operation
def crontab(
state, host, command, present=True, user=None, name=None,
minute='*', hour='*', month='*', day_of_week='*', day_of_month='*',
interpolate_variables=True,
):
'''
Add/remove/update crontab entries.
+ command: the command for the cron
+ present: whether this cron command should exist
+ user: the user whose crontab to manage
+ name: name the cronjob so future changes to the command will overwrite
+ minute: which minutes to execute the cron
+ hour: which hours to execute the cron
+ month: which months to execute the cron
+ day_of_week: which day of the week to execute the cron
+ day_of_month: which day of the month to execute the cron
+ interpolate_variables: whether to interpolate variables in ``command``
Cron commands:
Unless ``name`` is specified the command is used to identify crontab entries.
This means commands must be unique within a given user's crontab. If you require
multiple identical commands, provide a different name argument for each.
Example:
.. code:: python
# simple example for a crontab
server.crontab(
{'Backup /etc weekly'},
'/bin/tar cf /tmp/etc_bup.tar /etc',
name='backup_etc',
day_of_week=0,
hour=1,
minute=0,
)
'''
def comma_sep(value):
if isinstance(value, (list, tuple)):
return ','.join('{0}'.format(v) for v in value)
return value
minute = comma_sep(minute)
hour = comma_sep(hour)
month = comma_sep(month)
day_of_week = comma_sep(day_of_week)
day_of_month = comma_sep(day_of_month)
crontab = host.fact.crontab(user)
name_comment = '# pyinfra-name={0}'.format(name)
existing_crontab = crontab.get(command)
existing_crontab_match = command
if not existing_crontab and name: # find the crontab by name if provided
for cmd, details in crontab.items():
if name_comment in details['comments']:
existing_crontab = details
existing_crontab_match = cmd
exists = existing_crontab is not None
edit_commands = []
temp_filename = state.get_temp_filename()
new_crontab_line = '{minute} {hour} {day_of_month} {month} {day_of_week} {command}'.format(
command=command,
minute=minute,
hour=hour,
month=month,
day_of_week=day_of_week,
day_of_month=day_of_month,
)
existing_crontab_match = '.*{0}.*'.format(existing_crontab_match)
# Don't want the cron and it does exist? Remove the line
if not present and exists:
edit_commands.append(sed_replace(
temp_filename, existing_crontab_match, '',
interpolate_variables=interpolate_variables,
))
# Want the cron but it doesn't exist? Append the line
elif present and not exists:
if name:
edit_commands.append('echo {0} >> {1}'.format(
shlex_quote(name_comment), temp_filename,
))
edit_commands.append('echo {0} >> {1}'.format(
shlex_quote(new_crontab_line), temp_filename,
))
# We have the cron and it exists - do its details match? If not, replace the line
elif present and exists:
if any((
minute != existing_crontab['minute'],
hour != existing_crontab['hour'],
month != existing_crontab['month'],
day_of_week != existing_crontab['day_of_week'],
day_of_month != existing_crontab['day_of_month'],
)):
edit_commands.append(sed_replace(
temp_filename, existing_crontab_match, new_crontab_line,
interpolate_variables=interpolate_variables,
))
if edit_commands:
crontab_args = []
if user:
crontab_args.append('-u {0}'.format(user))
# List the crontab into a temporary file if it exists
if crontab:
yield 'crontab -l {0} > {1}'.format(' '.join(crontab_args), temp_filename)
# Now yield any edits
for edit_command in edit_commands:
yield edit_command
# Finally, use the tempfile to write a new crontab
yield 'crontab {0} {1}'.format(' '.join(crontab_args), temp_filename)
@operation
def group(
state, host, name, present=True, system=False, gid=None,
):
'''
Add/remove system groups.
+ name: name of the group to ensure
+ present: whether the group should be present or not
+ system: whether to create a system group
System groups:
System groups don't exist on BSD, so the ``system`` argument is ignored for BSD targets.
Examples:
.. code:: python
server.group(
{'Create docker group'},
'docker',
)
# multiple groups
for group in ['wheel', 'lusers']:
server.group(
{f'Create the group {group}'},
group,
)
'''
groups = host.fact.groups or []
is_present = name in groups
# Group exists but we don't want them?
if not present and is_present:
yield 'groupdel {0}'.format(name)
# Group doesn't exist and we want it?
elif present and not is_present:
args = []
# BSD doesn't do system users
if system and 'BSD' not in host.fact.os:
args.append('-r')
args.append(name)
if gid:
args.append('--gid {0}'.format(gid))
yield 'groupadd {0}'.format(' '.join(args))
@operation
def user(
state, host, name,
present=True, home=None, shell=None, group=None, groups=None,
public_keys=None, delete_keys=False, ensure_home=True,
system=False, uid=None,
):
'''
Add/remove/update system users & their ssh `authorized_keys`.
+ name: name of the user to ensure
+ present: whether this user should exist
+ home: the users home directory
+ shell: the users shell
+ group: the users primary group
+ groups: the users secondary groups
+ public_keys: list of public keys to attach to this user, ``home`` must be specified
+ delete_keys: whether to remove any keys not specified in ``public_keys``
+ ensure_home: whether to ensure the ``home`` directory exists
+ system: whether to create a system account
Home directory:
When ``ensure_home`` or ``public_keys`` are provided, ``home`` defaults to
``/home/{name}``.
Examples:
.. code:: python
server.user(
{'Ensure user is removed'},
'kevin',
present=False,
)
server.user(
{'Ensure myweb user exists'},
'myweb',
shell='/bin/bash',
)
# multiple users
for user in ['kevin', 'bob']:
server.user(
{f'Ensure user {user} is removed'},
user,
present=False,
)
'''
users = host.fact.users or {}
user = users.get(name)
if groups is None:
groups = []
if home is None:
home = '/home/{0}'.format(name)
# User not wanted?
if not present:
if user:
yield 'userdel {0}'.format(name)
return
# User doesn't exist but we want them?
if present and user is None:
# Create the user w/home/shell
args = []
if home:
args.append('-d {0}'.format(home))
if shell:
args.append('-s {0}'.format(shell))
if group:
args.append('-g {0}'.format(group))
if groups:
args.append('-G {0}'.format(','.join(groups)))
if system and 'BSD' not in host.fact.os:
args.append('-r')
if uid:
args.append('--uid {0}'.format(uid))
yield 'useradd {0} {1}'.format(' '.join(args), name)
# User exists and we want them, check home/shell/keys
else:
args = []
# Check homedir
if home and user['home'] != home:
args.append('-d {0}'.format(home))
# Check shell
if shell and user['shell'] != shell:
args.append('-s {0}'.format(shell))
# Check primary group
if group and user['group'] != group:
args.append('-g {0}'.format(group))
# Check secondary groups, if defined
if groups and set(user['groups']) != set(groups):
args.append('-G {0}'.format(','.join(groups)))
# Need to mod the user?
if args:
yield 'usermod {0} {1}'.format(' '.join(args), name)
# Ensure home directory ownership
if ensure_home:
yield files.directory(
state, host, home,
user=name, group=name,
)
# Add SSH keys
if public_keys is not None:
if isinstance(public_keys, six.string_types):
public_keys = [public_keys]
# Ensure .ssh directory
# note that this always outputs commands unless the SSH user has access to the
# authorized_keys file, ie the SSH user is the user defined in this function
yield files.directory(
state, host,
'{0}/.ssh'.format(home),
user=name, group=name,
mode=700,
)
filename = '{0}/.ssh/authorized_keys'.format(home)
if delete_keys:
# Create a whole new authorized_keys file
keys_file = six.StringIO('{0}\n'.format(
'\n'.join(public_keys),
))
# And ensure it exists
yield files.put(
state, host,
keys_file, filename,
user=name, group=name,
mode=600,
)
else:
# Ensure authorized_keys exists
yield files.file(
state, host, filename,
user=name, group=name,
mode=600,
)
# And every public key is present
for key in public_keys:
yield files.line(
state, host,
filename, key,
)
|
py | b40314ee46cd62e22d8c1a19f661e2f7980c7876 | """
This Python file implements our approach EIDIG; it can easily be applied to other differentiable prediction models.
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn import cluster
import itertools
import time
import generation_utilities
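# DisInstanceResult bundles, for a single seed, the discriminatory instances found in the
# global phase (g_dis_ins) and, later, those found in the local phase (l_dis_ins).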
class DisInstanceResult:
def __init__(self, num_attribs, seed, g_dis_ins):
self.seed = np.array([seed])
self.g_dis_ins = np.array(list(set([tuple(g_id) for g_id in g_dis_ins])))
self.l_dis_ins = np.empty(shape=(0, num_attribs))
def set_l_dis_ins(self, l_dis_ins):
self.l_dis_ins = np.array(list(set([tuple(l_id) for l_id in l_dis_ins])))
def printResult(self):
print("seed: " + str(self.seed))
print("g_dis_ins: " + str(self.g_dis_ins))
print("l_dis_ins: " + str(self.l_dis_ins))
def compute_grad(x, model):
# compute the gradient of model predictions w.r.t input attributes
x = tf.constant([x], dtype=tf.float32)
with tf.GradientTape() as tape:
tape.watch(x)
y_pred = model(x)
gradient = tape.gradient(y_pred, x)
return gradient[0].numpy() if model(x) > 0.5 else -gradient[0].numpy()
def global_generation(X, seeds, num_attribs, protected_attribs, constraint, model, decay, max_iter, s_g):
# global generation phase of EIDIG
print("我正在执行 EIDIG_htx.global_generation")
R = []
g_id = np.empty(shape=(0, num_attribs))
all_gen_g = np.empty(shape=(0, num_attribs))
try_times = 0
g_num = len(seeds)
for i in range(g_num):
x1 = seeds[i].copy()
grad1 = np.zeros_like(X[0]).astype(float)
grad2 = np.zeros_like(X[0]).astype(float)
for _ in range(max_iter):
try_times += 1
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
if generation_utilities.is_discriminatory(x1, similar_x1, model):
g_id = np.append(g_id, [x1], axis=0)
r = DisInstanceResult(num_attribs, seeds[i].copy(), [x1])
R.append(r)
break
x2 = generation_utilities.max_diff(x1, similar_x1, model)
grad1 = decay * grad1 + compute_grad(x1, model)
grad2 = decay * grad2 + compute_grad(x2, model)
direction = np.zeros_like(X[0])
sign_grad1 = np.sign(grad1)
sign_grad2 = np.sign(grad2)
for attrib in range(num_attribs):
if attrib not in protected_attribs and sign_grad1[attrib] == sign_grad2[attrib]:
direction[attrib] = (-1) * sign_grad1[attrib]
x1 = x1 + s_g * direction
x1 = generation_utilities.clip(x1, constraint)
all_gen_g = np.append(all_gen_g, [x1], axis=0)
# end for
# g_id is an np.array
g_id = np.array(list(set([tuple(id) for id in g_id])))
return g_id, all_gen_g, try_times, R
def local_generation(num_attribs, l_num, g_id, protected_attribs, constraint,
model, update_interval, s_l, epsilon, R):
# local generation phase of EIDIG
print("我正在执行 EIDIG_htx.local_generation")
direction = [-1, 1]
l_id = np.empty(shape=(0, num_attribs))
all_gen_l = np.empty(shape=(0, num_attribs))
try_times = 0
for x1 in g_id:
print("g_id's shape is {}.And I am {}".format(g_id.shape, x1))
x0 = x1.copy()
all_l_dis_ins_on_x1 = np.empty(shape=(0, num_attribs))
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
x2 = generation_utilities.max_diff(x1, similar_x1, model)
grad1 = compute_grad(x1, model)
grad2 = compute_grad(x2, model)
p = generation_utilities.normalization(grad1, grad2, protected_attribs, epsilon)
p0 = p.copy()
suc_iter = 0
for _ in range(l_num):
try_times += 1
if suc_iter >= update_interval:
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
x2 = generation_utilities.find_pair(x1, similar_x1, model)
grad1 = compute_grad(x1, model)
grad2 = compute_grad(x2, model)
p = generation_utilities.normalization(grad1, grad2, protected_attribs, epsilon)
suc_iter = 0
suc_iter += 1
a = generation_utilities.random_pick(p)
s = generation_utilities.random_pick([0.5, 0.5])
x1[a] = x1[a] + direction[s] * s_l
x1 = generation_utilities.clip(x1, constraint)
all_gen_l = np.append(all_gen_l, [x1], axis=0)
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
if generation_utilities.is_discriminatory(x1, similar_x1, model):
l_id = np.append(l_id, [x1], axis=0)
all_l_dis_ins_on_x1 = np.append(all_l_dis_ins_on_x1, [x1], axis=0)
else:
x1 = x0.copy()
p = p0.copy()
suc_iter = 0
# end for range(l_num)
count = 1
for r in R:
print("The R.size is {}.And I am {}".format(np.array(R).shape, count))
count += 1
g_dis_ins = r.g_dis_ins
if (g_dis_ins == x1).all():
r.set_l_dis_ins(all_l_dis_ins_on_x1)
break
# end for R
# end for g_id
l_id = np.array(list(set([tuple(id) for id in l_id])))
print("我正在准备退出函数EIDIG_htx.local_generation")
return l_id, all_gen_l, try_times
def individual_discrimination_generation(X, seeds, protected_attribs, constraint, model, decay, l_num, update_interval, max_iter=10, s_g=1.0, s_l=1.0, epsilon_l=1e-6):
# complete implementation of EIDIG: returns the non-duplicated individual discriminatory instances found,
# all non-duplicated instances generated, and the total number of search iterations
num_attribs = len(X[0])
# generate one global discriminatory instance for each seed.
print("我准备执行 EIDIG_htx.global_generation")
g_id, gen_g, g_gen_num, R = global_generation(X, seeds, num_attribs, protected_attribs,
constraint, model, decay, max_iter, s_g)
print("我已经执行完毕 EIDIG_htx.global_generation,将要执行EIDIG_htx.local_generation")
l_id, gen_l, l_gen_num = local_generation(num_attribs, l_num, g_id, protected_attribs,
constraint, model, update_interval, s_l, epsilon_l, R)
all_id = np.append(g_id, l_id, axis=0)
all_gen = np.append(gen_g, gen_l, axis=0)
all_id_nondup = np.array(list(set([tuple(id) for id in all_id])))
all_gen_nondup = np.array(list(set([tuple(gen) for gen in all_gen])))
return all_id_nondup, all_gen_nondup, g_gen_num + l_gen_num, R
def seedwise_generation(X, seeds, protected_attribs, constraint, model, l_num, decay, update_interval, max_iter=10, s_g=1.0, s_l=1.0, epsilon=1e-6):
# perform global generation and local generation successively on each single seed
num_seeds = len(seeds)
num_gen = np.array([0] * num_seeds)
num_ids = np.array([0] * num_seeds)
num_attribs = len(X[0])
ids = np.empty(shape=(0, num_attribs))
all_gen = np.empty(shape=(0, num_attribs))
direction = [-1, 1]
for index, instance in enumerate(seeds):
x1 = instance.copy()
flag = False
grad1 = np.zeros_like(X[0]).astype(float)
grad2 = np.zeros_like(X[0]).astype(float)
for j in range(max_iter):
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
if generation_utilities.is_discriminatory(x1, similar_x1, model):
ids = np.append(ids, [x1], axis=0)
flag = True
break
x2 = generation_utilities.max_diff(x1, similar_x1, model)
grad1 = decay * grad1 + compute_grad(x1, model)
grad2 = decay * grad2 + compute_grad(x2, model)
direction_g = np.zeros_like(X[0])
sign_grad1 = np.sign(grad1)
sign_grad2 = np.sign(grad2)
for attrib in range(num_attribs):
if attrib not in protected_attribs and sign_grad1[attrib] == sign_grad2[attrib]:
direction_g[attrib] = (-1) * sign_grad1[attrib]
x1 = x1 + s_g * direction_g
x1 = generation_utilities.clip(x1, constraint)
all_gen = np.append(all_gen, [x1], axis=0)
if flag == True:
x0 = x1.copy()
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
x2 = generation_utilities.max_diff(x1, similar_x1, model)
grad1 = compute_grad(x1, model)
grad2 = compute_grad(x2, model)
p = generation_utilities.normalization(grad1, grad2, protected_attribs, epsilon)
p0 = p.copy()
suc_iter = 0
for _ in range(l_num):
if suc_iter >= update_interval:
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
x2 = generation_utilities.find_pair(x1, similar_x1, model)
grad1 = compute_grad(x1, model)
grad2 = compute_grad(x2, model)
p = generation_utilities.normalization(grad1, grad2, protected_attribs, epsilon)
suc_iter = 0
suc_iter += 1
a = generation_utilities.random_pick(p)
s = generation_utilities.random_pick([0.5, 0.5])
x1[a] = x1[a] + direction[s] * s_l
x1 = generation_utilities.clip(x1, constraint)
all_gen = np.append(all_gen, [x1], axis=0)
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
if generation_utilities.is_discriminatory(x1, similar_x1, model):
ids = np.append(ids, [x1], axis=0)
else:
x1 = x0.copy()
p = p0.copy()
suc_iter = 0
nondup_ids = np.array(list(set([tuple(id) for id in ids])))
nondup_gen = np.array(list(set([tuple(gen) for gen in all_gen])))
num_gen[index] = len(nondup_gen)
num_ids[index] = len(nondup_ids)
return num_gen, num_ids
def time_record(X, seeds, protected_attribs, constraint, model, decay, l_num, record_step, record_frequency, update_interval, max_iter=10, s_g=1.0, s_l=1.0, epsilon=1e-6):
# record time consumption
num_attribs = len(X[0])
t = np.array([0.0] * record_frequency)
direction_l = [-1, 1]
threshold = record_step
index = 0
t1 = time.time()
ids = np.empty(shape=(0, num_attribs))
num_ids = num_ids_before = 0
for instance in seeds:
if num_ids >= record_frequency * record_step:
break
x1 = instance.copy()
flag = False
grad1 = np.zeros_like(X[0]).astype(float)
grad2 = np.zeros_like(X[0]).astype(float)
for i in range(max_iter+1):
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
if generation_utilities.is_discriminatory(x1, similar_x1, model):
ids = np.append(ids, [x1], axis=0)
flag = True
break
if i == max_iter:
break
x2 = generation_utilities.max_diff(x1, similar_x1, model)
grad1 = decay * grad1 + compute_grad(x1, model)
grad2 = decay * grad2 + compute_grad(x2, model)
direction_g = np.zeros_like(X[0])
sign_grad1 = np.sign(grad1)
sign_grad2 = np.sign(grad2)
for attrib in range(num_attribs):
if attrib not in protected_attribs and sign_grad1[attrib] == sign_grad2[attrib]:
direction_g[attrib] = (-1) * sign_grad1[attrib]
x1 = x1 + s_g * direction_g
x1 = generation_utilities.clip(x1, constraint)
t2 = time.time()
if flag == True:
ids = np.array(list(set([tuple(id) for id in ids])))
num_ids = len(ids)
if num_ids > num_ids_before:
num_ids_before = num_ids
if num_ids == threshold:
t[index] = t2 - t1
threshold += record_step
index += 1
if num_ids >= record_frequency * record_step:
break
x0 = x1.copy()
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
x2 = generation_utilities.max_diff(x1, similar_x1, model)
grad1 = compute_grad(x1, model)
grad2 = compute_grad(x2, model)
p = generation_utilities.normalization(grad1, grad2, protected_attribs, epsilon)
p0 = p.copy()
suc_iter = 0
for _ in range(l_num):
if suc_iter >= update_interval:
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
x2 = generation_utilities.find_pair(x1, similar_x1, model)
grad1 = compute_grad(x1, model)
grad2 = compute_grad(x2, model)
p = generation_utilities.normalization(grad1, grad2, protected_attribs, epsilon)
suc_iter = 0
suc_iter += 1
a = generation_utilities.random_pick(p)
s = generation_utilities.random_pick([0.5, 0.5])
x1[a] = x1[a] + direction_l[s] * s_l
x1 = generation_utilities.clip(x1, constraint)
t2 = time.time()
similar_x1 = generation_utilities.similar_set(x1, num_attribs, protected_attribs, constraint)
if generation_utilities.is_discriminatory(x1, similar_x1, model):
ids = np.append(ids, [x1], axis=0)
ids = np.array(list(set([tuple(id) for id in ids])))
num_ids = len(ids)
if num_ids > num_ids_before:
num_ids_before = num_ids
if num_ids == threshold:
t[index] = t2 - t1
threshold += record_step
index += 1
if num_ids >= record_frequency * record_step:
break
else:
x1 = x0.copy()
p = p0.copy()
suc_iter = 0
return t |
py | b403153b086a35c0422e0396745945cf62ad2d2b | num = int(input('{}Digite a distancia da viagem: '.format('\033[1;35m')))
if num <= 200:
val = num * 0.50
else:
val = num * 0.45
print('{}Your trip of {}km will cost R${:.2f}'.format('\033[1;30m', num, val))
py | b4031561bc7d2ab4c11130e6b4546d80900fff70 | from .method import Method
from .module import Module
from .parameter_documentation import ParameterDocumentation
__all__ = ['Method', 'Module', 'ParameterDocumentation']
|
py | b40315dca2803e5edacdb4be5d8d1af0316ad723 | import unittest
from deepwater.models import mlp
from deepwater import optimizers
from functools import partial
from deepwater.models.test_utils import MNIST_must_converge
epochs = 10
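# Each test below builds a different MLP topology and asserts that it converges on MNIST.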
class TestMLP(unittest.TestCase):
def test_single_layer(self):
model = mlp.MultiLayerPerceptron
MNIST_must_converge('mlpx1', model,
optimizers.RMSPropOptimizer,
initial_learning_rate=0.1,
batch_size=128,
epochs=epochs)
def test_mlp_layer_with_dropout(self):
hidden_layers = [1024, 1024]
dropout = [0.2, 0.5]
model = partial(mlp.MultiLayerPerceptron,
hidden_layers=hidden_layers,
dropout=dropout, activation_fn="tanh")
MNIST_must_converge('mlpx1024x1024', model,
optimizers.GradientDescentOptimizer,
initial_learning_rate=0.1,
batch_size=128,
epochs=epochs)
def test_mlp_2048_2048_no_dropout_gradient(self):
hidden_layers = [2048, 2048, 2048]
dropout = []
model = partial(mlp.MultiLayerPerceptron,
hidden_layers=hidden_layers,
dropout=dropout)
MNIST_must_converge('mlpx2048x2048x2048xNoDropout', model,
optimizers.RMSPropOptimizer,
initial_learning_rate=0.1,
batch_size=32,
epochs=epochs)
def test_mlp_2048_2048_momentum(self):
hidden_layers = [2048, 2048, 2048]
dropout = [0.2, 0.5, 0.5]
model = partial(mlp.MultiLayerPerceptron,
hidden_layers=hidden_layers,
dropout=dropout)
MNIST_must_converge('mlpx2048x2048x2048', model,
optimizers.RMSPropOptimizer,
initial_learning_rate=0.1,
batch_size=128,
epochs=epochs)
def test_mlp_200_200_momentum(self):
hidden_layers = [200, 200]
# dropout = [0.2, 0.5, 0.5]
dropout = []
model = partial(mlp.MultiLayerPerceptron,
hidden_layers=hidden_layers,
dropout=dropout)
MNIST_must_converge('mlpx200x200', model,
optimizers.RMSPropOptimizer,
initial_learning_rate=0.1,
batch_size=32,
epochs=epochs)
if __name__ == "__main__":
unittest.main()
|
py | b40315e41f7375ab55af4a02688009a4a486411a | import os, tempfile
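# dot_write: serialize a Jena RDF graph to RDF/XML, then pipe it through rapper and
# Graphviz dot to render the graph in the requested output format.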
def dot_write(jena_graph, out_fn, fmt):
#ipdb.set_trace()
temp_fn = tempfile.NamedTemporaryFile(delete = True)
jena_graph.write(temp_fn.name, format = "RDF/XML")
dd = {'temp_fn': temp_fn.name, 'out_fn': out_fn, 'fmt': fmt}
pipe_cmd = "cat {temp_fn} | rapper -q -i rdfxml -o dot - ex:ex | dot -x -T{fmt} -o {out_fn}".format(**dd)
os.system(pipe_cmd)
|
py | b40316d74e584a9cc63e7ebb96ac6d02f7c75706 | import pytest
from bitarray import bitarray
from clkhash import bloomfilter, randomnames
from clkhash.key_derivation import generate_key_lists
from hypothesis import given, strategies
from anonlink import similarities
FLOAT_ARRAY_TYPES = 'fd'
UINT_ARRAY_TYPES = 'BHILQ'
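# array.array typecodes expected in results: floats for similarity scores,
# unsigned integer types for the record index arrays.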
SIM_FUNS = [similarities.dice_coefficient_python,
similarities.dice_coefficient_accelerated]
class TestBloomFilterComparison:
@classmethod
def setup_class(cls):
cls.proportion = 0.8
nl = randomnames.NameList(300)
s1, s2 = nl.generate_subsets(200, cls.proportion)
keys = generate_key_lists('secret', len(nl.schema_types))
cls.filters1 = tuple(
f[0]
for f in bloomfilter.stream_bloom_filters(s1, keys, nl.SCHEMA))
cls.filters2 = tuple(
f[0]
for f in bloomfilter.stream_bloom_filters(s2, keys, nl.SCHEMA))
cls.filters = cls.filters1, cls.filters2
cls.default_k = 10
cls.default_threshold = 0.5
def _check_proportion(self, candidate_pairs):
sims, _ = candidate_pairs
exact_matches = sum(sim == 1 for sim in sims)
assert (exact_matches / len(self.filters1)
== pytest.approx(self.proportion))
assert (exact_matches / len(self.filters2)
== pytest.approx(self.proportion))
def assert_similarity_matrices_equal(self, M, N):
M_sims, (M_indices0, M_indices1) = M
N_sims, (N_indices0, N_indices1) = N
assert (set(zip(M_sims, M_indices0, M_indices1))
== set(zip(N_sims, N_indices0, N_indices1)))
def test_accelerated_manual(self):
nl = randomnames.NameList(30)
s1, s2 = nl.generate_subsets(5, 1.0)
keys = generate_key_lists('secret', len(nl.schema_types))
f1 = tuple(
f[0]
for f in bloomfilter.stream_bloom_filters(s1, keys, nl.SCHEMA))
f2 = tuple(
f[0]
for f in bloomfilter.stream_bloom_filters(s2, keys, nl.SCHEMA))
py_similarity = similarities.dice_coefficient_python(
(f1, f2), self.default_threshold, self.default_k)
c_similarity = similarities.dice_coefficient_accelerated(
(f1, f2), self.default_threshold, self.default_k)
self.assert_similarity_matrices_equal(py_similarity, c_similarity)
def test_accelerated(self):
similarity = similarities.dice_coefficient_accelerated(
self.filters, self.default_threshold, self.default_k)
self._check_proportion(similarity)
def test_python(self):
similarity = similarities.dice_coefficient_python(
self.filters, self.default_threshold, self.default_k)
self._check_proportion(similarity)
def test_default(self):
similarity = similarities.dice_coefficient(
self.filters, self.default_threshold, self.default_k)
self._check_proportion(similarity)
def test_same_score(self):
c_cands = similarities.dice_coefficient_accelerated(
self.filters, self.default_threshold, self.default_k)
c_scores, _ = c_cands
python_cands = similarities.dice_coefficient_python(
self.filters, self.default_threshold, self.default_k)
python_scores, _ = python_cands
assert c_scores == python_scores
def test_same_score_k_none(self):
c_cands = similarities.dice_coefficient_accelerated(
self.filters, self.default_threshold, None)
c_scores, _ = c_cands
python_cands = similarities.dice_coefficient_python(
self.filters, self.default_threshold, None)
python_scores, _ = python_cands
assert c_scores == python_scores
def test_empty_input_a(self):
candidate_pairs = similarities.dice_coefficient(
((), self.filters2), self.default_threshold, self.default_k)
sims, (indices0, indices1) = candidate_pairs
assert len(sims) == len(indices0) == len(indices1) == 0
assert sims.typecode in FLOAT_ARRAY_TYPES
assert indices0.typecode in UINT_ARRAY_TYPES
assert indices1.typecode in UINT_ARRAY_TYPES
def test_empty_input_b(self):
candidate_pairs = similarities.dice_coefficient(
(self.filters1, ()), self.default_threshold, self.default_k)
sims, (indices0, indices1) = candidate_pairs
assert len(sims) == len(indices0) == len(indices1) == 0
assert sims.typecode in FLOAT_ARRAY_TYPES
assert indices0.typecode in UINT_ARRAY_TYPES
assert indices1.typecode in UINT_ARRAY_TYPES
def test_small_input_a(self):
py_similarity = similarities.dice_coefficient_python(
(self.filters1[:10], self.filters2),
self.default_threshold, self.default_k)
c_similarity = similarities.dice_coefficient_accelerated(
(self.filters1[:10], self.filters2),
self.default_threshold, self.default_k)
self.assert_similarity_matrices_equal(py_similarity, c_similarity)
def test_small_input_b(self):
py_similarity = similarities.dice_coefficient_python(
(self.filters1, self.filters2[:10]),
self.default_threshold, self.default_k)
c_similarity = similarities.dice_coefficient_accelerated(
(self.filters1, self.filters2[:10]),
self.default_threshold, self.default_k)
self.assert_similarity_matrices_equal(py_similarity, c_similarity)
def test_memory_use(self):
n = 10
f1 = self.filters1[:n]
f2 = self.filters2[:n]
# If memory is not handled correctly, then this would allocate
# several terabytes of RAM.
big_k = 1 << 50
py_similarity = similarities.dice_coefficient_python(
(f1, f2), self.default_threshold, big_k)
c_similarity = similarities.dice_coefficient_accelerated(
(f1, f2), self.default_threshold, big_k)
self.assert_similarity_matrices_equal(py_similarity, c_similarity)
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
@pytest.mark.parametrize('dataset_n', [0, 1])
@pytest.mark.parametrize('k', [None, 0, 1, 2, 3, 5])
@pytest.mark.parametrize('threshold', [0., .5, 1.])
def test_too_few_datasets(self, sim_fun, dataset_n, k, threshold):
datasets = [[bitarray('01001011') * 8, bitarray('01001011' * 8)]
for _ in range(dataset_n)]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
@pytest.mark.parametrize('p_arity', [3, 5])
@pytest.mark.parametrize('k', [None, 0, 1, 2])
@pytest.mark.parametrize('threshold', [0., .5, 1.])
def test_unsupported_p_arity(self, sim_fun, p_arity, k, threshold):
datasets = [[bitarray('01001011') * 8, bitarray('01001011' * 8)]
for _ in range(p_arity)]
with pytest.raises(NotImplementedError):
sim_fun(datasets, threshold, k=k)
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
@pytest.mark.parametrize('k', [None, 0, 1, 2, 3, 5])
@pytest.mark.parametrize('threshold', [0., .5, 1.])
def test_inconsistent_filter_length(self, sim_fun, k, threshold):
datasets = [[bitarray('01001011') * 8, bitarray('01001011') * 16],
[bitarray('01001011') * 8, bitarray('01001011') * 8]]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
datasets = [[bitarray('01001011') * 16, bitarray('01001011') * 8],
[bitarray('01001011') * 8, bitarray('01001011') * 8]]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
datasets = [[bitarray('01001011') * 8, bitarray('01001011') * 8],
[bitarray('01001011') * 16, bitarray('01001011') * 8]]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
datasets = [[bitarray('01001011') * 16, bitarray('01001011') * 8],
[bitarray('01001011') * 8, bitarray('01001011') * 16]]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
datasets = [[bitarray('01001011') * 16, bitarray('01001011') * 8],
[bitarray('01001011') * 16, bitarray('01001011') * 8]]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
datasets = [[bitarray('01001011') * 16, bitarray('01001011') * 16],
[bitarray('01001011') * 8, bitarray('01001011') * 8]]
with pytest.raises(ValueError):
sim_fun(datasets, threshold, k=k)
@pytest.mark.parametrize('k', [None, 0, 1, 2, 3, 5])
@pytest.mark.parametrize('threshold', [0., .5, 1.])
@pytest.mark.parametrize('bytes_n', [1, 7, 9, 15, 17, 23, 25])
def test_not_multiple_of_64(self, k, threshold, bytes_n):
datasets = [[bitarray('01001011') * bytes_n],
[bitarray('01001011') * bytes_n]]
py_similarity = similarities.dice_coefficient_python(
datasets, threshold, k)
c_similarity = similarities.dice_coefficient_accelerated(datasets, threshold, k=k)
self.assert_similarity_matrices_equal(py_similarity, c_similarity)
def test_not_multiple_of_8_raises(self):
datasets = [[bitarray('010')],
[bitarray('010')]]
with pytest.raises(NotImplementedError):
similarities.dice_coefficient_accelerated(datasets, threshold=self.default_threshold)
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
@pytest.mark.parametrize('k', [None, 0, 1])
@pytest.mark.parametrize('threshold', [0., .5, 1.])
def test_empty(self, sim_fun, k, threshold):
datasets = [[], [bitarray('01001011') * 8]]
sims, (rec_is0, rec_is1) = sim_fun(datasets, threshold, k=k)
assert len(sims) == len(rec_is0) == len(rec_is1) == 0
assert sims.typecode in FLOAT_ARRAY_TYPES
assert (rec_is0.typecode in UINT_ARRAY_TYPES
and rec_is1.typecode in UINT_ARRAY_TYPES)
datasets = [[bitarray('01001011') * 8], []]
sims, (rec_is0, rec_is1) = sim_fun(datasets, threshold, k=k)
assert len(sims) == len(rec_is0) == len(rec_is1) == 0
assert sims.typecode in FLOAT_ARRAY_TYPES
assert (rec_is0.typecode in UINT_ARRAY_TYPES
and rec_is1.typecode in UINT_ARRAY_TYPES)
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
@pytest.mark.parametrize('k', [None, 0, 1])
@pytest.mark.parametrize('threshold', [0., .5])
def test_all_low(self, sim_fun, k, threshold):
datasets = [[bitarray('01001011') * 8],
[bitarray('00000000') * 8]]
sims, (rec_is0, rec_is1) = sim_fun(datasets, threshold, k=k)
assert (len(sims) == len(rec_is0) == len(rec_is1)
== (1 if threshold == 0. and k != 0 else 0))
assert sims.typecode in FLOAT_ARRAY_TYPES
assert (rec_is0.typecode in UINT_ARRAY_TYPES
and rec_is1.typecode in UINT_ARRAY_TYPES)
datasets = [[bitarray('00000000') * 8],
[bitarray('01001011') * 8]]
sims, (rec_is0, rec_is1) = sim_fun(datasets, threshold, k=k)
assert (len(sims) == len(rec_is0) == len(rec_is1)
== (1 if threshold == 0. and k != 0 else 0))
assert sims.typecode in FLOAT_ARRAY_TYPES
assert (rec_is0.typecode in UINT_ARRAY_TYPES
and rec_is1.typecode in UINT_ARRAY_TYPES)
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
def test_order(self, sim_fun):
similarity = sim_fun(
self.filters, self.default_threshold, self.default_k)
sims, (rec_is0, rec_is1) = similarity
for i in range(len(sims) - 1):
sim_a, rec_i0_a, rec_i1_a = sims[i], rec_is0[i], rec_is1[i]
sim_b, rec_i0_b, rec_i1_b = sims[i+1], rec_is0[i+1], rec_is1[i+1]
if sim_a > sim_b:
pass # Correctly ordered!
elif sim_a == sim_b:
if rec_i0_a < rec_i0_b:
pass # Correctly ordered!
elif rec_i0_a == rec_i0_b:
if rec_i1_a < rec_i1_b:
pass # Correctly ordered!
elif rec_i1_a == rec_i1_b:
assert False, 'duplicate entry'
else:
assert False, 'incorrect tiebreaking on second index'
else:
assert False, 'incorrect tiebreaking on first index'
else:
assert False, 'incorrect similarity sorting'
def _to_bitarray(bytes_):
ba = bitarray()
ba.frombytes(bytes_)
return ba
@given(strategies.data(), strategies.floats(min_value=0, max_value=1))
@pytest.mark.parametrize('sim_fun', SIM_FUNS)
def test_bytes_bitarray_agree(sim_fun, data, threshold):
bytes_length = data.draw(strategies.integers(
min_value=0,
max_value=4096 # Let's not get too carried away...
))
filters0_bytes = data.draw(strategies.lists(strategies.binary(
min_size=bytes_length, max_size=bytes_length)))
filters1_bytes = data.draw(strategies.lists(strategies.binary(
min_size=bytes_length, max_size=bytes_length)))
filters0_ba = tuple(map(_to_bitarray, filters0_bytes))
filters1_ba = tuple(map(_to_bitarray, filters1_bytes))
res_bytes = sim_fun([filters0_bytes, filters1_bytes], threshold)
res_ba = sim_fun([filters0_ba, filters1_ba], threshold)
assert (res_bytes == res_ba)
|
py | b40318c0ab9dda70080c8640c9ff62fa8303d0a3 | description = 'detector moving devices'
group = 'lowlevel'
devices = dict(
det_drift = device('nicos.devices.generic.ManualSwitch',
description = 'depth of detector drift1=40mm drift2=65mm',
states = ['off', 'drift1', 'drift2'],
),
det_pivot = device('nicos_mlz.refsans.devices.pivot.PivotPoint',
description = 'Pivot point at floor of samplechamber',
states = list(range(1, 15)),
fmtstr = 'Point %d',
unit = '',
),
table_z_motor = device('nicos.devices.generic.VirtualMotor',
description = 'table inside tube',
unit = 'mm',
abslimits = (620, 11025),
visibility = (),
curvalue = 620,
),
det_table = device('nicos.devices.generic.Axis',
description = 'detector table inside tube',
motor = 'table_z_motor',
dragerror = 10.,
precision = 0.05,
),
tube_m = device('nicos.devices.generic.VirtualMotor',
description = 'tube Motor',
abslimits = (-120, 1100),
visibility = (),
unit = 'mm',
speed = 10,
fmtstr = '%.0f',
# visibility = (),
),
det_yoke = device('nicos.devices.generic.Axis',
description = 'tube height',
motor = 'tube_m',
precision = 0.05,
dragerror = 10.,
),
tube_angle = device('nicos_mlz.refsans.devices.tube.TubeAngle',
description = 'Angle between flight tube and ground',
yoke = 'det_yoke',
),
)
|
py | b4031934426438ea8cdeb0238ba8829ecb02229e | import os
from os.path import join
import rastervision as rv
from examples.utils import str_to_bool, save_image_crop
class PotsdamSemanticSegmentation(rv.ExperimentSet):
def exp_main(self, raw_uri, processed_uri, root_uri, test=False):
"""Run an experiment on the ISPRS Potsdam dataset.
Uses Tensorflow Deeplab backend with Mobilenet architecture. Should get to
F1 score of ~0.86 (including clutter class) after 6 hours of training on a P3
instance.
Args:
raw_uri: (str) directory of raw data
root_uri: (str) root directory for experiment output
test: (bool) if True, run a very small experiment as a test and generate
debug output
"""
test = str_to_bool(test)
exp_id = 'potsdam-seg'
train_ids = ['2-10', '2-11', '3-10', '3-11', '4-10', '4-11', '4-12', '5-10',
'5-11', '5-12', '6-10', '6-11', '6-7', '6-9', '7-10', '7-11',
'7-12', '7-7', '7-8', '7-9']
val_ids = ['2-12', '3-12', '6-12']
# infrared, red, green
channel_order = [3, 0, 1]
debug = False
batch_size = 8
num_steps = 100000
model_type = rv.MOBILENET_V2
if test:
debug = True
num_steps = 1
batch_size = 1
train_ids = train_ids[0:1]
val_ids = val_ids[0:1]
exp_id += '-test'
classes = {
'Car': (1, '#ffff00'),
'Building': (2, '#0000ff'),
'Low Vegetation': (3, '#00ffff'),
'Tree': (4, '#00ff00'),
'Impervious': (5, "#ffffff"),
'Clutter': (6, "#ff0000")
}
task = rv.TaskConfig.builder(rv.SEMANTIC_SEGMENTATION) \
.with_chip_size(300) \
.with_classes(classes) \
.with_chip_options(window_method='sliding',
stride=300, debug_chip_probability=1.0) \
.build()
backend = rv.BackendConfig.builder(rv.TF_DEEPLAB) \
.with_task(task) \
.with_model_defaults(model_type) \
.with_train_options(sync_interval=600) \
.with_num_steps(num_steps) \
.with_batch_size(batch_size) \
.with_debug(debug) \
.build()
def make_scene(id):
id = id.replace('-', '_')
raster_uri = '{}/4_Ortho_RGBIR/top_potsdam_{}_RGBIR.tif'.format(
raw_uri, id)
label_uri = '{}/5_Labels_for_participants/top_potsdam_{}_label.tif'.format(
raw_uri, id)
if test:
crop_uri = join(
processed_uri, 'crops', os.path.basename(raster_uri))
save_image_crop(raster_uri, crop_uri, size=600)
raster_uri = crop_uri
# Using with_rgb_class_map because label TIFFs have classes encoded as RGB colors.
label_source = rv.LabelSourceConfig.builder(rv.SEMANTIC_SEGMENTATION) \
.with_rgb_class_map(task.class_map) \
.with_raster_source(label_uri) \
.build()
# URI will be injected by scene config.
# Using with_rgb(True) because we want prediction TIFFs to be in RGB format.
label_store = rv.LabelStoreConfig.builder(rv.SEMANTIC_SEGMENTATION_RASTER) \
.with_rgb(True) \
.build()
scene = rv.SceneConfig.builder() \
.with_task(task) \
.with_id(id) \
.with_raster_source(raster_uri,
channel_order=channel_order) \
.with_label_source(label_source) \
.with_label_store(label_store) \
.build()
return scene
train_scenes = [make_scene(id) for id in train_ids]
val_scenes = [make_scene(id) for id in val_ids]
dataset = rv.DatasetConfig.builder() \
.with_train_scenes(train_scenes) \
.with_validation_scenes(val_scenes) \
.build()
experiment = rv.ExperimentConfig.builder() \
.with_id(exp_id) \
.with_task(task) \
.with_backend(backend) \
.with_dataset(dataset) \
.with_root_uri(root_uri) \
.build()
return experiment
if __name__ == '__main__':
rv.main()
|
py | b403195ad2a45a5ceba53e8477ef76d7f75c74fa | def largest_num(num1, num2, num3):
"""
find the largest of the three given numbers
:param num1:
:param num2:
:param num3:
:return:
"""
# max = max(num1, num2,num3)
# print(max)
if (num1 >= num2) and (num1 >= num3):
largest = num1
elif (num2 >= num1) and (num2 >= num3):
largest = num2
else:
largest = num3
print("The largest number between", num1, ",", num2, "and", num3, "is: ", largest)
largest_num(10, 14, 12)
def decimal_to_binary(number):
"""
convert a given decimal number to its binary representation
:param number:
:return:
"""
print("Equivalent Binary Number: ", bin(number))
decimal_to_binary(42)
def leap_year(year):
"""
check whether the given year is a leap year
:param year:
:return:
"""
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):  # century years are leap only if divisible by 400
print("{0} is a leap year".format(year))
else:
print("{0} is not a leap year".format(year))
leap_year(2040)
def lower_case(string):
print(string.lower())
lower_case("This Sh0uLd BE IN L0wErCasE!")
# Program to make a simple calculator that can add, subtract, multiply and divide using functions
# This function adds two numbers
def add(x, y):
return x + y
# This function subtracts two numbers
def subtract(x, y):
return x - y
# This function multiplies two numbers
def multiply(x, y):
return x * y
# This function divides two numbers
def divide(x, y):
return x / y
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
def simple_calculator(num1, num2, operation):
if operation == 'add':
print(num1, "+", num2, "=", add(num1, num2))
elif operation == 'subtract':
print(num1, "-", num2, "=", subtract(num1, num2))
elif operation == 'multiply':
print(num1, "*", num2, "=", multiply(num1, num2))
elif operation == 'divide':
print(num1, "/", num2, "=", divide(num1, num2))
else:
print("Invalid input")
simple_calculator(50, 10, 'subtract')
def check_palindrome(string):
# make it suitable for caseless comparison
print(string)
string = string.casefold()
print(string)
# reverse the string
rev_str = reversed(string)
# check if the string is equal to its reverse
if list(string) == list(rev_str):
print("It is palindrome")
else:
print("It is not palindrome")
check_palindrome("MalayAlam")
|
py | b4031a2c63d93fd90eb2d7d8d1019202434e76c1 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import glob
import os
import shutil
import sys
import datetime
import json
import six
import pytest
from os.path import abspath, dirname, join, isfile, relpath
from asv import config, environment, util
from asv.results import iter_results_for_machine
from asv.util import check_output, which
from . import tools
from .tools import dummy_packages
try:
which('conda')
HAS_CONDA = True
except (RuntimeError, IOError):
HAS_CONDA = False
WIN = (os.name == 'nt')
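# Each tuple below becomes one commit in the generated test repository; the dummy
# benchmarks in the benchmark/ suite report these values (assumption based on the
# tools.generate_test_repo usage below).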
dummy_values = [
(None, None),
(1, 1),
(3, 1),
(None, 1),
(6, None),
(5, 1),
(6, 1),
(6, 1),
(6, 6),
(6, 6),
]
def generate_basic_conf(tmpdir, repo_subdir=''):
tmpdir = six.text_type(tmpdir)
local = abspath(dirname(__file__))
os.chdir(tmpdir)
# Use relative paths on purpose since this is what will be in
# actual config files
shutil.copytree(os.path.join(local, 'benchmark'), 'benchmark')
machine_file = join(tmpdir, 'asv-machine.json')
shutil.copyfile(join(local, 'asv-machine.json'),
machine_file)
repo_path = tools.generate_test_repo(tmpdir, dummy_values,
subdir=repo_subdir).path
conf_dict = {
'env_dir': 'env',
'benchmark_dir': 'benchmark',
'results_dir': 'results_workflow',
'html_dir': 'html',
'repo': relpath(repo_path),
'dvcs': 'git',
'project': 'asv',
'matrix': {
"asv_dummy_test_package_1": [""],
"asv_dummy_test_package_2": tools.DUMMY2_VERSIONS,
},
}
if repo_subdir:
conf_dict['repo_subdir'] = repo_subdir
conf = config.Config.from_json(conf_dict)
if hasattr(sys, 'pypy_version_info'):
conf.pythons = ["pypy{0[0]}.{0[1]}".format(sys.version_info)]
return tmpdir, local, conf, machine_file
@pytest.fixture
def basic_conf(tmpdir, dummy_packages):
return generate_basic_conf(tmpdir)
@pytest.fixture
def basic_conf_with_subdir(tmpdir, dummy_packages):
return generate_basic_conf(tmpdir, 'some_subdir')
def test_run_publish(capfd, basic_conf):
tmpdir, local, conf, machine_file = basic_conf
# Tests a typical complete run/publish workflow
tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2',
'--quick', '--show-stderr', '--profile',
'-a', 'warmup_time=0',
_machine_file=machine_file)
text, err = capfd.readouterr()
assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2
assert 'asv: benchmark timed out (timeout 0.1s)' in text
tools.run_asv_with_conf(conf, 'publish')
assert isfile(join(tmpdir, 'html', 'index.html'))
assert isfile(join(tmpdir, 'html', 'index.json'))
assert isfile(join(tmpdir, 'html', 'asv.js'))
assert isfile(join(tmpdir, 'html', 'asv.css'))
# Check parameterized test json data format
filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64',
'asv_dummy_test_package_1',
'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1],
'branch-master',
'cpu-Blazingly fast', 'machine-orangutan',
'os-GNU_Linux', 'python-*', 'ram-128GB',
'params_examples.time_skip.json'))[0]
with open(filename, 'r') as fp:
data = json.load(fp)
assert len(data) == 2
assert isinstance(data[0][0], six.integer_types) # revision
assert len(data[0][1]) == 3
assert len(data[1][1]) == 3
assert isinstance(data[0][1][0], float)
assert isinstance(data[0][1][1], float)
assert data[0][1][2] is None
# Check that the skip options work
capfd.readouterr()
tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2',
'--quick', '--skip-existing-successful',
'--bench=time_secondary.track_value',
'--skip-existing-failed',
_machine_file=join(tmpdir, 'asv-machine.json'))
tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2',
'--bench=time_secondary.track_value',
'--quick', '--skip-existing-commits',
_machine_file=join(tmpdir, 'asv-machine.json'))
text, err = capfd.readouterr()
assert 'Running benchmarks.' not in text
# Check EXISTING and --environment work
if HAS_CONDA:
env_spec = ("-E", "conda:{0[0]}.{0[1]}".format(sys.version_info))
else:
env_spec = ("-E", "virtualenv:{0[0]}.{0[1]}".format(sys.version_info))
tools.run_asv_with_conf(conf, 'run', "EXISTING", '--quick',
'--bench=time_secondary.track_value',
*env_spec,
_machine_file=machine_file)
# Remove the benchmarks.json file and check publish fails
os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))
with pytest.raises(util.UserError):
tools.run_asv_with_conf(conf, 'publish')
def test_continuous(capfd, basic_conf):
tmpdir, local, conf, machine_file = basic_conf
if HAS_CONDA:
env_spec = ("-E", "conda:{0[0]}.{0[1]}".format(sys.version_info))
else:
env_spec = ("-E", "virtualenv:{0[0]}.{0[1]}".format(sys.version_info))
# Check that asv continuous runs
tools.run_asv_with_conf(conf, 'continuous', "master^", '--show-stderr',
'--bench=params_examples.track_find_test',
'--bench=params_examples.track_param',
'--bench=time_examples.TimeSuite.time_example_benchmark_1',
'--attribute=repeat=1', '--attribute=number=1',
'--attribute=warmup_time=0',
*env_spec, _machine_file=machine_file)
text, err = capfd.readouterr()
assert "SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY" in text
assert "+ 1 6 6.00 params_examples.track_find_test(2)" in text
assert "params_examples.ClassOne" in text
# Check processes were interleaved (timing benchmark was run twice)
assert re.search(r"For.*commit [a-f0-9]+ (<[a-z0-9~^]+> )?\(round 1/2\)", text, re.M), text
result_found = False
for results in iter_results_for_machine(conf.results_dir, "orangutan"):
result_found = True
stats = results.get_result_stats('time_examples.TimeSuite.time_example_benchmark_1', [])
assert stats[0]['repeat'] == 2
assert result_found
def test_find(capfd, basic_conf):
tmpdir, local, conf, machine_file = basic_conf
if WIN and os.path.basename(sys.argv[0]).lower().startswith('py.test'):
# Multiprocessing in spawn mode can result to problems with py.test
# Find.run calls Setup.run in parallel mode by default
pytest.skip("Multiprocessing spawn mode on Windows not safe to run "
"from py.test runner.")
# Test find at least runs
tools.run_asv_with_conf(conf, 'find', "master~5..master", "params_examples.track_find_test",
_machine_file=machine_file)
# Check it found the first commit after the initially tested one
output, err = capfd.readouterr()
regression_hash = check_output(
[which('git'), 'rev-parse', 'master^'], cwd=conf.repo)
assert "Greatest regression found: {0}".format(regression_hash[:8]) in output
def test_run_spec(basic_conf):
tmpdir, local, conf, machine_file = basic_conf
conf.build_cache_size = 5
extra_branches = [('master~1', 'some-branch', [12])]
dvcs_path = os.path.join(tmpdir, 'test_repo2')
dvcs = tools.generate_test_repo(dvcs_path, [1, 2],
extra_branches=extra_branches)
conf.repo = dvcs.path
initial_commit = dvcs.get_hash("master~1")
master_commit = dvcs.get_hash("master")
branch_commit = dvcs.get_hash("some-branch")
template_dir = os.path.join(tmpdir, "results_workflow_template")
results_dir = os.path.join(tmpdir, 'results_workflow')
tools.run_asv_with_conf(conf, 'run', initial_commit+"^!",
'--bench=time_secondary.track_value',
'--quick',
_machine_file=join(tmpdir, 'asv-machine.json'))
shutil.copytree(results_dir, template_dir)
def _test_run(range_spec, branches, expected_commits):
# Rollback initial results
shutil.rmtree(results_dir)
shutil.copytree(template_dir, results_dir)
args = ["run", "--quick", "--skip-existing-successful",
"--bench=time_secondary.track_value",
"-s", "1000" # large number of steps should be noop
]
if range_spec is not None:
args.append(range_spec)
conf.branches = branches
tools.run_asv_with_conf(conf, *args, _machine_file=machine_file)
# Check that files for all commits expected were generated
envs = list(environment.get_environments(conf, None))
tool_name = envs[0].tool_name
pyver = conf.pythons[0]
if pyver.startswith('pypy'):
pyver = pyver[2:]
expected = set(['machine.json'])
for commit in expected_commits:
for psver in tools.DUMMY2_VERSIONS:
expected.add('{0}-{1}-py{2}-asv_dummy_test_package_1-asv_dummy_test_package_2{3}.json'.format(
commit[:8], tool_name, pyver, psver))
result_files = os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))
assert set(result_files) == expected
for branches, expected_commits in (
# Without branches in config, shoud just use master
([None], [initial_commit, master_commit]),
# With one branch in config, should just use that branch
(["some-branch"], [initial_commit, branch_commit]),
# With two branch in config, should apply to specified branches
(["master", "some-branch"], [initial_commit, master_commit, branch_commit]),
):
for range_spec in (None, "NEW", "ALL"):
_test_run(range_spec, branches, expected_commits)
# test the HASHFILE version of range_spec'ing
expected_commits = (initial_commit, branch_commit)
with open(os.path.join(tmpdir, 'hashes_to_benchmark'), 'w') as f:
for commit in expected_commits:
f.write(commit)
f.write('\n')
_test_run('HASHFILE:hashes_to_benchmark', [None], expected_commits)
def test_run_build_failure(basic_conf):
tmpdir, local, conf, machine_file = basic_conf
conf.matrix = {}
# Add a commit that fails to build
dvcs = tools.Git(conf.repo)
setup_py = join(dvcs.path, 'setup.py')
with open(setup_py, 'r') as f:
setup_py_content = f.read()
with open(setup_py, 'w') as f:
f.write("assert False")
dvcs.add(join(dvcs.path, 'setup.py'))
dvcs.commit("Break setup.py")
with open(setup_py, 'w') as f:
f.write(setup_py_content)
dvcs.add(join(dvcs.path, 'setup.py'))
dvcs.commit("Fix setup.py")
# Test running it
timestamp = util.datetime_to_js_timestamp(datetime.datetime.utcnow())
bench_name = 'time_secondary.track_value'
for commit in ['master^!', 'master~1^!']:
tools.run_asv_with_conf(conf, 'run', commit,
'--quick', '--show-stderr',
'--bench', bench_name,
_machine_file=machine_file)
# Check results
hashes = dvcs.get_branch_hashes()
fn_broken, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
hashes[1][:8] + '-*.json'))
fn_ok, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
hashes[0][:8] + '-*.json'))
data_broken = util.load_json(fn_broken)
data_ok = util.load_json(fn_ok)
for data in (data_broken, data_ok):
assert data['started_at'][bench_name] >= timestamp
assert data['ended_at'][bench_name] >= data['started_at'][bench_name]
assert len(data_broken['results']) == 1
assert len(data_ok['results']) == 1
assert data_broken['results'][bench_name] is None
assert data_ok['results'][bench_name] == 42.0
# Check that parameters were also saved
assert data_broken['params'] == data_ok['params']
def test_run_with_repo_subdir(basic_conf_with_subdir):
"""
Check 'asv run' with the Python project inside a subdirectory.
"""
tmpdir, local, conf, machine_file = basic_conf_with_subdir
conf.matrix = {}
# This benchmark imports the project under test (asv_test_repo)
bench_name = 'params_examples.track_find_test'
# Test with a single changeset
tools.run_asv_with_conf(conf, 'run', 'master^!',
'--quick', '--show-stderr',
'--bench', bench_name,
_machine_file=machine_file)
# Check it ran ok
fn_results, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
'*-*.json')) # avoid machine.json
data = util.load_json(fn_results)
assert data['results'][bench_name] == {'params': [['1', '2']],
'result': [6, 6]}
def test_benchmark_param_selection(basic_conf):
tmpdir, local, conf, machine_file = basic_conf
conf.matrix = {}
tools.generate_test_repo(tmpdir, values=[(1, 2, 3)])
tools.run_asv_with_conf(conf, 'run', 'master^!',
'--quick', '--show-stderr',
'--bench', r'track_param_selection\(.*, 3\)',
_machine_file=machine_file)
def get_results():
results = util.load_json(glob.glob(join(
tmpdir, 'results_workflow', 'orangutan', '*-*.json'))[0])
# replacing NaN by 'n/a' make assertions easier
return ['n/a' if util.is_nan(item) else item
for item in results['results'][
'params_examples.track_param_selection']['result']]
assert get_results() == [4, 'n/a', 5, 'n/a']
tools.run_asv_with_conf(conf, 'run', '--show-stderr',
'--bench', r'track_param_selection\(1, ',
_machine_file=machine_file)
assert get_results() == [4, 6, 5, 'n/a']
tools.run_asv_with_conf(conf, 'run', '--show-stderr',
'--bench', 'track_param_selection',
_machine_file=machine_file)
def test_run_append_samples(basic_conf):
tmpdir, local, conf, machine_file = basic_conf
# Only one environment
conf.matrix['asv_dummy_test_package_2'] = conf.matrix['asv_dummy_test_package_2'][:1]
# Tests multiple calls to "asv run --append-samples"
def run_it():
tools.run_asv_with_conf(conf, 'run', "master^!",
'--bench', 'time_examples.TimeSuite.time_example_benchmark_1',
'--append-samples', '-a', 'repeat=(1, 1, 10.0)', '-a', 'processes=1',
'-a', 'number=1', '-a', 'warmup_time=0',
_machine_file=machine_file)
run_it()
result_dir = join(tmpdir, 'results_workflow', 'orangutan')
result_fn, = [join(result_dir, fn) for fn in os.listdir(result_dir)
if fn != 'machine.json']
data = util.load_json(result_fn)
assert data['results']['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0] is not None
assert len(data['results']['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) == 1
run_it()
data = util.load_json(result_fn)
assert len(data['results']['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) == 2
def test_cpu_affinity(basic_conf):
tmpdir, local, conf, machine_file = basic_conf
# Only one environment
conf.matrix = {}
# Tests multiple calls to "asv run --append-samples"
tools.run_asv_with_conf(conf, 'run', "master^!",
'--bench', 'time_examples.TimeSuite.time_example_benchmark_1',
'--cpu-affinity=0', '-a', 'repeat=(1, 1, 10.0)', '-a', 'processes=1',
'-a', 'number=1', '-a', 'warmup_time=0',
_machine_file=machine_file)
# Check run produced a result
result_dir = join(tmpdir, 'results_workflow', 'orangutan')
result_fn, = [join(result_dir, fn) for fn in os.listdir(result_dir)
if fn != 'machine.json']
data = util.load_json(result_fn)
assert data['results']['time_examples.TimeSuite.time_example_benchmark_1']
|
py | b4031c4f4f472f5e225848b854fb41813d21a2b5 | import pickle
import random
import numpy as np
import torch
import torch.utils.data
from config import vocab_file
from models import layers
from utils import load_wav_to_torch, load_filepaths_and_text, text_to_sequence
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) normalizes text and converts them to sequences of one-hot vectors
3) computes mel-spectrograms from audio files.
"""
def __init__(self, audiopaths_and_text, hparams):
with open(vocab_file, 'rb') as file:
data = pickle.load(file)
self.char2idx = data['char2idx']
self.idx2char = data['idx2char']
self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
self.sampling_rate = hparams.sampling_rate
# self.max_wav_value = hparams.max_wav_value
self.load_mel_from_disk = hparams.load_mel_from_disk
self.stft = layers.TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
hparams.mel_fmax)
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_text_pair(self, audiopath_and_text):
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
text = self.get_text(text)
mel = self.get_mel(audiopath)
return (text, mel)
def get_mel(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
# audio_norm = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
return melspec
def get_text(self, text):
text = text_to_sequence(text, self.char2idx)
text_norm = torch.IntTensor(text)
return text_norm
def __getitem__(self, index):
return self.get_mel_text_pair(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate:
""" Zero-pads model inputs and targets based on number of frames per step
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1) - 1:] = 1
output_lengths[i] = mel.size(1)
return text_padded, input_lengths, mel_padded, gate_padded, output_lengths
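# Worked example of the padding above (hypothetical sizes, not from the original):
# with n_frames_per_step = 3 and a longest mel of 103 frames in the batch,
# max_target_len is rounded up to 105, so every mel is zero-padded to
# (num_mels, 105) and gate_padded is set to 1 from each sequence's last real
# frame onward.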
if __name__ == '__main__':
import config
from tqdm import tqdm
from utils import parse_args, sequence_to_text
from config import training_files, validation_files
with open(vocab_file, 'rb') as file:
data = pickle.load(file)
char2idx = data['char2idx']
idx2char = data['idx2char']
args = parse_args()
collate_fn = TextMelCollate(config.n_frames_per_step)
train_dataset = TextMelLoader(training_files, config)
print('len(train_dataset): ' + str(len(train_dataset)))
dev_dataset = TextMelLoader(validation_files, config)
print('len(dev_dataset): ' + str(len(dev_dataset)))
text, mel = train_dataset[0]
print('text: ' + str(text))
text = sequence_to_text(text.numpy().tolist(), idx2char)
text = ''.join(text)
print('text: ' + str(text))
print('type(mel): ' + str(type(mel)))
text_lengths = []
mel_lengths = []
for data in tqdm(dev_dataset):
text, mel = data
text = sequence_to_text(text.numpy().tolist(), idx2char)
text = ''.join(text)
mel = mel.numpy()
# print('text: ' + str(text))
# print('mel.size: ' + str(mel.size))
text_lengths.append(len(text))
mel_lengths.append(mel.size)
# print('np.mean(mel): ' + str(np.mean(mel)))
# print('np.max(mel): ' + str(np.max(mel)))
# print('np.min(mel): ' + str(np.min(mel)))
print('np.mean(text_lengths): ' + str(np.mean(text_lengths)))
print('np.mean(mel_lengths): ' + str(np.mean(mel_lengths)))
|
py | b4031cc716b119065f2b7cc6ae93aa4f9b378a9e | from api_model.apisnake import APISnake
from basic_model.board import Board
class APIBoard(Board):
def __init__(self, board_data):
self.height = board_data['height']
self.width = board_data['width']
self.food_list = self.food_list(board_data['food'])
self.snake_list = self.snake_list(board_data['snakes'])
def my_snake(self):
return self.snake_list[0]
@staticmethod
def snake_list(snake_data):
snake_list = []
for snake in snake_data:
new_snake = APISnake(snake)
snake_list.append(new_snake)
return snake_list
@staticmethod
def food_list(food_data):
food_list = []
for food_dic in food_data:
x = food_dic["x"]
y = food_dic["y"]
food_list.append((x, y))
return food_list
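# Expected shape of board_data (values illustrative only): {"height": 11, "width": 11,
# "food": [{"x": 3, "y": 4}, ...], "snakes": [<snake dict passed to APISnake>, ...]}.
# Note that __init__ binds instance attributes named food_list and snake_list, which
# shadow these static builder methods on the instance.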
|
py | b4031d2ffad9b8d54413c8ba2c92c7ded5f8b2d8 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Hydra script that creates an entity and attaches Atom components to it for test verification.
"""
import os
import sys
import azlmbr.math as math
import azlmbr.bus as bus
import azlmbr.paths
import azlmbr.asset as asset
import azlmbr.entity as entity
import azlmbr.legacy.general as general
import azlmbr.editor as editor
sys.path.append(os.path.join(azlmbr.paths.devroot, "AutomatedTesting", "Gem", "PythonTests"))
import editor_python_test_tools.hydra_editor_utils as hydra
from editor_python_test_tools.utils import TestHelper
def run():
"""
Summary:
The below common tests are done for each of the components.
1) Addition of component to the entity
2) UNDO/REDO of addition of component
3) Enter/Exit game mode
4) Hide/Show entity containing component
5) Deletion of component
6) UNDO/REDO of deletion of component
Some additional tests for specific components include
1) Assigning value to some properties of each component
2) Verifying if the component is activated only when the required components are added
Expected Result:
1) Component can be added to an entity.
2) The addition of component can be undone and redone.
3) Game mode can be entered/exited without issue.
4) Entity with component can be hidden/shown.
5) Component can be deleted.
6) The deletion of component can be undone and redone.
7) Component is activated only when the required components are added
8) Values can be assigned to the properties of the component
:return: None
"""
def create_entity_undo_redo_component_addition(component_name):
new_entity = hydra.Entity(f"{component_name}")
new_entity.create_entity(math.Vector3(512.0, 512.0, 34.0), [component_name])
general.log(f"{component_name}_test: Component added to the entity: "
f"{hydra.has_components(new_entity.id, [component_name])}")
# undo component addition
general.undo()
TestHelper.wait_for_condition(lambda: not hydra.has_components(new_entity.id, [component_name]), 2.0)
general.log(f"{component_name}_test: Component removed after UNDO: "
f"{not hydra.has_components(new_entity.id, [component_name])}")
# redo component addition
general.redo()
TestHelper.wait_for_condition(lambda: hydra.has_components(new_entity.id, [component_name]), 2.0)
general.log(f"{component_name}_test: Component added after REDO: "
f"{hydra.has_components(new_entity.id, [component_name])}")
return new_entity
def verify_enter_exit_game_mode(component_name):
general.enter_game_mode()
TestHelper.wait_for_condition(lambda: general.is_in_game_mode(), 2.0)
general.log(f"{component_name}_test: Entered game mode: {general.is_in_game_mode()}")
general.exit_game_mode()
TestHelper.wait_for_condition(lambda: not general.is_in_game_mode(), 2.0)
general.log(f"{component_name}_test: Exit game mode: {not general.is_in_game_mode()}")
def verify_hide_unhide_entity(component_name, entity_obj):
def is_entity_hidden(entity_id):
return editor.EditorEntityInfoRequestBus(bus.Event, "IsHidden", entity_id)
editor.EditorEntityAPIBus(bus.Event, "SetVisibilityState", entity_obj.id, False)
general.idle_wait_frames(1)
general.log(f"{component_name}_test: Entity is hidden: {is_entity_hidden(entity_obj.id)}")
editor.EditorEntityAPIBus(bus.Event, "SetVisibilityState", entity_obj.id, True)
general.idle_wait_frames(1)
general.log(f"{component_name}_test: Entity is shown: {not is_entity_hidden(entity_obj.id)}")
def verify_deletion_undo_redo(component_name, entity_obj):
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntityById", entity_obj.id)
TestHelper.wait_for_condition(lambda: not hydra.find_entity_by_name(entity_obj.name), 2.0)
general.log(f"{component_name}_test: Entity deleted: {not hydra.find_entity_by_name(entity_obj.name)}")
general.undo()
TestHelper.wait_for_condition(lambda: hydra.find_entity_by_name(entity_obj.name) is not None, 2.0)
general.log(f"{component_name}_test: UNDO entity deletion works: "
f"{hydra.find_entity_by_name(entity_obj.name) is not None}")
general.redo()
TestHelper.wait_for_condition(lambda: not hydra.find_entity_by_name(entity_obj.name), 2.0)
general.log(f"{component_name}_test: REDO entity deletion works: "
f"{not hydra.find_entity_by_name(entity_obj.name)}")
def verify_required_component_addition(entity_obj, components_to_add, component_name):
def is_component_enabled(entity_componentid_pair):
return editor.EditorComponentAPIBus(bus.Broadcast, "IsComponentEnabled", entity_componentid_pair)
general.log(
f"{component_name}_test: Entity disabled initially: "
f"{not is_component_enabled(entity_obj.components[0])}")
for component in components_to_add:
entity_obj.add_component(component)
TestHelper.wait_for_condition(lambda: is_component_enabled(entity_obj.components[0]), 2.0)
general.log(
f"{component_name}_test: Entity enabled after adding "
f"required components: {is_component_enabled(entity_obj.components[0])}"
)
def verify_set_property(entity_obj, path, value):
entity_obj.get_set_test(0, path, value)
# Wait for Editor idle loop before executing Python hydra scripts.
TestHelper.init_idle()
# Delete all existing entities initially
search_filter = azlmbr.entity.SearchFilter()
all_entities = entity.SearchBus(azlmbr.bus.Broadcast, "SearchEntities", search_filter)
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntities", all_entities)
class ComponentTests:
"""Test launcher for each component."""
def __init__(self, component_name, *additional_tests):
self.component_name = component_name
self.additional_tests = additional_tests
self.run_component_tests()
def run_component_tests(self):
# Run common and additional tests
entity_obj = create_entity_undo_redo_component_addition(self.component_name)
# Enter/Exit game mode test
verify_enter_exit_game_mode(self.component_name)
# Any additional tests are executed here
for test in self.additional_tests:
test(entity_obj)
# Hide/Unhide entity test
verify_hide_unhide_entity(self.component_name, entity_obj)
# Deletion/Undo/Redo test
verify_deletion_undo_redo(self.component_name, entity_obj)
# DepthOfField Component
camera_entity = hydra.Entity("camera_entity")
camera_entity.create_entity(math.Vector3(512.0, 512.0, 34.0), ["Camera"])
depth_of_field = "DepthOfField"
ComponentTests(
depth_of_field,
lambda entity_obj: verify_required_component_addition(entity_obj, ["PostFX Layer"], depth_of_field),
lambda entity_obj: verify_set_property(
entity_obj, "Controller|Configuration|Camera Entity", camera_entity.id))
# Decal Component
material_asset_path = os.path.join("AutomatedTesting", "Materials", "basic_grey.material")
material_asset = asset.AssetCatalogRequestBus(
bus.Broadcast, "GetAssetIdByPath", material_asset_path, math.Uuid(), False)
ComponentTests(
"Decal (Atom)", lambda entity_obj: verify_set_property(
entity_obj, "Controller|Configuration|Material", material_asset))
# Directional Light Component
ComponentTests(
"Directional Light",
lambda entity_obj: verify_set_property(
entity_obj, "Controller|Configuration|Shadow|Camera", camera_entity.id))
# Exposure Control Component
ComponentTests(
"Exposure Control", lambda entity_obj: verify_required_component_addition(
entity_obj, ["PostFX Layer"], "Exposure Control"))
# Global Skylight (IBL) Component
diffuse_image_path = os.path.join("LightingPresets", "greenwich_park_02_4k_iblskyboxcm.exr.streamingimage")
diffuse_image_asset = asset.AssetCatalogRequestBus(
bus.Broadcast, "GetAssetIdByPath", diffuse_image_path, math.Uuid(), False)
specular_image_path = os.path.join("LightingPresets", "greenwich_park_02_4k_iblskyboxcm.exr.streamingimage")
specular_image_asset = asset.AssetCatalogRequestBus(
bus.Broadcast, "GetAssetIdByPath", specular_image_path, math.Uuid(), False)
ComponentTests(
"Global Skylight (IBL)",
lambda entity_obj: verify_set_property(
entity_obj, "Controller|Configuration|Diffuse Image", diffuse_image_asset),
lambda entity_obj: verify_set_property(
entity_obj, "Controller|Configuration|Specular Image", specular_image_asset))
# Physical Sky Component
ComponentTests("Physical Sky")
# PostFX Layer Component
ComponentTests("PostFX Layer")
# Radius Weight Modifier Component
ComponentTests("Radius Weight Modifier")
# Light Component
ComponentTests("Light")
# Display Mapper Component
ComponentTests("Display Mapper")
if __name__ == "__main__":
run()
|
py | b4031e087ed89b434c462134297eca9cfd3b95c9 | #!/usr/bin/env python
import json
import urllib
import urllib2
from pprint import pprint
def get_weather_data(api_key, request_type, location):
    # return a dict that contains the required information
url = 'http://api.wunderground.com/api/%s/%s/%s.json'\
% (api_key, request_type,location)
req = urllib2.Request(url)
content = urllib2.urlopen(req).read()
data = json.loads(content)
return data
def location_autocomplete(input):
"""
    return a series of location candidates from the autocomplete API, or nothing useful if the place cannot be found
"""
url = 'http://autocomplete.wunderground.com/aq?query=%s' %(input)
req = urllib2.Request(url)
content = urllib2.urlopen(req).read()
data = json.loads(content)
return data
if __name__ == "__main__":
cityid = 5141502
location = '/q/zmw:12180.1.99999'
#print get_weather_data(api_key, "conditions",location)
pprint(location_autocomplete("troy"))
|
py | b4031e620b83661cac72377a272ef5021ee77794 | from .__about__ import __version__
from ._grid import Grid2D, Grid3D, TraveltimeGrid2D, TraveltimeGrid3D
from ._helpers import get_num_threads, set_num_threads
from ._io import grid_to_meshio, ray_to_meshio
from ._solver import Eikonal2D, Eikonal3D
__all__ = [
"Eikonal2D",
"Eikonal3D",
"Grid2D",
"Grid3D",
"TraveltimeGrid2D",
"TraveltimeGrid3D",
"get_num_threads",
"set_num_threads",
"grid_to_meshio",
"ray_to_meshio",
"__version__",
]
|
py | b4031e92595a43b3368bd94faaa2a4660f03fefb | import random
import logging
import unittest
from mdssdk.connection_manager.errors import CLIError
from mdssdk.fc import Fc
from tests.test_fc.vars import *
log = logging.getLogger(__name__)
class TestFcAttrDescription(unittest.TestCase):
def __init__(self, testName, sw):
super().__init__(testName)
self.switch = sw
def setUp(self) -> None:
log.debug(self.switch.version)
log.debug(self.switch.ipaddr)
interfaces = self.switch.interfaces
while True:
k, v = random.choice(list(interfaces.items()))
if type(v) is Fc:
self.fc = v
log.debug(k)
break
self.old = self.fc.description
def test_description_read(self):
self.assertIsNotNone(self.fc.description)
def test_description_write_max254(self):
desc = "switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch123456789123456789123456"
self.fc.description = desc
self.assertEqual(desc, self.fc.description)
self.fc.description = self.old
self.assertEqual(self.old, self.fc.description)
def test_description_write_beyondmax(self):
desc = "switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678switch12345678912345678912345678"
with self.assertRaises(CLIError) as e:
self.fc.description = desc
self.assertIn("String exceeded max length of (254)", str(e.exception))
def tearDown(self) -> None:
self.fc.description = self.old
self.assertEqual(self.old, self.fc.description)
|
py | b4031ede9e1460f4b29a5813b66779d1e5968352 | import pytest
import responses
from tests import TestCase
from src.masonite.notification import Notification, Notifiable, SlackMessage
from src.masonite.exceptions import NotificationException
from masoniteorm.models import Model
# fake webhook for tests
webhook_url = "https://hooks.slack.com/services/X/Y/Z"
webhook_url_2 = "https://hooks.slack.com/services/A/B/C"
def route_for_slack(self):
return "#bot"
class User(Model, Notifiable):
"""User Model"""
__fillable__ = ["name", "email", "password", "phone"]
def route_notification_for_slack(self):
return route_for_slack(self)
class WelcomeUserNotification(Notification):
def to_slack(self, notifiable):
return SlackMessage().text(f"Welcome {notifiable.name}!").from_("test-bot")
def via(self, notifiable):
return ["slack"]
class WelcomeNotification(Notification):
def to_slack(self, notifiable):
return SlackMessage().text("Welcome !").from_("test-bot")
def via(self, notifiable):
return ["slack"]
class OtherNotification(Notification):
def to_slack(self, notifiable):
return (
SlackMessage().to(["#general", "#news"]).text("Welcome !").from_("test-bot")
)
def via(self, notifiable):
return ["slack"]
class TestSlackWebhookDriver(TestCase):
def setUp(self):
super().setUp()
self.notification = self.application.make("notification")
@responses.activate
def test_sending_to_anonymous(self):
responses.add(responses.POST, webhook_url, body=b"ok")
self.notification.route("slack", webhook_url).notify(WelcomeNotification())
self.assertTrue(responses.assert_call_count(webhook_url, 1))
@responses.activate
def test_sending_to_notifiable(self):
responses.add(responses.POST, webhook_url, body=b"ok")
User.route_notification_for_slack = lambda notifiable: webhook_url
user = User.find(1)
user.notify(WelcomeNotification())
self.assertTrue(responses.assert_call_count(webhook_url, 1))
User.route_notification_for_slack = route_for_slack
@responses.activate
def test_sending_to_multiple_webhooks(self):
responses.add(responses.POST, webhook_url, body=b"ok")
responses.add(responses.POST, webhook_url_2, body=b"ok")
User.route_notification_for_slack = lambda notifiable: [
webhook_url,
webhook_url_2,
]
user = User.find(1)
user.notify(WelcomeNotification())
self.assertTrue(responses.assert_call_count(webhook_url, 1))
self.assertTrue(responses.assert_call_count(webhook_url_2, 1))
User.route_notification_for_slack = route_for_slack
class TestSlackAPIDriver(TestCase):
url = "https://slack.com/api/chat.postMessage"
channel_url = "https://slack.com/api/conversations.list"
def setUp(self):
super().setUp()
self.notification = self.application.make("notification")
def test_sending_without_credentials(self):
with self.assertRaises(NotificationException) as e:
self.notification.route("slack", "123456").notify(WelcomeNotification())
self.assertIn("not_authed", str(e.exception))
@responses.activate
def test_sending_to_anonymous(self):
responses.add(
responses.POST,
self.url,
body=b'{"ok": "True"}',
)
responses.add(
responses.POST,
self.channel_url,
body=b'{"channels": [{"name": "bot", "id": "123"}]}',
)
self.notification.route("slack", "#bot").notify(WelcomeNotification())
# to convert #bot to Channel ID
self.assertTrue(responses.assert_call_count(self.channel_url, 1))
self.assertTrue(responses.assert_call_count(self.url, 1))
@responses.activate
def test_sending_to_notifiable(self):
user = User.find(1)
responses.add(
responses.POST,
self.url,
body=b'{"ok": "True"}',
)
responses.add(
responses.POST,
self.channel_url,
body=b'{"channels": [{"name": "bot", "id": "123"}]}',
)
user.notify(WelcomeUserNotification())
self.assertTrue(responses.assert_call_count(self.url, 1))
@responses.activate
@pytest.mark.skip(
reason="Failing because user defined routing takes precedence. What should be the behaviour ?"
)
def test_sending_to_multiple_channels(self):
user = User.find(1)
responses.add(
responses.POST,
self.url,
body=b'{"ok": "True"}',
)
responses.add(
responses.POST,
self.channel_url,
body=b'{"channels": [{"name": "bot", "id": "123"}, {"name": "general", "id": "456"}]}',
)
user.notify(OtherNotification())
self.assertTrue(responses.assert_call_count(self.channel_url, 2))
self.assertTrue(responses.assert_call_count(self.url, 2))
@responses.activate
def test_convert_channel(self):
channel_id = self.notification.get_driver("slack").convert_channel(
"123456", "token"
)
self.assertEqual(channel_id, "123456")
responses.add(
responses.POST,
self.channel_url,
body=b'{"channels": [{"name": "general", "id": "654321"}]}',
)
channel_id = self.notification.get_driver("slack").convert_channel(
"#general", "token"
)
self.assertEqual(channel_id, "654321")
|
py | b4031ee6527d9c715b9e985201515c539d532fb3 | import abc
from abc import ABCMeta
class ISQLHandler(metaclass=ABCMeta):
def __init__(self):
pass
@abc.abstractmethod
def sql_instance(self):
pass
@abc.abstractmethod
def sql_table(self):
pass
@abc.abstractmethod
def sql_cols(self):
pass
def sql_insertable_cols(self):
return self.sql_cols()
def sql_modifiable_cols(self):
return self.sql_insertable_cols()
@abc.abstractmethod
def sql_primary_key(self):
pass
|
py | b4031fae6cd284fd36440d728ee9c87ed8b3ed32 | from bs4 import BeautifulSoup
import re
import requests
def crawlWeather(city):
url = 'https://wetter.de/suche.html?search='+city
response = requests.get(url)
text = response.text
soup = BeautifulSoup(text, features="html.parser")
dailyForecasts = soup.findAll('div', class_=['base-box--level-0', 'weather-daybox'])
result = []
for dailyForecastString in dailyForecasts:
daySoup = BeautifulSoup(str(dailyForecastString), features="html.parser")
print(daySoup)
dayName = daySoup.find('div', class_='weather-daybox__date__weekday').string
maxTemp = daySoup.find('div', class_='weather-daybox__minMax__max').string
        minTemp = daySoup.find('div', class_='weather-daybox__minMax__min').string  # assumed selector name for the daily minimum
forecastDict = {"day": dayName, "max": maxTemp, "min": minTemp}
result.append(forecastDict)
return result
|
py | b403205253f17c64bd19274f0acc360de490ebed | #!/usr/bin/python
# coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_workflow_launch
author: "John Westcott IV (@john-westcott-iv)"
version_added: "2.8"
short_description: Run a workflow in Ansible Tower
description:
- Launch an Ansible Tower workflows. See
U(https://www.ansible.com/tower) for an overview.
options:
workflow_template:
description:
- The name of the workflow template to run.
required: True
type: str
extra_vars:
description:
- Any extra vars required to launch the job.
type: str
wait:
description:
- Wait for the workflow to complete.
default: True
type: bool
timeout:
description:
- If waiting for the workflow to complete this will abort after this
amount of seconds
type: int
requirements:
- "python >= 2.6"
- ansible-tower-cli >= 3.0.2
extends_documentation_fragment: awx.awx.auth
'''
RETURN = '''
tower_version:
description: The version of Tower we connected to
returned: If connection to Tower works
type: str
sample: '3.4.0'
job_info:
description: dictionary containing information about the workflow executed
returned: If workflow launched
type: dict
'''
EXAMPLES = '''
- name: Launch a workflow
tower_workflow_launch:
name: "Test Workflow"
delegate_to: localhost
run_once: true
register: workflow_results
- name: Launch a Workflow with parameters without waiting
tower_workflow_launch:
workflow_template: "Test workflow"
extra_vars: "---\nmy: var"
wait: False
delegate_to: localhost
run_once: true
register: workflow_task_info
'''
from ..module_utils.ansible_tower import TowerModule, tower_auth_config
try:
import tower_cli
from tower_cli.api import client
from tower_cli.conf import settings
from tower_cli.exceptions import ServerError, ConnectionError, BadRequest, TowerCLIError
except ImportError:
pass
def main():
argument_spec = dict(
workflow_template=dict(required=True),
extra_vars=dict(),
wait=dict(default=True, type='bool'),
timeout=dict(default=None, type='int'),
)
module = TowerModule(
argument_spec=argument_spec,
supports_check_mode=True
)
workflow_template = module.params.get('workflow_template')
extra_vars = module.params.get('extra_vars')
wait = module.params.get('wait')
timeout = module.params.get('timeout')
    # If we are going to use this result to return, we can consider ourselves changed
result = dict(
changed=False,
msg='initial message'
)
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
# First we will test the connection. This will be a test for both check and run mode
# Note, we are not using the tower_check_mode method here because we want to do more than just a ping test
# If we are in check mode we also want to validate that we can find the workflow
try:
ping_result = client.get('/ping').json()
# Stuff the version into the results as an FYI
result['tower_version'] = ping_result['version']
except(ServerError, ConnectionError, BadRequest) as excinfo:
result['msg'] = "Failed to reach Tower: {0}".format(excinfo)
module.fail_json(**result)
        # Now that we know we can connect, let's verify that we can resolve the workflow_template
try:
workflow = tower_cli.get_resource("workflow").get(**{'name': workflow_template})
except TowerCLIError as e:
result['msg'] = "Failed to find workflow: {0}".format(e)
module.fail_json(**result)
# Since we were able to find the workflow, if we are in check mode we can return now
if module.check_mode:
result['msg'] = "Check mode passed"
module.exit_json(**result)
        # We are now ready to run the workflow
try:
result['job_info'] = tower_cli.get_resource('workflow_job').launch(
workflow_job_template=workflow['id'],
monitor=False,
wait=wait,
timeout=timeout,
extra_vars=extra_vars
)
if wait:
# If we were waiting for a result we will fail if the workflow failed
if result['job_info']['failed']:
result['msg'] = "Workflow execution failed"
module.fail_json(**result)
else:
module.exit_json(**result)
            # We were not waiting, and we can only reach this point if the workflow was fired off, so we can return success
module.exit_json(**result)
except TowerCLIError as e:
result['msg'] = "Failed to execute workflow: {0}".format(e)
module.fail_json(**result)
if __name__ == '__main__':
main()
|
py | b403207b8e8211eb9cdcf96f6e4dfe87bf0b0d59 | from django.db.models import Q
from .choices import IPAddressRoleChoices
# BGP ASN bounds
BGP_ASN_MIN = 1
BGP_ASN_MAX = 2 ** 32 - 1
#
# VRFs
#
# Per RFC 4364 section 4.2, a route distinguisher may be encoded as one of the following:
# * Type 0 (16-bit AS number : 32-bit integer)
# * Type 1 (32-bit IPv4 address : 16-bit integer)
# * Type 2 (32-bit AS number : 16-bit integer)
# 21 characters are sufficient to convey the longest possible string value (255.255.255.255:65535)
# Also used for RouteTargets
VRF_RD_MAX_LENGTH = 21
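# Illustrative encodings (example values only): type 0 "65000:100",
# type 1 "192.0.2.1:500", type 2 "4200000000:100". The longest possible string,
# "255.255.255.255:65535", is 21 characters, hence the limit above.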
#
# Prefixes
#
PREFIX_LENGTH_MIN = 1
PREFIX_LENGTH_MAX = 127 # IPv6
#
# IPAddresses
#
IPADDRESS_ASSIGNMENT_MODELS = Q(
Q(app_label="dcim", model="interface") | Q(app_label="virtualization", model="vminterface")
)
IPADDRESS_MASK_LENGTH_MIN = 1
IPADDRESS_MASK_LENGTH_MAX = 128 # IPv6
IPADDRESS_ROLES_NONUNIQUE = (
# IPAddress roles which are exempt from unique address enforcement
IPAddressRoleChoices.ROLE_ANYCAST,
IPAddressRoleChoices.ROLE_VIP,
IPAddressRoleChoices.ROLE_VRRP,
IPAddressRoleChoices.ROLE_HSRP,
IPAddressRoleChoices.ROLE_GLBP,
IPAddressRoleChoices.ROLE_CARP,
)
IPV4_BYTE_LENGTH = 4
IPV6_BYTE_LENGTH = 16
#
# VLANs
#
# 12-bit VLAN ID (values 0 and 4095 are reserved)
VLAN_VID_MIN = 1
VLAN_VID_MAX = 4094
#
# Services
#
# 16-bit port number
SERVICE_PORT_MIN = 1
SERVICE_PORT_MAX = 65535
|
py | b4032201fac1c7db8131aea67a571d37c65bd772 | from transformers import AutoTokenizer, AutoModelWithLMHead
def predict(sentence: str) -> str:
"""
Remove typos from the given string.
:param sentence: sentence to correct
:return: sentence corrected
"""
model_name = "flexudy/t5-base-multi-sentence-doctor"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelWithLMHead.from_pretrained(model_name)
input_text = f"repair_sentence: {sentence}</s>"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids, max_length=32, num_beams=1)
sentence = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
return sentence
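# Illustrative usage sketch (the input sentence is an arbitrary example, not from
# the original; running this downloads the pretrained model on first use):
if __name__ == "__main__":
    print(predict("me and my freind went to the stor"))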
|
py | b403220e99a1db0ff6344dd438d6bb134b0e0e03 | import glob
import json
import sys
import requests
import datetime
# check if correct arguments are passed to the script
if len(sys.argv) != 2:
print("Usage: {} ACCESS_TOKEN".format(sys.argv[0]))
sys.exit(0)
# directory which contains the activity JSON files
DATA_DIR = "./Sport-sessions"
# store access token for Strava API
ACCESS_TOKEN = sys.argv[1]
# Strava API endpoint
STRAVA_ENDPOINT = "https://www.strava.com/api/v3/activities"
activities = []
# store each activity (one per JSON file) in the activities list
for path in glob.glob(DATA_DIR + "/*.json"):
with open(path, "r") as json_file:
activities.append(json.load(json_file))
# No official documentation, took activity types from
# https://github.com/Metalnem/runtastic/blob/master/api/api.go
# and mapped it to the corresponding Strava activity types at
# https://developers.strava.com/docs/reference/#api-models-ActivityType
def strava_activity_type(runtastic_type_id):
walk = "Walk"
run = "Run"
swim = "Swim"
ride = "Ride"
return {
2: walk, 7: walk, 19: walk,
3: ride, 4: ride, 15: ride, 22: ride,
18: swim
}.get(int(runtastic_type_id), run)
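# For example, strava_activity_type(3) returns "Ride", strava_activity_type(18)
# returns "Swim", and any id not in the mapping above falls back to "Run".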
# map runtastic data to strava API request and make API call
def import_activity(activity):
activity_type = strava_activity_type(activity['sport_type_id'])
activity_date = datetime.datetime.fromtimestamp(int(activity['start_time']) / 1000).isoformat()
print("Importing {} from {}".format(activity_type, activity_date))
data = {
"name": "{} ({})".format(activity_type, activity_date),
"type": activity_type,
"start_date_local": activity_date + "Z",
"elapsed_time": int(int(activity["duration"]) / 1000),
"distance": int(activity['distance'])
}
headers = {
"Authorization": "Bearer {}".format(ACCESS_TOKEN)
}
response = requests.post(STRAVA_ENDPOINT, data=data, headers=headers)
if response.status_code == requests.codes.created:
print("Import successful!")
# the Strava API returns a strange error if an activity already exists, since
# this could happen quite easily we decided to handle this error explicitly
elif response.status_code == 409:
print("Import failed, activity already exists!")
else:
print("Import failed, response was: \n{}\n".format(response.text))
# import all activities into Strava
for activity in activities:
import_activity(activity)
|
py | b40322dbb3f108201e3971a421242388528d974e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AvailableOperationDisplay(Model):
"""An operation available at the listed Azure resource provider.
:param provider: Name of the operation provider.
:type provider: str
:param resource: Name of the resource on which the operation is available.
:type resource: str
:param operation: Name of the available operation.
:type operation: str
:param description: Description of the available operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, provider=None, resource=None, operation=None, description=None):
super(AvailableOperationDisplay, self).__init__()
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
|
py | b40324959ea22e9668eb943e1b9e91079613ea3c | # Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blazarclient import base
from blazarclient.i18n import _
class ComputeHostClientManager(base.BaseClientManager):
"""Manager for the ComputeHost connected requests."""
def create(self, name, **kwargs):
"""Creates host from values passed."""
values = {'name': name}
values.update(**kwargs)
resp, body = self.request_manager.post('/os-hosts', body=values)
return body['host']
def get(self, host_id):
"""Describe host specifications such as name and details."""
resp, body = self.request_manager.get('/os-hosts/%s' % host_id)
return body['host']
def update(self, host_id, values):
"""Update attributes of the host."""
if not values:
return _('No values to update passed.')
resp, body = self.request_manager.put(
'/os-hosts/%s' % host_id, body=values
)
return body['host']
def delete(self, host_id):
"""Delete host with specified ID."""
resp, body = self.request_manager.delete('/os-hosts/%s' % host_id)
def list(self, sort_by=None):
"""List all hosts."""
resp, body = self.request_manager.get('/os-hosts')
hosts = body['hosts']
if sort_by:
hosts = sorted(hosts, key=lambda l: l[sort_by])
return hosts
def get_allocation(self, host_id):
"""Get allocation for host."""
resp, body = self.request_manager.get(
'/os-hosts/%s/allocation' % host_id)
return body['allocation']
def list_allocations(self, sort_by=None):
"""List allocations for all hosts."""
resp, body = self.request_manager.get('/os-hosts/allocations')
allocations = body['allocations']
if sort_by:
allocations = sorted(allocations, key=lambda l: l[sort_by])
return allocations
def reallocate(self, host_id, values):
"""Reallocate host from leases."""
resp, body = self.request_manager.put(
'/os-hosts/%s/allocation' % host_id, body=values)
return body['allocation']
def list_capabilities(self, detail=False, sort_by=None):
url = '/os-hosts/properties'
if detail:
url += '?detail=True'
resp, body = self.request_manager.get(url)
resource_properties = body['resource_properties']
# Values is a reserved word in cliff so need to rename values column.
if detail:
for p in resource_properties:
p['capability_values'] = p['values']
del p['values']
if sort_by:
resource_properties = sorted(resource_properties,
key=lambda l: l[sort_by])
return resource_properties
def get_capability(self, capability_name):
resource_property = [
x for x in self.list_capabilities(detail=True)
if x['property'] == capability_name]
return {} if not resource_property else resource_property[0]
def set_capability(self, capability_name, private):
data = {'private': private}
resp, body = self.request_manager.patch(
'/os-hosts/properties/%s' % capability_name, body=data)
return body['resource_property']
|
py | b403256b8134e4c302371620eaa4eebc30a21226 | # Copyright (c) 2018, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
from pygdf.dataframe import DataFrame
def test_to_pandas():
df = DataFrame()
df['a'] = np.arange(10, dtype=np.int32)
df['b'] = np.arange(10, 20, dtype=np.float64)
pdf = df.to_pandas()
assert tuple(df.columns) == tuple(pdf.columns)
assert df['a'].dtype == pdf['a'].dtype
assert df['b'].dtype == pdf['b'].dtype
assert len(df['a']) == len(pdf['a'])
assert len(df['b']) == len(pdf['b'])
def test_from_pandas():
pdf = pd.DataFrame()
pdf['a'] = np.arange(10, dtype=np.int32)
pdf['b'] = np.arange(10, 20, dtype=np.float64)
df = DataFrame.from_pandas(pdf)
assert tuple(df.columns) == tuple(pdf.columns)
assert df['a'].dtype == pdf['a'].dtype
assert df['b'].dtype == pdf['b'].dtype
assert len(df['a']) == len(pdf['a'])
assert len(df['b']) == len(pdf['b'])
def test_from_pandas_ex1():
pdf = pd.DataFrame({'a': [0, 1, 2, 3],
'b': [0.1, 0.2, None, 0.3]})
print(pdf)
df = DataFrame.from_pandas(pdf)
print(df)
assert tuple(df.columns) == tuple(pdf.columns)
assert np.all(df['a'].to_array() == pdf['a'])
matches = df['b'].to_array() == pdf['b']
    # the 3rd element is False because (nan == nan) evaluates to False
assert np.all(matches == [True, True, False, True])
assert np.isnan(df['b'].to_array()[2])
assert np.isnan(pdf['b'][2])
def test_from_pandas_with_index():
pdf = pd.DataFrame({'a': [0, 1, 2, 3],
'b': [0.1, 0.2, None, 0.3]})
pdf = pdf.set_index(np.asarray([4, 3, 2, 1]))
df = DataFrame.from_pandas(pdf)
# Check columns
np.testing.assert_array_equal(df.a.to_array(), pdf.a)
np.testing.assert_array_equal(df.b.to_array(), pdf.b)
# Check index
np.testing.assert_array_equal(df.index.values, pdf.index.values)
# Check again using pandas testing tool on frames
pd.util.testing.assert_frame_equal(df.to_pandas(), pdf)
|
py | b40325e51ef41f4014250d894453cd55801cccd7 | #!/usr/bin/env python
# played around with porting
# http://sourcery.dyndns.org/svn/teensymud/release/tmud-2.0.0/tmud.rb
# YAML implementation:
# http://pyyaml.org/wiki/PyYAMLDocumentation
import random, re, signal, sys
from SocketServer import ThreadingTCPServer, BaseRequestHandler
import yaml
rand = random.Random()
world = False
AUTHOR = "Jose Nazario"
VERSION = "1.1.1"
BANNER = """
This is PunyMUD version %s
Copyright (C) 2007 by Jose Nazario
Released under an Artistic License
Based on TeensyMUD Ruby code Copyright (C) 2005 by Jon A. Lambert
Original released under the terms of the TeensyMUD Public License
Login> """ % VERSION
HELP = """
===========================================================================
Play commands
i[nventory] = displays player inventory
l[ook] = displays the contents of a room
dr[op] = drops all objects in your inventory into the room
ex[amine] <object> = examine the named object
  g[et] = gets all objects in the room into your inventory
k[ill] <name> = attempts to kill player (e.g. k bubba)
s[ay] <message> = sends <message> to all players in the room
c[hat] <message> = sends <message> to all players in the game
h[elp]|? = displays help
q[uit] = quits the game (saves player)
<exit name> = moves player through exit named (ex. south)
===========================================================================
OLC
O <object name> = creates a new object (ex. O rose)
D <object number> = add description for an object
R <room name> <exit name to> <exit name back> = creates a new room and
autolinks the exits using the exit names provided.
(ex. R Kitchen north south)
===========================================================================
"""
class Obj(object):
def __init__(self, name, location):
self.name = name
self.location = location
self.oid = -1
self.description = None
def __repr__(self):
return 'Object: %s (id %s)' % (self.name, self.oid)
class Room(Obj):
def __init__(self, name):
self.exits = {}
self.name = name
def __repr__(self):
return 'Room: %s (id %s) - exits %s' % (self.name, self.oid, '|'.join(self.exits.keys()))
class Player(Obj):
def __init__(self, name, sock=None):
if sock: self.sock = sock
self.name = name
self.location = 1
def __repr__(self):
return 'Player: %s (id %s) - at %s' % (self.name, self.oid, self.location)
def sendto(self, s):
if getattr(self, 'sock', False): self.sock.send('%s\n' % s)
def parse(self, m):
m = m.strip()
pat = re.compile('(\w+)\W(.*)')
try:
args = pat.findall(m)[0]
cmd = args[0]
arg = args[1]
except IndexError:
cmd = m
arg = False
if cmd.lower() in [ x.lower() for x in world.find_by_oid(self.location).exits.keys() ]:
self.location = world.find_by_oid(self.location).exits[cmd].oid
self.parse('look')
elif cmd.startswith('q'):
self.sendto('Bye bye!')
del(self.sock)
world.save()
elif cmd.lower().startswith('h') or cmd.startswith('?'):
self.sendto(HELP)
elif cmd.startswith('i'):
for o in world.objects_at_location(self.oid):
self.sendto(o.name)
elif cmd.startswith('k'):
if not arg: self.parse('help')
d = world.find_player_by_name(arg)
if d and rand.random() < 0.3:
world.global_message('%s kills %s' % (self.name, d.name))
d.sock = None
world.delete(d)
world.save()
else:
world.global_message('%s misses' % self.name)
elif cmd.startswith('s'):
if arg: self.sendto(' You say "%s"' % arg)
else: self.sendto(' Did you mean to say something?')
for x in world.other_players_at_location(self.location, self.oid):
x.sendto(' %s says "%s"' % (self.name, arg))
elif cmd.startswith('c'):
if arg: self.sendto(' You chat, "%s"' % arg)
else: self.sendto(' Did you mean to say something?')
world.global_message_others('%s chats, "%s"' % (self.name, arg), self.oid)
elif cmd.startswith('g'):
for q in world.objects_at_location(self.location):
q.location = self.oid
self.sendto('Ok')
elif cmd.startswith('dr'):
for q in world.objects_at_location(self.oid):
q.location = self.location
self.sendto('Ok')
elif cmd.startswith('ex'):
if not isinstance(arg, str): self.parse('help')
try: arg = arg.strip()
except AttributeError: self.parse('help')
found = False
for o in world.objects_at_location(self.oid) + world.objects_at_location(self.location):
if o.name == arg:
if getattr(o, 'description', False): self.sendto(o.description)
else: self.sendto("It's just a %s" % o.name)
found = True
if not found:
if arg: self.sendto('No object %s found' % arg)
else: self.parse('help')
elif cmd == 'O':
if not arg: self.parse('help')
try:
o = Obj(arg.strip(), self.location)
world.add(o)
self.sendto('Created object %s' % o.oid)
world.save()
except AttributeError: self.parse('help')
elif cmd == 'D':
if not isinstance(arg, str): self.parse('help')
oid = False
try: oid, desc = arg.split(' ', 1)
except AttributeError: self.parse('help')
except ValueError: self.parse('help')
try: oid = int(oid)
except ValueError: self.parse('help')
o = world.find_by_oid(oid)
if o:
o.description = desc
world.save()
self.sendto('Ok')
elif oid: self.sendto('Object %s not found' % oid)
elif cmd == 'R':
if not arg: self.parse('help')
tmp = arg.split()
if len(tmp) < 3:
self.sendto(HELP)
else:
name = tmp[0]
exit_name_to = tmp[1]
exit_name_back = tmp[2]
d = Room(name)
world.find_by_oid(self.location).exits[exit_name_to] = d
d.exits[exit_name_back] = world.find_by_oid(self.location)
world.add(d)
self.sendto('Ok')
world.save()
elif cmd.startswith('l'):
self.sendto('Room: %s' % world.find_by_oid(self.location).name)
if getattr(world.find_by_oid(self.location), 'description', False):
self.sendto(world.find_by_oid(self.location).description)
self.sendto('Players:')
for x in world.other_players_at_location(self.location, self.oid):
if getattr(x, 'sock', False): self.sendto('%s is here' % x.name)
self.sendto('Objects:')
for x in world.objects_at_location(self.location):
self.sendto('A %s is here' % x.name)
self.sendto('Exits: %s' % ' | '.join(world.find_by_oid(self.location).exits.keys()))
elif not len(world.find_by_oid(self.location).exits.keys()):
self.parse('look')
else:
self.sendto('Huh?')
MINIMAL_DB = """- !!python/object:mud.Room
exits: {}
name: Lobby
oid: 1
"""
class World(object):
def __init__(self):
try:
open('db/world.yaml', 'r')
except IOError:
print 'Building minimal world database ...',
f = open('db/world.yaml', 'w')
f.write(MINIMAL_DB)
f.close()
print 'Done.'
print 'Loading world ...',
self.db = yaml.load(open('db/world.yaml', 'r'))
if not isinstance(self.db, list): self.db = [ self.db ]
self.dbtop = max([ x.oid for x in self.db ])
print 'Done.'
def getid(self):
self.dbtop += 1
if self.find_by_oid(self.dbtop): self.getid()
return self.dbtop
def save(self):
f = open('db/world.yaml', 'w')
f.write(yaml.dump(self.db))
f.close()
def add(self, obj):
obj.oid = self.getid()
self.db.insert(int(obj.oid), obj)
def delete(self, obj):
self.db.remove(obj)
def find_player_by_name(self, nm):
for o in self.db:
if isinstance(o, Player) and o.name == nm:
return o
def players_at_location(self, loc):
l = []
for o in self.db:
if isinstance(o, Player):
if loc and o.location == loc:
l.append(o)
else: l.append(o)
return l
def other_players_at_location(self, loc, plrid):
l = []
for o in self.db:
if isinstance(o, Player) and o.oid != plrid:
if loc and o.location == loc:
l.append(o)
elif not loc: l.append(o)
return l
def global_message(self, msg):
for plr in self.players_at_location(None):
try: plr.sendto(msg)
except: print 'Error sending "%s" to %s' % (msg, plr.name)
def global_message_others(self, msg, plrid):
for plr in self.other_players_at_location(None, plrid):
plr.sendto(msg)
def objects_at_location(self, loc):
l = []
for o in self.db:
if isinstance(o, Obj) and not isinstance(o, Room) and not isinstance(o, Player):
if loc and o.location == loc: l.append(o)
elif not loc: l.append(o)
return l
def find_by_oid(self, i):
for x in self.db:
if x.oid == i: return x
return None
class MudHandler(BaseRequestHandler):
def setup(self):
self.request.send(BANNER)
login_name = self.request.recv(1024).strip()
if len(login_name) < 1: self.setup()
d = world.find_player_by_name(login_name)
if d:
d.sock = self.request
else:
d = Player(login_name, self.request)
world.add(d)
d.sendto('Welcome %s @ %s' % (d.name, self.client_address[0]))
r = 'look'
while r:
d.parse(r)
if not getattr(d, 'sock', False): break
d.sock.send('> ')
r = self.request.recv(1024)
self.finish()
def main():
global world
world = World()
z = ThreadingTCPServer(('', 4000), MudHandler)
try: z.serve_forever()
except KeyboardInterrupt:
world.global_message('World is shutting down')
for plr in world.players_at_location(None):
try: plr.parse('quit')
except: print 'ERROR: %s could not quit gracefully' % plr.name
z.server_close()
world.save()
if __name__ == '__main__':
main()
|
py | b403264b657064846982f10d579e7d17a872e526 | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
from rdkit import Chem
import pandas as pd
import deepchem as dc
from deepchem.feat import RDKitDescriptors, CircularFingerprint
from collections import OrderedDict
def __dropCol__(df, target_name, keepSmiles=False):
""""""
if keepSmiles:
target_name.extend(["smiles"])
return df.drop(target_name, axis=1)
else:
return df.drop([target_name, 'smiles'], axis=1)
def data_split(self, test=0.2, val=0, split="random", add_molecule_to_testset=None, scaler=None, random=None):
"""
Take in a data frame, the target column name (exp).
Returns a numpy array with the target variable,
a numpy array (matrix) of feature variables,
and a list of strings of the feature headers.
:param self:
    :param test: fraction of the data held out for the test split (default 0.2)
    :param val: fraction of the data held out for the validation split (default 0)
    :param split:
        Keywords: 'random', 'index', 'scaffold'. Default is 'random'
    :param add_molecule_to_testset: optional list of SMILES strings forced into the test set
    :param scaler:
        Keywords: None, 'standard', 'minmax'. Default is None
    :param random: random seed (not referenced here; self.random_seed is used for splitting)
:return:
"""
self.test_percent = test # Test percent instance
self.val_percent = val # Val percent instance
self.target_array = np.array(self.data[self.target_name]) # Target array instance
scaler_method = scaler
if scaler == "standard": # Determine data scaling method
scaler_method = StandardScaler()
elif scaler == "minmax":
scaler_method = MinMaxScaler()
self.scaler_method = scaler # Scaler instance
if self.dataset in ['sider.csv', 'clintox.csv']: # Drop specific columns for specific datasets
features_df = __dropCol__(df=self.data, target_name=self.target_name, keepSmiles=True)
else:
features_df = __dropCol__(df=self.data, target_name=self.target_name)
# if dropSmiles is not None:
# dropSmiles = [Chem.MolToSmiles(Chem.MolFromSmiles(i)) for i in dropSmiles]
# features_df = features_df[~features_df.isin(dropSmiles)]
self.feature_list = list(features_df.columns) # save list of strings of features
self.feature_length = len(self.feature_list) # Save feature size
self.feature_array = np.array(features_df) # Save feature array instance
self.n_tot = self.feature_array.shape[0] # Save total amount of features
molecules_array = np.array(self.data['smiles']) # Grab molecules array
self.train_percent = 1 - self.test_percent - self.val_percent # Train percent
temp_data = self.data
# if val != 0:
canonMolToAdd = []
if add_molecule_to_testset is not None: # For specific molecules to add to testset
for i in add_molecule_to_testset:
to_mol = Chem.MolFromSmiles(i)
if to_mol is not None:
canonMolToAdd.append(Chem.MolToSmiles(to_mol))
# add_molecule_to_testset = [Chem.MolToSmiles(Chem.MolFromSmiles(i)) for i in add_molecule_to_testset]
add_data = self.data[self.data['smiles'].isin(canonMolToAdd)] # Add in features of specific SMILES
add_data_molecules = np.array(add_data['smiles']) # Specific molecule array to be added
add_features_array = np.array(__dropCol__(add_data, self.target_name)) # Specific feature array to be added
add_target_array = np.array(add_data[self.target_name]) # Specific target array to be added
temp_data = self.data[~self.data['smiles'].isin(canonMolToAdd)] # Final feature df
# We need to generate fingerprints to use deepchem's scaffold and butina splitting techniques.
featurizer = CircularFingerprint(size=2048)
# Loading in csv into deepchem
loader = dc.data.CSVLoader(tasks=[self.target_name], smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(self.dataset) # Feature
split_dict = {"random": dc.splits.RandomSplitter(), "scaffold": dc.splits.ScaffoldSplitter(),
"index": dc.splits.IndexSplitter()} # Dictionary of different data splitting methods
split_name_dict = {"random": "RandomSplit", "scaffold": "ScaffoldSplit", "index": "IndexSplit"}
try:
splitter = split_dict[split]
self.split_method = split_name_dict[split]
except KeyError:
raise Exception("""Invalid splitting methods. Please enter either "random", "scaffold" or "index".""")
if val == 0:
train_dataset, test_dataset = splitter.train_test_split(dataset, frac_train=round(1-test, 1),
seed=self.random_seed)
else:
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(dataset, frac_train=1-test-val,
frac_test=test, frac_valid=val,
seed=self.random_seed)
# All training related data
train_molecules = []
for train_smiles in train_dataset.ids:
train_to_mol = Chem.MolFromSmiles(train_smiles)
if train_to_mol is not None:
train_molecules.append(Chem.MolToSmiles(train_to_mol))
else:
pass
train_molecules = list(OrderedDict.fromkeys(train_molecules))
train_df = temp_data[temp_data['smiles'].isin(train_molecules)]
train_df = train_df.drop_duplicates() # Drop duplicates in Dataframe
self.train_molecules = np.array(train_df['smiles'])
self.train_features = np.array(__dropCol__(df=train_df, target_name=self.target_name))
self.n_train = self.train_features.shape[0]
self.train_target = np.array(train_df[self.target_name])
# All testing related data
test_molecules = []
for test_smiles in test_dataset.ids:
test_to_mol = Chem.MolFromSmiles(test_smiles)
if test_to_mol is not None:
test_molecules.append(Chem.MolToSmiles(test_to_mol))
else:
pass
test_molecules = list(OrderedDict.fromkeys(test_molecules)) # Drop duplicates in list
test_df = temp_data[temp_data['smiles'].isin(test_molecules)]
test_df = test_df.drop_duplicates() # Drop duplicates in Dataframe
self.test_molecules = np.array(test_df['smiles'])
self.test_features = np.array(__dropCol__(df=test_df, target_name=self.target_name))
self.test_target = np.array(test_df[self.target_name])
if add_molecule_to_testset is not None: # If there are specific SMILES to add
self.test_features = np.concatenate([self.test_features, add_features_array])
self.test_molecules = np.concatenate([self.test_molecules, add_data_molecules])
self.test_target = np.concatenate([self.test_target, add_target_array])
self.n_test = self.test_features.shape[0]
# All validating related data
if val != 0:
val_molecules = []
for smiles in valid_dataset.ids:
val_to_mol = Chem.MolFromSmiles(smiles)
if val_to_mol is not None:
val_molecules.append(Chem.MolToSmiles(val_to_mol))
val_molecules = list(OrderedDict.fromkeys(val_molecules))
val_df = temp_data[temp_data['smiles'].isin(val_molecules)]
val_df = val_df.drop_duplicates()
self.val_molecules = np.array(val_df['smiles'])
self.val_features = np.array(__dropCol__(df=val_df, target_name=self.target_name))
self.val_target = np.array(val_df[self.target_name])
self.n_val = self.val_features.shape[0]
if self.algorithm != "cnn" and scaler is not None:
self.train_features = scaler_method.fit_transform(self.train_features)
self.test_features = scaler_method.transform(self.test_features)
if val > 0:
self.val_features = scaler_method.transform(self.val_features)
elif self.algorithm == "cnn" and scaler is not None:
        # Can scale 1d, 2d and 3d data
self.train_features = scaler_method.fit_transform(self.train_features.reshape(-1,
self.train_features.shape[
-1])).reshape(
self.train_features.shape)
self.test_features = scaler_method.transform(self.test_features.reshape(-1,
self.test_features.shape[-1])).reshape(
self.test_features.shape)
if val > 0:
self.val_features = scaler_method.transform(self.val_features.reshape(-1,
self.val_features.shape[-1])).reshape(
self.val_features.shape)
ptrain = self.n_train / self.n_tot * 100
#
ptest = self.n_test / self.n_tot * 100
#
print()
# print(
# 'Dataset of {} points is split into training ({:.1f}%), validation ({:.1f}%), and testing ({:.1f}%).'.format(
# self.n_tot, ptrain, pval, ptest))
self.in_shape = self.feature_array.shape[1]
    # Logic to separate data into test/train/val sets
def __fetch_set__(smiles):
if smiles in self.test_molecules:
return 'test'
elif smiles in self.train_molecules:
return 'train'
else:
return 'val'
self.data['in_set'] = self.data['smiles'].apply(__fetch_set__)
cols = list(self.data.columns)
cols.remove('in_set')
self.data = self.data[['in_set', *cols]]
# return train_features, test_features, val_features, train_target, test_target, val_target, feature_list
    # Print the data shape distribution (comment out to silence).
print('Total Feature Shape:', self.feature_array.shape)
print('Total Target Shape', self.target_array.shape)
print()
print('Training Features Shape:', self.train_features.shape)
print('Training Target Shape:', self.train_target.shape)
print()
print('Test Features Shape:', self.test_features.shape)
print('Test Target Shape:', self.test_target.shape)
print()
# if val > 0.0:
# print('Val Features Shape:', self.val_features.shape)
# print('Val Target Shape:', self.val_target.shape)
# print("Train:Test:Val -->", np.round(self.train_features.shape[0] / self.feature_array.shape[0] * 100, 1), ':',
# np.round(self.test_features.shape[0] / self.feature_array.shape[0] * 100, 1), ":",
# np.round(self.val_features.shape[0] / self.feature_array.shape[0] * 100, 1))
# else:
# print('Train:Test -->', np.round(self.train_features.shape[0] / self.feature_array.shape[0] * 100, 1), ':',
# np.round(self.test_features.shape[0] / self.feature_array.shape[0] * 100, 1)) |
py | b403264f8ac0e127cdc0fe073092124d46963acb | from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.constants import MAX_TAG_KEY_LENGTH, MAX_TAG_VALUE_LENGTH
from sentry.db.models import (
Model,
BoundedPositiveIntegerField,
GzippedDictField,
BaseManager,
sane_repr,
)
class TagValue(Model):
"""
Stores references to available filters.
"""
__core__ = False
project_id = BoundedPositiveIntegerField(db_index=True, null=True)
key = models.CharField(max_length=MAX_TAG_KEY_LENGTH)
value = models.CharField(max_length=MAX_TAG_VALUE_LENGTH)
data = GzippedDictField(blank=True, null=True)
times_seen = BoundedPositiveIntegerField(default=0)
last_seen = models.DateTimeField(default=timezone.now, db_index=True, null=True)
first_seen = models.DateTimeField(default=timezone.now, db_index=True, null=True)
objects = BaseManager()
class Meta:
app_label = "sentry"
db_table = "sentry_filtervalue"
unique_together = (("project_id", "key", "value"),)
index_together = (("project_id", "key", "last_seen"),)
__repr__ = sane_repr("project_id", "key", "value")
def get_label(self):
from sentry import tagstore
return tagstore.get_tag_value_label(self.key, self.value)
|
py | b4032777c00761537c34649d72b60150b3b95a4f | import collections
import collections.abc
import re
import urllib.parse
from datetime import timedelta
from typing import Optional
import kombu
import kombu.common
import netaddr
from hades import constants
from hades.config import check, compute
from hades.config.base import Compute, Option
###################
# General options #
###################
class HADES_SITE_NAME(Option):
"""Name of the site"""
type = str
required = True
static_check = check.match(r'\A[a-z][a-z0-9-]*\Z', re.ASCII)
class HADES_SITE_NODE_ID(Option):
"""ID of the site node"""
type = str
required = True
static_check = check.match(r'\A[a-z][a-z0-9-]*\Z', re.ASCII)
class HADES_MAIL_DESTINATION_ADDRESSES(Option):
"""Automatic notification mails will be send to this address."""
type = collections.abc.Sequence
static_check = check.satisfy_all(
check.not_empty,
check.sequence(check.type_is(str))
)
class HADES_MAIL_SENDER_ADDRESS(Option):
"""Automatic notification mails will use this address as sender."""
type = str
class HADES_MAIL_SMTP_SERVER(Option):
"""Name or IP address of SMTP relay server."""
type = str
class HADES_REAUTHENTICATION_INTERVAL(Option):
"""RADIUS periodic reauthentication interval"""
default = timedelta(seconds=300)
type = timedelta
static_check = check.greater_than(timedelta(0))
class HADES_RETENTION_INTERVAL(Option):
"""RADIUS postauth and accounting data retention interval"""
default = timedelta(days=1)
type = timedelta
static_check = check.greater_than(timedelta(0))
class HADES_CONTACT_ADDRESSES(Option):
"""Contact addresses displayed on the captive portal page"""
type = collections.abc.Mapping
required = True
class HADES_USER_NETWORKS(Option):
"""
Public networks of authenticated users.
Dictionary of networks. Keys are unique identifiers of the network,
values are :class:`netaddr.IPNetwork` objects
"""
type = collections.abc.Mapping
required = True
static_check = check.satisfy_all(
check.not_empty,
check.mapping(value_check=check.network_ip)
)
class HADES_CUSTOM_IPTABLES_INPUT_RULES(Option):
"""Additional iptables rules for ``INPUT`` chain.
A list of valid ``iptables-restore`` rule lines with leading ``-A INPUT``.
"""
type = collections.abc.Sequence
default: collections.abc.Sequence = []
#############################
# Network namespace options #
#############################
class HADES_NETNS_MAIN_AUTH_LISTEN(Option):
default = netaddr.IPNetwork('172.18.0.0/31')
static_check = check.network_ip
runtime_check = check.address_exists
class HADES_NETNS_AUTH_LISTEN(Option):
default = netaddr.IPNetwork('172.18.0.1/31')
static_check = check.network_ip
runtime_check = check.address_exists
class HADES_NETNS_MAIN_UNAUTH_LISTEN(Option):
default = netaddr.IPNetwork('172.18.0.2/31')
static_check = check.network_ip
runtime_check = check.address_exists
class HADES_NETNS_UNAUTH_LISTEN(Option):
default = netaddr.IPNetwork('172.18.0.3/31')
static_check = check.network_ip
runtime_check = check.address_exists
######################
# PostgreSQL options #
######################
class HADES_POSTGRESQL_PORT(Option):
"""Port and socket name of the PostgresSQL database"""
default = 5432
type = int
static_check = check.between(1, 65535)
class HADES_POSTGRESQL_LISTEN(Option):
"""
A list of addresses PostgreSQL should listen on.
"""
default = (
netaddr.IPNetwork('127.0.0.1/8'),
)
type = collections.abc.Sequence
static_check = check.sequence(check.network_ip)
runtime_check = check.sequence(check.address_exists)
class HADES_POSTGRESQL_FOREIGN_SERVER_FDW(Option):
"""
Name of the foreign data wrapper extensions that should be used.
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = 'postgres_fdw'
type = str
class HADES_POSTGRESQL_FOREIGN_SERVER_OPTIONS(Option):
"""
Foreign data wrapper specific server options
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
type = collections.abc.Mapping
default: collections.abc.Mapping = {}
class HADES_POSTGRESQL_FOREIGN_SERVER_TYPE(Option):
"""
Foreign data wrapper specific server type
    If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is ignored.
"""
type = (str, type(None))
default: Optional[str] = None
class HADES_POSTGRESQL_FOREIGN_SERVER_VERSION(Option):
"""
Foreign data wrapper specific server version
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
type = (str, type(None))
default: Optional[str] = None
class HADES_POSTGRESQL_FOREIGN_TABLE_GLOBAL_OPTIONS(Option):
"""
Foreign data wrapper options that are set on each foreign table.
The options can be overridden with table specific options.
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default: collections.abc.Mapping = {}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_ALTERNATIVE_DNS_IPADDRESS_STRING(Option):
"""Whether the ``IPAddress`` column of the foreign ``alternative_dns`` table
has a string type"""
type = bool
default = False
class HADES_POSTGRESQL_FOREIGN_TABLE_ALTERNATIVE_DNS_OPTIONS(Option):
"""Foreign data wrapper options for the ``alternative_dns`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'alternative_dns',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_AUTH_DHCP_HOST_IPADDRESS_STRING(Option):
"""Whether the ``IPAddress`` column of the foreign ``auth_dhcp_host`` table
has a string type"""
type = bool
default = False
class HADES_POSTGRESQL_FOREIGN_TABLE_AUTH_DHCP_HOST_MAC_STRING(Option):
"""Whether the ``MAC`` column of the foreign ``auth_dhcp_host`` table has a
string type"""
type = bool
default = False
class HADES_POSTGRESQL_FOREIGN_TABLE_AUTH_DHCP_HOST_OPTIONS(Option):
"""Foreign data wrapper options for the ``auth_dhcp_host`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
"table_name": "auth_dhcp_host",
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_NAS_OPTIONS(Option):
"""Foreign data wrapper options for the ``nas`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'nas',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_RADCHECK_NASIPADDRESS_STRING(Option):
"""Whether the ``NASIPAddress`` column of the foreign ``radcheck`` table has
a string type."""
type = bool
default = False
class HADES_POSTGRESQL_FOREIGN_TABLE_RADCHECK_OPTIONS(Option):
"""Foreign data wrapper options for the ``radcheck`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'radcheck',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_RADGROUPCHECK_OPTIONS(Option):
"""Foreign data wrapper options for the ``radgroupcheck`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'radgroupcheck',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_RADGROUPREPLY_OPTIONS(Option):
"""Foreign data wrapper options for the ``radgroupreply`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'radgroupreply',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_RADREPLY_NASIPADDRESS_STRING(Option):
"""Whether the ``NASIPAddress`` column of the foreign ``radgroupcheck``
table has a string type"""
type = bool
default = False
class HADES_POSTGRESQL_FOREIGN_TABLE_RADREPLY_OPTIONS(Option):
"""Foreign data wrapper options for the ``radreply`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'radreply',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_FOREIGN_TABLE_RADUSERGROUP_NASIPADDRESS_STRING(Option):
"""Whether the ``NASIPAddress`` column of the foreign ``radgroupcheck``
table has a string type"""
type = bool
default = False
class HADES_POSTGRESQL_FOREIGN_TABLE_RADUSERGROUP_OPTIONS(Option):
"""Foreign data wrapper options for the ``radusergroup`` table
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
default = {
'table_name': 'radusergroup',
}
type = collections.abc.Mapping
class HADES_POSTGRESQL_USER_MAPPINGS(Option):
"""
User mappings from local database users to users on the foreign database
server
If :hades:option:`HADES_LOCAL_MASTER_DATABASE` is set, this option is
ignored.
"""
type = collections.abc.Mapping
static_check = check.user_mapping_for_user_exists(constants.DATABASE_USER)
########################
# Hades Portal options #
########################
class HADES_PORTAL_DOMAIN(Option):
"""Fully qualified domain name of the captive portal"""
default = 'captive-portal.agdsn.de'
type = str
class HADES_PORTAL_URL(Option):
"""URL of the landing page of the captive portal"""
default = compute.deferred_format("http://{}/", HADES_PORTAL_DOMAIN)
type = str
class HADES_PORTAL_NGINX_WORKERS(Option):
"""Number of nginx worker processes"""
default = 4
type = int
static_check = check.greater_than(0)
class HADES_PORTAL_SSL_CERTIFICATE(Option):
"""Path to the SSL certificate of the captive portal"""
default = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
runtime_check = check.file_exists
class HADES_PORTAL_SSL_CERTIFICATE_KEY(Option):
"""Path to the SSL certificate key of the captive portal"""
default = '/etc/ssl/private/ssl-cert-snakeoil.key'
runtime_check = check.file_exists
class HADES_PORTAL_UWSGI_WORKERS(Option):
"""Number of uWSGI worker processes"""
default = 4
type = int
static_check = check.greater_than(0)
###############################
# Authenticated users options #
###############################
class HADES_AUTH_DHCP_DOMAIN(Option):
"""DNS domain of authenticated users"""
default = 'users.agdsn.de'
type = str
class HADES_AUTH_DHCP_LEASE_LIFETIME(Option):
"""DHCP lease lifetime for authenticated users"""
default = timedelta(hours=24)
type = timedelta
static_check = check.greater_than(timedelta(0))
class HADES_AUTH_DHCP_LEASE_RENEW_TIMER(Option):
"""DHCP lease renew timer for authenticated users"""
type = timedelta
static_check = check.greater_than(timedelta(0))
@Compute.decorate
@staticmethod
def default(config):
"""Half of :hades:option:`HADES_AUTH_DHCP_LEASE_LIFETIME`"""
return 0.5 * config.HADES_AUTH_DHCP_LEASE_LIFETIME
class HADES_AUTH_DHCP_LEASE_REBIND_TIMER(Option):
"""DHCP lease rebind timer for authenticated users"""
type = timedelta
static_check = check.greater_than(timedelta(0))
@Compute.decorate
@staticmethod
def default(config):
"""0.875 of :hades:option:`HADES_AUTH_DHCP_LEASE_LIFETIME`"""
return 0.875 * config.HADES_AUTH_DHCP_LEASE_LIFETIME
class HADES_AUTH_LISTEN(Option):
"""
Sequence of IPs and networks to listen on for requests from authenticated
users.
The first IP in the sequence will be the main IP, e.g. it will be advertised
as IP of DNS server in DHCP responses.
"""
default = (
netaddr.IPNetwork('10.66.67.10/24'),
)
type = collections.abc.Sequence
static_check = check.satisfy_all(
check.not_empty,
check.sequence(check.network_ip),
)
runtime_check = check.sequence(check.address_exists)
class HADES_AUTH_INTERFACE(Option):
"""
Interface where requests of authenticated users arrive.
This interface will be moved into the auth namespace and IP addresses on
this interface are managed by the keepalived hades-auth VRRP instance.
This interface should therefore be managed completely by Hades. Aside from
its creation other tools, e.g. ``ifupdown``, ``systemd-networkd``, should
not interfere. No other daemons should listen on or bind to this interface.
"""
type = str
required = True
runtime_check = check.interface_exists
class HADES_AUTH_BRIDGE(Option):
"""Name of the auth bridge interface"""
type = str
default = "br-auth"
    static_check = check.match(r'\A[A-Za-z0-9_-]{1,15}\Z', re.ASCII)
class HADES_AUTH_NEXT_HOP(Option):
"""
The next hop, where packets to user networks (e.g. DHCP replies, DNS
replies) should be forwarded to.
"""
type = netaddr.IPNetwork
default = netaddr.IPNetwork('10.66.67.1/24')
static_check = check.network_ip
class HADES_AUTH_ALLOWED_TCP_PORTS(Option):
    """Allowed TCP destination ports for authenticated users"""
type = collections.abc.Sequence
default = (53, 80, 443, 9053)
class HADES_AUTH_ALLOWED_UDP_PORTS(Option):
    """Allowed UDP destination ports for authenticated users"""
type = collections.abc.Sequence
default = (53, 67, 9053)
class HADES_AUTH_DNS_ALTERNATIVE_IPSET(Option):
"""Name of ipset for alternative DNS resolving."""
type = str
default = "hades_alternative_dns"
class HADES_AUTH_DNS_ALTERNATIVE_ZONES(Option):
"""DNS zones that are transparently spoofed if alternative DNS is
enabled."""
type = collections.abc.Mapping
default: collections.abc.Mapping = {}
#################################
# Unauthenticated users options #
#################################
class HADES_UNAUTH_DHCP_LEASE_TIME(Option):
"""
DHCP lease time for unauth users
    This lease time should be set rather short, so that unauthenticated users will
    quickly obtain a new address once they become authenticated.
"""
default = timedelta(minutes=2)
type = timedelta
static_check = check.greater_than(timedelta(0))
class HADES_UNAUTH_INTERFACE(Option):
"""Interface attached to the unauth VLAN"""
type = str
required = True
runtime_check = check.interface_exists
class HADES_UNAUTH_BRIDGE(Option):
"""Name of the unauth bridge interface"""
type = str
default = "br-unauth"
    static_check = check.match(r'\A[A-Za-z0-9_-]{1,15}\Z', re.ASCII)
class HADES_UNAUTH_LISTEN(Option):
"""
    Sequence of IPs and networks to listen on for requests from unauthenticated users.
The first IP in the sequence will be the main IP, e.g. it will be advertised
as IP of DNS server in DHCP responses.
"""
default = (
netaddr.IPNetwork('10.66.0.1/19'),
)
type = collections.abc.Sequence
static_check = check.satisfy_all(
check.not_empty,
check.sequence(check.network_ip)
)
runtime_check = check.sequence(check.address_exists)
class HADES_UNAUTH_ALLOWED_TCP_PORTS(Option):
"""Allowed TCP destination ports for unauthenticated users"""
type = collections.abc.Sequence
default = (53, 80, 443)
class HADES_UNAUTH_ALLOWED_UDP_PORTS(Option):
"""Allowed UDP destination ports for unauthenticated users"""
type = collections.abc.Sequence
default = (53, 67)
class HADES_UNAUTH_CAPTURED_TCP_PORTS(Option):
"""
All traffic destined to these TCP ports is transparently redirected
(captured) to the unauth listen address of the site node
"""
type = collections.abc.Sequence
default = (53, 80, 443)
class HADES_UNAUTH_CAPTURED_UDP_PORTS(Option):
"""
All traffic destined to these UDP ports is transparently redirected
(captured) to the unauth listen address of the site node
"""
type = collections.abc.Sequence
default = (53,)
class HADES_UNAUTH_DHCP_RANGE(Option):
"""DHCP range for the unauth VLAN. Must be contained within the
:hades:option:`HADES_UNAUTH_LISTEN` network."""
default = netaddr.IPRange('10.66.0.10', '10.66.31.254')
type = netaddr.IPRange
static_check = check.ip_range_in_networks(HADES_UNAUTH_LISTEN)
class HADES_UNAUTH_WHITELIST_DNS(Option):
"""List of DNS names which are whitelisted for unauthenticated users.
"""
default = ()
type = collections.abc.Sequence
class HADES_UNAUTH_WHITELIST_IPSET(Option):
"""Name of ipset for whitelisted IPs.
"""
default = "hades_unauth_whitelist"
type = str
##################
# RADIUS options #
##################
class HADES_RADIUS_LISTEN(Option):
"""
Sequence of IPs and networks the RADIUS server is listening on.
"""
default = (
netaddr.IPNetwork('10.66.68.10/24'),
)
type = collections.abc.Sequence
static_check = check.satisfy_all(
check.not_empty,
check.sequence(check.network_ip)
)
runtime_check = check.sequence(check.address_exists)
class HADES_RADIUS_INTERFACE(Option):
"""Interface the RADIUS server is listening on"""
type = str
required = True
runtime_check = check.interface_exists
class HADES_RADIUS_AUTHENTICATION_PORT(Option):
"""RADIUS authentication port"""
type = int
default = 1812
class HADES_RADIUS_ACCOUNTING_PORT(Option):
"""RADIUS accounting port"""
type = int
default = 1813
class HADES_RADIUS_LOCALHOST_SECRET(Option):
"""Shared secret for the localhost RADIUS client"""
type = str
class HADES_RADIUS_DATABASE_FAIL_ACCEPT(Option):
"""Send ``Access-Accept`` packets if the RADIUS ``sql`` module fails"""
type = bool
default = True
class HADES_RADIUS_DATABASE_FAIL_REPLY_ATTRIBUTES(Option):
"""
Reply attributes that will be set in ``Access-Accept`` packets if the RADIUS
``sql`` module fails.
The attribute value must be specified in proper FreeRADIUS syntax. That
means that string replies should be enclosed in single quotes.
"""
type = collections.abc.Mapping
default = {
'Reply-Message': "'database_down'",
}
class HADES_RADIUS_UNKNOWN_USER(Option):
"""The ``User-Name``, that is used as fallback if the MAC address was not
found in the database."""
type = str
default = "unknown"
##########################
# Gratuitous ARP options #
##########################
class HADES_GRATUITOUS_ARP_INTERVAL(Option):
"""
Period in which gratuitous ARP requests are broadcasted to notify
#. clients of the MAC address of current master site node instance
#. clients switching from the auth to the unauth VLAN of the new gateway MAC
"""
type = timedelta
default = timedelta(seconds=1)
static_check = check.greater_than(timedelta(seconds=0))
################
# VRRP options #
################
class HADES_PRIORITY(Option):
"""
Priority of the site node instance.
The available instance with the highest priority becomes master.
"""
type = int
default = 100
static_check = check.between(1, 254)
class HADES_INITIAL_MASTER(Option):
"""Flag that indicates if the site node instance starts in master state"""
type = bool
default = False
class HADES_VRRP_INTERFACE(Option):
"""Interface for VRRP communication"""
type = str
runtime_check = check.interface_exists
class HADES_VRRP_BRIDGE(Option):
"""Interface name for VRRP bridge (created if necessary)"""
type = str
default = 'br-vrrp'
static_check = check.not_empty
class HADES_VRRP_LISTEN_AUTH(Option):
"""IP and network for VRRP communication (auth instance)"""
type = netaddr.IPNetwork
static_check = check.network_ip
runtime_check = check.address_exists
class HADES_VRRP_LISTEN_ROOT(Option):
"""IP and network for VRRP communication (root instance)"""
type = netaddr.IPNetwork
static_check = check.network_ip
runtime_check = check.address_exists
class HADES_VRRP_LISTEN_UNAUTH(Option):
"""IP and network for VRRP communication (unauth instance)"""
type = netaddr.IPNetwork
static_check = check.network_ip
runtime_check = check.address_exists
class HADES_VRRP_PASSWORD(Option):
"""
Shared secret to authenticate VRRP messages between site node instances.
"""
required = True
type = str
class HADES_VRRP_VIRTUAL_ROUTER_ID_AUTH(Option):
"""Virtual router ID used by Hades (auth instance)"""
type = int
default = 66
static_check = check.between(0, 255)
class HADES_VRRP_VIRTUAL_ROUTER_ID_ROOT(Option):
"""Virtual router ID used by Hades (root instance)"""
type = int
default = 67
static_check = check.between(0, 255)
class HADES_VRRP_VIRTUAL_ROUTER_ID_UNAUTH(Option):
"""Virtual router ID used by Hades (unauth instance)"""
type = int
default = 68
static_check = check.between(0, 255)
class HADES_VRRP_ADVERTISEMENT_INTERVAL(Option):
"""Interval between VRRP advertisements"""
type = timedelta
default = timedelta(seconds=5)
static_check = check.greater_than(timedelta(0))
class HADES_VRRP_PREEMPTION_DELAY(Option):
"""
Delay before a *MASTER* transitions to *BACKUP* when a node with a higher
priority comes online
"""
type = timedelta
default = timedelta(seconds=30)
static_check = check.between(timedelta(seconds=0), timedelta(seconds=1000))
################
# Test options #
################
class HADES_CREATE_DUMMY_INTERFACES(Option):
"""Create dummy interfaces if interfaces do not exist"""
type = bool
default = False
class HADES_LOCAL_MASTER_DATABASE(Option):
"""
Create and use a local “foreign” database.
"""
type = bool
default = False
class HADES_BRIDGE_SERVICE_INTERFACES(Option):
"""
Link the service interface of the auth and unauth network namespaces through
bridges and veth interfaces rather than moving the interface directly into
the network namespace.
This allows to attach other interfaces to the bridge to e.g. test DHCP.
"""
type = bool
default = False
#################
# Flask options #
#################
class FlaskOption(Option, abstract=True):
pass
class DEBUG(FlaskOption):
"""Flask debug mode flag"""
    default = False
type = bool
#######################
# Flask-Babel options #
#######################
class BABEL_DEFAULT_LOCALE(FlaskOption):
"""Default locale of the portal application"""
default = 'de_DE'
type = str
class BABEL_DEFAULT_TIMEZONE(FlaskOption):
"""Default timezone of the portal application"""
default = 'Europe/Berlin'
type = str
############################
# Flask-SQLAlchemy options #
############################
class SQLALCHEMY_DATABASE_URI(FlaskOption):
@Compute.decorate
@staticmethod
def default(config):
if 'postgresql' not in urllib.parse.uses_netloc:
urllib.parse.uses_netloc.append('postgresql')
if 'postgresql' not in urllib.parse.uses_query:
urllib.parse.uses_query.append('postgresql')
query = urllib.parse.urlencode({
'host': constants.pkgrunstatedir + '/database',
'port': config.HADES_POSTGRESQL_PORT,
'requirepeer': constants.DATABASE_USER,
'client_encoding': 'utf-8',
'connect_timeout': 5,
})
return urllib.parse.urlunsplit(('postgresql', '',
constants.DATABASE_NAME,
query, ''))
type = str
##################
# Celery options #
##################
class HADES_CELERY_WORKER_HOSTNAME(Option):
"""
Hostname of the hades-agent Celery worker.
"""
default = compute.deferred_format('{}.{}', HADES_SITE_NAME,
HADES_SITE_NODE_ID)
type = str
class HADES_CELERY_RPC_EXCHANGE(Option):
default = 'hades.agent.rpc'
type = str
class HADES_CELERY_RPC_EXCHANGE_TYPE(Option):
default = 'topic'
type = str
class HADES_CELERY_NOTIFY_EXCHANGE(Option):
default = 'hades.agent.notify'
type = str
class HADES_CELERY_NOTIFY_EXCHANGE_TYPE(Option):
default = 'topic'
type = str
class HADES_CELERY_NODE_QUEUE(Option):
default = compute.deferred_format('hades.{}.{}', HADES_SITE_NAME,
HADES_SITE_NODE_ID)
type = str
class HADES_CELERY_SITE_ROUTING_KEY(Option):
default = compute.equal_to(HADES_SITE_NAME)
type = str
class HADES_CELERY_NODE_ROUTING_KEY(Option):
default = compute.deferred_format('{}.{}', HADES_SITE_NAME,
HADES_SITE_NODE_ID)
type = str
class HADES_CELERY_STATE_DB(Option):
"""Path of Celery node state database"""
type = str
default = "{}/agent/state.db".format(constants.pkgrunstatedir)
class CeleryOption(Option, abstract=True):
pass
class BROKER_URL(CeleryOption):
type = str
class BROKER_CONNECTION_MAX_RETRIES(CeleryOption):
"""
Maximum number of retries before giving up re-establishing the
connection to the broker.
Set to zero to retry forever in case of longer partitions between sites
and the main database.
"""
default = 0
type = int
class CELERY_ENABLE_UTC(CeleryOption):
default = True
type = bool
class CELERY_DEFAULT_DELIVERY_MODE(CeleryOption):
default = 'transient'
type = str
class CELERY_QUEUES(CeleryOption):
@Compute.decorate
@staticmethod
def default(config):
"""
Declare two exchanges, one for RPCs and one for notifications.
RPCs return results and should therefore only be answered by a single
agent. Notifications have no results and are processed by potentially
multiple agents.
Each agent/site node has a single queue specific to this node. This
queue is bound to the RPC exchange with a node-specific routing key and
to the notify exchange with the site-specific, node-specific, and empty
routing key. The agent on a site node, where the root VRRP instance has
become MASTER, will also bind its queue to the RPC exchange with the
site-specific routing key and remove this binding as soon as the sites
leaves the MASTER state.
This setup ensures that RPC messages can be sent to a specific
agent/node, by using the node-specific routing key and to the agent on
the master by using the site-specific routing key.
Notifications can be sent to all agents/nodes by using the empty routing
key, to all agents/nodes of a site by using the site-specific routing
key, and to a specific node by using the node-specific routing key.
"""
rpc_exchange = kombu.Exchange(
config.HADES_CELERY_RPC_EXCHANGE,
config.HADES_CELERY_RPC_EXCHANGE_TYPE
)
notify_exchange = kombu.Exchange(
config.HADES_CELERY_NOTIFY_EXCHANGE,
config.HADES_CELERY_NOTIFY_EXCHANGE_TYPE
)
node_key = config.HADES_CELERY_NODE_ROUTING_KEY
site_key = config.HADES_CELERY_SITE_ROUTING_KEY
return (
kombu.Queue(config.HADES_CELERY_NODE_QUEUE, (
kombu.binding(rpc_exchange, routing_key=node_key),
kombu.binding(notify_exchange, routing_key=node_key),
kombu.binding(notify_exchange, routing_key=site_key),
kombu.binding(notify_exchange, routing_key=''),
), auto_delete=True, durable=False),
)
type = collections.abc.Sequence
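# Illustration (added; not part of the upstream configuration): given the bindings
# declared above, an RPC aimed at whichever node is currently MASTER of a site could
# be published with plain kombu (``broker_url``, ``payload`` and ``config`` are
# placeholders here):
#
#   with kombu.Connection(broker_url) as connection:
#       connection.Producer().publish(
#           payload,
#           exchange=config.HADES_CELERY_RPC_EXCHANGE,
#           routing_key=config.HADES_CELERY_SITE_ROUTING_KEY,
#       )
#
# A broadcast notification to every agent would instead use the notify exchange with
# the empty routing key.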
class CELERYD_PREFETCH_MULTIPLIER(CeleryOption):
type = int
default = 1
class CELERY_TIMEZONE(CeleryOption):
default = 'UTC'
type = str
class CELERY_DEFAULT_QUEUE(CeleryOption):
default = compute.equal_to(HADES_CELERY_NODE_QUEUE)
type = str
class CELERY_DEFAULT_ROUTING_KEY(CeleryOption):
default = compute.equal_to(HADES_CELERY_SITE_ROUTING_KEY)
type = str
class CELERY_DEFAULT_EXCHANGE(CeleryOption):
default = compute.equal_to(HADES_CELERY_RPC_EXCHANGE)
type = str
class CELERY_ACCEPT_CONTENT(CeleryOption):
default = ['json']
type = collections.abc.Sequence
class CELERY_EVENT_SERIALIZER(CeleryOption):
default = 'json'
type = str
class CELERY_RESULT_SERIALIZER(CeleryOption):
default = 'json'
type = str
class CELERY_TASK_SERIALIZER(CeleryOption):
default = 'json'
type = str
class CELERY_RESULT_BACKEND(CeleryOption):
default = 'rpc://'
type = str
class CELERY_RESULT_EXCHANGE(CeleryOption):
default = 'hades.result'
type = str
class CELERY_IMPORTS(CeleryOption):
default = ()
type = collections.abc.Sequence
class CELERY_TASK_RESULT_EXPIRES(CeleryOption):
default = timedelta(minutes=5)
type = timedelta
|
py | b403277db6258b902dfc59950f7bed93e8c71454 | from typing import List
from blspy import AugSchemeMPL, G2Element, PrivateKey
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.util.condition_tools import conditions_by_opcode, conditions_for_solution, pkm_pairs_for_conditions_dict
from tests.core.make_block_generator import GROUP_ORDER, int_to_public_key
from tests.block_tools import test_constants
class KeyTool(dict):
@classmethod
def __new__(cls, *args):
return dict.__new__(*args)
def add_secret_exponents(self, secret_exponents: List[int]) -> None:
for _ in secret_exponents:
self[bytes(int_to_public_key(_))] = _ % GROUP_ORDER
def sign(self, public_key: bytes, message_hash: bytes32) -> G2Element:
secret_exponent = self.get(public_key)
if not secret_exponent:
raise ValueError("unknown pubkey %s" % public_key.hex())
bls_private_key = PrivateKey.from_bytes(secret_exponent.to_bytes(32, "big"))
return AugSchemeMPL.sign(bls_private_key, message_hash)
    def signature_for_solution(self, coin_spend: CoinSpend, additional_data: bytes) -> G2Element:
signatures = []
err, conditions, cost = conditions_for_solution(
coin_spend.puzzle_reveal, coin_spend.solution, test_constants.MAX_BLOCK_COST_CLVM
)
assert conditions is not None
conditions_dict = conditions_by_opcode(conditions)
for public_key, message_hash in pkm_pairs_for_conditions_dict(
conditions_dict, coin_spend.coin.name(), additional_data
):
signature = self.sign(bytes(public_key), message_hash)
signatures.append(signature)
return AugSchemeMPL.aggregate(signatures)
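# Example usage (illustrative sketch of how this helper can be driven; not part of
# the original file):
#
#   key_tool = KeyTool()
#   key_tool.add_secret_exponents([7, 11])
#   signature = key_tool.sign(bytes(int_to_public_key(7)), bytes32(b"\x00" * 32))
#
# The resulting G2Element can then be aggregated into the signature of a spend bundle.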
|
py | b403281b16d8c815dace15b028fcc85d0fd5133e | import unittest2
from mahjonggscoring import Tile
class TestTile(unittest2.TestCase):
def test_wind(self):
tile = Tile("S")
self.assertEqual(tile.honor, "wind")
def test_wind_is_honor(self):
tile = Tile("S")
self.assertEqual(tile.tile_type, "honor")
def test_dragon(self):
tile = Tile("F")
self.assertEqual(tile.honor, "dragon")
def test_dragon_is_honor(self):
tile = Tile("F")
self.assertEqual(tile.tile_type, "honor")
def test_terminal_1(self):
tile = Tile("1●")
self.assertEqual(tile.tile_type, "terminal")
def test_terminal_9(self):
tile = Tile("9●")
self.assertEqual(tile.tile_type, "terminal")
def test_terminal_is_not_honor(self):
tile = Tile("1#")
self.assertRaises(AttributeError, lambda: tile.honor)
def test_suit_has_rank(self):
tile = Tile("5/")
self.assertEqual(tile.rank, "5")
def test_rank_has_suit(self):
tile = Tile("5/")
self.assertEqual(tile.suit, "bamboo")
def test_honor_has_no_rank(self):
tile = Tile("B")
self.assertRaises(AttributeError, lambda: tile.rank)
def test_honor_has_no_suit(self):
tile = Tile("B")
self.assertRaises(AttributeError, lambda: tile.suit)
def test_suited_tile_is_not_honor(self):
tile = Tile("3/")
self.assertRaises(AttributeError, lambda: tile.honor)
def test_name_suited(self):
tile = Tile("8#")
self.assertEqual(tile.name, "character 8")
def test_name_dragon(self):
tile = Tile("C")
self.assertEqual(tile.name, "red dragon")
def test_name_wind(self):
tile = Tile("E")
self.assertEqual(tile.name, "east wind")
if __name__ == '__main__':
unittest2.main() |
py | b4032846a081e366335e63a6f0f66c2096961858 | from .base_data_loader import *
from .base_model import *
from .base_pl import *
|
py | b403289544f024f6a69354c6ab7211ab786b538a | import zeit.cms.testing
class TestProductConfigIsolation(zeit.cms.testing.ZeitCmsTestCase):
def test_1_set_product_config(self):
import zope.app.appsetup.product
zope.app.appsetup.product._configs['zeit.cms'][
'isolated'] = 'i-am-isolated'
def test_2_second_test_should_not_see_changes_from_first_test(self):
import zope.app.appsetup.product
self.assertNotIn(
'isolated', zope.app.appsetup.product._configs['zeit.cms'])
|
py | b40329c9424d1470af754be63110de2ba4dc76e9 | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from petstore_api.exceptions import ApiAttributeError
class Category(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'id': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'id': 'id', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Category - a model defined in OpenAPI
Args:
Keyword Args:
name (str): defaults to "default-name" # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (int): [optional] # noqa: E501
"""
name = kwargs.get('name', "default-name")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Category - a model defined in OpenAPI
Args:
Keyword Args:
name (str): defaults to "default-name" # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (int): [optional] # noqa: E501
"""
name = kwargs.get('name', "default-name")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
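# Usage sketch (illustrative; not part of the generated code):
#
#   category = Category(name="dogs", id=1)
#   category.name  # -> "dogs"
#   category.id    # -> 1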
|
py | b4032a942cc3a65f620623d8cc6be92d1bfb35be | from __future__ import annotations
import requests
from .request import validate_path, onedrive_http_request
class OnedriveDriveItem:
def __init__(self, uri: str = None):
self.uri = uri
def __str__(self):
return self.uri
    # Find a file (or folder) by path
def find_file_by_path(self, path: str) -> OnedriveDriveItem:
path = validate_path(path)
if path == '/':
return OnedriveDriveItem(uri=self.uri)
        uri = f"{self.uri}:{path}:"  # if self.uri already ends with ':', the resulting '::' must be removed
uri = uri.replace('::', '')
return OnedriveDriveItem(uri=uri)
    # Find a file (or folder) by DriveItem id
def find_file_by_id(self, id: str) -> OnedriveDriveItem:
return OnedriveDriveItem(uri=f"{self.uri}/items/{id}")
    # Search within the current directory
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_search?view=odsp-graph-online
def search(self, keywords: str, fail_silently=False) -> requests.Response:
return onedrive_http_request(self.uri + f"/search(q='{keywords}')", fail_silently=fail_silently)
    # Get the file's metadata
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online
def get_metadata(self, fail_silently=False) -> requests.Response:
return onedrive_http_request(self.uri, fail_silently=fail_silently)
    # Get a large thumbnail
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_list_thumbnails#get-a-single-thumbnail
def get_single_thumbnail(self, thumb_id=0, size='large', fail_silently=False) -> str:
response = onedrive_http_request(self.uri + f'/thumbnails/{thumb_id}/{size}', fail_silently=fail_silently)
return response.json()['url']
    # List the contents of the folder
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_list_children?view=odsp-graph-online
def list_children(self, fail_silently=False) -> requests.Response:
return onedrive_http_request(self.uri + '/children', fail_silently=fail_silently)
    # Create a directory
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_post_children?view=odsp-graph-online
def create_directory(self,
dirname: str,
conflict_behavior: str = 'fail',
fail_silently=False) -> requests.Response:
assert conflict_behavior in ('fail', 'replace', 'rename')
return onedrive_http_request(self.uri + '/children', 'POST', {
"name": dirname,
"folder": {},
"@microsoft.graph.conflictBehavior": conflict_behavior
}, fail_silently=fail_silently)
    # Recursively create nested directories
    # On success, returns the metadata of the deepest directory, see:
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_post_children?view=odsp-graph-online
def create_directory_recursive(self, path: str) -> requests.Response:
        # Most of the time only the leaf directory needs to be created,
        # so start by trying to create the deepest directory and move upwards on failure;
        # once a level succeeds, walk back down and create the remaining levels
path_list = path.strip('/').split('/')
depth = len(path_list)
response = requests.Response()
response.status_code = 500
while not response.ok:
depth -= 1
if depth < 0:
break
cur_path = '/'.join(path_list[0:depth])
new_dir = path_list[depth]
response = self.find_file_by_path(cur_path).create_directory(new_dir, 'fail', fail_silently=True)
while depth < len(path_list) - 1:
depth += 1
cur_path = '/'.join(path_list[0:depth])
new_dir = path_list[depth]
response = self.find_file_by_path(cur_path).create_directory(new_dir, 'fail', fail_silently=True)
return response
    # Copy the file, optionally giving it a new file name
    # Fails if a name conflict occurs
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_copy?view=odsp-graph-online
def copy(self,
dest_dir_id: str,
new_filename: str = None,
fail_silently=False) -> requests.Response:
json = {"parentReference": {"id": dest_dir_id}}
if new_filename:
json['name'] = new_filename
return onedrive_http_request(self.uri + '/copy', 'POST', json, fail_silently=fail_silently)
    # Move the file, optionally giving it a new file name
    # Fails if a name conflict occurs
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_move?view=odsp-graph-online
def move(self,
dest_dir_id: str,
new_filename: str = None,
fail_silently=False) -> requests.Response:
json = {"parentReference": {"id": dest_dir_id}}
if new_filename:
json['name'] = new_filename
return onedrive_http_request(self.uri, 'PATCH', json, fail_silently=fail_silently)
    # Delete the file
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_delete?view=odsp-graph-online
def delete(self, fail_silently=False) -> requests.Response:
return onedrive_http_request(self.uri, 'DELETE', fail_silently=fail_silently)
    # Download the file
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_get_content?view=odsp-graph-online
def download(self, fail_silently=False) -> requests.Response:
return onedrive_http_request(self.uri + '/content', fail_silently=fail_silently)
    # Upload a file; the file contents are passed in `data`
    # The DriveItem should address the file's path; OneDrive will create or update that file
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online
def upload(self, data, conflict_behavior: str = 'fail', fail_silently=False) -> requests.Response:
        return onedrive_http_request(self.uri + f'/content?@microsoft.graph.conflictBehavior={conflict_behavior}',
'PUT', data=data, fail_silently=fail_silently)
    # Upload a file from a URL, similar to an offline download; only OneDrive personal supports this API
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_upload_url?view=odsp-graph-online
def upload_via_url(self,
source_url: str,
filename: str = None,
conflict_behavior: str = 'fail',
fail_silently=False) -> requests.Response:
assert conflict_behavior in ('fail', 'replace', 'rename')
json = {
"@microsoft.graph.sourceUrl": source_url,
"@microsoft.graph.conflictBehavior": conflict_behavior,
"file": {}
}
if filename:
            json['name'] = filename
return onedrive_http_request(self.uri + '/children', 'POST', json, extra_headers={"Prefer": "respond-async"},
fail_silently=fail_silently)
    # Upload a large file: after the session is created, PUT the data to the returned uploadUrl
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession?view=odsp-graph-online
def create_upload_session(self,
conflict_behavior: str = 'fail',
fail_silently=False) -> requests.Response:
assert conflict_behavior in ('fail', 'replace', 'rename')
return onedrive_http_request(self.uri + '/createUploadSession', 'POST', {
"item": {"@microsoft.graph.conflictBehavior": conflict_behavior}
}, fail_silently=fail_silently)
    # Create a sharing link for the file
# https://docs.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createlink?view=odsp-graph-online
def create_link(self, fail_silently=False) -> requests.Response:
return onedrive_http_request(self.uri + '/createLink', 'POST', {
"type": "view",
"scope": "anonymous"
}, fail_silently=fail_silently)
    # Create a sharing link, then rewrite it so that the file can be downloaded (after redirection)
def get_download_link(self, fail_silently=False) -> str:
response = self.create_link(fail_silently=fail_silently)
share_link = response.json()['link']['webUrl']
download_link = share_link.split('?')[0] + '?download=1'
return download_link
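# Usage sketch (illustrative; the root URI below is an assumption about how this class
# is instantiated elsewhere in the project):
#
#   root = OnedriveDriveItem(uri='/me/drive/root')
#   root.create_directory_recursive('/backup/photos')
#   item = root.find_file_by_path('/backup/photos/note.txt')
#   item.upload(b'hello', conflict_behavior='replace')
#   link = item.get_download_link()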
|
py | b4032b4420dc9a0a45619cca335ef29c72024301 | # -*- encoding: utf-8 -*-
# Copyright (c) 2018-2020, Eduardo Rodrigues and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/particle for details.
from __future__ import absolute_import, division, print_function
import sys
import pytest
from pytest import approx
from particle.particle.enums import Charge, Parity, SpinType, Status, Inv
from particle.particle import Particle
from particle.particle.particle import ParticleNotFound, InvalidParticle
from particle.pdgid import PDGID
from particle.pdgid.functions import _digit, Location
from particle import data
from hepunits import second, meter
def test_find():
# 1 match found
prepr = repr(Particle.find(name="gamma"))
assert prepr == '<Particle: name="gamma", pdgid=22, mass=0.0 MeV>'
# No match found
with pytest.raises(ParticleNotFound):
Particle.find(name="NotInPDT")
# Multiple matches found
with pytest.raises(RuntimeError):
Particle.find(name=lambda x: "Upsilon" in x)
def test_lambda_style_search():
particles = Particle.findall(lambda p: p.pdg_name == "p")
assert len(particles) == 4
assert 2212 in particles
assert -2212 in particles
assert 1000010010 in particles
assert -1000010010 in particles
    assert [p.pdgid for p in Particle.findall(lambda p: p.pdg_name == "p" and p > 0)] == [
        2212,
        1000010010,
    ]
    assert [p.pdgid for p in Particle.findall(lambda p: p.pdg_name == "p" and p < 0)] == [
        -2212,
        -1000010010,
    ]
def test_fuzzy_name_search():
particles = Particle.findall("p~")
assert len(particles) == 2
assert -2212 in particles
assert -1000010010 in particles
def test_keyword_style_search():
particles = Particle.findall(pdg_name="p")
assert len(particles) == 4
assert 2212 in particles
assert -2212 in particles
assert 1000010010 in particles
assert -1000010010 in particles
def test_keyword_style_search_with_except_catch():
particles = Particle.findall(ctau=float("inf"))
assert 11 in particles
particles = Particle.findall(name="p")
assert len(particles) == 2
assert 2212 in particles
assert 1000010010 in particles
    assert [p.pdgid for p in Particle.findall(pdg_name="p", particle=True)] == [
        2212,
        1000010010,
    ]
    assert [p.pdgid for p in Particle.findall(pdg_name="p", particle=False)] == [
        -2212,
        -1000010010,
    ]
    assert [p.pdgid for p in Particle.findall(name="p", particle=True)] == [2212, 1000010010]
    assert [p.pdgid for p in Particle.findall(name="p~", particle=False)] == [
        -2212,
        -1000010010,
    ]
def test_keyword_lambda_style_search():
particles = Particle.findall(pdg_name=lambda x: "p" == x)
assert len(particles) == 4
assert 2212 in particles
assert -2212 in particles
assert 1000010010 in particles
assert -1000010010 in particles
# Fuzzy name
particles = Particle.findall(name=lambda x: "p" in x)
assert len(particles) > 2
assert 2212 in particles
assert -2212 in particles
# Name and particle
assert len(Particle.findall(name=lambda x: x == "p", particle=True)) == 2
# Unit based comparison
assert 2212 in Particle.findall(lifetime=lambda x: x > 1 * second)
def test_complex_search():
# Find all strange mesons with c*tau > 1 meter
particles = Particle.findall(
lambda p: p.pdgid.is_meson
and p.pdgid.has_strange
and p.width > 0
and p.ctau > 1000.0,
particle=True,
)
assert len(particles) == 2 # K+ and KL0
assert 130 in particles
assert 321 in particles
# Find all strange anti-mesons with c*tau > 1 meter
particles = Particle.findall(
lambda p: p.pdgid.is_meson
and p.pdgid.has_strange
and p.width > 0
and p.ctau > 1000.0,
particle=False,
)
assert len(particles) == 1 # only the K-
assert -321 in particles
def test_pdg():
assert Particle.from_pdgid(211).pdgid == 211
with pytest.raises(InvalidParticle):
Particle.from_pdgid(0)
def test_pdg_convert():
p = Particle.from_pdgid(211)
assert isinstance(p.pdgid, PDGID)
assert int(p) == 211
assert PDGID(p) == 211
def test_sorting():
assert Particle.from_pdgid(211) < Particle.from_pdgid(311)
assert Particle.from_pdgid(211) < Particle.from_pdgid(-311)
def test_int_compare():
assert Particle.from_pdgid(211) > 0
assert Particle.from_pdgid(-211) < 0
assert Particle.from_pdgid(211) >= 0
assert Particle.from_pdgid(-211) <= 0
assert 0 < Particle.from_pdgid(211)
assert 0 > Particle.from_pdgid(-211)
assert 0 <= Particle.from_pdgid(211)
assert 0 >= Particle.from_pdgid(-211)
def test_string():
pi = Particle.from_string("pi+")
assert pi.pdgid == 211
with pytest.raises(ParticleNotFound):
Particle.from_string("unknown")
def test_fuzzy_string():
"""
The input name is not specific enough, in which case the search is done
by pdg_name after failing a match by name.
"""
    p = Particle.from_string("a(0)(980)")  # all 3 charge states match
assert p.pdgid == 9000111
def test_str():
pi = Particle.from_pdgid(211)
assert str(pi) == "pi+"
def test_rep():
pi = Particle.from_pdgid(211)
assert "pdgid=211" in repr(pi)
assert 'name="pi+"' in repr(pi)
assert "mass=139.57" in repr(pi)
def test_basic_props():
pi = Particle.from_pdgid(211)
assert pi.pdg_name == "pi"
assert pi.pdgid == 211
assert pi.three_charge == Charge.p
assert pi.charge == 1
def test_lifetime_props():
pi = Particle.from_pdgid(211)
assert pi.lifetime == approx(26.0327460625985) # in nanoseconds
assert pi.ctau == approx(7804.4209306) # in millimeters
def test_charge_consistency():
"""
The charge of a particle is presently stored in the CSV files
(see Particle.charge for the motivation), but it can also be retrieved
from the particle's PDG ID, *if* the latter is valid.
This test makes sure both numbers are consistent for all particles in the PDG table.
"""
for p in Particle.all():
assert p.three_charge == p.pdgid.three_charge
def test_P_consistency():
"""
The parity quantum number is stored in the (curated) data CSV files.
For unflavoured mesons it can be calculated as P = (-1)^(L+1),
and this relation can be checked against the CSV data.
Note: mesons with PDGIDs of the kind 9XXXXXX (N=9) are not experimentally
well-known particles and P is undefined.
"""
for p in Particle.all():
if not p.is_unflavoured_meson:
continue
elif _digit(p.pdgid, Location.N) == 9:
continue
elif p.pdgid == 22: # Special case of the photon
assert p.P == -1
else:
assert p.P == (-1) ** (p.L + 1)
def test_C_consistency():
"""
The charge conjugation parity is stored in the (curated) data CSV files.
For unflavoured mesons it can be calculated as C = (-1)^(L+S),
and this relation can be checked against the CSV data.
Note: mesons with PDGIDs of the kind 9XXXXXX (N=9) are not experimentally
well-known particles and C is undefined.
"""
for p in Particle.all():
if not p.is_unflavoured_meson:
continue
elif _digit(p.pdgid, Location.N) == 9:
continue
elif p.pdgid == 22: # Special case of the photon
assert p.C == -1
elif p.pdgid in [130, 310]: # Special case of the KS and KL
assert p.C == Parity.u
else:
assert p.C == (-1) ** (p.L + p.S)
checklist_describe = (
# Test undefined width value
[1, "Width = None"], # d quark
# Test print-out of zero width values
[22, "Width = 0.0 MeV"], # photon
# Test print-out of symmetric width errors
[413, u"Width = 0.0834 ± 0.0018 MeV"], # D*(2010)+
[443, u"Width = 0.093 ± 0.003 MeV"], # J/psi
# Test print-out of asymmetric width errors
[4222, "Width = 1.89 + 0.09 - 0.18 MeV"], # Sigma_c(2455)++
[23, u"Width = 2495.2 ± 2.3 MeV"], # H0
# Test print-out of symmetric lifetime errors
[5332, u"Lifetime = 1.65e-03 ± 1.8e-04 ns"], # Omega_b-
[211, u"Lifetime = 26.033 ± 0.005 ns"], # pion
# Test print-out of asymmetric lifetime errors
[4332, "Lifetime = 2.7e-04 + 3e-05 - 3e-05 ns"], # Omega_c^0
# Test particles with at present an upper limit on their width
[423, "Width < 2.1 MeV"], # D*(2007)0
[10431, "Width < 10.0 MeV"], # D(s0)*(2317)+
[20433, "Width < 6.3 MeV"], # D(s1)(2460)+
[4212, "Width < 4.6 MeV"], # Sigma(c)(2455)+
[4214, "Width < 17.0 MeV"], # Sigma(c)(2520)+
)
if sys.version_info < (3, 0):
for i, pair_vals in enumerate(checklist_describe):
checklist_describe[i][1] = pair_vals[1].replace(u"±", u"+/-")
@pytest.mark.parametrize("pid,description", checklist_describe)
def test_describe(pid, description):
particle = Particle.from_pdgid(pid)
assert description in particle.describe()
def test_default_table_loading():
assert Particle.table_names() == ("particle2019.csv", "nuclei2020.csv")
def test_default_table_loading_bis():
Particle.all()
p = Particle.from_pdgid(211)
assert p.table_loaded() is True
assert p.table_names() == ("particle2019.csv", "nuclei2020.csv")
def test_explicit_table_loading():
Particle.load_table(data.open_text(data, "particle2019.csv"))
    assert Particle.table_loaded() is True
assert len(Particle.table_names()) == 1
assert Particle.all() is not None
checklist_html_name = (
(22, "γ"), # photon
(1, "d"), # d quark
(-2, "u̅"), # u antiquark
(11, "e<SUP>-</SUP>"), # e-
(-13, "μ<SUP>+</SUP>"), # mu+
(-14, "ν̅<SUB>μ</SUB>"), # nu_mu_bar
(111, "π<SUP>0</SUP>"), # pi0
(-211, "π<SUP>-</SUP>"), # pi-
(-213, "ρ(770)<SUP>-</SUP>"), # rho(770)-
(20213, "a<SUB>1</SUB>(1260)<SUP>+</SUP>"), # a_1(1260)+
(321, "K<SUP>+</SUP>"), # K+
(130, "K<SUB>L</SUB><SUP>0</SUP>"), # K_L
(10321, "K<SUB>0</SUB><SUP>*</SUP>(1430)<SUP>+</SUP>"), # K(0)*(1430)+
(-10321, "K<SUB>0</SUB><SUP>*</SUP>(1430)<SUP>-</SUP>"), # K(0)*(1430)-
(10433, "D<SUB>s1</SUB>(2536)<SUP>+</SUP>"), # D_s1(2536)+
(-511, "B̅<SUP>0</SUP>"), # B0_bar
(443, "J/ψ(1S)"), # J/psi
(10441, "χ<SUB>c0</SUB>(1P)"), # chi_c0(1P)
(300553, "Υ(4S)"), # Upsilon(4S)
(2212, "p"), # proton
(-2112, "n̅"), # antineutron
(-2224, "Δ̅(1232)<SUP>--</SUP>"), # Delta_bar(1232)--
(3322, "Ξ<SUP>0</SUP>"), # Xi0
(-3322, "Ξ̅<SUP>0</SUP>"), # Xi0_bar
(-5122, "Λ̅<SUB>b</SUB><SUP>0</SUP>"), # Lb0_bar
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Requires Python 3")
@pytest.mark.parametrize("pid,html_name", checklist_html_name)
def test_html_name(pid, html_name):
particle = Particle.from_pdgid(pid)
assert particle.html_name == html_name
checklist_is_self_conjugate = (
(1, False), # d quark
(-13, False), # mu+
(111, True), # pi0
(211, False), # pi+
(-211, False), # pi-
(443, True), # J/psi
(300553, True), # Upsilon(4S)
(130, True), # K_L
(2212, False), # proton
(-2112, False), # antineutron
(3322, False), # Xi0
(-3322, False), # Xi0_bar
(-511, False), # B0_bar
(5122, False), # Lb0
)
@pytest.mark.parametrize("pid,is_self_conjugate", checklist_is_self_conjugate)
def test_is_self_conjugate(pid, is_self_conjugate):
particle = Particle.from_pdgid(pid)
assert particle.is_self_conjugate == is_self_conjugate
def test_self_conjugation_consistenty():
"""
The logic implemented in ``Particle.invert()`` and ``Particle.is_self_conjugate``
should be consistent. In other words, the inverse of
``self.anti_flag == Inv.ChargeInv and self.three_charge != Charge.o``
in ``Particle.invert()`` should match ``Particle.is_self_conjugate``.
"""
n_inconsistencies = 0
for p in Particle.all():
if (
p.anti_flag == Inv.ChargeInv and p.three_charge == Charge.o
) and not p.is_self_conjugate:
n_inconsistencies += 1
assert n_inconsistencies == 0
checklist_is_name_barred = (
(1, False), # d quark
(-2, True), # u antiquark
(11, False), # e-
(-13, False), # mu+
(111, False), # pi0
(211, False), # pi+
(-211, False), # pi-
(-213, False), # rho(770)-
(443, False), # J/psi
(300553, False), # Upsilon(4S)
(130, False), # K_L
(2212, False), # proton
(-2112, True), # antineutron
(3322, False), # Xi0
(-3322, True), # Xi0_bar
(-511, True), # B0_bar
(-5122, True), # Lb0_bar
)
@pytest.mark.parametrize("pid,has_bar", checklist_is_name_barred)
def test_is_name_barred(pid, has_bar):
particle = Particle.from_pdgid(pid)
assert particle.is_name_barred == has_bar
spin_type_classification = (
# Gauge bosons
(23, SpinType.Unknown), # Z0 - no parity defined for it
# Leptons aren't assigned a SpinType
(11, SpinType.NonDefined), # e-
# Only mesons are given a SpinType
# - Pseudo-scalars J^P = 0^-
(211, SpinType.PseudoScalar), # pi+
(310, SpinType.PseudoScalar), # K_S
(-421, SpinType.PseudoScalar), # D0_bar
# - Scalars J^P = 0^+
(9000211, SpinType.Scalar), # a_0(980)+
(9010221, SpinType.Scalar), # f_0(980)
# - Vector J^P = 1^-
(333, SpinType.Vector), # phi(1020)
(443, SpinType.Vector), # J/psi
# Axial-vector - J^P = 1^+
(20213, SpinType.Axial), # a_1(1260)+
(20313, SpinType.Axial), # K_1(1400)0
(10433, SpinType.Axial), # D_s1(2536)+
# Tensor - J^P = 2^+
(225, SpinType.Tensor), # f_2(1270)
(315, SpinType.Tensor), # K*_2(1430)0
# Pseudo-tensor - J^P = 2^-
(10225, SpinType.PseudoTensor), # eta_2(1645)
# J > 2 mesons
(329, SpinType.Unknown), # K*_4(2045)+
# Baryons aren't assigned a SpinType
(2212, SpinType.NonDefined), # proton
)
@pytest.mark.parametrize("pid,stype", spin_type_classification)
def test_spin_type(pid, stype):
particle = Particle.from_pdgid(pid)
assert particle.spin_type == stype
checklist_isospin = (
# Quarks
(1, 0.5), # d
# Gauge bosons
(22, None), # photon
(23, None), # Z0
# Leptons
(11, None), # e-
(-12, None), # nu(e)_bar
# Mesons
(211, 1.0), # pi+
(310, 0.5), # K_S
(-421, 0.5), # D0_bar
(333, 0.0), # phi(1020)
(443, 0.0), # J/psi
(521, 0.5), # B+
(531, 0.0), # Bs
# Baryons
(2212, 0.5), # proton
(2214, 1.5), # Delta+
)
@pytest.mark.parametrize("pid,isospin", checklist_isospin)
def test_isospin(pid, isospin):
particle = Particle.from_pdgid(pid)
assert particle.I == isospin
def test_default_particle():
p = Particle.empty()
assert repr(p) == '<Particle: name="Unknown", pdgid=0, mass=None>'
assert "Name: Unknown" in p.describe()
    assert p.mass is None
    assert p.width is None
assert p.spin_type == SpinType.NonDefined
assert p.programmatic_name == "Unknown"
assert p.status == Status.NotInPDT
def test_dump_table():
tbl = Particle.dump_table(
filter_fn=lambda p: p.pdgid.is_meson
and p.pdgid.has_strange
and p.ctau > 1 * meter,
exclusive_fields=["pdgid", "name"],
)
assert (
tbl
== " pdgid name\n------- ------\n 130 K(L)0\n 321 K+\n -321 K-"
)
tbl = Particle.dump_table(
filter_fn=lambda p: p.pdgid > 0
and p.pdgid.is_meson
and p.pdgid.has_strange
and p.pdgid.has_charm,
exclusive_fields=["name"],
n_rows=2,
tablefmt="html",
)
assert "<td>D(s)+ </td></tr>\n<tr><td>D(s)*+</td>" in tbl
ampgen_style_names = (
("b", 5),
("b~", -5),
("pi+", 211),
("pi-", -211),
("K~*0", -313),
("K*(892)bar0", -313),
("a(1)(1260)+", 20213),
("rho(1450)0", 100113),
("rho(770)0", 113),
("K(1)(1270)bar-", -10323),
# ("K(1460)bar-", -100321),
("K(2)*(1430)bar-", -325),
)
@pytest.mark.parametrize("name,pid", ampgen_style_names)
def test_ampgen_style_names(name, pid):
particle = Particle.from_string(name)
assert int(particle) == pid
assert particle.pdgid == pid
assert particle == pid
decfile_style_names = (
("s", 3),
("anti-b", -5),
("anti-K*0", -313),
("eta", 221),
("eta'", 331),
("a_0+", 9000211),
("a_00", 9000111),
("a_1-", -20213),
("a_10", 20113),
("f_0", 9010221),
("f'_0", 10221),
("f_1", 20223),
("f'_1", 20333),
("f'_2", 335),
("h_1", 10223),
("h'_1", 10333),
("rho+", 213),
("rho(2S)0", 100113),
("omega", 223),
("omega(1650)", 30223),
("Delta++", 2224),
("Delta+", 2214),
("Delta0", 2114),
("Delta-", 1114),
("D+", 411),
# ("D'_1+", 10413),
# ("anti-D'_10", -10423),
("D_2*+", 415),
("D_s+", 431),
("anti-B0", -511),
("B+", 521),
("B-", -521),
("B*+", 523),
("B*-", -523),
("N(1440)+", 12212),
("anti-N(1440)-", -12212),
("anti-Lambda_b0", -5122),
("Sigma_b+", 5222),
# ("Sigma_b0", 5212),
("Sigma_b-", 5112),
("anti-Sigma_b-", -5222),
# ("anti-Sigma_b0", -5212),
("anti-Sigma_b+", -5112),
("Sigma_b*+", 5224),
# ("Sigma_b*0", 5214),
("Sigma_b*-", 5114),
("anti-Sigma_b*-", -5224),
# ("anti-Sigma_b*0", -5214),
("anti-Sigma_b*+", -5114),
)
@pytest.mark.parametrize("name,pid", decfile_style_names)
def test_decfile_style_names(name, pid):
assert Particle.from_evtgen_name(name).pdgid == pid
|
py | b4032b91aec133057c9f077738fe941c49818d60 | from __future__ import print_function
def binary_search(lst, item, start, end):
if start == end:
if lst[start] > item:
return start
else:
return start + 1
if start > end:
return start
mid = (start + end) // 2
if lst[mid] < item:
return binary_search(lst, item, mid + 1, end)
elif lst[mid] > item:
return binary_search(lst, item, start, mid - 1)
else:
return mid
def insertion_sort(lst):
length = len(lst)
for index in range(1, length):
value = lst[index]
pos = binary_search(lst, value, 0, index - 1)
lst = lst[:pos] + [value] + lst[pos:index] + lst[index+1:]
return lst
def merge(left, right):
if not left:
return right
if not right:
return left
if left[0] < right[0]:
return [left[0]] + merge(left[1:], right)
return [right[0]] + merge(left, right[1:])
def tim_sort(lst):
runs, sorted_runs = [], []
length = len(lst)
new_run = [lst[0]]
sorted_array = []
for i in range(1, length):
if i == length - 1:
new_run.append(lst[i])
runs.append(new_run)
break
if lst[i] < lst[i - 1]:
if not new_run:
runs.append([lst[i - 1]])
new_run.append(lst[i])
            else:
                runs.append(new_run)
                # start the next run with the current element so it is not dropped
                new_run = [lst[i]]
else:
new_run.append(lst[i])
for run in runs:
sorted_runs.append(insertion_sort(run))
for run in sorted_runs:
sorted_array = merge(sorted_array, run)
return sorted_array
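# Quick sanity check (added for illustration; uses the same list as main() below):
# with the run handling above keeping every element, tim_sort should agree with
# the built-in sorted().
#
#     >>> tim_sort([5, 9, 10, 3, -4, 5, 178, 92, 46, -18, 0, 7])
#     [-18, -4, 0, 3, 5, 5, 7, 9, 10, 46, 92, 178]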
def main():
lst = [5,9,10,3,-4,5,178,92,46,-18,0,7]
sorted_lst = tim_sort(lst)
print(sorted_lst)
if __name__ == '__main__':
main()
|
py | b4032bfe3e5b833e0400e5c085a1453dec7103fa | # Copyright (c) SCYTHE, Inc. Use is subject to agreement.
import argparse
import shlex
import struct
import json
import ast
import os
from argparse import Namespace
# noinspection PyUnusedLocal
def create_parser(db, os="windows"):
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError(message)
epilog = ' scythe.proclist"\n'
parser = ArgumentParser(prog="proclist", description="Show running processes on an implant.",
epilog=epilog)
parser.add_argument("--ppid", help="PPID to filter by", required=False)
    parser.add_argument("--pid", help="Return |Name|PID|PPID|Mem|Nice|Priority|CPU|User|Cwd|Env|Cmdline|CreateTime|Exe| for a given process", required=False)
    parser.add_argument("--user", help="User to filter results by", required=False)
    parser.add_argument("--format", help="Return data as JSON or as ASCII table {'table','json'}", required=False, default="table")
return parser
# noinspection PyUnusedLocal
def succeeded(db, request,response):
result = False
if response and len(response) > 72:
content = response[72:].tobytes().decode('utf-8')
        if "Error: " not in content:
result = True
return result
# noinspection PyUnusedLocal
def tags(reserved, request, response):
"""
:param reserved: Reserved for future use
:param request: Original request sent to device
:param response: Reply from device for request
:return: return a list of strings
:rtype: list
"""
r = []
if len(request) > 0:
r = ["scythe", "att&ck", "att&ck-technique:T1057"]
return r
def usage(db, os):
"""Return the usage of this module as a string
:return str: Usage string for this module
"""
return create_parser(db, os).format_help()
# noinspection PyUnusedLocal
def create_message_body(db, command_line, campaign_name, endpoint_name):
"""Create a SCYTHE message body
:param db: used only to retrieve operating system
:param str command_line: command line string. If None is provided, command line will be received from sys.argv
:param campaign_name: ignored
:param endpoint_name: ignored
:return str: String with message body
"""
# You may call: db.get_setting_value("language")
# This will return a language id string such as: "en-US"
# You may use this result to present localized strings in the user interface.
# You may call: db.get_campaign_operating_system_name(campaign_name)
# This will return "windows" for Windows campaigns.
parser = create_parser(db, db.get_campaign_operating_system_name(campaign_name))
#if not command_line:
# raise ValueError("Error: --format argument is missing")
#else:
argv = shlex.split(command_line, posix=False)
args = parser.parse_args(argv)
dict_to_send = {}
#try:
# format = ast.literal_eval(args.format)
#except:
# raise ValueError(f"Error: --format could not be parsed with value: {args.format}!")
if args.format:
dict_to_send['format'] = str(args.format).lower()
else:
        dict_to_send['format'] = "table"
if args.ppid:
dict_to_send['ppid']= args.ppid
if args.pid:
dict_to_send['pid'] = args.pid
if args.user:
dict_to_send['user'] = args.user
return json.dumps(dict_to_send).encode('utf-8')
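# Illustrative example (not part of the original module): for a command line such
# as '--pid 1234 --user SYSTEM --format json', the payload built above would be
# roughly:
#
#     b'{"format": "json", "pid": "1234", "user": "SYSTEM"}'
#
# 'format' defaults to "table" when the flag is omitted; the other keys are only
# present when their arguments are supplied.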
# noinspection PyUnusedLocal
def report(db, request, response, format_):
"""Generate a report for a request and response for this module
:param db: ignored
:param request: Request to report on
:param response: Response to report on
:param format_: ignored, always pre
:return tuple(str, str, str): request report, response report, and format
"""
# size of the response message is response[64:72]
sz = struct.unpack("<Q", request[64:72].tobytes())[0]
request_contents = request[72:72 + sz].tobytes().decode("utf-8")
content = response[72:].tobytes().decode("utf-8")
return "\"%s\"" % request_contents, content, "pre"
def main():
pass
if __name__ == "__main__":
main()
|
py | b4032c286a0a6d52fb2aaefd3533d51911be8f83 | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
from typing import List, Mapping, Union
from .core import DataFrameWithInfo, FloatWithInfo, SeriesWithInfo, StringWithInfo, sort_risk
from gs_quant.base import InstrumentBase, PricingKey
def __dataframe_handler(field: str, mappings: tuple, result: List, pricing_key: PricingKey) -> DataFrameWithInfo:
components = []
for date_key, date_result in zip(pricing_key, result):
records = [{k: datum[v] for k, v in mappings} for datum in date_result[field]]
df = pd.DataFrame.from_records(records)
df = sort_risk(df, tuple(k for k, _ in mappings))
components.append(DataFrameWithInfo(date_key, df, unit=date_result.get('unit')))
return DataFrameWithInfo.compose(components, pricing_key) if len(pricing_key) > 1 else components[0]
def __double_handler(field: str, result: List, pricing_key: PricingKey) -> Union[FloatWithInfo, SeriesWithInfo]:
components = [FloatWithInfo(k, r.get(field, float('nan')), r.get('unit')) for k, r in zip(pricing_key, result)]
return FloatWithInfo.compose(components, pricing_key) if len(pricing_key) > 1 else components[0]
def __string_handler(field: str, result: List, pricing_key: PricingKey) -> Union[StringWithInfo, SeriesWithInfo]:
components = [StringWithInfo(k, r.get(field)) for k, r in zip(pricing_key, result)]
return StringWithInfo.compose(components, pricing_key) if len(pricing_key) > 1 else components[0]
def cashflows_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase) -> DataFrameWithInfo:
mappings = (
('payment_date', 'payDate'),
('accrual_start_date', 'accStart'),
('accrual_end_date', 'accEnd'),
('payment_amount', 'payAmount'),
('notional', 'notional'),
('payment_type', 'paymentType'),
('floating_rate_option', 'index'),
('floating_rate_designated_maturity', 'indexTerm'),
('day_count_fraction', 'dayCountFraction'),
('spread', 'spread'),
('rate', 'rate'),
('discount_factor', 'discountFactor')
)
for r in result:
for cashflow in r['cashflows']:
for field in ('payDate', 'setDate', 'accStart', 'accEnd'):
value = cashflow.get(field)
date = dt.date.fromisoformat(value) if value else dt.date.max
cashflow[field] = date
return __dataframe_handler('cashflows', mappings, result, pricing_key)
def error_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase)\
-> Union[StringWithInfo, SeriesWithInfo]:
components = [StringWithInfo(k, r.get('errorString')) for k, r in zip(pricing_key, result)]
return StringWithInfo.compose(components, pricing_key) if len(pricing_key) > 1 else components[0]
def leg_definition_handler(result: List, pricing_key: PricingKey, instrument: InstrumentBase)\
-> Union[InstrumentBase, Mapping[dt.date, InstrumentBase]]:
instruments_by_date = {}
for date_key, field_values in zip(pricing_key, result):
new_instrument = instrument.from_dict(field_values)
new_instrument.unresolved = instrument
new_instrument.name = instrument.name
new_instrument.resolution_key = date_key
instruments_by_date[date_key.pricing_market_data_as_of[0].pricing_date] = new_instrument
return instruments_by_date if len(pricing_key) > 1 else next(iter(instruments_by_date.values()))
def message_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase)\
-> Union[StringWithInfo, SeriesWithInfo]:
return __string_handler('message', result, pricing_key)
def number_and_unit_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase)\
-> Union[FloatWithInfo, SeriesWithInfo]:
return __double_handler('value', result, pricing_key)
def required_assets_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase):
mappings = (('mkt_type', 'type'), ('mkt_asset', 'asset'))
return __dataframe_handler('requiredAssets', mappings, result, pricing_key)
def risk_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase)\
-> Union[FloatWithInfo, SeriesWithInfo]:
return __double_handler('val', result, pricing_key)
def risk_by_class_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase)\
-> Union[FloatWithInfo, SeriesWithInfo]:
sum_result = []
for date_result in result:
if date_result['$type'] == 'Error':
return error_handler(date_result)
sum_result.append({'unit': date_result.get('unit'), 'val': sum(date_result['values'])})
return __double_handler('val', sum_result, pricing_key)
def risk_vector_handler(result: List, pricing_key: PricingKey, _instrument: InstrumentBase)\
-> Union[DataFrameWithInfo, StringWithInfo]:
for date_result in result:
if date_result['$type'] == 'Error':
return error_handler(date_result)
for points, value in zip(date_result['points'], date_result['asset']):
points.update({'value': value})
mappings = (
('mkt_type', 'type'),
('mkt_asset', 'asset'),
('mkt_class', 'class_'),
('mkt_point', 'point'),
('value', 'value')
)
return __dataframe_handler('points', mappings, result, pricing_key)
result_handlers = {
'Error': error_handler,
'IRPCashflowTable': cashflows_handler,
'LegDefinition': leg_definition_handler,
'Message': message_handler,
'NumberAndUnit': number_and_unit_handler,
'RequireAssets': required_assets_handler,
'Risk': risk_handler,
'RiskByClass': risk_by_class_handler,
'RiskVector': risk_vector_handler
}
|
py | b4032cae737afa2aa1f2c933b98f91c35b7201bc | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import marshal
import logging
from winsys import exc, ipc
class MailslotHandler(logging.Handler):
"""A logging-compatible handler which will write to a named
mailslot. The data is marshalled before being sent which means
that only Python primitives may be sent, but allows, eg, None
to be used as a sentinel value.
"""
def __init__(self, mailslot_name):
logging.Handler.__init__(self)
self.mailslot_name = mailslot_name
def put(self, msg):
ipc.mailslot(self.mailslot_name).put(marshal.dumps(msg))
def emit(self, record):
self.put(self.format(record))
def close(self):
try:
self.put(None)
except exc.x_not_found:
pass
class PermanentMailslotHandler(MailslotHandler):
"""Subclass the MailslotHandler but take no action on closedown.
This is intended to be used when the receiving mailslot is running
permanently so shouldn't be closed when the logging process finishes.
"""
def close(self):
return
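# Receiver-side sketch (added for illustration): the handler above marshals each
# formatted record and sends a marshalled None as a shutdown sentinel from
# close(). The loop below assumes ipc.mailslot(name) can be read with a
# Queue-like get(), which is how winsys documents mailslot reading; treat that
# call as an assumption.
#
# def listen(mailslot_name):
#     slot = ipc.mailslot(mailslot_name)
#     while True:
#         message = marshal.loads(slot.get())
#         if message is None:  # sentinel written by MailslotHandler.close()
#             break
#         print(message)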
if __name__ == '__main__':
import sys
import subprocess
import logging
import time
import uuid
mailslot_name = str(uuid.uuid1())
subprocess.Popen([sys.executable, "extras/mailslot_listener.pyw", mailslot_name])
time.sleep(1)
logger = logging.getLogger(mailslot_name)
logger.setLevel(logging.DEBUG)
logger.addHandler(MailslotHandler(mailslot_name))
logger.debug("DEBUG")
logger.info("INFO")
logger.warn("WARN")
logger.error("ERROR")
raw_input("Press enter...") |
py | b4032d23f344df69c022a3d89c531ef26e4a5cc3 | # Code for "ActionCLIP: A New Paradigm for Action Recognition"
# Mengmeng Wang, Jiazheng Xing, Yong Liu
#
# Built on top of official implementation at https://github.com/sallymmx/ActionCLIP
#
# Modifications by Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from towhee.models import clip
def text_aug(word):
augs = [f"a photo of action {word}", f"a picture of action {word}", f"Human action of {word}",
f"{word}, an action", f"{word} this is an action", f"{word}, a video of action",
f"Playing action of {word}", f"{word}", f"Playing a kind of action, {word}",
f"Doing a kind of action, {word}", f"Look, the human is {word}",
f"Can you recognize the action of {word}?", f"Video classification of {word}", f"A video of {word}",
f"The man is {word}", f"The woman is {word}"]
return augs
def text_prompt(labels):
text_dict = {}
num_txt_augs = len(text_aug(""))
txt_augs = [text_aug(c) for c in labels]
for i in range(num_txt_augs):
vals = [clip.tokenize(augs[i]) for augs in txt_augs]
text_dict[i] = torch.cat(vals)
classes = torch.cat([v for k, v in text_dict.items()])
return classes, num_txt_augs, text_dict
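# Usage sketch (added for illustration; the label list is hypothetical): text_aug
# defines 16 prompt templates, so text_prompt stacks 16 * len(labels) tokenized
# prompts, and text_dict[i] holds template i applied to every label.
#
#     classes, num_augs, text_dict = text_prompt(["running", "jumping"])
#     # num_augs == 16, classes.shape[0] == 16 * 2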
|
py | b4032e01781fd8e88746b20b47cfb10416b990bc | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from app.models import User,Post,Comment
class RegistrationForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class PostForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
content = TextAreaField('Content', validators=[DataRequired()])
submit = SubmitField('Post')
class RequestResetForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
class CommentForm(FlaskForm):
content = TextAreaField('Content', validators=[DataRequired()])
submit = SubmitField('Comment') |
py | b4032ecdeef73ca64300210ddc4a81eb0abe268d | # Generated by Django 2.1.7 on 2019-05-06 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dailies', '0003_auto_20190506_1733'),
]
operations = [
migrations.AlterField(
model_name='honkaiimpactdaily',
name='emoji',
field=models.CharField(max_length=200, unique=True),
),
]
|
py | b4032f0d509b527f02146d1bdbfdc013cad47dba | # -*- coding: utf-8 -*-
"""Network related utility tools."""
import logging
from typing import Dict
import numpy as np
import torch
from typing import List
def to_device(m, x):
"""Send tensor into the device of the module.
Args:
m (torch.nn.Module): Torch module.
x (Tensor): Torch tensor.
Returns:
Tensor: Torch tensor located in the same place as torch module.
"""
if isinstance(m, torch.nn.Module):
device = next(m.parameters()).device
elif isinstance(m, torch.Tensor):
device = m.device
else:
        raise TypeError(
            "Expected torch.nn.Module or torch.Tensor, " f"but got: {type(m)}"
        )
return x.to(device)
def pad_list(xs: List[torch.Tensor], pad_value: float):
"""Perform padding for the list of tensors.
Args:
xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
pad_value (float): Value for padding.
Returns:
Tensor: Padded tensor (B, Tmax, `*`).
Examples:
>>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
>>> x
[tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
>>> pad_list(x, 0)
tensor([[1., 1., 1., 1.],
[1., 1., 0., 0.],
[1., 0., 0., 0.]])
"""
n_batch = len(xs)
max_len = max([x.size(0) for x in xs])
sizes = [n_batch, max_len] + list(xs[0].size()[1:])
pad = xs[0].new_full(sizes,pad_value)
for i in range(n_batch):
pad[i, : xs[i].size(0)] = xs[i]
return pad
def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
"""Make mask tensor containing indices of padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
Tensor: Mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_pad_mask(lengths)
masks = [[0, 0, 0, 0 ,0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 1],
[0, 0, 0, 1]],
[[0, 0, 1, 1],
[0, 0, 1, 1]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_pad_mask(lengths, xs, 1)
tensor([[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
>>> make_pad_mask(lengths, xs, 2)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
"""
if length_dim == 0:
raise ValueError("length_dim cannot be 0: {}".format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if maxlen is None:
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
else:
assert xs is None
assert maxlen >= int(max(lengths))
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
# ind = (:, None, ..., None, :, , None, ..., None)
ind = tuple(
slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
)
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of non-padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
ByteTensor: mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[1, 1, 1, 1 ,1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 0],
[1, 1, 1, 0]],
[[1, 1, 0, 0],
[1, 1, 0, 0]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_non_pad_mask(lengths, xs, 1)
tensor([[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
>>> make_non_pad_mask(lengths, xs, 2)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
"""
return ~make_pad_mask(lengths, xs, length_dim)
def mask_by_length(xs, lengths, fill=0):
"""Mask tensor according to length.
Args:
xs (Tensor): Batch of input tensor (B, `*`).
lengths (LongTensor or List): Batch of lengths (B,).
fill (int or float): Value to fill masked part.
Returns:
Tensor: Batch of masked input tensor (B, `*`).
Examples:
>>> x = torch.arange(5).repeat(3, 1) + 1
>>> x
tensor([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]])
>>> lengths = [5, 3, 2]
>>> mask_by_length(x, lengths)
tensor([[1, 2, 3, 4, 5],
[1, 2, 3, 0, 0],
[1, 2, 0, 0, 0]])
"""
assert xs.size(0) == len(lengths)
ret = xs.data.new(*xs.size()).fill_(fill)
for i, l in enumerate(lengths):
ret[i, :l] = xs[i, :l]
return ret
def th_accuracy(pad_outputs, pad_targets, ignore_label):
"""Calculate accuracy.
Args:
pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
pad_targets (LongTensor): Target label tensors (B, Lmax, D).
ignore_label (int): Ignore label id.
Returns:
float: Accuracy value (0.0 - 1.0).
"""
pad_pred = pad_outputs.view(
pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)
).argmax(2)
mask = pad_targets != ignore_label
numerator = torch.sum(
pad_pred.masked_select(mask) == pad_targets.masked_select(mask)
)
denominator = torch.sum(mask)
return float(numerator) / float(denominator)
def to_torch_tensor(x):
"""Change to torch.Tensor or ComplexTensor from numpy.ndarray.
Args:
x: Inputs. It should be one of numpy.ndarray, Tensor, ComplexTensor, and dict.
Returns:
Tensor or ComplexTensor: Type converted inputs.
Examples:
>>> xs = np.ones(3, dtype=np.float32)
>>> xs = to_torch_tensor(xs)
tensor([1., 1., 1.])
>>> xs = torch.ones(3, 4, 5)
>>> assert to_torch_tensor(xs) is xs
>>> xs = {'real': xs, 'imag': xs}
>>> to_torch_tensor(xs)
ComplexTensor(
Real:
tensor([1., 1., 1.])
Imag;
tensor([1., 1., 1.])
)
"""
# If numpy, change to torch tensor
if isinstance(x, np.ndarray):
if x.dtype.kind == "c":
# Dynamically importing because torch_complex requires python3
from torch_complex.tensor import ComplexTensor
return ComplexTensor(x)
else:
return torch.from_numpy(x)
# If {'real': ..., 'imag': ...}, convert to ComplexTensor
elif isinstance(x, dict):
# Dynamically importing because torch_complex requires python3
from torch_complex.tensor import ComplexTensor
if "real" not in x or "imag" not in x:
raise ValueError("has 'real' and 'imag' keys: {}".format(list(x)))
# Relative importing because of using python3 syntax
return ComplexTensor(x["real"], x["imag"])
# If torch.Tensor, as it is
elif isinstance(x, torch.Tensor):
return x
else:
error = (
"x must be numpy.ndarray, torch.Tensor or a dict like "
"{{'real': torch.Tensor, 'imag': torch.Tensor}}, "
"but got {}".format(type(x))
)
try:
from torch_complex.tensor import ComplexTensor
except Exception:
# If PY2
raise ValueError(error)
else:
# If PY3
if isinstance(x, ComplexTensor):
return x
else:
raise ValueError(error)
def get_subsample(train_args, mode, arch):
"""Parse the subsampling factors from the args for the specified `mode` and `arch`.
Args:
train_args: argument Namespace containing options.
mode: one of ('asr', 'mt', 'st')
arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')
Returns:
np.ndarray / List[np.ndarray]: subsampling factors.
"""
if arch == "transformer":
return np.array([1])
elif mode == "mt" and arch == "rnn":
# +1 means input (+1) and layers outputs (train_args.elayer)
subsample = np.ones(train_args.elayers + 1, dtype=np.int)
logging.warning("Subsampling is not performed for machine translation.")
logging.info("subsample: " + " ".join([str(x) for x in subsample]))
return subsample
elif (
(mode == "asr" and arch in ("rnn", "rnn-t"))
or (mode == "mt" and arch == "rnn")
or (mode == "st" and arch == "rnn")
):
subsample = np.ones(train_args.elayers + 1, dtype=np.int)
if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
ss = train_args.subsample.split("_")
for j in range(min(train_args.elayers + 1, len(ss))):
subsample[j] = int(ss[j])
else:
logging.warning(
"Subsampling is not performed for vgg*. "
"It is performed in max pooling layers at CNN."
)
logging.info("subsample: " + " ".join([str(x) for x in subsample]))
return subsample
elif mode == "asr" and arch == "rnn_mix":
subsample = np.ones(
train_args.elayers_sd + train_args.elayers + 1, dtype=np.int
)
if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
ss = train_args.subsample.split("_")
for j in range(
min(train_args.elayers_sd + train_args.elayers + 1, len(ss))
):
subsample[j] = int(ss[j])
else:
logging.warning(
"Subsampling is not performed for vgg*. "
"It is performed in max pooling layers at CNN."
)
logging.info("subsample: " + " ".join([str(x) for x in subsample]))
return subsample
elif mode == "asr" and arch == "rnn_mulenc":
subsample_list = []
for idx in range(train_args.num_encs):
subsample = np.ones(train_args.elayers[idx] + 1, dtype=np.int)
if train_args.etype[idx].endswith("p") and not train_args.etype[
idx
].startswith("vgg"):
ss = train_args.subsample[idx].split("_")
for j in range(min(train_args.elayers[idx] + 1, len(ss))):
subsample[j] = int(ss[j])
else:
logging.warning(
"Encoder %d: Subsampling is not performed for vgg*. "
"It is performed in max pooling layers at CNN.",
idx + 1,
)
logging.info("subsample: " + " ".join([str(x) for x in subsample]))
subsample_list.append(subsample)
return subsample_list
else:
raise ValueError("Invalid options: mode={}, arch={}".format(mode, arch))
def rename_state_dict(
old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]
):
"""Replace keys of old prefix with new prefix in state dict."""
# need this list not to break the dict iterator
old_keys = [k for k in state_dict if k.startswith(old_prefix)]
if len(old_keys) > 0:
logging.warning(f"Rename: {old_prefix} -> {new_prefix}")
for k in old_keys:
v = state_dict.pop(k)
new_k = k.replace(old_prefix, new_prefix)
state_dict[new_k] = v
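# Example (added for illustration, in the doctest style used elsewhere in this
# module): only keys matching the old prefix are rewritten, in place.
#
#     >>> sd = {"encoder.w": torch.zeros(1), "decoder.w": torch.zeros(1)}
#     >>> rename_state_dict("encoder.", "enc.", sd)
#     >>> sorted(sd)
#     ['decoder.w', 'enc.w']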
def get_activation(act):
"""Return activation function."""
# Lazy load to avoid unused import
from espnet.nets.pytorch_backend.conformer.swish import Swish
activation_funcs = {
"hardtanh": torch.nn.Hardtanh,
"tanh": torch.nn.Tanh,
"relu": torch.nn.ReLU,
"selu": torch.nn.SELU,
"swish": Swish,
}
return activation_funcs[act]()
|
py | b403307e238251cfe0c0f9adf49080f5c855cee3 | from distutils.core import setup
import py2exe
setup(console=['matplot.py'], requires=['requests', 'bs4', 'matplotlib', 'sklearn', 'numpy']) |
py | b40330ee96234643de6c280e70e7b600232b436d |
from mcfunction.versions.mc_1_8.clear import clear, ParsedClearCommand
from mcfunction.nodes import EntityNode
def test_clear():
parsed = clear.parse('clear')
parsed: ParsedClearCommand
assert parsed.target is None
assert str(parsed) == 'clear'
def test_clear_entity():
parsed = clear.parse('clear @e')
parsed: ParsedClearCommand
assert isinstance(parsed.target, EntityNode)
assert str(parsed) == 'clear @e'
def test_clear_item():
parsed = clear.parse('clear @e test:item')
parsed: ParsedClearCommand
assert parsed.item.namespace == 'test'
assert parsed.item.name == 'item'
assert str(parsed) == 'clear @e test:item'
def test_clear_data():
parsed = clear.parse('clear @e test:item data')
parsed: ParsedClearCommand
assert parsed.data.value == 'data'
assert str(parsed) == 'clear @e test:item data'
def test_clear_count():
parsed = clear.parse('clear @e test:item data 69')
parsed: ParsedClearCommand
assert parsed.count.value == 69
assert str(parsed) == 'clear @e test:item data 69'
|
py | b40330f21c7344e6d0ed01d9c8981f39cce396ef | n = int(input())
factorial = 1
while n > 1:
    factorial *= n
    n -= 1
print(factorial)
|
py | b40332c3bf0d47803fe595d7c59b313e078e98de | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'voice_recognize_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | b4033333f5883dff521cb00fc7850934363af70d | """Particle generator for Py-Climber"""
from src.particle import Particle
import random
class ParticleGenerator():
"""The ParticleGenerator class is responsible for creating and tracking Particle
objects which are just 2D rects of a given color. A calling object may specify
a callback to customize the particles generated, e.g. their velocities and color
"""
def __init__(self, screen, settings, color, x, y, generator_callback=None):
"""Init the position and color"""
self.screen = screen
self.screen_rect = self.screen.get_rect()
self.settings = settings
self.x = x
self.y = y
self.color = color
self.particles = []
self.active = False
self.active_frames = 0
self.frames_to_generate = 0
self.callback = generator_callback
def start(self, frames_to_generate):
"""Tells the generator to start generating particles"""
self.active = True
self.active_frames = 0
self.frames_to_generate = frames_to_generate
def stop(self):
"""Tells the generator to stop generating particles"""
self.active = False
self.active_frames = 0
# start() dictates the duration
self.frames_to_generate = 0
def update(self):
"""Update the position of all alive particles"""
# We always want to draw the particles, so unlike other sprites,
# the 'active' or 'on' property will control the generation instead
# This way when the generator stops, the existing particles will
# finish out their lives. If it controlled the drawing, particles
# in-flight would just vanish (or you would need additional logic
# in the drawing code)
if self.active:
self.generate_particles(self.settings.particle_gen_per_frame)
self.active_frames += 1
if self.active_frames > self.frames_to_generate:
self.stop()
# For any particles still alive, we need to update them, even if the
# generator is stopped. Once a particle is 'dead', remove it
for particle in self.particles:
particle.update()
if not particle.alive():
self.particles.remove(particle)
def generate_particles(self, number_of_new_particles):
"""Create a new particle at the generator's location and give it an initial velocity"""
# In the callback case the implementer controls it all, including the number
# create an empty list to hold the data
particle_data = []
if self.callback:
# We have a callback, so delegate all of the work....
particle_data = self.callback()
else:
# No callback, so make some random ones by default
for particle_index in range(0, number_of_new_particles):
new_data = (random.randint(-2, 2), random.randint(5, 20) * -1, (random.randint(0,255), random.randint(0,255), random.randint(0,255)))
particle_data.append(new_data)
# Callback or not, at this point we should have a list of particle data
for particle_info in particle_data:
# Create a new particle object
new_particle = Particle(self.screen, self.settings, self.x, self.y, particle_info[0], particle_info[1], random.randint(1, 4), particle_info[2])
# Add it to the list to track/draw
self.particles.append(new_particle)
def draw(self):
"""Draw all of the particles"""
# Since the are not pygame.sprites, can't just use the Group as with the blobs
# Just another way to do things
for particle in self.particles:
particle.draw()
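# Example callback (added for illustration; screen, settings, x and y below are
# placeholders): generate_particles() expects the callback to return a list of
# (x_velocity, y_velocity, color) tuples, one per particle to emit this frame.
#
# def red_burst_callback():
#     return [(random.randint(-1, 1), -random.randint(8, 15), (255, 0, 0))
#             for _ in range(3)]
#
# generator = ParticleGenerator(screen, settings, (255, 0, 0), x, y,
#                               generator_callback=red_burst_callback)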
|