{
"source": "jkonglab/Liver_Cancer_Segmentation",
"score": 2
}
#### File: HistoCAE/final_codes/liverImages_main_MultiRes_FCV2_OverlapData.py
```python
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="5" #model will be trained on GPU 0
import keras
import math
#from matplotlib import pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import gzip
from keras.models import Model
from keras.optimizers import RMSprop
from keras.layers import Input,Dense,Flatten,Dropout,merge,Reshape,Conv2D,MaxPooling2D,UpSampling2D,Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.models import Model,Sequential
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adadelta, RMSprop,SGD,Adam
from keras import regularizers
from keras import backend as K
from keras.utils import to_categorical
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Activation, concatenate
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler
from keras.layers import LeakyReLU, Lambda, Layer
from keras.preprocessing.image import ImageDataGenerator
import logging
logger = logging.getLogger(__name__)
import os
from keras.preprocessing import image as image_utils
from PIL import Image
from PIL import ImageFilter
import matplotlib.pyplot as plt
import numpy as np
#import cPickle
import pickle
from keras.layers import *
from keras_contrib.losses import DSSIMObjective
from keras import metrics
from keras.regularizers import l2
# 20x dataset
train_data_20x = np.load('multires/datasets_Overlap/datasets_20x_train/full_x.npy')
train_labels_20x = np.load('multires/datasets_Overlap/datasets_20x_train/full_y.npy')
val_data_20x = np.load('multires/datasets_Overlap/datasets_20x_val/full_x.npy')
val_labels_20x = np.load('multires/datasets_Overlap/datasets_20x_val/full_y.npy')
test_data_20x = np.load('multires/datasets_Overlap/datasets_20x_test/full_x.npy')
test_labels_20x = np.load('multires/datasets_Overlap/datasets_20x_test/full_y.npy')
# 10x dataset
train_data_10x = np.load('multires/datasets_Overlap/datasets_10x_train/full_x.npy')
train_labels_10x = np.load('multires/datasets_Overlap/datasets_10x_train/full_y.npy')
val_data_10x = np.load('multires/datasets_Overlap/datasets_10x_val/full_x.npy')
val_labels_10x = np.load('multires/datasets_Overlap/datasets_10x_val/full_y.npy')
test_data_10x = np.load('multires/datasets_Overlap/datasets_10x_test/full_x.npy')
test_labels_10x = np.load('multires/datasets_Overlap/datasets_10x_test/full_y.npy')
# 5x dataset
train_data_5x = np.load('multires/datasets_Overlap/datasets_5x_train/full_x.npy')
train_labels_5x = np.load('multires/datasets_Overlap/datasets_5x_train/full_y.npy')
val_data_5x = np.load('multires/datasets_Overlap/datasets_5x_val/full_x.npy')
val_labels_5x = np.load('multires/datasets_Overlap/datasets_5x_val/full_y.npy')
test_data_5x = np.load('multires/datasets_Overlap/datasets_5x_test/full_x.npy')
test_labels_5x = np.load('multires/datasets_Overlap/datasets_5x_test/full_y.npy')
# Shapes of training set
print("Training set (images) shape: {shape}".format(shape=train_data_20x.shape))
print("Validation dataset (images) shape: {shape}".format(shape=val_data_20x.shape))
# Shapes of test set
print("Test set (images) shape: {shape}".format(shape=test_data_20x.shape))
print(test_labels_20x)
#print(test_data)
train_data_20x.dtype, test_data_20x.dtype
# Create dictionary of target classes
label_dict = {
0: 'non-viable',
1: 'viable',
}
train_X_20x = train_data_20x
valid_X_20x = val_data_20x
train_ground_20x = train_labels_20x
valid_ground_20x = val_labels_20x
train_X_10x = train_data_10x
valid_X_10x = val_data_10x
train_ground_10x = train_labels_10x
valid_ground_10x = val_labels_10x
train_X_5x = train_data_5x
valid_X_5x = val_data_5x
train_ground_5x = train_labels_5x
valid_ground_5x = val_labels_5x
# The convolutional Autoencoder
batch_size = 64
epochs = 200
inChannel = 3
x, y = 256, 256 #128, 128
input_img = Input(shape = (x, y, inChannel))
input_img1 = Input(shape = (x, y, inChannel))
input_img2 = Input(shape = (x, y, inChannel))
input_img3 = Input(shape = (x, y, inChannel))
num_classes = 2
inner_dim = 2048 #1024 #512 #2048 #4096
dropout_rate = 0.5
lr= 0.0001
beta_1 = 0.05
def encoder(input_img):
    # encoder
    # input: 256 x 256 x 3 (as configured above)
    conv1 = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)  # 256 x 256 x 16
    conv1 = BatchNormalization()(conv1)
    conv1 = Conv2D(16, (3, 3), activation='relu', strides=(2, 2), padding='same')(conv1)  # 128 x 128 x 16
    conv1 = BatchNormalization()(conv1)
    #pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)  # 128 x 128 x 32
    conv2 = BatchNormalization()(conv2)
    conv2 = Conv2D(32, (3, 3), activation='relu', strides=(2, 2), padding='same')(conv2)  # 64 x 64 x 32
    conv2 = BatchNormalization()(conv2)
    #pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)  # 64 x 64 x 64
    conv3 = BatchNormalization()(conv3)
    conv3 = Conv2D(64, (3, 3), activation='relu', strides=(2, 2), padding='same')(conv3)  # 32 x 32 x 64
    conv3 = BatchNormalization()(conv3)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv3)  # 32 x 32 x 64
    conv4 = BatchNormalization()(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', strides=(2, 2), padding='same')(conv4)  # 16 x 16 x 64
    conv4 = BatchNormalization()(conv4)
    return conv4
def decoder(conv4):
    # decoder
    conv5 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)  # 16 x 16 x 64
    conv5 = BatchNormalization()(conv5)
    conv5 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv5)
    conv5 = BatchNormalization()(conv5)
    up1 = UpSampling2D((2, 2))(conv5)  # 32 x 32 x 64
    conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv6 = BatchNormalization()(conv6)
    conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv6)
    conv6 = BatchNormalization()(conv6)
    up2 = UpSampling2D((2, 2))(conv6)  # 64 x 64 x 64
    conv7 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)  # 64 x 64 x 32
    conv7 = BatchNormalization()(conv7)
    conv7 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv7)
    conv7 = BatchNormalization()(conv7)
    up3 = UpSampling2D((2, 2))(conv7)  # 128 x 128 x 32
    conv8 = Conv2D(16, (3, 3), activation='relu', padding='same')(up3)  # 128 x 128 x 16
    conv8 = BatchNormalization()(conv8)
    conv8 = Conv2D(16, (3, 3), activation='relu', padding='same')(conv8)
    conv8 = BatchNormalization()(conv8)
    up4 = UpSampling2D((2, 2))(conv8)  # 256 x 256 x 16
    decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(up4)  # 256 x 256 x 3
    return decoded
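# (Added note) The sigmoid output layer together with the mean-squared-error loss used
# below suggests the input images are expected to be scaled to the [0, 1] range.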
dec_ip = encoder(input_img) #, mask1, maks2, mask3, mask4, middle_dim, middle_tensor_shape
enco = Model(input_img, dec_ip)
enco.summary()
len(enco.layers)
########################
#20x autoencoder
autoencoder_20x = Model(input_img1, decoder(encoder(input_img1))) #, mask1, maks2, mask3, mask4, middle_dim, middle_tensor_shape))
#autoencoder_20x.summary()
autoencoder_20x.layers
len(autoencoder_20x.layers)
adam = Adam(
lr=lr, beta_1=beta_1
)
autoencoder_20x.compile(loss='mean_squared_error', optimizer = adam) #RMSprop())
######################
# 10x autoencoder
autoencoder_10x = Model(input_img2, decoder(encoder(input_img2))) #, mask1, maks2, mask3, mask4, middle_dim, middle_tensor_shape))
#autoencoder_10x.summary()
autoencoder_10x.layers
len(autoencoder_10x.layers)
adam = Adam(
lr=lr, beta_1=beta_1
)
autoencoder_10x.compile(loss='mean_squared_error', optimizer = adam)
#####################
# 5x autoencoder
autoencoder_5x = Model(input_img3, decoder(encoder(input_img3))) #, mask1, maks2, mask3, mask4, middle_dim, middle_tensor_shape))
#autoencoder_5x.summary()
autoencoder_5x.layers
len(autoencoder_5x.layers)
adam = Adam(
lr=lr, beta_1=beta_1
)
autoencoder_5x.compile(loss='mean_squared_error', optimizer = adam)
# reconstruction result from autoencoder
# 3 different reconstruction weights for 3 different resolutions
autoencoder_20x.load_weights('multires/autoencoder_bottleneck_withoutflatten_20x_V3_200epoch_OverlapData.h5')
autoencoder_10x.load_weights('multires/autoencoder_bottleneck_withoutflatten_10x_V3_200epoch_OverlapData.h5')
autoencoder_5x.load_weights('multires/autoencoder_bottleneck_withoutflatten_5x_V3_200epoch_OverlapData.h5')
score_20x = autoencoder_20x.evaluate(test_data_20x, test_data_20x, verbose=1)
print(score_20x)
score_10x = autoencoder_10x.evaluate(test_data_10x, test_data_10x, verbose=1)
print(score_10x)
score_5x = autoencoder_5x.evaluate(test_data_5x, test_data_5x, verbose=1)
print(score_5x)
#########################
# Segmenting the liver cancer images
# Change the labels from categorical to one-hot encoding
train_Y_one_hot_20x = to_categorical(train_labels_20x)
val_Y_one_hot_20x = to_categorical(val_labels_20x)
test_Y_one_hot_20x = to_categorical(test_labels_20x)
#test_Y_one_hot = test_Y_one_hot1[:100]
# Display the change for category label using one-hot encoding
print('Original label:', test_labels_20x[150])
print('After conversion to one-hot:', test_Y_one_hot_20x[150])
# Change the labels from categorical to one-hot encoding
train_Y_one_hot_10x = to_categorical(train_labels_10x)
val_Y_one_hot_10x = to_categorical(val_labels_10x)
test_Y_one_hot_10x = to_categorical(test_labels_10x)
#test_Y_one_hot = test_Y_one_hot1[:100]
# Display the change for category label using one-hot encoding
print('Original label:', test_labels_10x[150])
print('After conversion to one-hot:', test_Y_one_hot_10x[150])
# Change the labels from categorical to one-hot encoding
train_Y_one_hot_5x = to_categorical(train_labels_5x)
val_Y_one_hot_5x = to_categorical(val_labels_5x)
test_Y_one_hot_5x = to_categorical(test_labels_5x)
#test_Y_one_hot = test_Y_one_hot1[:100]
# Display the change for category label using one-hot encoding
print('Original label:', test_labels_5x[150])
print('After conversion to one-hot:', test_Y_one_hot_5x[150])
train_label_20x = train_Y_one_hot_20x
valid_label_20x = val_Y_one_hot_20x
#train_label = train_labels
#valid_label = val_labels
train_X_20x.shape,valid_X_20x.shape,train_label_20x.shape,valid_label_20x.shape
train_label_10x = train_Y_one_hot_10x
valid_label_10x = val_Y_one_hot_10x
#train_label = train_labels
#valid_label = val_labels
train_X_10x.shape,valid_X_10x.shape,train_label_10x.shape,valid_label_10x.shape
train_label_5x = train_Y_one_hot_5x
valid_label_5x = val_Y_one_hot_5x
#train_label = train_labels
#valid_label = val_labels
train_X_5x.shape,valid_X_5x.shape,train_label_5x.shape,valid_label_5x.shape
def fc(enco1, enco2, enco3):
# enco1 for 20x
x1 = Conv2D(128, (3, 3), activation='relu', strides=(2, 2), padding='same')(enco1)
x1 = BatchNormalization()(x1)
x1 = Conv2D(256, (3, 3), activation='relu', strides=(2, 2), padding='same')(x1)
x1 = BatchNormalization()(x1)
x1 = Conv2D(512, (3, 3), activation='relu', strides=(2, 2), padding='same')(x1)
x1 = BatchNormalization()(x1)
#x1 = Conv2D(512, (3, 3), activation='relu', strides=(2, 2), padding='same')(x1)
#x1 = BatchNormalization()(x1)
    # enco2 for 10x
x2 = Conv2D(128, (3, 3), activation='relu', strides=(2, 2), padding='same')(enco2)
x2 = BatchNormalization()(x2)
x2 = Conv2D(256, (3, 3), activation='relu', strides=(2, 2), padding='same')(x2)
x2 = BatchNormalization()(x2)
x2 = Conv2D(512, (3, 3), activation='relu', strides=(2, 2), padding='same')(x2)
x2 = BatchNormalization()(x2)
#x2 = Conv2D(512, (3, 3), activation='relu', strides=(2, 2), padding='same')(x2)
#x2 = BatchNormalization()(x2)
    # enco3 for 5x
x3 = Conv2D(128, (3, 3), activation='relu', strides=(2, 2), padding='same')(enco3)
x3 = BatchNormalization()(x3)
x3 = Conv2D(256, (3, 3), activation='relu', strides=(2, 2), padding='same')(x3)
x3 = BatchNormalization()(x3)
x3 = Conv2D(512, (3, 3), activation='relu', strides=(2, 2), padding='same')(x3)
x3 = BatchNormalization()(x3)
#x3 = Conv2D(512, (3, 3), activation='relu', strides=(2, 2), padding='same')(x3)
#x3 = BatchNormalization()(x3)
flat1 = Flatten()(x1)
flat2 = Flatten()(x2)
flat3 = Flatten()(x3)
res = concatenate([flat1, flat2, flat3], axis = -1)
den = Dense(512, activation='relu', kernel_regularizer=l2(0.01))(res)
den = BatchNormalization()(den)
den = Dropout(0.5)(den)
#den = Dense(256, activation='relu', kernel_regularizer=l2(0.01))(den)
#den = BatchNormalization()(den)
#den = Dropout(0.3)(den)
den = Dense(512, activation='relu', kernel_regularizer=l2(0.01))(den)
den = BatchNormalization()(den)
den = Dropout(0.5)(den)
out = Dense(num_classes, activation='softmax', kernel_regularizer=l2(0.01))(den)
#out = Dense(1, activation='sigmoid')(den)
return out
encode1 = encoder(input_img1)
encode2 = encoder(input_img2)
encode3 = encoder(input_img3)
full_model = Model([input_img1, input_img2, input_img3], fc(encode1, encode2, encode3))
full_model.summary()
len(full_model.layers)
# Load weights from reconstruction model
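# (Added note) The slicing below assumes that full_model.layers interleaves the three
# encoder branches: every third layer starting at index 3, 4 and 5 belongs to the 20x,
# 10x and 5x branch respectively, and layers 1-16 of each autoencoder form its encoder.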
for l1, l2 in zip(full_model.layers[3:54:3], autoencoder_20x.layers[1:17]):  # L[start:stop:step]
    l1.set_weights(l2.get_weights())
for l1, l2 in zip(full_model.layers[4:54:3], autoencoder_10x.layers[1:17]):
    l1.set_weights(l2.get_weights())
for l1, l2 in zip(full_model.layers[5:54:3], autoencoder_5x.layers[1:17]):
    l1.set_weights(l2.get_weights())
# learning rate schedule
def step_decay(epoch):
initial_lrate = 0.0001
drop = 0.5
epochs_drop = 10.0
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
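# Illustrative values for the schedule above: with initial_lrate = 1e-4, drop = 0.5 and
# epochs_drop = 10, epochs 0-8 use 1e-4, epochs 9-18 use 5e-5, epochs 19-28 use 2.5e-5,
# and so on. (Note that callbacks_list defined below is later overwritten by cp_cb and
# lr_decay before training starts.)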
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate]
#########################
# Classification with full training (no frozen encoder layers)
for layer in full_model.layers[0:54]:
layer.trainable = True
def on_epoch_end(epoch, logs=None):
print(K.eval(full_model.optimizer.lr))
# Train the Model
cp_cb = ModelCheckpoint(filepath = 'autoencoder_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
lr_decay = LearningRateScheduler(schedule=lambda epoch: lr * (0.9 ** epoch))
callbacks_list = [cp_cb, lr_decay]
full_model.compile(loss=keras.losses.categorical_crossentropy, optimizer= Adam(lr=lr), metrics=['accuracy'])
on_epoch_end(epoch=10, logs=None)
"""
logger.debug('Fitting model')
classify_train = full_model.fit_generator(datagen.flow(train_X, train_label, batch_size=batch_size),
validation_data=(valid_X, valid_label), steps_per_epoch=len(train_X) / batch_size,
epochs= epochs,
callbacks=[cp_cb],
verbose=1,
shuffle=True)
"""
classify_train = full_model.fit([train_X_20x,train_X_10x,train_X_5x], train_label_20x, batch_size=64,epochs=200,verbose=1,callbacks=callbacks_list,validation_data=([valid_X_20x,valid_X_10x,valid_X_5x], valid_label_20x))
accuracy = classify_train.history['acc']
val_accuracy = classify_train.history['val_acc']
loss = classify_train.history['loss']
val_loss = classify_train.history['val_loss']
epochs = range(len(accuracy))
fig = plt.figure()
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
fig.savefig('Train_Val_accuracy_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.png')
fig = plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
#plt.show()
fig.savefig('Train_Val_Loss_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.png')
full_model.load_weights('autoencoder_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.h5')
# Model Evaluation on the Test Set
test_eval = full_model.evaluate([test_data_20x, test_data_10x,test_data_5x], test_Y_one_hot_20x, verbose=0)
print('Test loss:', test_eval[0])
print('Test accuracy:', test_eval[1])
f = open( 'file_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.txt', 'a' )
f.write( 'Test loss: = ' + repr(test_eval[0]) + '\n' )
f.write( 'Test accuracy: = ' + repr(test_eval[1]) + '\n' )
f.close()
# Predict labels
predicted_classes = full_model.predict([test_data_20x, test_data_10x,test_data_5x])
predicted_classes = np.argmax(np.round(predicted_classes),axis=1)
correct = np.where(predicted_classes==test_labels_20x)[0]
print("Found %d correct labels" % len(correct))
f = open( 'file_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.txt', 'a' )
f.write( 'correct labels: = ' + repr(len(correct)) + '\n' )
f.close()
incorrect = np.where(predicted_classes!=test_labels_20x)[0]
print("Found %d incorrect labels" % len(incorrect))
f = open( 'file_classification_bottleneck_HistoVAE_V2_withoutflatten_MultiRes_FCV2_300epoch_OverlapData.txt', 'a' )
f.write( 'incorrect labels: = ' + repr(len(incorrect)) + '\n' )
f.close()
# Classification Report
from sklearn.metrics import classification_report
target_names = ["Class {}".format(i) for i in range(num_classes)]
print(classification_report(test_labels_20x, predicted_classes, target_names=target_names))
```
#### File: byClassification/ResNet-50-101-152/load_data.py
```python
import numpy as np
from PIL import Image
import os
import tqdm
BASEDIR = os.path.join(os.path.dirname(__file__), './')
# standard output format
SPACE = 35
# tqdm parameter
UNIT_SCALE = True
def load(set_='train_resize', label=True):
x = []
y = []
path = BASEDIR+'/dataset/'+set_
for name in tqdm.tqdm(
os.listdir(BASEDIR+'/dataset/'+set_),
desc='{:{}}'.format('Load dataset', SPACE),
unit_scale=UNIT_SCALE):
img = Image.open(path+'/'+name)
img = np.asarray(img, np.float64)
class_ = name.split('_')[0]
#print(class_)
if not label:
#class_ = class_[:class_.rfind('.')] + '.jpg'
class_ = name
x.append(img)
y.append(class_)
x = np.asarray(x)
x = mean_substraction(x)
if label:
y = np.asarray(y, np.int32)
return x, y
# per-channel (RGB) mean subtraction
def mean_substraction(x):
size_ = x.shape[0]
mean = [141.45639998, 136.75046567, 119.34598043]
std = [71.96843246, 70.93090444, 75.99979494]
for j in range(size_):
for i in range(3):
x[j][:, :, i] = (x[j][:, :, i] - mean[i]) / (std[i] + 1e-7)
return x
def mean_std(x):
size_ = x.shape[0]
mean_ = np.array([0.0, 0.0, 0.0])
std_ = np.array([0.0, 0.0, 0.0])
p = 0
for i in range(size_):
h, w = x[i].shape[0:2]
p += h*w
for i in range(size_):
mean_[0] += np.sum(x[i][:, :, 0])/p
mean_[1] += np.sum(x[i][:, :, 1])/p
mean_[2] += np.sum(x[i][:, :, 2])/p
for i in range(size_):
std_[0] += np.sum(((x[i][:, :, 0]-mean_[0])**2)/p)
std_[1] += np.sum(((x[i][:, :, 1]-mean_[1])**2)/p)
std_[2] += np.sum(((x[i][:, :, 2]-mean_[2])**2)/p)
std_ = np.sqrt(std_)
print('{:{}}: {}'.format('Mean', SPACE, mean_))
print('{:{}}: {}'.format('Std', SPACE, std_))
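# Note (added): the hard-coded mean/std constants in mean_substraction() above were
# presumably obtained by running mean_std() once over the training images.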
def main():
x_train, y_train = load('train_resize')
x_test, y_test = load('val_resize')
if __name__ == '__main__':
main()
```
#### File: byClassification/ResNet-50-101-152/val.py
```python
import tensorflow as tf
import numpy as np
import os
import sys
import math
import resnet
import parser
import load_data
import eval_
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
BASEDIR = os.path.join(os.path.dirname(__file__), './')
# get argument
args = parser.test_parser()
# standard output format
SPACE = 35
# default: resnet_v2_101
RESNET_V2 = 'resnet_v2_' + args.layers
# default: 6
CLASSES = args.classes
# default: 16
BATCH_SIZE = args.batch
# default: -1
RESTORE_TARGET = args.recover
# restore weights path
RESTORE_CKPT_PATH = BASEDIR + "/models/" + RESNET_V2 + "/model_" +\
str(RESTORE_TARGET) + ".ckpt"
if not os.path.isfile(RESTORE_CKPT_PATH + ".index"):
print("Recover target not found.")
sys.exit()
SIZE = None
ITER = None
WIDTH = 224
HEIGHT = 224
KEY = tf.GraphKeys.GLOBAL_VARIABLES
# crop center 224*224
def crop_center(img):
img_ = []
size_ = img.shape[0]
for i in range(size_):
h, w = img[i].shape[0:2]
        # center crop
shift1 = int((h-HEIGHT)/2)
shift2 = int((w-WIDTH)/2)
img_.append(img[i][shift1:HEIGHT+shift1, shift2:WIDTH+shift2][:])
return np.asarray(img_)
def net_(xp, is_train):
x = xp
# create network
net = resnet.resnet(x, RESNET_V2, is_train, CLASSES)
# squeeze
net = tf.squeeze(net, axis=(1, 2))
prediction = tf.argmax(net, axis=1)
return prediction
def val_net(x_val, y_val):
# set placeholder
xp = tf.placeholder(tf.float32, shape=(None, HEIGHT, WIDTH, 3))
is_train = tf.placeholder(tf.bool)
# get network
prediction = net_(xp, is_train)
with tf.Session() as sess:
# setup saver
restorer = tf.train.Saver(tf.global_variables())
# load weight
restorer.restore(sess, RESTORE_CKPT_PATH)
print('Val acc:')
eval_.compute_accuracy(xp, BATCH_SIZE, is_train, x_val,
y_val, prediction, sess)
def main():
# get data
x_val, y_val = load_data.load('val_resize')
global SIZE
SIZE = np.size(y_val)
global ITER
ITER = int(math.ceil(SIZE/BATCH_SIZE))
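    # e.g. SIZE = 1000 with BATCH_SIZE = 16 gives ITER = ceil(62.5) = 63 batches
    # (assuming Python 3's true division)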
# train network
val_net(x_val, y_val)
if __name__ == '__main__':
main()
```
{
"source": "JKooll/code-segment",
"score": 3
}
#### File: code-segment/Data Science/train_test_split.py
```python
import numpy as np

def train_test_split(train, label, train_size=None, test_size=None, shuffle=True):
assert len(train) == len(label)
if train_size is not None and test_size is not None:
assert train_size + test_size <= 1
elif train_size is not None:
test_size = 1 - train_size
else:
train_size = 1 - test_size
N = len(train)
train_size = int(train_size * N)
test_size = int(test_size * N)
indices = np.arange(N)
if shuffle:
np.random.shuffle(indices)
train_idx, test_idx = indices[:train_size], indices[train_size:train_size+test_size]
return train[train_idx], train[test_idx], label[train_idx], label[test_idx]
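
if __name__ == "__main__":
    # Minimal usage sketch (added example, not part of the original snippet);
    # assumes train/label are indexable NumPy arrays.
    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)  # (8, 2) (2, 2) (8,) (2,)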
```
#### File: code-segment/Sequential Search/sequential_search.py
```python
class SequentialSearchST():
first = None
class Node():
def __init__(self, key, val, next):
self.key = key
self.val = val
self.next = next
def get(self, key):
x = self.first
while x is not None:
if key == x.key:
return x.val
x = x.next
return None
    def put(self, key, val):
        x = self.first
        while x is not None:
            if key == x.key:
                x.val = val
                return
            x = x.next
        self.first = self.Node(key, val, self.first)
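
if __name__ == "__main__":
    # Small usage example (added, not part of the original snippet).
    st = SequentialSearchST()
    st.put("a", 1)
    st.put("b", 2)
    st.put("a", 3)  # overwrites the value stored under "a"
    print(st.get("a"), st.get("b"), st.get("missing"))  # 3 2 None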
```
#### File: code-segment/Shell Sort/shell_sort.py
```python
def shell_sort(data):
    N = len(data)
    h = 1
    while h < N / 3:
        h = h * 3 + 1
    while h >= 1:
        # h-sort the array: insertion sort with stride h
        for i in range(h, N):
            for j in range(i, h - 1, -h):
                if data[j] >= data[j - h]:
                    break
                data[j], data[j - h] = data[j - h], data[j]
        h = h // 3
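
if __name__ == "__main__":
    # Quick check (added example, not part of the original snippet).
    sample = [5, 2, 9, 1, 7, 3]
    shell_sort(sample)
    print(sample)  # [1, 2, 3, 5, 7, 9]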
```
{
"source": "jkooy/darts_ignoring",
"score": 3
}
#### File: jkooy/darts_ignoring/augment-ignore.py
```python
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from config import AugmentConfig
import utils
from models.augment_cnn import AugmentCNN
import copy
config = AugmentConfig()
device = torch.device("cuda")
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
config.print_params(logger.info)
class Architect():
""" Compute gradients of alphas """
def __init__(self, net, w_momentum, w_weight_decay):
"""
Args:
net
w_momentum: weights momentum
"""
self.net = net
self.v_net = copy.deepcopy(net)
self.w_momentum = w_momentum
self.w_weight_decay = w_weight_decay
def virtual_step(self, trn_X, trn_y, xi, w_optim, model, Likelihood, batch_size, step):
"""
Compute unrolled weight w' (virtual step)
Step process:
1) forward
2) calc loss
3) compute gradient (by backprop)
4) update gradient
Args:
xi: learning rate for virtual gradient step (same as weights lr)
w_optim: weights optimizer
"""
# forward & calc loss
dataIndex = len(trn_y)+step*batch_size
ignore_crit = nn.CrossEntropyLoss(reduction='none').cuda()
# forward
logits,_ = self.net(trn_X)
# sigmoid loss
loss = torch.dot(torch.sigmoid(Likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))/(torch.sigmoid(Likelihood[step*batch_size:dataIndex]).sum())
        # gradient of the training loss w.r.t. the likelihood weights
loss.backward()
dtloss_ll = Likelihood.grad
dtloss_w = []
# do virtual step (update gradient)
# below operations do not need gradient tracking
with torch.no_grad():
            # the optimizer state dict is keyed by parameter object (identity, not value),
            # so the original network's weights have to be iterated alongside the virtual ones
for w, vw in zip(self.net.weights(), self.v_net.weights()):
m = w_optim.state[w].get('momentum_buffer', 0.) * self.w_momentum
# gradient of train loss towards current weights
if w.grad is not None:
vw.copy_(w - xi * (m + w.grad ))
# update virtual weight
dtloss_w.append(m + w.grad )
elif w.grad is None:
dtloss_w.append(w.grad )
return dtloss_w, dtloss_ll
# 1399:[48, 3, 3, 3], 1:25000
def unrolled_backward(self, trn_X, trn_y, val_X, val_y, xi, w_optim, model, likelihood, Likelihood_optim, batch_size, step):
""" Compute unrolled loss and backward its gradients
Args:
xi: learning rate for virtual gradient step (same as net lr)
w_optim: weights optimizer - for virtual step
"""
# do virtual step (calc w`)
dtloss_w, dtloss_ll = self.virtual_step(trn_X, trn_y, xi, w_optim, model, likelihood, batch_size, step)
logits, aux_logits = self.v_net(val_X)
# calc unrolled loss
crit = nn.CrossEntropyLoss().to(device)
dataIndex = len(trn_y)+step*batch_size
        loss = crit(logits, val_y)  # L_val(w`)
# compute gradient
loss.backward()
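        # (Added explanatory note) The loop below sums, over all virtual weights, the
        # element-wise quotient dL_val/dw' divided by dL_train/dw' (with inf/nan entries
        # zeroed) and uses that scalar as a proxy for dL_val/dL_train; multiplying by
        # dtloss_ll (dL_train/dLikelihood) yields the gradient applied to the likelihood weights.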
dvloss_tloss = 0
for v, dt in zip(self.v_net.weights(), dtloss_w):
if v.grad is not None:
grad_valw_d_trainw = torch.div(v.grad, dt)
grad_valw_d_trainw[torch.isinf(grad_valw_d_trainw)] = 0
grad_valw_d_trainw[torch.isnan(grad_valw_d_trainw)] = 0
grad_val_train = torch.sum(grad_valw_d_trainw)
# print(grad_val_train)
dvloss_tloss += grad_val_train
dlikelihood = dvloss_tloss* dtloss_ll
vprec1, vprec5 = utils.accuracy(logits, val_y, topk=(1, 5))
Likelihood_optim.zero_grad()
likelihood.grad = dlikelihood
Likelihood_optim.step()
return likelihood, Likelihood_optim, loss, vprec1, vprec5
def main():
logger.info("Logger is set - training start")
# set default gpu device id
torch.cuda.set_device(config.gpus[0])
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.benchmark = True
# get data with meta info
input_size, input_channels, n_classes, train_val_data, test_data = utils.get_data(
config.dataset, config.data_path, config.cutout_length, validation=True)
criterion = nn.CrossEntropyLoss().to(device)
use_aux = config.aux_weight > 0.
model = AugmentCNN(input_size, input_channels, config.init_channels, n_classes, config.layers,
use_aux, config.genotype).to(device) #single GPU
# model = nn.DataParallel(model, device_ids=config.gpus).to(device)
# model size
mb_params = utils.param_size(model)
logger.info("Model size = {:.3f} MB".format(mb_params))
# weights optimizer with SGD
optimizer = torch.optim.SGD(model.parameters(), config.lr, momentum=config.momentum,
weight_decay=config.weight_decay)
n_train = len(train_val_data)
split = n_train // 2
indices = list(range(n_train))
# each train data is endowed with a weight
Likelihood = torch.nn.Parameter(torch.ones(len(indices[:split])).cuda(),requires_grad=True)
Likelihood_optim = torch.optim.SGD({Likelihood}, config.lr)
# data split
train_data = torch.utils.data.Subset(train_val_data, indices[:split])
valid_data = torch.utils.data.Subset(train_val_data, indices[split:])
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.workers,
pin_memory=False)
valid_loader = torch.utils.data.DataLoader(valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.workers,
pin_memory=False)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs)
architect = Architect(model, 0.9, 3e-4)
best_top1 = 0.
# training loop
for epoch in range(config.epochs):
lr_scheduler.step()
lr = lr_scheduler.get_lr()[0]
drop_prob = config.drop_path_prob * epoch / config.epochs
model.drop_path_prob(drop_prob)
# training
train(train_loader, valid_loader, model, architect, optimizer, criterion, lr, epoch, Likelihood, Likelihood_optim, config.batch_size)
# validation
cur_step = (epoch+1) * len(train_loader)
top1 = validate(valid_loader, model, criterion, epoch, cur_step)
# save
if best_top1 < top1:
best_top1 = top1
is_best = True
else:
is_best = False
utils.save_checkpoint(model, config.path, is_best)
print("")
logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
def train(train_loader, valid_loader, model, architect, optimizer, criterion, lr, epoch, Likelihood, Likelihood_optim, batch_size):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
standard_losses = utils.AverageMeter()
valid_losses = utils.AverageMeter()
cur_step = epoch*len(train_loader)
cur_lr = optimizer.param_groups[0]['lr']
logger.info("Epoch {} LR {}".format(epoch, cur_lr))
writer.add_scalar('train/lr', cur_lr, cur_step)
model.train()
for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(train_loader, valid_loader)):
trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True)
val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True)
N = trn_X.size(0)
M = val_X.size(0)
# phase 2. Likelihood step (Likelihood)
Likelihood_optim.zero_grad()
Likelihood, Likelihood_optim, valid_loss, vprec1, vprec5= architect.unrolled_backward(trn_X, trn_y, val_X, val_y, lr, optimizer, model, Likelihood, Likelihood_optim, batch_size, step)
# phase 1. network weight step (w)
optimizer.zero_grad()
logits, aux_logits = model(trn_X)
ignore_crit = nn.CrossEntropyLoss(reduction='none').to(device)
dataIndex = len(trn_y)+step*batch_size
loss = torch.dot(torch.sigmoid(Likelihood[step*batch_size:dataIndex]), ignore_crit(logits, trn_y))
loss = loss/(torch.sigmoid(Likelihood[step*batch_size:dataIndex]).sum())
'''
if config.aux_weight > 0.:
loss += config.aux_weight * criterion(aux_logits, y)
'''
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
# update network weight on train data
optimizer.step()
#compare normal loss without weighted
standard_loss = criterion(logits, trn_y)
prec1, prec5 = utils.accuracy(logits, trn_y, topk=(1, 5))
losses.update(loss.item(), N)
standard_losses.update(standard_loss.item(), N)
valid_losses.update(valid_loss.item(), M)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step % config.print_freq == 0 or step == len(train_loader)-1:
logger.info(
"Train: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} standard Loss {slosses.avg:.3f} Valid Loss {vlosses.avg:.3f}"
" Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, len(train_loader)-1, losses=losses, slosses=standard_losses, vlosses=valid_losses,
top1=top1, top5=top5))
writer.add_scalar('train/loss', loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
writer.add_scalar('val/loss', valid_loss.item(), cur_step)
writer.add_scalar('val/top1', vprec1.item(), cur_step)
writer.add_scalar('val/top5', vprec5.item(), cur_step)
cur_step += 1
logger.info("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch+1, config.epochs, top1.avg))
def validate(valid_loader, model, criterion, epoch, cur_step):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
with torch.no_grad():
for step,(X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
logits, _ = model(X)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
losses.update(loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step % config.print_freq == 0 or step == len(valid_loader)-1:
logger.info(
"Test: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, len(valid_loader)-1, losses=losses,
top1=top1, top5=top5))
writer.add_scalar('test/loss', losses.avg, cur_step)
writer.add_scalar('test/top1', top1.avg, cur_step)
writer.add_scalar('test/top5', top5.avg, cur_step)
logger.info("Test: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch+1, config.epochs, top1.avg))
return top1.avg
if __name__ == "__main__":
main()
```
#### File: darts_ignoring/scripts/dot2png.py
```python
from graphviz import Digraph
import glob
import os
import subprocess
from PIL import Image
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument("src_dir", help="dir of original dot files")
parser.add_argument("dst_dir", help="destination of dot files & png files")
parser.add_argument("gif_prefix", help="final gif prefix (path with file prefix)")
args = parser.parse_args()
if not os.path.exists(args.dst_dir):
os.makedirs(args.dst_dir)
def glob_with_nonext(pattern):
# glob + non-ext + sort
dots = glob.glob(pattern)
dots = list(filter(lambda p: not os.path.splitext(p)[1], dots))
dots = sorted(dots)
return dots
def add_epoch_caption(src_dir, dst_dir):
""" add epoch to dots
"""
print("Add epoch caption to dots ...")
dots = glob_with_nonext(os.path.join(src_dir, "*"))
out_paths = []
for path in dots:
print(path)
fn = os.path.basename(path)
ep = int(fn[2:4])
out_path = os.path.join(dst_dir, fn)
with open(path) as f:
r = f.readlines()
r.insert(-1,
"\tfontname=times fontsize=20 height=1.5 label=\"Epoch {:02d}\" "
"overlap=false\n".format(ep+1))
text = "".join(r)
with open(out_path, "w") as fout:
fout.write(text)
# convert dot to png
png_path = out_path + ".png"
subprocess.call("dot -Tpng {dot} -o {png}".format(dot=out_path, png=png_path), shell=True)
out_paths.append(out_path)
return out_paths
def to_square(dir_path):
""" Re-sizing: adjust png size to square (max_size x max_size)
Arguments:
paths: dot file paths. (dot_path + '.png' == png_path)
"""
dot_paths = glob_with_nonext(os.path.join(dir_path, "*"))
# get max size
max_w = 0
max_h = 0
for path in dot_paths:
png_path = path + ".png"
img = Image.open(png_path)
w, h = img.size
max_w = max(max_w, w)
max_h = max(max_h, h)
# re-size
w, h = max_w, max_h
extent = "{}x{}".format(w, h)
print("\nRe-size to {} ...".format(extent))
for path in dot_paths:
print(path)
png_path = path + ".png"
final_path = path + "-maxsize.png"
subprocess.call("convert {png} -gravity center -background white "
"-extent {extent} {out}".format(
png=png_path, out=final_path, extent=extent),
shell=True)
def to_gif(dst_dir, gif_prefix):
# Convert to GIF
print("\nConvert to gif ...")
st = time.time()
print("Normal ... ", end="")
cmd = "convert -resize 40% -delay 30 -loop 0 {target_glob} {output_path}".format(
target_glob=os.path.join(dst_dir, "*-normal-maxsize.png"),
output_path=gif_prefix+"-normal.gif")
subprocess.call(cmd, shell=True)
print("{:.0f}s".format(time.time() - st))
st = time.time()
print("Reduce ... ", end="")
subprocess.call("convert -resize 40% -delay 30 -loop 0 {target_glob} {output_path}".format(
target_glob=os.path.join(dst_dir, "*-reduce-maxsize.png"),
output_path=gif_prefix+"-reduce.gif"),
shell=True)
print("{:.0f}s".format(time.time() - st))
print("DONE !")
if __name__ == "__main__":
#add_epoch_caption(args.src_dir, args.dst_dir)
to_square(args.dst_dir)
to_gif(args.dst_dir, args.gif_prefix)
```
{
"source": "jkopczyn/sandbox",
"score": 2
}
#### File: jkopczyn/sandbox/sandbox_compose.py
```python
from glob import glob
import os
import shlex
import subprocess
import sys
class compose(object):
DEFAULT_PRE_SCRIPT_DIRS = ['compose/setenv.d', 'compose/local.d']
def __init__(self):
self.env = None
self.pre_script_dirs = [d for d in self.DEFAULT_PRE_SCRIPT_DIRS]
def main(self):
import argparse
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--pre-script-dir',
nargs='*',
help='scripts to source before running docker-compose')
arg_parser.add_argument('--sudo', action='store_true')
arg_parser.add_argument('env')
subparsers = arg_parser.add_subparsers()
up_parser = subparsers.add_parser('up')
up_parser.add_argument('--detached', '-d', action='store_true')
up_parser.set_defaults(parser_func=self.up)
down_parser = subparsers.add_parser('down')
down_parser.set_defaults(parser_func=self.down)
start_parser = subparsers.add_parser('start')
start_parser.add_argument('services', nargs='*')
start_parser.set_defaults(parser_func=self.start)
stop_parser = subparsers.add_parser('stop')
stop_parser.add_argument('services', nargs='*')
stop_parser.set_defaults(parser_func=self.stop)
create_parser = subparsers.add_parser('create')
create_parser.add_argument('--force-recreate', action='store_true')
create_parser.add_argument('--build', action='store_true')
create_parser.add_argument('--no-build', dest='build', action='store_false')
create_parser.add_argument('services', nargs='*')
create_parser.set_defaults(parser_func=self.create)
logs_parser = subparsers.add_parser('logs')
logs_parser.add_argument('--follow', '-f', action='store_true')
logs_parser.add_argument('--tail', '-t')
logs_parser.add_argument('services', nargs='*')
logs_parser.set_defaults(parser_func=self.logs)
args = arg_parser.parse_args()
self.env = args.env
if args.pre_script_dir:
self.pre_script_dirs = args.pre_script_dir
if 'parser_func' not in vars(args):
            # no sub-command given
sys.stderr.write('no sub-command given\n\n')
arg_parser.print_help()
exit(1)
args_ignore = set(['parser_func', 'env', 'pre_script_dir'])
kwargs = {k: v for k, v in vars(args).items() if k not in args_ignore}
os.environ['COMPOSE_PROJECT_NAME'] = 'fnb'
args.parser_func(**kwargs)
def up(self, sudo=False, detached=False):
cmd = self._docker_compose_prefix(sudo=sudo)
cmd.append('up')
if detached:
cmd.append('-d')
cmd_full = self._build_command(cmd)
self._exec_command(cmd_full)
def down(self, sudo=False):
cmd = self._docker_compose_prefix(sudo=sudo)
cmd.append('down')
cmd_full = self._build_command(cmd)
self._exec_command(cmd_full)
def start(self, services=None, sudo=False):
cmd = self._docker_compose_prefix(sudo=sudo)
cmd.append('start')
if services is not None:
cmd.extend(services)
cmd_full = self._build_command(cmd)
self._exec_command(cmd_full)
def stop(self, services=None, sudo=False):
cmd = self._docker_compose_prefix(sudo=sudo)
cmd.append('stop')
if services is not None:
cmd.extend(services)
cmd_full = self._build_command(cmd)
self._exec_command(cmd_full)
def create(self, services=None, build=None, force_recreate=False, sudo=False):
cmd = self._docker_compose_prefix(sudo=sudo)
cmd.append('create')
if force_recreate:
cmd.append('--force-recreate')
if build is not None:
if build:
cmd.append('--build')
else:
cmd.append('--no-build')
if services is not None:
cmd.extend(services)
cmd_full = self._build_command(cmd)
self._exec_command(cmd_full)
def logs(self, services=None, sudo=False, follow=False, tail=None):
cmd = self._docker_compose_prefix(sudo=sudo)
cmd.append('logs')
if follow:
cmd.append('-f')
if tail:
cmd.append('--tail={}'.format(tail))
if services is not None:
cmd.extend(services)
cmd_full = self._build_command(cmd)
self._exec_command(cmd_full)
def _docker_compose_file(self):
return os.path.join('compose', self.env+'.yaml')
def _docker_compose_prefix(self, sudo=False):
prefix = []
if sudo:
prefix.extend(['sudo', '-E'])
prefix.append('docker-compose')
prefix.extend(['-f', self._docker_compose_file()])
return prefix
def _build_command(self, cmd):
'''
The command cmd should include 'docker-compose' and 'sudo' (if required).
'''
script = self._build_command_script(cmd)
return ['bash', '-c', '\n' + script] # add newline for debug printing purposes
def _build_command_script(self, cmd):
script = self._build_pre_script()
script += ' '.join((shlex.quote(a) for a in cmd))
script += '\n'
return script
def _build_pre_script(self):
scripts = []
for d in self.pre_script_dirs:
if not os.path.isdir(d):
sys.stderr.write('pre-script: skipping non-directory {}'.format(d))
continue
scripts.extend(glob(os.path.join(d, '*.sh')))
return ''.join(('. {}\n'.format(shlex.quote(s)) for s in scripts))
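    # (Added illustrative note) With the default pre-script dirs, the script generated by
    # _build_command_script() looks roughly like:
    #   . compose/setenv.d/00-env.sh
    #   . compose/local.d/10-local.sh
    #   docker-compose -f compose/dev.yaml up -d
    # where the sourced file names are hypothetical and depend on the *.sh files present.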
def _exec_command(self, cmd, dry_run=False):
sys.stderr.write('{}\n'.format(' '.join(shlex.quote(a) for a in cmd)))
if not dry_run:
subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)
if __name__ == '__main__':
main = compose()
main.main()
```
{
"source": "jkopka/Cleverreach-Backup",
"score": 2
}
#### File: jkopka/Cleverreach-Backup/backup_groups.py
```python
import json
import time
import csv
import configparser
import requests
import logging
from tqdm import tqdm
def main():
""" Hauptfunktion """
# cache_tool = CacheTool('cleverreach.db')
# Config aus config.ini laden
config = configparser.ConfigParser()
config.read("config.ini")
BASE_URL = config["CLEVERREACH"]["BASE_URL"]
LOGIN = config["CLEVERREACH"]["LOGIN"]
PASSWORD = config["CLEVERREACH"]["PASSWORD"]
CLIENT_ID = config["CLEVERREACH"]["CLIENT_ID"]
CR_TOKEN = config["CLEVERREACH"]["TOKEN"]
PAGESIZE = int(config["CLEVERREACH"]["PAGESIZE"])
    # Log file
logging.basicConfig(filename="cleverreach_sicherung.log", level=logging.INFO)
    # Load the groups to back up from groups.ini
config_groups = configparser.ConfigParser()
config_groups.read("groups.ini")
groups = []
for group_key in config_groups.keys():
if not group_key == "DEFAULT":
group_item = {
"id": group_key,
"last_saved": config_groups[group_key]["last_saved"],
}
groups.append(group_item)
    # Debug output
debug = False
logging.info("## CLEVERREACH-BACKUP")
logging.info("# " + time.asctime())
logging.info(" ... pagesize: " + str(PAGESIZE))
logging.info(" ... check, ob Token noch gültig ist.")
token_request = requests.get(BASE_URL + "/groups", {"token": CR_TOKEN})
if token_request.status_code != 200:
logging.info(
" ... Token ungültig. Hole einen neuen.", token_request.status_code
)
token_login_data = {
"client_id": CLIENT_ID,
"login": LOGIN,
"password": PASSWORD,
}
token_request = requests.post(BASE_URL + "/login", token_login_data)
CR_TOKEN = token_request.text.strip()
else:
logging.info(" ... Token gültigt.")
    # Token query-string suffix
data_token = "?token=" + CR_TOKEN
logging.info("Folgende Gruppen aus INI geholt:")
for group in groups:
logging.info(group["id"] + "; " + group["last_saved"])
tmp_list_count = 0
logging.info("Anzahl Gruppen: " + str(len(groups)))
    # Fetch and save the receivers of each group
for group in groups:
logging.info("# Gruppe: " + str(group["id"]))
if time.strptime(group["last_saved"])[2] == time.strptime(time.asctime())[2]:
continue
tmp_list_count += 1
        # Fetch statistics,
        # mainly for the total number of entries
logging.info(" ... hole stats")
item_stats_url = "/groups.json/{0}/stats{1}".format(group["id"], data_token)
item_stats_encoded = json.loads(
requests.get(BASE_URL + item_stats_url).text.strip()
)
if "error" in item_stats_encoded:
logging.info(" ... Gruppe nicht vorhanden: " + str(group["id"]))
print("Gruppe nicht vorhanden: ", group["id"])
continue
logging.info(" ... check")
        # Fetch group info,
        # for the group name
logging.info(" ... hole infos")
item_info_url = "/groups.json/{0}{1}".format(group["id"], data_token)
item_info_encoded = json.loads(
requests.get(BASE_URL + item_info_url).text.strip()
)
logging.info(" ... check")
if "error" in item_info_encoded:
logging.info(" ... Gruppe nicht vorhanden: " + str(group["id"]))
print("Fehler bei group-ID ", group["id"])
continue
# print(item_info_encoded)
group["name"] = item_info_encoded["name"]
logging.info(" ... Gruppenname: " + str(group["name"]))
        # Calculate the number of pages
if (
item_stats_encoded["total_count"] < PAGESIZE
and not item_stats_encoded["total_count"] == 0
):
item_pages = 1
elif item_stats_encoded["total_count"] == 0:
logging.info(" .. Gruppe leer")
continue
else:
item_pages = item_stats_encoded["total_count"] // PAGESIZE + 1
logging.info(" .. Länge der Liste: " + str(item_stats_encoded["total_count"]))
logging.info(" .. Anzahl Seiten: " + str(item_pages))
        # Create the CSV file and write the header
filename = (
"backups/"
+ group["name"]
+ "_"
+ time.strftime("%Y-%m-%d_%H-%M", time.localtime())
+ ".csv"
)
csv_file = csv.writer(open(filename, "w", newline=""))
csv_file.writerow(
[
"id",
"email",
"activated",
"registered",
"source",
"active",
"global_attributes_name",
"global_attributes_vorname",
"global_attributes_brief_anrede",
]
)
logging.info(" .. CSV erstellt und geöffnet.")
with tqdm(
total=item_stats_encoded["total_count"],
desc="Anzahl Einträge",
unit=" Einträge",
) as pbar:
item_url = "/groups.json/{0}/receivers".format(group["id"])
for page_count in range(0, item_pages):
                # Load each page from Cleverreach
                # (not cached!)
# item_return_json = cache_tool.request(BASE_URL+item_url+data_token+'&page='+str(page_count)+'&pagesize='+str(PAGESIZE))
item_return_json = requests.get(
BASE_URL
+ item_url
+ data_token
+ "&page="
+ str(page_count)
+ "&pagesize="
+ str(PAGESIZE)
).text.strip()
if debug:
print(
" .. Seite {0} von {1} geholt.".format(page_count, item_pages)
)
item_return_encoded = json.loads(item_return_json)
                # If the list is not empty, write it to the CSV
if debug:
print(" .. Länge der Liste: ", len(item_return_encoded))
if len(item_return_encoded) > 0:
if debug:
print(" .. Seite bearbeiten.")
for reciever in item_return_encoded:
# print(" .. Reciever-eMail: ",reciever["email"])
csv_file.writerow(
[
reciever["id"],
reciever["email"],
reciever["activated"],
reciever["registered"],
reciever["source"],
reciever["active"],
reciever["global_attributes"]["name"],
reciever["global_attributes"]["vorname"],
reciever["global_attributes"]["briefanrede"],
]
)
pbar.update(1)
else:
# print(group['name'], item_stats_encoded['total_count'], len(item_return_encoded))
# print(item_url)
logging.info("Gruppe leer! " + str(group["name"]))
logging.info(" ... csv gespeichert.")
config_groups[group["id"]]["last_saved"] = time.asctime()
logging.info(" ... done.")
# if tmp_list_count == 1:
# print("# {0} Listen abgefragt.")
# break
logging.info(" ... alle Gruppen gesichert.")
with open("groups.ini", "w") as configfile:
config_groups.write(configfile)
logging.info(" ... Änderungen bei Gruppendaten gespeichert.")
logging.info("## ENDE")
if __name__ == "__main__":
main()
```
{
"source": "jkopka/Essensplan",
"score": 3
}
#### File: Essensplan/essensplan/app.py
```python
import requests
from flask import request, flash
from flask import render_template
from flask import Flask, url_for, redirect
from models import Schema
from models import Dishes
from models import Day
from models import Days
from models import Ingredients
from models import Tags
from models import TaggedDishes
from customFunctions import getMonday,addTodoListToWunderlist
from datetime import timedelta
from datetime import date
from datetime import datetime
from calendar import month_name
# Database: SQLAlchemy
from flask_sqlalchemy import SQLAlchemy
# User management: Flask-User
from flask_user import login_required, UserManager, UserMixin
import operator  # for the Wunderlist function: sorting
@app.route("/displayWeek", methods=["POST", "GET"])
def displayWeek():
try:
choosenDate = datetime.strptime(request.args.get('date', ''), "%Y-%m-%d").date()
except:
choosenDate = date.today()
monday = getMonday(choosenDate)
try:
message = request.args.get('message', '')
except KeyError:
message = ''
year = monday.strftime("%Y")
monthNames = ["Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"]
monthNumbers = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]
month = monthNames[monday.month-1]
day = monday.day
week_number = choosenDate.isocalendar()[1]
    week = [(monday + timedelta(days=i)).day for i in range(7)]
    weekday_names = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']
    weekdays = {(monday + timedelta(days=i)).day: weekday_names[i] for i in range(7)}
dishes = Dishes()
daysObject = Days()
    # FEATURE: fetch the dish assigned to each day
    # the loop below collects the dishes for every day of the week
days = {}
for day in range(0,7):
actualDay = monday+timedelta(days=day)
dishesperDay = daysObject.list(actualDay)
if len(dishesperDay) != 0:
days[actualDay.day] = Day(year, int(actualDay.strftime("%m")), actualDay.strftime("%d"), dishesperDay, True)
else:
days[actualDay.day] = Day(year, int(actualDay.strftime("%m")), actualDay.strftime("%d"), dishesperDay, False)
if actualDay == datetime.now().date():
days[actualDay.day].isToday = True
if message:
flash(message)
output = render_template('index.html', linkCreateDish=url_for('formCreateDish'), linkchooseDish=url_for('formChooseDish'), nextWeek=(choosenDate+timedelta(days=7)).strftime("%Y-%m-%d"), lastWeek=(choosenDate+timedelta(days=-7)).strftime("%Y-%m-%d"), week_number=week_number, choosenDatum=monday, year=year, monthName=month, monthNames=monthNames, monthNumbers=monthNumbers, week=week, weekdays=weekdays, month=monday.month-1, days=days, dishes=dishes)
return output
@app.route("/dishes")
def listDishes():
dishes = Dishes()
returnText = ""
listDishes = dishes.list()
del dishes
countDishes = len(listDishes)
# returnText = returnText + "Anzahl Dishes: " + str(countDishes) + "<br />"
    ## For tagging:
# taggedDishes = TaggedDishes()
# for index, item in enumerate(listDishes):
# # if listDishes[index]["lastCooked"]:
# # print(datetime.strptime(listDishes[index]["lastCooked"], "%Y-%m-%d").strftime("%d-%m-%Y"))
# tag_for_dish = taggedDishes.list_with_names(listDishes[index]['dish_id'])
# listDishes[index]['tags'] = tag_for_dish
# for tag in listDishes[index]['tags']:
# print(tag)
returnTemplate = render_template('dishes.html', dishes=listDishes)
# for dish in listDishes:
# deleteLink = url_for('deleteDish') + "?id=" + str(dish["dish_id"])
# returnText = returnText + "<a href='" + deleteLink + "'> " + dish["name"] + "</a><br />"
return returnTemplate
# return "Liste der verfügbaren Dishes"
@app.route("/formCreateDish")
def formCreateDish():
tags = Tags()
allTags = tags.list_tags()
countTags = len(allTags)
return render_template('formCreateDish.html', tags=allTags, countTags=countTags)
@app.route("/formEditDish", methods=["POST", "GET"])
def formEditDish():
""" Zeigt ein Formular zur bearbeitung eines Gerichts an. Parameter: dish_id """
    # Check whether request data was passed
if request:
dish_id = request.args.get('dish_id', '')
else:
return "Error"
    # If dish_id was passed as a string, convert it to an integer
if type(dish_id) == str:
dish_id = int(dish_id)
# Gericht & Tags mit Namen laden
dishes = Dishes()
dish = dishes.getDish(dish_id)[0]
taggedDishes = TaggedDishes()
tag_for_dish = taggedDishes.list_with_names(dish['dish_id'])
dish['tags'] = tag_for_dish
    # Load all available tags
tags = Tags()
allTags = tags.list_tags()
countTags = len(allTags)
    # Load the ingredients for the dish
ingredientsAll = Ingredients()
ingredients = ingredientsAll.list(dish_id)
    # Load all ingredient names and units
suggestions = ingredientsAll.get_suggestions()
# print(suggestions)
    # Mark the dish's tags as selected
for index in range(len(allTags)):
for tag2 in tag_for_dish:
if allTags[index]['tag_id'] == tag2['tag_id']:
allTags[index]['selected'] = " selected"
    # Fill the template and return it
return render_template('formEditDish.html', allTags=allTags, countTags=countTags, dish=dish, ingredients=ingredients, ingredientsCount=len(ingredients), suggestions=suggestions)
@app.route("/chooseDish", methods=["POST", "GET"])
def formChooseDish():
dishes = Dishes()
returnText = ""
choosenDate = request.args.get('choosen_date')
listDishes = dishes.list()
print(choosenDate)
return render_template('formChooseDish.html', dishes=listDishes, date=choosenDate)
@app.route("/createDish", methods=["POST", "GET"])
def createDish():
params = {}
if request:
params["name"] = request.args.get('name', '')
params["note"] = request.args.get('note', '')
params["ingredients"] = request.args.get('ingredients', '')
params["countCooked"] = 0
params["tags"] = request.args.get('tags','')
params["tags"] = request.args.getlist('tags')
tags_list = []
        # Open the database
tags = Tags()
for tag_name_request in params["tags"]:
tag_id = tags.get_tag_id(tag_name_request)[0]['tag_id']
print("Tag_name: {0}".format(tag_name_request))
print(" Tag_id ist: {0}".format(tag_id))
if not tag_id:
print(" Tag erstellen")
tag_id = tags.create_tag("tag_name_request")
tags_list.append(tag_id)
print("")
        # Close the database and table
        del tags
        # Open the database & create the dish
dishes = Dishes()
dish_id = dishes.create(params)
del dishes
        # For testing, the fixed dish_id 22 can be used here so that a dish is not created on every run
# dish_id = 22
if dish_id:
taggedDishes = TaggedDishes()
for tag_id in tags_list:
print("Zuweisung: {0} zu {1}".format(dish_id, tag_id))
taggedDishes.assign_tag_to_dish(dish_id, tag_id)
return "Erfolgreich<br /><a href=\"" + url_for('listDishes') + "\">Gerichte anzeigen</a>"
else:
return "Fehler"
@app.route("/editDish", methods=["POST"])
def editDish():
params = {}
if request:
params["dish_id"] = request.form.get('dish_id', '')
params["name"] = request.form.get('name', '')
params["note"] = request.form.get('note', '')
# params["ingredients"] = request.args.get('ingredients', '')
params["countCooked"] = "0"
params["lastCooked"] = "2019-01-01"
# params["tags"] = request.args.get('tags','')
params["tags"] = request.form.getlist('tags')
tags_list = []
# print(params["tags"])
    # Open the database
tags = Tags()
for tag_name_request in params["tags"]:
tag_id = tags.get_tag_id(tag_name_request)[0]['tag_id']
# print("Tag_name: {0}".format(tag_name_request))
# print(" Tag_id ist: {0}".format(tag_id))
if not tag_id:
# print(" Tag erstellen")
tag_id = tags.create_tag("tag_name_request")
tags_list.append(tag_id)
    # Assemble the parameters to pass on
dish_stuff = {}
# dish_stuff['dish_id'] = params['dish_id']
dish_stuff['name'] = params['name']
dish_stuff['note'] = params['note'].splitlines()
# dish_stuff['ingredients'] = params['ingredients']
dish_stuff['countCooked'] = params['countCooked']
# dish_stuff['lastCooked'] = params['lastCooked']
dish_stuff['lastCooked'] = params['lastCooked']
    # Close the database and table
    del tags
    # Open the database & update the dish
dishes = Dishes()
returnValue = dishes.update(params["dish_id"], dish_stuff)
del dishes
    # For testing, the fixed dish_id 22 can be used here so that a dish is not created on every run
# dish_id = 22
taggedDishes = TaggedDishes()
# print(tags_list)
existing_tags = taggedDishes.list(params["dish_id"])
# print("Existing Tags: {0}".format(existing_tags))
# print("tags_list: {0}".format(tags_list))
for tag_id in tags_list:
# print([item for item in existing_tags if item["tag_id"] == tag_id])
if not [item for item in existing_tags if item["tag_id"] == tag_id]:
# print("tag_id: {0} - existing_tag: {1}".format(tag_id, existing_tag))
# print("Zuweisung: {1} zu {0}".format(params["dish_id"], tag_id))
taggedDishes.assign_tag_to_dish(params["dish_id"], tag_id)
for existing_tag in existing_tags:
if not existing_tag['tag_id'] in tags_list:
# print("Zuweisung löschen: {1} zu {0}".format(params["dish_id"], existing_tag['tag_id']))
taggedDishes.remove_tag_from_dish(params["dish_id"], existing_tag['tag_id'])
return redirect(url_for('showDish',dish_id=params["dish_id"]))
@app.route("/assignDish", methods=["POST", "GET"])
def assignDish():
dish_id = int(request.args.get('dish_id', ''))
choosen_date = request.args.get('choosen_date', '')
if dish_id and choosen_date:
days = Days()
dishes = Dishes()
params = {}
params["dish_id"] = int(dish_id)
params["day"] = choosen_date
update_dict = {}
print(choosen_date)
update_dict["lastCooked"] = choosen_date
update_dict['countCooked'] = 0
dish = dishes.getDish(dish_id)
update_dict["name"] = dish[0]["name"]
days.create(params)
# dishes.update(dish_id, update_dict)
return redirect(url_for('displayWeek',date=choosen_date))
else:
return "0"
@app.route("/removeDish", methods=["POST", "GET"])
def removeDish():
dish_id = int(request.args.get('dish_id', ''))
choosen_date = request.args.get('choosen_date', '')
if dish_id and choosen_date:
days = Days()
params = {}
params["dish_id"] = int(dish_id)
params["day"] = choosen_date
returnValue = days.delete(params)
return redirect(url_for('displayWeek',date=choosen_date))
else:
return redirect(url_for('displayWeek',date=choosen_date))
@app.route("/deleteDish", methods=["POST", "GET"])
def deleteDish():
dishes = Dishes()
params = {}
print(request.form)
if request:
print(dishes.delete(request.args.get('dish_id', '')))
return redirect(url_for('listDishes'))
else:
return "Fehler! <br /><a href\"" + url_for('listDishes') + "\">Liste</a>"
@app.route("/dish/<int:dish_id>")
def showDish(dish_id):
if type(dish_id) is int:
dishes = Dishes()
dish = dishes.getDish(dish_id)[0]
taggedDishes = TaggedDishes()
tag_for_dish = taggedDishes.list_with_names(dish['dish_id'])
dish['tags'] = tag_for_dish
if dish['note']:
dish['note'] = dish['note'].replace('\n', '<br>')
else:
dish['note'] = ''
for tag in dish['tags']:
print(tag)
ingredientsAll = Ingredients()
ingredients = ingredientsAll.list(dish_id)
return render_template('dish.html', dish=dish, ingredients=ingredients, ingredientsCount=len(ingredients))
else:
returnText = "Detailansicht eines Gerichts: Fehler"
# return returnText
@app.route("/createIngredient", methods=["POST", "GET"])
def createIngredient():
ingredients = Ingredients()
params = {}
if request:
params["dish_id"] = request.args.get('dish_id', '')
params["name"] = request.args.get('name', '')
params["amount"] = request.args.get('amount', '')
params["unit"] = request.args.get('unit', '')
dish_id = ingredients.create(params)
return redirect(url_for('formEditDish', dish_id=params["dish_id"]))
else:
return "Fehler"
@app.route("/deleteIngredient", methods=["POST", "GET"])
def deleteIngredient():
ingredients = Ingredients()
params = {}
if request:
dish_id = request.args.get('dish_id', '')
name = request.args.get('name', '')
ingredients.delete(dish_id, name)
        del ingredients
# return redirect(url_for('getTags'), msg='Tag created')
return redirect(url_for('formEditDish', dish_id=dish_id))
# return "{0}".format(params)
else:
return "Fehler"
@app.route("/create_tag", methods=["POST", "GET"])
def create_tag():
tags = Tags()
params = {}
if request:
params["tag"] = request.args.get('tag', '')
if len(params["tag"]) == 0:
return redirect(url_for('getTags'))
tag_id = tags.create_tag(params)
print(tag_id)
# return "yay {0}".format(params["tag"])
# return redirect(url_for('getTags', msg='Tag {0} wurde hinzugefügt.'.format(params['name'])))
return redirect(url_for('getTags'))
else:
return "Fehler"
return "Fehler"
@app.route("/tags", methods=["POST", "GET"])
def getTags():
has_message = False
if request:
if request.args.get('msg', ''):
has_message = request.args.get('msg', '')
# return redirect(url_for('listDishes'))
else:
has_message = False
return "Fehler! <br /><a href\"" + url_for('listDishes') + "\">Liste</a>"
tags = Tags()
allTags = tags.list_tags()
# if len(allTags) > 0:
# for tag in allTags:
# print(tag)
return render_template('tags.html', tags=allTags, has_message=has_message)
@app.route("/deletetag", methods=["POST", "GET"])
def deleteTag():
tags = Tags()
params = {}
if request:
# params["tag"] = request.args.get('tag', '')
params['tag_id'] = request.args.get('tag_id', '')
print(params)
tags.delete_tag(params)
        del tags
# return redirect(url_for('getTags'), msg='Tag created')
return redirect(url_for('getTags'))
# return "{0}".format(params)
else:
return "Fehler"
@app.route("/addToWunderlist", methods=["POST", "GET"])
def addToWunderlist():
    # Add all ingredients of the dishes of the displayed week to Wunderlist
    #
    # input: displayed week
    #
    # -> get all dishes of the week
    #
    # -> collect all ingredients of the dishes and combine identical ingredients
    #
    # -> add the ingredients to Wunderlist via the API
try:
choosenDate = datetime.strptime(request.args.get('date', ''), "%Y-%m-%d").date()
except:
choosenDate = date.today()
monday = getMonday(choosenDate)
day = monday.day
year = monday.strftime("%Y")
# week = [(monday+timedelta(days=0)).day, (monday+timedelta(days=1)).day, (monday+timedelta(days=2)).day, (monday+timedelta(days=3)).day, (monday+timedelta(days=4)).day, (monday+timedelta(days=5)).day, (monday+timedelta(days=6)).day]
# weekdays = {(monday+timedelta(days=0)).day:'Montag', (monday+timedelta(days=1)).day:'Dienstag', (monday+timedelta(days=2)).day:'Mittwoch', (monday+timedelta(days=3)).day:'Donnerstag', (monday+timedelta(days=4)).day:'Freitag', (monday+timedelta(days=5)).day:'Samstag', (monday+timedelta(days=6)).day:'Sonntag'}
dishes = Dishes()
dishes_for_wunderlist = {}
daysObject = Days()
ingredientsObject = Ingredients()
html = "bla"
ingredients_for_wunderlist = []
ingredients = {}
ingredients_obj = []
for day in range(0,7):
actualDay = monday+timedelta(days=day)
dishesperDay = daysObject.list(actualDay)
for dish in dishesperDay:
# print(dish)
dish_ingredients = ingredientsObject.list(dish['dish_id'])
for ing in dish_ingredients:
if not ing["amount"]:
continue
amount = ing['amount']
if amount % 1 == 0:
amount = int(amount)
ing['amount'] = amount
element_index = next((index for (index, d) in enumerate(ingredients_obj) if d["name"] == ing['name']), None)
                if element_index is not None:
                    print("Found: " + ing['name'])
                    ingredients_obj[element_index]['amount'] += amount
else:
ingredients_obj.append(ing)
    # Sort the ingredients
ingredients_obj.sort(key=operator.itemgetter('name'))
    for ing in ingredients_obj:
        if ing['unit'] + " " + ing['name'] in ingredients:
            ingredients[ing['unit'] + " " + ing['name']] += ing['amount']
        else:
            ingredients[ing['unit'] + " " + ing['name']] = ing['amount']
for key, value in ingredients.items():
if not addTodoListToWunderlist("{0} {1}".format(value, key)) == 201:
return "Error!"
    # Test that creates a single todo
# if not addTodoListToWunderlist("2.0 Stück Chilischote") == 201:
# return "Error!"
return redirect(url_for('displayWeek', message='Zutaten wurden zur Wunderlist hinzugefügt'))
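# --- Hedged example (added for illustration, not part of the original app) ---
# The combining step of addToWunderlist in isolation: ingredients that share the
# same unit and name are summed before they are pushed to Wunderlist. The sample
# data below is made up.
def _combine_ingredients_example():
    sample = [
        {"name": "Tomate", "unit": "Stück", "amount": 3},
        {"name": "Milch", "unit": "ml", "amount": 200},
        {"name": "Tomate", "unit": "Stück", "amount": 2},
    ]
    combined = {}
    for ing in sample:
        key = ing["unit"] + " " + ing["name"]
        combined[key] = combined.get(key, 0) + ing["amount"]
    return combined  # -> {'Stück Tomate': 5, 'ml Milch': 200}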
if __name__ == "__main__":
Schema()
app.run(host='0.0.0.0', debug=True)
``` |
{
"source": "jkopka/price_tracker",
"score": 3
} |
#### File: price_tracker/app/models.py
```python
from bs4 import BeautifulSoup
import requests
from urllib.parse import urljoin
from re import sub
from decimal import Decimal
import numpy as np
import matplotlib.pyplot as plt
from flask import Markup
from urllib.parse import urlparse
import logging
import time
# Object for a single search
class SearchItem:
def __init__(self, url):
self.url = url
self.all_prices = []
self.quantity = 0
self.quantity_ignored = 0
self.search_query = ""
self.url_next_page = ""
self.searched = False
self.error = ""
def get_search_query(self):
return self.search_query
def get_percentile(self, perc):
# rint(self.all_prices)
return np.percentile(self.all_prices, perc).round(2)
def get_quantity(self):
return self.quantity
def get_quantity_ignored(self):
return self.quantity_ignored
# Plattform
class Plattform:
"""
    Central class for crawling.
    Set it up via __init__, then crawl via .fetch().
"""
def __init__(self, urls=[], keywords=[]):
"""
        Initializes the class.
        Parameters to pass: urls <list>, keywords <list>
"""
logging.basicConfig(
format="%(asctime)s %(message)s", filename="logging.log", level=logging.INFO
)
self.base_url_ebay_kleinanzeigen = "https://www.ebay-kleinanzeigen.de/"
self.base_url_ebay_de = "https://www.ebay.de/"
self.max_articles = 1000
self.urls = urls
self.keywords = [element.lower() for element in keywords]
# print(self.keywords)
self.headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
self.proxies = {
"http": None,
"https": None,
}
search_items = []
for url in urls:
            # A SearchItem is created for every URL that was passed in. This also directly checks
            # whether the URL is valid and whether it points to the mobile website.
if self.uri_validator(url) == True:
print("--------")
logging.info("URL: " + url)
print("--------")
search_items.append(SearchItem(self.get_web_version(url)))
self.search_items = search_items
def get_web_version(self, url):
"""
        Checks whether the link points to the mobile website. If so, the link to the desktop version is fetched.
        Todo: the part for eBay.de is still missing
"""
# print(url)
if "m.ebay-kleinanzeigen" in url:
print("Mobile version detected!")
r = requests.get(url, headers=self.headers, proxies=self.proxies)
doc = BeautifulSoup(r.text.replace("​", ""), "html.parser")
url = urljoin(
self.base_url_ebay_kleinanzeigen,
doc.find(id="footer-webversion-link").get("href"),
)
return url
def uri_validator(self, x):
"""
        Validates a URL
"""
try:
result = urlparse(x)
return all([result.scheme, result.netloc, result.path])
except:
return False
def set_max_articles(self, max_articles):
"""
        Sets the maximum number of articles to crawl.
"""
self.max_articles = max_articles if max_articles > 0 else 1000
def fetch_url(self, url):
"""
        Fetches a URL via requests and returns the response object,
        or False if the request could not be made at all.
"""
        r = False
        try:
            # print('...fetching with headers',url)
            r = requests.get(url, headers=self.headers, proxies=self.proxies)
            r.raise_for_status()
        except requests.RequestException:
            # The request failed or returned an error status; an error response
            # (e.g. 503) is still returned so the caller can inspect it.
            if r is not False:
                print(r.status_code)
        return r
def fetch(self):
"""
        .fetch() crawls every URL.
        No parameters. Returns True on success, False on an error.
"""
if len(self.search_items) == 0:
return False
result = []
for search_item in self.search_items:
# https://www.ebay-kleinanzeigen.de/s-boote-bootszubehoer/detmold/jolle/k0c211l1792r30
if "ebay-kleinanzeigen.de" in search_item.url:
result.append(self.fetch_page_ebay_kleinanzeigen(search_item))
elif "ebay.de" in search_item.url:
result.append(self.fetch_page_ebay_de(search_item))
else:
print("Link unbekannt! -> ", search_item.url)
            # Not implemented yet!
# elif search_item.site == 'ebay.de':
# result.append(self.fetch_page_ebay_de(search_item))
# print(result)
for res in result:
if res == False:
return False
return True
def fetch_page_ebay_kleinanzeigen(self, search_item):
"""Hole die Artikel der Seite.
Übergabe von zu holender URL + aktuelle Anzahl der Artikel.
Weitere Seiten werden über Rekursion bearbeitet.
Rückgabe: Alle Artikelpreise als list, Anzahl der bearbeiteten Artikel
"""
keywords = self.keywords
        # Fetch the page
article = self.fetch_url(search_item.url)
if article == False:
return False
doc = BeautifulSoup(article.text.replace("​", ""), "html.parser")
doc_search_query = doc.find(id="site-search-query")
        # If the title is 'Security Violation', return False
if article.status_code == 503:
search_item.error = doc.select_one("title").text.strip()
print("Error-Code: ", article.status_code)
# print(doc)
return False
if doc.select_one("title").text.strip() == "Security Violation (503)":
print("Security Violation (503)")
# print(doc)
search_item.error = doc.select_one("title").text.strip()
return False
elif doc_search_query is None:
print("None")
# print(doc)
search_item.error = "None"
return False
        # Store the search query
search_item.search_query = doc_search_query.get("value")
all_prices = []
for element in doc.select(".aditem"):
            # Link to the article
            # link = element.select_one('.ellipsis').get('href')
            # Get the title
title = element.select_one(".ellipsis").text.strip().lower()
            # Exclude articles whose title contains a keyword
if [title for keyword in keywords if (keyword in title)]:
# print('Keyword!Title')
search_item.quantity_ignored += 1
continue
            # Exclude articles whose teaser description contains a keyword
descr = element.select_one(".aditem-main p").text.strip().lower()
if [descr for keyword in keywords if (keyword in descr)]:
# print('Keyword!Descr')
search_item.quantity_ignored += 1
continue
            # Get the price
price = element.select_one(".aditem-details").strong.text.strip()
            # Clean the price
price = self.clean_price( price)
if price == False:
search_item.quantity_ignored += 1
continue
# print(" # ", title, price)
search_item.quantity += 1
all_prices.append(price)
        # Go to the next page
next_page = doc.select_one(".pagination-next")
# print(next_page)
        # If there is a link to the next page and the number of ads does not exceed self.max_articles...
if next_page and search_item.quantity < self.max_articles:
search_item.url_next_page = urljoin(
self.base_url_ebay_kleinanzeigen, next_page.get("href")
)
# print(url_next_page)
time.sleep(0.4)
print("next page!", search_item.quantity)
self.fetch_page_ebay_kleinanzeigen(search_item)
        # Accumulate the prices found on this page on the search item
        # (the previous membership check compared the query string against prices and always failed,
        # which silently dropped the prices of all pages but the first)
        search_item.all_prices.extend(all_prices)
search_item.searched = True
self.searched = True
return True
def fetch_page_ebay_de(self, search_item):
"""Hole die Artikel der Seite.
Übergabe von zu holender URL + aktuelle Anzahl der Artikel.
Weitere Seiten werden über Rekursion bearbeitet.
Rückgabe: Alle Artikelpreise als list, Anzahl der bearbeiteten Artikel
"""
keywords = self.keywords
        # Fetch the page
article = self.fetch_url(search_item.url)
if article == False:
return False
doc = BeautifulSoup(article.text.replace("​", ""), "html.parser")
doc_search_query = doc.find(id="gh-ac")
        # If the title is 'Security Violation', return False
if article.status_code == 503:
search_item.error = doc.select_one("title").text.strip()
print("Error-Code: ", article.status_code)
# print(doc)
return False
if doc.select_one("title").text.strip() == "Security Violation (503)":
print("Security Violation (503)")
# print(doc)
search_item.error = doc.select_one("title").text.strip()
return False
elif doc_search_query is None:
print("None")
# print(doc)
search_item.error = "None"
return False
        # Store the search query
search_item.search_query = doc_search_query.get("value")
all_prices = []
for element in doc.select(".sresult"):
            # Link to the article
            # link = element.select_one('.ellipsis').get('href')
            # Get the title
title = (
element.select_one(".lvtitle")
.text.replace("Neues Angebot", "")
.strip()
.lower()
)
            # Exclude articles whose title contains a keyword
if [title for keyword in keywords if (keyword in title)]:
# print('Keyword!Title')
search_item.quantity_ignored += 1
continue
            # Get the price
price = element.select_one(".lvprice").text.strip()
            # Clean the price
price = self.clean_price( price)
if price == False:
search_item.quantity_ignored += 1
continue
# print(' # ', title, price)
search_item.quantity += 1
all_prices.append(price)
# print(title,': ', price)
        # Go to the next page
next_page = doc.select_one(".pagn-next .gspr")
# print(next_page)
        # If there is a link to the next page and the number of ads does not exceed self.max_articles...
if next_page and search_item.quantity < self.max_articles:
search_item.url_next_page = urljoin(
self.base_url_ebay_de, next_page.get("href")
)
# print(url_next_page)
time.sleep(0.4)
print("next page!", search_item.quantity)
            self.fetch_page_ebay_de(search_item)
        # Accumulate the prices found on this page on the search item
        # (same fix as in fetch_page_ebay_kleinanzeigen: the old membership check always failed)
        search_item.all_prices.extend(all_prices)
search_item.searched = True
self.searched = True
return True
def clean_price( self, price):
'''
        Takes the original price string and filters various formats. False is returned if the price is not unambiguous.
'''
cleaning_strings_cut = ('UVP','(','Bisher')
if price == "VB" or price.strip() == "" or "bis" in price or "Zu verschenken" in price:
return False
for string_cut in cleaning_strings_cut:
if string_cut in price:
price = price[:price.index(string_cut)].strip()
try:
if '.' in price:
price = price.replace('.','')
price = float(
price.replace(" €", "")
.replace("EUR", "")
.replace(',','.')
.replace(" VB", "")
.strip()
)
except:
return False
return price
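    # --- Hedged examples (added for illustration, not part of the original file) ---
    # Expected behaviour of clean_price() based on the logic above:
    #   clean_price("1.234,56 €")     -> 1234.56
    #   clean_price("55 € VB")        -> 55.0
    #   clean_price("VB")             -> False  (no definite price)
    #   clean_price("Zu verschenken") -> False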
def get_error(self):
"""
        Returns all errors collected so far
"""
error = ""
for search_item in self.search_items:
if not search_item.error == "":
error += Markup(search_item.url + ": " + search_item.error)
return error
def get_search_querys(self):
"""
        Returns the search queries for display.
"""
if len(self.search_items) > 1:
search_querys_text = ""
for search_item in self.search_items:
if not search_querys_text == "":
search_querys_text += " - "
search_querys_text += search_item.search_query
else:
search_querys_text = self.search_items[0].search_query
return search_querys_text
def get_plot(self):
"""
        Generates the box plot for the URLs.
        The return value is a PNG encoded as a base64 data URI.
"""
import io
import base64
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
matplotlib.use("agg")
fig, axs = plt.subplots()
all_prices_list = []
labels_list = []
for search_item in self.search_items:
all_prices_list.append(search_item.all_prices)
labels_list.append(search_item.search_query)
axs.boxplot(all_prices_list, labels=labels_list)
# Convert plot to PNG image
pngImage = io.BytesIO()
FigureCanvas(fig).print_png(pngImage)
# Encode PNG image to base64 string
pngImageB64String = "data:image/png;base64,"
pngImageB64String += base64.b64encode(pngImage.getvalue()).decode("utf8")
return pngImageB64String
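# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# How the class above is meant to be driven according to its docstring: configure a
# Plattform with search URLs and exclusion keywords, crawl with .fetch(), then read
# the per-search results. The URL below is a placeholder, not a real saved search.
if __name__ == "__main__":
    crawler = Plattform(
        urls=["https://www.ebay-kleinanzeigen.de/s-jolle/k0"],  # placeholder search URL
        keywords=["defekt", "bastler"],  # adverts containing these words are skipped
    )
    crawler.set_max_articles(100)
    if crawler.fetch():
        for item in crawler.search_items:
            print(item.get_search_query(), "-", item.get_quantity(), "articles,",
                  "median price:", item.get_percentile(50))
    else:
        print(crawler.get_error())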
``` |
{
"source": "jkopka/routefinder",
"score": 3
} |
#### File: jkopka/routefinder/map_engine.py
```python
import pygame, random
import configparser
from pygame import Color, Surface, draw
from pygame.locals import *
from PIL import Image
import os
# Configuration
config = configparser.ConfigParser()
config.read('config.ini')
WHITE = Color(config['COLOR']['WHITE'])
BLUE = Color(config['COLOR']['BLUE'])
GREEN = Color(config['COLOR']['GREEN'])
RED = Color(config['COLOR']['RED'])
YELLOW = Color(config['COLOR']['YELLOW'])
BLACK = Color(config['COLOR']['BLACK'])
GREY = Color(config['COLOR']['GREY'])
# BG_COLOR = (10, 30, 30)
BG_COLOR = WHITE
DISPLAY_WIDTH = int(config['DISPLAY']['DISPLAY_WIDTH'])
DISPLAY_HEIGHT = int(config['DISPLAY']['DISPLAY_HEIGHT'])
MAP_WIDTH = int(config['MAP']['MAP_WIDTH'])
MAP_HEIGHT = int(config['MAP']['MAP_HEIGHT'])
# Stores the data of a tile type:
class TileType(object):
    # In the constructor we store the name
    # and create the rect (the area) of this type on the tileset graphic.
def __init__(self, name, color, width, height):
self.name = name
self.rect = pygame.rect.Rect(0, 0, width, height)
self.color = color
self.width = width
self.height = height
self.status = -1
# Stores the data of a tile type when a tileset graphic is used:
class TileTypeGraphic(object):
    # In the constructor we store the name
    # and create the rect (the area) of this type on the tileset graphic.
def __init__(self, name, tile_set_grafic_coords, cost, width, height):
self.name = name
self.tile_set_grafic_coords = tile_set_grafic_coords
self.width = width
self.height = height
self.cost = cost
self.status = -1
# Manages a list of tile types.
class Tileset(object):
    # In the constructor we create an empty dictionary for the tile types.
def __init__(self, colorkey, tile_width, tile_height):
self.tile_width = tile_width
self.tile_height = tile_height
self.color = colorkey
self.tile_types = dict()
def load_tile_table(self, filename, width, height):
        # Load the tileset graphic and return the individual tiles
image = pygame.image.load(filename).convert()
image_width, image_height = image.get_size()
tile_table = []
for tile_x in range(0, image_width//width):
line = []
tile_table.append(line)
for tile_y in range(0, image_height//height):
rect = (tile_x*width, tile_y*height, width, height)
line.append(image.subsurface(rect))
return tile_table
    # Add a new tile type.
def add_tile(self, name, color):
self.tile_types[name] = TileType(name, color, self.tile_width, self.tile_height)
def add_tile_graphic(self, name, cost, tile_set_grafic_coords):
        # Adds a new type with its coordinates on the tileset graphic to the tile types
self.tile_types[name] = TileTypeGraphic(name, tile_set_grafic_coords, cost, self.tile_width, self.tile_height)
    # Try to find a tile type in the list by its name.
    # If the name does not exist, we return None.
def get_tile(self, name):
try:
return self.tile_types[name]
except KeyError:
return None
# The Tilemap class manages the tile data that describes the look of the map.
class Tilemap(object):
def __init__(self, screen, position_finish, font_UI, map_file='none', tile_set_file='none'):
        # We create a new tileset.
        # Here (following the tutorial this is based on) we add the tile types manually.
self.tileset = Tileset((255, 0, 255), 32, 32)
self.screen = screen
self.position_finish = position_finish
        # If True, info is displayed on the individual tiles
self.print_tileinfo = True
self.print_pre_tile_lines = False
        # Create an empty list for the tile data.
self.tiles = list()
        # Array for the status text box
self.status_text = []
if tile_set_file == "none":
            # Colors, in case no tileset graphic was configured
self.tileset.add_tile("blank",WHITE)
self.tileset.add_tile("wall",BLACK)
self.tileset.add_tile("start",(255, 0, 100))
self.tileset.add_tile("finish",RED)
self.tileset.add_tile("route",(0, 0, 255))
self.tileset.add_tile("closed",GREY)
self.tileset.add_tile("open",YELLOW)
self.use_tile_set_file = False
else:
            # If there is a tileset graphic, load it and assign the tiles
self.tile_set_table = self.tileset.load_tile_table(tile_set_file, 32, 32)
self.tileset.add_tile_graphic("blank",1,(0,0))
self.tileset.add_tile_graphic("wall",-1,(31,0))
self.tileset.add_tile_graphic("forest",-1,(5,0))
self.tileset.add_tile_graphic("start",1,(19,7))
self.tileset.add_tile_graphic("finish",1,(28,4))
self.tileset.add_tile_graphic("route",1,(0,7))
self.tileset.add_tile_graphic("closed",1,(16,6))
self.tileset.add_tile_graphic("open",1,(0,6))
self.use_tile_set_file = True
if map_file == "none":
            # The size of the map in tiles.
self.width = MAP_WIDTH
self.height = MAP_HEIGHT
self.create_random_map()
else:
self.create_map_from_file(map_file)
        # Set the camera's start position to the center of the map.
self.camera_x = self.width/2
self.camera_y = self.height/2
# print(DISPLAY_WIDTH, self.width)
        # Adjust the zoom factor
if self.width < self.height:
self.tileset.tile_width = DISPLAY_WIDTH/self.width
self.tileset.tile_height = DISPLAY_WIDTH/self.height
else:
self.tileset.tile_height = DISPLAY_HEIGHT/self.height
self.tileset.tile_width = DISPLAY_HEIGHT/self.height
scaled_size = (int(MAP_HEIGHT*self.tileset.tile_height), int(MAP_WIDTH*self.tileset.tile_height))
self.map_image_scaled = pygame.transform.scale(self.map_image, scaled_size)
self.tiles[10][10] = "start"
self.tiles[self.position_finish[0]][self.position_finish[1]] = "finish"
# print(self.tiles)
pygame.font.init()
# font_size = 12*self.tileset.tile_width/40
font_size = 7
# print(int(font_size),self.tileset.tile_width)
self.font = pygame.font.SysFont("futura", int(font_size))
self.font_line_height = 15
self.font_UI = font_UI
    # Returns the tile type at the given position
def get_tile_type( self, position):
"""Liefert den Tiletypen an gegebener Position zurück"""
return self.tiles[position[0]][position[1]]
def zoom(self,zoom_rate,focus_center):
# print()
# print('Tilesize vorher: ', self.tileset.tile_height, self.tileset.tile_width)
# print('Camera Vorher: ', self.camera_y,self.camera_x)
# print('Focus Screen: ', focus_center)
#
tile_height_old = self.tileset.tile_height
tile_focus_y = focus_center[0]/(self.tileset.tile_height*self.tileset.tile_height)
tile_focus_x = focus_center[1]/(self.tileset.tile_width*self.tileset.tile_width)
# print('Focus Tile: ', tile_focus_y, tile_focus_x)
self.camera_y = self.camera_y + tile_focus_y
self.camera_x = self.camera_x + tile_focus_x
# print('Camera Nachher: ', self.camera_y,self.camera_x)
# tile_middle_x = int(DISPLAY_WIDTH/self.tileset.tile_width/2+tile_focus_x)
# tile_middle_y = int(DISPLAY_HEIGHT/self.tileset.tile_height/2+tile_focus_y)
self.tileset.tile_height += zoom_rate
self.tileset.tile_width += zoom_rate
# print('Tilesize nachher: ', self.tileset.tile_height, self.tileset.tile_width)
if self.tileset.tile_height < 1:
self.tileset.tile_height = 1
if self.tileset.tile_width < 1:
self.tileset.tile_width = 1
        # Calculate the new font size
font_size = 12*self.tileset.tile_width/40
self.font_line_height = 15*self.tileset.tile_width/40
self.font = pygame.font.SysFont("futura", int(font_size))
        # Scale the base map
# zoom_factor = 1/tile_height_old*self.tileset.tile_height
scaled_size = (int(MAP_HEIGHT*self.tileset.tile_height), int(MAP_WIDTH*self.tileset.tile_height))
# scaled_size = (int(self.map_image_scaled.get_width()*zoom_factor), int(self.map_image_scaled.get_height()*zoom_factor))
# print(zoom_factor, self.map_image_scaled.get_size(), scaled_size)
self.map_image_scaled = pygame.transform.scale(self.map_image, scaled_size)
# print(MAP_HEIGHT,MAP_WIDTH, self.map_image.get_size(), self.map_image_scaled.get_size(), self.tileset.tile_height)
def create_map_from_file(self, map_file):
global MAP_HEIGHT, MAP_WIDTH
        # Load the map
map_file = Image.open(map_file)
        # Derive MAP_WIDTH and MAP_HEIGHT from the dimensions of the map
MAP_WIDTH, MAP_HEIGHT = map_file.size
        # Create an empty surface for the map graphic
self.map_image = pygame.Surface((MAP_WIDTH*32, MAP_HEIGHT*32))
        # Iterate over x and y and create the tiles based on the pixel color
for x in range(0,MAP_WIDTH):
self.tiles.append(list())
for y in range(0, MAP_HEIGHT):
# print(x,y, map_file.size)
pixel_color = map_file.getpixel((y,x))
if pixel_color < 200:
self.tiles[x].append('wall')
# elif pixel_color >= 10 and pixel_color < 200:
# self.tiles[x].append('forest')
else:
self.tiles[x].append('blank')
self.width = MAP_WIDTH
self.height = MAP_HEIGHT
for x in range(0, len(self.tiles)):
for y in range(0, len(self.tiles[x])):
# print(x,y, map_file.size, len(self.tiles[y]))
tile = self.tileset.get_tile(self.tiles[y][x])
self.map_image.blit(self.tile_set_table[tile.tile_set_grafic_coords[1]][tile.tile_set_grafic_coords[0]], (x*32, y*32))
scaled_size = (MAP_HEIGHT*32,MAP_WIDTH*32)
self.map_image_scaled = pygame.transform.scale(self.map_image, scaled_size)
# print(MAP_HEIGHT,MAP_WIDTH, map_file.size, self.map_image.get_size(), self.map_image_scaled.get_size(), self.tileset.tile_height)
# quit()
    # Create the graphic of the map
def create_random_map( self):
""" Erstellt eine Karte, die per Zufall mit Mauern gefüllt wird """
# Manuelles Befüllen der Tile-Liste:
# Jedes Feld bekommt ein zufälliges Tile zugewiesen.
for i in range(0, self.height):
self.tiles.append(list())
for j in range(0, self.width):
x = random.randint(0, 30)
if x < 28:
self.tiles[i].append("blank")
else:
self.tiles[i].append("wall")
    # Alternative render function
    # The original one is cell-based, this one is supposed to be pixel-accurate.
    # Here we render the visible part of the map.
def render(self, screen,navi_tiles):
font = self.font
tile_width = self.tileset.tile_width
tile_height = self.tileset.tile_height
        # Calculate the size of the tiles
scaled_size = (int(MAP_HEIGHT*tile_height), int(MAP_WIDTH*tile_height))
scaled_size_tile = (int(tile_height), int(tile_width))
        # Number of tiles that fit on the screen
tile_count_x = DISPLAY_WIDTH/tile_width
tile_count_y = DISPLAY_HEIGHT/tile_height
        # Calculate from the camera position at the top left. The tiles are placed relative to this.
camera_null_x = int(self.camera_x-tile_count_x/2)
camera_null_y = int(self.camera_y-tile_count_y/2)
        # Draw the base map
position1 = (-camera_null_x * tile_width,-camera_null_y * tile_height)
screen.blit(self.map_image_scaled, position1)
# return True
        # Go through the tiles row by row.
for y in range(0, int(screen.get_height() / tile_height) + 1):
            # Take the camera position into account.
ty = y + camera_null_y
if ty >= self.height or ty < 0:
continue
            # Store the current row for easier access.
line = self.tiles[ty]
            # And now render the tiles column by column.
            # tx and ty are the position in the array
            # x and y are the position of the tile on the screen
for x in range(0, int(screen.get_width() / tile_width) + 1):
                # Here too we have to take the camera into account.
tx = x + camera_null_x
# print( int(screen.get_width() / tile_width) + 1, tx, len(line))
if tx >= self.width or tx < 0 or tx >= len(line):
continue
                # We try to get the data of the tile.
# tilename = line[tx]
navi_tile = navi_tiles[ty][tx]
tilename = navi_tile.type
if tilename == "blank" or tilename == "wall":
continue
tile = self.tileset.get_tile(tilename)
                # If that does not fail we can blit the tile onto the screen surface.
if tile is not None:
# screen.fill(tile.color, tile.rect)
tile_pos_x = x*tile_width
tile_pos_y = y*tile_height
# if navi_tiles[y][x].status == 0:
# pygame.draw.rect(screen,YELLOW, pygame.rect.Rect(tile_pos_x,tile_pos_y,tile_width,tile_height),1)
# else:
# pygame.draw.rect(screen,tile.color, pygame.rect.Rect(tile_pos_x,tile_pos_y,tile_width,tile_height),1)
if navi_tile.status == 0 and not tilename == 'route':
continue
# if not self.use_tile_set_file:
# # self.map_image_scaled = pygame.transform.scale(self.map_image, scaled_size)
# screen.blit(pygame.transform.scale(self.tile_set_table[tile.tile_set_grafic_coords[1]][tile.tile_set_grafic_coords[0]], scaled_size_tile), (tile_pos_x, tile_pos_y))
# else:
# # print(tile.tile_set_grafic_coords, len(self.tile_set_table))
# screen.blit(pygame.transform.scale(self.tile_set_table[tile.tile_set_grafic_coords[1]][tile.tile_set_grafic_coords[0]], scaled_size_tile), (tile_pos_x, tile_pos_y))
# # screen.fill(BLACK, pygame.rect.Rect(tile_pos_x,tile_pos_y,tile_width,tile_height))
else:
if self.use_tile_set_file:
tileset_graphic_y = tile.tile_set_grafic_coords[0]
tileset_graphic_x = tile.tile_set_grafic_coords[1]
# print(tile.name, tile.tile_set_grafic_coords, tileset_graphic_x, tileset_graphic_y)
# print(len(self.tile_set_table),tileset_graphic_x)
# print(self.tile_set_table[tileset_graphic_x])
# print(len(self.tile_set_table[tileset_graphic_x]))
tileset_graphic = self.tile_set_table[tileset_graphic_x][tileset_graphic_y]
screen.blit(pygame.transform.scale(tileset_graphic, scaled_size_tile), (tile_pos_x, tile_pos_y))
else:
screen.fill(tile.color, pygame.rect.Rect(tile_pos_x,tile_pos_y,tile_width,tile_height))
# print(tile_pos_y,tile_pos_x)
                # Draw the pre_tile vectors
if not navi_tile.pre_tile == -1 and self.print_pre_tile_lines == True:
pre_tile = navi_tile.pre_tile
pre_tile_x = pre_tile[0]
pre_tile_y = pre_tile[1]
# pre_tile_pos = (pre_tile_y*tile_height, pre_tile_x*tile_width)
pre_tile_pos = (position1[0] + (pre_tile_y*tile_height + tile_width/2), position1[1] + (pre_tile_x*tile_width + tile_width/2))
# print(position1, tile_height, tile_width, y, x, navi_tile.position, tile_pos_y, tile_pos_x, '|', pre_tile, pre_tile_y, pre_tile_x, pre_tile_pos, camera_null_y, camera_null_x)
pygame.draw.line(screen, BLUE, (tile_pos_x+tile_width/2,tile_pos_y+tile_height/2), pre_tile_pos, 1)
                # Render text overlays above the individual tiles
if navi_tile and not tilename == "wall" and tile_width > 15 and self.print_tileinfo:
                    # Insert the estimated cost
text_to_render = str(navi_tile.get_cost_from_start())
# text_to_render = tilename
                    # Cost from the start
if not tilename == "closed":
text_to_render = str(navi_tile.get_cost_from_start())
tile_text_cost = font.render(text_to_render, True, (200, 0, 200), BLACK)
screen.blit(tile_text_cost,(tile_pos_x+1,tile_pos_y+1))
                    # Insert the tile position
# text_to_render = str(ty) + "," + str(tx)
text_to_render = str(round(navi_tile.get_total_cost(),2))
tile_text_pos = font.render(text_to_render, True, (200, 0, 200), BLACK)
screen.blit(tile_text_pos,(tile_pos_x+1,tile_pos_y+self.font_line_height))
self.print_status_text()
def render_one_tile(self, tile):
tile_width = self.tileset.tile_width
tile_height = self.tileset.tile_height
        # Calculate the size of the tile
scaled_size_tile = (int(tile_height), int(tile_width))
        # Number of tiles that fit on the screen
tile_count_x = DISPLAY_WIDTH/tile_width
tile_count_y = DISPLAY_HEIGHT/tile_height
        # Calculate from the camera position at the top left. The tiles are placed relative to this.
camera_null_x = int(self.camera_x-tile_count_x/2)
camera_null_y = int(self.camera_y-tile_count_y/2)
ty = tile.position[1] + camera_null_y
tx = tile.position[0] + camera_null_x
tile_type = self.tileset.get_tile(tile.type)
if tile_type is not None:
# screen.fill(tile.color, tile.rect)
tile_pos_x = tile.position[1]*tile_width
tile_pos_y = tile.position[0]*tile_height
            if self.use_tile_set_file:
                # Blit the scaled tile graphic from the tileset; tile_set_table only exists
                # when a tileset graphic is in use, and the coordinates live on the TileTypeGraphic
                self.screen.blit(pygame.transform.scale(self.tile_set_table[tile_type.tile_set_grafic_coords[1]][tile_type.tile_set_grafic_coords[0]], scaled_size_tile), (tile_pos_x, tile_pos_y))
            else:
                self.screen.fill(RED, pygame.rect.Rect(tile_pos_x,tile_pos_y,tile_width,tile_height))
def add_status_text(self, text):
if len(self.status_text) > 2:
self.status_text.pop(0)
self.status_text.append(text)
def add_status_text_with_clear(self, text):
self.status_text = []
self.add_status_text(text)
def print_status_text(self):
line = 1
for text in self.status_text:
text_to_print = self.font_UI.render(text, True, (200, 0, 200), BLACK)
self.screen.blit(text_to_print,(self.screen.get_width()/2-(text_to_print.get_width()/2),self.screen.get_height()-80+line*15))
line += 1
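# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# Minimal use of Tileset without opening a window: register colour tiles and look one
# up by name. Assumes pygame is installed and the repo's config.ini is present (it is
# read at import time above).
if __name__ == "__main__":
    demo_tileset = Tileset((255, 0, 255), 32, 32)
    demo_tileset.add_tile("blank", WHITE)
    demo_tileset.add_tile("wall", BLACK)
    wall = demo_tileset.get_tile("wall")
    print(wall.name, wall.width, wall.height)  # -> wall 32 32
    print(demo_tileset.get_tile("water"))      # -> None, unknown names return None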
```
#### File: jkopka/routefinder/navi.py
```python
from priorityqueue import PriorityQueue
from math import sqrt
from pygame import Color
import configparser
# Konfiguration
config = configparser.ConfigParser()
config.read('config.ini')
WHITE = Color(config['COLOR']['WHITE'])
BLUE = Color(config['COLOR']['BLUE'])
GREEN = Color(config['COLOR']['GREEN'])
RED = Color(config['COLOR']['RED'])
YELLOW = Color(config['COLOR']['YELLOW'])
BLACK = Color(config['COLOR']['BLACK'])
GREY = Color(config['COLOR']['GREY'])
# BG_COLOR = (10, 30, 30)
BG_COLOR = WHITE
DISPLAY_WIDTH = int(config['DISPLAY']['DISPLAY_WIDTH'])
DISPLAY_HEIGHT = int(config['DISPLAY']['DISPLAY_HEIGHT'])
MAP_WIDTH = int(config['MAP']['MAP_WIDTH'])
MAP_HEIGHT = int(config['MAP']['MAP_HEIGHT'])
# Navigation classes
class TileInfo:
def __init__(self, position, tile_type, estimated_cost_to_finish, cost_from_start,pre_tile):
self.position = position
self.estimated_cost_to_finish = estimated_cost_to_finish
self.cost_from_start = cost_from_start
self.pre_tile = pre_tile
self.route_cost = cost_from_start + estimated_cost_to_finish
self.type = tile_type
        # Status of the tile
        # -1 > not processed yet
        # 0 > processed
self.status = -1
def set_cost_from_start(self, cost):
self.cost_from_start = cost
def get_cost_from_start(self):
return self.cost_from_start
def set_route_cost(self, cost):
self.route_cost = cost
def get_route_cost(self):
return self.route_cost
def get_estimated_cost_to_finish(self):
return self.estimated_cost_to_finish
def get_total_cost(self):
return self.route_cost
class Navi:
def __init__(self, position_start, position_finish, map):
tiles = []
self.open_list = PriorityQueue()
self.closed_list = PriorityQueue()
# queue = PriorityQueue()
self.map = map
self.finish_reached = False
self.route = []
self.position_start = position_start
self.position_finish = position_finish
self.open_list.insert(TileInfo(position_start,self.get_estimated_cost_to_finish(position_start),0,-1,-1))
for y in range(0,map.height):
tiles.append([])
for x in range(0, map.width):
tile = TileInfo((y,x), map.tiles[y][x], self.get_estimated_cost_to_finish((y,x)),99999,-1)
tiles[y].append(tile)
self.tiles = tiles
self.navi_active = False
self.recursion_level = 0
self.max_recursion_level = 100
self.use_diagonal_tiles = True
        # Array of offsets for querying the surrounding tiles
self.surroundings = []
if self.use_diagonal_tiles == True:
self.surroundings.append((-1,-1))
self.surroundings.append((-1,+1))
self.surroundings.append((+1,-1))
self.surroundings.append((+1,+1))
self.surroundings.append((-1,0))
self.surroundings.append((0,-1))
self.surroundings.append((0,+1))
self.surroundings.append((+1,0))
def navi_step(self, tile_work='next'):
# map = self.map
# print('navistep')
self.recursion_level += 1
if tile_work == 'next':
tile_work = self.open_list.get_and_delete()
# pre_tile = self.tiles[tile_work.position[0]][tile_work.position[1]].pre_tile
        # Get the predecessor tile of the work tile
pre_tile = self.get_pre_tile(tile_work)
        # If the tile is > -1, get its cost from the start.
if not pre_tile == -1:
pre_tile_cost_from_start = self.tiles[pre_tile[0]][pre_tile[1]].cost_from_start
else:
pre_tile_cost_from_start = -1
        # If the work tile is at the finish position, i.e. the finish has been reached.
if tile_work.position == self.position_finish:
self.map.add_status_text_with_clear("FINISH")
tile_work.set_route_cost(pre_tile_cost_from_start + 1)
self.route_finished(tile_work)
self.finish_reached = True
if pre_tile_cost_from_start >= 99999:
pre_tile_cost_from_start = 0
        # Work tile: the cost from the start is the pre-tile's cost + 1
tile_work_cost_from_start = pre_tile_cost_from_start + 1
tile_work.set_cost_from_start(tile_work_cost_from_start)
tile_work.set_route_cost(self.get_estimated_cost_to_finish(tile_work.position)+tile_work.cost_from_start)
tile_work.status = 0
        # The work tile has been calculated and can therefore go on the closed list
self.closed_list.insert(tile_work)
self.tiles[tile_work.position[0]][tile_work.position[1]].type = "closed"
        # To continue, we get the surrounding tiles
surrounding_tiles = self.get_surrounding_tiles(tile_work.position)
        # As long as we have not processed all tiles yet, we keep running the while loop
while not surrounding_tiles.isEmpty():
# print(surrounding_tiles.get_size())
surrounding_tile = surrounding_tiles.get_and_delete()
if surrounding_tile == False:
# print("Surround: no next tiles")
break
if surrounding_tile.type == "wall":
# print('Surround: wall')
continue
tile_cost_from_start = tile_work_cost_from_start + 1
if self.closed_list.exist(surrounding_tile):
                # If a tile is already in the closed list, it has been added before
                # and is skipped here
# print('Surround: is in closedlist')
continue
elif self.open_list.exist(surrounding_tile):
                # If a tile is already in the open list, it has been added before.
                # We then check whether the new path to it is cheaper.
# print('Surround: is in openlist')
tile_from_open_list = self.open_list.get_tile_and_delete(surrounding_tile)
# print(tile_from_open_list.cost_from_start, tile_cost_from_start)
if tile_from_open_list.cost_from_start + 1 >= tile_cost_from_start:
# print('Surround: Neuer Weg ist teurer')
continue
else:
# print('Surround: Neuer Weg ist günstiger')
tile_from_open_list.cost_from_start = surrounding_tile.cost_from_start+1
tile_from_open_list.set_route_cost(self.get_estimated_cost_to_finish(tile_from_open_list.position)+tile_work_cost_from_start)
self.open_list.insert(tile_from_open_list)
continue
else:
if surrounding_tile.position == tile_work.pre_tile:
                    # If the surrounding tile is the predecessor of tile_work, it can be ignored
continue
                # If nothing has ruled it out so far, the tile is valid to be processed in a navi step
                # Set the pre-tile
surrounding_tile.pre_tile = tile_work.position
                # Also set the pre-tile in the tiles list
self.tiles[surrounding_tile.position[0]][surrounding_tile.position[1]].pre_tile = tile_work.position
                # Insert into the open list
self.open_list.insert(surrounding_tile)
                # Mark the corresponding tile as open
self.tiles[surrounding_tile.position[0]][surrounding_tile.position[1]].type = "open"
# print("Open List: ", self.open_list.get_size())
# print("Closed List: ", self.closed_list.get_size())
# print(self.finish_reached)
# if self.finish_reached == False and self.recursion_level < self.max_recursion_level:
# self.navi_step()
self.recursion_level = 0
return (tile_work.position, tile_work.route_cost)
# self.navi_step(tile.position,position)
def route_finished(self,tile):
""" Route wurde gefunden! """
route = []
route.append(tile.position)
next_tile = tile.pre_tile
while True:
route.append(next_tile)
if len(route) > 1000:
print('Finish: Route > 1000')
break
# print(next_tile)
next_tile = self.tiles[next_tile[0]][next_tile[1]].pre_tile
if next_tile == self.position_start:
print('Finish: Start erreicht.')
break
if next_tile == -1:
break
for tile_position in route:
self.tiles[tile_position[0]][tile_position[1]].type = "route"
self.map.add_status_text("Kosten: "+ str(tile.get_route_cost()))
print("Kosten: ", tile.get_route_cost())
self.map.add_status_text("Länge Route: "+ str(len(route)))
print("Länge Route: ",len(route))
# print(route)
self.navi_active = False
self.position_start = tile.position
def get_next_navi_tile(self, surrounding_tiles, position, last_position):
""" Liefert den nächsten Navi-Tile zurück. Checkt, ob alle Bedingungen eingehalten werden."""
# Bedingungen:
# 1. Tiletype != wand
# 2. Tiletype != navi
# 3. Tiletype != last_position
# 4. Tile ist in self.queue
for tile in surrounding_tiles:
if not tile:
return False
tile_type = self.map.get_tile_type(tile.position)
print(tile.position, tile_type)
if not tile_type == "wall" and not tile_type == "navi" and not tile.position == last_position:
return tile
print("Sackgasse?")
return False
# if tile_surround.position == self.position_finish:
# print("FINISH")
# print("Routenlänge: ",len(self.route))
def get_estimated_cost_to_finish(self, position):
""" Liefert die estimated cost an gegebener Position zurück."""
distance_to_point = float(sqrt((position[0]-self.position_finish[0])**2 + (position[1]-self.position_finish[1])**2))
return distance_to_point
def get_pre_tile(self, tile):
""" Liefert den Vorgänger zurück """
# print('get_pre_tile()')
surrounding_tiles = self.get_surrounding_tiles(tile.position, order='start')
# print('surrounding_tiles: ', surrounding_tiles)
pre_tile = surrounding_tiles.get_and_delete()
# print('pre_tile: ', pre_tile)
return pre_tile.position
def get_surrounding_tiles(self, position, order='finish'):
""" Liefert eine Queue der angrenzenden Tiles zurück."""
tiles = PriorityQueue(order)
# print('Order: ', order)
# self.surroundings
for surround in self.surroundings:
            # Check the map borders
            # y bottom
            if position[0] == len(self.tiles)-1 and surround[0] == +1:
                continue
            # y top
            if position[0] == 0 and surround[0] == -1:
                continue
            # x right
            if position[1] == len(self.tiles[0])-1 and surround[1] == +1:
                continue
            # x left
            if position[1] == 0 and surround[1] == -1:
                continue
x = position[1]+surround[1]
y = position[0]+surround[0]
tiles.insert(self.tiles[y][x])
        # If the position is at the lower edge of the y axis
# tiles.sort(key=lambda x: x.estimated_cost_to_finish, reverse=False)
return tiles
def show_open_list(self):
for item in self.open_list.queue:
print(item.position, item.get_estimated_cost_to_finish())
def get_open_list(self):
return self.open_list.queue
def get_closed_list(self):
return self.closed_list.queue
def show_closed_list(self):
for item in self.closed_list.queue:
print(item.position)
def get_finish_tile(self):
return self.tiles[self.position_finish[0]][self.position_finish[1]]
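# --- Hedged example (added for illustration, not part of the original file) ---
# The heuristic above is the straight-line (Euclidean) distance to the finish, which
# combined with cost_from_start gives the A*-style total cost used to order the open
# list. A standalone check of the formula:
if __name__ == "__main__":
    start, finish = (0, 0), (3, 4)
    straight_line = float(sqrt((start[0] - finish[0]) ** 2 + (start[1] - finish[1]) ** 2))
    print(straight_line)  # -> 5.0 for the classic 3-4-5 triangle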
``` |
{
"source": "jkoppenhaver/neopixel-animate",
"score": 3
} |
#### File: src/animations/chase.py
```python
from animator_base import AnimatorBase
# This class can be used as a starting point for new animations
class Chase(AnimatorBase):
def __init__(self, np, color, width=1, period=0.1):
# Add any initialization and variables you need here
self.index = 0
self.width = width
self.n = np.n
self.color = color
self.off = tuple([0] * len(color))
# You can also pass a different period to the Animator Base if you need to
super().__init__(np, int(period*1000))
def update(self, timer):
# This update function is called at regular intervals by the Animator Base
# Replace 'pass' with your pattern updates and np.write()
self.np[self.index-self.width] = self.off
self.np[self.index] = self.color
if(self.index == self.n-1):
self.index = 0
else:
self.index = self.index + 1
self.np.write()
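# --- Hedged example (added for illustration, not part of the original file) ---
# The same "moving dot" update as Chase.update(), simulated on a plain list instead of
# a real NeoPixel strip, so the wrap-around can be inspected without MicroPython
# hardware or the AnimatorBase timer (running this file still requires the repo's
# animator_base module because of the import at the top).
if __name__ == "__main__":
    n, color, off = 8, (0, 0, 32), (0, 0, 0)
    pixels = [off] * n
    index = 0
    for _ in range(12):              # simulate 12 timer ticks
        pixels[index - 1] = off      # clear the previous pixel (width = 1)
        pixels[index] = color        # light the current pixel
        index = 0 if index == n - 1 else index + 1
        print(index, pixels)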
``` |
{
"source": "JKorang/eufy_vacuum",
"score": 3
} |
#### File: custom_components/eufy_vacuum/property.py
```python
import enum
class StringEnum(enum.Enum):
def __str__(self):
return self.value
class DeviceProperty:
def __init__(self, key, type_cast=None, read_only=False):
self.key = key
self.type_cast = type_cast
self.read_only = read_only
def __get__(self, instance, owner):
value = instance.state.get(self.key)
if value is not None and self.type_cast is not None:
value = self.type_cast(value)
return value
def __set__(self, instance, value):
if self.read_only:
raise AttributeError("can't set attribute")
if not isinstance(value, (bool, int, float, str, type(None))):
value = str(value)
instance.set({self.key: value})
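# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# How the descriptor above is typically wired into a device class: the class holds a
# raw `state` dict and a `set()` method, and each DeviceProperty maps one attribute
# onto a key of that dict. The Vacuum class and the keys "1"/"104" are made up for
# illustration; they are not the component's real device class or property keys.
if __name__ == "__main__":
    class Vacuum:
        power = DeviceProperty("1", type_cast=bool)
        battery = DeviceProperty("104", type_cast=int, read_only=True)

        def __init__(self):
            self.state = {"1": True, "104": "87"}

        def set(self, values):
            self.state.update(values)

    device = Vacuum()
    print(device.power, device.battery)  # -> True 87
    device.power = False                 # goes through DeviceProperty.__set__
    print(device.state["1"])             # -> False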
``` |
{
"source": "jkordish/Python",
"score": 3
} |
#### File: jkordish/Python/CBRtoCBZ.py
```python
#!/usr/bin/python2
__author__ = 'jkordish'
# CBR to CBZ converter
# Pass an individual CBR file or a directory containing CBRs
# MUST HAVE RARFILE MODULE - sudo easy_install rarfile
# It is a hodgepodge but it works...
import os
import sys
import rarfile
import zipfile
import shutil
input = sys.argv[1]
def main():
'''main function'''
# test if the user input is a file
if os.path.isdir(input) == True:
# walk the directory
for root, dir, files in os.walk(input):
for file in files:
# find files that are of cbr extension
if os.path.splitext(file)[1] == '.cbr':
# comic full path including extension
ComicFullPath = os.path.join(root, file)
# comic full path sans extension
ComicName = os.path.splitext(ComicFullPath)[0]
# pass vars to the unrar function
UnrarCBR(ComicFullPath, ComicName)
# test if the user input is a file
elif os.path.isfile(input) == True:
if os.path.splitext(input)[1] == '.cbr':
# comic full path including extension
ComicFullPath = os.path.abspath(input)
# comic full path sans extension
ComicName = os.path.splitext(ComicFullPath)[0]
# pass vars to the unrar function
UnrarCBR(ComicFullPath, ComicName)
def UnrarCBR( cbrin, cbrout ):
'''function to unrar the cbr files'''
# Using TRY since not all CBRs are actually RARs!
# Should do something more intelligent than just renaming to ZIP
try:
rf = rarfile.RarFile(cbrin)
# unrar the cbr into fullpath sans extension
rf.extractall(cbrout)
rf.close()
# delete the cbr file
os.unlink(cbrin)
# pass the comic full path sans extension to the CreateCBZ function
CreateCBZ(cbrout)
except:
print 'Renamed: ' + cbrout + '.cbr' + ' =>' + cbrout + '.cbz'
shutil.move(cbrin, cbrout + '.cbz')
def CreateCBZ( cbzin ):
'''function to create the zip file from the unrar'd'''
# setup the zip file var to be the comic full path with a cbz extension
zip_file = cbzin + '.cbz'
zip = zipfile.ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED)
root_len = len(os.path.abspath(cbzin))
# walk the directory
for root, dir, files in os.walk(cbzin):
archive_root = os.path.abspath(cbzin)[root_len:]
for file in files:
fullpath = os.path.join(root, file)
archive_name = os.path.join(archive_root, file)
zip.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
# remove the archive directory
shutil.rmtree(cbzin)
zip.close()
print "Finished: " + os.path.basename(zip_file)
return zip_file
if __name__ == '__main__':
main()
``` |
{
"source": "JKorf/MediaPi",
"score": 2
} |
#### File: src/Controllers/RuleManager.py
```python
import time
from datetime import datetime, timedelta
import math
from Controllers.TradfriManager import TradfriManager
from Controllers.PresenceManager import PresenceManager
from Controllers.TVManager import TVManager
from Controllers.ToonManager import ToonManager
from Database.Database import Database
from Shared.Logger import LogVerbosity, Logger
from Shared.Settings import Settings
from Shared.Threading import CustomThread
from Shared.Util import Singleton, current_time, add_leading_zero
class Rule:
def __init__(self, id, name, created, active, last_execution):
self.id = id
self.last_check_time = 0
self.created = created
self.last_execution = last_execution
self.active = active
self.conditions = []
self.actions = []
self.name = name
self.description = None
def add_action(self, id, type, parameters):
self.actions.append(RuleManager.actions[type](id, type, *parameters))
self.description = self.get_description()
def add_condition(self, id, type, parameters):
self.conditions.append(RuleManager.conditions[type](id, type, *parameters))
self.description = self.get_description()
def check(self):
should_execute = True
for condition in self.conditions:
if not condition.check():
should_execute = False
return should_execute
def execute(self):
for action in self.actions:
action.execute()
self.last_execution = current_time()
def get_description(self):
result = "If "
for condition in self.conditions:
result += condition.get_description() + " and "
result = result[:-4]
result += "then "
for action in self.actions:
result += action.get_description() + " and "
result = result[:-4]
return result
class IsBetweenTimeCondition:
name = "Time between"
parameter_descriptions = [("Start time", "time"), ("End time", "time")]
description = "Triggers when time is between 2 points"
def __init__(self, id, type, start_time, end_time):
self.id = id
self.type = type
self.start_time_hours = math.floor(int(start_time) / 60)
self.start_time_minutes = int(start_time) % 60
self.end_time_hours = math.floor(int(end_time) / 60)
self.end_time_minutes = int(end_time) % 60
self.parameters = [int(start_time), int(end_time)]
self._next_start_time = RuleManager.update_check_time(self.start_time_hours, self.start_time_minutes)
self._next_end_time = RuleManager.update_check_time(self.end_time_hours, self.end_time_minutes)
if self._next_end_time < self._next_start_time:
self._next_end_time += timedelta(days=1)
def check(self):
now = datetime.now()
if now > self._next_end_time:
self._next_start_time = RuleManager.update_check_time(self.start_time_hours, self.start_time_minutes)
self._next_end_time = RuleManager.update_check_time(self.end_time_hours, self.end_time_minutes)
result = self._next_start_time < now < self._next_end_time
return result
def get_description(self):
return "time is between " + add_leading_zero(self.start_time_hours)+":" + add_leading_zero(self.start_time_minutes) + " and " + add_leading_zero(self.end_time_hours)+":" + add_leading_zero(self.end_time_minutes)
class IsPassingTimeCondition:
name = "Time passing"
parameter_descriptions = [("Time trigger", "time")]
description = "Triggers at a specific time"
def __init__(self, id, type, trigger_time):
self.id = id
self.type = type
self.time_hour = math.floor(int(trigger_time) / 60)
self.time_minute = int(trigger_time) % 60
self.parameters = [int(trigger_time)]
self._next_check_time = RuleManager.update_check_time(self.time_hour, self.time_minute)
def check(self):
result = datetime.now() > self._next_check_time
if result:
self._next_check_time = RuleManager.update_check_time(self.time_hour, self.time_minute)
return result
def get_description(self):
return "time is " + add_leading_zero(self.time_hour)+":" + add_leading_zero(self.time_minute)
class IsHomeCondition:
name = "Is anyone home"
parameter_descriptions = [("Anyone home", "bool")]
description = "Triggers if people are home (or not)"
def __init__(self, id, type, should_be_home):
self.id = id
self.type = type
value = should_be_home == "True" or should_be_home == "true" or should_be_home == "1" or should_be_home is True
self.parameters = [value]
self.should_be_home = value
def check(self):
return PresenceManager().anyone_home == self.should_be_home
def get_description(self):
if self.should_be_home:
return "someone is home"
return "no one is home"
class OnLeavingHomeCondition:
name = "On leaving home"
parameter_descriptions = []
description = "Triggers when everyone left home"
def __init__(self, id, type):
self.id = id
self.type = type
self.parameters = []
self.last_home_check = False
def check(self):
current_state = PresenceManager().anyone_home
if not current_state and self.last_home_check:
self.last_home_check = current_state
return True
self.last_home_check = current_state
return False
def get_description(self):
return "last person left home"
class OnComingHomeCondition:
name = "On coming home"
parameter_descriptions = []
description = "Triggers when someone comes home"
def __init__(self, id, type):
self.id = id
self.type = type
self.parameters = []
self.last_home_check = False
def check(self):
new_check = PresenceManager().anyone_home
old_check = self.last_home_check
self.last_home_check = new_check
if new_check and not old_check:
return True
return False
def get_description(self):
return "first person comes home"
class ToggleTradfriGroupAction:
name = "Toggle a Tradfri group"
description = "Turns a Tradfri group on or off"
parameter_descriptions = [("Group", "tradfri_group"), ("On/Off", "bool")]
def __init__(self, id, type, group_ids, on):
self.id = id
self.type = type
on_value = on == "True" or on == "true" or on == "1" or on is True
self.parameters = [group_ids, on_value]
self.group_ids = group_ids.split('|')
self.on = on_value
def execute(self):
for group_id in self.group_ids:
TradfriManager().set_group_state(group_id, self.on)
def get_description(self):
if self.on:
return "turn on the devices for Tradfri group " + str(self.parameters[0])
return "turn off the devices for Tradfri group " + str(self.parameters[0])
class SetTemperatureAction:
name = "Set temperature"
description = "Sets the temperature"
parameter_descriptions = [("Target temperature", "int")]
def __init__(self, id, type, temp):
self.id = id
self.type = type
self.parameters = [temp]
self.temp = int(temp)
def execute(self):
ToonManager().set_temperature(self.temp, "rule")
def get_description(self):
return "set the temperature to " + str(self.temp) + "°C"
class ToggleTvAction:
name = "Turn on/off TV"
description = "Turn the TV on or off"
parameter_descriptions = [("Instance", "instance"), ("On/Off", "bool")]
def __init__(self, id, type, instance, on):
self.id = id
self.type = type
on_value = on == "True" or on == "true" or on == "1" or on is True
self.parameters = [instance, on_value]
self.instance = int(instance)
self.on = on_value
def execute(self):
# TODO slave?
if self.on:
TVManager().turn_tv_on()
TVManager().switch_input_to_pi()
else:
TVManager().turn_tv_off()
def get_description(self):
if self.on:
return "turn on the tv"
return "turn off the tv"
class PlayRadioAction:
name = "Play radio"
description = "Play radio"
parameter_descriptions = [("Instance", "instance"), ("Channel", "radio")]
def __init__(self, id, type, instance, channel):
self.id = id
self.type = type
self.parameters = [instance, channel]
self.instance = int(instance)
self.channel = int(channel)
def execute(self):
# TODO slave?
radio = [x for x in Database().get_radios() if x.id == self.channel][0]
from MediaPlayer.MediaManager import MediaManager
MediaManager().start_radio(radio.title, radio.url)
def get_description(self):
return "play a radio channel"
class RuleManager(metaclass=Singleton):
conditions = {
1: IsBetweenTimeCondition,
2: IsPassingTimeCondition,
3: IsHomeCondition,
4: OnLeavingHomeCondition,
5: OnComingHomeCondition
}
actions = {
1: ToggleTradfriGroupAction,
2: SetTemperatureAction,
3: ToggleTvAction,
4: PlayRadioAction
}
def __init__(self):
self.running = False
self.current_rules = []
self.check_thread = CustomThread(self.check_rules, "Rule checker")
self.load_rules()
enabled = Database().get_stat("rules_enabled")
self.enabled = bool(enabled)
def start(self):
if Settings.get_bool("slave"):
return
self.running = True
self.check_thread.start()
def stop(self):
self.running = False
self.check_thread.join()
def set_enabled(self, enabled):
self.enabled = enabled
Database().update_stat("rules_enabled", enabled)
def check_rules(self):
while self.running:
Logger().write(LogVerbosity.All, "Checking rules")
for rule in self.current_rules:
if rule.check():
Logger().write(LogVerbosity.Info, "Executing rule " + rule.name + ": " + rule.description)
if self.enabled:
try:
rule.execute()
except Exception as e:
Logger().write_error(e, "Rule error")
Database().update_rule(rule)
time.sleep(10)
def update_rule(self, rule_id, active, name, actions, conditions):
if rule_id == -1:
rule = Rule(-1, name, current_time(), True, 0)
self.current_rules.append(rule)
else:
rule = [x for x in self.current_rules if x.id == rule_id][0]
rule.name = name
rule.active = active
rule.actions = []
for action in actions:
rule.add_action(-1, action[0], [x for x in action[1:] if x is not None])
rule.conditions = []
for condition in conditions:
rule.add_condition(-1, condition[0], [x for x in condition[1:] if x is not None])
rule.last_execution = 0
Database().save_rule(rule)
def remove_rule(self, rule_id):
self.current_rules = [x for x in self.current_rules if x.id != rule_id]
Database().remove_rule(rule_id)
def get_rule(self, rule_id):
return [x for x in self.current_rules if x.id == rule_id][0]
def get_rules(self):
return self.enabled, self.current_rules
def get_actions_and_conditions(self):
actions = [ActionModel(id, action.name, action.description, action.parameter_descriptions) for id, action in self.actions.items()]
conditions = [ActionModel(id, condition.name, condition.description, condition.parameter_descriptions) for id, condition in self.conditions.items()]
return actions, conditions
def load_rules(self):
db_rules = Database().get_rules()
self.current_rules = self.parse_rules(db_rules)
def parse_rules(self, rules):
result = []
for r in rules:
rule = Rule(r.id, r.name, r.created, r.active, r.last_execution)
for link in r.links:
if link.rule_link_type == "Condition":
rule.add_condition(link.id, link.link_type, link.parameters)
else:
rule.add_action(link.id, link.link_type, link.parameters)
result.append(rule)
return result
@staticmethod
def update_check_time(hour, minute):
result = datetime.now()
if result.hour > hour or (result.hour == hour and result.minute >= minute):
result += timedelta(days=1)
result = result.replace(hour=hour, minute=minute, second=0, microsecond=0)
return result
class ActionModel:
def __init__(self, id, name, description, parameter_descriptions):
self.id = id
self.name = name
self.description = description
self.parameter_description = parameter_descriptions
```
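The RuleManager above resolves stored rules by looking up condition and action classes in integer-keyed dictionaries and instantiating them with the saved parameters. The sketch below is a minimal, self-contained illustration of that registry pattern; the `NotifyAction` class and its parameters are made up for the example and are not part of this code base.
```python
# Minimal sketch of the id -> class registry pattern used by RuleManager above.
# NotifyAction and its parameters are illustrative placeholders.
class NotifyAction:
    name = "Notify"
    parameter_descriptions = [("Message", "text")]
    def __init__(self, id, type, message):
        self.id = id
        self.type = type
        self.message = message
    def execute(self):
        print("Notification:", self.message)

ACTIONS = {1: NotifyAction}  # analogous to RuleManager.actions

def create_action(link_id, link_type, parameters):
    # Look up the action class by its numeric type id and expand the stored
    # parameter list into the constructor, as the rule loading code does above.
    action_class = ACTIONS[link_type]
    return action_class(link_id, link_type, *parameters)

if __name__ == "__main__":
    action = create_action(-1, 1, ["hello world"])
    action.execute()  # prints: Notification: hello world
```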
#### File: src/Controllers/TVManager.py
```python
import subprocess
import sys
from Shared.Logger import Logger, LogVerbosity
from Shared.Threading import CustomThread
from Shared.Util import Singleton
class TVManager(metaclass=Singleton):
# 0 - TV
# 1,2 - Recording 1/2
# 3,6,7,A - Tuner 1/2/3/4
# 4,8,B - Playback 1/2/3
def __init__(self):
self.pi_source = "1"
self.tv_source = "0"
self.debug_level = "1"
self.cec_process = None
self.pi_is_active = False
self.pi = sys.platform == "linux" or sys.platform == "linux2"
def switch_input_to_pi(self):
if not self.pi or self.pi_is_active:
return
self.pi_is_active = True
self.__request('as')
def switch_input_to_tv(self):
if not self.pi or not self.pi_is_active:
return
self.pi_is_active = False
self.__request('is')
def turn_tv_on(self):
if not self.pi:
return
self.__request('on ' + self.tv_source)
def turn_tv_off(self):
if not self.pi:
return
self.__request('standby ' + self.tv_source)
def __read_cec(self):
for line in iter(self.cec_process.stdout.readline, b''):
Logger().write(LogVerbosity.All, "CEC: " + line.decode('utf-8'))
def start(self):
if not self.pi:
return
self.cec_process = subprocess.Popen(['cec-client'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
t = CustomThread(self.__read_cec, "Cec reader", [])
t.start()
@staticmethod
def __construct_request(source, destination, command):
return 'echo "tx ' + source + destination + ":" + command + '" | cec-client -s'
def __request(self, command):
Logger().write(LogVerbosity.Debug, "TV manager sending command: " + command)
result = subprocess.check_output('echo "' + command + '" | cec-client -s -d ' + self.debug_level, shell=True).decode("utf8")
Logger().write(LogVerbosity.Debug, "TV manager result: " + str(result))
```
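TVManager, RuleManager and the other managers in this dump are declared with `metaclass=Singleton` from Shared.Util, which is not reproduced here. The snippet below shows a common way such a metaclass is implemented; it is only an assumption about its behaviour, included so calls like `TVManager().turn_tv_on()` read naturally.
```python
# Assumed sketch of a Singleton metaclass; not the project's actual Shared.Util code.
class Singleton(type):
    _instances = {}
    def __call__(cls, *args, **kwargs):
        # Create the instance on first use, then always return the same object.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self):
        self.values = {}

if __name__ == "__main__":
    a = Config()
    b = Config()
    print(a is b)  # True: every call returns the same instance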
#### File: MediaPlayer/Player/VLCPlayer.py
```python
import datetime
import os
import time
from enum import Enum
import sys
from MediaPlayer.Player import vlc
from MediaPlayer.Player.vlc import libvlc_get_version, Media, MediaList
from Shared.Events import EventManager, EventType
from Shared.Logger import Logger, LogVerbosity
from Shared.Observable import Observable
from Shared.Settings import Settings
from Shared.Threading import CustomThread
from Shared.Util import Singleton
class VLCPlayer(metaclass=Singleton):
def __init__(self):
self.__vlc_instance = None
self.player_state = PlayerData()
self.instantiate_vlc()
self.media = None
self.__player = self.__vlc_instance.media_player_new()
self.__list_player = self.__vlc_instance.media_list_player_new()
self.__list_player.set_media_player(self.__player)
self.__event_manager = self.__player.event_manager()
self.set_volume(75)
EventManager.register_event(EventType.SetSubtitleFiles, self.set_subtitle_files)
EventManager.register_event(EventType.StopPlayer, self.stop)
self.player_observer = CustomThread(self.observe_player, "Player observer")
self.player_observer.start()
self.stop_player_thread = None
def instantiate_vlc(self):
parameters = self.get_instance_parameters()
Logger().write(LogVerbosity.Debug, "VLC parameters: " + str(parameters))
self.__vlc_instance = vlc.Instance("cvlc", *parameters)
Logger().write(LogVerbosity.Info, "VLC version " + libvlc_get_version().decode('utf8'))
def play(self, url, time=0):
parameters = self.get_play_parameters(url, time)
Logger().write(LogVerbosity.Info, "VLC Play | Url: " + url)
Logger().write(LogVerbosity.Info, "VLC Play | Time: " + str(time))
Logger().write(LogVerbosity.Info, "VLC Play | Parameters: " + str(parameters))
self.player_state.start_update()
self.player_state.path = url
self.player_state.stop_update()
self.media = Media(url, *parameters)
if 'youtube' in url:
media_list = MediaList()
media_list.add_media(self.media)
self.__list_player.set_media_list(media_list)
self.__list_player.play()
else:
self.__player.set_media(self.media)
self.__player.play()
@staticmethod
def get_instance_parameters():
params = ["--verbose=" + str(Settings.get_int("vlc_log_level")),
"--network-caching=" + str(Settings.get_int("network_caching")),
"--ipv4-timeout=500",
"--image-duration=-1"]
if sys.platform == "linux" or sys.platform == "linux2":
log_path = Settings.get_string("base_folder") + "/Logs/" + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
params.append("--logfile=" + log_path + '/vlc_' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + ".txt")
params.append("--file-logging")
params.append("--file-caching=5000")
return params
def get_play_parameters(self, url, time):
params = []
if time != 0:
params.append("start-time=" + str(time // 1000))
return params
def set_window(self, handle):
if sys.platform == "linux" or sys.platform == "linux2":
self.__player.set_xwindow(handle)
else:
self.__player.set_hwnd(handle)
def pause_resume(self):
Logger().write(LogVerbosity.All, "Player pause resume")
self.__player.pause()
def stop(self):
Logger().write(LogVerbosity.All, "Player stop")
thread = CustomThread(lambda: self.__player.stop(), "Stopping VLC player")
thread.start()
def set_volume(self, vol):
Logger().write(LogVerbosity.Debug, "Player set volume " + str(vol))
self.__player.audio_set_volume(vol)
self.player_state.start_update()
self.player_state.volume = vol
self.player_state.stop_update()
def get_volume(self):
return self.__player.audio_get_volume()
def get_position(self):
return self.__player.get_time()
def get_length(self):
return int(self.__player.get_length())
def set_time(self, pos):
Logger().write(LogVerbosity.Debug, "Player set time " + str(pos))
self.__player.set_time(pos)
self.player_state.start_update()
self.player_state.playing_for = pos
self.player_state.stop_update()
def set_position(self, pos):
Logger().write(LogVerbosity.Debug, "Player set position " + str(pos))
self.__player.set_position(pos)
def set_subtitle_delay(self, delay):
Logger().write(LogVerbosity.Debug, "Player set subtitle delay " + str(delay))
self.__player.video_set_spu_delay(delay)
self.player_state.start_update()
self.player_state.sub_delay = delay
self.player_state.stop_update()
def get_state(self):
return self.__player.get_state()
def get_audio_track(self):
return self.__player.audio_get_track()
def set_audio_track(self, track_id):
Logger().write(LogVerbosity.Debug, "Player set audio track " + str(track_id))
self.__player.audio_set_track(track_id)
self.player_state.start_update()
self.player_state.audio_track = track_id
self.player_state.stop_update()
def get_audio_tracks(self):
tracks = self.__player.audio_get_track_description()
result = []
for trackid, trackname in tracks:
result.append((trackid, trackname.decode('utf8')))
return result
def set_subtitle_files(self, files):
Logger().write(LogVerbosity.Debug, "Adding " + str(len(files)) + " subtitle files")
pi = sys.platform == "linux" or sys.platform == "linux2"
for file in reversed(files):
if not pi and file[1] != ":":
file = "C:" + file
file = file.replace("/", os.sep).replace("\\", os.sep)
# NOTE this must be called after Play()
self.__player.video_set_subtitle_file(file)
def set_subtitle_track(self, id):
Logger().write(LogVerbosity.Debug, "Player set subtitle track " + str(id))
self.__player.video_set_spu(id)
self.player_state.start_update()
self.player_state.sub_track = id
self.player_state.stop_update()
def get_subtitle_count(self):
return self.__player.video_get_spu_count()
def get_subtitle_tracks(self):
tracks = self.__player.video_get_spu_description()
result = []
for trackid, trackname in tracks:
result.append((trackid, trackname.decode('utf-8')))
return result
def get_subtitle_delay(self):
return self.__player.video_get_spu_delay()
def get_selected_sub(self):
return self.__player.video_get_spu()
def try_play_subitem(self):
media = self.__player.get_media()
if media is None:
self.stop()
return
subs = media.subitems()
if subs is None:
self.stop()
return
if len(subs) == 1:
subs[0].add_options("demux=avformat")
self.__player.set_media(subs[0])
self.__player.play()
def observe_player(self):
while True:
state = self.get_state().value
if state in [5, 6, 7]:
state = 0
new_state = PlayerState(state)
if new_state == PlayerState.Nothing and self.player_state.state != PlayerState.Nothing:
self.stop_player_thread = CustomThread(self.stop, "Stopping player")
self.stop_player_thread.start()
self.player_state.start_update()
self.player_state.state = new_state
self.player_state.playing_for = self.get_position()
self.player_state.length = self.get_length()
self.player_state.audio_tracks = self.get_audio_tracks()
self.player_state.audio_track = self.get_audio_track()
self.player_state.sub_delay = self.get_subtitle_delay()
self.player_state.sub_track = self.get_selected_sub()
self.player_state.sub_tracks = self.get_subtitle_tracks()
self.player_state.volume = self.get_volume()
self.player_state.stop_update()
time.sleep(0.5)
class PlayerState(Enum):
Nothing = 0
Opening = 1
Buffering = 2
Playing = 3
Paused = 4
class PlayerData(Observable):
def __init__(self):
super().__init__("PlayerData", 0.5)
self.path = None
self.state = PlayerState.Nothing
self.playing_for = 0
self.length = 0
self.volume = 0
self.sub_delay = 0
self.sub_track = 0
self.sub_tracks = []
self.audio_track = 0
self.audio_tracks = []
```
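VLCPlayer wraps the python-vlc bindings; the stripped-down sketch below shows the underlying calls it builds on. It assumes libVLC and the `python-vlc` package are installed, and the media path is a placeholder to be replaced with a real file or stream URL.
```python
# Hedged, stand-alone illustration of the python-vlc calls VLCPlayer wraps above.
import time
import vlc  # pip install python-vlc; requires a local libVLC installation

instance = vlc.Instance("--network-caching=3000")
player = instance.media_player_new()
media = instance.media_new("/path/to/video.mp4")  # placeholder path
player.set_media(media)
player.audio_set_volume(75)
player.play()
time.sleep(2)              # give libVLC a moment to open the media
print(player.get_state())  # e.g. State.Playing, or State.Error for a bad path
player.stop()
```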
#### File: MediaPlayer/Subtitles/SubtitlesSubDB.py
```python
import hashlib
from MediaPlayer.Subtitles.SubtitleSourceBase import SubtitleSourceBase
from Shared.Logger import Logger, LogVerbosity
from Shared.Network import RequestFactory
class SubtitlesSubDB(SubtitleSourceBase):
def __init__(self):
super().__init__()
@staticmethod
def get_subtitles(size, file_length, filename, first_64k, last_64k):
data = first_64k + last_64k
file_hash = hashlib.md5(data).hexdigest()
result = RequestFactory.make_request(
"http://sandbox.thesubdb.com/?action=download&hash=" + file_hash + "&language=en",
useragent="SubDB/1.0 (MediaPi/0.1; http://github.com/jkorf/mediapi)")
if not result:
Logger().write(LogVerbosity.Info, "SubDB: no subtitles found for " + file_hash)
return []
Logger().write(LogVerbosity.Info, "SubDB: Found a subtitle for hash " + file_hash)
return [SubtitleSourceBase.save_file("SubDB", result)]
```
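The SubDB lookup above identifies a video by the MD5 of its first and last 64 KiB. The standalone sketch below computes that hash locally; the file path is a placeholder, and the small-file guard is an addition not present in `get_file_info` in Util.py.
```python
# Standalone sketch of the SubDB hash: MD5 of the first and last 64 KiB of the file.
import hashlib
import os

CHUNK = 64 * 1024

def subdb_hash(path):
    size = os.path.getsize(path)
    with open(path, "rb") as f:
        first = f.read(CHUNK)
        f.seek(max(0, size - CHUNK))  # guard against files smaller than 64 KiB
        last = f.read(CHUNK)
    return hashlib.md5(first + last).hexdigest()

if __name__ == "__main__":
    print(subdb_hash("/path/to/video.mkv"))  # placeholder path
```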
#### File: Torrents/DHT/Bucket.py
```python
from MediaPlayer.Torrents.DHT.Node import NodeState
from Shared.Logger import Logger, LogVerbosity
from Shared.Util import current_time
class Bucket:
def __init__(self, start, end):
self.start = start
self.end = end
self.max_nodes = 8
self.nodes = []
self.last_changed = 0
Logger().write(LogVerbosity.Debug, "Creating new bucket from " + str(self.start) + " to " + str(self.end))
def split(self):
self.start += (self.end - self.start) // 2
Logger().write(LogVerbosity.Debug, "DHT: Splitting bucket, new range: " + str(self.start) + " to " + str(self.end))
split_nodes = [x for x in self.nodes if x.int_id < self.start]
self.nodes = [x for x in self.nodes if x.int_id >= self.start]
return split_nodes
def fits(self, id):
return self.start <= id < self.end
def contains_node(self, id_bytes):
return len([x for x in self.nodes if x.byte_id == id_bytes]) == 1
def full(self):
return len([x for x in self.nodes if x.node_state != NodeState.Bad]) == self.max_nodes
def add_node(self, node):
if len(self.nodes) >= self.max_nodes:
self.nodes = [x for x in self.nodes if x.node_state != NodeState.Bad]
self.nodes.append(node)
self.last_changed = current_time()
def get_node(self, id):
nodes = [x for x in self.nodes if x.byte_id == id]
if len(nodes) != 0:
return nodes[0]
return None
def questionable_nodes(self):
return [x for x in self.nodes if x.node_state == NodeState.Questionable]
```
#### File: Torrents/Peer/PeerMetaDataManager.py
```python
from MediaPlayer.Torrents.ExtensionManager import ProtocolExtensionManager
from MediaPlayer.Torrents.Peer.PeerMessages import BitfieldMessage, InterestedMessage, HandshakeMessage, ExtensionHandshakeMessage, \
UninterestedMessage, MetadataMessage, HaveNoneMessage
from MediaPlayer.Util.Enums import ExtensionName, MetadataMessageType, TorrentState, PeerInterestedState, \
PeerSpeed, PeerState
from Shared.LogObject import LogObject
from Shared.Logger import Logger, LogVerbosity
from Shared.Settings import Settings
from Shared.Util import current_time
class PeerMetaDataManager(LogObject):
def __init__(self, peer):
super().__init__(peer, "meta")
self.peer = peer
self.handshake_send = False
self.handshake_successful = False
self.extension_handshake_send = False
self.bitfield_done = False
self.metadata_requested = False
self.pause_handled = False
self.port_send = False
self.last_peer_exchange = current_time()
self._low_peer_max_speed = Settings.get_int("low_peer_max_download_buffer") / 3
self._medium_peer_max_speed = Settings.get_int("medium_peer_max_download_buffer") / 3
def update(self):
if self.peer.state != PeerState.Started:
return True
if not self.handshake_send:
Logger().write(LogVerbosity.All, str(self.peer.id) + ' Sending handshake')
self.handshake_send = True
self.send_handshake()
return True
if not self.handshake_successful:
if current_time() - self.peer.connection_manager.connected_on > 5000:
self.peer.protocol_logger.update("No handshake response")
# No handshake received
self.peer.stop_async("No handshake")
return False
return True
if not self.extension_handshake_send and self.peer.extension_manager.peer_supports(ExtensionName.ExtensionProtocol):
Logger().write(LogVerbosity.All, str(self.peer.id) + ' sending extended handshake')
dic = ProtocolExtensionManager.create_extension_dictionary()
handshake = ExtensionHandshakeMessage(dic)
self.peer.protocol_logger.update("Sending extension handshake")
self.peer.connection_manager.send(handshake.to_bytes())
self.extension_handshake_send = True
if self.peer.torrent.state == TorrentState.DownloadingMetaData:
if self.metadata_requested:
return True
if self.peer.extension_manager.extension_dict is None:
Logger().write(LogVerbosity.All, "Peer didn't receive extension handshake yet")
return True
if not self.peer.extension_manager.peer_supports(ExtensionName.Metadata):
Logger().write(LogVerbosity.All, "Peer doesn't support metadata extension")
return True
self.metadata_requested = True
Logger().write(LogVerbosity.Debug, str(self.peer.id) + " Requesting metadata")
self.peer.protocol_logger.update("Sending metadata requests")
to_request = self.peer.torrent.metadata_manager.get_pieces_to_do()
for index in to_request:
Logger().write(LogVerbosity.All, "Meta data request for piece " + str(index.index))
self.peer.connection_manager.send(MetadataMessage(self.peer, MetadataMessageType.Request, index.index).to_bytes())
return True
if self.peer.torrent.state == TorrentState.WaitingUserFileSelection:
return True
if self.peer.torrent.state == TorrentState.Paused:
if not self.pause_handled:
self.pause_handled = True
if self.peer.communication_state.out_interest == PeerInterestedState.Interested:
self.peer.protocol_logger.update("Sending uninterested (paused)")
Logger().write(LogVerbosity.Debug, "Paused, sending uninterested")
self.peer.communication_state.out_interest = PeerInterestedState.Uninterested
self.peer.connection_manager.send(UninterestedMessage().to_bytes())
return True
if not self.port_send:
self.port_send = True
# if self.peer.extension_manager.peer_supports(ExtensionName.DHT):
# Logger().write(LogVerbosity.All, str(self.peer.id) + ' sending port message')
# self.peer.connection_manager.send(PortMessage(Settings.get_int("dht_port")).to_bytes())
if not self.peer.torrent.data_manager.bitfield:
return False
if not self.bitfield_done:
Logger().write(LogVerbosity.All, str(self.peer.id) + ' Sending initial bitfield')
self.bitfield_done = True
if self.peer.extension_manager.peer_supports(ExtensionName.FastExtension) and \
self.peer.torrent.data_manager.bitfield.has_none:
self.peer.protocol_logger.update("Sending HaveNone")
Logger().write(LogVerbosity.All, "Got nothing, sending HaveNone")
self.peer.connection_manager.send(HaveNoneMessage().to_bytes())
else:
Logger().write(LogVerbosity.All, "Sending bitfield message")
self.peer.protocol_logger.update("Sending bitfield")
self.peer.connection_manager.send(BitfieldMessage(self.peer.torrent.data_manager.bitfield.get_bitfield()).to_bytes())
if self.peer.communication_state.out_interest == PeerInterestedState.Uninterested and self.peer.download_manager.has_interesting_pieces():
Logger().write(LogVerbosity.All, str(self.peer.id) + ' Sending interested message')
self.peer.protocol_logger.update("Sending interested")
self.peer.communication_state.out_interest = PeerInterestedState.Interested
self.peer.connection_manager.send(InterestedMessage().to_bytes())
if self.peer.extension_manager.peer_supports(ExtensionName.PeerExchange):
pass
counter_value = self.peer.counter.value
if counter_value < self._low_peer_max_speed:
self.peer.peer_speed = PeerSpeed.Low
elif counter_value < self._medium_peer_max_speed:
self.peer.peer_speed = PeerSpeed.Medium
else:
self.peer.peer_speed = PeerSpeed.High
return True
def send_handshake(self):
message = HandshakeMessage(self.peer.torrent.info_hash.sha1_hashed_bytes)
message.reserved = ProtocolExtensionManager.add_extensions_to_handshake(message.reserved)
Logger().write(LogVerbosity.All, "Sending handshake")
self.peer.protocol_logger.update("Sending handshake")
self.peer.connection_manager.send(message.to_bytes())
def stop(self):
self.peer = None
```
#### File: Torrents/Streaming/StreamListener.py
```python
import os
import socket
import sys
import urllib.parse
import time
import select
from Shared.LogObject import LogObject
from Shared.Logger import Logger, LogVerbosity
from Shared.Settings import Settings
from Shared.Threading import CustomThread
from Shared.Util import current_time
class StreamListener(LogObject):
wait_for_data = 0.1
mime_mapping = {
".mp4": "video/mp4",
".avi": "video/x-msvideo",
".mkv": "video/mp4",
".srt": "json"
}
def __init__(self, name, port, arg=None):
super().__init__(arg, name)
self.name = name
self.torrent = arg
self.port = port
self.thread = None
self.chunk_length = Settings.get_int("max_chunk_size")
self.server = StreamServer(self.name, port, self.handle_request)
self.sockets_writing_data = []
self.running = False
self.bytes_send = 0
self.id = 0
def start_listening(self):
self.thread = CustomThread(self.server.start, "Listener: " + self.name)
self.running = True
self.thread.start()
def handle_request(self, socket):
Logger().write(LogVerbosity.Info, self.name + " new request")
# Read headers
total_message = self.read_headers(socket)
if total_message is None:
return
header = HttpHeader.from_string(total_message)
if header.path == "/torrent":
# Handle torrent stream request
self.handle_torrent_request(socket, header)
elif header.path.startswith("/file"):
# Handle file stream request
self.handle_file_request(socket, header)
else:
# Unknown request
Logger().write(LogVerbosity.Info, self.name + " streamListener received unknown request: " + header.path)
socket.close()
def read_headers(self, socket):
try:
total_message = b''
while not total_message.endswith(b'\r\n\r\n'):
rec = socket.recv(1024)
if len(rec) == 0:
break
total_message += rec
time.sleep(0)
except OSError:  # socket.timeout and the Connection* errors are OSError subclasses; the 'socket' parameter also shadows the socket module here, so module-level exception names are unavailable
socket.close()
Logger().write(LogVerbosity.Info, self.name + " error reading http header")
return
if not total_message.endswith(b'\r\n\r\n'):
socket.close()
Logger().write(LogVerbosity.Info, self.name + " invalid http header, closing")
return
return total_message
def handle_file_request(self, socket, header):
file_path = header.path[6:]
if sys.platform == "linux" or sys.platform == "linux2":
file_path = "/" + file_path
Logger().write(LogVerbosity.Debug, self.name + " file request for " + file_path)
if not os.path.exists(file_path):
file_path = urllib.parse.unquote_plus(file_path)
if not os.path.exists(file_path):
Logger().write(LogVerbosity.Info, self.name + " file not found: " + file_path)
self.write_header(socket, "404 Not Found")
socket.close()
return
read_file = ReadFile(file_path)
read_file.open()
if header.range_end == 0 or header.range_end == -1:
header.range_end = read_file.size - 1
if header.range is None:
Logger().write(LogVerbosity.Debug, self.name + ' request without range')
self.write_header_with_content(socket, "200 OK", 0, header.range_end, read_file.size, file_path)
self.write_data(socket, header.range_start, header.range_end - header.range_start + 1, read_file.get_bytes)
else:
Logger().write(LogVerbosity.Debug, self.name + ' request with range')
self.write_header_with_content(socket, "206 Partial Content", header.range_start, header.range_end, read_file.size, file_path)
self.write_data(socket, header.range_start, header.range_end - header.range_start + 1, read_file.get_bytes)
read_file.close()
def handle_torrent_request(self, socket, header):
if not self.torrent or not self.running:
socket.close()
Logger().write(LogVerbosity.Debug, self.name + " stopping connection because there is no more torrent")
return
if header.range_end == 0 or header.range_end == -1:
header.range_end = self.torrent.media_file.length - 1
if header.range:
range_start = header.range_start
if range_start == self.torrent.media_file.length:
Logger().write(LogVerbosity.Debug, "Request for content length 0, cant process")
self.write_header(socket, "416 Requested range not satisfiable")
socket.close()
return
if header.range is None:
Logger().write(LogVerbosity.Debug, self.name + ' request without range')
success = self.write_header_with_content(socket, "200 OK", 0, header.range_end, self.torrent.media_file.length,
self.torrent.media_file.path)
if not success:
return
self.write_data(socket, header.range_start, header.range_end - header.range_start + 1,
self.torrent.get_data)
else:
Logger().write(LogVerbosity.Debug, self.name + ' request with range')
success = self.write_header_with_content(socket, "206 Partial Content", header.range_start, header.range_end,
self.torrent.media_file.length, self.torrent.media_file.path)
if not success:
return
self.write_data(socket, header.range_start, header.range_end - header.range_start + 1,
self.torrent.get_data)
def write_header(self, socket, status):
response_header = HttpHeader()
response_header.status_code = status
Logger().write(LogVerbosity.Info, self.name + " return header: " + response_header.to_string())
try:
socket.send(response_header.to_string().encode())
return True
except (ConnectionAbortedError, ConnectionResetError, OSError):
Logger().write(LogVerbosity.Info, "Connection closed 2 during sending of response header")
socket.close()
return False
def write_header_with_content(self, socket, status, start, end, length, path):
response_header = HttpHeader()
Logger().write(LogVerbosity.Debug, self.name + " stream requested: " + str(start) + "-" + str(end))
response_header.status_code = status
response_header.content_length = end - start + 1
response_header.set_range(start, end, length)
filename, file_extension = os.path.splitext(path.lower())
if file_extension not in StreamListener.mime_mapping:
Logger().write(LogVerbosity.Info, self.name + " unknown video type: " + str(file_extension) + ", defaulting to mp4")
response_header.mime_type = StreamListener.mime_mapping[".mp4"]
else:
response_header.mime_type = StreamListener.mime_mapping[file_extension]
Logger().write(LogVerbosity.Info, self.name + " return header: " + response_header.to_string())
try:
socket.send(response_header.to_string().encode())
return True
except (ConnectionAbortedError, ConnectionResetError, OSError):
Logger().write(LogVerbosity.Info, "Connection closed 2 during sending of response header")
socket.close()
return False
def write_data(self, socket, requested_byte, length, data_delegate):
written = 0
Logger().write(LogVerbosity.Info, self.name + " write data: " + str(requested_byte) + ", length " + str(length))
id = self.id
self.id += 1
data_writer = SocketWritingData(self, id, socket, requested_byte, requested_byte + length, current_time())
self.sockets_writing_data.append(data_writer)
if len(self.sockets_writing_data) > 1:
Logger().write(LogVerbosity.Debug, "Multiple data writers:")
for writer in self.sockets_writing_data:
Logger().write(LogVerbosity.Debug, " " + str(writer))
while written < length:
part_length = min(length - written, self.chunk_length)
if not self.running:
Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + " canceling retrieving data because we are no longer running 1")
data_writer.close()
self.sockets_writing_data.remove(data_writer)
return
if data_writer.stop:
Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + " canceling because we're seeking and expecting a new request")
data_writer.close()
self.sockets_writing_data.remove(data_writer)
return
if not self.wait_writable(data_writer, socket):
Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + " closed")
self.sockets_writing_data.remove(data_writer)
return
data = data_delegate(requested_byte + written, part_length)
if not self.running:
Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + " canceling retrieved data because we are no longer running 2")
data_writer.close()
self.sockets_writing_data.remove(data_writer)
return
if data is None:
time.sleep(self.wait_for_data)
continue
Logger().write(LogVerbosity.Info, self.name + ' writer ' + str(data_writer.id) + ' data retrieved: ' + str(requested_byte + written) + " - " + str(requested_byte + written + part_length))
send = 0
try:
while send < len(data):
this_send = data[send: send + 50000]
data_length = len(this_send)
socket.sendall(this_send)
written += data_length
send += data_length
self.bytes_send += data_length
data_writer.streamed += data_length
Logger().write(LogVerbosity.All, self.name + ' writer ' + str(data_writer.id) + " send " + str(data_length) + " bytes")
time.sleep(0.005) # give other threads some time
except (ConnectionAbortedError, ConnectionResetError, OSError) as e:
Logger().write(LogVerbosity.Info, self.name + " writer " + str(data_writer.id) + " connection closed during sending of data: " + str(e))
data_writer.close()
self.sockets_writing_data.remove(data_writer)
return
Logger().write(LogVerbosity.Info, "Completed request: " + str(data_writer))
data_writer.close()
self.sockets_writing_data.remove(data_writer)
def wait_writable(self, writer, socket):
while True:
if not self.running:
return False
if writer.stop:
Logger().write(LogVerbosity.Debug, self.name + " canceling because we're seeking and expecting a new request")
writer.close()
return False
# check if socket is still open
readable, writeable, exceptional = select.select([socket], [socket], [socket], 0)
if len(readable) == 1:
read = []
try:
read = socket.recv(1024)
except Exception as e:
Logger().write(LogVerbosity.Debug, "Request socket closed with exception: " + str(e))
if len(read) == 0:
Logger().write(LogVerbosity.Info, self.name + " socket no longer open 3")
writer.close()
return False
else:
Logger().write(LogVerbosity.Info, self.name + " recv received data?? - " + str(read.decode("utf-8'")))
if len(writeable) == 0:
# not currently writeable, wait for it to become available again
time.sleep(0.1)
continue
return True
def stop(self):
self.running = False
for writer in self.sockets_writing_data:
writer.stop = True
self.torrent = None
if self.server is not None:
self.server.close()
Logger().write(LogVerbosity.Info, self.name + " stopped")
class StreamServer:
def __init__(self, name, port, client_thread):
self.port = port
self.name = name
self.soc = None
self.running = False
self.client_thread = client_thread
def start(self):
Logger().write(LogVerbosity.Debug, self.name + " starting listener on port " + str(self.port))
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.running = True
try:
self.soc.bind(("", self.port))
Logger().write(LogVerbosity.Info, "StreamServer "+self.name+" listening on port " + str(self.port))
except (socket.error, OSError) as e:
Logger().write(LogVerbosity.Info, "Couldn't start StreamServer " + self.name + ": " + str(e))
return
self.soc.listen(10)
try:
while True:
Logger().write(LogVerbosity.Debug, "StreamServer "+self.name+" listening for incoming connection")
conn, addr = self.soc.accept()
if not self.running:
break
ip, port = str(addr[0]), str(addr[1])
Logger().write(LogVerbosity.Debug, 'New connection from ' + ip + ':' + port)
thread = CustomThread(self.client_thread, "Stream request", [conn])
thread.start()
except Exception as e:
Logger().write_error(e, "Stream server")
Logger().write(LogVerbosity.Debug, "StreamServer "+self.name+" closing")
self.soc.close()
def close(self):
self.running = False
if self.soc is not None:
try:
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(("127.0.0.1", self.port))
except ConnectionRefusedError:
pass
class ReadFile:
def __init__(self, path):
self.path = path
self.size = os.path.getsize(path)
self.location = 0
self.file = None
def open(self):
self.file = open(self.path, 'rb')
self.location = 0
def get_bytes(self, start, length):
if self.location != start:
Logger().write(LogVerbosity.Info, "Seeking file to " + str(start))
self.file.seek(start)
data = self.file.read(length)
self.location = start + len(data)
return data
def close(self):
self.file.close()
class HttpHeader:
def __init__(self):
self.host = None
self.range = None
self.range_start = 0
self.range_end = 0
self.range_total = 0
self.path = None
self.content_length = 0
self.mime_type = None
self.accept_ranges = None
self.connection = None
self.status_code = None
@classmethod
def from_string(cls, header):
header = header.decode('utf8')
Logger().write(LogVerbosity.Info, "Received header: " + header)
result = cls()
split = header.splitlines(False)
request = split[0].split(" ")
result.path = request[1]
for head in split:
keyvalue = head.split(': ')
if len(keyvalue) != 2:
continue
if keyvalue[0] == "Host":
result.host = keyvalue[1]
if keyvalue[0] == "Range":
result.range = keyvalue[1]
type_bytes = result.range.split("=")
start_end = type_bytes[1].split("-")
result.range_start = int(start_end[0])
if len(start_end) > 1 and start_end[1] != "":
result.range_end = int(start_end[1])
else:
result.range_end = -1
if keyvalue[0] == "Content-Length":
result.content_length = keyvalue[1]
return result
def set_range(self, start, end, total):
self.range = "bytes " + str(start) + "-" + str(end) + "/" + str(total)
def to_string(self):
result = ""
result += "HTTP/1.1 " + self.status_code + "\r\n"
if self.mime_type:
result += "Content-Type: " + self.mime_type + "\r\n"
if self.content_length:
result += "Accept-Ranges: bytes" + "\r\n"
result += "Content-Length: " + str(self.content_length) + "\r\n"
result += "Content-Range: " + self.range + "\r\n" + "\r\n"
return result
class SocketWritingData(LogObject):
@property
def stream_speed(self):
return self.streamed / ((current_time() - self.connect_time) / 1000)
def __init__(self, parent, id, socket, range_start, range_end, connect_time):
super().__init__(parent, "request " + str(id))
self.id = id
self.socket = socket
self.range_start = range_start
self.range_end = range_end
self.connect_time = connect_time
self.streamed = 0
self.stop = False
def close(self):
self.socket.close()
self.finish()
def __str__(self):
return "Id: "+str(self.id)+", Range: " + str(self.range_start) + "-" + str(self.range_end) + " connected at " + str(self.connect_time) + ", streamed: " +str(self.streamed)
```
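HttpHeader.from_string above turns a Range request header such as `bytes=500-` into range_start/range_end, with -1 marking an open-ended range. A self-contained sketch of just that parsing step:
```python
# Standalone sketch of the Range-header parsing done by HttpHeader.from_string above.
def parse_range(value):
    _, _, byte_range = value.partition("=")      # "bytes=0-1023" -> "0-1023"
    start_str, _, end_str = byte_range.partition("-")
    start = int(start_str)
    end = int(end_str) if end_str != "" else -1  # -1 marks an open-ended range
    return start, end

if __name__ == "__main__":
    print(parse_range("bytes=0-1023"))  # (0, 1023)
    print(parse_range("bytes=500-"))    # (500, -1)
```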
#### File: Torrents/Torrent/TorrentMessageProcessor.py
```python
from time import sleep
from MediaPlayer.Torrents.Peer.PeerMessages import ChokeMessage, BasePeerMessage, UnchokeMessage, InterestedMessage, \
UninterestedMessage, HaveMessage, RequestMessage, PieceMessage, CancelMessage, PortMessage, BitfieldMessage, \
ExtensionHandshakeMessage, PeerExchangeMessage, MetadataMessage, KeepAliveMessage, HaveAllMessage, HaveNoneMessage, \
AllowedFastMessage, SuggestPieceMessage, RejectRequestMessage, HandshakeMessage
from MediaPlayer.Torrents.TorrentManager import TorrentManager
from MediaPlayer.Util import Bencode
from MediaPlayer.Util.Bencode import BTFailure
from MediaPlayer.Util.Enums import PeerSource, PeerChokeState, PeerInterestedState, MetadataMessageType, PeerState
from Shared.Events import EventManager, EventType
from Shared.Logger import Logger, LogVerbosity
from Shared.Timing import Timing
class TorrentMessageProcessor(TorrentManager):
def __init__(self, torrent):
super().__init__(torrent, "message processor")
self.torrent = torrent
self.metadata_wait_list = []
# Logging props
self.metadata_wait_list_log = 0
def process_messages(self, messages):
Timing().start_timing("process_messages")
if not self.torrent.is_preparing and len(self.metadata_wait_list) > 0:
for peer, message, timestamp in self.metadata_wait_list:
self.handle_message(peer, message, timestamp)
self.metadata_wait_list.clear()
self.metadata_wait_list_log = 0
for peer, message_bytes, timestamp in messages:
if peer.state != PeerState.Started:
continue
if not peer.metadata_manager.handshake_successful:
# Handshake is the first message we should receive
handshake = HandshakeMessage.from_bytes(message_bytes)
if handshake is None:
Logger().write(LogVerbosity.Debug, str(peer.id) + ' invalid handshake response')
peer.stop_async("Invalid handshake")
continue
if handshake.protocol != b'BitTorrent protocol':
Logger().write(LogVerbosity.Debug, 'Unknown bittorrent protocol, disconnecting. ' + str(handshake.protocol))
peer.stop_async("Invalid protocol")
continue
peer.protocol_logger.update("Received Handshake")
peer.extension_manager.parse_extension_bytes(handshake.reserved)
peer.metadata_manager.handshake_successful = True
continue
message = BasePeerMessage.from_bytes(message_bytes)
if message is None:
Logger().write(LogVerbosity.Info, "Unknown or invalid peer message received (id = " + str(message_bytes[0]) + "), closing connection")
peer.stop_async("Unknown msg id")
continue
if self.torrent.is_preparing:
# Add messages we cannot process yet to wait list
if not isinstance(message, MetadataMessage) and not isinstance(message, ExtensionHandshakeMessage):
Logger().write(LogVerbosity.All, str(peer.id) + " Adding " + str(message.__class__.__name__) + " to metadata wait list")
self.metadata_wait_list.append((peer, message, timestamp))
self.metadata_wait_list_log = len(self.metadata_wait_list)
continue
self.handle_message(peer, message, timestamp)
sleep(0)
Timing().stop_timing("process_messages")
def handle_message(self, peer, message, timestamp):
if peer.protocol_logger is None:
return
if isinstance(message, PieceMessage):
Logger().write(LogVerbosity.All, str(peer.id) + ' Received piece message: ' + str(message.index) + ', offset ' + str(message.offset))
peer.protocol_logger.update("Sending/receiving requests", True)
self.torrent.data_manager.block_done(peer, message.index, message.offset, message.data)
peer.download_manager.block_done(message.index * self.torrent.data_manager.piece_length + message.offset, timestamp)
peer.counter.add_value(message.length)
return
elif isinstance(message, KeepAliveMessage):
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received keep alive message')
peer.protocol_logger.update("Received KeepAlive")
return
elif isinstance(message, ChokeMessage):
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received choke message')
peer.protocol_logger.update("Received Choke")
peer.communication_state.in_choke = PeerChokeState.Choked
return
elif isinstance(message, UnchokeMessage):
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received unchoke message')
peer.protocol_logger.update("Received UnChoke")
peer.communication_state.in_choke = PeerChokeState.Unchoked
return
elif isinstance(message, InterestedMessage):
Logger().write(LogVerbosity.Info, str(peer.id) + ' Received interested message')
peer.protocol_logger.update("Received Interested")
peer.communication_state.in_interest = PeerInterestedState.Interested
return
elif isinstance(message, UninterestedMessage):
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received uninterested message')
peer.protocol_logger.update("Received Uninterested")
peer.communication_state.in_interest = PeerInterestedState.Uninterested
return
elif isinstance(message, HaveMessage):
if peer.state == PeerState.Started:
Logger().write(LogVerbosity.All, str(peer.id) + ' Received have message for piece ' + str(message.piece_index))
peer.protocol_logger.update("Received Have", True)
peer.bitfield.update_piece(message.piece_index, True)
return
elif isinstance(message, BitfieldMessage):
if peer.state == PeerState.Started:
Logger().write(LogVerbosity.All, str(peer.id) + ' Received bitfield message')
peer.protocol_logger.update("Received Bitfield")
peer.bitfield.update(message.bitfield)
return
elif isinstance(message, RequestMessage):
Logger().write(LogVerbosity.Info, str(peer.id) + ' Received request message')
peer.protocol_logger.update("Received Request")
return
elif isinstance(message, CancelMessage):
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received cancel message')
peer.protocol_logger.update("Received Cancel")
return
elif isinstance(message, PortMessage):
Logger().write(LogVerbosity.All, str(peer.id) + ' Received port message, port = ' + str(message.port))
peer.protocol_logger.update("Received Port")
EventManager.throw_event(EventType.NewDHTNode, [peer.connection_manager.uri.hostname, message.port])
return
elif isinstance(message, HaveAllMessage):
if peer.state == PeerState.Started:
Logger().write(LogVerbosity.All, str(peer.id) + " Received HaveAll message")
peer.protocol_logger.update("Received HaveAll")
peer.bitfield.set_has_all()
return
elif isinstance(message, HaveNoneMessage):
if peer.state == PeerState.Started:
Logger().write(LogVerbosity.All, str(peer.id) + " Received HaveNone message")
peer.protocol_logger.update("Received HaveNone")
peer.bitfield.set_has_none()
return
elif isinstance(message, AllowedFastMessage):
if peer.state == PeerState.Started:
Logger().write(LogVerbosity.All, str(peer.id) + " Received AllowedFast message")
peer.protocol_logger.update("Received AllowedFast", True)
peer.allowed_fast_pieces.append(message.piece_index)
return
elif isinstance(message, SuggestPieceMessage):
Logger().write(LogVerbosity.All, str(peer.id) + " Received SuggestPiece message")
peer.protocol_logger.update("Received SuggestPiece", True)
return
elif isinstance(message, RejectRequestMessage):
if peer.state == PeerState.Started:
Logger().write(LogVerbosity.Debug, str(peer.id) + " Received RejectRequest message")
peer.protocol_logger.update("Received RejectRequest", True)
peer.download_manager.request_rejected(message.index, message.offset, message.data_length)
return
elif isinstance(message, ExtensionHandshakeMessage):
Logger().write(LogVerbosity.All, str(peer.id) + ' Received extension handshake message')
peer.protocol_logger.update("Received ExtensionHandshake")
try:
dic = Bencode.bdecode(message.bencoded_payload)
except BTFailure:
Logger().write(LogVerbosity.Debug, "Invalid extension handshake received")
peer.stop_async("Invalid extension handshake")
return
peer.extension_manager.parse_dictionary(dic)
if b'metadata_size' in dic:
if peer is not None:
self.torrent.metadata_manager.set_total_size(dic[b'metadata_size'])
return
elif isinstance(message, PeerExchangeMessage):
peer.protocol_logger.update("Received PeerExchange")
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received ' + str(len(message.added)) + ' peers from peer exchange')
self.torrent.peer_manager.add_potential_peers(message.added, PeerSource.PeerExchange)
return
elif isinstance(message, MetadataMessage):
if message.metadata_message_type == MetadataMessageType.Data:
peer.protocol_logger.update("Received Metadata")
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received metadata message index ' + str(message.piece_index))
self.torrent.metadata_manager.add_metadata_piece(message.piece_index, message.data)
else:
peer.protocol_logger.update("Received Metadata rejected")
Logger().write(LogVerbosity.Debug, str(peer.id) + ' Received metadata reject message ' + str(message.piece_index))
return
def stop(self):
super().stop()
self.metadata_wait_list = []
```
#### File: Torrents/Torrent/TorrentPeerProcessor.py
```python
import time
from MediaPlayer.Torrents.TorrentManager import TorrentManager
from Shared.Threading import CustomThread
from Shared.Timing import Timing
from Shared.Util import current_time
class TorrentPeerProcessor(TorrentManager):
def __init__(self, torrent):
TorrentManager.__init__(self, torrent, "Peer processor")
self.running = False
self.process_thread = CustomThread(self.process, "Peer processor")
def start(self):
self.running = True
self.process_thread.start()
def stop(self):
self.running = False
self.process_thread.join()
super().stop()
def process(self):
while self.running:
start_time = current_time()
peers_to_process = self.torrent.peer_manager.connected_peers
Timing().start_timing("peer_processing")
for peer in peers_to_process:
peer.metadata_manager.update()
peer.download_manager.update_timeout()
Timing().stop_timing("peer_processing")
spend_time = current_time() - start_time
time.sleep(max(0.0, 0.1 - (spend_time / 1000)))  # clamp so a slow iteration never produces a negative sleep
```
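The peer processing loop above targets a roughly 100 ms cadence by subtracting the time spent processing from the sleep. A minimal standalone sketch of that pattern, with the sleep clamped at zero so a slow iteration cannot yield a negative delay:
```python
# Fixed-cadence loop sketch: each iteration subtracts its own processing time
# from the interval and never sleeps a negative amount.
import time

def run_every(interval_s, work, iterations):
    for _ in range(iterations):
        start = time.time()
        work()
        spent = time.time() - start
        time.sleep(max(0.0, interval_s - spent))

if __name__ == "__main__":
    run_every(0.1, lambda: print("tick", time.time()), 5)
```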
#### File: Torrents/Tracker/Tracker.py
```python
import random
import time
from urllib.parse import urlparse
from MediaPlayer.Torrents.Tracker import TrackerMessages
from MediaPlayer.Util import Bencode
from MediaPlayer.Util.Bencode import BTFailure
from MediaPlayer.Util.Enums import PeerSource
from MediaPlayer.Util.Util import uri_from_bytes
from Shared.Events import EventManager, EventType
from Shared.Logger import Logger, LogVerbosity
from Shared.Network import RequestFactory, UdpClient
from Shared.Settings import Settings
from Shared.Threading import CustomThread
from Shared.Util import current_time
class TrackerFactory:
@staticmethod
def create_tracker(uri):
parsed_uri = urlparse(uri)
if parsed_uri.scheme == 'udp':
return UdpTracker(parsed_uri.hostname, parsed_uri.port if parsed_uri.port is not None else 80)
elif parsed_uri.scheme == 'http':
return HttpTracker(parsed_uri)
class HttpTracker:
def __init__(self, uri):
self.uri = uri
self.host = uri.hostname
self.last_announce = 0
self.tracker_peer_request_amount = Settings.get_int("tracker_peer_request_amount")
def announce_torrent(self, torrent):
self.last_announce = current_time()
announce_message = TrackerMessages.TrackerAnnounceMessage.for_http(torrent.info_hash, 2, torrent.total_size - torrent.left, torrent.left, torrent.uploaded, self.tracker_peer_request_amount)
path = self.uri.path + announce_message.as_param_string()
response = RequestFactory.make_request(path)
if response is None:
return False
try:
response_dict = Bencode.bdecode(response)
except BTFailure:
Logger().write(LogVerbosity.Info, 'Invalid tracker response: ' + str(response))
return False
if b"peers" not in response_dict:
return False
peers_data = response_dict[b"peers"]
total_peers = int(len(peers_data) / 6)
offset = 0
peers = []
for index in range(total_peers):
peers.append(uri_from_bytes(peers_data[offset:offset + 6]))
offset += 6
EventManager.throw_event(EventType.PeersFound, [peers, PeerSource.HttpTracker])
class UdpTracker:
def __init__(self, host, port):
self.host = host
self.port = port
self.last_announce = 0
self.transaction_id = 0
self.connection_id = 0
self.connection_id_retrieved = 0
self.connection = UdpClient(host, port, Settings.get_int("connection_timeout") / 1000)
self.tracker_peer_request_amount = Settings.get_int("tracker_peer_request_amount")
def connect(self):
if self.connection_id_retrieved > current_time() - (1000 * 60):
# If we already have a connection id which is still valid ( 1 minute ), use this one
return True
self.transaction_id = random.randint(0, 9999999)
self.connection_id = 0x41727101980
connection_message = TrackerMessages.TrackerConnectionMessage(self.connection_id, self.transaction_id, 0)
send_okay = self.connection.send(connection_message.as_bytes())
data = self.connection.receive()
if not send_okay or data is None:
return False
response_message = TrackerMessages.TrackerConnectionMessage.for_receive(data)
if response_message is None:
return False
self.connection_id = response_message.connection_id
self.connection_id_retrieved = current_time()
return True
def announce_torrent(self, torrent):
self.last_announce = current_time()
if not self.connect():
return False
announce_message = TrackerMessages.TrackerAnnounceMessage.for_udp(self.connection_id, self.transaction_id, torrent.info_hash, 2,
torrent.total_size - torrent.left, torrent.left, torrent.uploaded, self.tracker_peer_request_amount,
6881)
send_okay = self.connection.send(announce_message.as_bytes())
response_message_bytes = self.connection.receive()
if not send_okay or response_message_bytes is None:
return False
response_message = TrackerMessages.TrackerResponseMessage.from_bytes(response_message_bytes)
if response_message is None or response_message.error is not None:
return False
EventManager.throw_event(EventType.PeersFound, [response_message.peers, PeerSource.UdpTracker])
return True
class TrackerManager:
def __init__(self):
self.trackers = []
self.initialized = False
self.request_peers_id = EventManager.register_event(EventType.RequestPeers, self.request_peers)
def request_peers(self, torrent):
if not self.initialized:
for uri in torrent.announce_uris:
if len([x for x in self.trackers if x.host == uri]) == 0:
tracker = TrackerFactory.create_tracker(uri)
if tracker is not None:
self.trackers.append(tracker)
Logger().write(LogVerbosity.Debug, "Initialized " + str(len(self.trackers)) + " trackers")
self.initialized = True
for tracker in self.trackers:
thread = CustomThread(self.tracker_announce, "Tracker announce", [tracker, torrent])
thread.start()
@staticmethod
def tracker_announce(tracker, torrent):
if not tracker.announce_torrent(torrent):
Logger().write(LogVerbosity.Debug, 'Could not connect to tracker: ' + tracker.host)
else:
Logger().write(LogVerbosity.Debug, 'Tracker ok: ' + tracker.host)
def stop(self):
EventManager.deregister_event(self.request_peers_id)
```
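HttpTracker above decodes the tracker's compact peer list: each peer is 6 bytes, four for the IPv4 address and two for a big-endian port. A small self-contained sketch of that decoding (Util.py below implements the same idea with its own helpers):
```python
# Standalone sketch of compact peer decoding (4-byte IPv4 + 2-byte big-endian port).
import socket
import struct

def decode_compact_peers(data):
    peers = []
    for offset in range(0, len(data) - len(data) % 6, 6):
        ip = socket.inet_ntoa(data[offset:offset + 4])
        (port,) = struct.unpack(">H", data[offset + 4:offset + 6])
        peers.append((ip, port))
    return peers

if __name__ == "__main__":
    sample = socket.inet_aton("192.168.1.10") + struct.pack(">H", 6881)
    print(decode_compact_peers(sample))  # [('192.168.1.10', 6881)]
```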
#### File: MediaPlayer/Util/Util.py
```python
import os
import socket
import re
from MediaPlayer.Util import Network
from MediaPlayer.Util.Network import read_ushort
def get_file_info(filename):
with open(filename, "rb") as f:
first = f.read(65536)
f.seek(-65536, os.SEEK_END)
last = f.read(65536)
return os.path.getsize(filename), first, last
def uri_to_bytes(uri):
uri_port = uri.split(':')
result = bytearray(socket.inet_aton(uri_port[0])) + bytearray(2)  # bytearray so write_ushort can write into it, mirroring ip_port_to_bytes below
Network.write_ushort(result, int(uri_port[1]), 4)
return result
def ip_port_to_bytes(ip, port):
result = bytearray(socket.inet_aton(ip)) + bytearray(2)
Network.write_ushort(result, port, 4)
return result
def ip_port_from_bytes(data):
ip = socket.inet_ntop(socket.AF_INET, data[0: 4])
offset, port = read_ushort(data, 4)
return ip, port
def ip_port_from_bytes_multiple(data):
result = []
for i in range(int(len(data) / 6)):
result.append(ip_port_from_bytes(data[i*6: i*6 + 6]))
return result
def uri_from_bytes(data):
ip = socket.inet_ntop(socket.AF_INET, data[0: 4])
offset, port = read_ushort(data, 4)
return 'tcp://' + ip + ":" + str(port)
def check_bytes_length(byte_data, expected):
if byte_data is None or len(byte_data) != expected:
return False
return True
def check_minimal_bytes_length(byte_data, minimal):
if byte_data is None or len(byte_data) < minimal:
return False
return True
def try_parse_season_episode(path):
path = path.lower()
season_number = 0
epi_number = 0
matches = re.findall("\d+[x]\d+", path) # 7x11
if len(matches) > 0:
match = matches[-1]
season_epi = re.split("x", match)
if len(season_epi) == 2:
season_number = int(season_epi[0])
epi_number = int(season_epi[1])
if season_number == 0:
matches = re.findall("[s][0]\d+", path) # s01
if len(matches) > 0:
match = matches[-1]
season_number = int(match[1:])
if season_number == 0:
matches = re.findall("[s]\d+", path) # s1
if len(matches) > 0:
match = matches[-1]
season_number = int(match[1:])
if season_number == 0:
if "season" in path:
season_index = path.rfind("season") + 6 # season 1
season_number = try_parse_number(path[season_index: season_index + 3])
if epi_number == 0:
matches = re.findall("[e][0]\d+", path) # e01
if len(matches) > 0:
match = matches[-1]
epi_number = int(match[1:])
if epi_number == 0:
matches = re.findall("[e]\d+", path) # e1
if len(matches) > 0:
match = matches[-1]
epi_number = int(match[1:])
if epi_number == 0:
if "episode" in path:
epi_index = path.rfind("episode") + 7 # episode 1
epi_number = try_parse_number(path[epi_index: epi_index + 3])
return season_number, epi_number
def try_parse_number(number_string):
if number_string.isdigit():
return int(number_string)
if len(number_string) > 1:
if number_string[0: 2].isdigit():
return int(number_string[0: 2])
if number_string[1: 3].isdigit():
return int(number_string[1: 3])
if number_string[0].isdigit():
return int(number_string[0])
if number_string[1].isdigit():
return int(number_string[1])
return 0
def is_media_file(path):
if "." not in path:
return False
ext = os.path.splitext(path)[1].lower()
return ext in (".mp4", ".mkv", ".avi")
```
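try_parse_season_episode above relies primarily on a `<season>x<episode>` token and falls back to s01/e01-style patterns. The standalone sketch below illustrates only the primary pattern on made-up file names, including a caveat that resolution strings such as 1080x720 also match it.
```python
# Illustration of the primary "<season>x<episode>" pattern only; the s01/e01
# fallbacks from try_parse_season_episode are omitted. Sample names are made up.
import re

def parse_sxe(path):
    matches = re.findall(r"(\d+)x(\d+)", path.lower())
    if not matches:
        return 0, 0
    season, episode = matches[-1]
    return int(season), int(episode)

if __name__ == "__main__":
    print(parse_sxe("Some.Show.7x11.720p.mkv"))  # (7, 11)
    print(parse_sxe("movie-1080x720.mp4"))       # (1080, 720) - resolution false positive
```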
#### File: src/Shared/LogObject.py
```python
from enum import Enum
from Shared.Logger import LogItemTracker
from Shared.Settings import Settings
class LogObject:
def __init__(self, parent, name):
self._logging = Settings.get_bool("state_logging")
if self._logging:
parent_id = 0
if parent is not None:
parent_id = parent.log_tracker.id
self.log_tracker = LogItemTracker(parent_id, name)
def __setattr__(self, name, value):
if hasattr(self, "_logging") and self._logging:
self.process_update(name, value)
super().__setattr__(name, value)
def process_update(self, name, value):
if not hasattr(self, "log_tracker") or self.log_tracker is None:
return
if name.startswith("_"):
return
if hasattr(self, name) and getattr(self, name) == value:
return
if not isinstance(value, (str, int, float, bool)):
if isinstance(value, Enum):
value = str(value)
else:
return
self.log_tracker.update(name, value)
def finish(self):
if self._logging:
self.log_tracker.finish()
```
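LogObject above intercepts every attribute assignment via `__setattr__` and forwards public changes to a LogItemTracker. The self-contained sketch below shows the same interception pattern with a print-based stand-in for the tracker.
```python
# Sketch of the __setattr__ interception used by LogObject; PrintTracker is an
# illustrative stand-in for LogItemTracker.
class PrintTracker:
    def update(self, name, value):
        print("state change:", name, "=", value)

class Tracked:
    def __init__(self):
        super().__setattr__("_tracker", PrintTracker())  # set directly to avoid recursion

    def __setattr__(self, name, value):
        if not name.startswith("_"):
            self._tracker.update(name, value)
        super().__setattr__(name, value)

if __name__ == "__main__":
    t = Tracked()
    t.progress = 10  # prints: state change: progress = 10
    t.progress = 20  # prints: state change: progress = 20
```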
#### File: src/Shared/Stats.py
```python
import time
from Database.Database import Database
from Shared.Logger import Logger, LogVerbosity
from Shared.Observable import Observable
from Shared.Threading import CustomThread
from Shared.Util import Singleton
class StatList(Observable):
def __init__(self):
super().__init__("Stats", 1)
self.statistics = dict()
def update(self, name, value):
self.statistics[name] = value
self.changed()
def get(self, name):
if name in self.statistics:
return float(self.statistics[name])
return 0
class Stats(metaclass=Singleton):
def __init__(self):
self.cache = StatList()
self.changed = False
self.work_thread = CustomThread(self.save_stats, "Stat saver", [])
def start(self):
stats = Database().get_stats()
for key, value, last_change in stats:
self.cache.update(key, value)
self.work_thread.start()
def _update_stat(self, name, value):
self.cache.update(name, value)
self.changed = True
def save_stats(self):
while True:
if self.changed:
self.changed = False
copy = self.cache.statistics.copy()
Logger().write(LogVerbosity.Debug, "Saving stats")
for key, val in copy.items():
Database().update_stat(key, val)
time.sleep(15)
def add(self, name, value):
stat = self.cache.get(name)
if stat == 0:
self._update_stat(name, value)
else:
self._update_stat(name, stat + value)
def total(self, name):
return self.cache.get(name)
def set(self, name, value):
self._update_stat(name, value)
```
#### File: src/Shared/Threading.py
```python
import threading
import time
from Shared.LogObject import LogObject
from Shared.Logger import Logger, LogVerbosity
from Shared.Util import current_time, Singleton
class ThreadManager(LogObject, metaclass=Singleton):
def __init__(self):
super().__init__(None, "Threads")
self.threads = []
self.thread_history = dict()
self.thread_count = 0
def add_thread(self, thread):
thread.history_entry = ThreadEntry(thread.thread_name, current_time())
if thread.thread_name not in self.thread_history:
self.thread_history[thread.thread_name] = []
self.thread_history[thread.thread_name].append(thread.history_entry)
self.threads.append(thread)
self.thread_count = len(self.threads)
def remove_thread(self, thread):
thread.history_entry.end_time = current_time()
self.threads.remove(thread)
self.thread_count = len(self.threads)
class ThreadEntry:
def __init__(self, name, start_time):
self.thread_name = name
self.start_time = start_time
self.end_time = 0
class CustomThread(LogObject):
@property
def is_alive(self):
return self.thread.is_alive
def __init__(self, target, thread_name, args=[]):
super().__init__(ThreadManager(), "Thread " + thread_name)
self.target = target
self.args = args
self.thread = threading.Thread(name=thread_name, target=self.__run)
self.thread.daemon = True
self.thread_name = thread_name
self.start_time = 0
self.history_entry = None
self.started = False
def start(self):
self.start_time = current_time()
Logger().write(LogVerbosity.All, "Starting thread " + self.thread_name)
ThreadManager().add_thread(self)
self.thread.start()
def __run(self):
try:
self.started = True
self.target(*self.args)
ThreadManager().remove_thread(self)
self.finish()
Logger().write(LogVerbosity.All, "Thread " + self.thread_name + " done")
except Exception as e:
Logger().write_error(e, "Exception in thread " + self.thread_name)
ThreadManager().remove_thread(self)
self.finish()
def join(self):
if threading.current_thread() is not self.thread:
if not self.started:
time.sleep(0)
self.thread.join()
```
#### File: Webserver/Controllers/AuthController.py
```python
import uuid
from flask import request
from Database.Database import Database
from Shared.Logger import Logger, LogVerbosity
from Shared.Settings import SecureSettings
from Shared.Util import to_JSON
from Webserver.APIController import app, APIController
class AuthController:
@staticmethod
@app.route('/auth/login', methods=['POST'])
def login():
client_id = request.headers.get('Client-ID', None)
p = request.args.get('p')
ip_addr = request.headers.get('X-Forwarded-For', None) or request.remote_addr
user_agent = request.user_agent.string
success, key = AuthController.validate(client_id, p, ip_addr, user_agent)
Logger().write(LogVerbosity.Info, str(client_id) + " log on result: " + str(success))
status = 200
if not success:
Database().add_login_attempt(APIController.get_salted(client_id), ip_addr, user_agent, "Login")
status = 401
return to_JSON(AuthResult(success, key)), status
@staticmethod
@app.route('/auth/refresh', methods=['POST'])
def refresh():
client_id = request.headers.get('Client-ID', None)
client_key = APIController.get_salted(client_id)
client_known = Database().client_known(client_key)
ip_addr = request.headers.get('X-Forwarded-For', None) or request.remote_addr
user_agent = request.user_agent.string
if not client_known:
Logger().write(LogVerbosity.Info, str(client_id) + " failed to refresh")
Database().add_login_attempt(client_key, ip_addr, user_agent, "Refresh")
return to_JSON(AuthResult(False, None)), 401
session_key = AuthController.generate_session_key()
Database().refresh_session_key(client_key, session_key, ip_addr, user_agent)
Logger().write(LogVerbosity.Debug, str(client_id) + " successfully refreshed")
return to_JSON(AuthResult(True, session_key)), 200
@staticmethod
def validate(client_id, p, ip, user_agent):
if APIController.get_salted(p) == SecureSettings.get_string("api_password"):
client_key = APIController.get_salted(client_id)
session_key = AuthController.generate_session_key()
Database().add_client(client_key, session_key, ip, user_agent)
return True, session_key
return False, None
@staticmethod
def generate_session_key():
return uuid.uuid4().hex
class AuthResult:
def __init__(self, success, key):
self.success = success
self.key = key
```
#### File: Controllers/Home/TradfriController.py
```python
import urllib.parse
from flask import request
from Controllers.TradfriManager import TradfriManager
from Shared.Logger import Logger, LogVerbosity
from Shared.Util import to_JSON
from Webserver.APIController import app
from Webserver.Models import LightControl, LightDevice, DeviceGroup, SocketControl, SocketDevice
class TradfriController:
@staticmethod
@app.route('/tradfri/devices', methods=['GET'])
def get_devices():
result = TradfriManager().get_devices()
if len(result) == 0:
result = TradfriController.create_test_data()
return to_JSON(result)
@staticmethod
@app.route('/tradfri/device_name', methods=['POST'])
def set_device_name():
device_id = int(request.args.get('device_id'))
name = urllib.parse.unquote(request.args.get('name'))
Logger().write(LogVerbosity.Info, "Set device " + str(device_id) + " to name " + str(name))
TradfriManager().set_device_name(device_id, name)
return "OK"
@staticmethod
@app.route('/tradfri/device_state', methods=['POST'])
def set_device_state():
device_id = int(request.args.get('device_id'))
state = request.args.get('state') == "true"
Logger().write(LogVerbosity.Info, "Set device " + str(device_id) + " to state " + str(state))
TradfriManager().set_state(device_id, state)
return "OK"
@staticmethod
@app.route('/tradfri/light_warmth', methods=['POST'])
def set_light_warmth():
device_id = int(request.args.get('device_id'))
warmth = int(request.args.get('warmth'))
Logger().write(LogVerbosity.Info, "Set light " + str(device_id) + " to warmth " + str(warmth))
TradfriManager().set_light_warmth(device_id, warmth)
return "OK"
@staticmethod
@app.route('/tradfri/light_dimmer', methods=['POST'])
def set_light_dimmer():
device_id = int(request.args.get('device_id'))
dimmer = int(request.args.get('dimmer'))
Logger().write(LogVerbosity.Info, "Set light " + str(device_id) + " to dimmer " + str(dimmer))
TradfriManager().set_light_dimmer(device_id, dimmer)
return "OK"
@staticmethod
@app.route('/tradfri/groups', methods=['GET'])
def get_groups():
result = TradfriManager().get_device_groups()
if len(result) == 0:
result = TradfriController.create_test_groups()
return to_JSON(result)
@staticmethod
@app.route('/tradfri/group_devices', methods=['GET'])
def get_group_devices():
group_id = int(request.args.get('group_id'))
result = TradfriManager().get_devices_in_group(group_id)
if len(result) == 0:
result = TradfriController.create_test_data()
return to_JSON(result)
@staticmethod
@app.route('/tradfri/group_state', methods=['POST'])
def set_group_state():
group_id = int(request.args.get('group_id'))
state = request.args.get('state') == "true"
Logger().write(LogVerbosity.Info, "Set group " + str(group_id) + " to state " + str(state))
TradfriManager().set_group_state(group_id, state)
return "OK"
@staticmethod
@app.route('/tradfri/group_dimmer', methods=['POST'])
def set_group_dimmer():
group_id = int(request.args.get('group_id'))
dimmer = int(request.args.get('dimmer'))
Logger().write(LogVerbosity.Info, "Set group " + str(group_id) + " to dimmer " + str(dimmer))
TradfriManager().set_group_dimmer(group_id, dimmer)
return "OK"
@staticmethod
@app.route('/tradfri/group_name', methods=['POST'])
def set_group_name():
group_id = int(request.args.get('group_id'))
name = urllib.parse.unquote(request.args.get('name'))
Logger().write(LogVerbosity.Info, "Set group " + str(group_id) + " to name " + str(name))
TradfriManager().set_group_name(group_id, name)
return "OK"
@staticmethod
def create_test_groups():
return [DeviceGroup(1, "Woonkamer 1", True, 254, 6), DeviceGroup(2, "Woonkamer 2", True, 128, 6), DeviceGroup(3, "Keuken", False, 254, 6)]
@staticmethod
def create_test_data():
result = [LightControl(1,
"Test led",
"Type",
123,
True,
True,
True,
True,
[
LightDevice(
True,
200,
260,
"4a418a")
]), SocketControl(2,
"Test socket 2",
"Type",
123,
True,
[
SocketDevice(False)
]), LightControl(3,
"Test led 3",
"Type",
123,
True,
True,
False,
False,
[
LightDevice(
False,
200,
0,
"")
]), LightControl(4,
"Test led 4",
"Type",
123,
True,
True,
False,
False,
[
LightDevice(
False,
200,
0,
"")
]), LightControl(5,
"Test led 5",
"Type",
123,
True,
True,
False,
False,
[
LightDevice(
False,
200,
0,
"")
])]
return result
```
#### File: Controllers/MediaPlayer/RadioController.py
```python
from flask import request
from Database.Database import Database
from Shared.Logger import Logger, LogVerbosity
from Shared.Util import to_JSON
from Webserver.Models import BaseMedia
from Webserver.APIController import app
class Radio(BaseMedia):
def __init__(self, radio_id, title, url, poster):
super().__init__(radio_id, poster, title)
self.url = url
class RadioController:
@staticmethod
@app.route('/radios', methods=['GET'])
def get():
Logger().write(LogVerbosity.Debug, "Get radio list")
return to_JSON(Database().get_radios())
```
#### File: Controllers/MediaPlayer/TorrentController.py
```python
import re
from bs4 import Tag
from flask import request
from bs4 import BeautifulSoup
from Shared.Network import RequestFactory
from Shared.Settings import Settings
from Shared.Util import to_JSON
from Webserver.APIController import app
from Webserver.Models import TorrentModel
class TorrentController:
base_url = Settings.get_string("torrent_api")
@staticmethod
@app.route('/torrents/top', methods=['GET'])
def top():
category = request.args.get('category')
if category == "TV":
category = "television"
elif category == "Movies":
category = "movies"
return to_JSON(TorrentController.get_torrents(TorrentController.base_url + "/top-100-" + category))
@staticmethod
@app.route('/torrents', methods=['GET'])
def search():
terms = request.args.get('keywords')
category = request.args.get('category')
return to_JSON(TorrentController.get_torrents(TorrentController.base_url + "/category-search/" + terms + "/" + category + "/1/"))
@staticmethod
def get_magnet_url(url):
torrent_result = RequestFactory.make_request(TorrentController.base_url + url, timeout=10)
parsed = BeautifulSoup(torrent_result, "lxml")
        magnet_link = parsed.findAll('a', href=re.compile(r'^magnet:\?xt=urn:btih:'))
if len(magnet_link) == 0:
return None
return magnet_link[0].attrs['href']
@staticmethod
def get_torrents(url):
search_result = RequestFactory.make_request(url, timeout=10)
if search_result is None:
return []
parsed = BeautifulSoup(search_result, "lxml")
table_rows = parsed.find_all('tr')
torrent_rows = [row.contents for row in table_rows if len([child for child in row.contents if isinstance(child, Tag) and child.name == "td" and ('name' in child.attrs['class'] or 'seeds' in child.attrs['class'])]) != 0]
result = []
for row in torrent_rows:
childs = [x for x in row if isinstance(x, Tag)]
name = [x for x in childs if 'name' in x.attrs['class']][0].text
seeds = int([x for x in childs if 'seeds' in x.attrs['class']][0].text)
leeches = int([x for x in childs if 'leeches' in x.attrs['class']][0].text)
size = [x for x in childs if 'size' in x.attrs['class']][0].contents[0]
torrent = [x for x in childs if 'name' in x.attrs['class']][0].contents[1].attrs['href']
result.append(TorrentModel(name, seeds, leeches, size, torrent))
return result
```
#### File: Webserver/Controllers/UtilController.py
```python
import os
import time
import urllib.parse
import sys
import objgraph
from flask import request
from Database.Database import Database
from MediaPlayer.MediaManager import MediaManager
from MediaPlayer.Player.VLCPlayer import VLCPlayer
from Shared.Events import EventManager, EventType
from Shared.Logger import Logger, LogVerbosity
from Shared.Network import RequestFactory
from Shared.Util import to_JSON, write_size, current_time
from Updater import Updater
from Webserver.APIController import app, APIController
from Webserver.Controllers.MediaPlayer.MovieController import MovieController
from Webserver.Controllers.MediaPlayer.ShowController import ShowController
from Webserver.Controllers.MediaPlayer.TorrentController import TorrentController
class UtilController:
health_cache = dict()
@staticmethod
@app.route('/util/update', methods=['GET'])
def get_update():
instance = int(request.args.get("instance"))
if instance == 1:
return to_JSON(UpdateAvailable(Updater().check_version(), Updater().last_version))
else:
result = APIController().slave_request(instance, "get_last_version", 10)
if result is None:
return to_JSON(UpdateAvailable(False, ""))
return to_JSON(UpdateAvailable(result[0], result[1]))
@staticmethod
@app.route('/util/get_action_history', methods=['GET'])
def get_action_history():
topic = request.args.get("topic")
start_time = int(request.args.get("start"))
end_time = int(request.args.get("end"))
return to_JSON(Database().get_action_history(topic, start_time, end_time))
@staticmethod
@app.route('/util/update', methods=['POST'])
def update():
instance = int(request.args.get("instance"))
if instance == 1:
Updater().update()
else:
APIController().slave_command(instance, "updater", "update")
return "OK"
@staticmethod
@app.route('/util/restart_device', methods=['POST'])
def restart_device():
instance = int(request.args.get("instance"))
if instance == 1:
os.system('sudo reboot')
else:
APIController().slave_command(instance, "system", "restart_device")
return "OK"
@staticmethod
@app.route('/util/restart_application', methods=['POST'])
def restart_application():
instance = int(request.args.get("instance"))
if instance == 1:
python = sys.executable
os.execl(python, python, *sys.argv)
else:
APIController().slave_command(instance, "system", "restart_application")
return "OK"
@staticmethod
@app.route('/util/close_application', methods=['POST'])
def close_application():
instance = int(request.args.get("instance"))
if instance == 1:
sys.exit()
else:
APIController().slave_command(instance, "system", "close_application")
return "OK"
@staticmethod
@app.route('/util/log', methods=['POST'])
def debug_log():
Logger().write(LogVerbosity.Important, "Test")
return "OK"
@staticmethod
@app.route('/util/logs', methods=['GET'])
def get_log_files():
log_files = Logger.get_log_files()
return to_JSON([(name, path, write_size(size)) for name, path, size in log_files])
@staticmethod
@app.route('/util/log', methods=['GET'])
def get_log_file():
file = urllib.parse.unquote(request.args.get('file'))
return Logger.get_log_file(file)
@staticmethod
@app.route('/util/shelly', methods=['POST'])
def shelly():
ip = request.args.get("ip")
state = "on" if request.args.get("state") == "true" else "off"
Logger().write(LogVerbosity.Info, "Set shelly " + ip + " to " + state)
result = RequestFactory.make_request("http://" + ip + "?state=" + state)
if result is not None:
Logger().write(LogVerbosity.Info, result)
return "OK"
@staticmethod
@app.route('/util/system_health_check', methods=['POST'])
def execute_health_test():
Logger().write(LogVerbosity.Info, "System health test")
result = HealthTestResult()
UtilController.run_endpoint_checks(result)
UtilController.run_torrent_check(result)
return to_JSON(result)
@staticmethod
def run_endpoint_checks(result):
movies = MovieController.request_movies(MovieController.movies_api_path + "movies/1?sort=Trending")
shows = ShowController.request_shows(ShowController.shows_api_path + "shows/1?sort=Trending")
torrents = TorrentController.get_torrents(TorrentController.base_url + "/top-100-movies")
result.request_movies_result.set_result(len(movies) != 0, "No movies returned")
result.request_shows_result.set_result(len(shows) != 0, "No shows returned")
result.request_torrents_result.set_result(len(torrents) != 0, "No torrents returned")
return result
@staticmethod
def run_torrent_check(result):
best_movie_torrents = MovieController.request_movies(MovieController.movies_api_path + "movies/1?sort=Trending")[0: 20]
all_torrents = []
for arr in [x.torrents for x in best_movie_torrents]:
all_torrents += arr
if len(all_torrents) == 0:
return
torrent = max(all_torrents, key=lambda t: t.seeds / (t.peers or 1))
Logger().write(LogVerbosity.Info,
"System health selected torrent at " + torrent.quality + ", " + str(torrent.peers) + "/" + str(torrent.seeds) + " l/s")
MediaManager().start_movie(0, "Health check", torrent.url, None, 0)
created = UtilController.wait_for(2000, lambda: MediaManager().torrent is not None)
result.torrent_starting_result.set_result(created, "Didn't create torrent")
if not created:
return result
executing = UtilController.wait_for(10000, lambda: MediaManager().torrent.is_preparing or MediaManager().torrent.is_executing)
result.torrent_starting_result.set_result(executing, "Torrent isn't executing")
if not executing:
return result
downloading = UtilController.wait_for(10000, lambda: MediaManager().torrent.network_manager.average_download_counter.total > 0)
result.torrent_downloading_result.set_result(downloading, "No bytes downloaded at all")
playing = False
if downloading:
playing = UtilController.wait_for(30000, lambda: VLCPlayer().player_state.playing_for > 0)
result.torrent_playing_result.set_result(playing, "Didn't start playing torrent")
if playing:
MediaManager().seek(1000 * 60 * 5) # seek to 5 minutes in
playing = UtilController.wait_for(10000, lambda: VLCPlayer().player_state.playing_for > 1000 * 60 * 5)
result.torrent_playing_after_seek_result.set_result(playing, "Didn't start playing torrent after seeking")
MediaManager().stop_play()
stopped_event = UtilController.wait_for_event(20000, EventType.TorrentStopped)
result.torrent_disposing_result.set_result(stopped_event, "Torrent stopped event not received")
if stopped_event:
disposed = UtilController.wait_for(5000, lambda: len(objgraph.by_type('MediaPlayer.Torrents.Torrent.Torrent.Torrent')) == 0)
result.torrent_disposing_result.set_result(disposed, "Torrent not disposed after stopping")
return result
@staticmethod
def wait_for(max_time, action):
start = current_time()
while current_time() - start < max_time:
if action():
return True
time.sleep(0.5)
return False
@staticmethod
def wait_for_event(max_time, event):
UtilController.health_cache[event] = False
evnt = EventManager.register_event(event, lambda *x: UtilController.assign(event))
result = UtilController.wait_for(max_time, lambda: UtilController.health_cache[event])
EventManager.deregister_event(evnt)
return result
@staticmethod
def assign(name):
UtilController.health_cache[name] = True
class UpdateAvailable:
def __init__(self, available, commit_hash):
self.available = available
self.hash = commit_hash
class HealthTestResult:
def __init__(self):
self.request_movies_result = HealthTestResultItem("Fetch movies")
self.request_shows_result = HealthTestResultItem("Fetch shows")
self.request_torrents_result = HealthTestResultItem("Fetch torrents")
self.torrent_starting_result = HealthTestResultItem("Starting torrent")
self.torrent_downloading_result = HealthTestResultItem("Downloading torrent")
self.torrent_playing_result = HealthTestResultItem("Playing torrent")
self.torrent_playing_after_seek_result = HealthTestResultItem("Playing after seek")
self.torrent_disposing_result = HealthTestResultItem("Disposing torrent")
class HealthTestResultItem:
def __init__(self, name):
self.name = name
self.result = True
self.run = False
self.reason = None
def set_result(self, result, reason):
self.result = result
self.run = True
self.reason = reason
```
#### File: Controllers/Websocket2/BaseWebsocketController.py
```python
from flask_socketio import Namespace
from Shared.Logger import Logger, LogVerbosity
from Shared.Threading import CustomThread
from Shared.Util import to_JSON
from Webserver.APIController import APIController, Request
class BaseWebsocketController(Namespace):
def __init__(self, namespace):
super().__init__(namespace)
self.requests = []
def request_wait(self, topic, timeout, room, *args):
request_message = self._send_request(topic, args, room)
return self.wait_for_request_response(request_message, timeout)
def request_cb(self, topic, callback, timeout, room, *args):
request_message = self._send_request(topic, args, room)
thread = CustomThread(self.wait_for_request_response, "Request callback " + topic, [request_message, timeout, callback])
thread.start()
def message(self, title, message, room):
self.emit("message", (title, message), room=room)
def send_no_wait(self, topic, command, room, args):
Logger().write(LogVerbosity.Debug, "Client command: " + topic + ", command")
self.emit("command", (topic, command, *args), room=room)
def on_response(self, request_id, args):
Logger().write(LogVerbosity.Debug, "Client response for id " + str(request_id) + ": " + str(args))
requests = [x for x in self.requests if x.request_id == request_id]
if len(requests) == 0:
Logger().write(LogVerbosity.Debug, "No pending request found for id " + str(request_id))
return
requests[0].set(args)
def _send_request(self, topic, data, room):
Logger().write(LogVerbosity.Debug, "Sending request: " + topic + ", data: " + str(data))
request_id = APIController().next_id()
request_message = Request(request_id, topic, data, room, self._complete_request)
self.requests.append(request_message)
data = to_JSON(data)
self.emit("request", (request_id, topic, data), room=room)
return request_message
def timeout_request(self, request_message):
self.emit("timeout", request_message.request_id, room=request_message.room)
def wait_for_request_response(self, request_message, timeout, callback=None):
responded, response = request_message.wait(timeout)
if not responded:
self.timeout_request(request_message)
response = [None]
if callback is not None:
callback(*response)
return response
def _complete_request(self, request_message):
self.requests.remove(request_message)
Logger().write(LogVerbosity.Debug, "Request done, now " + str(len(self.requests)) + " requests open")
``` |
{
"source": "jkorkko/oulu-smartcampus-dataforwarding",
"score": 3
} |
#### File: oulu-smartcampus-dataforwarding/dataforwarding/parser.py
```python
from time import time
import datetime
from enum import Enum as enum
class MeasurementTimestampError(Exception):
pass
class MeasurementTypeError(Exception):
pass
class Sensor(enum):
nb_100 = 1
aistin = 2
def temperature_humidity(data):
'''
    Temp in Celsius degrees,
humidity in %
'''
temperature = round(float(data["temperature"]), 1)
humidity = round(float(data["humidity"]), 1)
return {"temperature": temperature, "humidity": humidity}
def pressure(data):
'''
Pressure in Bars
'''
return {"pressure": float(data["pressure"] / (1*10**3))}
def acceleration(data):
'''
Convert each axis to G
'''
acceleration_ = {}
acceleration_["x"] = int(data["X-axis"]) / (1*10**3)
acceleration_["y"] = int(data["Y-axis"]) / (1*10**3)
acceleration_["z"] = int(data["Z-axis"]) / (1*10**3)
return {"acceleration": acceleration_}
def battery(data):
'''
Voltage converted to mV, percentage in %
'''
battery_voltage = float(data["battery voltage"]) / (1*10**3)
battery_percentage = round(float(data["battery"]))
return {"battery": battery_voltage,
"battery_percentage": battery_percentage}
def orientation(data):
'''
    Offset from horizontal level (top up) in degrees
'''
return {"orientation": float(data["orientation"])}
def motion(data):
'''
    timeframes = length of measurement period in seconds
    motions = amount of registered movements inside the time period
    output["motion"] = average amount of motions per 15 mins
'''
timeframes = data["period"] / 900
motions = data["movement"]
return {"motion": int(motions / timeframes)}
def moisture(data):
'''
Given in relative humidity RH%
'''
return {"moisture": float(data["moisture"])}
def co2(data):
'''
Given in parts per million
'''
return {"co2": int(data["carbon dioxide"])}
def total_movement(data):
'''
Total movements registered since boot
'''
return {"total_movement": int(data["count"])}
def differential_pressure(data):
'''
Difference in pressure of two areas.
Given in Pascals for lower resolution.
'''
return {"differential_pressure": float(data["pressure"])}
def volatile_compounds(data):
'''
Given in parts per billion
'''
return {"organic_compounds": int(data["Total Volatile Organic Compounds"])}
def object_temperature(data):
'''
    Object temperature in Celsius degrees
'''
return {"object_temperature": float(data["temperature"])}
def amplitude_frequency(data):
'''
amplitude in mm, frequency in Hz
'''
return {"amplitude": int(data["amplitude"]),
"frequency": int(data["frequency"])}
def nb_100(data, deveui):
'''
reformat nb_100 data
'''
output = {}
temperature_humidity_parsed = temperature_humidity(data[deveui + "-1"])
output.update(temperature_humidity_parsed)
# convert pressure to Bars
# NOTE! received value is not in mBars as suggested.
output["pressure"] = round(float(data[deveui + "-2"]["pressure"]) / (1*10**5), 3)
# unit dBm
output["rssi"] = float(data[deveui + "-3"]["Signal strength"])
# convert battery voltage to mV
output["battery"] = float(data[deveui + "-4"]["battery"]) / (1*10**3)
return output
def parse(package, sensor=Sensor.nb_100):
'''
Gather essential info from package for reformatting
'''
    data = {}
try:
data["deveui"] = package["nodeName"]
data["time"] = package["time"]
measurements = package["data"]
for measurement in measurements:
data[measurement["dataID"]] = {}
for i in range(len(measurement["name"])):
data[measurement["dataID"]][measurement["name"][i]] = measurement["value"][i]
except (TypeError, IndexError, KeyError) as err:
raise MeasurementTypeError("Unexpected packet") from err
return reformat(data, sensor)
def reformat(data, sensor):
'''
Rebuild objects to unified form
'''
output = {}
# Unique identification for each device
output["deveui"] = data.pop("deveui")
try:
output["timestamp_parser"] = int(time())
# convert nodes timestamp to unix epoch time
time_parsed = datetime.datetime.strptime(data.pop("time"), "%Y-%m-%dT%H:%M:%S.%fZ")
timestamp_node = int(time_parsed.replace(tzinfo=datetime.timezone.utc).timestamp())
output["timestamp_node"] = timestamp_node
    except ValueError as err:
        raise MeasurementTimestampError("Unexpected node timestamp") from err
# nb-100 packets
if sensor == Sensor.nb_100:
parsed_data = nb_100(data, output["deveui"])
output.update(parsed_data)
# aistin packets
elif sensor == Sensor.aistin:
for measurement in data:
parsed_data = aistin_measurements[measurement](data[measurement])
output.update(parsed_data)
return output
aistin_measurements = {
"B188": temperature_humidity,
"B168": pressure,
"B328": acceleration,
"B228": battery,
"B38C": orientation,
"B1A9": motion,
"B198": moisture,
"B1E8": co2,
"B1B8": total_movement,
"B16C": differential_pressure,
"B1EA": volatile_compounds,
"B1D8": object_temperature,
"B32A": amplitude_frequency
}
``` |
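A hedged usage sketch of the parser above: the package shape follows the keys that parse() and temperature_humidity() read, while the device id and measurement values are invented for illustration.
```python
# Hedged usage sketch (not part of the repository); values are illustrative only.
from dataforwarding.parser import parse, Sensor
package = {
    "nodeName": "device-001",                  # hypothetical device id
    "time": "2021-03-01T12:00:00.000Z",
    "data": [
        {"dataID": "B188",                     # temperature/humidity measurement
         "name": ["temperature", "humidity"],
         "value": [21.37, 45.2]},
    ],
}
print(parse(package, sensor=Sensor.aistin))
# -> {'deveui': 'device-001', 'timestamp_parser': ..., 'timestamp_node': 1614600000,
#     'temperature': 21.4, 'humidity': 45.2}
```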
{
"source": "jkorkko/oulu-smartcampus-thermal_image",
"score": 3
} |
#### File: jkorkko/oulu-smartcampus-thermal_image/ers_eye_photo.py
```python
import influxdb
import warnings
import configparser
from dateutil import parser
import matplotlib.pyplot as plt
from requests.packages.urllib3.exceptions import InsecureRequestWarning
CONF_FILE = "ers_photo_settings.conf"
config = configparser.ConfigParser()
config.read(CONF_FILE)
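# Assumed layout of ers_photo_settings.conf (the file itself is not part of this
# excerpt); the section and option names below are exactly the ones read by the
# config.get() calls that follow:
#
#   [InfluxDb]
#   user = influx_user
#   pass = influx_password
#   host = influxdb.example.org
#   db_default = telegraf
#   port = 8086
#   SSL = true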
DB_USER = config.get("InfluxDb", "user")
DB_PASS = config.get("InfluxDb", "pass")
DB_HOST = config.get("InfluxDb", "host")
DB_DEFAULT = config.get("InfluxDb", "db_default")
DB_PORT = config.get("InfluxDb", "port")
DB_SSL = config.get("InfluxDb", "SSL").lower() == "true"
CLIENT = influxdb.InfluxDBClient(DB_HOST, DB_PORT, DB_USER, DB_PASS, DB_DEFAULT, DB_SSL, DB_SSL)
def form_matrix(photo):
'''
Make 2d list out of the photo data
'''
matrix = []
for y in range(8):
matrix.append([])
for x in range(8):
matrix[y].append(photo[8 * y + x])
return matrix
def reverse(photo):
'''
    Rotate the picture 180 degrees by reversing the flattened pixel list
'''
turned = list(reversed(photo))
return turned
def get_img(devices, offset):
'''
    Fetch image data from the database. The query window is limited to 7 days from the offset value to prevent timeouts.
'''
print("\n")
i = 0
orig_len = len(devices)
data = []
timing = "AND time > now() - 7d"
if offset:
timing = "AND time < now() - " + offset + " AND time > now() - " + offset + " - " + "7d"
    warnings.simplefilter('ignore', InsecureRequestWarning)
while i < len(devices):
device = devices[i]
query = "SELECT * FROM \"mqtt_consumer\" WHERE (\"topic\" = 'cwc/LoRaWAN_GW1/uplink' AND \"deveui\" = '{}' AND \"port\" = 17) {} AND \"size\" > 15 ORDER BY time DESC LIMIT 1".format(device, timing)
print("Getting img data from device {}...".format(device))
response = CLIENT.query(query)
if list(response.get_points()) == []:
print("FAILED")
devices.pop(i)
continue
else:
data.append(list(response.get_points()))
print("OK")
i += 1
print("Image data reveiced from {} out of {} devices.".format(len(devices), orig_len))
return data, devices
def ask():
'''
    Ask for user input
'''
devices = []
eyes = {
"1": "a8-17-58-ff-fe-06-3d-9a", #roni
"2": "a8-17-58-ff-fe-06-3d-9b", #risto
"3": "a8-17-58-ff-fe-06-3d-97", #jerkku
"4": "a8-17-58-ff-fe-06-3d-98", #osku
"5": "a8-17-58-ff-fe-06-3d-99" #aleksi
}
while True:
print("Choose device(s). Type eg. \"135\" to select multiple (sensors 1, 3 and 5 in this case).")
print("[1]-D9A\n[2]-D9B\n[3]-D97\n[4]-D98\n[5]-D99")
choice = input("Devices: ")
if len(choice) < 6:
for i in choice:
if i in eyes:
devices.append(eyes.pop(i))
choice = choice.replace(i, "")
if len(choice) == 0:
offset = ask_offset()
return offset, devices
else:
print("Input [x] number(s) from above.\n")
break
else:
print("Invalid input\n")
def ask_offset():
OFFSET_UNITS = ["m", "h", "d"]
while True:
print("Type time offset in format <value><unit>. For example \"7d\" for 7 days.\n-Units:")
print(" m=minutes\n h=hours\n d=days\n zero(0) to get the latest values")
offset = input("Offset: ")
        if offset and offset[-1] in OFFSET_UNITS:
try:
int(offset[:-1])
return offset
except ValueError:
print("Invalid time offset input\n")
elif offset == "0":
return 0
print("Invalid time offset input\n")
def figure(num_of_photos):
'''
Create figure for images
'''
h = 1 #figure height
w = num_of_photos #figure width
if num_of_photos > 3:
h = 2
w = 3
if num_of_photos == 4:
w = 2
fig, ax = plt.subplots(h, w)
fig.tight_layout()
return fig, ax, h, w
def main():
i = 0 # current index of the data in the package
x, y = 0, 0 # current index on plt figure
photo = []
offset, devices = ask()
data, devices = get_img(devices, offset)
if len(devices) == 0:
raise RuntimeError("No image data received from any of the given devices.")
fig, ax, h, w = figure(len(devices))
for packet in data:
packet = packet[0]
deveui = packet["deveui"].replace("-", "").upper()[-3:]
date_time = parser.isoparse(packet["time"]).strftime("%H:%M on %m/%d/%Y")
while "payload_" + str(i) in packet:
# identify the next data value
value = packet["payload_" + str(i)]
if value == 19: # the start of grideye photo data 0x13
for k in range(64):
# Pick indexes 2-65, skip first reserved value
                    photo.append(packet.pop("payload_" + str(i + k + 2)))
# Format
photo = reverse(photo)
matrix = form_matrix(photo)
title = "-" + deveui + " " + date_time + " UK time"
# Draw
if len(devices) == 1:
ax.set_title(title)
ax.imshow(matrix,'coolwarm')
ax.axis("off")
elif h == 1:
ax[x].set_title(title)
ax[x].imshow(matrix,'coolwarm')
ax[x].axis("off")
else:
ax[y][x].set_title(title)
ax[y][x].imshow(matrix,'coolwarm')
ax[y][x].axis("off")
if x == w - 1:
x = 0
y += 1
else:
x += 1
break
elif value == 62:
raise RuntimeError("Sensor settings package received")
else: # ignore every other measurement
                # find bytes occupied by the current measurement from STYPES
                length = STYPES[value][1]
                i = i + length + 1 # move index
if h == 2 and w == 3:
fig.delaxes(ax[1][2])
plt.show() #Show img(s)
STYPES = {
0x01: ("temperature", 2),
0x02: ("humidity", 1),
0x03: ("acceleration", 3),
0x04: ("light", 2),
0x05: ("motion", 1), # pir
0x06: ("co2", 2),
0x07: ("battery", 2),
0x08: ("moisture", 2),
0x0f: ("motion", 1), # accelerometer movement
0x14: ("pressure", 4),
0x15: ("sound", 2),
0x3d: ("debug", 4),
0x11: ("occupancy", 1),
0x0a: ("relative_pulse_count", 2),
0x0b: ("absolute_pulse_count", 4),
0x0d: ("digital", 1),
0x13: ("grideye", 65)
}
if __name__ == "__main__":
    main()
``` |
{
"source": "jkormik/Salary",
"score": 3
} |
#### File: jkormik/Salary/fetch_superjob.py
```python
import requests
import objectpath
from fetched_data_processing import predict_salary
def fetch_sj_vacancies(superjob_secret_key, **params):
headers = {
"X-Api-App-Id": superjob_secret_key
}
url = "https://api.superjob.ru/2.0/vacancies"
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
decoded_response = response.json()
if "error" in decoded_response:
raise requests.exceptions.HTTPError(decoded_response["error"])
return decoded_response
def predict_rub_salary_sj(superjob_secret_key, vacancy, area):
average_salaries = []
for salary in fetch_all_sj_salaries(superjob_secret_key, vacancy, area):
if salary and salary[2] == "rub" and (salary[1]+salary[0]):
average_salaries.append(predict_salary(
int(salary[0]),
int(salary[1])
))
    return [average_salary for average_salary in average_salaries if average_salary]
def fetch_sj_vacancies_pages_amount(superjob_secret_key, search_query, area):
amount_of_vacancies = fetch_sj_vacancies_amount(
superjob_secret_key,
search_query, area
)
return (amount_of_vacancies//20) + 1
def fetch_sj_vacancies_amount(superjob_secret_key, search_query, area):
vacancies = fetch_sj_vacancies(
superjob_secret_key,
keyword=search_query,
town=area
)
tree_obj = objectpath.Tree(vacancies)
return list(tree_obj.execute("$..total"))[0]
def fetch_all_sj_salaries(superjob_secret_key, search_query, area):
pages_found = fetch_sj_vacancies_pages_amount(
superjob_secret_key,
search_query,
area
)
all_sj_salaries = []
for page in range(pages_found):
vacancies = fetch_sj_vacancies(
superjob_secret_key,
keyword=search_query,
town=area,
page=page
)
tree_obj = objectpath.Tree(vacancies)
all_sj_salaries_from = list(tree_obj.execute("$..payment_from"))
all_sj_salaries_to = list(tree_obj.execute("$..payment_to"))
all_sj_salary_currencies = list(tree_obj.execute("$..currency"))
all_sj_salaries += zip(
all_sj_salaries_from,
all_sj_salaries_to,
all_sj_salary_currencies
)
return all_sj_salaries
```
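The module above imports predict_salary from fetched_data_processing, which is not part of this excerpt. A hedged minimal sketch is given below; the signature matches the call site in predict_rub_salary_sj(), and the weighting of one-sided salary ranges is an assumption based on a common convention for this kind of estimate.
```python
# Hedged sketch of the missing helper (assumption, not the repository's actual code).
def predict_salary(salary_from, salary_to):
    if salary_from and salary_to:
        return (salary_from + salary_to) / 2
    if salary_from:
        return salary_from * 1.2
    if salary_to:
        return salary_to * 0.8
    return None
```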
#### File: jkormik/Salary/main.py
```python
import os
from dotenv import load_dotenv
from fetch_headhunter import fetch_hh_vacancies_amount, predict_rub_salary_hh, fetch_hh_area_id
from fetch_superjob import predict_rub_salary_sj, fetch_sj_vacancies_amount
from fetched_data_processing import print_asciitable
def main():
load_dotenv()
superjob_secret_key = os.getenv("SUPERJOB_SECRET_KEY")
programming_languages = [
"Python",
"Java",
"JavaScript",
"Ruby",
"PHP",
"C++",
"C#",
"GO"
]
town = "Москва"
hh_language_popularity = {}
sj_language_popularity = {}
hh_area_id = fetch_hh_area_id(text=town)
for language in programming_languages:
        average_hh_salaries = predict_rub_salary_hh( # number of pages
f"Программист {language}",
hh_area_id
)
average_sj_salaries = predict_rub_salary_sj(
superjob_secret_key,
f"Программист {language}",
town
)
hh_vacancies_processed = len(average_hh_salaries)
sj_vacancies_processed = len(average_sj_salaries)
if hh_vacancies_processed:
average_hh_salary = int(
sum(average_hh_salaries) / hh_vacancies_processed
)
else:
average_hh_salary = None
if sj_vacancies_processed:
average_sj_salary = int(
sum(average_sj_salaries) / sj_vacancies_processed
)
else:
average_sj_salary = None
hh_language_popularity[language] = {
"vacancies_found": fetch_hh_vacancies_amount( # кол-во вакансий
f"Программист {language}",
hh_area_id
),
"vacancies_processed": hh_vacancies_processed,
"average_salary": average_hh_salary
}
sj_language_popularity[language] = {
"vacancies_found": fetch_sj_vacancies_amount(
superjob_secret_key,
f"Программист {language}",
town
),
"vacancies_processed": sj_vacancies_processed,
"average_salary": average_sj_salary
}
print_asciitable(hh_language_popularity, "HeadHunter Moscow")
print_asciitable(sj_language_popularity, "Superjob Moscow")
if __name__ == "__main__":
main()
``` |
{
"source": "jkormik/SpaceTelegram",
"score": 3
} |
#### File: jkormik/SpaceTelegram/fetch_spacex.py
```python
import requests
from link_processing import get_file_format_from_link
from data_collection import download_picture
def fetch_spacex_launch(spacex_launch_number, picture_path):
response = requests.get(f"https://api.spacexdata.com/v3/launches/{spacex_launch_number}")
response.raise_for_status()
spacex_img_links = response.json()["links"]["flickr_images"]
for link in spacex_img_links:
file_format_from_link = get_file_format_from_link(link)
if file_format_from_link in (".jpg", ".png"):
download_picture(link, picture_path)
```
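fetch_spacex.py relies on two helpers, get_file_format_from_link and download_picture, that are not part of this excerpt. The sketches below are assumptions; names and signatures follow the call sites in fetch_spacex_launch().
```python
# Hedged sketches of the missing helpers (assumptions, not the repository's actual code).
import os
from urllib.parse import urlsplit
import requests
def get_file_format_from_link(link):
    """Return the file extension of a URL's path component, e.g. '.jpg'."""
    return os.path.splitext(urlsplit(link).path)[1]
def download_picture(link, picture_path):
    """Download one picture into picture_path, keeping its original file name."""
    response = requests.get(link)
    response.raise_for_status()
    filename = os.path.basename(urlsplit(link).path)
    with open(os.path.join(picture_path, filename), "wb") as file:
        file.write(response.content)
```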
#### File: jkormik/SpaceTelegram/main.py
```python
from dotenv import load_dotenv
import os
import telegram
import time
from fetch_nasa import fetch_nasa_epics, fetch_nasa_apods
from fetch_spacex import fetch_spacex_launch
import pathlib
def send_imgs_to_tg(telegram_bot_api_key, telegram_chat_id,
picture_path):
bot = telegram.Bot(telegram_bot_api_key)
for picture in os.listdir(picture_path):
with open(f"{picture_path}/{picture}", "rb") as document:
bot.send_document(chat_id=telegram_chat_id, document=document)
time.sleep(86400)
def main():
load_dotenv()
nasa_api_key = os.getenv("NASA_API_KEY")
telegram_bot_api_key = os.getenv("TELEGRAM_BOT_API_KEY")
telegram_chat_id = os.getenv("TELEGRAM_CHAT_ID")
spacex_launch_number = os.getenv("SPACEX_LAUNCH_NUMBER", "14")
picture_path = os.getenv("WHERE_TO_PUT_PICTURES", f"{os.getcwd()}/images")
date_for_epic = os.getenv("DATE_FOR_EPIC", "2018-08-15")
pathlib.Path(picture_path).mkdir(exist_ok=True)
fetch_spacex_launch(spacex_launch_number, picture_path)
fetch_nasa_epics(picture_path, nasa_api_key, date_for_epic)
fetch_nasa_apods(picture_path, nasa_api_key)
send_imgs_to_tg(telegram_bot_api_key, telegram_chat_id, picture_path)
if __name__ == "__main__":
main()
``` |
{
"source": "jkorsvik/electron-react-boilerplate",
"score": 3
} |
#### File: backend/api/scraper.py
```python
import datetime
import pandas as pd
import yfinance as yf
# import streamlit as st
# import requests
class Scraper:
"""Scraper class"""
def __init__(self, ticker="^VIX", n_days=30):
self.ticker = yf.Ticker(f"{ticker}")
self.historic_data = self.get_historic_data(ticker=self.ticker, n_days=n_days)
def get_json(self, ticker_str=None, n_days=30):
"""Return json from desired dataframe"""
if ticker_str: # If a ticker is provided, select the provided.
ticker = yf.Ticker(f"{ticker_str}")
else: # Else use the one from the instance.
ticker = self.ticker
# Return data as a json
data = self.get_historic_data(ticker=ticker, n_days=n_days)
return data.to_json(index=True, orient='split')
@staticmethod
def get_historic_data(ticker, n_days=30):
"""Get historic data from the past n_days, with the current ticker object."""
# Time Info
current_datetime = datetime.datetime.now()
start = current_datetime - datetime.timedelta(days=n_days)
end = current_datetime - datetime.timedelta(days=1)
# Get Data
data = ticker.history(
start=start.strftime("%Y-%m-%d"), end=end.strftime("%Y-%m-%d")
)
data.drop(labels=["Volume", "Dividends", "Stock Splits"], axis=1, inplace=True)
if data.shape[0] < 15:
raise RuntimeError("Not enough data, we need at least 15 days worth!")
return pd.DataFrame(data)
    def get_extreme_value(self, col, method):
        """Expected col is Open, High, Low or Close; method is a Series method name such as 'min' or 'max'."""
        return getattr(self.historic_data[col], method)()
def get_current_data(self):
"""Get the current price"""
return self.ticker.info.get("regularMarketPrice")
def get_current_val(self, col):
"""Get today's value from desired column."""
return self.historic_data[col].iloc[-1]
def recommendation(self, api=False):
"""Calculate a recommendation"""
current_price = self.get_current_data()
open_today = self.get_current_val("Open")
low = self.get_extreme_value("Low", "min")
high = self.get_extreme_value("High", "max")
# Get info about stock market situation
new_low = current_price < low
new_high = current_price > high
up_today = current_price > open_today
# Do a quick analysis of the data
if up_today and new_low:
sell = True
else:
sell = False
if not up_today and new_high:
buy = True
else:
buy = False
# Return the results
if api:
return {
"recommendation": {f"Recommendations: \n{buy=} \n{sell=}"},
"new_low": f"{new_low}",
"new_high": f"{new_high}",
"up_today": f"{up_today}",
}
return f"Recommendations: \n{buy=}\n{sell=}"
```
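A short usage sketch of the Scraper class above; it requires network access to Yahoo Finance, and the import path is assumed from backend/main.py.
```python
# Hedged usage sketch (not part of the repository).
from api.scraper import Scraper  # assumed import path
scraper = Scraper(ticker="^VIX", n_days=30)
print(scraper.get_current_data())      # latest regular market price
print(scraper.recommendation())        # "Recommendations: \nbuy=... \nsell=..."
print(scraper.get_json("^GSPC", 30))   # 30 days of S&P 500 history as JSON
```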
#### File: electron-react-boilerplate/backend/main.py
```python
from typing import Dict, List, Optional, Union
from api.ticker_model_manager import Ticker
from fastapi import FastAPI
from pydantic import BaseModel
from api import scraper
from api import model
new_day = True # is it a new day?
selected_ticker = Ticker.SP500 # ticker to be analyzed
print(selected_ticker.value)
# Check whether data needs to be refreshed for a new day
if new_day:
    pass
    # get new data
    # train the model
    # set the flag to false
# check if the ticker is in the list of models
# get a prediction for the specific ticker; if the ticker has no trained model yet, create a new model and add it to the list of models
class DataRequest(BaseModel):
ticker_name: str = "^VIX"
n_days: int = 30
app = FastAPI()
SCRAPER = scraper.Scraper()
@app.get("/recommendation")
def get_recommendation():
return SCRAPER.recommendation(api=1)
@app.post("/data/")
def get_data(data: DataRequest):
return SCRAPER.get_json(ticker_str=data.ticker_name, n_days=data.n_days)
```
#### File: backend/startup_setup/download_model.py
```python
import requests
import shutil
import zipfile
FRENCHMODELURL = 'https://alphacephei.com/vosk/models/vosk-model-fr-0.22.zip'
RELATIVEMODELPATH = './backend/api/model/'
def download_file(url):
local_filename = url.split('/')[-1]
with requests.get(url, stream=True) as r:
with open(local_filename, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return local_filename
def unzip_file_to_modelfolder(filename):
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(RELATIVEMODELPATH)
print(f"Extracted {filename} to models folder")
return filename
def download_unzip_french_model():
    local_filename = download_file(FRENCHMODELURL)
    print(f"Downloaded {local_filename}")
    unzip_file_to_modelfolder(local_filename)
    return local_filename
``` |
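A short usage sketch of the helpers above: download the archive and extract it into the model folder. The import path is assumed from the file location.
```python
# Hedged usage sketch (not part of the repository).
from startup_setup.download_model import FRENCHMODELURL, download_file, unzip_file_to_modelfolder
archive = download_file(FRENCHMODELURL)
unzip_file_to_modelfolder(archive)
```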
{
"source": "jkortner/ml-ops",
"score": 3
} |
#### File: ml-ops/mongodb/mongo_connection.py
```python
import logging
import datetime
from pprint import pformat
from pymongo import MongoClient
def mongo_connect():
host = '127.0.0.1'
client = MongoClient(host=host)
db = client.test_database
list_db_collections(db)
results_collection = db.results
res = {'model': 'model_id_1',
'dataset': 'dataset_id_1',
'result': 'X% acc',
'timestamp': str(datetime.datetime.now())
}
results_collection.insert_one(res)
list_db_collections(db)
def list_db_collections(db):
logger = logging.getLogger('list_db_collections')
collection_list = db.list_collection_names()
logger.info('collections:\n%s', ', '.join(collection_list))
for col_id in collection_list:
collection = db[col_id]
logger.info('collection: %s', col_id)
for entry in collection.find():
logger.info('\n%s', pformat(entry))
logger.info('-')
if __name__ == "__main__":
LOGGING_FORMAT = '%(asctime)-15s: [%(name)s] %(message)s'
# LOGGING_FORMAT = '[%(name)s] %(message)s'
logging.basicConfig(level=logging.INFO,
format=LOGGING_FORMAT)
mongo_connect()
```
#### File: sandbox/ml/train.py
```python
import mlflow
import mlflow.sklearn
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import accuracy_score
import pandas as pd
def get_X_y(df, target):
# sklearn split
train, test = train_test_split(df)
# X, y split
X_train = train.drop([target], axis=1)
X_test = test.drop([target], axis=1)
y_train = train[[target]]
y_test = test[[target]]
return X_train, X_test, y_train, y_test
def metrics(y, y_hat):
accuracy = accuracy_score(y, y_hat)
return accuracy
if __name__ == "__main__":
# get zoo.data from:
# https://archive.ics.uci.edu/ml/datasets/Zoo
# COLS = ["name", "hair", "feathers", "eggs", "milk", "airborne", "aquatic",
# "predator", "toothed", "backbone", "breathes", "venomous", "fins",
# "legs", "tail", "domestic", "catsize", "class"]
# DF = pd.read_csv('zoo.data', sep = ',', names=COLS)
# print(DF)
# DF.to_csv('zoo.csv', index=False)
# load zoo.csv
df = pd.read_csv('zoo.csv')
df = df.drop(columns=['name'])
# split df into training and test sets
X_train, X_test, y_train, y_test = get_X_y(df, 'class')
with mlflow.start_run():
clf = BernoulliNB()
clf.fit(X=X_train, y=y_train.values.ravel())
y_hat = clf.predict(X_test)
accuracy = metrics(y_test, y_hat)
print('Accuracy: %s' % accuracy)
mlflow.log_metric("accuracy", accuracy)
mlflow.sklearn.log_model(clf, "model")
``` |
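A hedged follow-up sketch for train.py: reloading the model that was logged with mlflow.sklearn.log_model. The run id is a placeholder to be looked up in the MLflow UI, and the sample row simply lists the 16 zoo features in column order.
```python
# Hedged sketch (not part of the repository); the run id is a placeholder.
import mlflow.sklearn
run_id = "<run-id-from-the-mlflow-ui>"
model = mlflow.sklearn.load_model(f"runs:/{run_id}/model")
print(model.predict([[1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 4, 0, 0, 1]]))
```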
{
"source": "jkorvin/cython",
"score": 2
} |
#### File: jkorvin/cython/runtests.py
```python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from collections import defaultdict
except ImportError:
class defaultdict(object):
def __init__(self, default_factory=lambda : None):
self._dict = {}
self.default_factory = default_factory
def __getitem__(self, key):
if key not in self._dict:
self._dict[key] = self.default_factory()
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __contains__(self, key):
return key in self._dict
def __repr__(self):
return repr(self._dict)
def __nonzero__(self):
return bool(self._dict)
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
CY3_DIR = None
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
# late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:numpy_old': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_old_numpy_extension(ext):
update_numpy_extension(ext, set_api17_macro=False)
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro:
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
finds gcc version using Popen
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
    if compiler_version and [int(x) for x in compiler_version.split('.')] >= [4, 2]:
return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:numpy_old' : update_old_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
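# For illustration (comment added by the editor, not part of the original file):
# a test file whose first lines are
#     # mode: run
#     # tag: numpy, openmp
#     # ticket: 1234
# is parsed by parse_tags() into
#     {'mode': ['run'], 'tag': ['numpy', 'openmp'], 'ticket': ['1234']}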
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items()):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if sys.platform not in ['win32'] and self.add_embedded_test:
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if skip_c(tags) and 'c' in languages:
languages = list(languages)
languages.remove('c')
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
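# Illustrative note, not from the original source: skip_c() marks a test as
# C++-only either via a direct "cpp" tag or via a distutils language override,
# so a hypothetical tags dict like
#   {'tag': [], 'distutils': ['language = c++']}
# also returns True through the argument/value split above.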
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
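# Illustrative example, not part of the original file: only MSVC D9025
# option-override warnings are dropped, e.g.
#   filter_stderr(b"cl : Command line warning D9025 : overriding '/Ox' with '/Od'\nreal error")
# returns b"real error", while any other stderr content passes through unchanged.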
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
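# Illustrative sketch, not part of the original runtests.py: a small helper
# mirroring how run_forked_test() above interprets the os.waitpid() status word
# (exit status in the high byte, terminating signal in the low byte).
def _example_decode_wait_status(status):
    # low byte: number of the signal that killed the child (0 on a normal exit)
    signal_number = status & 255
    # high byte: the child's own exit status
    exit_code = status >> 8
    return exit_code, signal_number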
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file=os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in blacklist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None,
capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
commands = (self.commands
.replace("CYTHONIZE", "PYTHON %s" % os.path.join(self.cython_root, 'cythonize.py'))
.replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py'))
.replace("PYTHON", sys.executable))
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + old_path
env['PYTHONPATH'] = new_path
cmd = []
out = []
err = []
for command_no, command in enumerate(filter(None, commands.splitlines()), 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if ' setup.py ' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command,
shell=True,
env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = 'cython.py'
        if sys.version_info[0] >= 3 and CY3_DIR:
cython = os.path.join(CY3_DIR, cython)
cython = os.path.abspath(os.path.join('..', '..', cython))
self.assertEqual(0, os.system(
"make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
try:
os.remove('make.output')
except OSError:
pass
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, mod in deps.items():
try:
__import__(mod)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = (testname in self.excludes
or testname.split('.')[-1] in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
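# Illustrative examples, not in the original file: string_selector() maps each
# command line selector to either a tag match or a regex match, e.g.
#   string_selector('ticket:1234')  # -> TagsSelector('ticket', '1234')
#   string_selector('numpy')        # -> RegExSelector('numpy')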
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
# It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests, which is important.
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(testname) & 0x7fffffff if _is_py2 else _hash(testname.encode())
return hashval % self.shard_count != self.shard_num
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # The code style checker requires recursive glob ("**"), which needs Python 3.5+.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.capture:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
errors = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
errors.append(shard_num)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if errors:
sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
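# Illustrative usage sketch, not part of the original file: the context manager
# above wraps a whole test run so a "#### <timestamp>" line lands in the build
# log every `interval` seconds, e.g.
#
#   with time_stamper_thread(interval=10):
#       pass  # run the test suite here
#
# Passing interval=None (as done for --no-capture runs) disables it entirely.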
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if xml_output_dir and options.fork:
        # XML output and forked testing don't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
    # which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
if options.exit_ok:
return options.shard_num, stats, 0
else:
return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
``` |
{
"source": "jkorvin/msgpack-python",
"score": 2
} |
#### File: msgpack-python/test/test_pack.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
from pytest import raises, xfail
from msgpack import packb, unpackb, Unpacker, Packer, pack
from collections import OrderedDict
from io import BytesIO
def check(data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
assert re == data
def testPack():
test_data = [
0, 1, 127, 128, 255, 256, 65535, 65536, 4294967295, 4294967296,
-1, -32, -33, -128, -129, -32768, -32769, -4294967296, -4294967297,
1.0,
b"", b"a", b"a"*31, b"a"*32,
None, True, False,
(), ((),), ((), None,),
{None: 0},
(1<<23),
]
for td in test_data:
check(td)
def testPackUnicode():
test_data = ["", "abcd", ["defgh"], "Русский текст"]
for td in test_data:
re = unpackb(packb(td), use_list=1, raw=False)
assert re == td
packer = Packer()
data = packer.pack(td)
re = Unpacker(BytesIO(data), raw=False, use_list=1).unpack()
assert re == td
def testPackUTF32(): # deprecated
try:
test_data = [
"",
"abcd",
["defgh"],
"Русский текст",
]
for td in test_data:
re = unpackb(packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
assert re == td
except LookupError as e:
xfail(e)
def testPackBytes():
test_data = [
b"", b"abcd", (b"defgh",),
]
for td in test_data:
check(td)
def testPackByteArrays():
test_data = [
bytearray(b""), bytearray(b"abcd"), (bytearray(b"defgh"),),
]
for td in test_data:
check(td)
def testIgnoreUnicodeErrors(): # deprecated
re = unpackb(packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore', use_list=1)
assert re == "abcdef"
def testStrictUnicodeUnpack():
with raises(UnicodeDecodeError):
unpackb(packb(b'abc\xeddef'), raw=False, use_list=1)
def testStrictUnicodePack(): # deprecated
with raises(UnicodeEncodeError):
packb("abc\xeddef", encoding='ascii', unicode_errors='strict')
def testIgnoreErrorsPack(): # deprecated
re = unpackb(packb("abcФФФdef", encoding='ascii', unicode_errors='ignore'), raw=False, use_list=1)
assert re == "abcdef"
def testDecodeBinary():
re = unpackb(packb(b"abc"), encoding=None, use_list=1)
assert re == b"abc"
def testPackFloat():
assert packb(1.0, use_single_float=True) == b'\xca' + struct.pack(str('>f'), 1.0)
assert packb(1.0, use_single_float=False) == b'\xcb' + struct.pack(str('>d'), 1.0)
def testArraySize(sizes=[0, 5, 50, 1000]):
bio = BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_array_header(size))
for i in range(size):
bio.write(packer.pack(i))
bio.seek(0)
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
def test_manualreset(sizes=[0, 5, 50, 1000]):
packer = Packer(autoreset=False)
for size in sizes:
packer.pack_array_header(size)
for i in range(size):
packer.pack(i)
bio = BytesIO(packer.bytes())
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
packer.reset()
assert packer.bytes() == b''
def testMapSize(sizes=[0, 5, 50, 1000]):
bio = BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_map_header(size))
for i in range(size):
bio.write(packer.pack(i)) # key
bio.write(packer.pack(i * 2)) # value
bio.seek(0)
unpacker = Unpacker(bio)
for size in sizes:
assert unpacker.unpack() == dict((i, i * 2) for i in range(size))
def test_odict():
seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
od = OrderedDict(seq)
assert unpackb(packb(od), use_list=1) == dict(seq)
def pair_hook(seq):
return list(seq)
assert unpackb(packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
def test_pairlist():
pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
packer = Packer()
packed = packer.pack_map_pairs(pairlist)
unpacked = unpackb(packed, object_pairs_hook=list)
assert pairlist == unpacked
def test_get_buffer():
packer = Packer(autoreset=0, use_bin_type=True)
packer.pack([1, 2])
strm = BytesIO()
strm.write(packer.getbuffer())
written = strm.getvalue()
expected = packb([1, 2], use_bin_type=True)
assert written == expected
``` |
{
"source": "jkoscialkowski/dnn-exercises",
"score": 3
} |
#### File: dnn-exercises/task_1_convnet/datasets.py
```python
import numpy as np
import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class FruitDataset(Dataset):
def __init__(self, path):
self.path = path
self.class_labels = os.listdir(path)
self.class_counts = [len(os.listdir(path + '/' + d))
for d in os.listdir(path)]
self.cum_class_counts = np.cumsum(self.class_counts)
self.transform = transforms.ToTensor()
def __len__(self):
return sum(self.class_counts)
def __getitem__(self, item):
        # The correct class is the first one whose cumulative count exceeds `item`,
        # i.e. the first position where item // cum_class_counts == 0. The class
        # label is kept as an integer.
y = np.sum((item // self.cum_class_counts) > 0)
label = self.class_labels[y]
# Pick correct filename
if y == 0:
filename = os.listdir(self.path + '/' + label)[item]
else:
filename = os.listdir(
self.path + '/' + label
)[item - self.cum_class_counts[y - 1]]
img = Image.open(self.path + '/' + label + '/' + filename)
img = self.transform(img)
return {'image': img, 'y': y, 'filename': filename}
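# Hedged usage sketch (the "fruits/Training" path and the printed fields are illustrative,
# not part of the original repository):
#   ds = FruitDataset('fruits/Training')
#   sample = ds[0]
#   print(sample['y'], sample['filename'], sample['image'].shape)
# Index-to-class example: with class_counts = [3, 5] the cumulative counts are [3, 8];
# item = 4 gives 4 // [3, 8] = [1, 0], so y = 1 (second class) and the file offset is 4 - 3 = 1.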
``` |
{
"source": "jkoscialkowski/gsn-projekt",
"score": 2
} |
#### File: gsn-projekt/amphibian/train.py
```python
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from datetime import datetime
from livelossplot import PlotLosses
from scipy import stats
from torch.utils.data import DataLoader
from torchvision import transforms
import amphibian.preprocess.preprocessing as preproc
from amphibian.preprocess.train_test_split import TrainTestSplit
# Set CUDA if available
if torch.cuda.is_available():
DEVICE = 'cuda'
else:
DEVICE = 'cpu'
# Set implemented SingleTrainer parameters which can be passed to CV
IMPLEMENTED_ARCHITECTURES = [
'SoftmaxRegressionModel', 'RNNModel', 'GRUModel', 'LSTMModel',
'AttentionModel'
]
NON_MODEL_PARAMETERS = [
'learning_rate',
'max_epochs',
'early_stopping_patience'
]
class SingleTrainer:
def __init__(self, model, batch_size: int, learning_rate: float = 1e-3,
max_epochs: int = 500, early_stopping_patience: int = None):
"""Class SingleTrainer - a general wrapper for training NNs on
given datasets, for a given set of hyperparameters.
:param model: an instance of architecture class inheriting from
torch.nn.Module, as in amphibian.architectures
:param batch_size: size of the batch
:param learning_rate: learning rate for Adam constructor
:param max_epochs: maximum number of epochs
:param early_stopping_patience: if not None, a maximum number of epochs
to wait for validation loss drop before stopping training
"""
super().__init__()
# Setting parameters
self.model = model.to(DEVICE)
self.batch_size = batch_size
self.max_epochs = max_epochs
self.early_stopping_patience = early_stopping_patience
# Loss is fixed to nn.CrossEntropyLoss
self.loss = nn.CrossEntropyLoss()
# Optimizer is fixed to Adam
self.optimizer = optim.Adam(params=self.model.parameters(),
lr=learning_rate)
def train(self, train_ds, valid_ds, plot_loss=True, verbose=True,
save_path=None, need_y: str = 'no'):
"""Method for training, takes train and validation Datasets, as well
as parameters specifying training monitoring and trains a network for
a given set of hyperparameters.
:param train_ds: training Dataset
:param valid_ds: validation Dataset
:param plot_loss: whether to plot loss during training
:param verbose: whether to print loss after each epoch
:param save_path: if given, serialises the model and saves there
:param need_y: command to extract y's in order to train Attention based models with
'state' or 'switch cells' layer
"""
# Create DataLoaders
assert need_y in ['no', 'yes'], 'Should be no/yes'
train_dl = DataLoader(train_ds, batch_size=self.batch_size,
shuffle=True)
test_dl = DataLoader(valid_ds, batch_size=self.batch_size)
# Dictionary for losses
losses = {'train_loss': [], 'valid_loss': []}
# Plot losses if the user chooses so
if plot_loss:
liveloss = PlotLosses()
# Iterate over epochs
for epoch in range(self.max_epochs):
# Switch to training mode
self.model.train()
if verbose:
print('Starting epoch {}'.format(epoch + 1))
# A list for batch-wise training losses in a given epoch
epoch_loss = []
# Iterate over batches
for idx_batch, batch in enumerate(train_dl):
self.optimizer.zero_grad()
if need_y == 'yes':
out = self.model(batch[0]['train_obs'].permute(1, 0, 2),
y=batch[1].permute(1, 0))
tr_loss = self.loss(out, batch[0]['train_y'].to(DEVICE))
elif need_y == 'no':
out = self.model(batch['train_obs'].permute(1, 0, 2))
tr_loss = self.loss(out, batch['train_y'].to(DEVICE))
epoch_loss.append(tr_loss.item())
tr_loss.backward()
self.optimizer.step()
# Switch to evaluation mode
self.model.eval()
# Compute training loss for the epoch
losses['train_loss'].append(sum(epoch_loss) / len(train_dl))
# Compute validation loss by iterating through valid dl batches
with torch.no_grad():
# A list for batch-wise validation losses
val_loss = []
# Iterate over batches in the validation DataLoader
for idx_v_batch, v_batch in enumerate(test_dl):
if need_y == 'yes':
val_loss.append(self.loss(
self.model(v_batch[0]['test_obs'].permute(1, 0, 2),
y=v_batch[1].permute(1, 0)),
v_batch[0]['test_y']).item())
elif need_y == 'no':
val_loss.append(self.loss(
self.model(v_batch['test_obs'].permute(1, 0, 2)),
v_batch['test_y']).item())
losses['valid_loss'].append(sum(val_loss) / len(test_dl))
# Printing loss for a given epoch
if verbose:
print('Loss: {}'.format(losses['valid_loss'][epoch]))
# Plot loss after each epoch if the user chose to
if plot_loss:
logs = {
'log_loss': losses['train_loss'][epoch],
'val_log_loss': losses['valid_loss'][epoch]
}
liveloss.update(logs)
liveloss.draw()
# Early stopping
if self.early_stopping_patience:
lag_1 = losses['valid_loss'][
(epoch - self.early_stopping_patience):epoch
]
lag_2 = losses['valid_loss'][
(epoch - self.early_stopping_patience - 1):(epoch - 1)
]
                no_drops = sum(l1 < l2 for l1, l2 in zip(lag_1, lag_2))
if epoch > self.early_stopping_patience and no_drops == 0:
break
# Save last loss
self.final_loss = np.mean(losses['valid_loss'][-1])
self.last_epoch = epoch
# Save model
if save_path:
torch.save(self.model.state_dict(), save_path)
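# Hedged usage sketch for SingleTrainer (the architecture constructor arguments and the
# datasets below are placeholders, not the repository's exact API):
#   model = RNNModel(...)                               # any class named in IMPLEMENTED_ARCHITECTURES
#   trainer = SingleTrainer(model=model, batch_size=64, learning_rate=1e-3,
#                           max_epochs=100, early_stopping_patience=10)
#   trainer.train(train_ds=train_ds, valid_ds=valid_ds, plot_loss=False, verbose=True)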
class CrossValidation:
def __init__(self, am_reader, int_start: int, int_end: int, architecture,
sampled_param_grid: dict, constant_param_grid: dict,
log_path: str, n_iter=100, folds=5, need_y: str = 'no'):
"""Class CrossValidation - hyperparameter optimisation by random search
and k-fold CV
:param am_reader: instance of the amphibian.fetch.reader.AmphibianReader
class
:param int_start: interval start passed to
amphibian.preprocess.train_test_split.TrainTestSplit
        :param int_end: interval end passed to
amphibian.preprocess.train_test_split.TrainTestSplit
:param architecture: one of the implemented NN architectures.
:param sampled_param_grid: dictionary of hyperparameters sampled for the
given CV iteration
:param constant_param_grid: dictionary of parameters which we want to
keep fixed for all CV iterations
:param log_path: where to save a .csv file with CV results
:param n_iter: number of CV iterations
:param folds: number of CV folds
:param need_y: command to extract y's in order to train Attention based models with
'state' or 'switch cells' layer
"""
assert architecture in IMPLEMENTED_ARCHITECTURES, \
'Chosen architecture is not implemented'
self.am_reader = am_reader
self.int_start = int_start
self.int_end = int_end
self.architecture = architecture
self.sampled_param_grid = sampled_param_grid
self.constant_param_grid = constant_param_grid
self.log_path = log_path \
+ '/cv_log_{:%Y%m%d_%H%M%S}.csv'.format(datetime.now())
self.n_iter = n_iter
self.folds = folds
self.need_y = need_y
# Dictionary for sampled parameters
self.sampled_params = {k: [] for k in sampled_param_grid.keys()}
# Lists for metric statistics and numbers of epochs
self.results = {'metric_mean': [],
'metric_std': [],
'metric_min': [],
'metric_max': [],
'no_epochs': []}
@staticmethod
def get_class(cls: str):
"""Method for creating an instance of a class using a string
:param cls: a string with import path to the class
:return: an object of the input class
"""
parts = cls.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
@staticmethod
def create_datasets(self, int_start: int, int_end: int, seq_len: int,
need_y: str):
"""Create datasets for all region combinations for given time interval
beginning and end.
:param int_start: interval start
:param int_end: interval start
:param seq_len: number of days in observation
:param need_y: extract y's in order to train Attention based models with 'state' or 'switch cells' layer
:return: train and validation ConcatDatasets
"""
# Get train test split for selected part of the training set
input_regs = ['ASIA_PACIFIC', 'ASIA_PACIFIC', 'EMEIA']
pred_regs = ['EMEIA', 'AMERICA', 'AMERICA']
train_test_splits = [TrainTestSplit(self.am_reader,
int_start=int_start,
int_end=int_end,
input_reg=ir,
pred_reg=pr)
for ir, pr in zip(input_regs, pred_regs)]
# Prepare dataset
timeser_datasets = [
preproc.TimeSeriesDataset(
tts, int_len=seq_len,
transform=transforms.Compose([
preproc.FillNaN(), preproc.Normalizing(),
preproc.DummyFillNaN(), preproc.Formatting(),
preproc.FormattingY()
]),
need_y=need_y
)
for tts in train_test_splits
]
return torch.utils.data.ConcatDataset(timeser_datasets), \
torch.utils.data.ConcatDataset(
[preproc.ValidDataset(tsds) for tsds in timeser_datasets]
)
def run(self):
"""Perform Cross-Validation.
:return: a pd.DataFrame with CV results
"""
print('STARTED CROSS-VALIDATION')
print('Optimizing hyperparameters for {}'.format(self.architecture))
# CV iterations
for it in range(self.n_iter):
print('Beginning CV iteration {:d}'.format(it + 1))
# Sample parameters
sampled_params = {}
for k, v in self.sampled_param_grid.items():
par = v.rvs(size=1)[0]
if par.dtype == float:
sampled_params[k] = float(par)
else:
sampled_params[k] = int(par)
self.sampled_params[k].append(par)
print('Trying for the following parameters: {}'.
format(str(sampled_params)))
# Concatenate sampled and constant parameters
model_params = {**sampled_params, **self.constant_param_grid}
# Extract parameters for SingleTrainer
st_params = {p: model_params.pop(p)
for p in NON_MODEL_PARAMETERS}
# Lists for one-fold losses and epoch numbers before early stopping
fold_losses, last_epochs = [], []
# Beginnings and ends for cross-validation intervals
# One interval is supposed to occupy half of the training set
# and roll through its entirety
interval = self.int_end - self.int_start
delta = np.floor(interval / 2 / (self.folds - 1))
int_starts = [int(self.int_start + delta * f)
for f in range(self.folds)]
int_ends = [int(self.int_end - delta * (self.folds - f - 1))
for f in range(self.folds)]
# Iterate over folds
for fold in range(self.folds):
print('\tFold: {:d}'.format(fold + 1))
tsds, vds = self.create_datasets(
self,
int_start=int_starts[fold],
int_end=int_ends[fold],
seq_len=model_params['seq_len'],
need_y=self.need_y
)
# Create new instance of model object
architecture = self.get_class(
'amphibian.architectures.' + self.architecture
)(**model_params)
# Create new instance of SingleTrainer and begin training
st = SingleTrainer(model=architecture,
batch_size=model_params['batch_size'],
**st_params)
st.train(train_ds=tsds, valid_ds=vds, plot_loss=False,
need_y=self.need_y)
last_epochs.append(st.last_epoch)
print('\tFitting ended after {:d} epochs'.format(
st.last_epoch + 1))
fold_losses.append(st.final_loss)
print('\tLoss on this fold: {:.5f}'.format(st.final_loss))
# Summarise computed metrics for a given choice of parameters
self.results['metric_mean'].append(np.mean(fold_losses))
self.results['metric_std'].append(np.std(fold_losses))
self.results['metric_min'].append(min(fold_losses))
self.results['metric_max'].append(max(fold_losses))
self.results['no_epochs'].append(last_epochs)
self.summary_df = pd.concat(
[pd.DataFrame(self.sampled_params),
pd.DataFrame(self.results)],
axis=1
)
self.summary_df.to_csv(self.log_path)
return self.summary_df
def batch_size_dist(min_num: int, max_num: int):
"""Function for sampling powers of 2.
:param min_num: minimum number (a power of 2)
:param max_num: maximum number (a power of 2)
"""
assert math.log(min_num, 2).is_integer() and math.log(max_num, 2).is_integer(),\
'Supplied minimum and maximum have to be powers of 2'
min_pow = int(math.log(min_num, 2))
max_pow = int(math.log(max_num, 2))
no = max_pow - min_pow + 1
return stats.rv_discrete(
values=([2 ** p for p in np.arange(min_pow, max_pow + 1)],
[1/no for _ in np.arange(min_pow, max_pow + 1)])
)
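# Illustrative use of batch_size_dist (sampled values are random, shown only as an example):
#   dist = batch_size_dist(16, 128)   # candidates: 16, 32, 64, 128, each with probability 1/4
#   dist.rvs(size=3)                  # e.g. array([32, 16, 128])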
``` |
{
"source": "jkosinski/XlinkAnalyzerX",
"score": 2
} |
#### File: XlinkAnalyzerX/template/cmd.py
```python
def subcommand_function(session, positional_arguments, keyword_arguments):
pass
from chimerax.core.commands import CmdDesc
subcommand_desc = CmdDesc()
# TODO: Add more subcommands here
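# Hedged registration sketch (the command name "xla subcommand" is hypothetical; in a
# ChimeraX bundle this normally happens from the bundle API's command-registration hook):
#   from chimerax.core.commands import register
#   register("xla subcommand", subcommand_desc, subcommand_function, logger=session.logger)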
``` |
{
"source": "jkoskela/connector-plugin-sdk",
"score": 3
} |
#### File: connector-packager/tests/test_package.py
```python
import os
import unittest
import logging
from pathlib import Path
from connector_packager.package import PACKAGED_EXTENSION
logger = logging.getLogger(__name__)
class TestPackage(unittest.TestCase):
def test_package_main(self):
expected_package_name = "postgres_odbc"
expected_dest_directory = Path("tests/test_resources/jars")
files_directory = Path("tests/test_resources/valid_connector")
path_to_test_file = expected_dest_directory / Path(expected_package_name + PACKAGED_EXTENSION)
if path_to_test_file.exists():
logging.debug("Removing old test file " + str(Path))
path_to_test_file.unlink()
os.system("python -m connector_packager.package " + str(files_directory) +
" --dest " + str(expected_dest_directory))
self.assertTrue(path_to_test_file.exists(), "Packaged connector not found in expected directory")
if path_to_test_file.exists():
path_to_test_file.unlink()
```
#### File: tdvt/config_gen/genconfig.py
```python
from string import Template
import os
import argparse
from .test_config import TestConfig
from .datasource_list import *
def generate_config_files(output_dir, ds_registry, force=False):
base_output_dir = output_dir
#Generate all the config files.
try:
os.mkdir(base_output_dir)
except:
pass
if not ds_registry:
print ("Did not find any registry of tests to generate config files for.")
return
for ds in ds_registry.dsnames:
cfg = ds_registry.dsnames[ds]
if not cfg.config_files_exist(base_output_dir) or force:
cfg.write_config_files(base_output_dir)
``` |
{
"source": "j-kota/LP-QAP",
"score": 2
} |
#### File: LP-QAP/qap-lp/Logger.py
```python
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.spatial import ConvexHull
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
if torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
dtype_l = torch.cuda.LongTensor
torch.cuda.manual_seed(0)
else:
dtype = torch.FloatTensor
dtype_l = torch.LongTensor
torch.manual_seed(0)
def compute_recovery_rate(pred, labels):
pred = pred.max(2)[1]
error = 1 - torch.eq(pred, labels).type(dtype)#.squeeze(2)
frob_norm = error.mean(1)#.squeeze(1)
accuracy = 1 - frob_norm
accuracy = accuracy.mean(0).squeeze()
return accuracy.data.cpu().numpy()#[0]
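# Shape note (inferred from the call sites below, stated as an assumption): `pred` is a
# (batch, n, n) score tensor, `labels` is a (batch, n) tensor of target indices, and the
# returned value is the mean fraction of correctly matched nodes as a NumPy scalar.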
class Logger(object):
def __init__(self, path_logger):
directory = os.path.join(path_logger, 'plots/')
self.path = path_logger
self.path_dir = directory
# Create directory if necessary
try:
os.stat(directory)
except:
os.mkdir(directory)
self.loss_train = []
self.loss_test = []
self.accuracy_train = []
self.accuracy_test = []
self.args = None
def write_settings(self, args):
self.args = {}
# write info
path = os.path.join(self.path, 'experiment.txt')
with open(path, 'w') as file:
for arg in vars(args):
file.write(str(arg) + ' : ' + str(getattr(args, arg)) + '\n')
self.args[str(arg)] = getattr(args, arg)
def save_model(self, model):
save_dir = os.path.join(self.path, 'parameters/')
# Create directory if necessary
try:
os.stat(save_dir)
except:
os.mkdir(save_dir)
path = os.path.join(save_dir, 'gnn.pt')
torch.save(model, path)
print('Model Saved.')
def load_model(self):
load_dir = os.path.join(self.path, 'parameters/')
# check if any training has been done before.
try:
os.stat(load_dir)
except:
print("Training has not been done before testing. This session will be terminated.")
sys.exit()
path = os.path.join(load_dir, 'gnn.pt')
print('Loading the most recent model...')
siamese_gnn = torch.load(path)
return siamese_gnn
def add_train_loss(self, loss):
self.loss_train.append(loss.data.cpu().numpy())
def add_test_loss(self, loss):
self.loss_test.append(loss)
def add_train_accuracy(self, pred, labels):
accuracy = compute_recovery_rate(pred, labels)
self.accuracy_train.append(accuracy)
def add_test_accuracy(self, pred, labels):
accuracy = compute_recovery_rate(pred, labels)
self.accuracy_test.append(accuracy)
def plot_train_loss(self):
plt.figure(0)
plt.clf()
iters = range(len(self.loss_train))
plt.semilogy(iters, self.loss_train, 'b')
plt.xlabel('iterations')
plt.ylabel('Cross Entropy Loss')
plt.title('Training Loss: p={}, p_e={}'
.format(self.args['edge_density'], self.args['noise']))
path = os.path.join(self.path_dir, 'training_loss.png')
plt.savefig(path)
def plot_test_loss(self):
plt.figure(1)
plt.clf()
test_freq = self.args['test_freq']
        iters = [test_freq * i for i in range(len(self.loss_test))]
plt.semilogy(iters, self.loss_test, 'b')
plt.xlabel('iterations')
plt.ylabel('Cross Entropy Loss')
plt.title('Testing Loss: p={}, p_e={}'
.format(self.args['edge_density'], self.args['noise']))
path = os.path.join(self.path_dir, 'testing_loss.png')
plt.savefig(path)
def plot_train_accuracy(self):
plt.figure(0)
plt.clf()
iters = range(len(self.accuracy_train))
plt.plot(iters, self.accuracy_train, 'b')
plt.xlabel('iterations')
plt.ylabel('Accuracy')
plt.title('Training Accuracy: p={}, p_e={}'
.format(self.args['edge_density'], self.args['noise']))
path = os.path.join(self.path_dir, 'training_accuracy.png')
plt.savefig(path)
def plot_test_accuracy(self):
plt.figure(1)
plt.clf()
test_freq = self.args['test_freq']
        iters = [test_freq * i for i in range(len(self.accuracy_test))]
plt.plot(iters, self.accuracy_test, 'b')
plt.xlabel('iterations')
plt.ylabel('Accuracy')
plt.title('Testing Accuracy: p={}, p_e={}'
.format(self.args['edge_density'], self.args['noise']))
path = os.path.join(self.path_dir, 'testing_accuracy.png')
plt.savefig(path)
def save_results(self):
path = os.path.join(self.path, 'results.npz')
np.savez(path, accuracy_train=np.array(self.accuracy_train),
accuracy_test=np.array(self.accuracy_test),
loss_train=self.loss_train, loss_test=self.loss_test)
``` |
{
"source": "Jkotheimer/SmartVisor",
"score": 3
} |
#### File: Jkotheimer/SmartVisor/FaceDetector.py
```python
import cv2
import time
import gc
import numpy as np
# Get a quantized tensorflow face detection model
def getFaceDetector(modelFile='models/opencv_face_detector_uint8.pb', configFile='models/opencv_face_detector.pbtxt'):
return cv2.dnn.readNetFromTensorflow(modelFile, configFile)
'''
Find the faces in an image
@param (np.uint8) img - Image to find faces from
@param (dnn_Net) model - Face detection model
@return ([left, top, right, bottom]) face - The box around the most confident face in the image
'''
def findFace(img, model):
# cv2 magic
height, width = img.shape[:2]
blob = cv2.dnn.blobFromImage(
cv2.resize(img, (300, 300)),
1.0,
(300, 300),
(104.0, 177.0, 123.0)
)
model.setInput(blob)
res = model.forward()
# Iterate over everything the model matched with and return the guess with the highest confidence
face = []
highestConfidence = 0
for i in range(res.shape[2]):
confidence = res[0, 0, i, 2]
if confidence > 0.5 and confidence > highestConfidence:
box = res[0, 0, i, 3:7] * np.array([width, height, width, height])
# [+x, +y, -x, -y]
(left, top, right, bottom) = box.astype('int')
face = [left, top, right, bottom]
highestConfidence = confidence
del res
del model
del blob
gc.collect()
return face
def drawFace(img, face):
cv2.rectangle(img, (face[0], face[1]), (face[2], face[3]), (0, 0, 255), 3)
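# Minimal usage sketch (assumes the model files above exist and a camera is available at index 0):
#   model = getFaceDetector()
#   cap = cv2.VideoCapture(0)
#   ok, frame = cap.read()
#   if ok:
#       face = findFace(frame, model)
#       if face:
#           drawFace(frame, face)
#           cv2.imwrite('face.png', frame)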
``` |
{
"source": "jkotlinski/acmeforth",
"score": 2
} |
#### File: jkotlinski/acmeforth/xc.py
```python
import os
import re
import sys
NEWLINE = 256
OUT = None
refs = {}
to_petscii = [
0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,
0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f,
0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
0x40,0xc1,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xcb,0xcc,0xcd,0xce,0xcf,
0xd0,0xd1,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0x5b,0x5c,0x5d,0x5e,0x5f,
0xc0,0x41,0x42,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f,
0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0xdb,0xdc,0xdd,0xde,0xdf,
0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f,
0x90,0x91,0x92,0x0c,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f,
0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf,
0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf,
0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f,
0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f,
0xe0,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xeb,0xec,0xed,0xee,0xef,
0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff,
0xd # \n
]
class Ref:
def __init__(self, addr, word = None):
self.addr = addr
self.word = word
if word:
if not addr in refs:
refs[addr] = []
refs[addr].append(word)
def __index__(self):
return self.addr
def __int__(self):
return self.addr
    # Arithmetic and comparison helpers so a Ref can be used in place of a plain integer address
def __sub__(self, other):
return self.addr - other
def __rsub__(self, other):
return other - self.addr
def __add__(self, other):
return other + self.addr
def __radd__(self, other):
return other + self.addr
def __lt__(self, other):
if type(other) == Ref:
return self.addr < other.addr
else:
return self.addr < other
def __eq__(self, other):
return self.addr == other
word_hashes = []
def word_name_hash(word_name):
if word_name not in word_hashes:
word_hashes.append(word_name)
return "WORD_" + str(word_hashes.index(word_name))
def compile(dictionary_, heap_, start_word_name, outfile):
global dictionary
global heap
global OUT
OUT = open(outfile, "w")
dictionary = dictionary_
heap = heap_
words_to_export.append(dictionary.words[start_word_name])
write_header()
while True:
if words_to_export:
export_word(words_to_export.pop())
continue
if primitives_to_add:
add_primitive(primitives_to_add.pop())
continue
if doers_to_export:
export_doer(doers_to_export.pop())
continue
break
words_to_export = []
exported_words = set()
doers_to_export = []
exported_doers = set()
primitives_to_add = []
added_primitives = set()
def add_primitive_dependency(word_name):
if word_name not in added_primitives:
primitives_to_add.append(word_name)
def export_word(w):
if w in exported_words:
return
exported_words.add(w)
xt = w.xt
if w.body != None:
compile_forth_word(w)
else:
add_primitive_dependency(w.name)
def compile_forth_word(w):
s = str(w.xt)
if "COLON" in s:
compile_colon_word(w)
elif "CREATE" in s:
compile_create_word(w)
elif "CONSTANT" in s:
compile_constant_word(w)
elif "DOES_TO" in s:
compile_does_word(w)
elif "HERE" in s:
OUT.write("; raw data area\n")
compile_body(w)
else:
sys.exit("Unknown xt " + str(w.xt))
def compile_constant_word(w):
OUT.write(word_name_hash(w.name) + "\t; " + w.name + "\n")
if type(w.constant_value) == Ref:
OUT.write("\tldy\t#>REF_" + str(w.constant_value.addr) + "_W_" + str(w.constant_value.word.body) + "\n")
OUT.write("\tlda\t#<REF_" + str(w.constant_value.addr) + "_W_" + str(w.constant_value.word.body) + "\n")
if w.constant_value.word:
if w.constant_value.word not in words_to_export:
words_to_export.append(w.constant_value.word)
elif type(w.constant_value) == type(0):
OUT.write("\tldy\t#" + str((w.constant_value >> 8) & 0xff) + "\n")
OUT.write("\tlda\t#" + str(w.constant_value & 0xff) + "\n")
elif callable(w.constant_value):
word = dictionary.xt_words[w.constant_value]
if word not in words_to_export:
words_to_export.append(word)
OUT.write("\tldy\t#>" + word_name_hash(word.name) + "\t; " + word.name + "\n")
OUT.write("\tlda\t#<" + word_name_hash(word.name) + "\t; " + word.name + "\n")
else:
print(w.constant_value)
assert False
OUT.write("\tjmp\t" + word_name_hash("pushya") + "\t; pushya\n\n")
add_primitive_dependency("pushya")
def compile_create_word(w):
OUT.write(word_name_hash(w.name) + "\t; " + w.name + "\n")
OUT.write("\tldy\t#>IP_" + str(w.body) + "\n")
OUT.write("\tlda\t#<IP_" + str(w.body) + "\n")
OUT.write("\tjmp\t" + word_name_hash("pushya") + "\t; pushya\n")
OUT.write("IP_" + str(w.body) + '\n')
add_primitive_dependency("pushya")
for i in range(w.body, w.body_end):
if type(heap[i]) == type(0):
OUT.write("\t!byte\t" + str(heap[i]) + '\n')
elif callable(heap[i]):
word = dictionary.xt_words[heap[i]]
if word not in words_to_export:
words_to_export.append(word)
OUT.write("\t!word " + word_name_hash(word.name) + "\t; " + word.name + "\n")
else:
assert False
OUT.write('\n')
def compile_colon_word(w):
OUT.write(word_name_hash(w.name) + "\t; " + w.name + "\n")
compile_body(w)
def compile_body(w, start_ip = -1):
ip = w.body if start_ip == -1 else start_ip
while ip < w.body_end:
if ip in refs:
if w in refs[ip]:
OUT.write("REF_" + str(ip) + "_W_" + str(w.body) + "\n")
OUT.write("IP_" + str(ip) + "\n")
cell = heap[ip]
if callable(cell):
cell_word = dictionary.xt_words[cell]
ip = compile_call(cell_word, ip)
elif type(cell) == Ref:
OUT.write("\t!word IP_" + str(cell.addr) + "\n")
ip += 1
elif type(cell) == int:
compile_byte(cell)
else:
sys.exit("Unknown cell type " + str(cell))
ip += 1
if ip in refs:
if w in refs[ip]:
OUT.write("REF_" + str(ip) + "_W_" + str(w.body) + "\n")
OUT.write("\n")
def compile_does_word(w):
add_primitive_dependency("dodoes")
OUT.write(word_name_hash(w.name) + "\t; " + w.name + "\n")
OUT.write("\tjsr " + word_name_hash("dodoes") + "\t; dodoes\n")
OUT.write("\t!word IP_" + str(w.xt_ip) + "\n")
compile_body(w)
doers_to_export.append(w.xt_ip)
def compile_byte(cell):
if type(cell) == str:
OUT.write("\t!byte " + str(to_petscii[ord(cell)]) + "\n")
else:
OUT.write("\t!byte " + str(cell) + "\n")
def compile_jsr(callee):
if callee not in words_to_export:
words_to_export.append(callee)
OUT.write("\tjsr " + word_name_hash(callee.name) + "\t; " + callee.name + "\n")
def compile_call(callee, ip):
if callee.name == "exit":
# TODO tail-call optimization
OUT.write("\trts\n\n")
elif callee.name == "branch":
if type(heap[ip + 1]) == Ref:
addr = heap[ip + 1].addr
else:
addr = heap[ip + 1] + (heap[ip + 2] << 8)
ip += 2
OUT.write("\tjmp IP_" + str(addr) + "\t\t; branch\n")
elif callee.name == "0branch" or callee.name == "(loop)" or callee.name == "(+loop)":
if type(heap[ip + 1]) == Ref:
addr = heap[ip + 1].addr
else:
addr = heap[ip + 1] + (heap[ip + 2] << 8)
compile_jsr(callee)
ip += 2
OUT.write("\t!word\tIP_" + str(addr) + "\n")
elif callee.name == "drop":
OUT.write("\tinx\t\t\t; drop\n")
elif callee.name == "2drop":
OUT.write("\tinx\t\t\t; 2drop\n")
OUT.write("\tinx\n")
elif callee.name == "litc":
compile_jsr(callee)
ip += 1
compile_byte(heap.getchar(ip))
elif callee.name == "lit":
compile_jsr(callee)
ip += 1
val = heap.getchar(ip)
if callable(val):
word = dictionary.xt_words[heap[ip]]
if word not in words_to_export:
words_to_export.append(word)
OUT.write("\t!word " + word_name_hash(word.name) + "\t; " + word.name + "\n")
ip += 1
elif type(val) == Ref:
ref = heap[ip]
if ref.word and ref.word not in words_to_export:
words_to_export.append(ref.word)
OUT.write("\t!word IP_" + str(ref.addr) + "\t; " + str(ref.word) + "\n")
ip += 1
else:
compile_byte(val)
ip += 1
compile_byte(heap[ip])
elif callee.name == "sliteral":
compile_jsr(callee)
ip += 1
strlen = heap[ip]
OUT.write("\t!byte\t" + str(strlen) + '\n')
for i in range(strlen):
ip += 1
write_char(heap.getchar(ip))
else:
compile_jsr(callee)
return ip
def write_char(c):
if type(c) == str:
OUT.write("\t!byte\t" + str(to_petscii[ord(c)]) + "\n")
else:
OUT.write("\t!byte\t" + str(c) + "\n")
def add_primitive(word_name):
if word_name in added_primitives:
return
added_primitives.add(word_name)
if word_name in dictionary.code_words:
OUT.write(word_name_hash(word_name) + "\t; " + word_name + "\n")
# Expands %FORTH_WORD% to the corresponding assembly label.
pattern = re.compile("(.*)%(.*)%(.*)")
for line in dictionary.code_words[word_name].split('\n'):
m = pattern.match(line)
if m:
pre,word,post = m.groups()
line = pre + word_name_hash(word) + post + "\t; " + word
if word not in added_primitives:
primitives_to_add.append(word)
OUT.write(line + "\n")
OUT.write("\n")
else:
for w in dictionary.words.values():
if w.name == word_name and w.body:
export_word(w)
return
sys.exit("Missing >>>" + word_name + "<<<")
def write_header():
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
asm_header_path = os.path.join(location, "src/header.asm")
OUT.write(open(asm_header_path, "r").read() + "\n")
def export_doer(ip):
if ip in exported_doers:
return
exported_doers.add(ip)
for w in dictionary.words.values():
if w.body and w.body_end and w.body <= ip and ip < w.body_end:
OUT.write("\t;doer " + w.name + "\n")
compile_body(w, ip)
return
assert False
``` |
{
"source": "jkotrc/2D-Elastic-Gas",
"score": 3
} |
#### File: jkotrc/2D-Elastic-Gas/boltzmann.py
```python
try:
from OpenGL.GL import *
from OpenGL import GLU
import OpenGL.GL.shaders
except:
print("OpenGL wrapper for python not found")
import glfw
import numpy as np
from computation import Computation
class Graphics:
def __init__(self,width,height, computation):
if not glfw.init():
print("GLFW Failed to initialize!")
self.window = glfw.create_window(width, height, "Boltzmann", None, None);
glfw.make_context_current(self.window)
self.windowsizechanged=False
glfw.set_window_size_callback(self.window, self.resizewindow)
self.program = self.loadShaders("vertex.glsl", "fragment.glsl")
glUseProgram(self.program)
glUniform1i(glGetUniformLocation(self.program, "WIDTH"), width)
glUniform1i(glGetUniformLocation(self.program, "HEIGHT"), height)
self.width=width
self.height=height
        self.comp = computation
self.points = np.array(self.comp.pos.reshape(-1,order='F'), dtype=np.float32)
self.graphicsinit()
def resizewindow(self,w,h,a):
self.windowsizechanged=True
def graphicsinit(self):
VBO = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, self.points.itemsize * self.points.size, self.points, GL_STATIC_DRAW)
position = glGetAttribLocation(self.program, "position")
glVertexAttribPointer(position, 2, GL_FLOAT, GL_FALSE, 0, None)
glEnableVertexAttribArray(position)
glClearColor(0.3, 0.3, 0.3, 1.0)
glEnable(GL_POINT_SMOOTH)
glPointSize(self.comp.size/2)
def render(self):
for i in range (0, self.comp.frameskip):
self.comp.cudastep();
self.points = self.comp.pos.reshape(-1,order='F')
glClear(GL_COLOR_BUFFER_BIT)
glUseProgram(self.program)
glBufferData(GL_ARRAY_BUFFER, self.points.itemsize * self.points.size, self.points, GL_STATIC_DRAW)
glDrawArrays(GL_POINTS, 0, int(self.points.size / 2))
glfw.swap_buffers(self.window)
def mainloop(self):
while not glfw.window_should_close(self.window):
glfw.poll_events()
if self.windowsizechanged == True:
self.width,self.height = glfw.get_framebuffer_size(self.window);
glUseProgram(self.program)
glUniform1i(glGetUniformLocation(self.program, "WIDTH"), self.width)
glUniform1i(glGetUniformLocation(self.program, "HEIGHT"), self.height)
self.windowsizechanged=False
self.render()
glfw.terminate()
def loadShaders(self, vertpath, fragpath):
vertexshader=glCreateShader(GL_VERTEX_SHADER)
fragmentshader=glCreateShader(GL_FRAGMENT_SHADER)
fragfile = open(fragpath, "r")
vertfile = open(vertpath, "r")
fragsource = fragfile.read()
fragfile.close()
vertsource = vertfile.read()
vertfile.close()
shader = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vertsource, GL_VERTEX_SHADER),
OpenGL.GL.shaders.compileShader(fragsource, GL_FRAGMENT_SHADER))
return shader
if __name__ == "__main__":
#A good configuration: 80x80 balls, space 24, width=height=1000, size=8, speedrange=20, frameskip=3, epsilon=0.01, blocksize=512
comp=Computation(width=1000, height=1000, space=20, xballs=100, yballs=100, speedrange=20,size=4,frameskip=1,epsilon=0.01,blocksize=512)
g=Graphics(1000, 1000,comp)
g.mainloop();
```
#### File: jkotrc/2D-Elastic-Gas/computation.py
```python
import numpy as np
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
def mag(x):
return np.sqrt(sum(i**2 for i in x))
def magsq(x):
return (sum(i**2 for i in x))
def getmodule(path):
try:
f = open(path, "r")
except:
print(f"could not open file at {path}")
return None
source = f.read();
f.close()
return SourceModule(source)
class Computation:
def __init__(self, width=1000, height=1000, space=30, xballs=50, yballs=50, speedrange=2,size=5,frameskip=1,epsilon=0.00001,blocksize=512):
self.width=np.float32(width)
self.height=np.float32(height)
self.xballs=xballs
self.yballs=yballs
self.N=np.int32(xballs*yballs) #CUDA takes in 32 bit ints
self.speedrange=speedrange
self.size=np.float32(size)
self.space=space
self.frameskip=frameskip
self.epsilon=np.float32(epsilon)
self.blocksize=blocksize
iterations = int(self.N*(self.N-1)/2)
self.gridsize = int(np.ceil(iterations/self.blocksize));
print(f"There are {self.N} balls --> {iterations} loops per tick... {self.gridsize} blocks of size {self.blocksize}")
self.v = np.zeros((2,self.N), dtype=np.float32)
self.pos = np.zeros((2,self.N), dtype=np.float32)
print(f"coords have shape {np.shape(self.pos)}")
self.module=getmodule("boltzmann.cu")
self.cstep = self.module.get_function("step_kernel")
for i in range(0, yballs):
for j in range(0, xballs):
initx = (((j + 1) * self.space) - 1)-width
inity = ((-1 * (i + 1) * self.space) + 1)+height
self.pos[:, xballs * i + j] = [initx, inity];
self.v[:,xballs*i+j] = [np.random.uniform(-speedrange,speedrange),np.random.uniform(-speedrange,speedrange)];
    # step_kernel signature: double *posx, double *posy, double *vx, double *vy, int N, double size, double epsilon, double width, double height
def cudastep(self):
self.cstep(drv.InOut(self.pos[0]), drv.InOut(self.pos[1]), drv.InOut(self.v[0]), drv.InOut(self.v[1])
,self.N,self.size,self.epsilon,self.width,self.height, block=(self.blocksize,1,1), grid=(self.gridsize,1))
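# Hedged standalone usage sketch (requires boltzmann.cu next to this file and a CUDA device):
#   comp = Computation(xballs=10, yballs=10, blocksize=256)
#   for _ in range(100):
#       comp.cudastep()
#   print(comp.pos[:, 0])   # position of the first ball after 100 ticks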
``` |
{
"source": "jkougoulos/rabbittools",
"score": 2
} |
#### File: jkougoulos/rabbittools/rabbitsend.py
```python
import pika
import sys
import select
import time
import argparse
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
chansend = None
conn = None
maxlines_toread = None
maxwrite_buffer = None
rcvexchange = None
routekey = None
rcvvhost = None
ttl = None
nottl = None
qos = None
def handle_tick():
global chansend
global conn
global maxlines_toread
global maxwrite_buffer
global qos
global ttl
count = 0
props = None
if nottl:
props = pika.BasicProperties(
content_type='text/plain',
delivery_mode=qos,
)
else:
props = pika.BasicProperties(
content_type='text/plain',
delivery_mode=qos,
expiration=ttl
)
# check if we have anything in stdin, we have read less than 512 line block, and the write output buffer has less than 1MB
curr_buf = conn._get_write_buffer_size()
if curr_buf < maxwrite_buffer: # our buffer is high, probably rabbit applies tcp backpressure
consec_flow = 0
while select.select([sys.stdin,],[],[],0.0)[0] and count < maxlines_toread: # we have lines to read and we read in blocks of maxlines
line = sys.stdin.buffer.readline()
            if line == b'':
conn.close()
return
count += 1
chansend.basic_publish(exchange=rcvexchange,
routing_key=routekey,
body=line,
properties=props
)
delay = (maxlines_toread - count ) / maxlines_toread # adjust delay for next read to percentage of lines read - we don't want to select all the time
else:
delay = 0.3 # tcp backpressure detected - waiting a bit
conn.ioloop.call_later(delay, handle_tick)
def on_conn_open(connection):
connection.channel(on_open_callback=on_channel_open)
def on_channel_open(channel):
global chansend
chansend = channel
conn.ioloop.call_later(0.01, handle_tick )
def on_conn_close(connection, exc):
print( str(exc) )
sys.exit(0)
def on_conn_error( connection, exc ):
raise exc
parser = argparse.ArgumentParser(description='Send lines received via stdin to RabbitMQ Exchange')
parser.add_argument('--exchange', help="Exchange to receive the messages", required=True )
parser.add_argument('--routekey', help="routing key of messages", default='' )
parser.add_argument('--host', default='localhost', help="RabbitMQ host to send the messages (default is: %(default)s)" )
parser.add_argument('--user', default='guest', help="Username to use for connection to RabbitMQ (default is: %(default)s)" )
parser.add_argument('--password', default='<PASSWORD>', help="Password to use for connection to RabbitMQ (default is: %(default)s)" )
parser.add_argument('--vhost', default='/', help="Virtual host to use for connection to RabbitMQ (default is: %(default)s)" )
parser.add_argument('--lineblock', default=512 , help="Maximum number of lines to read on each round (default is: %(default)s)", type=int )
parser.add_argument('--maxnetwritebuff', default=1*1024*1024 , help="Maximum network buffer size to use while RabbitMQ applies backpressure (default is: %(default)s)", type=int )
parser.add_argument('--ttl', default='8000', help="TTL of messages in milliseconds (default is: %(default)s)" )
parser.add_argument('--nottl', help="Ignore TTL", action='store_true' )
parser.add_argument('--qos', default=1, help="QoS of messages - 1: transient - 2: persistent (default is: %(default)s)", choices = [1,2], type=int )
args = parser.parse_args()
host = args.host
user = args.user
password = args.password
maxlines_toread = args.lineblock
maxwrite_buffer = args.maxnetwritebuff
rcvexchange = args.exchange
rcvvhost = args.vhost
routekey = args.routekey
ttl = args.ttl
nottl = args.nottl
qos = args.qos
creds = pika.PlainCredentials(user, password )
parameters = pika.ConnectionParameters(host=host, credentials=creds, virtual_host=rcvvhost)
conn = pika.SelectConnection(
parameters=parameters,
on_open_callback=on_conn_open,
on_close_callback=on_conn_close,
on_open_error_callback=on_conn_error
)
try:
conn.ioloop.start()
except KeyboardInterrupt:
conn.close()
conn.ioloop.start()
except Exception as exc:
print( str(exc) )
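# Example invocation (exchange and routing key names are made up for illustration):
#   tail -f /var/log/syslog | python rabbitsend.py --exchange logs --routekey syslog --host rabbit.example.org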
``` |
{
"source": "jkouris/echo-skill",
"score": 2
} |
#### File: jkouris/echo-skill/__init__.py
```python
from mycroft import MycroftSkill, intent_file_handler
class Echo(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('echo.intent')
def handle_echo(self, message):
self.speak_dialog('echo')
def create_skill():
return Echo()
``` |
{
"source": "jkovacic/math",
"score": 3
} |
#### File: scripts/test/comb.py
```python
from __future__ import print_function
def combTest() :
import itertools as it
perm = it.permutations("abcde")
print("Permutations...\n")
cnt = 1
for i in list(perm) :
print(cnt, ": ", ''.join(i))
cnt += 1
print("\nCombinations...\n")
for k in range(1, 6) :
print("\nK = ", k, "\n")
comb = it.combinations("abcde", k)
cnt = 1
for i in list(comb) :
print(cnt, ": ", ''.join(i))
cnt += 1
if (__name__ == '__main__'):
combTest()
``` |
{
"source": "jkovacic/py-quat-rotation",
"score": 2
} |
#### File: jkovacic/py-quat-rotation/instance_checker.py
```python
class InstanceCheck :
"""
A class with "static" methods that check whether an input variable is
an instance of certain types/classes
"""
@staticmethod
def isFloat(n) :
"""
Checks whether n is a float or an integer
(mathematically, integers are a subset of real numbers).
"""
return isinstance(n, float) or isinstance(n, int)
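# e.g. InstanceCheck.isFloat(2.5) and InstanceCheck.isFloat(3) are True, InstanceCheck.isFloat("3") is False;
# note that booleans also pass, because bool is a subclass of int.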
```
#### File: jkovacic/py-quat-rotation/quaternion.py
```python
import math
import exception
from instance_checker import InstanceCheck
class QuaternionException(exception.IException) :
"""Exception raised at illegal quaternion operations"""
pass
class Quaternion() :
"""
This class implements quaternion arithmetics, e.g. basic operations, norm, etc.
"""
# Internal instance members:
# o - quaternion's scalar component
# i - quaternion's 'i' component
# j - quaternion's 'j' component
# k - quaternion's 'k' component
"""Tolerance for determination whether a number is "close enough" to zero"""
eps = 1e-12
def __init__(self, o=0.0, i=0.0, j=0.0, k=0.0) :
"""
A "constructor" that creates an instance of a quaternion and assigns values to its components.
Input:
o - scalar component of the quaternion (default: 0)
i - component 'i' of the quaternion (default: 0)
j - component 'j' of the quaternion (default: 0)
k - component 'k' of the quaternion (default: 0)
Alternatively 'o' may be a quaternion. In this case, its components
are copied into self's ones and all other input arguments are ignored.
        A QuaternionException is raised if any argument is not an instance
of supported types (int, float, Quaternion).
"""
# all functionality is already implemented by setQ
self.setQ(o, i, j, k)
def setQ(self, o=0.0, i=0.0, j=0.0, k=0.0) :
"""
Assigns values of all quaternion's components.
Input:
o - scalar component of the quaternion (default: 0)
i - component 'i' of the quaternion (default: 0)
j - component 'j' of the quaternion (default: 0)
k - component 'k' of the quaternion (default: 0)
Alternatively 'o' may be a quaternion. In this case, its components
are copied into self's ones and all other input arguments are ignored.
        A QuaternionException is raised if any argument is not an instance
of supported types (int, float, Quaternion).
"""
if Quaternion.isQuaternion(o) :
# If o is a quaternion, simply copy its components...
self.o = o.o
self.i = o.i
self.j = o.j
self.k = o.k
else:
# otherwise check if all inputs are floats or integers...
if not (
InstanceCheck.isFloat(o) and
InstanceCheck.isFloat(i) and
InstanceCheck.isFloat(j) and
InstanceCheck.isFloat(k) ) :
raise QuaternionException("Invalid input arguments")
# if they are, just assign their values to quaternion's components
            self.o = o
            self.i = i
            self.j = j
            self.k = k
return self
def getScalar(self) :
"""Returns the scalar component of the quaternion"""
return self.o
def getI(self) :
"""Returns the component 'i' of the quaternion"""
return self.i
def getJ(self) :
"""Returns the component 'j' of the quaternion"""
return self.j
def getK(self) :
"""Returns the component 'k' of the quaternion"""
return self.k
def setScalar(self, o=0.0) :
"""
Assigns the scalar component of the quaternion.
Input:
o - value of the scalar component (default: 0)
Raises a QuaternionException if 'o' is not an instance
of a supported type (float or int).
"""
if not InstanceCheck.isFloat(o) :
raise QuaternionException("Invalid input argument")
self.o = o
return self
def setI(self, i=0.0) :
"""
Assigns the component 'i' of the quaternion.
Input:
i - value of the component 'i' (default: 0)
Raises a QuaternionException if 'i' is not an instance
of a supported type (float or int).
"""
if not InstanceCheck.isFloat(i) :
raise QuaternionException("Invalid input argument")
        self.i = i
return self
def setJ(self, j=0.0) :
"""
Assigns the component 'j' of the quaternion.
Input:
j - value of the component 'j' (default: 0)
Raises a QuaternionException if 'j' is not an instance
of a supported type (float or int).
"""
if not InstanceCheck.isFloat(j) :
raise QuaternionException("Invalid input argument")
self.j = j
return self
def setK(self, k=0.0) :
"""
Assigns the component 'k' of the quaternion.
Input:
k - value of the component 'k' (default: 0)
Raises a QuaternionException if 'k' is not an instance
of a supported type (float or int).
"""
if not InstanceCheck.isFloat(k) :
raise QuaternionException("Invalid input argument")
self.k = k
return self
def __add__(self, q) :
"""
Implementation of the addition operator '+' of two quaternions.
Input:
q - quaternion or a float value to be added to this one
Return:
a new instance of Quaternion
A QuaternionException is raised if 'q' is not an instance of
Quaternion, float or int.
"""
# Addition of quaternions is trivial:
#
# (a1 + b1*i + c1*j + d1*k) + (a2 + b2*i + c2*j + d2*k) =
# = ( (a1+a2) + (b1+b2)*i + (c1+c2)*j + (d1+d2)*k )
if Quaternion.isQuaternion(q) :
return Quaternion(
self.o + q.o,
self.i + q.i,
self.j + q.j,
self.k + q.k )
elif InstanceCheck.isFloat(q) :
return Quaternion(
self.o + q,
self.i,
self.j,
self.k )
else:
raise QuaternionException("Input must be a quaternion or a float")
def __sub__(self, q) :
"""
Implementation of the subtraction operator '-' of two quaternions.
Input:
q - quaternion or a float value to be subtracted from this one
Return:
a new instance of Quaternion
A QuaternionException is raised if 'q' is not an instance of
Quaternion, float or int.
"""
# Subtraction of quaternions is trivial:
#
# (a1 + b1*i + c1*j + d1*k) - (a2 + b2*i + c2*j + d2*k) =
# = ( (a1-a2) + (b1-b2)*i + (c1-c2)*j + (d1-d2)*k )
if Quaternion.isQuaternion(q) :
return Quaternion(
self.o - q.o,
self.i - q.i,
self.j - q.j,
self.k - q.k )
elif InstanceCheck.isFloat(q) :
return Quaternion(
self.o - q,
self.i,
self.j,
self.k )
else:
raise QuaternionException("Input must be a quaternion or a float")
def __mul__(self, q) :
"""
Implementation of the multiplication operator '*' of two quaternions.
Note that multiplication of quaternions is not commutative: (p*q != q*p)
Input:
q - quaternion or a float value to be multiplied by this one
Return:
a new instance of Quaternion
A QuaternionException is raised if 'q' is not an instance of
Quaternion, float or int.
"""
# From the following definitions:
# i*i = j*j = k*k = -1,
# i*j = k, j*i = -k, j*k = i, k*j = -i, k*i = j and i*k = -j,
# the following formula can be quickly derived:
#
# (a1 + b1*i + c1*j + d1*k) * (a2 + b2*i + c2*j + d2*k) =
# = (a1*a2 - b1*b2 - c1*c2 - d1*d2) +
# + (a1*b2 + b1*a2 + c1*d2 - d1*c2) * i +
# + (a1*c2 - b1*d2 + c1*a2 + d1*b2) * j +
# + (a1*d2 + b1*c2 - c1*b2 + d1*a2) * k
#
# Note: The following script for GNU Octave or Matlab can be used
# for a quick unit test of the function:
# http://mind.cog.jhu.edu/courses/680/octave/Installers/Octave/Octave.OSX10.6/Applications/MATLAB_R2009b.app/toolbox/aero/aero/quatmultiply.m
if Quaternion.isQuaternion(q) :
return Quaternion(
self.o * q.o - self.i * q.i - self.j * q.j - self.k * q.k,
self.o * q.i + self.i * q.o + self.j * q.k - self.k * q.j,
self.o * q.j - self.i * q.k + self.j * q.o + self.k * q.i,
self.o * q.k + self.i * q.j - self.j * q.i + self.k * q.o )
elif InstanceCheck.isFloat(q) :
return Quaternion(
self.o * q,
self.i * q,
self.j * q,
self.k * q )
else:
raise QuaternionException("Input must be a quaternion or a float")
def __iadd__(self, q) :
"""
Addition operator (+=) that adds a quaternion to this one and assigns the sum to itself.
Input:
q - quaternion or a float value to be added to this one
Return:
a reference to itself
A QuaternionException is raised if 'q' is not an instance of
Quaternion, float or int.
"""
# For a definition of quaternion addition, see __add__
if Quaternion.isQuaternion(q) :
self.o += q.o
self.i += q.i
self.j += q.j
self.k += q.k
elif InstanceCheck.isFloat(q) :
self.o += q
else:
raise QuaternionException("Input must be a quaternion or a float")
return self
def __isub__(self, q) :
"""
Subtraction operator (-=) that subtracts a quaternion from this one and assigns the difference to itself.
Input:
q - quaternion or a float value to be subtracted from this one
Return:
a reference to itself
A QuaternionException is raised if 'q' is not an instance of
Quaternion, float or int.
"""
# For a definition of quaternion subtraction, see __sub__
if Quaternion.isQuaternion(q) :
self.o -= q.o
self.i -= q.i
self.j -= q.j
self.k -= q.k
elif InstanceCheck.isFloat(q) :
self.o -= q
else:
raise QuaternionException("Input must be a quaternion or a float")
return self
def __imul__(self, q) :
"""
Multiplication operator (*=) that multiplies this by a quaternion and assigns the product to itself.
Input:
            q - quaternion or a float value to be multiplied by this one
Return:
a reference to itself
A QuaternionException is raised if 'q' is not an instance of
Quaternion, float or int.
"""
# For a definition of quaternion multiplication, see __mul__
if Quaternion.isQuaternion(q) :
        # From a maintenance point of view, this would be
# a more elegant solution:
#qaux = self * q;
#self.o = qaux.o;
#self.i = qaux.i
#self.j = qaux.j
#self.k = qaux.k
# However, this one slightly reduces overhead with
# instantiation and destruction of another instance of Quaternion:
            self.o, self.i, self.j, self.k = \
self.o * q.o - self.i * q.i - self.j * q.j - self.k * q.k, \
self.o * q.i + self.i * q.o + self.j * q.k - self.k * q.j, \
self.o * q.j - self.i * q.k + self.j * q.o + self.k * q.i, \
self.o * q.k + self.i * q.j - self.j * q.i + self.k * q.o
elif InstanceCheck.isFloat(q) :
self.o *= q
self.i *= q
self.j *= q
self.k *= q
else:
raise QuaternionException("Input must be a quaternion or a float")
return self
def __neg__(self) :
"""
Unary negation operator (-).
Return:
negated -self (all components are negated)
"""
return Quaternion(
-self.o,
-self.i,
-self.j,
-self.k )
def conj(self) :
"""
        Conjugation of a quaternion, i.e. components 'i', 'j' and 'k' are negated.
Return: conjugation of self
"""
return Quaternion(
self.o,
-self.i,
-self.j,
-self.k )
def __sqsum(self) :
# An auxiliary method that calculates the sum of all components' squares
return self.o*self.o + self.i*self.i + self.j*self.j + self.k*self.k
def norm(self) :
"""
Norm of a quaternion, i.e. a square root of the sum of all components' squares
"""
return math.sqrt(self.__sqsum())
def reciprocal(self) :
"""
        Reciprocal of a quaternion (q^(-1)), satisfying the condition: q*q^(-1) = q^(-1)*q = 1.
A QuaternionException is raised if quaternion's norm equals 0.
"""
# Reciprocal of q is defined as:
#
# q^(-1) = q* / ||q||^2
#
# The following formula can be derived from it:
#
# a - b*i - c*j - d*k
# (a+b*i+c*j+d*k)^(-1) = -------------------------
# a^2 + b^2 + c^2 + d^2
nsq = self.__sqsum()
if nsq < Quaternion.eps :
raise QuaternionException("Reciprocal of a zero-quaternion does not exist")
return Quaternion(
self.o / nsq,
-self.i / nsq,
-self.j / nsq,
-self.k / nsq )
def unit(self) :
"""
        A unit quaternion of 'self', i.e. its norm is equal to 1.
A QuaternionException is raised if quaternion's norm equals 0.
"""
n = self.norm()
if n < Quaternion.eps :
raise QuaternionException("Cannot normalize a zero-quaternion")
return Quaternion(
self.o / n,
self.i / n,
self.j / n,
self.k / n )
def __str__(self) :
"""
"Nicely" formatted output of the quaternion (e.g. 4-5i+7j-3k).
The method is called by print().
"""
# Primarily the method was introduced for brief unit testing purposes
# and not much effort was invested into a visually "nice" output
outstr = str(self.o)
if self.i >= 0 :
outstr += '+'
outstr += str(self.i) + 'i'
if self.j >= 0 :
outstr += '+'
outstr += str(self.j) + 'j'
if self.k >= 0 :
outstr += '+'
outstr += str(self.k) + 'k'
return outstr
@staticmethod
def isQuaternion(q) :
"""Is 'q' an instance of Quaternion"""
return isinstance(q, Quaternion)
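# A brief illustration of the in-place operators above (a sketch; the values are made up
# and the Quaternion(o, i, j, k) constructor is assumed from earlier in this module):
# q = Quaternion(1, 0, 1, 0); p = Quaternion(1, 0.5, 0.5, 0.75)
# q *= p      # __imul__ applies the Hamilton product in place
# q += 2.0    # __iadd__ adds the float to the real component 'o' only
# q -= p      # __isub__ subtracts component-wise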
``` |
{
"source": "jkowalleck/AoC2020",
"score": 3
} |
#### File: AoC2020/02/solve.py
```python
from collections import Counter
from os import path
from re import compile as re_compile
from typing import Callable, List
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return [line.rstrip('\n') for line in fh.readlines()]
class Entry:
_LINE_FORMAT = re_compile(r'^(\d+)\-(\d+) (\w)\: (\w+)$')
def __init__(self, line_raw: str) -> None:
parts = self._LINE_FORMAT.match(line_raw)
if parts is None:
raise ValueError()
self.a = int(parts[1])
self.b = int(parts[2])
self.char = str(parts[3])
self.word = str(parts[4])
_SolveFilter = Callable[[Entry], bool]
def solve(filter_: _SolveFilter) -> int:
input_ = map(Entry, get_input())
valids = list(filter(filter_, input_))
return len(valids)
def is_valid1(line: Entry) -> bool:
counter = Counter(line.word)
char_count = counter.get(line.char, 0)
return line.a <= char_count <= line.b
def is_valid2(line: Entry) -> bool:
match_a = line.word[line.a - 1] == line.char
match_b = line.word[line.b - 1] == line.char
return match_a != match_b
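# Illustrative check of the two password policies on the well-known example entry
# (a sketch; '1-3 a: abcde' is not taken from the personalized puzzle input):
_example = Entry('1-3 a: abcde')
assert is_valid1(_example)  # 'a' occurs once, which lies within the 1..3 range
assert is_valid2(_example)  # position 1 holds 'a', position 3 holds 'c' -> exactly one match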
if __name__ == '__main__':
solution_part1 = solve(is_valid1)
print(f'solution part 1: {solution_part1}')
solution_part2 = solve(is_valid2)
print(f'solution part 2: {solution_part2}')
```
#### File: AoC2020/04/solve.py
```python
from os import path
from re import compile as re_compile
from typing import Callable, Dict, List
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return [line.rstrip('\n') for line in fh.readlines()]
class Document(Dict[str, str]):
def set_from_line(self, line) -> None:
for key_value in line.strip().split(' '):
key, value = key_value.split(':')
self[key] = value
class DocumentCollection(List[Document]):
@classmethod
def from_raw_lines(cls, lines: List[str]) -> "DocumentCollection":
docs = cls()
doc = Document()
line_index_max = len(lines) - 1
for line_index, line in enumerate(lines):
if line == '':
docs.append(doc)
doc = Document()
continue # for ... in ...
doc.set_from_line(line)
if line_index == line_index_max and doc:
docs.append(doc)
break
return docs
_Validation = Callable[[Document], bool]
def solve(validate: _Validation) -> int:
docs = DocumentCollection.from_raw_lines(get_input())
valid = 0
for doc in docs:
if validate(doc):
valid += 1
return valid
PASSPORT_KEYS_REQUIRED = (
'byr', # (Birth Year)
'iyr', # (Issue Year)
'eyr', # (Expiration Year)
'hgt', # (Height)
'hcl', # (Hair Color)
'ecl', # (Eye Color)
'pid', # (Passport ID)
# 'cid', # (Country ID)
)
def validation1(doc: Document) -> bool:
return all(key in doc for key in PASSPORT_KEYS_REQUIRED)
_HGT_RE = re_compile(r'^(\d+)(cm|in)$')
_HCL_RE = re_compile(r'^#[0-9a-f]{6}$')
_PID_RE = re_compile(r'^\d{9}$')
def validation2(doc: Document) -> bool:
if any(key not in doc for key in PASSPORT_KEYS_REQUIRED):
return False
if not 1920 <= int(doc['byr']) <= 2002:
return False
if not 2010 <= int(doc['iyr']) <= 2020:
return False
if not 2020 <= int(doc['eyr']) <= 2030:
return False
hgt_match = _HGT_RE.match(doc['hgt'])
if not hgt_match:
return False
hgt_value, hgt_unit = hgt_match.groups()
if hgt_unit == 'cm' and not 150 <= int(hgt_value) <= 193:
return False
if hgt_unit == 'in' and not 59 <= int(hgt_value) <= 76:
return False
if not _HCL_RE.match(doc['hcl']):
return False
if doc['ecl'] not in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}:
return False
if not _PID_RE.match(doc['pid']):
return False
return True
if __name__ == '__main__':
solution_part1 = solve(validation1)
print(f'solution part 1: {solution_part1}')
solution_part2 = solve(validation2)
print(f'solution part 2: {solution_part2}')
```
#### File: AoC2020/05/solve.py
```python
from os import path
from re import compile as re_compile
from typing import List
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return [line.rstrip('\n') for line in fh.readlines()]
def _int_from_tree(line: str, lower: str) -> int:
range_ = (0, 2 ** len(line) - 1)
for char in line:
delta_half = (range_[1] - range_[0]) // 2
range_ = (range_[0], range_[0] + delta_half) if char == lower else (range_[1] - delta_half, range_[1])
return range_[0]
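# Illustrative trace of the binary-partitioning helper above, using the well-known
# 'FBFBBFFRLR' example seat (a sketch, not tied to the real puzzle input):
# 'FBFBBFF' narrows (0, 127) -> (0, 63) -> (32, 63) -> (32, 47) -> (40, 47)
# -> (44, 47) -> (44, 45) -> (44, 44), i.e. row 44.
assert _int_from_tree('FBFBBFF', 'F') == 44
assert _int_from_tree('RLR', 'L') == 5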
class Seat:
def __init__(self, row: int, col: int) -> None:
self.row = row
self.col = col
@property
def id(self) -> int:
return self.row * 8 + self.col
__LINE_RE = re_compile(r'^([FB]+)([LR]+)$')
@classmethod
def from_line(cls, line: str) -> "Seat":
match = cls.__LINE_RE.match(line)
assert match
row, col = match.groups()
return cls(cls.__row_from_line(row), cls.__col_from_line(col))
@staticmethod
def __row_from_line(line: str) -> int:
return _int_from_tree(line, 'F')
@staticmethod
def __col_from_line(line: str) -> int:
return _int_from_tree(line, 'L')
def solve1() -> int:
seats = [Seat.from_line(line) for line in get_input()]
return max(seat.id for seat in seats)
def solve2() -> int:
taken_ids = set(Seat.from_line(line).id for line in get_input())
all_ids = set(range(min(taken_ids), max(taken_ids)))
free_ids = all_ids - taken_ids
return list(free_ids)[0]
if __name__ == '__main__':
solution_part1 = solve1()
print(f'solution part 1: {solution_part1}')
solution_part2 = solve2()
print(f'solution part 2: {solution_part2}')
```
#### File: AoC2020/06/solve.py
```python
from os import path
from typing import List, Callable, Set, Optional
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return [line.rstrip('\n') for line in fh.readlines()]
_Answers = Set[str]
_AnswerOperation = Callable[[_Answers, _Answers], _Answers]
def from_raw_lines(lines: List[str], op: _AnswerOperation) -> List[_Answers]:
groups = []
group_answers: Optional[_Answers] = None
line_index_max = len(lines) - 1
for line_index, line in enumerate(lines):
if line == '':
if group_answers is not None:
groups.append(group_answers)
group_answers = None
continue
group_answers = op(group_answers, set(line)) if group_answers is not None else set(line)
if line_index == line_index_max:
groups.append(group_answers)
return groups
def solve(op: _AnswerOperation) -> int:
groups = from_raw_lines(get_input(), op)
return sum(len(group) for group in groups)
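# Illustrative behaviour of the two set operations on a hypothetical two-person group
# answering 'ab' and 'bc' (a sketch, not from the real input):
assert from_raw_lines(['ab', 'bc'], set.__or__) == [{'a', 'b', 'c'}]   # anyone answered "yes"
assert from_raw_lines(['ab', 'bc'], set.__and__) == [{'b'}]            # everyone answered "yes"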
if __name__ == '__main__':
solution_part1 = solve(set.__or__)
print(f'solution part 1: {solution_part1}')
solution_part2 = solve(set.__and__)
print(f'solution part 2: {solution_part2}')
```
#### File: AoC2020/12/solve.py
```python
from enum import Enum, auto
from os import path
from typing import List
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return fh.read().splitlines()
class Direction(Enum):
North = 0
East = 90
South = 180
West = 270
class Position:
def __init__(self, pos_east: int, pos_north: int) -> None:
self.pos_east = pos_east
self.pos_north = pos_north
def __str__(self) -> str:
longitude = f'{"west" if self.pos_east < 0 else "east"} {abs(self.pos_east)}'
latitude = f'{"south" if self.pos_north < 0 else "north"} {abs(self.pos_north)}'
return f'{longitude} {latitude}'
def move_north(self, value: int) -> None:
self.pos_north += value
def move_east(self, value: int) -> None:
self.pos_east += value
def move_south(self, value: int) -> None:
self.pos_north -= value
def move_west(self, value: int) -> None:
self.pos_east -= value
def manhattan_distance(self) -> int:
return abs(self.pos_north) + abs(self.pos_east)
class Ship(Position):
def __init__(self, pos_east: int, pos_north: int, direction: Direction) -> None:
super().__init__(pos_east, pos_north)
self.direction = direction
def __str__(self) -> str:
return f'{super().__str__()} - direction: {self.direction.name}'
def rotate_right(self, value: int) -> None:
self.direction = Direction((self.direction.value + value) % 360)
def rotate_left(self, value: int) -> None:
self.direction = Direction((self.direction.value - value) % 360)
def move_towards(self, vector: Position, factor: int) -> None:
self.pos_east += factor * vector.pos_east
self.pos_north += factor * vector.pos_north
_MOVE_FORWARD_DIRECTIONS = {
Direction.North: Position.move_north,
Direction.East: Position.move_east,
Direction.South: Position.move_south,
Direction.West: Position.move_west,
}
def move_forward(self, value: int) -> None:
self._MOVE_FORWARD_DIRECTIONS[self.direction](self, value)
class Waypoint(Position):
def rotate_right(self, value: int) -> None:
if self.pos_north == 0 and self.pos_east == 0:
return None
deg = value % 360
while deg > 0:
pos_north = - self.pos_east
self.pos_east = self.pos_north
self.pos_north = pos_north
deg -= 90
def rotate_left(self, value: int) -> None:
self.rotate_right(360 - value % 360)
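# Illustrative waypoint rotation (a sketch with made-up coordinates): each 90-degree
# right turn maps (east, north) -> (north, -east), so a waypoint 10 east / 4 north
# ends up 4 east / 10 south after one turn.
_wp = Waypoint(10, 4)
_wp.rotate_right(90)
assert (_wp.pos_east, _wp.pos_north) == (4, -10)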
def solve1() -> int:
ship = Ship(0, 0, Direction.East)
instructions = {
'F': ship.move_forward,
'N': ship.move_north,
'E': ship.move_east,
'S': ship.move_south,
'W': ship.move_west,
'R': ship.rotate_right,
'L': ship.rotate_left,
}
for line in get_input():
action, value = line[0], int(line[1:])
instructions[action](value)
return ship.manhattan_distance()
def solve2() -> int:
ship = Ship(0, 0, Direction.East)
waypoint = Waypoint(10, 1)
def forward(factor: int) -> None:
ship.move_towards(waypoint, factor)
instructions = {
'F': forward,
'N': waypoint.move_north,
'E': waypoint.move_east,
'S': waypoint.move_south,
'W': waypoint.move_west,
'R': waypoint.rotate_right,
'L': waypoint.rotate_left,
}
for line in get_input():
action, value = line[0], int(line[1:])
instructions[action](value)
return ship.manhattan_distance()
if __name__ == '__main__':
solution_part1 = solve1()
print(f'solution part 1: {solution_part1}')
solution_part2 = solve2()
print(f'solution part 2: {solution_part2}')
``` |
{
"source": "Jkoza/Trump-Scraper",
"score": 3
} |
#### File: Jkoza/Trump-Scraper/upload.py
```python
import sys
import pymongo
from pymongo import MongoClient
import re
import csv
import operator
import json
import datetime
import dateutil.parser as parser
def format_data(data):
return ({
'created_at' : datetime.datetime.fromtimestamp(float(data[0]), None),
'text' : data[1],
'id_str' : data[2]
})
client = MongoClient('ds119064.mlab.com:19064')
client['trump-tweets'].authenticate('jkoza', 'jkoza')
database = client['trump-tweets']['tweets']
file = open('output.csv')
rows = csv.reader(file,delimiter=',')
#skip the header
next(rows)
for i in rows:
data = format_data(i)
if (data):
if(not database.find_one({'id_str' : data['id_str']})):
database.insert_one(format_data(i)).inserted_id
file.close()
``` |
{
"source": "jkozera/jedi",
"score": 2
} |
#### File: jkozera/jedi/conftest.py
```python
import tempfile
import shutil
import pytest
import jedi
collect_ignore = ["setup.py"]
# The following hooks (pytest_configure, pytest_unconfigure) are used
# to modify `jedi.settings.cache_directory` because `clean_jedi_cache`
# has no effect during doctests. Without these hooks, doctests uses
# user's cache (e.g., ~/.cache/jedi/). We should remove this
# workaround once the problem is fixed in py.test.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
# - https://bitbucket.org/hpk42/pytest/issue/275/
jedi_cache_directory_orig = None
jedi_cache_directory_temp = None
def pytest_addoption(parser):
parser.addoption("--jedi-debug", "-D", action='store_true',
help="Enables Jedi's debug output.")
parser.addoption("--warning-is-error", action='store_true',
help="Warnings are treated as errors.")
def pytest_configure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi_cache_directory_orig = jedi.settings.cache_directory
jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-')
jedi.settings.cache_directory = jedi_cache_directory_temp
if config.option.jedi_debug:
jedi.set_debug_function()
if config.option.warning_is_error:
import warnings
warnings.simplefilter("error")
def pytest_unconfigure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi.settings.cache_directory = jedi_cache_directory_orig
shutil.rmtree(jedi_cache_directory_temp)
@pytest.fixture(scope='session')
def clean_jedi_cache(request):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
Note that you can't use built-in `tmpdir` and `monkeypatch`
fixture here because their scope is 'function', which is not used
in 'session' scope fixture.
This fixture is activated in ../pytest.ini.
"""
from jedi import settings
old = settings.cache_directory
tmp = tempfile.mkdtemp(prefix='jedi-test-')
settings.cache_directory = tmp
@request.addfinalizer
def restore():
settings.cache_directory = old
shutil.rmtree(tmp)
```
#### File: jedi/parser/tree.py
```python
from abc import abstractmethod, abstractproperty
from jedi._compatibility import utf8_repr, encoding, is_py3
def search_ancestor(node, node_type_or_types):
"""
Recursively looks at the parents of a node and checks if the type names
match.
:param node: The node that is looked at.
:param node_type_or_types: A tuple or a string of type names that are
searched for.
"""
if not isinstance(node_type_or_types, (list, tuple)):
node_type_or_types = (node_type_or_types,)
while True:
node = node.parent
if node is None or node.type in node_type_or_types:
return node
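# For example (hypothetical node types), starting from a leaf inside a function,
# search_ancestor(leaf, 'funcdef') keeps walking leaf.parent, leaf.parent.parent, ...
# and returns the first ancestor whose .type is 'funcdef', or None once the root
# (whose parent is None) has been passed.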
class NodeOrLeaf(object):
"""
The base class for nodes and leaves.
"""
__slots__ = ()
def get_root_node(self):
"""
Returns the root node of a parser tree. The returned node doesn't have
a parent node like all the other nodes/leaves.
"""
scope = self
while scope.parent is not None:
scope = scope.parent
return scope
def get_next_sibling(self):
"""
The node immediately following the invocant in their parent's children
list. If the invocant does not have a next sibling, it is None
"""
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_previous_sibling(self):
"""
The node/leaf immediately preceding the invocant in their parent's
children list. If the invocant does not have a previous sibling, it is
None.
"""
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_previous_leaf(self):
"""
Returns the previous leaf in the parser tree.
Raises an IndexError if it's the first element in the parser tree.
"""
node = self
while True:
c = node.parent.children
i = c.index(node)
if i == 0:
node = node.parent
if node.parent is None:
return None
else:
node = c[i - 1]
break
while True:
try:
node = node.children[-1]
except AttributeError: # A Leaf doesn't have children.
return node
def get_next_leaf(self):
"""
Returns the next leaf in the parser tree.
Returns `None` if it's the last element in the parser tree.
"""
node = self
while True:
c = node.parent.children
i = c.index(node)
if i == len(c) - 1:
node = node.parent
if node.parent is None:
return None
else:
node = c[i + 1]
break
while True:
try:
node = node.children[0]
except AttributeError: # A Leaf doesn't have children.
return node
@abstractproperty
def start_pos(self):
"""
Returns the starting position of the prefix as a tuple, e.g. `(3, 4)`.
:return tuple of int: (line, column)
"""
@abstractproperty
def end_pos(self):
"""
Returns the end position of the prefix as a tuple, e.g. `(3, 4)`.
:return tuple of int: (line, column)
"""
@abstractmethod
def get_start_pos_of_prefix(self):
"""
Returns the start_pos of the prefix. This means basically it returns
the end_pos of the last prefix. The `get_start_pos_of_prefix()` of the
prefix `+` in `2 + 1` would be `(1, 1)`, while the start_pos is
`(1, 2)`.
:return tuple of int: (line, column)
"""
@abstractmethod
def get_first_leaf(self):
"""
Returns the first leaf of a node or itself if it's a leaf.
"""
@abstractmethod
def get_last_leaf(self):
"""
Returns the last leaf of a node or itself if it's a leaf.
"""
@abstractmethod
def get_code(self, normalized=False, include_prefix=True):
"""
Returns the code that was the input of the parser.
If a normalizer is given, the returned code will be normalized and will
not be equal to the input.
:param include_prefix: Removes the prefix (whitespace and comments) of e.g. a statement.
:param normalized: Deprecated. Please don't use. Will be replaced with something more powerful.
"""
class Leaf(NodeOrLeaf):
__slots__ = ('value', 'parent', 'line', 'indent', 'prefix')
def __init__(self, value, start_pos, prefix=''):
self.value = value
self.start_pos = start_pos
self.prefix = prefix
self.parent = None
@property
def start_pos(self):
return self.line, self.indent
@start_pos.setter
def start_pos(self, value):
self.line = value[0]
self.indent = value[1]
def get_start_pos_of_prefix(self):
previous_leaf = self.get_previous_leaf()
if previous_leaf is None:
return self.line - self.prefix.count('\n'), 0 # It's the first leaf.
return previous_leaf.end_pos
def get_first_leaf(self):
return self
def get_last_leaf(self):
return self
def get_code(self, normalized=False, include_prefix=True):
if normalized:
return self.value
if include_prefix:
return self.prefix + self.value
else:
return self.value
@property
def end_pos(self):
lines = self.value.split('\n')
end_pos_line = self.line + len(lines) - 1
# Check for multiline token
if self.line == end_pos_line:
end_pos_indent = self.indent + len(lines[-1])
else:
end_pos_indent = len(lines[-1])
return end_pos_line, end_pos_indent
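# For example, a leaf whose value is '"""a\nb"""' and whose start_pos is (3, 4)
# spans two lines, so its end_pos is (4, 4): the line advances by one and the
# column restarts at the length of the last physical line of the value.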
@utf8_repr
def __repr__(self):
return "<%s: %s start=%s>" % (type(self).__name__, self.value, self.start_pos)
class BaseNode(NodeOrLeaf):
"""
The super class for all nodes.
If you create custom nodes, you will probably want to inherit from this
``BaseNode``.
"""
__slots__ = ('children', 'parent')
type = None
def __init__(self, children):
for c in children:
c.parent = self
self.children = children
self.parent = None
@property
def start_pos(self):
return self.children[0].start_pos
def get_start_pos_of_prefix(self):
return self.children[0].get_start_pos_of_prefix()
@property
def end_pos(self):
return self.children[-1].end_pos
def _get_code_for_children(self, children, normalized, include_prefix):
# TODO implement normalized (depending on context).
if include_prefix:
return "".join(c.get_code(normalized) for c in children)
else:
first = children[0].get_code(include_prefix=False)
return first + "".join(c.get_code(normalized) for c in children[1:])
def get_code(self, normalized=False, include_prefix=True):
return self._get_code_for_children(self.children, normalized, include_prefix)
def get_leaf_for_position(self, position, include_prefixes=False):
def binary_search(lower, upper):
if lower == upper:
element = self.children[lower]
if not include_prefixes and position < element.start_pos:
# We're on a prefix.
return None
# In case we have prefixes, a leaf always matches
try:
return element.get_leaf_for_position(position, include_prefixes)
except AttributeError:
return element
index = int((lower + upper) / 2)
element = self.children[index]
if position <= element.end_pos:
return binary_search(lower, index)
else:
return binary_search(index + 1, upper)
if not ((1, 0) <= position <= self.children[-1].end_pos):
raise ValueError('Please provide a position that exists within this node.')
return binary_search(0, len(self.children) - 1)
def get_first_leaf(self):
return self.children[0].get_first_leaf()
def get_last_leaf(self):
return self.children[-1].get_last_leaf()
@utf8_repr
def __repr__(self):
code = self.get_code().replace('\n', ' ').strip()
if not is_py3:
code = code.encode(encoding, 'replace')
return "<%s: %s@%s,%s>" % \
(type(self).__name__, code, self.start_pos[0], self.start_pos[1])
class Node(BaseNode):
"""Concrete implementation for interior nodes."""
__slots__ = ('type',)
def __init__(self, type, children):
super(Node, self).__init__(children)
self.type = type
def __repr__(self):
return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children)
class ErrorNode(BaseNode):
"""
A node that contains valid nodes/leaves that were followed by a token that
was invalid. This basically means that the leaf after this node is where
Python would mark a syntax error.
"""
__slots__ = ()
type = 'error_node'
class ErrorLeaf(Leaf):
"""
A leaf that is either completely invalid in a language (like `$` in Python)
or is invalid at that position. Like the star in `1 +* 1`.
"""
__slots__ = ('original_type')
type = 'error_leaf'
def __init__(self, original_type, value, start_pos, prefix=''):
super(ErrorLeaf, self).__init__(value, start_pos, prefix)
self.original_type = original_type
def __repr__(self):
return "<%s: %s:%s, %s)>" % \
(type(self).__name__, self.original_type, repr(self.value), self.start_pos)
```
#### File: test/static_analysis/branches.py
```python
import random
if random.choice([0, 1]):
x = ''
else:
x = 1
if random.choice([0, 1]):
y = ''
else:
y = 1
# A simple test
if x != 1:
x.upper()
else:
#! 2 attribute-error
x.upper()
pass
# This operation is wrong, because the types could be different.
#! 6 type-error-operation
z = x + y
# However, here we have correct types.
if x == y:
z = x + y
else:
#! 6 type-error-operation
z = x + y
# -----------------
# With a function
# -----------------
def addition(a, b):
if type(a) == type(b):
return a + b
else:
#! 9 type-error-operation
return a + b
addition(1, 1)
addition(1.0, '')
```
#### File: test/test_api/test_interpreter.py
```python
from ..helpers import TestCase
import jedi
from jedi._compatibility import is_py33
from jedi.evaluate.compiled import mixed
class _GlobalNameSpace():
class SideEffectContainer():
pass
def get_completion(source, namespace):
i = jedi.Interpreter(source, [namespace])
completions = i.completions()
assert len(completions) == 1
return completions[0]
def test_builtin_details():
import keyword
class EmptyClass:
pass
variable = EmptyClass()
def func():
pass
cls = get_completion('EmptyClass', locals())
var = get_completion('variable', locals())
f = get_completion('func', locals())
m = get_completion('keyword', locals())
assert cls.type == 'class'
assert var.type == 'instance'
assert f.type == 'function'
assert m.type == 'module'
def test_numpy_like_non_zero():
"""
Numpy-like arrays can't be cast to bool and need to be compared with
`is`/`is not` rather than `==`/`!=`.
"""
class NumpyNonZero:
def __zero__(self):
raise ValueError('Numpy arrays would raise and tell you to use .any() or all()')
def __bool__(self):
raise ValueError('Numpy arrays would raise and tell you to use .any() or all()')
class NumpyLike:
def __eq__(self, other):
return NumpyNonZero()
def something(self):
pass
x = NumpyLike()
d = {'a': x}
# just assert these do not raise. They (strangely) trigger different
# code paths
get_completion('d["a"].some', {'d':d})
get_completion('x.some', {'x':x})
def test_nested_resolve():
class XX():
def x():
pass
cls = get_completion('XX', locals())
func = get_completion('XX.x', locals())
assert (func.line, func.column) == (cls.line + 1, 12)
def test_side_effect_completion():
"""
In the repl it's possible to cause side effects that are not documented in
Python code, however we want references to Python code as well. Therefore
we need some mixed kind of magic for tests.
"""
_GlobalNameSpace.SideEffectContainer.foo = 1
side_effect = get_completion('SideEffectContainer', _GlobalNameSpace.__dict__)
# It's a class that contains MixedObject.
context, = side_effect._name.infer()
assert isinstance(context, mixed.MixedObject)
foo = get_completion('SideEffectContainer.foo', _GlobalNameSpace.__dict__)
assert foo.name == 'foo'
def _assert_interpreter_complete(source, namespace, completions,
**kwds):
script = jedi.Interpreter(source, [namespace], **kwds)
cs = script.completions()
actual = [c.name for c in cs]
assert sorted(actual) == sorted(completions)
def test_complete_raw_function():
from os.path import join
_assert_interpreter_complete('join("").up',
locals(),
['upper'])
def test_complete_raw_function_different_name():
from os.path import join as pjoin
_assert_interpreter_complete('pjoin("").up',
locals(),
['upper'])
def test_complete_raw_module():
import os
_assert_interpreter_complete('os.path.join("a").up',
locals(),
['upper'])
def test_complete_raw_instance():
import datetime
dt = datetime.datetime(2013, 1, 1)
completions = ['time', 'timetz', 'timetuple']
if is_py33:
completions += ['timestamp']
_assert_interpreter_complete('(dt - dt).ti',
locals(),
completions)
def test_list():
array = ['haha', 1]
_assert_interpreter_complete('array[0].uppe',
locals(),
['upper'])
_assert_interpreter_complete('array[0].real',
locals(),
[])
# something different: no index given, still just return the right completions
_assert_interpreter_complete('array[int].real',
locals(),
['real'])
_assert_interpreter_complete('array[int()].real',
locals(),
['real'])
# inexistent index
_assert_interpreter_complete('array[2].upper',
locals(),
['upper'])
def test_slice():
class Foo1():
bar = []
baz = 'xbarx'
_assert_interpreter_complete('getattr(Foo1, baz[1:-1]).append',
locals(),
['append'])
def test_getitem_side_effects():
class Foo2():
def __getitem__(self, index):
# possible side effects here, should therefore not call this.
return index
foo = Foo2()
_assert_interpreter_complete('foo[0].', locals(), [])
def test_property_error():
class Foo3():
@property
def bar(self):
raise ValueError
foo = Foo3()
_assert_interpreter_complete('foo.bar', locals(), ['bar'])
_assert_interpreter_complete('foo.bar.baz', locals(), [])
def test_param_completion():
def foo(bar):
pass
lambd = lambda xyz: 3
_assert_interpreter_complete('foo(bar', locals(), ['bar'])
# TODO we're not using the Python 3.5 inspect.signature yet.
assert not jedi.Interpreter('lambd(xyz', [locals()]).completions()
```
#### File: python3.4/site-packages/smth.py
```python
import sys
sys.path.append('/path/from/smth.py')
def extend_path():
sys.path.append('/path/from/smth.py:extend_path')
```
#### File: test/test_evaluate/test_annotations.py
```python
from textwrap import dedent
import jedi
import pytest
@pytest.mark.skipif('sys.version_info[0] < 3')
def test_simple_annotations():
"""
Annotations only exist in Python 3.
If annotations adhere to PEP-0484, we use them (they override inference),
else they are parsed but ignored
"""
source = dedent("""\
def annot(a:3):
return a
annot('')""")
assert [d.name for d in jedi.Script(source, ).goto_definitions()] == ['str']
source = dedent("""\
def annot_ret(a:3) -> 3:
return a
annot_ret('')""")
assert [d.name for d in jedi.Script(source, ).goto_definitions()] == ['str']
source = dedent("""\
def annot(a:int):
return a
annot('')""")
assert [d.name for d in jedi.Script(source, ).goto_definitions()] == ['int']
@pytest.mark.skipif('sys.version_info[0] < 3')
@pytest.mark.parametrize('reference', [
'assert 1',
'1',
'def x(): pass',
'1, 2',
r'1\n'
])
def test_illegal_forward_references(reference):
source = 'def foo(bar: "%s"): bar' % reference
assert not jedi.Script(source).goto_definitions()
@pytest.mark.skipif('sys.version_info[0] < 3')
def test_lambda_forward_references():
source = 'def foo(bar: "lambda: 3"): bar'
# For now just receiving the 3 is ok. I doubt that this is what we
# want. We also execute functions. Should we only execute classes?
assert jedi.Script(source).goto_definitions()
```
#### File: test/test_parser/test_absolute_import.py
```python
from jedi.parser.python import parse
def test_explicit_absolute_imports():
"""
Detect modules with ``from __future__ import absolute_import``.
"""
module = parse("from __future__ import absolute_import")
assert module.has_explicit_absolute_import()
def test_no_explicit_absolute_imports():
"""
Detect modules without ``from __future__ import absolute_import``.
"""
assert not parse("1").has_explicit_absolute_import()
def test_dont_break_imports_without_namespaces():
"""
The code checking for ``from __future__ import absolute_import`` shouldn't
assume that all imports have non-``None`` namespaces.
"""
src = "from __future__ import absolute_import\nimport xyzzy"
assert parse(src).has_explicit_absolute_import()
```
#### File: test/test_parser/test_user_context.py
```python
import jedi
def test_form_feed_characters():
s = "\f\nclass Test(object):\n pass"
jedi.Script(s, line=2, column=18).call_signatures()
``` |
{
"source": "jkozera/oumodulesbot",
"score": 3
} |
#### File: oumodulesbot/oumodulesbot/ou_utils.py
```python
import re
from collections import namedtuple
from typing import Iterable
Result = namedtuple("Result", "code,title,url")
MODULE_CODE_RE_TEMPLATE = r"[a-zA-Z]{1,6}[0-9]{1,3}(?:-[a-zA-Z]{1,5})?"
# QD = Open Degree:
QUALIFICATION_CODE_RE_TEMPLATE = (
r"[a-zA-Z][0-9]{2}(?:-[a-zA-Z]{1,5})?|[qQ][dD]"
)
QUALIFICATION_CODE_RE = re.compile(fr"^({QUALIFICATION_CODE_RE_TEMPLATE})$")
MODULE_OR_QUALIFICATION_CODE_RE_TEMPLATE = (
fr"(?:{MODULE_CODE_RE_TEMPLATE}|{QUALIFICATION_CODE_RE_TEMPLATE})"
)
def get_possible_qualification_urls(code: str) -> Iterable[str]:
return [
f"http://www.open.ac.uk/courses/qualifications/{code}",
f"http://www.open.ac.uk/postgraduate/qualifications/{code}",
]
def get_module_level(module_code: str) -> int:
for c in module_code:
if c.isdigit():
return int(c)
raise ValueError(f"Invalid module code: {module_code}")
def get_module_urls(module_code: str) -> Iterable[str]:
if get_module_level(module_code) == 0:
templates = ["http://www.open.ac.uk/courses/short-courses/{}"]
elif get_module_level(module_code) == 8:
templates = ["http://www.open.ac.uk/postgraduate/modules/{}"]
else:
templates = [
"http://www.open.ac.uk/courses/qualifications/details/{}",
"http://www.open.ac.uk/courses/modules/{}",
]
return [template.format(module_code.lower()) for template in templates]
def get_possible_urls_from_code(code: str) -> Iterable[str]:
code = code.lower()
if QUALIFICATION_CODE_RE.match(code):
return get_possible_qualification_urls(code)
return get_module_urls(code)
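# Illustrative sanity checks with made-up codes (a sketch; A123 and B31 are not
# claimed to be real OU modules/qualifications here):
assert get_module_level('A123') == 1
assert get_possible_urls_from_code('B31') == get_possible_qualification_urls('b31')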
```
#### File: oumodulesbot/tests/test_oumodulesbot.py
```python
import json
from collections import namedtuple
from unittest import mock
import discord
import pytest
from oumodulesbot.oumodulesbot import OUModulesBot
pytestmark = pytest.mark.asyncio
QUALIFICATION_URL_TPL = "http://www.open.ac.uk/courses/qualifications/{code}"
@pytest.fixture(autouse=True)
def mock_cache(monkeypatch):
def mock_load(f):
return {
"A123": ["Mocked active module", "url1"],
"A012": ["Mocked active short course", "url2"],
"A888": ["Mocked active postgrad module", "url3"],
"B321": ["Mocked inactive module", None],
"B31": ["Mocked inactive-actually-active qualification", None],
}
monkeypatch.setattr(json, "load", mock_load)
ModuleExample = namedtuple("ModuleExample", "code,active,result")
E2E_EXAMPLES = [
ModuleExample(
"A123",
True,
("A123: Mocked active module (<url1>)"),
),
ModuleExample("B321", False, "B321: Mocked inactive module"),
ModuleExample(
"B31",
False,
"B31: Mocked inactive-actually-active qualification (<{url}>)".format(
url=QUALIFICATION_URL_TPL.format(code="b31"),
),
),
ModuleExample(
"A012",
True,
("A012: Mocked active short course (<url2>)"),
),
ModuleExample(
"A888",
True,
("A888: Mocked active postgrad module (<url3>)"),
),
]
def create_mock_message(contents, send_result="foo", id_override=None):
message = mock.Mock(spec=discord.Message)
message.content = contents
message.reply = mock.AsyncMock()
message.reply.return_value = send_result
message.id = id_override or contents
return message
async def process_message(bot, message, result):
"""
Pass the message to the bot, optionally verifying that appropriate checks
are made for inactive modules.
"""
with mock.patch("httpx.AsyncClient.head") as head_mock:
if "actually-active" in result.result:
head_mock.return_value.status_code = 200
head_mock.return_value.url = result.code
await bot.on_message(message)
if not result.active:
code = result.code.lower()
# inactive results are double-checked with http to provide a link
# in case the inactive cache.json status is no longer valid:
if "qualification" not in result.result:
prefix = "http://www.open.ac.uk/courses"
urls = [
f"{prefix}/qualifications/details/{code}",
f"{prefix}/modules/{code}",
]
else:
urls = [QUALIFICATION_URL_TPL.format(code=code)]
head_mock.assert_has_calls(
[
mock.call(url, allow_redirects=True, timeout=3)
for url in urls
],
any_order=True,
)
@pytest.mark.parametrize("module", E2E_EXAMPLES)
async def test_end_to_end_create(module):
"""
Basic test to make sure matching modules are processed correctly.
Runs with each example from E2E_EXAMPLES independently.
"""
bot = OUModulesBot()
message = create_mock_message(f"foo !{module.code}")
await process_message(bot, message, module)
message.reply.assert_called_once_with(module.result, embed=None)
async def test_end_to_end_update():
"""
Ensure `message.edit` on the original reply is called, instead of
`channel.send`, if the triggering message is edited, as opposed to new.
Processes E2E_EXAMPLES sequentially with a single bot instance.
First message is the first example, which is subsequently edited
by replacing its contents with further examples.
"""
first_post, updates = E2E_EXAMPLES[0], E2E_EXAMPLES[1:]
bot = OUModulesBot()
result_message = mock.Mock(spec=discord.Message)
message = create_mock_message(
f"foo !{first_post.code}",
# result_message is our bot's response here:
send_result=result_message,
# the id must be the same to trigger `edit`:
id_override="original_id",
)
await process_message(bot, message, first_post)
for update in updates:
update_message = create_mock_message(
f"foo !{update.code}",
id_override="original_id",
)
await process_message(bot, update_message, update)
# verify that the bot's response is updated:
result_message.edit.assert_called_once_with(
content=update.result, embed=None
)
result_message.edit.reset_mock()
@mock.patch("httpx.AsyncClient.get")
async def test_end_to_end_missing_module(get_mock):
bot = OUModulesBot()
fake_module = ModuleExample("XYZ999", False, "XYZ999: Some Random Module")
message = create_mock_message(f"foo !{fake_module.code}")
# return matching data from httpx:
# 1. Empty SPARQL:
sparql_json = {"results": {"bindings": []}}
get_mock.return_value.json = lambda: sparql_json
# 2. OUDA HTML:
get_mock.return_value.content = (
"not really html but matches the regex:"
f"<title>{fake_module.code} Some Random Module"
" - Open University Digital Archive</title>"
).encode()
# ensure module name is returned to Discord:
await process_message(bot, message, fake_module)
message.reply.assert_called_once_with(fake_module.result, embed=None)
# ensure httpx was called with appropriate URL:
get_mock.assert_called_with(
# ignore SPARQL calls
"http://www.open.ac.uk/library/digital-archive/module/"
f"xcri:{fake_module.code}"
)
``` |
{
"source": "JKozerawski/BLT",
"score": 2
} |
#### File: BLT/data/dataloader.py
```python
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
from PIL import Image
import torch
import numpy as np
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Data transformation with augmentation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(degrees=10),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
}
# Dataset
class LT_Dataset(Dataset):
def __init__(self, root, txt, transform=None):
self.img_path = []
self.labels = []
self.transform = transform
with open(txt) as f:
for line in f:
if 'iNaturalist' in root:
img_path = str(line.split()[0])[2:-1]
else:
img_path = line.split()[0]
self.img_path.append(os.path.join(root, img_path))
self.labels.append(int(line.split()[1]))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
path = self.img_path[index]
label = self.labels[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, label, path
# Load datasets
def load_data(data_root, dataset, phase, batch_size, use_sampler = False, num_workers=4, shuffle=True, gamma=1.0):
txt = './data/%s/%s_%s.txt'%(dataset, dataset, phase)
print('Loading data from %s' % (txt))
transform = data_transforms[phase]
print('Use data transformation:', transform)
set_ = LT_Dataset(data_root, txt, transform)
if phase == 'train' and use_sampler:
# Prepare the sampler:
print('Using sampler.')
txt_file = txt
with open(txt_file, 'r') as txtfile:
lines = txtfile.readlines()
num_classes = -1
for line in lines:
lbl = int(line.split()[-1])
if lbl > num_classes:
num_classes = lbl
label_count = np.zeros(num_classes+1)
for line in lines:
lbl = int(line.split()[-1])
label_count[lbl] += 1
all_images = np.sum(label_count)
weights = []
for line in lines:
lbl = int(line.split()[-1])
weights.append(all_images / (label_count[lbl]**gamma))
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
return DataLoader(dataset=set_, batch_size=batch_size, shuffle=False, sampler=sampler, num_workers=num_workers, pin_memory=True)
else:
print('No sampler.')
print('Shuffle is %s.' % (shuffle))
return DataLoader(dataset=set_, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True)
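# Illustrative weight computation for the WeightedRandomSampler branch above
# (a sketch with a made-up split): with 90 images of class 0, 10 images of class 1
# and gamma = 1.0, each class-0 sample gets weight 100/90 ~= 1.11 and each class-1
# sample gets weight 100/10 = 10.0, so minority-class images are drawn ~9x more often.
# gamma < 1.0 softens the re-balancing and gamma = 0 reduces it to uniform sampling.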
```
#### File: JKozerawski/BLT/gradient_ascent_adv.py
```python
import torch
from torch.optim import SGD
from torch.nn import functional
from torchvision import transforms
from torch.autograd import Variable
import numpy as np
class DisguisedFoolingSampleGeneration():
"""
Produces an image that maximizes a certain class with gradient ascent, breaks as soon as
the target prediction confidence is captured
"""
def __init__(self, model, minimum_confidence):
self.model = model
self.minimum_confidence = minimum_confidence
def preprocess_image(self, image, no_augmentation=False):
if not no_augmentation:
t = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.3, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(degrees=10),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
t = transforms.Compose([
transforms.Resize((224,224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
return Variable(t(image).unsqueeze(0), requires_grad=True)
def generate_batch(self, images, true_labels, targets=None, possible_probs=[0.1, 0.15, 0.2], no_augmentation=False):
n_max = 15
batch_size = len(images)
self.processed_images = [self.preprocess_image(images[idx], no_augmentation=no_augmentation) for idx in range(batch_size)]
results = []
labels = []
j = 0
confidences = np.asarray([np.random.choice(possible_probs, 1, replace=False)[0] for i in range(batch_size)])
while batch_size > 0:
# if it is the last pass then just return current progress:
if j == n_max:
for idx in range(batch_size):
results.append(self.images[idx])
labels.append(true_labels[idx])
return results, labels
# init optimizer with current images:
optimizer = SGD(self.processed_images, lr=0.7)
self.images = torch.stack(self.processed_images).squeeze(1).cuda()
# forward pass:
self.model.batch_forward(self.images, phase='test')
output = self.model.logits
# get confidence from softmax:
target_confidences = np.asarray([functional.softmax(output)[idx][targets[idx]].data.cpu().numpy().item(0) for idx in range(batch_size)])
# find images that already fulfill the required confidence threshold:
good_indices = np.where(target_confidences-confidences > 0.0)[0]
# if there are such images, then:
if len(good_indices) > 0:
# find ones that still require altering:
keep_indices = [idx for idx in range(batch_size) if idx not in good_indices]
batch_size = len(keep_indices)
for idx in good_indices:
# add them to the final results:
results.append(self.images[idx])
labels.append(true_labels[idx])
# remove them from next iterations:
self.processed_images = [self.processed_images[idx] for idx in keep_indices]
true_labels = [true_labels[idx] for idx in keep_indices]
targets = [targets[idx] for idx in keep_indices]
confidences = np.asarray([confidences[idx] for idx in keep_indices])
# if no more images, return:
if batch_size == 0:
return results, labels
# Target specific class
class_loss = -output[0, targets[0]]
for i in range(1, batch_size):
class_loss += -output[i, targets[i]]
# Zero grads
self.model.networks['feat_model'].zero_grad()
self.model.networks['classifier'].zero_grad()
# Backward
class_loss.backward()
# Update image
optimizer.step()
j += 1
return results, labels
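# Sketch of the intended usage (the variable names below are assumptions, not part
# of a documented API):
# gen = DisguisedFoolingSampleGeneration(model, minimum_confidence=0.1)
# fooling_images, labels = gen.generate_batch(pil_images, true_labels, targets=target_ids)
# Each image is repeatedly nudged by gradient ascent (SGD on the negated target logit)
# until the softmax confidence for its target class exceeds the randomly drawn
# threshold (0.1/0.15/0.2 by default), or until n_max = 15 passes have been made.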
```
#### File: JKozerawski/BLT/utils.py
```python
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import f1_score
import importlib
import pdb
def source_import(file_path):
"""This function imports python module directly from source code using importlib"""
spec = importlib.util.spec_from_file_location('', file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def batch_show(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(20,20))
plt.imshow(inp)
if title is not None:
plt.title(title)
def print_write(print_str, log_file):
print(*print_str)
#with open(log_file, 'a') as f:
#print(*print_str, file=f)
def init_weights(model, weights_path, caffe=False, classifier=False, temp=False, device = 'cuda:1'):
"""Initialize weights"""
print('Pretrained %s weights path: %s' % ('classifier' if classifier else 'feature model',
weights_path))
weights = torch.load(weights_path, map_location=device)
if not classifier:
if caffe:
weights = {k: weights[k] if k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['feat_model']
weights = {k: weights['module.' + k] if 'module.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['classifier']
if not temp:
weights = {k: weights['module.fc.' + k] if 'module.fc.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
return weights
model.load_state_dict(weights)
return model
def correlation(accuracies, counts):
num_images = np.sum(counts)
balanced_counts = np.asarray([num_images/len(counts) for i in range(len(counts))])
few_shot_indices = np.where(counts < 20)[0]
hallucinated_counts = np.asarray([num_images/len(counts) for i in range(len(counts))])
for idx in few_shot_indices:
hallucinated_counts[idx] *= 1.5
#print(balanced_counts)
#print(hallucinated_counts)
accuracy_correlation = np.corrcoef(accuracies, counts)
balanced_correlation = np.corrcoef(accuracies, balanced_counts)
hallucinated_correlation = np.corrcoef(accuracies, hallucinated_counts)
#print("Correlations:", accuracy_correlation, balanced_correlation, hallucinated_correlation)
print("Correlations:", accuracy_correlation[0, 1], balanced_correlation[0, 1], hallucinated_correlation[0, 1])
#return
def shot_acc (correct, labels, label_groups, counts):
tlabels = labels.detach().cpu().numpy()
unique = np.unique(tlabels)
class_correct_1 = np.zeros(len(unique))
class_correct_5 = np.zeros(len(unique))
many_shot_1 = []
medium_shot_1 = []
low_shot_1 = []
many_shot_5 = []
medium_shot_5 = []
low_shot_5 = []
for l in unique:
n = len(np.where(tlabels == l)[0])
#if n == 0:
#n = 1
correct_k = correct[:1, labels == l].view(-1).float().sum(0, keepdim=True).cpu().numpy()[0]
class_correct_1[l] = correct_k / n
correct_k = correct[:5, labels == l].view(-1).float().sum(0, keepdim=True).cpu().numpy()[0]
class_correct_5[l] = correct_k / n
if l in label_groups[0]:
many_shot_1.append(class_correct_1[l])
many_shot_5.append(class_correct_5[l])
elif l in label_groups[2]:
low_shot_1.append(class_correct_1[l])
low_shot_5.append(class_correct_5[l])
else:
medium_shot_1.append(class_correct_1[l])
medium_shot_5.append(class_correct_5[l])
correlation(class_correct_1, counts)
print("Standard deviations:", 100*np.std(many_shot_1), 100*np.std(medium_shot_1), 100*np.std(low_shot_1))
return 100*np.mean(many_shot_1), 100*np.mean(medium_shot_1), 100*np.mean(low_shot_1), 100*np.mean(many_shot_5), 100*np.mean(medium_shot_5), 100*np.mean(low_shot_5)
def shot_acc_old(preds, labels, train_data, many_shot_thr=100, low_shot_thr=20):
training_labels = np.array(train_data.dataset.labels).astype(int)
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(len(labels[labels == l]))
class_correct.append((preds[labels == l] == labels[labels == l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] >= many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] <= low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def shot_acc2(preds, labels, train_data, label_groups):
training_labels = np.array(train_data.dataset.labels).astype(int)
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(len(labels[labels == l]))
class_correct.append((preds[labels == l] == labels[labels == l]).sum())
many_shot = []
medium_shot = []
low_shot = []
many_count = 0.
medium_count = 0.
few_count = 0.
for i in range(len(train_class_count)):
if i in label_groups[0]:
many_shot.append((class_correct[i] / test_class_count[i]))
many_count += 1.
elif i in label_groups[2]:
low_shot.append((class_correct[i] / test_class_count[i]))
few_count += 1.
else:
medium_shot.append((class_correct[i] / test_class_count[i]))
medium_count += 1.
many_err = round((1. / np.sqrt(many_count)) * np.std(many_shot), 3)
medium_err = round((1. / np.sqrt(medium_count)) * np.std(medium_shot), 3)
few_err = round((1. / np.sqrt(few_count)) * np.std(low_shot), 3)
return np.mean(many_shot), np.mean(medium_shot), np.mean(low_shot), many_err, medium_err, few_err
def F_measure(preds, labels, openset=False, theta=None):
if openset:
# f1 score for openset evaluation
true_pos = 0.
false_pos = 0.
false_neg = 0.
for i in range(len(labels)):
true_pos += 1 if preds[i] == labels[i] and labels[i] != -1 else 0
false_pos += 1 if preds[i] != labels[i] and labels[i] != -1 and preds[i] != -1 else 0
false_neg += 1 if preds[i] != labels[i] and labels[i] == -1 else 0
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
return 2 * ((precision * recall) / (precision + recall + 1e-12))
else:
# Regular f1 score
return f1_score(labels.detach().cpu().numpy(), preds.detach().cpu().numpy(), average='macro')
def mic_acc_cal(preds, labels):
acc_mic_top1 = (preds == labels).sum().item() / len(labels)
return 100*acc_mic_top1
def class_count (data):
labels = np.array(data.dataset.labels)
class_data_num = []
for l in np.unique(labels):
class_data_num.append(len(labels[labels == l]))
return class_data_num
# def dataset_dist (in_loader):
# """Example, dataset_dist(data['train'][0])"""
# label_list = np.array([x[1] for x in in_loader.dataset.samples])
# total_num = len(data_list)
# distribution = []
# for l in np.unique(label_list):
# distribution.append((l, len(label_list[label_list == l])/total_num))
# return distribution
``` |
{
"source": "JKozerawski/instagram-auto-tagging",
"score": 3
} |
#### File: JKozerawski/instagram-auto-tagging/image_scraper.py
```python
import os
import urllib
from bs4 import BeautifulSoup
import selenium.webdriver as webdriver
from glob import glob
import filecmp
import pickle
import random
from pyvirtualdisplay import Display
class ImageDownloader():
def __init__(self, savePath):
self.savePath = savePath
self.justMostRecent = False
def set_tag(self, tag):
self.tag = tag
def check_if_file_exists(self):
for txtFile in self.txtFiles:
if ( filecmp.cmp('./temp.txt', txtFile) ): return False
return True
def download_images(self, tag=None):
# set tag:
if(tag!=None): self.set_tag(tag)
# create folder if one does not exist:
if(not os.path.isdir(self.savePath+self.tag+"/")):
os.mkdir(self.savePath+self.tag+"/")
#url to go to:
page_url = "https://www.instagram.com/explore/tags/"+self.tag+"/"
display = Display(visible=0, size=(800, 600))
display.start()
#Chrome stuff:
driver = webdriver.Chrome()
driver.get(page_url)
soup = BeautifulSoup(driver.page_source,"lxml")
# check how many images are in the folder already:
count = len(glob(self.savePath+self.tag+"/"+"*jpg"))
self.txtFiles = glob(self.savePath+self.tag+"/"+"*txt")
assert count == len(self.txtFiles)
#iterate over the pictures
for index, pictureParent in enumerate(soup.find_all('div', attrs={'class': '_4rbun'})) :
# omit the top posts
if(index >= 9) :
# get the description:
try:
caption = pictureParent.contents[0] ['alt']
captionText = caption.encode('UTF-8').lower() # lowercase the description
f= open("./temp.txt","w+") #the + lets it create the file
f.write(captionText)
f.close()
# proceed only if actual hashtag is in the description and if file has already been downloaded:
if("#"+self.tag in captionText and self.check_if_file_exists()):
count+=1
#download the image:
url = pictureParent.contents[0] ['src']
# name the image file:
imageFilename = self.savePath+self.tag+"/" + str(count-1) + ".jpg"
urllib.urlretrieve(url, imageFilename)
#save the caption in a text file:
# name the description file:
textFilename = self.savePath+self.tag+"/" + str(count-1) + ".txt"
f= open(textFilename,"w+") #the + lets it create the file
f.write(captionText)
f.close()
except:
print "Some error"
driver.quit()
display.stop()
def get_x_tags(self, tag, noOfTagsToDownload):
self.set_tag(tag)
if( len(glob(self.savePath+self.tag+"/"+"*jpg")) <=noOfTagsToDownload):
prev_n = len(glob(self.savePath+self.tag+"/"+"*jpg"))
self.download_images()
print "Scraping:",self.tag, "Scraped:",len(glob(self.savePath+self.tag+"/"+"*jpg")) - prev_n, "new images.","Left:", len(self.listOfTags), "categories"
else: self.listOfTags.remove(self.tag)
def go_through_list_of_tags(self, listOfTags, noOfTagsToDownload):
random.shuffle(listOfTags)
listOfTags = [x.lower() for x in listOfTags]
self.listOfTags = listOfTags
print self.listOfTags
while(len(self.listOfTags)>0 ):
for tag in self.listOfTags:
self.get_x_tags(tag, noOfTagsToDownload)
#------------------------------------------------------------------
def main():
nouns = "/media/jedrzej/Seagate/Python/instagram-auto-hashtag/words/most_common_nouns.p"
#nouns2 = "/media/jedrzej/Seagate/Python/instagram-auto-hashtag/words/nouns.p"
adjectives = "/media/jedrzej/Seagate/Python/instagram-auto-hashtag/words/adjectives.p"
verbs = "/media/jedrzej/Seagate/Python/instagram-auto-hashtag/words/verbs.p"
prepositions = "/media/jedrzej/Seagate/Python/instagram-auto-hashtag/words/prepositions.p"
adverbs = "/media/jedrzej/Seagate/Python/instagram-auto-hashtag/words/adverbs.p"
l = pickle.load( open(verbs, "rb" ) )
downloader = ImageDownloader("/media/jedrzej/Seagate/DATA/instagram_images/verbs/")
downloader.go_through_list_of_tags(l, 50)
'''
l = pickle.load( open( adjectives, "rb" ) )
print l
random.shuffle(l)
downloader = ImageDownloader("/media/jedrzej/Seagate/DATA/instagram_images/adjectives/")
downloader.go_through_list_of_tags(l, 50)
'''
if __name__ == "__main__":
main()
``` |
{
"source": "jkozerski/meteo",
"score": 3
} |
#### File: meteo_lcd/db_tools/db_to_log.py
```python
import sqlite3
import datetime
import dateutil.parser
working_dir = "/var/www/html/"
data_dir = "/home/pi/meteo/"
log_file_path = working_dir + "meteo.log"
db_path = data_dir + "meteo.db"
###################################
def db_to_log():
lf = open(data_dir + "tmp.log", "w");
conn = sqlite3.connect(db_path)
c = conn.cursor()
try:
c.execute("SELECT time, temp_in, humid_in, dew_point_in, temp, humid, dew_point, pressure FROM log ORDER BY time ASC")
rows = c.fetchall()
except Exception as e:
print("Error while get all values from db: " + str(e))
for row in rows:
new_line = datetime.datetime.fromtimestamp(row[0]).isoformat() + ";" + \
str(row[1]) + ";" + \
str(row[2]) + ";" + \
str(float(row[3])) + ";" + \
str(row[4]) + ";" + \
str(row[5]) + ";" + \
str(float(row[6])) + ";" + \
str(row[7]) + "\n"
lf.write(new_line)
lf.close()
conn.close()
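# Each emitted line is an ISO-8601 timestamp followed by seven ';'-separated readings,
# i.e. time;temp_in;humid_in;dew_point_in;temp_out;humid_out;dew_point_out;pressure,
# for example (made-up values): 2021-03-14T12:00:00;21.5;45;9.0;3.2;80;0.0;1013.2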
###################################
db_to_log()
```
#### File: meteo_lcd/db_tools/log_to_db.py
```python
import sqlite3
import datetime
import dateutil.parser
working_dir = "/var/www/html/"
data_dir = "/home/pi/meteo/"
log_file_path = working_dir + "meteo.log"
db_path = data_dir + "meteo.db"
# Converts a string back the datetime structure
def getDateTimeFromISO8601String(s):
d = dateutil.parser.parse(s)
return d
def log_to_db ():
conn = sqlite3.connect(db_path)
c = conn.cursor()
# Open log file
lf = open(log_file_path, "r");
# From each line of log file create a pairs of meteo data (time, value)
for line in lf:
# Parse line
time, temp_in, humid_in, dew_in, temp_out, humid_out, dew_out, pressure = str(line).split(";")
#c.execute("SELECT strftime('%s', (?), 'localtime')", (time, ))
#int_time = (c.fetchone())[0]
int_time = int (getDateTimeFromISO8601String(time).strftime("%s"))
#time_ = getDateTimeFromISO8601String(time)
#print int_time
c.execute("INSERT INTO log (time, temp, humid, dew_point, pressure, temp_in, humid_in, dew_point_in) \
VALUES (?, ?, ?, ?, ?, ?, ?, ?)", (int_time, temp_out, humid_out, dew_out, pressure, temp_in, humid_in, dew_in))
lf.close()
conn.commit()
conn.close()
def create_db ():
conn = sqlite3.connect(db_path)
c = conn.cursor()
sql = "CREATE TABLE IF NOT EXISTS log (\n\
id INTEGER PRIMARY KEY ASC,\n\
time INT NOT NULL,\n\
temp REAL,\n\
humid INT,\n\
dew_point INT,\n\
pressure REAL,\n\
temp_in REAL,\n\
humid_in INT,\n\
dew_point_in INT)"
c.execute(sql)
conn.commit()
conn.close()
print ("Creating database..")
create_db()
print ("Database OK")
print ("Copying from log file to database...")
log_to_db()
print ("Copy OK")
```
#### File: meteo/meteo_lcd/month_plot.py
```python
import dateutil.parser
import datetime # datetime and timedelta structures
import time
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
# Sqlite3 database
import sqlite3
# choose working dir
working_dir = "/var/www/html/"
data_dir = "/home/pi/meteo/"
hist_dir = working_dir + "hist/"
log_file_path = working_dir + "meteo.log"
db_path = data_dir + "meteo.db"
# Diagram file names
temp_out_diagram_file = "temp_out.png"
humid_out_diagram_file = "humid_out.png"
dew_point_out_diagram_file = "dew_out.png"
pressure_diagram_file = "pressure.png"
# Converts a string back the datetime structure
def getDateTimeFromISO8601String(s):
d = dateutil.parser.parse(s)
return d
def get_val_month_db(month, year):
if month < 1 or month > 12:
return;
if year < 2000 or year > 9999:
return;
conn = sqlite3.connect(db_path)
c = conn.cursor()
str_time_min = str(year).zfill(4) + "-" + str(month).zfill(2) + "-01T00:00:00"
if month == 12:
str_time_max = str(year+1).zfill(4) + "-" + str(1).zfill(2) + "-01T00:00:00"
else:
str_time_max = str(year).zfill(4) + "-" + str(month+1).zfill(2) + "-01T00:00:00"
#c.execute("SELECT strftime('%s', (?))", (str_time_min, ))
#int_time_min = (c.fetchone())[0]
#c.execute("SELECT strftime('%s', (?))", (str_time_max, ))
#int_time_max = (c.fetchone())[0]
int_time_min = int (time.mktime(getDateTimeFromISO8601String(str_time_min).timetuple()))
int_time_max = int (time.mktime(getDateTimeFromISO8601String(str_time_max).timetuple()))
try:
c.execute("SELECT time, temp, humid, dew_point, pressure FROM log WHERE time >= ? AND time < ?", (int_time_min, int_time_max))
rows = c.fetchall()
# for row in rows:
# print(row)
except Exception as e:
print("Error while get_val_month from db: " + str(e))
conn.close()
return rows
def plot_set_ax_fig (date, time, data, data_len, plot_type, ylabel, title, major_locator, minor_locator, file_name):
# This keeps chart nice-looking
ratio = 0.20
plot_size_inches = 40
fig, ax = plt.subplots()
fig.set_size_inches(plot_size_inches, plot_size_inches)
# Plot data:
ax.plot_date(time, data, plot_type)
ax.set_xlim(time[0], time[data_len])
ax.set(xlabel='', ylabel=ylabel, title=title + " " + str(date.month) + "." + str(date.year))
ax.grid()
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=(0)))
ax.xaxis.set_minor_locator(matplotlib.dates.HourLocator(byhour=(0,6,12,18)))
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%m-%d %H:%M"))
ax.yaxis.set_major_locator(MultipleLocator(major_locator))
ax.yaxis.set_minor_locator(MultipleLocator(minor_locator))
ax.tick_params(labeltop=False, labelright=True)
plt.gcf().autofmt_xdate()
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
#print((xmax-xmin)/(ymax-ymin))
ax.set_aspect(abs((xmax-xmin)/(ymax-ymin))*ratio) #, adjustable='box-forced')
fig.savefig(hist_dir + str(date.year) + "." + str(date.month) + "." + file_name, bbox_inches='tight')
plt.close()
# Draw a plot
def draw_plot_month():
# Open log file
lf = open(log_file_path, "r");
# Calculates the number of lines in the log file
num_lines = sum(1 for line in lf)
lf.seek(0)
# This keeps chart nice-looking
ratio = 0.25
plot_size_inches = 28
# Today
today = datetime.datetime.today()
if today.month == 1:
plot_date_begin = datetime.datetime(today.year-1, 12, 1)
else:
plot_date_begin = datetime.datetime(today.year, today.month-1, 1)
plot_date_end = datetime.datetime(today.year, today.month, 1)
# Helpers
j = 0
values_count = 0
lines_to_skip = num_lines - 25000
# This many entries should cover more than one month (31 days)
# This makes generating the plot take less time
if lines_to_skip < 0:
lines_to_skip = 0;
# Use every x (e.g. every second, or every third) value - this makes chart more 'smooth'
every_x = 1;
t = []; # time axis for plot
t_out = []; # temp out for plot
h_out = []; # humid out for plot
d_out = []; # dew point for plot
p_out = []; # pressure for plot
# From each line of log file create a pairs of meteo data (time, value)
for line in lf:
if lines_to_skip > 0:
lines_to_skip -= 1
continue
j += 1
if j >= every_x:
j = 0
else:
continue
# Parse line
time, temp_in, humid_in, dew_in, temp_out, humid_out, dew_out, pressure = str(line).split(";")
time = getDateTimeFromISO8601String(time)
if time < plot_date_begin:
continue
if time > plot_date_end:
continue
values_count += 1
# Append time for time axis
t.append(time)
# Append meteo data for their axis
t_out.append(float(temp_out))
h_out.append(float(humid_out))
d_out.append(float(dew_out))
p_out.append(float(pressure))
lf.close()
# draw plots for outside values: temperature, humidity, dew point, pressure
##############
# Temperature
plot_set_ax_fig(today, t, t_out, values_count-1, 'r-', 'Temperatura [C]', 'Wykres temperatury zewnetrznej', 1, 0.5, temp_out_diagram_file)
##############
# Humidity
plot_set_ax_fig(today, t, h_out, values_count-1, 'g-', 'Wilgotnosc wzgledna [%]', 'Wykres wilgotnosci wzglednej', 5, 1, humid_out_diagram_file)
##############
# Dew point
plot_set_ax_fig(today, t, d_out, values_count-1, 'b-', 'Temp. punktu rosy [C]', 'Wykres temperatury punktu rosy', 1, 1, dew_point_out_diagram_file)
##############
# Pressure
plot_set_ax_fig(today, t, p_out, values_count-1, 'm-', 'Cisnienie atm. [hPa]', 'Wykres cisnienia atmosferycznego', 2, 1, pressure_diagram_file)
return
# Draw a plot
def draw_plot_month_db():
# Today
today = datetime.datetime.today()
if today.month == 1:
plot_date_begin = datetime.datetime(today.year-1, 12, 1)
else:
plot_date_begin = datetime.datetime(today.year, today.month-1, 1)
t = []; # time axis for plot
t_out = []; # temp out for plot
h_out = []; # humid out for plot
d_out = []; # dew point for plot
p_out = []; # pressure for plot
rows = get_val_month_db(plot_date_begin.month, plot_date_begin.year) # month, and year
# From each row create pairs of meteo data (time, value)
values_count = len(rows)
# Row format: (time, temp, humid, dew_point, pressure)
for row in rows:
# Append time for time axis
t.append(datetime.datetime.fromtimestamp(row[0]))
# Append meteo data for their axis
t_out.append(row[1])
h_out.append(row[2])
d_out.append(row[3])
p_out.append(row[4])
# draw plots for outside values: temperature, humidity, dew point, pressure
##############
# Temperature
plot_set_ax_fig(plot_date_begin, t, t_out, values_count-1, 'r-', 'Temperatura [C]', 'Wykres temperatury zewnetrznej', 1, 0.5, temp_out_diagram_file)
##############
# Humidity
plot_set_ax_fig(plot_date_begin, t, h_out, values_count-1, 'g-', 'Wilgotnosc wzgledna [%]', 'Wykres wilgotnosci wzglednej', 5, 1, humid_out_diagram_file)
##############
# Dew point
plot_set_ax_fig(plot_date_begin, t, d_out, values_count-1, 'b-', 'Temp. punktu rosy [C]', 'Wykres temperatury punktu rosy', 1, 1, dew_point_out_diagram_file)
##############
# Pressure
plot_set_ax_fig(plot_date_begin, t, p_out, values_count-1, 'm-', 'Cisnienie atm. [hPa]', 'Wykres cisnienia atmosferycznego', 2, 1, pressure_diagram_file)
return
# Main program:
draw_plot_month_db();
#draw_plot_month();
``` |
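`get_val_month_db()` builds its [month start, next month start) window by formatting ISO strings and feeding them through `time.mktime`. A minimal sketch of the same boundary computation done directly with `datetime` objects, assuming local-time epochs as in the script:
```python
# Minimal sketch (assumption: epochs are local time, matching time.mktime in the script).
import time
import datetime


def month_epoch_bounds(month, year):
    """Return (t_min, t_max) Unix timestamps covering [month start, next month start)."""
    start = datetime.datetime(year, month, 1)
    # Roll over to January of the following year when month == 12.
    if month == 12:
        end = datetime.datetime(year + 1, 1, 1)
    else:
        end = datetime.datetime(year, month + 1, 1)
    return int(time.mktime(start.timetuple())), int(time.mktime(end.timetuple()))


print(month_epoch_bounds(12, 2018))
```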
{
"source": "jk-ozlabs/op-test-framework",
"score": 2
} |
#### File: jk-ozlabs/op-test-framework/OpTestConfiguration.py
```python
import common
from common.OpTestBMC import OpTestBMC, OpTestSMC
from common.OpTestFSP import OpTestFSP
from common.OpTestOpenBMC import OpTestOpenBMC
from common.OpTestQemu import OpTestQemu
from common.OpTestSystem import OpTestSystem, OpSystemState, OpTestFSPSystem, OpTestOpenBMCSystem, OpTestQemuSystem
from common.OpTestHost import OpTestHost
from common.OpTestIPMI import OpTestIPMI, OpTestSMCIPMI
from common.OpTestOpenBMC import HostManagement
from common.OpTestWeb import OpTestWeb
import argparse
import time
from datetime import datetime
import subprocess
import sys
import ConfigParser
import errno
import OpTestLogger
import logging
# Look at the addons dir for any additional OpTest supported types
# If new type was called Kona, the layout would be as follows
# op-test-framework/addons/Kona/
# /OpTestKona.py
# /OpTestKonaSystem.py
# /OpTestKonaSetup.py
#
# OpTestKona and OpTestKonaSystem follow the same format as the other supported type modules
# OpTestKonaSetup is unique for the addons and contains 2 helper functions:
# addBMCType - used to populate the choices list for --bmc-type
# createSystem - does creation of bmc and op_system objects
import importlib
import os
import addons
optAddons = dict() # Store all addons found. We'll loop through it a couple of times below
# Look at the top level of the addons for any directories and load their Setup modules
qemu_default = "qemu-system-ppc64"
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("-c", "--config-file", help="Configuration File",
metavar="FILE")
tgroup = parser.add_argument_group('Test',
'Tests to run')
tgroup.add_argument("--list-suites", action='store_true',
help="List available suites to run")
tgroup.add_argument("--run-suite", action='append',
help="Run a test suite(s)")
tgroup.add_argument("--run", action='append',
help="Run individual tests")
tgroup.add_argument("-f", "--failfast", action='store_true',
help="Stop on first failure")
tgroup.add_argument("--quiet", action='store_true', default=False,
help="Don't splat lots of things to the console")
parser.add_argument("--machine-state", help="Current machine state",
choices=['UNKNOWN', 'UNKNOWN_BAD', 'OFF', 'PETITBOOT',
'PETITBOOT_SHELL', 'OS'])
# Options to set the output directory and suffix on the output
parser.add_argument("-o", "--output", help="Output directory for test reports. Can also be set via OP_TEST_OUTPUT env variable.")
parser.add_argument("-l", "--logdir", help="Output directory for log files. Can also be set via OP_TEST_LOGDIR env variable.")
parser.add_argument("--suffix", help="Suffix to add to all reports. Default is current time.")
bmcgroup = parser.add_argument_group('BMC',
'Options for Service Processor')
# The default supported BMC choices in --bmc-type
bmcChoices = ['AMI', 'SMC', 'FSP', 'OpenBMC', 'qemu']
# Loop through any addons let it append the extra bmcChoices
for opt in optAddons:
bmcChoices = optAddons[opt].addBMCType(bmcChoices)
bmcgroup.add_argument("--bmc-type",
choices=bmcChoices,
help="Type of service processor")
bmcgroup.add_argument("--bmc-ip", help="BMC address")
bmcgroup.add_argument("--bmc-username", help="SSH username for BMC")
bmcgroup.add_argument("--bmc-password", help="SSH password for BMC")
bmcgroup.add_argument("--bmc-usernameipmi", help="IPMI username for BMC")
bmcgroup.add_argument("--bmc-passwordipmi", help="IPMI password for BMC")
bmcgroup.add_argument("--bmc-prompt", default="#",
help="Prompt for BMC ssh session")
bmcgroup.add_argument("--smc-presshipmicmd")
bmcgroup.add_argument("--qemu-binary", default=qemu_default,
help="[QEMU Only] qemu simulator binary")
hostgroup = parser.add_argument_group('Host', 'Installed OS information')
hostgroup.add_argument("--host-ip", help="Host address")
hostgroup.add_argument("--host-user", help="SSH username for Host")
hostgroup.add_argument("--host-password", help="SSH password for Host")
hostgroup.add_argument("--host-lspci", help="Known 'lspci -n -m' for host")
hostgroup.add_argument("--host-scratch-disk", help="A block device we can erase", default="")
hostgroup.add_argument("--host-prompt", default="#",
help="Prompt for Host SSH session")
hostinstgroup = parser.add_argument_group('Host OS Install', 'Options for installing an OS on the Host')
hostinstgroup.add_argument("--host-name", help="Host name", default="localhost")
hostinstgroup.add_argument("--host-gateway", help="Host Gateway", default="")
hostinstgroup.add_argument("--host-submask", help="Host Subnet Mask", default="255.255.255.0")
hostinstgroup.add_argument("--host-mac",
help="Host Mac address (used by OS installer to set up OS on the host)",
default="")
hostinstgroup.add_argument("--host-dns",
help="Host DNS Servers (used by OS installer to set up OS on the host)",
default="")
hostinstgroup.add_argument("--proxy", default="", help="proxy for the Host to access the internet. "
"Only needed for tests that install an OS")
hostcmdgroup = parser.add_argument_group('Host Run Commands', 'Options for Running custom commands on the Host')
hostcmdgroup.add_argument("--host-cmd", help="Command to run", default="")
hostcmdgroup.add_argument("--host-cmd-file", help="Commands to run from file", default="")
hostcmdgroup.add_argument("--host-cmd-timeout", help="Timeout for command", type=int, default=1000)
hostgroup.add_argument("--platform",
help="Platform (used for EnergyScale tests)",
choices=['unknown','habanero','firestone','garrison','firenze','p9dsu'])
osgroup = parser.add_argument_group('OS Images', 'OS Images to boot/install')
osgroup.add_argument("--os-cdrom", help="OS CD/DVD install image", default=None)
osgroup.add_argument("--os-repo", help="OS repo", default="")
imagegroup = parser.add_argument_group('Images', 'Firmware LIDs/images to flash')
imagegroup.add_argument("--bmc-image", help="BMC image to flash(*.tar in OpenBMC, *.bin in SMC)")
imagegroup.add_argument("--host-pnor", help="PNOR image to flash")
imagegroup.add_argument("--host-hpm", help="HPM image to flash")
imagegroup.add_argument("--host-img-url", help="URL to Host Firmware image to flash on FSP systems (Must be URL accessible petitboot shell on the host)")
imagegroup.add_argument("--flash-skiboot",
help="skiboot to use/flash. Depending on platform, may need to be xz compressed")
imagegroup.add_argument("--flash-kernel",
help="petitboot zImage.epapr to use/flash.")
imagegroup.add_argument("--flash-initramfs",
help="petitboot rootfs to use/flash. Not all platforms support this option")
imagegroup.add_argument("--flash-part", nargs=2, metavar=("PART name", "bin file"), action='append',
help="PNOR partition to flash, Ex: --flash-part OCC occ.bin")
imagegroup.add_argument("--noflash","--no-flash", action='store_true', default=False,
help="Even if images are specified, don't flash them")
imagegroup.add_argument("--only-flash", action='store_true', default=False,
help="Only flash, don't run any tests (even if specified)")
imagegroup.add_argument("--pflash",
help="pflash to copy to BMC (if needed)")
imagegroup.add_argument("--pupdate",
help="pupdate to flash PNOR for Supermicro systems")
stbgroup = parser.add_argument_group('STB', 'Secure and Trusted boot parameters')
stbgroup.add_argument("--un-signed-pnor", help="Unsigned or improperly signed PNOR")
stbgroup.add_argument("--signed-pnor", help="Properly signed PNOR image(imprint)")
stbgroup.add_argument("--signed-to-pnor", help="Properly signed PNOR image(imprint or production)")
stbgroup.add_argument("--key-transition-pnor", help="Key transition PNOR image")
stbgroup.add_argument("--test-container", nargs=2, metavar=("PART name", "bin file"), action='append',
help="PNOR partition container to flash, Ex: --test-container CAPP capp_unsigned.bin")
stbgroup.add_argument("--secure-mode", action='store_true', default=False, help="Secureboot mode")
stbgroup.add_argument("--trusted-mode", action='store_true', default=False, help="Trustedboot mode")
return parser
class OpTestConfiguration():
def __init__(self):
self.args = []
self.remaining_args = []
self.basedir = os.path.dirname(sys.argv[0])
for dir in (os.walk(os.path.join(self.basedir, 'addons')).next()[1]):
optAddons[dir] = importlib.import_module("addons." + dir + ".OpTest" + dir + "Setup")
return
def parse_args(self, argv=None):
conf_parser = argparse.ArgumentParser(add_help=False)
# We have two parsers so we have correct --help, we need -c in both
conf_parser.add_argument("-c", "--config-file", help="Configuration File",
metavar="FILE")
args , remaining_args = conf_parser.parse_known_args(argv)
defaults = {}
config = ConfigParser.SafeConfigParser()
config.read([os.path.expanduser("~/.op-test-framework.conf")])
if args.config_file:
if os.access(args.config_file, os.R_OK):
config.read([args.config_file])
else:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), args.config_file)
try:
defaults = dict(config.items('op-test'))
except ConfigParser.NoSectionError:
pass
parser = get_parser()
parser.set_defaults(**defaults)
if defaults.get('qemu_binary'):
qemu_default = defaults['qemu_binary']
parser.add_argument("--check-ssh-keys", action='store_true', default=False,
help="Check remote host keys when using SSH (auto-yes on new)")
parser.add_argument("--known-hosts-file",
help="Specify a custom known_hosts file")
self.args , self.remaining_args = parser.parse_known_args(remaining_args)
stateMap = { 'UNKNOWN' : OpSystemState.UNKNOWN,
'UNKNOWN_BAD' : OpSystemState.UNKNOWN_BAD,
'OFF' : OpSystemState.OFF,
'PETITBOOT' : OpSystemState.PETITBOOT,
'PETITBOOT_SHELL' : OpSystemState.PETITBOOT_SHELL,
'OS' : OpSystemState.OS
}
# Some quick sanity checking
if self.args.known_hosts_file and not self.args.check_ssh_keys:
parser.error("--known-hosts-file requires --check-ssh-keys")
# Setup some defaults for the output options
# Order of precedence
# 1. cmdline arg
# 2. env variable
# 3. default path
if (self.args.output):
outdir = self.args.output
elif ("OP_TEST_OUTPUT" in os.environ):
outdir = os.environ["OP_TEST_OUTPUT"]
else:
outdir = os.path.join(self.basedir, "test-reports")
self.outsuffix = "test-run-%s" % self.get_suffix()
outdir = os.path.join(outdir, self.outsuffix)
# Normalize the path to fully qualified and create if not there
self.output = os.path.abspath(outdir)
if (not os.path.exists(self.output)):
os.makedirs(self.output)
if (self.args.logdir):
logdir = self.args.logdir
elif ("OP_TEST_LOGDIR" in os.environ):
logdir = os.environ["OP_TEST_LOGDIR"]
else:
logdir = self.output
self.logdir = os.path.abspath(logdir)
if (not os.path.exists(self.logdir)):
os.makedirs(self.logdir)
print "Logs in: {}".format(self.logdir)
OpTestLogger.optest_logger_glob.logdir = self.logdir
# Grab the suffix, if not given use current time
self.outsuffix = self.get_suffix()
# set up where all the logs go
logfile = os.path.join(self.output, "%s.log" % self.outsuffix)
logcmd = "tee %s" % (logfile)
# we use 'cat -v' to convert control characters
# to something that won't affect the user's terminal
if self.args.quiet:
logcmd = logcmd + "> /dev/null"
# save sh_level for later refresh loggers
OpTestLogger.optest_logger_glob.sh_level = logging.ERROR
OpTestLogger.optest_logger_glob.sh.setLevel(logging.ERROR)
else:
logcmd = logcmd + "| sed -u -e 's/\\r$//g'|cat -v"
# save sh_level for later refresh loggers
OpTestLogger.optest_logger_glob.sh_level = logging.INFO
OpTestLogger.optest_logger_glob.sh.setLevel(logging.INFO)
OpTestLogger.optest_logger_glob.setUpLoggerFile(datetime.utcnow().strftime("%Y%m%d%H%M%S%f")+'.main.log')
OpTestLogger.optest_logger_glob.setUpLoggerDebugFile(datetime.utcnow().strftime("%Y%m%d%H%M%S%f")+'.debug.log')
OpTestLogger.optest_logger_glob.optest_logger.info('TestCase Log files: {}/*{}*'.format(self.output, self.outsuffix))
OpTestLogger.optest_logger_glob.optest_logger.info('StreamHandler setup {}'.format('quiet' if self.args.quiet else 'normal'))
self.logfile_proc = subprocess.Popen(logcmd,
stdin=subprocess.PIPE,
stderr=sys.stderr,
stdout=sys.stdout,
shell=True)
self.logfile = self.logfile_proc.stdin
if self.args.machine_state == None:
if self.args.bmc_type in ['qemu']:
# Force UNKNOWN_BAD so that we don't try to setup the console early
self.startState = OpSystemState.UNKNOWN_BAD
else:
self.startState = OpSystemState.UNKNOWN
else:
self.startState = stateMap[self.args.machine_state]
return self.args, self.remaining_args
def get_suffix(self):
# Grab the suffix, if not given use current time
if (self.args.suffix):
outsuffix = self.args.suffix
else:
outsuffix = time.strftime("%Y%m%d%H%M%S")
return outsuffix
def objs(self):
if self.args.list_suites:
return
host = OpTestHost(self.args.host_ip,
self.args.host_user,
self.args.host_password,
self.args.bmc_ip,
self.output,
scratch_disk=self.args.host_scratch_disk,
proxy=self.args.proxy,
check_ssh_keys=self.args.check_ssh_keys,
known_hosts_file=self.args.known_hosts_file)
if self.args.bmc_type in ['AMI', 'SMC']:
web = OpTestWeb(self.args.bmc_ip,
self.args.bmc_usernameipmi,
self.args.bmc_passwordipmi)
bmc = None
if self.args.bmc_type in ['AMI']:
ipmi = OpTestIPMI(self.args.bmc_ip,
self.args.bmc_usernameipmi,
self.args.bmc_passwordipmi,
host=host,
logfile=self.logfile,
)
bmc = OpTestBMC(ip=self.args.bmc_ip,
username=self.args.bmc_username,
password=<PASSWORD>,
logfile=self.logfile,
ipmi=ipmi,
web=web,
check_ssh_keys=self.args.check_ssh_keys,
known_hosts_file=self.args.known_hosts_file
)
elif self.args.bmc_type in ['SMC']:
ipmi = OpTestSMCIPMI(self.args.bmc_ip,
self.args.bmc_usernameipmi,
self.args.bmc_passwordipmi,
logfile=self.logfile,
host=host,
)
bmc = OpTestSMC(ip=self.args.bmc_ip,
username=self.args.bmc_username,
password=<PASSWORD>,
ipmi=ipmi,
web=web,
check_ssh_keys=self.args.check_ssh_keys,
known_hosts_file=self.args.known_hosts_file
)
self.op_system = OpTestSystem(
state=self.startState,
bmc=bmc,
host=host,
)
ipmi.set_system(self.op_system)
bmc.set_system(self.op_system)
elif self.args.bmc_type in ['FSP']:
ipmi = OpTestIPMI(self.args.bmc_ip,
self.args.bmc_usernameipmi,
self.args.bmc_passwordipmi,
host=host,
logfile=self.logfile)
bmc = OpTestFSP(self.args.bmc_ip,
self.args.bmc_username,
self.args.bmc_password,
ipmi=ipmi,
)
self.op_system = OpTestFSPSystem(
state=self.startState,
bmc=bmc,
host=host,
)
ipmi.set_system(self.op_system)
elif self.args.bmc_type in ['OpenBMC']:
ipmi = OpTestIPMI(self.args.bmc_ip,
self.args.bmc_usernameipmi,
self.args.bmc_passwordipmi,
host=host,
logfile=self.logfile)
rest_api = HostManagement(self.args.bmc_ip,
self.args.bmc_username,
self.args.bmc_password)
bmc = OpTestOpenBMC(self.args.bmc_ip,
self.args.bmc_username,
self.args.bmc_password,
logfile=self.logfile,
ipmi=ipmi, rest_api=rest_api,
check_ssh_keys=self.args.check_ssh_keys,
known_hosts_file=self.args.known_hosts_file)
self.op_system = OpTestOpenBMCSystem(
host=host,
bmc=bmc,
state=self.startState,
)
bmc.set_system(self.op_system)
elif self.args.bmc_type in ['qemu']:
print repr(self.args)
bmc = OpTestQemu(self.args.qemu_binary,
self.args.host_pnor,
self.args.flash_skiboot,
self.args.flash_kernel,
self.args.flash_initramfs,
cdrom=self.args.os_cdrom,
logfile=self.logfile,
hda=self.args.host_scratch_disk)
self.op_system = OpTestQemuSystem(host=host, bmc=bmc,
state=self.startState)
bmc.set_system(self.op_system)
# Check that the bmc_type exists in our loaded addons then create our objects
elif self.args.bmc_type in optAddons:
(bmc, self.op_system) = optAddons[self.args.bmc_type].createSystem(self, host)
else:
raise Exception("Unsupported BMC Type")
host.set_system(self.op_system)
return
def bmc(self):
return self.op_system.bmc
def system(self):
return self.op_system
def host(self):
return self.op_system.host()
def ipmi(self):
return self.op_system.ipmi()
def lspci_file(self):
return self.args.host_lspci
def platform(self):
return self.args.platform
global conf
```
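A minimal driver sketch of how this configuration class appears to be used; every option value below is a placeholder (not a real host or credential), and the real op-test entry point does this wiring elsewhere, so this is not meant to run against real hardware:
```python
# Hypothetical driver; option values are placeholders, not real hosts or credentials.
import OpTestConfiguration

conf = OpTestConfiguration.OpTestConfiguration()
args, remaining = conf.parse_args([
    "--bmc-type", "OpenBMC",
    "--bmc-ip", "10.0.0.2",
    "--bmc-username", "root",
    "--bmc-password", "secret",
    "--bmc-usernameipmi", "ADMIN",
    "--bmc-passwordipmi", "secret",
    "--host-ip", "10.0.0.3",
    "--host-user", "root",
    "--host-password", "secret",
])
conf.objs()              # builds the bmc/host/op_system objects for the chosen BMC type
system = conf.system()   # an OpTestOpenBMCSystem ready for goto_state() calls
```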
#### File: op-test-framework/testcases/I2C.py
```python
import time
import subprocess
import re
import sys
from common.OpTestConstants import OpTestConstants as BMC_CONST
import unittest
import OpTestConfiguration
from common.OpTestUtil import OpTestUtil
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed, KernelModuleNotLoaded, KernelConfigNotSet
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class I2CDetectUnsupported(Exception):
"""Asked to do i2c detect on a bus that doesn't support detection
"""
pass
class I2C():
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
self.util = OpTestUtil()
def set_up(self):
if self.test == "skiroot":
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
elif self.test == "host":
self.cv_SYSTEM.goto_state(OpSystemState.OS)
self.c = self.cv_HOST.get_ssh_connection()
else:
raise Exception("Unknown test type")
return self.c
def i2c_init(self):
self.cv_HOST.host_get_OS_Level()
# make sure the "i2c-tools" package is installed in order to run the test
# Check whether i2cdump, i2cdetect and hexdump commands are available on host
self.cv_HOST.host_check_command("i2cdump", "i2cdetect", "hexdump",
"i2cget", "i2cset")
# Get Kernel Version
l_kernel = self.cv_HOST.host_get_kernel_version()
mods = {"CONFIG_I2C_OPAL": "i2c_opal",
"CONFIG_I2C_CHARDEV": "i2c_dev",
"CONFIG_EEPROM_AT24": "at24"
}
try:
for (c,m) in mods.items():
self.cv_HOST.host_load_module_based_on_config(l_kernel, c, m)
except KernelConfigNotSet as ns:
self.assertTrue(False, str(ns))
except KernelModuleNotLoaded as km:
if km.module == "at24":
pass # We can fail if we don't load it, not all systems have it
else:
self.assertTrue(False, str(km))
# Get information of EEPROM chips
eeprom_info = self.host_get_info_of_eeprom_chips()
if self.cv_SYSTEM.has_host_accessible_eeprom():
self.assertNotEqual(eeprom_info, None)
else:
self.assertEqual(eeprom_info, None)
##
# @brief This function will return the list of installed i2c buses on host in two formats
# list-by number Ex: ["0","1","2",....]
# list-by-name Ex: ["i2c-0","i2c-1","i2c-2"....]
#
# @return l_list @type list: list of i2c buses by number
# l_list1 @type list: list of i2c buses by name
# or raise OpTestError if not able to get list of i2c buses
#
def host_get_list_of_i2c_buses(self):
self.c.run_command("i2cdetect -l")
l_res = self.c.run_command("i2cdetect -l | awk '{print $1}'")
# list by number Ex: ["0","1","2",....]
l_list = []
# list by name Ex: ["i2c-0","i2c-1"...]
l_list1 = []
for l_bus in l_res:
matchObj = re.search("(i2c)-(\d{1,})", l_bus)
if matchObj:
l_list.append(matchObj.group(2))
l_list1.append(l_bus)
else:
pass
return l_list, l_list1
##
# @brief It will return list with elements having pairs of eeprom chip addresses and
# corresponding i2c bus where the chip is attached. This information is obtained
# through the sysfs interface. The format is ["0 0x50","0 0x51","1 0x51","1 0x52"....]
#
# @return l_chips @type list: list having pairs of i2c bus number and eeprom chip address.
# else raise OpTestError
#
def host_get_list_of_eeprom_chips(self):
l_res = self.c.run_command("find /sys/ -name eeprom")
l_chips = []
for l_line in l_res:
if l_line.__contains__("eeprom"):
matchObj = re.search("/(\d{1,}-\d{4})/eeprom", l_line)
if matchObj:
l_line = matchObj.group(1)
i_args = (l_line.replace("-", " "))
log.debug(i_args)
else:
continue
i_args = re.sub(" 00", " 0x", i_args)
l_chips.append(i_args)
log.debug(i_args)
return l_chips
##
# @brief This function will get information of EEPROM chips attached to the i2c buses
#
# @return l_res @type string: return EEPROM chips information
# else raise OpTestError
#
def host_get_info_of_eeprom_chips(self):
log.debug("Getting the information of EEPROM chips")
l_res = None
try:
l_res = self.c.run_command("cat /sys/bus/i2c/drivers/at24/*/name")
except CommandFailed as cf:
l_res = self.c.run_command("dmesg -C")
try:
self.c.run_command("rmmod at24")
self.cv_HOST.host_load_module("at24")
l_res = self.c.run_command("cat /sys/bus/i2c/drivers/at24/*/name")
except CommandFailed as cf:
pass
except KernelModuleNotLoaded as km:
pass
return l_res
##
# @brief The hexdump utility is used to display the specified files.
# This function will display in both ASCII+hexadecimal format.
#
# @param i_dev @type string: this is the file used as a input to hexdump for display info
# Example file:"/sys/devices/platform/3fc0000000000.xscom:i2cm@a0000:i2c-bus@1/i2c-3/3-0050/eeprom"
#
# @return BMC_CONST.FW_SUCCESS or raise OpTestError
#
#
def host_hexdump(self, i_dev):
l_res = self.c.run_command("hexdump -C %s" % i_dev)
##
# @brief This function query's the i2c bus for devices attached to it.
# i2cdetect is a utility to scan an I2C bus for devices
#
# @param i_bus @type string: i2c bus number
#
# @return BMC_CONST.FW_SUCCESS or raise OpTestError
#
def query_i2c_bus(self, i_bus):
rc = 0
log.debug("Querying the i2c bus %s for devices attached to it" % i_bus)
try:
l_res = self.c.run_command("i2cdetect -y %i" % int(i_bus))
except CommandFailed as cf:
rc = cf.exitcode
if rc != 0:
try:
l_res = self.c.run_command("i2cdetect -F %i|egrep '(Send|Receive) Bytes'|grep yes" % int(i_bus))
except CommandFailed as cf:
log.debug("i2c bus %i doesn't support query" % int(i_bus))
raise I2CDetectUnsupported;
try:
l_res = self.c.run_command("i2cdetect -y -r %i" % int(i_bus))
except CommandFailed as cf:
self.assertEqual(cf.exitcode, 0, "Querying the i2cbus for devices failed:%s\n%s" % (i_bus,str(cf)))
##
# @brief This i2cdump function takes arguments in pair of a string like "i2cbus address".
# i2cbus indicates the number or name of the I2C bus to be scanned. This number should
# correspond to one of the busses listed by i2cdetect -l. address indicates
# the address to be scanned on that bus, and is an integer between 0x03 and 0x77
# i2cdump is a program to examine registers visible through the I2C bus
#
# @param i_args @type string: this is the argument to i2cdump utility
# args are in the form of "i2c-bus-number eeprom-chip-address"
# Ex: "0 0x51","3 0x52" ....etc
#
# @return BMC_CONST.FW_SUCCESS or raise OpTestError
#
def i2c_dump(self, i_args):
try:
l_res = self.c.run_command("i2cdump -f -y %s" % i_args)
except CommandFailed as cf:
self.assertEqual(cf.exitcode, 0, "i2cdump failed for the device: %s\n%s" % (i_args, str(cf)))
##
# @brief This function i2cget read from I2C/SMBus chip registers
# command usage: i2cget [-f] [-y] i2cbus chip-address [data-address [mode]]
#
# @param i_args @type string: this is the argument to i2cget utility
# args are in the form of "i2c-bus-number eeprom-chip-address"
# Ex: "0 0x51","3 0x52" ....etc
# @param i_addr @type string: this is the data-address on chip, from where data will be read
# Ex: "0x00","0x10","0x20"...
#
# @return l_res @type string: data present on data-address or raise OpTestError
#
def i2c_get(self, i_args, i_addr):
try:
l_res = self.c.run_command("i2cget -f -y %s %s" % (i_args, i_addr))
except CommandFailed as cf:
self.assertEqual(cf.exitcode, 0, "i2cget: Getting data from address %s failed: %s" % (i_addr, str(cf)))
##
# @brief This function i2cset will be used for setting I2C registers
# command usage: i2cset [-f] [-y] [-m mask] [-r] i2cbus chip-address data-address [value] ... [mode]
#
# @param i_args @type string: this is the argument to i2cset utility
# args are in the form of "i2c-bus-number eeprom-chip-address"
# Ex: "0 0x51","3 0x52" ....etc
# @param i_addr @type string: this is the data-address on chip, where data will be set
# Ex: "0x00","0x10","0x20"...
# @param i_val @type string: this is the value which will be set into data-address i_addr
#
# @return BMC_CONST.FW_SUCCESS or raise OpTestError
#
def i2c_set(self, i_args, i_addr, i_val):
try:
l_res = self.c.run_command("i2cset -f -y %s %s %s" % (i_args, i_addr, i_val))
except CommandFailed as cf:
self.assertEqual(cf.exitcode, 0, "i2cset: Setting the data to a address %s failed: %s" % (i_addr, str(cf)))
class FullI2C(I2C, unittest.TestCase):
BASIC_TEST = False
def setUp(self):
self.test = "host"
super(FullI2C, self).setUp()
##
# @brief This function has following test steps
# 1. Getting host information(OS and kernel info)
# 2. Checking whether the required utilities are present on the host
# 3. Loading the necessary modules to test I2C device driver functionalites
# (i2c_dev, i2c_opal and at24)
# 4. Getting the list of i2c buses
# 5. Querying the i2c bus for devices
# 6. Getting the list of i2c buses and eeprom chip addresses
# 7. Accessing the registers visible through the i2cbus using i2cdump utility
# 8. Listing the i2c adapter contents and i2c bus entries to make sure sysfs entries
# are created for each bus.
# 9. Testing i2cget functionality for limited samples
# Avoiding i2cset functionality, it may damage the system.
def runTest(self):
self.set_up()
if self.test == "host":
self.i2c_init()
# Get list of i2c buses available on host,
# l_list=["0","1"....]
# l_list1=["i2c-0","i2c-1","i2c-2"....]
l_list, l_list1 = self.host_get_list_of_i2c_buses()
if self.BASIC_TEST:
# For the basic test, just go for the first of everything.
l_list = l_list[:1]
l_list1 = l_list1[:1]
# Scanning i2c bus for devices attached to it.
for l_bus in l_list:
try:
self.query_i2c_bus(l_bus)
except I2CDetectUnsupported:
log.debug("Unsupported i2cdetect on bus %s" % l_bus)
# Get list of pairs of i2c bus and EEPROM device addresses in the host
l_chips = self.host_get_list_of_eeprom_chips()
if self.cv_SYSTEM.has_host_accessible_eeprom():
self.assertNotEqual(len(l_chips), 0, "No EEPROMs detected, while OpTestSystem says there should be")
for l_args in l_chips:
# Accessing the registers visible through the i2cbus using i2cdump utility
# l_args format: "0 0x51","1 0x53",.....etc
self.i2c_dump(l_args)
else:
self.assertEqual(len(l_chips), 0, "Detected EEPROM where OpTestSystem said there should be none")
if self.cv_SYSTEM.has_host_accessible_eeprom():
self.assertGreater(len(l_chips), 0, "Expected to find EEPROM chips")
# Currently testing only getting the data from a data address,
# avoiding setting data.
# Only four samples are gathered to check whether reading eeprom
# data is working or not.
# Setting eeprom data is dangerous and may make your system UNBOOTABLE
l_addrs = ["0x00", "0x10", "0x20", "0x30", "0x40", "0x50", "0x60", "0x70", "0x80", "0x90", "0xa0", "0xb0", "0xc0", "0xd0", "0xe0", "0xf0"]
for l_addr in l_addrs:
l_val = self.i2c_get(l_chips[1], l_addr)
# self.i2c_set(l_list2[1], l_addr, "0x50")
if self.test == "skiroot":
return
# list i2c adapter contents
try:
l_res = self.c.run_command("ls --color=never -l /sys/class/i2c-adapter")
except CommandFailed as cf:
self.assertEqual(cf.exitcode, 0, str(cf))
# Checking the sysfs entry of each i2c bus
for l_bus in l_list1:
try:
l_res = self.c.run_command("ls --color=never -l /sys/class/i2c-adapter/%s" % l_bus)
except CommandFailed as cf:
self.assertEqual(cf.exitcode, 0, str(cf))
return BMC_CONST.FW_SUCCESS
class BasicI2C(FullI2C, unittest.TestCase):
BASIC_TEST = True
def setUp(self):
self.test = "host"
super(BasicI2C, self).setUp()
class BasicSkirootI2C(FullI2C, unittest.TestCase):
BASIC_TEST = True
def setUp(self):
self.test = "skiroot"
super(FullI2C, self).setUp()
``` |
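`host_get_list_of_i2c_buses()` scrapes the first column of `i2cdetect -l` output with a regular expression. A minimal standalone sketch of that parsing step, assuming the sample lines below mimic typical `i2cdetect -l` output:
```python
# Minimal parsing sketch (assumption: the sample lines mimic typical `i2cdetect -l` output).
import re

sample_output = [
    "i2c-0\ti2c       \tOPAL I2C port 0:0  \tI2C adapter",
    "i2c-3\ti2c       \tOPAL I2C port 8:2  \tI2C adapter",
]

bus_numbers, bus_names = [], []
for line in sample_output:
    first_field = line.split()[0]
    match = re.search(r"(i2c)-(\d{1,})", first_field)
    if match:
        bus_numbers.append(match.group(2))   # e.g. "0"
        bus_names.append(first_field)        # e.g. "i2c-0"

print(bus_numbers, bus_names)  # ['0', '3'] ['i2c-0', 'i2c-3']
```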
{
"source": "JKP0/PG_Academic_Projects",
"score": 3
} |
#### File: ETBDC/Learn_Hadoop/C190118a.py
```python
from mrjob.job import MRJob
#from statistics import mean
class MRJLRV(MRJob):
#def steps(self):
#return [
#MRStep(mapper=self.mapper1, #reducer=self.reducer1),
#MRStep(mapper=self.mapper2, reducer=self.reducer2),
#MRStep(mapper=self.mapper3, reducer=self.reducer3),
#]
def mapper(self, key, line):
words = line.split(' ')
for word in words:
if (word == 'rkmveri'):
yield (line, ': '+word),1
if('rkmveri' in words):
yield 'a', 1
else:
yield 'b', 1
def reducer(self, key, word):
#f=list(count_one)
#yield 'a', len(word)*sum(f)
#yield 'b', sum(f)
#yield key, word
k=key
if (k == 'a'):
w=list(word)
yield '# of relevant lines is : ', sum(w)
elif (k == 'b'):
w=list(word)
yield '# of irrelevant lines is : ', sum(w)
else:
yield key
if __name__ == '__main__':
MRJLRV.run()
```
#### File: ETBDC/Learn_Hadoop/C190125a1.py
```python
from mrjob.step import MRStep
from mrjob.job import MRJob
class MRMRBSK(MRJob):
def steps(self):
return [
MRStep(mapper=self.mapper1, reducer=self.reducer1),
MRStep(mapper=self.mapper2, reducer=self.reducer2),
]
def mapper1(self, key, line):
words = line.split(',')
yield words[0], words[1]
def reducer1(self, key, line):
yield 1, (key, len(list(line)))
def mapper2(self, key, line):
yield key, line
def reducer2(self, key, line):
ls=list(line)
#ma=max([y for x, y in ls])
yield next((x,y) for x, y in ls if(y==max([y for x, y in ls])))
#yield next((x,y) for x, y in ls if(y==ma))
#yield ((x, y) for x, y in ls if(y==ma))
#for x, y in ls:
#if(y==ma):
#yield x, y
if __name__ == '__main__':
MRMRBSK.run()
```
#### File: ETBDC/Learn_Hadoop/MRJWAV.py
```python
from mrjob.job import MRJob
class MyMRWC(MRJob):
def mapper(self, key, line):
words = line.split(' ')
for word in words:
#if(len(word)>3):
yield word, 1
#yield len(word),1
self.increment_counter('word','no of words',1)
def reducer(self, word, count_one):
#if len(word) >3:
f=list(count_one)
#t+=sum(f)
#s+=len(word)*sum(f)
yield len(word)*sum(f) , sum(f)
#self.increment_counter('word','no of unique words',1)
if __name__ == '__main__':
MyMRWC.run()
```
#### File: ETBDC/Learn_Hadoop/shopping_basket5.py
```python
from mrjob.job import MRJob
from mrjob.step import MRStep
class ShoppingBasket(MRJob):
def steps(self):
return [
MRStep(mapper=self.mapper_parse,
reducer=self.reducer_groupby_userID),
MRStep(mapper=self.mapper_invert,
reducer=self.reducer_max)
]
def mapper_parse(self, key, line):
(userID, productID) = line.split(',')
yield userID, 1
def reducer_groupby_userID(self, userID, occurances):
yield int(userID), sum(occurances)
def mapper_invert(self, userID, total):
yield 1,(total,userID)
def reducer_max(self, key,user_count):
yield max(user_count)
if __name__ == '__main__':
ShoppingBasket.run()
```
#### File: PG_Academic_Projects/Statistical_Study/PYHFinalStartUpFundingIndia.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_csv("/home/sysadm/Desktop/JKP BSPro/Used_startup_funding.csv")
#***Basic/General/Normal Information
data.head()
data.dtypes
data.info()
data.describe()
#Well, this doesn't give any clear picture about this column, so we can simply ignore
#this feature for now
#One thing we can notice is that we have a date column which can be very useful in EDA, so
#let's do some feature engineering on it
##*** Data Modification
def temp(v):
try:
#pd.to_datetime(v)
return(pd.to_datetime(v.replace('.','/').replace('//','/')))
except:
print(v)
data["Date"].apply(lambda v: temp(v))
date=data["Date"].apply(lambda v: temp(v))
data["month_year"]=date.dt.strftime("%Y-%m")
data["Year"]=date.dt.strftime("%Y")
'''data['Month'] = data['Date'].dt.month
data['Year'] = data['Date'].dt.year
data['MY'] = (pd.to_datetime(data['Date'],format='%d/%m/%Y').dt.year*100)+(
pd.to_datetime(data['Date'],format='%d/%m/%Y').dt.month) # Works Fine'''
data['AmountInUSD']
data['amount']=data['AmountInUSD'].apply(lambda x:float(str(x).replace(",","")))
data['amount']
#data["amount"]=data["AmountInUSD"].str.replace(',', '').astype(float)
#print(data[["Date","month_year","Year","amount"]].head())
data[["Date","month_year","Year","amount"]]
#get list of numeric and categorical columns
get_numeric_cols= lambda df:list(df._get_numeric_data().columns)
num_cols=get_numeric_cols(data)
num_cols
cat_cols=np.setdiff1d(data.columns,num_cols)
cat_cols
#Check the data quality – Missing values, Outlier
pd.isnull(data).sum()
print(data['Remarks'])
print(data['Remarks'].unique())
data.isnull().any()
data['Remarks'].fillna(0, inplace=False)
ct=0
for i in data['Remarks']:
if pd.isnull(i):
ct=ct+1
print('Total no. of NaN cells in Remarks column is ',ct)
print('Dimension of data is',data.shape)
#ct=data['Remarks'].isnull().sum()
RVac=(ct*100)/len(data)
print('Nan_cells_count_percentage in Remark column is ',RVac)
ct0=data['IndustryVertical'].isnull().sum()
RVac0=(ct0*100)/len(data)
print('Nan_cells_count_percentage in IndustryVertical column is ',RVac0)
#data=data.drop(['Remarks'], axis=1)
#data=data[data['Remarks'] != 0]
data['StartupName'].unique().shape
data['IndustryVertical'].unique().shape
data['SubVertical'].unique().shape
data['CityLocation'].unique().shape
data['InvestorsName'].unique().shape
data['InvestmentType'].unique().shape
data['AmountInUSD'].unique().shape
len(data['AmountInUSD'].unique())*100/len(data)
# percentage of null values for all the columns.
pd.isnull(data).sum()/data.shape[0]*100
#So here we can see that 82.33% of the data has NaN values, so we can ignore this
#column for our prediction
#The "Remarks" column has the highest number of missing values, which is useless for now
# We cannot analyse by taking null values out of account
# As we have made a lot of changes, start from the basics again
data.head()
data.dtypes
data.info()
data.describe()
data["amount"].plot.box()
#also, anything above the 98th percentile or below the 2nd percentile can be treated as an outlier.
print(data["amount"].quantile(0.02))
print(data["amount"].quantile(0.98))
#Here anything below 40000 USD and anything above 100000000 USD is considered an outlier
#*** Univariate, bivariate, multivariate
#Apply EDA techniques to identify what influences investment amount
#EDA (Exploratory Data Analysis)
# Univariate
yearfreq = data['Year'].value_counts().plot.bar()
month_year = data['month_year'].value_counts().plot.bar(figsize=(12,4))
data.groupby(["month_year"]).size().plot.bar(figsize=(12,5), color="steelblue")
x=data["InvestmentType"].value_counts()/data.shape[0]*100
x.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Investment Type', fontsize=12)
plt.ylabel('Shaped Count', fontsize=12)
x0=data["IndustryVertical"].value_counts()/data.shape[0]*100
x0.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Industry Vertical', fontsize=12)
plt.ylabel('Shaped Count', fontsize=12)
dt_amo=data['IndustryVertical'].groupby([data.IndustryVertical]).agg(
'count').nlargest(30)
dt_amo.plot(kind="bar",figsize=(16,9),grid=True,title="Industry wise distribution",
cmap='rainbow')
data["IndustryVertical"].value_counts().head(20)
data['IndustryVertical'].isnull().sum()
industryvertical = []
for indver in data['IndustryVertical']:
for inv in str(indver).split(","):
if inv != "":
industryvertical.append(inv.strip().lower())
StartUpIndvers = pd.Series(industryvertical).value_counts()#[:20]
StartUpIndvers
for i in range(len(industryvertical)):
# entries were stripped and lower-cased above, so the e-commerce spelling variants
# are already unified; only the remaining synonyms still need mapping
if industryvertical[i] == 'food & beverages':
industryvertical[i] = 'food & beverage'
if industryvertical[i] == 'food delivery platform':
industryvertical[i] = 'online food delivery'
#Still we have not covered all the redundancy
StartUpIndvers0 = pd.Series(industryvertical).value_counts()#[:20]
StartUpIndvers0.head(20)
StartUpIndvers0.head(20).plot(kind="bar",figsize=(16,9),grid=True,
title="Industry wise distribution",cmap='rainbow')
x1=data["SubVertical"].value_counts()/data.shape[0]*100
x1.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Industry SubVertical', fontsize=12)
plt.ylabel('SubVerticalCount', fontsize=12)
x2=data["SubVertical"].value_counts()
x2.head(20).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Industry SubVertical', fontsize=12)
plt.ylabel('Shaped Count', fontsize=12)
#online pharmacy has highest investments
data["SubVertical"].value_counts().head(20)
data['SubVertical'].isnull().sum()
industrysubvertical = []
for indsver in data['SubVertical']:
for insv in str(indsver).split(","):
if insv != "":
industrysubvertical.append(insv.strip().lower())
#else :
#investornames.append('unknown'.lower())
StartUpIndsvers = pd.Series(industrysubvertical).value_counts()#[:20]
StartUpIndsvers.isnull().sum()
#Still we have not covered all the redundancy
StartUpIndsvers.head(20).plot(kind="bar",figsize=(16,9),grid=True,
title="Industry wise distribution",cmap='rainbow')
plt.xlabel('Industry SubVertical', fontsize=12)
plt.ylabel('Count', fontsize=12)
data['CityLocation'].value_counts().head(20)
data_ct=data['CityLocation'].groupby([data.CityLocation]).agg('count')
data_ct.plot(kind="bar",figsize=(16,9),grid=True,title="City wise distribution",
cmap='rainbow')
x3=data["CityLocation"].value_counts()/data.shape[0]*100
x3.head(20).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('City Location', fontsize=12)
plt.ylabel('Shaped Count For The City', fontsize=12)
x4=data["CityLocation"].value_counts()
x4.plot.bar(figsize=(12,5), color="steelblue") #x1.head(20)
plt.xlabel('City Location', fontsize=12)
plt.ylabel('Count For The City', fontsize=12)
x5=data["InvestorsName"].value_counts()#/data.shape[0]*100
x5.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Investors Name', fontsize=12)
plt.ylabel('ShapedCount', fontsize=12)
dt_inv=data['InvestorsName'].groupby([data.InvestorsName]).agg('count').nlargest(10)
dt_inv.plot(kind="bar",figsize=(12,9),grid=True,title="Industry wise distribution",
cmap='rainbow')
data["InvestorsName"].value_counts().head(30)
data['InvestorsName'].isnull().sum()
investornames = []
for investor in data['InvestorsName']:
for inv in str(investor).split(","):
if inv != "":
investornames.append(inv.strip().lower())
else :
investornames.append('unknown'.lower())
StartUpInvestors = pd.Series(investornames).value_counts()[:20]
StartUpInvestors#.isnull().sum()
for i in range(len(investornames)):
if investornames[i] =='undisclosed investor':
investornames[i]='undisclosed investors'
if investornames[i] =='undisclosed':
investornames[i]='undisclosed investors'
#Still we have not covered all the undisclosed entries
StartUpInvestors0 = pd.Series(investornames).value_counts()#[:20]
StartUpInvestors0.head(20)
StartUpInvestors0.head(20).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('InvestorsName', fontsize=12)
plt.ylabel('Count', fontsize=12)
StartUpInvestors0.head(10).plot(kind="pie",figsize=(12,9),
title="Industry wise distribution",
autopct='%1.1f%%', startangle=90,cmap='rainbow')
plt.ylabel('Count/Freq', fontsize=12)
#Bivariet analysis
data.groupby(["Year"])["amount"].sum().plot(kind="pie",figsize=(12,9),
title="Industry wise distribution",
autopct='%1.1f%%', startangle=90,cmap='rainbow')
#### occasionally raises a KeyError, but not consistently
data.groupby(["month_year"])["amount"].mean().plot.bar(figsize=(12,5), color="steelblue")
plt.ylabel('Count Of Investment', fontsize=12)
#2 months have the highest average investment: March and May of 2017 have the highest investments.
#The lowest investment was seen in the month of October 2017
X6=data.groupby('StartupName')['amount'].sum().sort_values(ascending=False)
X6.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('StatrUp Name', fontsize=12)
plt.ylabel('TotalAmountGotInvestedIn_c_USD ', fontsize=12)
##Paytm and Flipkart are the 2 startups with highest investments put in to them
X7=data.groupby('StartupName')['amount'].size().sort_values(ascending=False)
X7.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('StatrUp Name', fontsize=12)
plt.ylabel('NumberOfInvestmentGot', fontsize=12)
##Swiggy is the company which received the highest number of investments, i.e.,
#7 investments
x=data.groupby(["IndustryVertical"])["amount"].mean().sort_values(
ascending=False).head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('IndustryVertical', fontsize=12)
plt.ylabel('AverageAmountGotInvestedIn_c_USDperInvestor', fontsize=12)
##from the graph we can see that the average investment in online
#marketplaces is higher
x=data.groupby(["InvestorsName"])["amount"].sum().sort_values(
ascending=False).head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('InvestorsName', fontsize=12)
plt.ylabel('TotalAmountHvInvestedIn_c_USD', fontsize=12)
#Soft bank is the highest investor group in terms of sum invested
#***Hypothesis Testing
from scipy.stats import chi2_contingency
def df(df,cat_colS):
I = [0,2,6,8,10]
cat_col1=np.delete(cat_colS, I).tolist() # removed remarks,date,amountinusd(kept amount)
#columns
t=[]
t1=[]
for i in range(len(cat_col1)):
for j in range(i + 1, len(cat_col1)):
obsv=df.groupby([cat_col1[i],cat_col1[j]]).size()
obsv.name="Freq"
obsv=obsv.reset_index()
obsv=obsv.pivot_table(index=cat_col1[i],columns=cat_col1[j],values="Freq")
stat, p, dof, exp =chi2_contingency(obsv.fillna(0).values)
if p< 0.05:
t1= (cat_col1[i],cat_col1[j])
t.append(t1)
return(t)
a=df(data,cat_cols)
for b in a:
print( "%s is dependent on %s" %(b[0],b[1]))
####
#Summary:
###AmountInUSD has many missing values; about 35% of the data is missing.
## subvertical also has many missing values
#Remarks has a lot of missing values --> we can ignore/drop the remarks column from the analysis
#there are a lot of outliers in the AmountInUSD column.
#Year 2016 had maximum number of investments
#Month July 2016 followed by January of 2016 has large number of funding.
##Seed Funding and Private Equity are the most preferable type of funding
#ConsumerInternet is the industry vertical with the highest number of investments, unlike
#Technology
##Bangalore has the highest number of investments
#Large number of the startup's funding are from undisclosed source
## ratan tata can be considered a special case, since all others are investment groups and he
#is an individual investing
##online pharmacy has highest investments
# 2 months have the highest average investment: March and May of 2017 have the highest investments.
#The lowest investment was seen in the month of October 2017
##Paytm and Flipkart are the 2 startups with the highest investments put into them
##Swiggy is the company which received the highest number of investments, i.e., 7 investments
## from the graph we can see that the average investment in online marketplaces is higher
#Soft bank is the highest investor group in terms of sum invested
#Investment type and the Year column influence the amount.
lstmsg=[10,20,10,10,20,10,20]
msg=['T','H','A','N','K','S','!']
plt.figure(figsize=(12,12))
colors=['red','green','orange']
plt.pie(lstmsg, labels=msg,autopct='THANKS!',startangle=310) #colors=colors,
plt.title('Thanks',color = 'blue',fontsize = 15)
plt.xlabel('The END', fontsize=12)
plt.show()
``` |
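The `df()` helper above builds a contingency table for each pair of categorical columns and keeps the pair when the chi-square p-value falls below 0.05. A minimal self-contained sketch of that test on toy data (the column values below are made up for illustration):
```python
# Minimal sketch of the chi-square independence test used above (toy, made-up data).
import pandas as pd
from scipy.stats import chi2_contingency

toy = pd.DataFrame({
    "InvestmentType": ["Seed", "Seed", "Private Equity", "Seed", "Private Equity", "Seed"],
    "CityLocation": ["Bangalore", "Mumbai", "Bangalore", "Bangalore", "Mumbai", "Mumbai"],
})

# Contingency table of observed frequencies for the two categorical columns.
observed = pd.crosstab(toy["InvestmentType"], toy["CityLocation"])
stat, p, dof, expected = chi2_contingency(observed.values)
if p < 0.05:
    print("InvestmentType and CityLocation look dependent (p = %.3f)" % p)
else:
    print("No evidence of dependence (p = %.3f)" % p)
```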
{
"source": "jkp85/insideview",
"score": 3
} |
#### File: insideview/insideview/custom.py
```python
from tapioca.tapioca import TapiocaClient, TapiocaClientExecutor
from urllib.parse import parse_qsl
class CustomTapiocaClient(TapiocaClient):
"""Wrappers that use TapiocaClient for InsideView authentication and
requests."""
def _wrap_in_tapioca(self, data, *args, **kwargs):
request_kwargs = kwargs.pop('request_kwargs', self._request_kwargs)
return CustomTapiocaClient(self._instatiate_api(), data=data,
api_params=self._api_params,
request_kwargs=request_kwargs,
refresh_token_by_default=self._refresh_token_default,
refresh_data=self._refresh_data,
session=self._session,
*args, **kwargs)
def _wrap_in_tapioca_executor(self, data, *args, **kwargs):
request_kwargs = kwargs.pop('request_kwargs', self._request_kwargs)
return CustomTapiocaClientExecutor(self._instatiate_api(), data=data,
api_params=self._api_params,
request_kwargs=request_kwargs,
refresh_token_by_default=self._refresh_token_default,
refresh_data=self._refresh_data,
session=self._session,
*args, **kwargs)
class CustomTapiocaClientExecutor(CustomTapiocaClient, TapiocaClientExecutor):
def pages(self, max_pages=None, max_items=None, **kwargs):
executor = self
iterator_list = executor._get_iterator_list()
page_count = 0
item_count = 0
while iterator_list:
if self._reached_max_limits(page_count, item_count, max_pages,
max_items):
break
for item in iterator_list:
if self._reached_max_limits(page_count, item_count, max_pages,
max_items):
break
yield self._wrap_in_tapioca(item)
item_count += 1
page_count += 1
next_request_kwargs = executor._get_iterator_next_request_kwargs()
if not next_request_kwargs:
break
req = self._response.request
if req.method == 'POST':
body = dict(parse_qsl(req.body))
body.update(next_request_kwargs)
response = self.post(data=body, url=req.url)
else:
response = self.get(**next_request_kwargs)
executor = response()
iterator_list = executor._get_iterator_list()
class TapiocaInstantiator:
def __init__(self, adapter_class):
self.adapter_class = adapter_class
def __call__(self, serializer_class=None, session=None, **kwargs):
refresh_token_default = kwargs.pop('refresh_token_by_default', False)
return CustomTapiocaClient(
self.adapter_class(serializer_class=serializer_class),
api_params=kwargs, refresh_token_by_default=refresh_token_default,
session=session)
```
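A minimal sketch of how the custom `pages()` generator appears intended to be consumed, assuming the package exposes the `Insideview` wrapper built with `TapiocaInstantiator` (as the test module imports it) and that `company_search` stands in for a real resource name:
```python
# Hypothetical consumption sketch; the resource name and parameters are placeholders.
from tapioca_insideview import Insideview

api = Insideview(access_token="...")                           # token elided
resp = api.company_search().post(data={"resultsPerPage": 50})  # returns a wrapped response
for item in resp().pages(max_pages=3):                         # pages() follows page/resultsPerPage
    print(item().data)
```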
#### File: insideview/.ipynb_checkpoints/tapioca_insideview-checkpoint.py
```python
from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from tapioca.tapioca import TapiocaClient
from requests.auth import AuthBase
from .resource_mapping import RESOURCE_MAPPING
class InsideViewAuth(AuthBase):
def __init__(self, access_token):
self.access_token = access_token
def __call__(self, r):
r.headers['accessToken'] = self.access_token
return r
class CustomTapiocaClient(TapiocaClient):
def pages(self, max_pages=None, max_items=None, **kwargs):
executor = self
iterator_list = executor._get_iterator_list()
page_count = 0
item_count = 0
while iterator_list:
if self._reached_max_limits(page_count, item_count, max_pages,
max_items):
break
for item in iterator_list:
if self._reached_max_limits(page_count, item_count, max_pages,
max_items):
break
yield self._wrap_in_tapioca(item)
item_count += 1
page_count += 1
next_request_kwargs = executor._get_iterator_next_request_kwargs()
if not next_request_kwargs:
break
response = self.get(**next_request_kwargs)
executor = response()
iterator_list = executor._get_iterator_list()
class TapiocaInstantiator:
def __init__(self, adapter_class):
self.adapter_class = adapter_class
def __call__(self, serializer_class=None, session=None, **kwargs):
refresh_token_default = kwargs.pop('refresh_token_by_default', False)
return CustomTapiocaClient(
self.adapter_class(serializer_class=serializer_class),
api_params=kwargs, refresh_token_by_default=refresh_token_default,
session=session)
class InsideviewClientAdapter(JSONAdapterMixin, TapiocaAdapter):
api_root = 'https://api.insideview.com/api/v1/'
resource_mapping = RESOURCE_MAPPING
def get_request_kwargs(self, api_params, *args, **kwargs):
arguments = super().get_request_kwargs(api_params, *args, **kwargs)
arguments['headers']['Content-Type'] = 'application/x-www-form-urlencoded'
arguments['headers']['accept'] = 'application/json'
arguments['auth'] = InsideViewAuth(api_params.get('access_token'))
return arguments
def get_iterator_list(self, response_data):
std_keys = {'totalResults', 'page', 'resultsPerPage'}
key = next(key for key in response_data.keys() if key not in std_keys)
return response_data[key]
def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
response_data, response):
page = response_data.get('page')
results_per_page = response_data.get('resultsPerPage')
if page:
page = int(page) + 1
return {'page': page, 'resultsPerPage': results_per_page}
Insideview = TapiocaInstantiator(InsideviewClientAdapter)
```
#### File: insideview/tests/test_tapioca_insideview.py
```python
import unittest
from tapioca_insideview import Insideview
class TestTapiocaInsideview(unittest.TestCase):
def setUp(self):
self.wrapper = Insideview()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jkp85/workspaces",
"score": 2
} |
#### File: workspaces/test/test_runner.py
```python
import os
import unittest
from . import notebook_runner
class TestNotebook(unittest.TestCase):
def test_runner(self):
nb, errors = notebook_runner.run_notebook('test/test_notebook.ipynb')
self.assertEqual(errors, [])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JKPawa/ucimlr",
"score": 3
} |
#### File: ucimlr/ucimlr/dataset.py
```python
import abc
class Dataset(abc.ABC):
def __len__(self):
return self.x.shape[0]
def __getitem__(self, i):
return self.x[i], self.y[i]
@property
def name(self):
return self.__class__.__name__
@property
def num_features(self):
return self.x.shape[1]
```
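A minimal sketch of the contract this base class expects from subclasses: populate `self.x` and `self.y` as 2-D arrays and the length/indexing helpers come for free (the toy subclass below is an illustration, not part of ucimlr):
```python
# Toy illustration of the Dataset contract; ToyDataset is not part of ucimlr.
import numpy as np
from ucimlr.dataset import Dataset


class ToyDataset(Dataset):
    def __init__(self):
        self.x = np.random.rand(10, 3)   # 10 rows, 3 features
        self.y = np.random.rand(10, 1)   # 10 rows, 1 target


ds = ToyDataset()
print(len(ds), ds.num_features, ds.name)   # 10 3 ToyDataset
x_i, y_i = ds[0]                           # __getitem__ returns (features, target) for row 0
```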
#### File: ucimlr/ucimlr/__init__.py
```python
def all_datasets():
from . import regression_datasets, classification_datasets
return regression_datasets.all_datasets() + classification_datasets.all_datasets()
```
#### File: ucimlr/ucimlr/regression_datasets.py
```python
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x) # Target as float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
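# Worked example of the locale clean-up above (illustrative value): the raw
# file stores floats with comma decimals, so '9,4' becomes float('9.4') == 9.4
# after the replace; rows whose C6H6(GT) target equals -200 (the dataset's
# missing-value marker) are dropped before the split/normalize step.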
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Auto+MPG).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names = ["symboling", "normalized-losses", "make", "fuel-type", " aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", " length", "width", " height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", " fuel-system", " bore", "stroke", " compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
y_columns = ['price']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
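# Note on the page_id trick above: the first 50 columns are treated as
# page-level (source blog) features, so rows that agree on all of them are
# assumed to be posts from the same blog page. groupby(page_columns)
# enumerates those pages, and split_df_on_column then keeps each page entirely
# inside either the training or the validation split, avoiding page-level
# leakage between the two.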
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', header=None)  # whitespace-separated, no header row
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
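# As in BlogFeedback above, the train/validation split is made on an
# identifier column (patientId) rather than on rows, so no patient contributes
# slices to more than one partition.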
class ForecastingOrders(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, so there are fewer pages than samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy between the
distributions of the features and targets. The training and validation splits are
also made so that the same page never appears in both. This makes the feature
distributions of the training and validation splits differ to a relatively large
extent, possibly because the number of pages is small while the number of features is large.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = pd.read_csv(test_path, header=None)
y_columns = df_train_valid.columns[-1:]
# Page ID is not included, but can be derived. Page IDs can not be
# in both training and validation sets
page_columns = list(range(29))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class Facebookmetrics (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
download_unzip(url, dataset_path)
filename = 'dataset_Facebook.csv'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class ForestFires(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Forest+Fires).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'forestfires.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class GNFUV(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00452/GNFUV USV Dataset.zip'
download_unzip(url, dataset_path)
dfs = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
dfs.append(pd.read_csv(file_path, header=None))
class GNFUV_2(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data+Set+2).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00466/CNFUV_Datasets.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None))
class Greenhouse_Gas_Observing_Network (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Greenhouse+Gas+Observing+Network).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00328/ghg_data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None, sep='\s+'))
class Hungarian_Chickenpox_Cases (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Hungarian+Chickenpox+Cases).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00580/hungary_chickenpox.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, index_col='Date', parse_dates=True))
class IIWA14_R820_Gazebo_Dataset_10Trajectories(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/IIWA14-R820-Gazebo-Dataset-10Trajectories).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00574/IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, header=None)
class Metro_Interstate_Traffic_Volume(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Metro+Interstate+Traffic+Volume).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Metro_Interstate_Traffic_Volume.csv.gz'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_News_Final(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'News_Final.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/News_Final.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Online_Video_Characteristics_and_Transcoding_Time(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Online+Video+Characteristics+and+Transcoding+Time+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00335/online_video_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'README.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class OnlineNews(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00332/OnlineNewsPopularity.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'OnlineNewsPopularity', 'OnlineNewsPopularity.csv')
df = pd.read_csv(file_path)
df.drop(columns=['url', ' timedelta'], inplace=True)
y_columns = [' shares']
df[y_columns[0]] = np.log(df[y_columns[0]])
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Parkinson(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/parkinsons).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path: str = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/' \
'parkinsons/telemonitoring/parkinsons_updrs.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path)
y_columns = ['motor_UPDRS', 'total_UPDRS']
df_train_valid = df[df['subject#'] <= 30]
df_test = deepcopy(df[df['subject#'] > 30])
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'subject#')
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res.drop(columns='subject#', inplace=True)
self.x, self.y = xy_split(df_res, y_columns)
class Physicochemical_Properties_of_Protein_Tertiary_Structure(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Physicochemical+Properties+of+Protein+Tertiary+Structure).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CASP.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class PPG_DaLiA_Data_Set(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/PPG-DaLiA).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00495/data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class QSAR_aquatic_toxicity(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+aquatic+toxicity).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'qsar_aquatic_toxicity.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00505/qsar_aquatic_toxicity.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', names=["TPSA(Tot)", "SAacc", "H-050", "MLOGP", "RDCHI", " GATS1p", "nN", "C-040", "quantitative response, LC50 [-LOG(mol/L)]"])
class QSAR_fish_bioconcentration_factor(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+bioconcentration+factor+%28BCF%29).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00511/QSAR_fish_BCF.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn =='ECFP_1024_m0-2_b2_c.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class QSAR(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+toxicity).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'qsar_fish_toxicity.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00504/qsar_fish_toxicity.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', names=[" CIC0", "SM1_Dz(Z)", " GATS1i", "NdsCH", " NdssC", "MLOGP", "quantitative response, LC50 [-LOG(mol/L)]"])
class PowerPlant(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'CCPP', 'Folds5x2_pp.xlsx')
df = pd.read_excel(file_path)
y_columns = ['PE'] # Not clear if this is the aim of the dataset
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class ResidentialBuilding(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Residential-Building-Data-Set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00437/Residential-Building-Data-Set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
y_columns = ['Y house price of unit area']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class RealEstate(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Real estate valuation data set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00477/Real%20estate%20valuation%20data%20set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path, index_col='No')
class Real_time_Election_Results (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+bioconcentration+factor+%28BCF%29).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00513/ElectionData2019.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if '.csv' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class Seoul_Bike_Sharing_Demand(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Seoul+Bike+Sharing+Demand).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'SeoulBikeData.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00560/SeoulBikeData.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Servo(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Servo).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'servo.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/servo/servo.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["motor", "screw", " pgain", "vgain", "class"])
class SGEMM_GPU_kernel_performance (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SGEMM+GPU+kernel+performance).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00440/sgemm_product_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'Readme.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class Simulated_data_for_survival_modelling (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Simulated+data+for+survival+modelling).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00581/MLtoSurvival-Data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == '.gitkeep':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class SkillCraft1(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SkillCraft1+Master+Table+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'SkillCraft1_Dataset.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00272/SkillCraft1_Dataset.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class SML2010 (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SML2010).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00274/NEW-DATA.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == '.gitkeep':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\s+'))
class Solar_Flare(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Solar+Flare).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'flare.data1'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data1'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df1 = pd.read_csv(file_path, header=None, skiprows=[0], sep='\s+')
filename = 'flare.data2'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data2'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df2 = pd.read_csv(file_path, header=None, skiprows=[0], sep='\s+')
df = pd.merge(df1, df2)
class Synchronous_Machine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Synchronous+Machine+Data+Set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'synchronous machine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00607/synchronous machine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class Stock_portfolio(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Stock+portfolio+performance).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'stock portfolio performance data set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00390/stock portfolio performance data set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class Superconductivity(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00464/superconduct.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'train.csv')
df = pd.read_csv(file_path)
y_columns = ['critical_temp']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class WaveEnergyConverters(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Wave+Energy+Converters).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00494/WECs_DataSet.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None))
class WhiteWineQuality(RegressionDataset):
"""
Description of dataset [here](http://archive.ics.uci.edu/ml/datasets/Wine+Quality).
Citation:
```
<NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
Modeling wine preferences by data mining from physicochemical properties.
In Decision Support Systems, Elsevier, 47(4):547-553, 2009.
```
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
y_columns = ['quality']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class YachtHydrodynamics(RegressionDataset):
"""
Description of dataset [here](http://archive.ics.uci.edu/ml/datasets/Yacht+Hydrodynamics).
Citation:
```
<NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
Modeling wine preferences by data mining from physicochemical properties.
In Decision Support Systems, Elsevier, 47(4):547-553, 2009.
```
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'yacht_hydrodynamics.data'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None, sep='\s+')
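# End-to-end sketch (illustrative root path; any completed dataset works the
# same way):
#   train = WhiteWineQuality(root='datasets', split='train')
#   valid = WhiteWineQuality(root='datasets', split='validation')
#   w, *_ = np.linalg.lstsq(train.x, train.y, rcond=None)  # OLS baseline
#   mse = np.mean((valid.x @ w - valid.y) ** 2)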
``` |
{
"source": "jkpawlowski96/AI",
"score": 3
} |
#### File: app/ai/model.py
```python
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import sys
import copy
class Model(nn.Module):
def __init__(self):
super().__init__()
self.GAMMA = .99
self.lr = 0.001
self.opt = 'Adam'
self.reward_max = 0
self.layers = [1]
self.criterion = nn.MSELoss()
def copy(self):
return copy.deepcopy(self)
def update_optimizer(self, lr=None, opt=None):
if lr is not None:
self.lr = lr
if opt is not None:
self.opt = opt
        if self.opt == 'Adam':
            self.optimizer = t.optim.Adam(self.parameters(), lr=self.lr)
        elif self.opt == 'SGD':
self.optimizer = t.optim.SGD(self.parameters(), lr=self.lr)
else:
self.optimizer = t.optim.Adam(self.parameters(), lr=self.lr)
    def train(self, state, action, reward):
        # Note: this shadows nn.Module.train(); the action argument is ignored and
        # recomputed from the current network before the MSE update below.
        self.optimizer.zero_grad()
        action = self.forward(state)
rmin = min(reward)
rmax = max(reward)
self.reward_max = max(rmax.item(), self.reward_max)
expected_action = (action * self.GAMMA) + reward
#expected_action = action*self.GAMMA + (action/abs(action+.000000001))*reward
loss = self.criterion(action, expected_action)
loss.backward()
self.optimizer.step()
e = loss.item()
print(f'sum loss {e}', file=sys.stderr)
return e
def loss(self, state, action, reward):
action = self.forward(state)
expected_action = (action * self.GAMMA) + reward
#expected_action = action*self.GAMMA + (action/abs(action+.000000001))*reward
loss = self.criterion(action, expected_action)
return loss
class Model_deep(Model):
def __init__(self, inputs, outputs, layers=[1]):
super().__init__()
self.layers = layers
self.depth = len(layers)
self.linear = []
for l in layers:
self.linear.append(nn.Linear(inputs, l))
inputs = l
self.linear = nn.ModuleList(self.linear)
self.out = nn.Linear(inputs, outputs)
self.update_optimizer()
def forward(self, x):
for i in range(self.depth):
x = self.linear[i](x)
x = F.relu(x)
x = self.out(x)
#x = F.softmax(x)
x = F.sigmoid(x)
return x
```
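A minimal usage sketch of `Model_deep` above. It assumes the repo's `app` package is importable and PyTorch is installed; the layer sizes, batch shape and reward shape are illustrative, not taken from the repository:

```python
import torch as t

from app.ai.model import Model_deep  # assumes the repo's app package is on the path

# Hypothetical dimensions: 4 input features, 2 outputs, two hidden layers of 16 units.
net = Model_deep(inputs=4, outputs=2, layers=[16, 16])

state = t.rand(8, 4)    # batch of 8 states
reward = t.rand(8, 1)   # one reward per sample, broadcast against both outputs
action = net.forward(state)  # sigmoid outputs in (0, 1)

# train() here is the custom update defined above (it shadows nn.Module.train()
# and recomputes the action from the current network before the MSE step).
loss = net.train(state, action, reward)
print(loss)
```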
#### File: app/ai/service.py
```python
import app.ai.model as model
from app.ai.genetic import Genetic
import app.ai.plot as plot
import time
import datetime
import numpy as np
import torch as t
import threading
import sys
class Service():
def __init__(self, inputs=1, outputs=1, main_service=False):
self.main_service = main_service
self.uid = None
self.description = None
self.online_learning = True
self.batch_size = 10
self.lr = 0.0001
self.GAMMA = 0.999
self.opt = 'SGD'
self.layers = []
self.active = True
self.genetic_learning = False
self.mr = 0.1
self.population_size = 4
self.genetic = None
self.reward_total = 0
self.epoch = 0
self.batch = []
self.losses = []
self.date = str(datetime.datetime.now())
self.inputs = inputs
self.outputs = outputs
self.model = model.Model_deep(self.inputs,
self.outputs)
self.update_genetic()
def use_token(self, token):
return self.genetic.use_token(token)
def get_token(self):
return self.genetic.free_token()
def plot_losses(self):
return plot.linear(self.losses)
def copy(self):
service = Service(self.inputs, self.outputs)
service.layers = self.layers.copy()
service.GAMMA = self.GAMMA
service.batch_size = self.batch_size
service.online_learning = self.online_learning
service.date = self.date # may be real date
service.description = 'tmp'
service.lr = self.lr
service.opt = self.opt
service.active = self.active
service.update_service()
service.model = self.model.copy() # torch model must have copy()
return service
def init_genetic(self):
self.genetic = Genetic(service=self)
def update_genetic(self):
if self.genetic_learning and not self.genetic: # start genetic
self.init_genetic()
if not self.genetic_learning: # remove gentic
self.genetic = None
def update_service(self, form=None):
self.update_genetic()
if form is not None:
# checklist
self.options(form.getlist('options'))
form = form.to_dict()
# q-learning
if self.online_learning:
try:
self.lr_percent = form['lr_percent']
self.lr = np.float(form['lr'])
self.opt = form['opt']
self.GAMMA = np.float(form['GAMMA'])
self.batch_size = np.int(form['batch_size'])
except:
pass
# genetic
if self.genetic_learning:
if 'mr' in form.keys():
self.mr = np.float(form['mr'])
if 'psi' in form.keys():
self.genetic.psi = np.float(form['psi'])
if 'childrens' in form.keys():
self.genetic.childrens = np.int(form['childrens'])
if 'population_size' in form.keys():
self.population_size = np.int(form['population_size'])
# nn configuration
            for n in range(len(self.layers)):
                try:
                    l = form['l'+str(n)]
                    l = np.int(l)
                    if l <= 0:
                        self.layers = self.layers[:n-1]
                        break
                    self.layers[n] = l
                except Exception:
                    self.layers = self.layers[:n-1]
                    break
        if self.layers != self.model.layers:
            self.model = model.Model_deep(self.inputs, self.outputs,
                                          layers=self.layers.copy())
        if self.lr != self.model.lr:
            self.model.update_optimizer(lr=self.lr)
        if self.GAMMA != self.model.GAMMA:
            self.model.GAMMA = self.GAMMA
        if self.opt != self.model.opt:
            self.model.update_optimizer(opt=self.opt)
def options(self, options):
if 'online_learning' in options:
self.online_learning = True
else:
self.online_learning = False
if 'genetic_learning' in options:
self.genetic_learning = True
else:
self.genetic_learning = False
self.update_genetic()
def finish(self, token, data):
if not self.genetic:
return 'null'
data = data.split('$')[1]
data = data.replace(',', '.')
reward = np.float(data)
self.genetic.finish(token, reward)
def forward(self, x):
x = self.to_tensor(x)
x = self.model.forward(x.view((1, -1)))
return self.from_tensor(x)
def add(self, state, action, reward):
if not self.online_learning:
return None
if self.main_service:
return None
state = self.to_tensor(state)
action = self.to_tensor(action)
reward = self.to_tensor(reward)
self.batch.append((state, action, reward))
# if len(self.batch) > self.batch_size:
# loss = self.train_on_batch()
# self.losses.append(loss)
# self.batch = []
    def train_on_batch(self):
        x, y, r = self.data_from_batch()
        loss = self.model.train(x, y, r)
#loss = self.model.train_loss(x, y, r)
self.batch = []
return loss
    def data_from_batch(self):
        # Unpack (state, action, reward) tuples without shadowing the torch alias `t`.
        x = t.stack([item[0] for item in self.batch])
        y = t.stack([item[1] for item in self.batch])
        r = t.stack([item[2] for item in self.batch])
return x, y, r
def to_tensor(self, x):
x = np.array(x).astype(np.float)
#x = [np.float(v) for v in x]
x = t.FloatTensor(x)
return x
def from_tensor(self, x):
#x = x.round()
resp = ""
for v in x.view(-1):
resp += str(v.item())+";"
resp = resp[:-1]
resp = resp.replace(".", ",")
return resp
def n_layers(self):
return len(self.layers)
``` |
{
"source": "jkpawlowski96/TRD-client-api",
"score": 3
} |
#### File: src/data/data.py
```python
from src.models.interval import Interval
from src.data import DataApi, MT5
from src.models import Pair
from datetime import datetime
class Data:
def __init__(self):
self.mt5 = MT5()
def history(self, pair:Pair, datetime_start:int, datetime_end:int=None, interval:Interval=None):
api = self.mt5
datetime_start = datetime.fromtimestamp(datetime_start)
if not interval:
interval = Interval('m1')
if datetime_end:
# range
datetime_end = datetime.fromtimestamp(datetime_end)
res = api.get_pair_range_price( pair=pair,
datetime_start=datetime_start,
datetime_end=datetime_end,
interval=interval)
else:
# single
res = api.get_pair_price(pair=pair, _datetime=datetime_start, interval=interval)
return res
def test(self):
res = []
for api in [self.mt5]:
            _res = {'mt5': api.test()}
res.append(_res)
return res
```
#### File: src/data/mt5.py
```python
from datetime import datetime
from src.data.api import DataApi
from src.models import Interval, Pair
import MetaTrader5 as mt5
import numpy as np
class MT5(DataApi):
    intervals = {'m1': mt5.TIMEFRAME_M1,
                 'm5': mt5.TIMEFRAME_M5}
def __init__(self):
# connect to MetaTrader 5
if not mt5.initialize():
print("initialize() failed")
mt5.shutdown()
# request connection status and parameters
print(mt5.terminal_info())
# get data on MetaTrader 5 version
print(mt5.version())
def get_pair_price(self,
pair:Pair,
_datetime:datetime,
interval:Interval):
rates = mt5.copy_rates_from(pair.get_name(),
self.intervals[interval.get_name()],
_datetime, 1000)
rates = price_extractor(rates[0])
return rates
def get_pair_range_price(self,
pair:Pair,
datetime_start:datetime,
datetime_end:datetime,
interval:Interval):
rates = mt5.copy_rates_range(pair.get_name(),
self.intervals[interval.get_name()],
datetime_start, datetime_end)
rates = [price_extractor(row) for row in rates]
return rates
def price_extractor(arr):
return [arr[1], # open
arr[2], # high
arr[3], # low
arr[4], # close
float(arr[6]) # spread
]
``` |
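A rough usage sketch of the `Data` facade above. It assumes the repo's `src` package is importable, a MetaTrader 5 terminal is installed and running for the `MetaTrader5` package to attach to, and that `Pair` (not shown in this excerpt) can be constructed from a symbol name; treat it as an illustration, not a confirmed API:

```python
from datetime import datetime, timedelta

from src.data.data import Data
from src.models import Pair  # constructor assumed to take a symbol name

data = Data()
end = int(datetime.now().timestamp())
start = int((datetime.now() - timedelta(hours=1)).timestamp())

# One hour of 1-minute candles, each returned as [open, high, low, close, spread].
candles = data.history(Pair('EURUSD'), start, end)
print(len(candles), candles[:1])
```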
{
"source": "jkpawlowski96/Web-scraper",
"score": 3
} |
#### File: Web-scraper/app/app.py
```python
from flask import Flask, Response, send_from_directory
from database import data_export, address_done, address_working, json_to_scv
from task import init_scrapper
# flask app
app = Flask(__name__)
# website scraper
scrapper = init_scrapper()
@app.route("/")
def home():
"""
Homepage
:return:
"""
return '<h1>Hello World!</h1>'
"""-----------------------------------------------------------------
Command services to download resources from a website,
or check status of task
-----------------------------------------------------------------"""
@app.route('/order/<path:address>')
def order(address):
"""
Function add task to scrap website resources
:param address: website address example: https://www.youtube.com/
:return: service_answer
"""
scrapper.process_order(address) # scrap website
return "ok"
@app.route('/check/<path:address>')
def check(address):
"""
    Function to check whether an address is already in the resources
:param address: website address example: https://www.youtube.com/
:return: service_answer
"""
if address_done(address) is False:
# resources are not in database
if address_working(address) is False:
# service is not scraping given address
return 'not found'
else:
            # service is scraping the given address
return 'in progress'
else:
# resources are already in database
return 'finished'
"""-----------------------------------------------------------------
Export resources from the database
-----------------------------------------------------------------"""
@app.route('/export/json/')
def export_json():
"""
Display all data in json
:return: json
"""
return data_export()
@app.route('/export/csv/')
def export_scv():
"""
Display all data in csv
:return: csv
"""
return json_to_scv(data_export())
@app.route('/export/json/<path:address>')
def export_json_path(address):
"""
Display selected data in json
:param address: website address example: https://www.youtube.com/
:return: json
"""
return data_export(query={'Address': address})
@app.route('/export/csv/<path:address>')
def export_csv_path(address):
"""
Display selected data in csv
:param address: website address example: https://www.youtube.com/
:return: csv
"""
return json_to_scv(data_export(query={'Address': address}))
"""-----------------------------------------------------------------
Download resources from the database
-----------------------------------------------------------------"""
@app.route('/download/json/')
def download_json():
"""
Download all data in json file
:return: json file
"""
return data_export(download=True)
@app.route('/download/csv/')
def download_scv():
"""
Download all data in csv file
:return: csv file
"""
return json_to_scv(data_export(), download=True)
@app.route('/download/json/<path:address>')
def download_json_path(address):
"""
Download selected data in json file
:param address: website address example: https://www.youtube.com/
:return: json file
"""
return data_export(query={'Address': address}, download=True)
@app.route('/download/csv/<path:address>')
def download_csv_path(address):
"""
Download selected data in csv file
:param address: website address example: https://www.youtube.com/
    :return: csv file
"""
return json_to_scv(data_export(query={'Address': address}), download=True)
if __name__ == "__main__":
app.run(host="0.0.0.0")
```
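A short client-side sketch of the HTTP interface defined above, assuming the Flask app is running locally on its default port (5000):

```python
import requests

base = 'http://localhost:5000'
target = 'https://www.example.com/'

requests.get(f'{base}/order/{target}')              # queue the address for scraping
print(requests.get(f'{base}/check/{target}').text)  # 'not found' / 'in progress' / 'finished'

# Once the check reports 'finished', export the stored resources.
print(requests.get(f'{base}/export/json/{target}').text)
```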
#### File: Web-scraper/app/task.py
```python
from text import get_text, string_to_file
from images import get_images_bytes, get_images_links
from database import data_add_resource, address_done, address_working
import threading
import time
from random import randint
def init_scrapper():
"""
    Initialize background thread to scrape websites
:return: thread
"""
mythread = Scrapper(name = "Thread-{}".format(1)) # ...Instantiate a thread and pass a unique ID to it
mythread.start() # ...Start the thread, invoke the run method
return mythread
def scrap_website(address):
"""
Scrap text and images from website
:param address: website address example: https://www.youtube.com/
:return: true value if done
"""
web_text = get_text(address) # scrap text
web_images_l = get_images_links(address) # scrap images
web_images_b = get_images_bytes(web_images_l) # images into bytes
row = {}
row['Address']=address
row['Text']=web_text
row['Images']=web_images_b
row['Images_links']=web_images_l
# add resource to database
if data_add_resource(row) is True: #and images in the future
return True
else:
return False
class Scrapper(threading.Thread):
"""
Scrapping thread class
"""
    # Queue of addresses to scrape and add resources to the database
job_queue=[]
def run(self):
while(True):
try:
                address = self.job_queue.pop(0)  # Take the next address from the queue
except:
time.sleep(2)
continue
# first check resources
if address_done(address) is True:
continue
# and working tasks..
if address_working(address) is True:
continue
# starting process
try:
address_working(address,value=True) # add as working task
results = scrap_website(address) # scrap website
if results is True:
                    address_working(address,value=False) # Job is not working anymore. Address is already in the database
continue
else:
continue # Something was wrong
except:
pass
def process_order(self, address):
"""
        Take an order to scrape a website
:param address: website address example: https://www.youtube.com/
:return:
"""
self.job_queue.append(address)
``` |
{
"source": "jkporter/noonclient",
"score": 2
} |
#### File: src/noonclient/_serialization.py
```python
from json.decoder import WHITESPACE, JSONDecoder
from json.encoder import JSONEncoder
from importlib import import_module
import inspect
from typing import Any, Generic, Type, TypeVar
import dataclasses
import json
import typing
T = TypeVar('T')
def DEFAULT_GET_MODEL(d, model):
return model(**d)
_models_module = import_module('noonclient.alaska.model')
def _get_model_fields_types(obj):
type_hints = typing.get_type_hints(obj)
return {field.name: type_hints[field.name] for field in dataclasses.fields(obj) if field.name in type_hints}
_models_fields_types = {obj: _get_model_fields_types(obj) for (
_, obj) in inspect.getmembers(_models_module) if dataclasses.is_dataclass(obj)}
serializednames = dict()
deserializednames = dict()
for _, obj in inspect.getmembers(_models_module):
if dataclasses.is_dataclass(obj) and hasattr(obj, '_serializednames'):
serializednames.update(obj._serializednames)
deserializednames.update(obj._deserializednames)
class _ModelJSONEncoder(JSONEncoder):
def default(self, o):
t = type(o)
def serializedname(name: str):
return t._serializednames[name] if name in t._serializednames else name
transname = serializedname if hasattr(
t, '_serializednames') else lambda x: x
return {transname(k): self.default(v) if isinstance(v, dict) else v for (k, v) in (o if isinstance(o, dict) else dataclasses.asdict(o)).items() if v is not None}
class _ModelJSONDecoder(Generic[T], JSONDecoder):
def __init__(self, get_model=DEFAULT_GET_MODEL, *, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, strict=True, object_pairs_hook=None):
super(_ModelJSONDecoder, self).__init__(object_hook=object_hook, parse_float=parse_float,
parse_int=parse_int, parse_constant=parse_constant, strict=strict, object_pairs_hook=object_pairs_hook)
self.get_model = get_model
def decode(self, s, _w=WHITESPACE.match):
return _ModelJSONDecoder.deserialize(JSONDecoder.decode(self, s, _w), typing.get_args(self.__orig_class__)[0], self.get_model)
@staticmethod
def deserialize(d: dict, t: Type[T], get_model=DEFAULT_GET_MODEL) -> T:
def get_value(v, field_type):
if typing.get_origin(field_type) is list:
list_model_type = typing.get_args(field_type)[0]
if list_model_type is not None:
return list[list_model_type](_ModelJSONDecoder.deserialize(d, list_model_type, get_model) for d in v if isinstance(d, dict))
if isinstance(v, dict) and field_type in _models_fields_types:
return _ModelJSONDecoder.deserialize(v, field_type, get_model)
if field_type is Any or isinstance(v, field_type):
return v
return None
def unserializedname(name: str):
return t._deserializednames[name] if name in t._deserializednames else name
transname = unserializedname if hasattr(
t, '_deserializednames') else lambda x: x
def map_to_fields(d):
for (k, v) in d.items():
field_name = transname(k)
if field_name in _models_fields_types[t]:
yield (field_name, get_value(v, _models_fields_types[t][field_name]))
return get_model(dict(map_to_fields(d)), t)
def _json_seralize(obj):
return json.dumps(obj, cls=_ModelJSONEncoder)
def _get_loads(type: Type, get_model=lambda d, m: m(**d)):
def loads(s):
return json.loads(s, cls=_ModelJSONDecoder[type], **{'get_model': get_model})
return loads
def serializedname(name: str, serializedname: str):
def set_serializedname(cls):
if not hasattr(cls, '_serializednames'):
cls._serializednames = dict()
cls._deserializednames = dict()
cls._serializednames[name] = serializedname
cls._deserializednames[serializedname] = name
serializednames[name] = serializedname
deserializednames[serializedname] = name
return cls
return set_serializedname
``` |
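The module above resolves its field renames through the dataclasses in `noonclient.alaska.model`, so it cannot be run standalone; the following self-contained sketch only illustrates the same pattern (a decorator recording wire names plus a `JSONEncoder` that applies them on serialization) with a made-up dataclass, and is not the noonclient API:

```python
import dataclasses
import json
from json import JSONEncoder


def serializedname(field_name, wire_name):
    """Toy decorator: serialize `field_name` under `wire_name`."""
    def wrap(cls):
        if not hasattr(cls, '_serializednames'):
            cls._serializednames = {}
        cls._serializednames[field_name] = wire_name
        return cls
    return wrap


class RenamingEncoder(JSONEncoder):
    def default(self, o):
        names = getattr(type(o), '_serializednames', {})
        # Drop None values and apply the recorded wire names.
        return {names.get(k, k): v for k, v in dataclasses.asdict(o).items() if v is not None}


@serializedname('user_name', 'user-name')
@dataclasses.dataclass
class Account:
    user_name: str
    token: str = None


print(json.dumps(Account(user_name='alice'), cls=RenamingEncoder))
# -> {"user-name": "alice"}
```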
{
"source": "jkppr/dftimewolf",
"score": 2
} |
#### File: tests/test_modules/thread_aware_modules.py
```python
from typing import Dict, Any
import threading
import time
from dftimewolf.lib import module
from dftimewolf.lib.containers import interface
from dftimewolf.lib.containers import containers
class TestContainer(interface.AttributeContainer):
"""Test attribute container."""
CONTAINER_TYPE = 'test_container'
def __init__(self, value: str) -> None:
super(TestContainer, self).__init__()
self.value = value
class TestContainerTwo(interface.AttributeContainer):
"""Test attribute container."""
CONTAINER_TYPE = 'test_container_two'
def __init__(self, value: str) -> None:
super(TestContainerTwo, self).__init__()
self.value = value
class TestContainerThree(interface.AttributeContainer):
"""Test attribute container."""
CONTAINER_TYPE = 'test_container_three'
def __init__(self, value: str) -> None:
super(TestContainerThree, self).__init__()
self.value = value
class ContainerGeneratorModule(module.BaseModule):
"""This is a dummy module. Generates test containers."""
def __init__(self, state, name=None):
self.list = []
super(ContainerGeneratorModule, self).__init__(state, name)
def SetUp(self, runtime_value=None): # pylint: disable=arguments-differ
"""Dummy setup function."""
print(self.name + ' Setup!')
self.list = runtime_value.split(',')
def Process(self):
"""Dummy Process function."""
print(self.name + ' Process!')
for item in self.list:
container = TestContainer(item)
self.state.StoreContainer(container)
container = TestContainerTwo(','.join(self.list))
self.state.StoreContainer(container)
class ThreadAwareConsumerModule(module.ThreadAwareModule):
"""This is a dummy Thread Aware Module. Consumes from
ContainerGeneratorModule based on the number of containers generated."""
def __init__(self, state, name=None):
super(ThreadAwareConsumerModule, self).__init__(state, name)
self.output_values = ['one', 'two', 'three']
self.output_lock = threading.Lock()
def SetUp(self): # pylint: disable=arguments-differ
"""SetUp"""
self.logger.info('{0:s} SetUp!'.format(self.name))
def Process(self, container) -> None:
"""Process"""
self.logger.info('{0:s} Process!'.format(self.name))
time.sleep(1)
# This modifies the container passed in as a parameter.
container.value += ' appended'
# This modifies some state-stored containers, generated by previous modules.
for c in self.state.GetContainers(TestContainerTwo):
c.value += ' appended'
# This generates and stores a container in state.
with self.output_lock:
new_container = TestContainerThree('output ' + self.output_values.pop())
self.state.StoreContainer(new_container)
@staticmethod
def GetThreadOnContainerType():
return TestContainer
def GetThreadPoolSize(self):
return 2
def PreProcess(self) -> None:
self.logger.info("ThreadAwareConsumerModule Static Pre Process")
def PostProcess(self) -> None:
self.logger.info("ThreadAwareConsumerModule Static Post Process")
class Issue503Module(module.ThreadAwareModule):
"""This is a module for testing a certain pattern of container handling.
As described by https://github.com/log2timeline/dftimewolf/issues/503 this
module pops containers for input, and uses the same container type as output.
"""
def __init__(self, state, name=None):
super(Issue503Module, self).__init__(state, name)
def SetUp(self): # pylint: disable=arguments-differ
"""SetUp"""
self.logger.info('{0:s} SetUp!'.format(self.name))
def Process(self, container) -> None:
"""Process"""
self.logger.info('{0:s} Process!'.format(self.name))
self.state.StoreContainer(TestContainer(container.value + " Processed"))
@staticmethod
def GetThreadOnContainerType():
return TestContainer
def GetThreadPoolSize(self):
return 2
def PreProcess(self) -> None:
pass
def PostProcess(self) -> None:
pass
def KeepThreadedContainersInState(self) -> bool:
return False
``` |
{
"source": "jk-prog-22/patstavigais_darbs_NP",
"score": 3
} |
#### File: jk-prog-22/patstavigais_darbs_NP/main.py
```python
import requests
import constant
import logging
from bs4 import BeautifulSoup
getHeaders = {
"Referer": "https://www.ss.com/en/transport/cars/filter/",
"User-Agent": constant.USERAGENT
}
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
if constant.DEBUG == True:
import http.client as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def getClassifieds(url = constant.FILTER):
session = requests.Session()
getContents = session.get(url, headers = getHeaders, timeout = constant.TIMEOUT, allow_redirects = True)
if getContents.status_code == 200:
soup = BeautifulSoup(getContents.content, 'html.parser')
nextPage = soup.findAll("a", {"class" : "navi"})[-1]["href"].strip()
vehicles = soup.findAll("tr", {"id" : lambda L: L and L.startswith('tr_')})
for vehicle in vehicles:
elements = vehicle.findAll("td")
try:
href = elements[1].find("a", href=True)["href"]
classified = session.get(constant.BASE + href, headers = getHeaders, timeout = constant.TIMEOUT)
if classified.status_code == 200:
cSoup = BeautifulSoup(classified.content, 'html.parser')
# Parsing the vehicles year
try:
year = int(cSoup.find("td", {"id": "tdo_18"}).text.split()[0])
except AttributeError:
print(f"{bcolors.WARNING}Skipping: Unable to retrieve year for", href + bcolors.ENDC)
continue
# Parsing the vehicles price
try:
price = int(cSoup.find("span", {"id": "tdo_8"}).text.replace(" ", "").replace("€", ""))
except AttributeError:
print(f"{bcolors.WARNING}Skipping: Unable to retrieve price for", href + bcolors.ENDC)
continue
# Parsing the vehicles engine capacity
try:
capacity = float(cSoup.find("td", {"id": "tdo_15"}).text.split()[0])
except AttributeError:
print(f"{bcolors.WARNING}Skipping: Unable to retrieve engine capacity for", href + bcolors.ENDC)
continue
# Filtering year
# TODO: Probably could use range with one if call
yearMin = constant.FILTERS["year"][0]
yearMax = constant.FILTERS["year"][1]
if (yearMin != -1 and year < yearMin):
print(f"{bcolors.HEADER}", href, "year", year, "<", yearMin, bcolors.ENDC)
continue
elif (yearMax != -1 and year > yearMax):
print(f"{bcolors.HEADER}", href, "year", year, ">", yearMax, bcolors.ENDC)
continue
# Filtering price
priceMin = constant.FILTERS["price"][0]
priceMax = constant.FILTERS["price"][1]
if (priceMin != -1 and price < priceMin):
print(f"{bcolors.HEADER}", href, "price", price, "<", priceMin, bcolors.ENDC)
continue
elif (priceMax != -1 and price > priceMax):
print(f"{bcolors.HEADER}", href, "price", price, ">", priceMax, bcolors.ENDC)
continue
# Filtering engine capacity
capMin = constant.FILTERS["capacity"][0]
capMax = constant.FILTERS["capacity"][1]
if (capMin != -1 and capacity < capMin):
print(f"{bcolors.HEADER}", href, "engine capacity", capacity, "<", capMin, bcolors.ENDC)
continue
                    elif (capMax != -1 and capacity > capMax):
print(f"{bcolors.HEADER}", href, "engine capacity", capacity, ">", capMax, bcolors.ENDC)
continue
print(f"{bcolors.OKGREEN}Vehicle fits the request:", bcolors.UNDERLINE + constant.BASE + href + bcolors.ENDC)
else:
print(f"{bcolors.FAIL}Error: Unable to fetch data for", href + bcolors.ENDC)
except IndexError:
continue
    else:
        print("Error fetching classifieds.")
        return
if "page" in nextPage:
print(f"{bcolors.OKBLUE}Switching page:", nextPage + bcolors.ENDC)
getClassifieds(constant.BASE + nextPage)
getClassifieds()
``` |
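main.py imports a `constant` module that is not part of this excerpt. A hypothetical `constant.py` along the following lines (names inferred from how they are used above, values purely illustrative) is what the script expects:

```python
# constant.py -- hypothetical configuration inferred from main.py; values are examples only
DEBUG = False
TIMEOUT = 10  # seconds per HTTP request

BASE = 'https://www.ss.com'
FILTER = 'https://www.ss.com/en/transport/cars/filter/'
USERAGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0 Safari/537.36'

# (min, max) bounds for each attribute; -1 disables a bound
FILTERS = {
    'year': (2010, -1),
    'price': (-1, 15000),
    'capacity': (1.6, 3.0),
}
```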
{
"source": "JKProjects-Org/ski-conditions",
"score": 3
} |
#### File: management/commands/do_scraping.py
```python
import json
import re
import requests
from bs4 import BeautifulSoup
from django.core.management.base import BaseCommand
from ski_conditions.apps.app_scraping.models import SkiResort
class AbstractScraper:
def scrape(self):
pass
class AbstractScriptScraper(AbstractScraper):
def _common_scrape(self):
page = requests.get(self.url)
soup = BeautifulSoup(page.text, 'html.parser')
return soup
class AbstractVailScraper(AbstractScraper):
def _common_scrape(self):
page = requests.get(self.url)
# create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
        # search for the terrain summary block (class 'terrain_summary row')
trails_summary = soup.find(class_='terrain_summary row')
# look for stuff in <span> tags
trails_summary_items = trails_summary.find_all(class_='c118__number1--v1')
# look for trail and lift totals
trail_totals = trails_summary.find_all(class_='c118__number2--v1')
return (trail_totals, trails_summary_items)
def terrain_status(self):
# gets info on individual lifts and trails, such as status
page = requests.get(self.url)
soup = BeautifulSoup(page.text, 'html.parser')
# need to look through script to get rest of values
pattern = re.compile("FR.TerrainStatusFeed = ({.*})")
regex_find = soup.find_all('script', text=pattern)
# has numbers for Status. ex. Status = 0 or 1
regex_find_numbers = regex_find[0].text
# has words for Status, Type. ex. Status = Open, Type = Black
regex_find_words = regex_find[1].text
# need to apply regex again to get just the json part
status_numbers = re.findall(pattern, regex_find_numbers)[0]
json_data_numbers = json.loads(status_numbers)
json_lifts_numbers = json_data_numbers['Lifts']
status_words = re.findall(pattern, regex_find_words)[0]
json_data_words = json.loads(status_words)
json_trails_words = json_data_words['GroomingAreas']
# fields: Id, Name, Type (Green, Blue, Black, DoubleBlack), IsOpen (True, False)
json_lifts_words = json_data_words['Lifts']
# fields: Name, Status (Open, Closed, OnHold), Type, SortOrder, Mountain
return json_trails_words, json_lifts_words
def trail_specifics(self, json_trails_words):
black_diamonds_open = 0
double_black_diamonds_open = 0
# go through each section of mountain, ex. frontside, backside (defined by vail)
for area in json_trails_words:
# tally runs in this area, ex. frontside
area_runs = area['Runs']
for run in area_runs:
if run['IsOpen']:
# tally number of black diamond runs open
if run['Type'] == 'Black':
black_diamonds_open += 1
elif run['Type'] == 'DoubleBlack':
double_black_diamonds_open += 1
return black_diamonds_open, double_black_diamonds_open
class KeystoneScraper(AbstractVailScraper):
name = 'Keystone'
url = 'https://www.keystoneresort.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
def scrape(self):
trail_totals, trails_summary_items = self._common_scrape()
new_total_trails = int(trail_totals[2].get_text()[2:])
new_total_lifts = int(trail_totals[3].get_text()[2:])
new_acres_open = int(trails_summary_items[0].get_text())
new_terrain_percent = int(trails_summary_items[1].get_text())
new_trails_open = int(trails_summary_items[2].get_text())
new_lifts_open = int(trails_summary_items[3].get_text())
# TODO Use a struct or other data structure
return {
'total_trails': new_total_trails,
'total_lifts': new_total_lifts,
'acres_open': new_acres_open,
'terrain_percent': new_terrain_percent,
'trails_open': new_trails_open,
'lifts_open': new_lifts_open,
}
class NorthstarScraper(AbstractVailScraper):
name = 'Northstar'
url = 'https://www.northstarcalifornia.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
def scrape(self):
trail_totals, trails_summary_items = self._common_scrape()
new_total_trails = int(trail_totals[2].get_text()[2:])
new_total_lifts = int(trail_totals[1].get_text()[2:])
# remove comma from new_acres_open if present)
new_acres_open = int(trails_summary_items[0].get_text().replace(',', ''))
new_terrain_percent = int(trails_summary_items[3].get_text())
new_trails_open = int(trails_summary_items[2].get_text())
new_lifts_open = int(trails_summary_items[1].get_text())
# get json from site script containing trail, lift specifics
json_trails_words, json_lifts_words = self.terrain_status()
# get number of black diamond, double black diamonds open
black_diamonds_open, double_black_diamonds_open = self.trail_specifics(json_trails_words)
# get number of lifts on hold
lifts_on_hold = 0
for lift in json_lifts_words:
if lift['Status'] == 'OnHold':
lifts_on_hold += 1
# TODO Use a struct or other data structure
return {
'total_trails': new_total_trails,
'total_lifts': new_total_lifts,
'acres_open': new_acres_open,
'terrain_percent': new_terrain_percent,
'trails_open': new_trails_open,
'lifts_open': new_lifts_open,
'lifts_on_hold': lifts_on_hold,
'black_diamonds_open': black_diamonds_open,
'double_black_diamonds_open': double_black_diamonds_open,
}
class KirkwoodScraper(AbstractVailScraper):
name = 'Kirkwood'
url = 'https://www.kirkwood.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
def scrape(self):
trail_totals, trails_summary_items = self._common_scrape()
# only acres open and terrain percent are shown on site
new_acres_open = int(trails_summary_items[1].get_text().replace(',', ''))
new_terrain_percent = int(trails_summary_items[0].get_text())
# TODO: put the following in some function
json_trails_words, json_lifts_words = self.terrain_status()
# GroomingAreas/trails = [{frontside,runs[]}, {backside,runs[]}]
# to make applicable to all resorts, go through each element in GroomingAreas list
new_trails_open = 0
new_total_trails = 0
new_lifts_open = 0
new_total_lifts = 0
# trail and lift specifics
black_diamonds_open = 0
double_black_diamonds_open = 0
lifts_on_hold = 0
# go through each section of mountain, ex. frontside, backside (defined by vail)
for area in json_trails_words:
# tally runs in this area, ex. frontside
area_runs = area['Runs']
for run in area_runs:
new_total_trails += 1
if run['IsOpen']:
new_trails_open += 1
# tally number of black diamond runs open
if run['Type'] == 'Black':
black_diamonds_open += 1
elif run['Type'] == 'DoubleBlack':
double_black_diamonds_open += 1
# tally number of lifts open
for lift in json_lifts_words:
new_total_lifts += 1
if lift['Status'] == 'Open':
new_lifts_open += 1
elif lift['Status'] == 'OnHold':
lifts_on_hold += 1
return {
'total_trails': new_total_trails,
'total_lifts': new_total_lifts,
'acres_open': new_acres_open,
'terrain_percent': new_terrain_percent,
'trails_open': new_trails_open,
'lifts_open': new_lifts_open,
'lifts_on_hold': lifts_on_hold,
'black_diamonds_open': black_diamonds_open,
'double_black_diamonds_open': double_black_diamonds_open,
}
class HeavenlyScraper(AbstractVailScraper):
name = 'Heavenly'
url = 'https://www.skiheavenly.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
def scrape(self):
trail_totals, trails_summary_items = self._common_scrape()
# assign text to variables
new_total_trails = int(trail_totals[3].get_text()[2:])
new_total_lifts = int(trail_totals[1].get_text()[2:])
# assign ints to variables
new_acres_open = int(trails_summary_items[0].get_text().replace(',', ''))
new_terrain_percent = int(trails_summary_items[2].get_text())
new_trails_open = int(trails_summary_items[3].get_text())
new_lifts_open = int(trails_summary_items[1].get_text())
# get json from site script containing trail, lift specifics
json_trails_words, json_lifts_words = self.terrain_status()
# get number of black diamond, double black diamonds open
black_diamonds_open, double_black_diamonds_open = self.trail_specifics(json_trails_words)
# get number of lifts on hold
lifts_on_hold = 0
for lift in json_lifts_words:
if lift['Status'] == 'OnHold':
lifts_on_hold += 1
return {
'total_trails': new_total_trails,
'total_lifts': new_total_lifts,
'acres_open': new_acres_open,
'terrain_percent': new_terrain_percent,
'trails_open': new_trails_open,
'lifts_open': new_lifts_open,
'lifts_on_hold': lifts_on_hold,
'black_diamonds_open': black_diamonds_open,
'double_black_diamonds_open': double_black_diamonds_open,
}
class KirkwoodSnowReport(AbstractScriptScraper):
name = 'Kirkwood'
url = 'https://www.kirkwood.com/the-mountain/mountain-conditions/snow-and-weather-report.aspx'
def scrape(self):
soup = self._common_scrape()
# create regex pattern to find snowReportData json
# only grabs stuff in parens
pattern = re.compile("snowReportData = ({.*})")
# find html that contains pattern, will contain script tags
script_items = soup.find_all('script', text=pattern)
# get script body that contains snow report numbers
script_snow_report = script_items[0].text
# use regex pattern to grab only json part
# returns a list, grab first and only element
snow_data = re.findall(pattern, script_snow_report)[0]
# use json module to read json snow_data
json_snow_data = json.loads(snow_data)
return json_snow_data
def unpack_json(self, json_data):
return {
'overnight': json_data['OvernightSnowfall']['Inches'],
'24hr': json_data['TwentyFourHourSnowfall']['Inches'],
'48hr': json_data['FortyEightHourSnowfall']['Inches'],
'7day': json_data['SevenDaySnowfall']['Inches'],
'base_depth': json_data['BaseDepth']['Inches'],
'current_season': json_data['CurrentSeason']['Inches'],
}
class HeavenlySnowReport(AbstractScriptScraper):
name = 'Heavenly'
url = 'https://www.skiheavenly.com/the-mountain/mountain-conditions/snow-and-weather-report.aspx'
def scrape(self):
soup = self._common_scrape()
# create regex pattern to find snowReportData json
# only grabs stuff in parens
pattern = re.compile("snowReportData = ({.*})")
# find html that contains pattern, will contain script tags
script_items = soup.find_all('script', text=pattern)
# get script body that contains snow report numbers
script_snow_report = script_items[0].text
# use regex pattern to grab only json part
# returns a list, grab first and only element
snow_data = re.findall(pattern, script_snow_report)[0]
# use json module to read json snow_data
json_snow_data = json.loads(snow_data)
return json_snow_data
def unpack_json(self, json_data):
return {
'overnight': json_data['OvernightSnowfall']['Inches'],
'24hr': json_data['TwentyFourHourSnowfall']['Inches'],
'48hr': json_data['FortyEightHourSnowfall']['Inches'],
'7day': json_data['SevenDaySnowfall']['Inches'],
'base_depth': json_data['BaseDepth']['Inches'],
'current_season': json_data['CurrentSeason']['Inches'],
}
class NorthstarSnowReport(AbstractScriptScraper):
name = 'Northstar'
url = 'https://www.northstarcalifornia.com/the-mountain/mountain-conditions/snow-and-weather-report.aspx'
def scrape(self):
soup = self._common_scrape()
# create regex pattern to find snowReportData json
# only grabs stuff in parens
pattern = re.compile("snowReportData = ({.*})")
# find html that contains pattern, will contain script tags
script_items = soup.find_all('script', text=pattern)
# get script body that contains snow report numbers
script_snow_report = script_items[0].text
# use regex pattern to grab only json part
# returns a list, grab first and only element
snow_data = re.findall(pattern, script_snow_report)[0]
# use json module to read json snow_data
json_snow_data = json.loads(snow_data)
return json_snow_data
def unpack_json(self, json_data):
return {
'overnight': json_data['OvernightSnowfall']['Inches'],
'24hr': json_data['TwentyFourHourSnowfall']['Inches'],
'48hr': json_data['FortyEightHourSnowfall']['Inches'],
'7day': json_data['SevenDaySnowfall']['Inches'],
'base_depth': json_data['BaseDepth']['Inches'],
'current_season': json_data['CurrentSeason']['Inches'],
}
class Command(BaseCommand):
help = "Scrapes ski resort website and updates database"
def handle(self, *args, **options):
# Trail and Lift Conditions
scrapers = [
HeavenlyScraper(),
NorthstarScraper(),
KirkwoodScraper(),
]
for scraper in scrapers:
name = scraper.name
scraped = scraper.scrape()
SkiResort.objects.update_or_create(
resort_name=name,
defaults={
'total_trails': scraped['total_trails'],
'acres_open': scraped['acres_open'],
'terrain_percent': scraped['terrain_percent'],
'trails_open': scraped['trails_open'],
'lifts_open': scraped['lifts_open'],
'total_lifts': scraped['total_lifts'],
'lifts_on_hold': scraped['lifts_on_hold'],
'black_diamonds_open': scraped['black_diamonds_open'],
'double_black_diamonds_open': scraped['double_black_diamonds_open'],
}
)
# Snow Conditions
snow_reports = [
KirkwoodSnowReport(),
HeavenlySnowReport(),
NorthstarSnowReport(),
]
for snow in snow_reports:
name = snow.name
snow_json_data = snow.scrape()
snow_data = snow.unpack_json(snow_json_data)
SkiResort.objects.update_or_create(
resort_name=name,
defaults={
'overnight_snowfall': snow_data['overnight'],
'twenty_four_hour_snowfall': snow_data['24hr'],
'forty_eight_hour_snowfall': snow_data['48hr'],
'seven_day_snowfall': snow_data['7day'],
'base_depth': snow_data['base_depth'],
'current_season': snow_data['current_season'],
}
)
self.stdout.write('SkiResort model updated')
```
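The three snow-report scrapers above share one technique: pull a JSON blob out of an inline `<script>` tag with a regex and parse it. A self-contained illustration of that pattern on a dummy HTML snippet (not a real resort page):

```python
import json
import re

from bs4 import BeautifulSoup

html = """
<html><body>
<script>var snowReportData = {"BaseDepth": {"Inches": 42}, "TwentyFourHourSnowfall": {"Inches": 3}};</script>
</body></html>
"""

pattern = re.compile(r"snowReportData = ({.*})")
soup = BeautifulSoup(html, 'html.parser')

# find_all(..., text=pattern) keeps only the <script> tags whose text matches the regex.
script_text = soup.find_all('script', text=pattern)[0].text
snow = json.loads(re.findall(pattern, script_text)[0])
print(snow['BaseDepth']['Inches'])  # 42
```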
#### File: app_scraping/tests/test_views.py
```python
import pytest
from django.urls import reverse
from .factories import ResortFactory
@pytest.mark.django_db()
def test_index_view(client):
resort = ResortFactory()
response = client.get(reverse('scraping:index'))
assert response.status_code == 200
assert list(response.context['resort_list']) == [resort]
``` |
{
"source": "jkpubsrc/asyncio-redis",
"score": 2
} |
#### File: asyncio-redis/asyncio_redis/protocol.py
```python
import asyncio
import logging
import types
from asyncio.futures import Future
from asyncio.queues import Queue
from asyncio.streams import StreamReader
try:
import hiredis
except ImportError:
hiredis = None
from collections import deque
from functools import wraps
from inspect import getfullargspec, getcallargs, signature
from .encoders import BaseEncoder, UTF8Encoder
from .exceptions import (
ConnectionLostError,
Error,
ErrorReply,
NoRunningScriptError,
NotConnectedError,
ScriptKilledError,
TimeoutError,
TransactionError,
)
from .log import logger
from .replies import (
BlockingPopReply,
ClientListReply,
ConfigPairReply,
DictReply,
EvalScriptReply,
InfoReply,
ListReply,
PubSubReply,
SetReply,
StatusReply,
ZRangeReply,
)
from .cursors import Cursor, SetCursor, DictCursor, ZCursor
__all__ = (
'RedisProtocol',
'HiRedisProtocol',
'Transaction',
'Subscription',
'Script',
'ZAggregate',
'ZScoreBoundary',
)
NoneType = type(None)
# In Python 3.4.4, `async` was renamed to `ensure_future`.
try:
ensure_future = asyncio.ensure_future
except AttributeError:
ensure_future = getattr(asyncio, "async")
class _NoTransactionType(object):
"""
Instance of this object can be passed to a @_command when it's not part of
a transaction. We need this because we need a singleton which is different
from None. (None could be a valid input for a @_command, so there is no way
to see whether this would be an extra 'transaction' value.)
"""
_NoTransaction = _NoTransactionType()
class ZScoreBoundary:
"""
Score boundary for a sorted set.
for queries like zrangebyscore and similar
:param value: Value for the boundary.
:type value: float
:param exclude_boundary: Exclude the boundary.
:type exclude_boundary: bool
"""
def __init__(self, value, exclude_boundary=False):
assert isinstance(value, float) or value in ('+inf', '-inf')
self.value = value
self.exclude_boundary = exclude_boundary
def __repr__(self):
return 'ZScoreBoundary(value=%r, exclude_boundary=%r)' % (
self.value, self.exclude_boundary)
ZScoreBoundary.MIN_VALUE = ZScoreBoundary('-inf')
ZScoreBoundary.MAX_VALUE = ZScoreBoundary('+inf')
class ZAggregate: # TODO: use the Python 3.4 enum type.
"""
Aggregation method for zinterstore and zunionstore.
"""
#: Sum aggregation.
SUM = 'SUM'
#: Min aggregation.
MIN = 'MIN'
#: Max aggregation.
MAX = 'MAX'
class PipelinedCall:
""" Track record for call that is being executed in a protocol. """
__slots__ = ('cmd', 'is_blocking')
def __init__(self, cmd, is_blocking):
self.cmd = cmd
self.is_blocking = is_blocking
class MultiBulkReply:
"""
Container for a multi bulk reply.
"""
def __init__(self, protocol, count, loop=None):
self._loop = loop or asyncio.get_event_loop()
#: Buffer of incoming, undelivered data, received from the parser.
self._data_queue = []
#: Incoming read queries.
#: Contains (read_count, Future, decode_flag, one_only_flag) tuples.
self._f_queue = deque()
self.protocol = protocol
self.count = int(count)
def _feed_received(self, item):
"""
Feed entry for the parser.
"""
# Push received items on the queue
self._data_queue.append(item)
self._flush()
def _flush(self):
"""
Answer read queries when we have enough data in our multibulk reply.
"""
        # As long as we have more data in our queue than we require for a read
# query -> answer queries.
while self._f_queue and self._f_queue[0][0] <= len(self._data_queue):
# Pop query.
count, f, decode, one_only = self._f_queue.popleft()
# Slice data buffer.
data, self._data_queue = self._data_queue[:count], self._data_queue[count:]
# When the decode flag is given, decode bytes to native types.
if decode:
data = [ self._decode(d) for d in data ]
# When one_only flag has been given, don't return an array.
if one_only:
assert len(data) == 1
f.set_result(data[0])
else:
f.set_result(data)
def _decode(self, result):
""" Decode bytes to native Python types. """
if isinstance(result, (StatusReply, int, float, MultiBulkReply)):
# Note that MultiBulkReplies can be nested. e.g. in the 'scan' operation.
return result
elif isinstance(result, bytes):
return self.protocol.decode_to_native(result)
elif result is None:
return result
else:
raise AssertionError('Invalid type: %r' % type(result))
def _read(self, decode=True, count=1, _one=False):
""" Do read operation on the queue. Return future. """
f = Future(loop=self.protocol._loop)
self._f_queue.append((count, f, decode, _one))
# If there is enough data on the queue, answer future immediately.
self._flush()
return f
def iter_raw(self):
"""
Iterate over all multi bulk packets. This yields futures that won't
decode bytes yet.
"""
for i in range(self.count):
yield self._read(decode=False, _one=True)
def __iter__(self):
"""
Iterate over the reply. This yields coroutines of the decoded packets.
It decodes bytes automatically using protocol.decode_to_native.
"""
for i in range(self.count):
yield self._read(_one=True)
def __repr__(self):
return 'MultiBulkReply(protocol=%r, count=%r)' % (self.protocol, self.count)
class _ScanPart:
""" Internal: result chunk of a scan operation. """
def __init__(self, new_cursor_pos, items):
self.new_cursor_pos = new_cursor_pos
self.items = items
class PostProcessors:
"""
At the protocol level, we only know about a few basic classes; they
include: bool, int, StatusReply, MultiBulkReply and bytes.
This will return a postprocessor function that turns these into more
meaningful objects.
For some methods, we have several post processors. E.g. a list can be
returned either as a ListReply (which has some special streaming
    functionality) or as a plain Python list.
"""
@classmethod
def get_all(cls, return_type):
"""
Return list of (suffix, return_type, post_processor)
"""
default = cls.get_default(return_type)
alternate = cls.get_alternate_post_processor(return_type)
result = [ ('', return_type, default) ]
if alternate:
result.append(alternate)
return result
@classmethod
def get_default(cls, return_type):
""" Give post processor function for return type. """
return {
ListReply: cls.multibulk_as_list,
SetReply: cls.multibulk_as_set,
DictReply: cls.multibulk_as_dict,
float: cls.bytes_to_float,
(float, NoneType): cls.bytes_to_float_or_none,
NativeType: cls.bytes_to_native,
(NativeType, NoneType): cls.bytes_to_native_or_none,
InfoReply: cls.bytes_to_info,
ClientListReply: cls.bytes_to_clientlist,
str: cls.bytes_to_str,
bool: cls.int_to_bool,
BlockingPopReply: cls.multibulk_as_blocking_pop_reply,
ZRangeReply: cls.multibulk_as_zrangereply,
StatusReply: cls.bytes_to_status_reply,
(StatusReply, NoneType): cls.bytes_to_status_reply_or_none,
int: None,
(int, NoneType): None,
ConfigPairReply: cls.multibulk_as_configpair,
ListOf(bool): cls.multibulk_as_boolean_list,
_ScanPart: cls.multibulk_as_scanpart,
EvalScriptReply: cls.any_to_evalscript,
NoneType: None,
}[return_type]
@classmethod
def get_alternate_post_processor(cls, return_type):
""" For list/set/dict. Create additional post processors that return
python classes rather than ListReply/SetReply/DictReply """
original_post_processor = cls.get_default(return_type)
if return_type == ListReply:
@asyncio.coroutine
def as_list(protocol, result):
result = yield from original_post_processor(protocol, result)
return (yield from result.aslist())
return '_aslist', list, as_list
elif return_type == SetReply:
@asyncio.coroutine
def as_set(protocol, result):
result = yield from original_post_processor(protocol, result)
return (yield from result.asset())
return '_asset', set, as_set
elif return_type in (DictReply, ZRangeReply):
@asyncio.coroutine
def as_dict(protocol, result):
result = yield from original_post_processor(protocol, result)
return (yield from result.asdict())
return '_asdict', dict, as_dict
# === Post processor handlers below. ===
@asyncio.coroutine
def multibulk_as_list(protocol, result):
assert isinstance(result, MultiBulkReply)
return ListReply(result)
@asyncio.coroutine
def multibulk_as_boolean_list(protocol, result):
# Turn the array of integers into booleans.
assert isinstance(result, MultiBulkReply)
values = yield from ListReply(result).aslist()
return [ bool(v) for v in values ]
@asyncio.coroutine
def multibulk_as_set(protocol, result):
assert isinstance(result, MultiBulkReply)
return SetReply(result)
@asyncio.coroutine
def multibulk_as_dict(protocol, result):
assert isinstance(result, MultiBulkReply)
return DictReply(result)
@asyncio.coroutine
def multibulk_as_zrangereply(protocol, result):
assert isinstance(result, MultiBulkReply)
return ZRangeReply(result)
@asyncio.coroutine
def multibulk_as_blocking_pop_reply(protocol, result):
if result is None:
raise TimeoutError('Timeout in blocking pop')
else:
assert isinstance(result, MultiBulkReply)
list_name, value = yield from ListReply(result).aslist()
return BlockingPopReply(list_name, value)
@asyncio.coroutine
def multibulk_as_configpair(protocol, result):
assert isinstance(result, MultiBulkReply)
parameter, value = yield from ListReply(result).aslist()
return ConfigPairReply(parameter, value)
@asyncio.coroutine
def multibulk_as_scanpart(protocol, result):
"""
Process scanpart result.
This is a multibulk reply of length two, where the first item is the
new cursor position and the second item is a nested multi bulk reply
containing all the elements.
"""
# Get outer multi bulk reply.
assert isinstance(result, MultiBulkReply)
new_cursor_pos, items_bulk = yield from ListReply(result).aslist()
assert isinstance(items_bulk, MultiBulkReply)
# Read all items for scan chunk in memory. This is fine, because it's
# transmitted in chunks of about 10.
items = yield from ListReply(items_bulk).aslist()
return _ScanPart(int(new_cursor_pos), items)
@asyncio.coroutine
def bytes_to_info(protocol, result):
assert isinstance(result, bytes)
return InfoReply(result)
@asyncio.coroutine
def bytes_to_status_reply(protocol, result):
assert isinstance(result, bytes)
return StatusReply(result.decode('utf-8'))
@asyncio.coroutine
def bytes_to_status_reply_or_none(protocol, result):
assert isinstance(result, (bytes, NoneType))
if result:
return StatusReply(result.decode('utf-8'))
@asyncio.coroutine
def bytes_to_clientlist(protocol, result):
assert isinstance(result, bytes)
return ClientListReply(result)
@asyncio.coroutine
def int_to_bool(protocol, result):
assert isinstance(result, int)
return bool(result) # Convert int to bool
@asyncio.coroutine
def bytes_to_native(protocol, result):
assert isinstance(result, bytes)
return protocol.decode_to_native(result)
@asyncio.coroutine
def bytes_to_str(protocol, result):
assert isinstance(result, bytes)
return result.decode('ascii')
@asyncio.coroutine
def bytes_to_native_or_none(protocol, result):
if result is None:
return result
else:
assert isinstance(result, bytes)
return protocol.decode_to_native(result)
@asyncio.coroutine
def bytes_to_float_or_none(protocol, result):
if result is None:
return result
assert isinstance(result, bytes)
return float(result)
@asyncio.coroutine
def bytes_to_float(protocol, result):
assert isinstance(result, bytes)
return float(result)
@asyncio.coroutine
def any_to_evalscript(protocol, result):
# Result can be native, int, MultiBulkReply or even a nested structure
assert isinstance(result, (int, bytes, MultiBulkReply, NoneType))
return EvalScriptReply(protocol, result)
class ListOf:
""" Annotation helper for protocol methods. """
def __init__(self, type_):
self.type = type_
def __repr__(self):
return 'ListOf(%r)' % self.type
def __eq__(self, other):
return isinstance(other, ListOf) and other.type == self.type
def __hash__(self):
return hash((ListOf, self.type))
class NativeType:
"""
Constant which represents the native Python type that's used.
"""
def __new__(cls):
raise Exception('NativeType is not meant to be initialized.')
class CommandCreator:
"""
Utility for creating a wrapper around the Redis protocol methods.
This will also do type checking.
This wrapper handles (optionally) post processing of the returned data and
    implements some logic where commands behave differently in case of a
transaction or pubsub.
Warning: We use the annotations of `method` extensively for type checking
and determining which post processor to choose.
"""
def __init__(self, method):
self.method = method
@property
def specs(self):
""" Argspecs """
return getfullargspec(self.method)
@property
def return_type(self):
""" Return type as defined in the method's annotation. """
return self.specs.annotations.get('return', None)
@property
def params(self):
return { k:v for k, v in self.specs.annotations.items() if k != 'return' }
@classmethod
def get_real_type(cls, protocol, type_):
"""
Given a protocol instance, and type annotation, return something that
we can pass to isinstance for the typechecking.
"""
# If NativeType was given, replace it with the type of the protocol
# itself.
if isinstance(type_, tuple):
return tuple(cls.get_real_type(protocol, t) for t in type_)
if type_ == NativeType:
return protocol.native_type
elif isinstance(type_, ListOf):
return (list, types.GeneratorType) # We don't check the content of the list.
else:
return type_
def _create_input_typechecker(self):
""" Return function that does typechecking on input data. """
params = self.params
if params:
def typecheck_input(protocol, *a, **kw):
"""
Given a protocol instance and *a/**kw of this method, raise TypeError
when the signature doesn't match.
"""
if protocol.enable_typechecking:
# All @_command/@_query_command methods can take
# *optionally* a Transaction instance as first argument.
if a and isinstance(a[0], (Transaction, _NoTransactionType)):
a = a[1:]
for name, value in getcallargs(self.method, None, _NoTransaction, *a, **kw).items():
if name in params:
real_type = self.get_real_type(protocol, params[name])
if not isinstance(value, real_type):
raise TypeError('RedisProtocol.%s received %r, expected %r' %
(self.method.__name__, type(value).__name__, real_type))
else:
def typecheck_input(protocol, *a, **kw):
pass
return typecheck_input
def _create_return_typechecker(self, return_type):
""" Return function that does typechecking on output data. """
if return_type and not isinstance(return_type, str): # Exclude 'Transaction'/'Subscription' which are 'str'
def typecheck_return(protocol, result):
"""
Given protocol and result value. Raise TypeError if the result is of the wrong type.
"""
if protocol.enable_typechecking:
expected_type = self.get_real_type(protocol, return_type)
if not isinstance(result, expected_type):
raise TypeError('Got unexpected return type %r in RedisProtocol.%s, expected %r' %
(type(result).__name__, self.method.__name__, expected_type))
else:
def typecheck_return(protocol, result):
pass
return typecheck_return
def _get_docstring(self, suffix, return_type):
# Append the real signature as the first line in the docstring.
# (This will make the sphinx docs show the real signature instead of
# (*a, **kw) of the wrapper.)
# (But don't put the annotations inside the copied signature, that's rather
# ugly in the docs.)
parameters = signature(self.method).parameters
# The below differs from tuple(parameters.keys()) as it preserves the
# * and ** prefixes of variadic arguments
argnames = tuple(str(p).split(':')[0] for p in parameters.values())
# Use function annotations to generate param documentation.
def get_name(type_):
""" Turn type annotation into doc string. """
try:
return {
BlockingPopReply: ":class:`BlockingPopReply <asyncio_redis.replies.BlockingPopReply>`",
ConfigPairReply: ":class:`ConfigPairReply <asyncio_redis.replies.ConfigPairReply>`",
DictReply: ":class:`DictReply <asyncio_redis.replies.DictReply>`",
InfoReply: ":class:`InfoReply <asyncio_redis.replies.InfoReply>`",
ClientListReply: ":class:`InfoReply <asyncio_redis.replies.ClientListReply>`",
ListReply: ":class:`ListReply <asyncio_redis.replies.ListReply>`",
MultiBulkReply: ":class:`MultiBulkReply <asyncio_redis.replies.MultiBulkReply>`",
NativeType: "Native Python type, as defined by :attr:`~asyncio_redis.encoders.BaseEncoder.native_type`",
NoneType: "None",
SetReply: ":class:`SetReply <asyncio_redis.replies.SetReply>`",
StatusReply: ":class:`StatusReply <asyncio_redis.replies.StatusReply>`",
ZRangeReply: ":class:`ZRangeReply <asyncio_redis.replies.ZRangeReply>`",
ZScoreBoundary: ":class:`ZScoreBoundary <asyncio_redis.replies.ZScoreBoundary>`",
EvalScriptReply: ":class:`EvalScriptReply <asyncio_redis.replies.EvalScriptReply>`",
Cursor: ":class:`Cursor <asyncio_redis.cursors.Cursor>`",
SetCursor: ":class:`SetCursor <asyncio_redis.cursors.SetCursor>`",
DictCursor: ":class:`DictCursor <asyncio_redis.cursors.DictCursor>`",
ZCursor: ":class:`ZCursor <asyncio_redis.cursors.ZCursor>`",
_ScanPart: ":class:`_ScanPart",
int: 'int',
bool: 'bool',
dict: 'dict',
float: 'float',
str: 'str',
bytes: 'bytes',
list: 'list',
set: 'set',
# Because of circular references, we cannot use the real types here.
'Transaction': ":class:`asyncio_redis.Transaction`",
'Subscription': ":class:`asyncio_redis.Subscription`",
'Script': ":class:`~asyncio_redis.Script`",
}[type_]
except KeyError:
if isinstance(type_, ListOf):
return "List or iterable of %s" % get_name(type_.type)
if isinstance(type_, tuple):
return ' or '.join(get_name(t) for t in type_)
raise TypeError('Unknown annotation %r' % type_)
def get_param(k, v):
return ':param %s: %s\n' % (k, get_name(v))
params_str = [get_param(k, v) for k, v in self.params.items()]
returns = ':returns: (Future of) %s\n' % get_name(return_type) if return_type else ''
return '%s(%s)\n%s\n\n%s%s' % (
self.method.__name__ + suffix,
', '.join(argnames),
self.method.__doc__,
''.join(params_str),
returns
)
def get_methods(self):
"""
Return all the methods to be used in the RedisProtocol class.
"""
return [ ('', self._get_wrapped_method(None, '', self.return_type)) ]
def _get_wrapped_method(self, post_process, suffix, return_type):
"""
Return the wrapped method for use in the `RedisProtocol` class.
"""
typecheck_input = self._create_input_typechecker()
typecheck_return = self._create_return_typechecker(return_type)
method = self.method
# Wrap it into a check which allows this command to be run either
# directly on the protocol, outside of transactions or from the
# transaction object.
@wraps(method)
@asyncio.coroutine
def wrapper(protocol_self, *a, **kw):
if a and isinstance(a[0], (Transaction, _NoTransactionType)):
transaction = a[0]
a = a[1:]
else:
transaction = _NoTransaction
# When calling from a transaction
if transaction != _NoTransaction:
# In case of a transaction, we receive a Future from the command.
typecheck_input(protocol_self, *a, **kw)
future = yield from method(protocol_self, transaction, *a, **kw)
future2 = Future(loop=protocol_self._loop)
# Typecheck the future when the result is available.
@asyncio.coroutine
def done(result):
if post_process:
result = yield from post_process(protocol_self, result)
typecheck_return(protocol_self, result)
future2.set_result(result)
future.add_done_callback(lambda f: ensure_future(done(f.result()), loop=protocol_self._loop))
return future2
# When calling from a pubsub context
elif protocol_self.in_pubsub:
if not a or a[0] != protocol_self._subscription:
raise Error('Cannot run command inside pubsub subscription.')
else:
typecheck_input(protocol_self, *a[1:], **kw)
result = yield from method(protocol_self, _NoTransaction, *a[1:], **kw)
if post_process:
result = yield from post_process(protocol_self, result)
typecheck_return(protocol_self, result)
return (result)
else:
typecheck_input(protocol_self, *a, **kw)
result = yield from method(protocol_self, _NoTransaction, *a, **kw)
if post_process:
result = yield from post_process(protocol_self, result)
typecheck_return(protocol_self, result)
return result
wrapper.__doc__ = self._get_docstring(suffix, return_type)
return wrapper
class QueryCommandCreator(CommandCreator):
"""
Like `CommandCreator`, but for methods registered with `_query_command`.
These are the methods that cause commands to be sent to the server.
Most commands get a reply from the server that needs to be post
processed into the right Python type. We inspect the
'returns'-annotation here to determine the correct post processor.
"""
def get_methods(self):
# (Some commands, e.g. those that return a ListReply can generate
# multiple protocol methods. One that does return the ListReply, but
# also one with the 'aslist' suffix that returns a Python list.)
all_post_processors = PostProcessors.get_all(self.return_type)
result = []
for suffix, return_type, post_processor in all_post_processors:
result.append( (suffix, self._get_wrapped_method(post_processor, suffix, return_type)) )
return result
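# Illustrative sketch: a query command annotated with `-> ListReply` is
# typically exposed twice on the protocol, once returning the streaming
# ListReply and once with an '_aslist' suffix that returns a plain Python
# list (compare the `zrevrange_asdict` variant referenced in the docstrings
# further down). Which suffixes exist depends on PostProcessors.get_all().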
_SMALL_INTS = list(str(i).encode('ascii') for i in range(1000))
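# Pre-encoding the ASCII representations of 0..999 lets _encode_int() return
# a cached bytes object for the very common small integers (argument counts,
# bulk lengths, small numeric arguments) instead of re-encoding them each time.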
# List of all command methods.
_all_commands = []
class _command:
""" Mark method as command (to be passed through CommandCreator for the
creation of a protocol method) """
creator = CommandCreator
def __init__(self, method):
self.method = method
class _query_command(_command):
"""
Mark method as query command: This will pass through QueryCommandCreator.
NOTE: be sure to choose the correct 'returns'-annotation. This will automatically
determine the correct post processor function in :class:`PostProcessors`.
"""
creator = QueryCommandCreator
def __init__(self, method):
super().__init__(method)
class _RedisProtocolMeta(type):
"""
Metaclass for `RedisProtocol` which expands every method marked with the _command decorator into the actual protocol method(s).
"""
def __new__(cls, name, bases, attrs):
for attr_name, value in dict(attrs).items():
if isinstance(value, _command):
creator = value.creator(value.method)
for suffix, method in creator.get_methods():
attrs[attr_name + suffix] = method
# Register command.
_all_commands.append(attr_name + suffix)
return type.__new__(cls, name, bases, attrs)
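# Illustrative consequence: every method below decorated with @_command or
# @_query_command is replaced at class-creation time by its generated
# wrapper(s), and each resulting name is recorded in _all_commands, which
# Transaction.__getattr__ later consults to decide which attributes to proxy.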
class RedisProtocol(asyncio.Protocol, metaclass=_RedisProtocolMeta):
"""
The Redis Protocol implementation.
::
self.loop = asyncio.get_event_loop()
transport, protocol = yield from loop.create_connection(RedisProtocol, 'localhost', 6379)
:param password: Redis database password
:type password: Native Python type as defined by the ``encoder`` parameter
:param encoder: Encoder to use for encoding to or decoding from redis bytes to a native type.
(Defaults to :class:`~asyncio_redis.encoders.UTF8Encoder`)
:type encoder: :class:`~asyncio_redis.encoders.BaseEncoder` instance.
:param db: Redis database
:type db: int
:param enable_typechecking: When ``True``, check argument types for all
redis commands. Normally you want to have this
enabled.
:type enable_typechecking: bool
"""
def __init__(self, *, password=None, db=0, encoder=None, connection_lost_callback=None, enable_typechecking=True, loop=None):
if encoder is None:
encoder = UTF8Encoder()
assert isinstance(db, int)
assert isinstance(encoder, BaseEncoder)
assert encoder.native_type, 'Encoder.native_type not defined'
assert not password or isinstance(password, encoder.native_type)
self.password = password
self.db = db
self._connection_lost_callback = connection_lost_callback
self._loop = loop or asyncio.get_event_loop()
# Take encode / decode settings from encoder
self.encode_from_native = encoder.encode_from_native
self.decode_to_native = encoder.decode_to_native
self.native_type = encoder.native_type
self.enable_typechecking = enable_typechecking
self.transport = None
self._queue = deque() # Input parser queues
self._messages_queue = None # Pubsub queue
self._is_connected = False # True as long as the underlying transport is connected.
# Pubsub state
self._in_pubsub = False
self._subscription = None
self._pubsub_channels = set() # Set of channels
self._pubsub_patterns = set() # Set of patterns
# Transaction related stuff.
self._transaction_lock = asyncio.Lock(loop=loop)
self._transaction = None
self._transaction_response_queue = None # Transaction answer queue
self._line_received_handlers = {
b'+': self._handle_status_reply,
b'-': self._handle_error_reply,
b'$': self._handle_bulk_reply,
b'*': self._handle_multi_bulk_reply,
b':': self._handle_int_reply,
}
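# The keys above are the RESP (REdis Serialization Protocol) reply markers:
# '+' status, '-' error, '$' bulk string, '*' multi bulk (array), ':' integer.
# For example, a raw b'+OK\r\n' reply is dispatched to _handle_status_reply
# and b':42\r\n' to _handle_int_reply.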
def connection_made(self, transport):
self.transport = transport
self._is_connected = True
logger.log(logging.INFO, 'Redis connection made')
# Pipelined calls
self._pipelined_calls = set() # Set of all the pipelined calls.
# Start parsing reader stream.
self._reader = StreamReader(loop=self._loop)
self._reader.set_transport(transport)
self._reader_f = ensure_future(self._reader_coroutine(), loop=self._loop)
@asyncio.coroutine
def initialize():
# If a password or database was given, first connect to that one.
if self.password:
yield from self.auth(self.password)
if self.db:
yield from self.select(self.db)
# If we are in pubsub mode, send channel subscriptions again.
if self._in_pubsub:
if self._pubsub_channels:
yield from self._subscribe(self._subscription, list(self._pubsub_channels)) # TODO: unittest this
if self._pubsub_patterns:
yield from self._psubscribe(self._subscription, list(self._pubsub_patterns))
ensure_future(initialize(), loop=self._loop)
def data_received(self, data):
""" Process data received from Redis server. """
self._reader.feed_data(data)
def _encode_int(self, value:int) -> bytes:
""" Encodes an integer to bytes. (always ascii) """
if 0 < value < 1000: # For small values, take pre-encoded string.
return _SMALL_INTS[value]
else:
return str(value).encode('ascii')
def _encode_float(self, value:float) -> bytes:
""" Encodes a float to bytes. (always ascii) """
return str(value).encode('ascii')
def _encode_zscore_boundary(self, value:ZScoreBoundary) -> str:
""" Encodes a zscore boundary. (always ascii) """
if isinstance(value.value, str):
return str(value.value).encode('ascii') # +inf and -inf
elif value.exclude_boundary:
return str("(%f" % value.value).encode('ascii')
else:
return str("%f" % value.value).encode('ascii')
def eof_received(self):
logger.log(logging.INFO, 'EOF received in RedisProtocol')
self._reader.feed_eof()
def connection_lost(self, exc):
if exc is None:
self._reader.feed_eof()
else:
logger.info("Connection lost with exception: %s" % exc)
self._reader.set_exception(exc)
if self._reader_f:
self._reader_f.cancel()
self._is_connected = False
self.transport = None
self._reader = None
self._reader_f = None
# Raise exception on all waiting futures.
while self._queue:
f = self._queue.popleft()
if not f.cancelled():
f.set_exception(ConnectionLostError(exc))
logger.log(logging.INFO, 'Redis connection lost')
# Call connection_lost callback
if self._connection_lost_callback:
self._connection_lost_callback()
# Request state
@property
def in_blocking_call(self):
""" True when waiting for answer to blocking command. """
return any(c.is_blocking for c in self._pipelined_calls)
@property
def in_pubsub(self):
""" True when the protocol is in pubsub mode. """
return self._in_pubsub
@property
def in_transaction(self):
""" True when we're inside a transaction. """
return bool(self._transaction)
@property
def in_use(self):
""" True when this protocol is in use. """
return self.in_blocking_call or self.in_pubsub or self.in_transaction
@property
def is_connected(self):
""" True when the underlying transport is connected. """
return self._is_connected
# Handle replies
@asyncio.coroutine
def _reader_coroutine(self):
"""
Coroutine which reads input from the stream reader and processes it.
"""
while True:
try:
yield from self._handle_item(self._push_answer)
except ConnectionLostError:
return
except asyncio.streams.IncompleteReadError:
return
@asyncio.coroutine
def _handle_item(self, cb):
c = yield from self._reader.readexactly(1)
if c:
yield from self._line_received_handlers[c](cb)
else:
raise ConnectionLostError(None)
@asyncio.coroutine
def _handle_status_reply(self, cb):
line = (yield from self._reader.readline()).rstrip(b'\r\n')
cb(line)
@asyncio.coroutine
def _handle_int_reply(self, cb):
line = (yield from self._reader.readline()).rstrip(b'\r\n')
cb(int(line))
@asyncio.coroutine
def _handle_error_reply(self, cb):
line = (yield from self._reader.readline()).rstrip(b'\r\n')
cb(ErrorReply(line.decode('ascii')))
@asyncio.coroutine
def _handle_bulk_reply(self, cb):
length = int((yield from self._reader.readline()).rstrip(b'\r\n'))
if length == -1:
# None bulk reply
cb(None)
else:
# Read data
data = yield from self._reader.readexactly(length)
cb(data)
# Ignore trailing newline.
remaining = yield from self._reader.readline()
assert remaining.rstrip(b'\r\n') == b''
@asyncio.coroutine
def _handle_multi_bulk_reply(self, cb):
# NOTE: the reason for passing the callback `cb` in here is
# mainly because we want to return the result object
# especially in this case before the input is read
# completely. This allows a streaming API.
count = int((yield from self._reader.readline()).rstrip(b'\r\n'))
# Handle multi-bulk none.
# (Used when a transaction exec fails.)
if count == -1:
cb(None)
return
reply = MultiBulkReply(self, count, loop=self._loop)
# Return the empty queue immediately as an answer.
if self._in_pubsub:
ensure_future(self._handle_pubsub_multibulk_reply(reply), loop=self._loop)
else:
cb(reply)
# Wait for all multi bulk reply content.
for i in range(count):
yield from self._handle_item(reply._feed_received)
@asyncio.coroutine
def _handle_pubsub_multibulk_reply(self, multibulk_reply):
# Read first item of the multi bulk reply raw.
type = yield from multibulk_reply._read(decode=False, _one=True)
assert type in (b'message', b'subscribe', b'unsubscribe', b'pmessage', b'psubscribe', b'punsubscribe')
if type == b'message':
channel, value = yield from multibulk_reply._read(count=2)
yield from self._subscription._messages_queue.put(PubSubReply(channel, value))
elif type == b'pmessage':
pattern, channel, value = yield from multibulk_reply._read(count=3)
yield from self._subscription._messages_queue.put(PubSubReply(channel, value, pattern=pattern))
# We can safely ignore 'subscribe'/'unsubscribe' replies at this point,
# they don't contain anything really useful.
# Redis operations.
def _send_command(self, args):
"""
Send Redis request command.
`args` should be a list of bytes to be written to the transport.
"""
# Create write buffer.
data = []
# NOTE: First, I tried to optimize by also flushing this buffer in
# between the looping through the args. However, I removed that as the
# advantage was really small. Even when some commands like `hmset`
# could accept a generator instead of a list/dict, we would need to
# read out the whole generator in memory in order to write the number
# of arguments first.
# Serialize and write header (number of arguments.)
data += [ b'*', self._encode_int(len(args)), b'\r\n' ]
# Write arguments.
for arg in args:
data += [ b'$', self._encode_int(len(arg)), b'\r\n', arg, b'\r\n' ]
# Flush the last part
self.transport.write(b''.join(data))
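# Illustrative wire format: _send_command([b'SET', b'key', b'value']) writes
#   b'*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n'
# i.e. the argument count followed by one length-prefixed bulk string per
# argument, exactly as the Redis protocol expects.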
@asyncio.coroutine
def _get_answer(self, transaction, answer_f, _bypass=False, call=None): # XXX: rename _bypass to not_queued
"""
Return an answer to the pipelined query.
(Or when we are in a transaction, return a future for the answer.)
"""
# Wait for the answer to come in
result = yield from answer_f
if transaction != _NoTransaction and not _bypass:
# When the connection is inside a transaction, the query will be queued.
if result != b'QUEUED':
raise Error('Expected to receive QUEUED for query in transaction, received %r.' % result)
# Return a future which will contain the result when it arrives.
f = Future(loop=self._loop)
self._transaction_response_queue.append( (f, call) )
return f
else:
if call:
self._pipelined_calls.remove(call)
return result
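# Note on the transaction path above: inside MULTI/EXEC Redis acknowledges
# every queued command with b'QUEUED'; the real reply only arrives as part of
# the EXEC response, so the caller receives a Future here which _exec() later
# resolves from the multi bulk EXEC reply.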
def _push_answer(self, answer):
"""
Answer future at the queue.
"""
f = self._queue.popleft()
if isinstance(answer, Exception):
f.set_exception(answer)
elif f.cancelled():
# Received an answer from Redis for a query whose `Future` was
# already cancelled. Don't call set_result; that would raise an
# `InvalidStateError`.
pass
else:
f.set_result(answer)
@asyncio.coroutine
def _query(self, transaction, *args, _bypass=False, set_blocking=False):
"""
Wrapper around both _send_command and _get_answer.
Coroutine that sends the query to the server, and returns the reply.
(Where the reply is a simple Redis type: these are `int`,
`StatusReply`, `bytes` or `MultiBulkReply`) When we are in a transaction,
this coroutine will return a `Future` of the actual result.
"""
assert transaction == _NoTransaction or isinstance(transaction, Transaction)
if not self._is_connected:
raise NotConnectedError
# Get lock.
if transaction == _NoTransaction:
yield from self._transaction_lock.acquire()
else:
assert transaction == self._transaction
try:
call = PipelinedCall(args[0], set_blocking)
self._pipelined_calls.add(call)
# Add a new future to our answer queue.
answer_f = Future(loop=self._loop)
self._queue.append(answer_f)
# Send command
self._send_command(args)
finally:
# Release lock.
if transaction == _NoTransaction:
self._transaction_lock.release()
# TODO: when set_blocking=True, only release lock after reading the answer.
# (it doesn't make sense to free the input and pipeline commands in that case.)
# Receive answer.
result = yield from self._get_answer(transaction, answer_f, _bypass=_bypass, call=call)
return result
# Internal
@_query_command
def auth(self, tr, password:NativeType) -> StatusReply:
""" Authenticate to the server """
self.password = password
return self._query(tr, b'auth', self.encode_from_native(password))
@_query_command
def select(self, tr, db:int) -> StatusReply:
""" Change the selected database for the current connection """
self.db = db
return self._query(tr, b'select', self._encode_int(db))
# Strings
@_query_command
def set(self, tr, key:NativeType, value:NativeType,
expire:(int, NoneType)=None, pexpire:(int, NoneType)=None,
only_if_not_exists:bool=False, only_if_exists:bool=False) -> (StatusReply, NoneType):
"""
Set the string value of a key
::
yield from protocol.set('key', 'value')
result = yield from protocol.get('key')
assert result == 'value'
To set a value and its expiration, only if the key does not exist, do:
::
yield from protocol.set('key', 'value', expire=1, only_if_not_exists=True)
This will send ``SET key value EX 1 NX`` over the network.
To set a value and its expiration in milliseconds, but only if the key already exists:
::
yield from protocol.set('key', 'value', pexpire=1000, only_if_exists=True)
"""
params = [
b'set',
self.encode_from_native(key),
self.encode_from_native(value)
]
if expire is not None:
params.extend((b'ex', self._encode_int(expire)))
if pexpire is not None:
params.extend((b'px', self._encode_int(pexpire)))
if only_if_not_exists and only_if_exists:
raise ValueError("only_if_not_exists and only_if_exists cannot be true simultaneously")
if only_if_not_exists:
params.append(b'nx')
if only_if_exists:
params.append(b'xx')
return self._query(tr, *params)
@_query_command
def setex(self, tr, key:NativeType, seconds:int, value:NativeType) -> StatusReply:
""" Set the string value of a key with expire """
return self._query(tr, b'setex', self.encode_from_native(key),
self._encode_int(seconds), self.encode_from_native(value))
@_query_command
def setnx(self, tr, key:NativeType, value:NativeType) -> bool:
""" Set the string value of a key if it does not exist.
Returns True if value is successfully set """
return self._query(tr, b'setnx', self.encode_from_native(key), self.encode_from_native(value))
@_query_command
def get(self, tr, key:NativeType) -> (NativeType, NoneType):
""" Get the value of a key """
return self._query(tr, b'get', self.encode_from_native(key))
@_query_command
def mget(self, tr, keys:ListOf(NativeType)) -> ListReply:
""" Returns the values of all specified keys. """
return self._query(tr, b'mget', *map(self.encode_from_native, keys))
@_query_command
def strlen(self, tr, key:NativeType) -> int:
""" Returns the length of the string value stored at key. An error is
returned when key holds a non-string value. """
return self._query(tr, b'strlen', self.encode_from_native(key))
@_query_command
def append(self, tr, key:NativeType, value:NativeType) -> int:
""" Append a value to a key """
return self._query(tr, b'append', self.encode_from_native(key), self.encode_from_native(value))
@_query_command
def getset(self, tr, key:NativeType, value:NativeType) -> (NativeType, NoneType):
""" Set the string value of a key and return its old value """
return self._query(tr, b'getset', self.encode_from_native(key), self.encode_from_native(value))
@_query_command
def incr(self, tr, key:NativeType) -> int:
""" Increment the integer value of a key by one """
return self._query(tr, b'incr', self.encode_from_native(key))
@_query_command
def incrby(self, tr, key:NativeType, increment:int) -> int:
""" Increment the integer value of a key by the given amount """
return self._query(tr, b'incrby', self.encode_from_native(key), self._encode_int(increment))
@_query_command
def decr(self, tr, key:NativeType) -> int:
""" Decrement the integer value of a key by one """
return self._query(tr, b'decr', self.encode_from_native(key))
@_query_command
def decrby(self, tr, key:NativeType, increment:int) -> int:
""" Decrement the integer value of a key by the given number """
return self._query(tr, b'decrby', self.encode_from_native(key), self._encode_int(increment))
@_query_command
def randomkey(self, tr) -> NativeType:
""" Return a random key from the keyspace """
return self._query(tr, b'randomkey')
@_query_command
def exists(self, tr, key:NativeType) -> bool:
""" Determine if a key exists """
return self._query(tr, b'exists', self.encode_from_native(key))
@_query_command
def delete(self, tr, keys:ListOf(NativeType)) -> int:
""" Delete a key """
return self._query(tr, b'del', *map(self.encode_from_native, keys))
@_query_command
def move(self, tr, key:NativeType, database:int) -> int:
""" Move a key to another database """
return self._query(tr, b'move', self.encode_from_native(key), self._encode_int(database)) # TODO: unittest
@_query_command
def rename(self, tr, key:NativeType, newkey:NativeType) -> StatusReply:
""" Rename a key """
return self._query(tr, b'rename', self.encode_from_native(key), self.encode_from_native(newkey))
@_query_command
def renamenx(self, tr, key:NativeType, newkey:NativeType) -> int:
""" Rename a key, only if the new key does not exist
(Returns 1 if the key was successfully renamed.) """
return self._query(tr, b'renamenx', self.encode_from_native(key), self.encode_from_native(newkey))
@_query_command
def bitop_and(self, tr, destkey:NativeType, srckeys:ListOf(NativeType)) -> int:
""" Perform a bitwise AND operation between multiple keys. """
return self._bitop(tr, b'and', destkey, srckeys)
@_query_command
def bitop_or(self, tr, destkey:NativeType, srckeys:ListOf(NativeType)) -> int:
""" Perform a bitwise OR operation between multiple keys. """
return self._bitop(tr, b'or', destkey, srckeys)
@_query_command
def bitop_xor(self, tr, destkey:NativeType, srckeys:ListOf(NativeType)) -> int:
""" Perform a bitwise XOR operation between multiple keys. """
return self._bitop(tr, b'xor', destkey, srckeys)
def _bitop(self, tr, op, destkey, srckeys):
return self._query(tr, b'bitop', op, self.encode_from_native(destkey), *map(self.encode_from_native, srckeys))
@_query_command
def bitop_not(self, tr, destkey:NativeType, key:NativeType) -> int:
""" Perform a bitwise NOT operation between multiple keys. """
return self._query(tr, b'bitop', b'not', self.encode_from_native(destkey), self.encode_from_native(key))
@_query_command
def bitcount(self, tr, key:NativeType, start:int=0, end:int=-1) -> int:
""" Count the number of set bits (population counting) in a string. """
return self._query(tr, b'bitcount', self.encode_from_native(key), self._encode_int(start), self._encode_int(end))
@_query_command
def getbit(self, tr, key:NativeType, offset:int) -> bool:
""" Returns the bit value at offset in the string value stored at key """
return self._query(tr, b'getbit', self.encode_from_native(key), self._encode_int(offset))
@_query_command
def setbit(self, tr, key:NativeType, offset:int, value:bool) -> bool:
""" Sets or clears the bit at offset in the string value stored at key """
return self._query(tr, b'setbit', self.encode_from_native(key), self._encode_int(offset),
self._encode_int(int(value)))
# Keys
@_query_command
def keys(self, tr, pattern:NativeType) -> ListReply:
"""
Find all keys matching the given pattern.
.. note:: Also take a look at :func:`~asyncio_redis.RedisProtocol.scan`.
"""
return self._query(tr, b'keys', self.encode_from_native(pattern))
# @_query_command
# def dump(self, key:NativeType):
# """ Return a serialized version of the value stored at the specified key. """
# # Dump does not work yet. It shouldn't be decoded using utf-8.
# raise NotImplementedError('Not supported.')
@_query_command
def expire(self, tr, key:NativeType, seconds:int) -> int:
""" Set a key's time to live in seconds """
return self._query(tr, b'expire', self.encode_from_native(key), self._encode_int(seconds))
@_query_command
def pexpire(self, tr, key:NativeType, milliseconds:int) -> int:
""" Set a key's time to live in milliseconds """
return self._query(tr, b'pexpire', self.encode_from_native(key), self._encode_int(milliseconds))
@_query_command
def expireat(self, tr, key:NativeType, timestamp:int) -> int:
""" Set the expiration for a key as a UNIX timestamp """
return self._query(tr, b'expireat', self.encode_from_native(key), self._encode_int(timestamp))
@_query_command
def pexpireat(self, tr, key:NativeType, milliseconds_timestamp:int) -> int:
""" Set the expiration for a key as a UNIX timestamp specified in milliseconds """
return self._query(tr, b'pexpireat', self.encode_from_native(key), self._encode_int(milliseconds_timestamp))
@_query_command
def persist(self, tr, key:NativeType) -> int:
""" Remove the expiration from a key """
return self._query(tr, b'persist', self.encode_from_native(key))
@_query_command
def ttl(self, tr, key:NativeType) -> int:
""" Get the time to live for a key """
return self._query(tr, b'ttl', self.encode_from_native(key))
@_query_command
def pttl(self, tr, key:NativeType) -> int:
""" Get the time to live for a key in milliseconds """
return self._query(tr, b'pttl', self.encode_from_native(key))
# Set operations
@_query_command
def sadd(self, tr, key:NativeType, members:ListOf(NativeType)) -> int:
""" Add one or more members to a set """
return self._query(tr, b'sadd', self.encode_from_native(key), *map(self.encode_from_native, members))
@_query_command
def srem(self, tr, key:NativeType, members:ListOf(NativeType)) -> int:
""" Remove one or more members from a set """
return self._query(tr, b'srem', self.encode_from_native(key), *map(self.encode_from_native, members))
@_query_command
def spop(self, tr, key:NativeType) -> (NativeType, NoneType):
""" Removes and returns a random element from the set value stored at key. """
return self._query(tr, b'spop', self.encode_from_native(key))
@_query_command
def srandmember(self, tr, key:NativeType, count:int=1) -> SetReply:
""" Get one or multiple random members from a set
(Returns a list of members, even when count==1) """
return self._query(tr, b'srandmember', self.encode_from_native(key), self._encode_int(count))
@_query_command
def sismember(self, tr, key:NativeType, value:NativeType) -> bool:
""" Determine if a given value is a member of a set """
return self._query(tr, b'sismember', self.encode_from_native(key), self.encode_from_native(value))
@_query_command
def scard(self, tr, key:NativeType) -> int:
""" Get the number of members in a set """
return self._query(tr, b'scard', self.encode_from_native(key))
@_query_command
def smembers(self, tr, key:NativeType) -> SetReply:
""" Get all the members in a set """
return self._query(tr, b'smembers', self.encode_from_native(key))
@_query_command
def sinter(self, tr, keys:ListOf(NativeType)) -> SetReply:
""" Intersect multiple sets """
return self._query(tr, b'sinter', *map(self.encode_from_native, keys))
@_query_command
def sinterstore(self, tr, destination:NativeType, keys:ListOf(NativeType)) -> int:
""" Intersect multiple sets and store the resulting set in a key """
return self._query(tr, b'sinterstore', self.encode_from_native(destination), *map(self.encode_from_native, keys))
@_query_command
def sdiff(self, tr, keys:ListOf(NativeType)) -> SetReply:
""" Subtract multiple sets """
return self._query(tr, b'sdiff', *map(self.encode_from_native, keys))
@_query_command
def sdiffstore(self, tr, destination:NativeType, keys:ListOf(NativeType)) -> int:
""" Subtract multiple sets and store the resulting set in a key """
return self._query(tr, b'sdiffstore', self.encode_from_native(destination),
*map(self.encode_from_native, keys))
@_query_command
def sunion(self, tr, keys:ListOf(NativeType)) -> SetReply:
""" Add multiple sets """
return self._query(tr, b'sunion', *map(self.encode_from_native, keys))
@_query_command
def sunionstore(self, tr, destination:NativeType, keys:ListOf(NativeType)) -> int:
""" Add multiple sets and store the resulting set in a key """
return self._query(tr, b'sunionstore', self.encode_from_native(destination), *map(self.encode_from_native, keys))
@_query_command
def smove(self, tr, source:NativeType, destination:NativeType, value:NativeType) -> int:
""" Move a member from one set to another """
return self._query(tr, b'smove', self.encode_from_native(source), self.encode_from_native(destination), self.encode_from_native(value))
# List operations
@_query_command
def lpush(self, tr, key:NativeType, values:ListOf(NativeType)) -> int:
""" Prepend one or multiple values to a list """
return self._query(tr, b'lpush', self.encode_from_native(key), *map(self.encode_from_native, values))
@_query_command
def lpushx(self, tr, key:NativeType, value:NativeType) -> int:
""" Prepend a value to a list, only if the list exists """
return self._query(tr, b'lpushx', self.encode_from_native(key), self.encode_from_native(value))
@_query_command
def rpush(self, tr, key:NativeType, values:ListOf(NativeType)) -> int:
""" Append one or multiple values to a list """
return self._query(tr, b'rpush', self.encode_from_native(key), *map(self.encode_from_native, values))
@_query_command
def rpushx(self, tr, key:NativeType, value:NativeType) -> int:
""" Append a value to a list, only if the list exists """
return self._query(tr, b'rpushx', self.encode_from_native(key), self.encode_from_native(value))
@_query_command
def llen(self, tr, key:NativeType) -> int:
""" Returns the length of the list stored at key. """
return self._query(tr, b'llen', self.encode_from_native(key))
@_query_command
def lrem(self, tr, key:NativeType, count:int=0, value='') -> int:
""" Remove elements from a list """
return self._query(tr, b'lrem', self.encode_from_native(key), self._encode_int(count), self.encode_from_native(value))
@_query_command
def lrange(self, tr, key, start:int=0, stop:int=-1) -> ListReply:
""" Get a range of elements from a list. """
return self._query(tr, b'lrange', self.encode_from_native(key), self._encode_int(start), self._encode_int(stop))
@_query_command
def ltrim(self, tr, key:NativeType, start:int=0, stop:int=-1) -> StatusReply:
""" Trim a list to the specified range """
return self._query(tr, b'ltrim', self.encode_from_native(key), self._encode_int(start), self._encode_int(stop))
@_query_command
def lpop(self, tr, key:NativeType) -> (NativeType, NoneType):
""" Remove and get the first element in a list """
return self._query(tr, b'lpop', self.encode_from_native(key))
@_query_command
def rpop(self, tr, key:NativeType) -> (NativeType, NoneType):
""" Remove and get the last element in a list """
return self._query(tr, b'rpop', self.encode_from_native(key))
@_query_command
def rpoplpush(self, tr, source:NativeType, destination:NativeType) -> (NativeType, NoneType):
""" Remove the last element in a list, append it to another list and return it """
return self._query(tr, b'rpoplpush', self.encode_from_native(source), self.encode_from_native(destination))
@_query_command
def lindex(self, tr, key:NativeType, index:int) -> (NativeType, NoneType):
""" Get an element from a list by its index """
return self._query(tr, b'lindex', self.encode_from_native(key), self._encode_int(index))
@_query_command
def blpop(self, tr, keys:ListOf(NativeType), timeout:int=0) -> BlockingPopReply:
""" Remove and get the first element in a list, or block until one is available.
This will raise :class:`~asyncio_redis.exceptions.TimeoutError` when
the timeout was exceeded and Redis returns `None`. """
return self._blocking_pop(tr, b'blpop', keys, timeout=timeout)
@_query_command
def brpop(self, tr, keys:ListOf(NativeType), timeout:int=0) -> BlockingPopReply:
""" Remove and get the last element in a list, or block until one is available.
This will raise :class:`~asyncio_redis.exceptions.TimeoutError` when
the timeout was exceeded and Redis returns `None`. """
return self._blocking_pop(tr, b'brpop', keys, timeout=timeout)
def _blocking_pop(self, tr, command, keys, timeout:int=0):
return self._query(tr, command, *([ self.encode_from_native(k) for k in keys ] + [self._encode_int(timeout)]), set_blocking=True)
@_command
@asyncio.coroutine
def brpoplpush(self, tr, source:NativeType, destination:NativeType, timeout:int=0) -> NativeType:
""" Pop a value from a list, push it to another list and return it; or block until one is available """
result = yield from self._query(tr, b'brpoplpush', self.encode_from_native(source), self.encode_from_native(destination),
self._encode_int(timeout), set_blocking=True)
if result is None:
raise TimeoutError('Timeout in brpoplpush')
else:
assert isinstance(result, bytes)
return self.decode_to_native(result)
@_query_command
def lset(self, tr, key:NativeType, index:int, value:NativeType) -> StatusReply:
""" Set the value of an element in a list by its index. """
return self._query(tr, b'lset', self.encode_from_native(key), self._encode_int(index), self.encode_from_native(value))
@_query_command
def linsert(self, tr, key:NativeType, pivot:NativeType, value:NativeType, before=False) -> int:
""" Insert an element before or after another element in a list """
return self._query(tr, b'linsert', self.encode_from_native(key), (b'BEFORE' if before else b'AFTER'),
self.encode_from_native(pivot), self.encode_from_native(value))
# Sorted Sets
@_query_command
def zadd(self, tr, key:NativeType, values:dict, only_if_not_exists=False, only_if_exists=False, return_num_changed=False) -> int:
"""
Add one or more members to a sorted set, or update its score if it already exists
::
yield protocol.zadd('myzset', { 'key': 4, 'key2': 5 })
"""
options = [ ]
assert not (only_if_not_exists and only_if_exists)
if only_if_not_exists:
options.append(b'NX')
elif only_if_exists:
options.append(b'XX')
if return_num_changed:
options.append(b'CH')
data = [ ]
for k,score in values.items():
assert isinstance(k, self.native_type)
assert isinstance(score, (int, float))
data.append(self._encode_float(score))
data.append(self.encode_from_native(k))
return self._query(tr, b'zadd', self.encode_from_native(key), *(options + data))
@_query_command
def zpopmin(self, tr, key:NativeType, count:int=1) -> ZRangeReply:
"""
Remove and return up to ``count`` members with the lowest scores in the sorted set.
You can do the following to receive the slice of the sorted set as a
python dict (mapping the keys to their scores):
::
result = yield protocol.zpopmin('myzset', count=10)
my_dict = yield result.asdict()
"""
return self._query(tr, b'zpopmin', self.encode_from_native(key), self._encode_int(count))
@_query_command
def zrange(self, tr, key:NativeType, start:int=0, stop:int=-1) -> ZRangeReply:
"""
Return a range of members in a sorted set, by index.
You can do the following to receive the slice of the sorted set as a
python dict (mapping the keys to their scores):
::
result = yield protocol.zrange('myzset', start=10, stop=20)
my_dict = yield result.asdict()
or the following to retrieve it as a list of keys:
::
result = yield protocol.zrange('myzset', start=10, stop=20)
my_list = yield result.aslist()
"""
return self._query(tr, b'zrange', self.encode_from_native(key),
self._encode_int(start), self._encode_int(stop), b'withscores')
@_query_command
def zrangebylex(self, tr, key:NativeType, start:str, stop:str) -> SetReply:
"""
Return a range of members in a sorted set, by lexicographical range.
You can do the following to receive the slice of the sorted set as a
python set:
::
result = yield protocol.zrangebylex('myzset', start='-', stop='[c')
my_set = yield result.asset()
or the following to retrieve it as a list of keys:
::
result = yield protocol.zrangebylex('myzset', start='-', stop='[c')
my_list = yield result.aslist()
"""
return self._query(tr, b'zrangebylex', self.encode_from_native(key),
self.encode_from_native(start), self.encode_from_native(stop))
@_query_command
def zrevrange(self, tr, key:NativeType, start:int=0, stop:int=-1) -> ZRangeReply:
"""
Return a range of members in a reversed sorted set, by index.
You can do the following to receive the slice of the sorted set as a
python dict (mapping the keys to their scores):
::
my_dict = yield protocol.zrevrange_asdict('myzset', start=10, stop=20)
or the following to retrieve it as a list of keys:
::
zrange_reply = yield protocol.zrevrange('myzset', start=10, stop=20)
my_list = yield zrange_reply.aslist()
"""
return self._query(tr, b'zrevrange', self.encode_from_native(key),
self._encode_int(start), self._encode_int(stop), b'withscores')
@_query_command
def zrangebyscore(self, tr, key:NativeType,
min:ZScoreBoundary=ZScoreBoundary.MIN_VALUE,
max:ZScoreBoundary=ZScoreBoundary.MAX_VALUE,
offset:int=0, limit:int=-1) -> ZRangeReply:
""" Return a range of members in a sorted set, by score """
return self._query(tr, b'zrangebyscore', self.encode_from_native(key),
self._encode_zscore_boundary(min), self._encode_zscore_boundary(max),
b'limit', self._encode_int(offset), self._encode_int(limit),
b'withscores')
@_query_command
def zrevrangebyscore(self, tr, key:NativeType,
max:ZScoreBoundary=ZScoreBoundary.MAX_VALUE,
min:ZScoreBoundary=ZScoreBoundary.MIN_VALUE,
offset:int=0, limit:int=-1) -> ZRangeReply:
""" Return a range of members in a sorted set, by score, with scores ordered from high to low """
return self._query(tr, b'zrevrangebyscore', self.encode_from_native(key),
self._encode_zscore_boundary(max), self._encode_zscore_boundary(min),
b'limit', self._encode_int(offset), self._encode_int(limit),
b'withscores')
@_query_command
def zremrangebyscore(self, tr, key:NativeType,
min:ZScoreBoundary=ZScoreBoundary.MIN_VALUE,
max:ZScoreBoundary=ZScoreBoundary.MAX_VALUE) -> int:
""" Remove all members in a sorted set within the given scores """
return self._query(tr, b'zremrangebyscore', self.encode_from_native(key),
self._encode_zscore_boundary(min), self._encode_zscore_boundary(max))
@_query_command
def zremrangebyrank(self, tr, key:NativeType, min:int=0, max:int=-1) -> int:
""" Remove all members in a sorted set within the given indexes """
return self._query(tr, b'zremrangebyrank', self.encode_from_native(key),
self._encode_int(min), self._encode_int(max))
@_query_command
def zcount(self, tr, key:NativeType, min:ZScoreBoundary, max:ZScoreBoundary) -> int:
""" Count the members in a sorted set with scores within the given values """
return self._query(tr, b'zcount', self.encode_from_native(key),
self._encode_zscore_boundary(min), self._encode_zscore_boundary(max))
@_query_command
def zscore(self, tr, key:NativeType, member:NativeType) -> (float, NoneType):
""" Get the score associated with the given member in a sorted set """
return self._query(tr, b'zscore', self.encode_from_native(key), self.encode_from_native(member))
@_query_command
def zunionstore(self, tr, destination:NativeType, keys:ListOf(NativeType), weights:(NoneType,ListOf(float))=None,
aggregate=ZAggregate.SUM) -> int:
""" Add multiple sorted sets and store the resulting sorted set in a new key """
return self._zstore(tr, b'zunionstore', destination, keys, weights, aggregate)
@_query_command
def zinterstore(self, tr, destination:NativeType, keys:ListOf(NativeType), weights:(NoneType,ListOf(float))=None,
aggregate=ZAggregate.SUM) -> int:
""" Intersect multiple sorted sets and store the resulting sorted set in a new key """
return self._zstore(tr, b'zinterstore', destination, keys, weights, aggregate)
def _zstore(self, tr, command, destination, keys, weights, aggregate):
""" Common part for zunionstore and zinterstore. """
numkeys = len(keys)
if weights is None:
weights = [1] * numkeys
return self._query(tr, *
[ command, self.encode_from_native(destination), self._encode_int(numkeys) ] +
list(map(self.encode_from_native, keys)) +
[ b'weights' ] +
list(map(self._encode_float, weights)) +
[ b'aggregate' ] +
[ {
ZAggregate.SUM: b'SUM',
ZAggregate.MIN: b'MIN',
ZAggregate.MAX: b'MAX' }[aggregate]
] )
@_query_command
def zcard(self, tr, key:NativeType) -> int:
""" Get the number of members in a sorted set """
return self._query(tr, b'zcard', self.encode_from_native(key))
@_query_command
def zrank(self, tr, key:NativeType, member:NativeType) -> (int, NoneType):
""" Determine the index of a member in a sorted set """
return self._query(tr, b'zrank', self.encode_from_native(key), self.encode_from_native(member))
@_query_command
def zrevrank(self, tr, key:NativeType, member:NativeType) -> (int, NoneType):
""" Determine the index of a member in a sorted set, with scores ordered from high to low """
return self._query(tr, b'zrevrank', self.encode_from_native(key), self.encode_from_native(member))
@_query_command
def zincrby(self, tr, key:NativeType, increment:float, member:NativeType, only_if_exists=False) -> (float, NoneType):
""" Increment the score of a member in a sorted set """
if only_if_exists:
return self._query(tr, b'zadd', self.encode_from_native(key), b'xx', b'incr',
self._encode_float(increment), self.encode_from_native(member))
else:
return self._query(tr, b'zincrby', self.encode_from_native(key),
self._encode_float(increment), self.encode_from_native(member))
@_query_command
def zrem(self, tr, key:NativeType, members:ListOf(NativeType)) -> int:
""" Remove one or more members from a sorted set """
return self._query(tr, b'zrem', self.encode_from_native(key), *map(self.encode_from_native, members))
# Hashes
@_query_command
def hset(self, tr, key:NativeType, field:NativeType, value:NativeType) -> int:
""" Set the string value of a hash field """
return self._query(tr, b'hset', self.encode_from_native(key), self.encode_from_native(field), self.encode_from_native(value))
@_query_command
def hmset(self, tr, key:NativeType, values:dict) -> StatusReply:
""" Set multiple hash fields to multiple values """
data = [ ]
for k,v in values.items():
assert isinstance(k, self.native_type)
assert isinstance(v, self.native_type)
data.append(self.encode_from_native(k))
data.append(self.encode_from_native(v))
return self._query(tr, b'hmset', self.encode_from_native(key), *data)
@_query_command
def hsetnx(self, tr, key:NativeType, field:NativeType, value:NativeType) -> int:
""" Set the value of a hash field, only if the field does not exist """
return self._query(tr, b'hsetnx', self.encode_from_native(key), self.encode_from_native(field), self.encode_from_native(value))
@_query_command
def hdel(self, tr, key:NativeType, fields:ListOf(NativeType)) -> int:
""" Delete one or more hash fields """
return self._query(tr, b'hdel', self.encode_from_native(key), *map(self.encode_from_native, fields))
@_query_command
def hget(self, tr, key:NativeType, field:NativeType) -> (NativeType, NoneType):
""" Get the value of a hash field """
return self._query(tr, b'hget', self.encode_from_native(key), self.encode_from_native(field))
@_query_command
def hexists(self, tr, key:NativeType, field:NativeType) -> bool:
""" Returns if field is an existing field in the hash stored at key. """
return self._query(tr, b'hexists', self.encode_from_native(key), self.encode_from_native(field))
@_query_command
def hkeys(self, tr, key:NativeType) -> SetReply:
""" Get all the keys in a hash. (Returns a set) """
return self._query(tr, b'hkeys', self.encode_from_native(key))
@_query_command
def hvals(self, tr, key:NativeType) -> ListReply:
""" Get all the values in a hash. (Returns a list) """
return self._query(tr, b'hvals', self.encode_from_native(key))
@_query_command
def hlen(self, tr, key:NativeType) -> int:
""" Returns the number of fields contained in the hash stored at key. """
return self._query(tr, b'hlen', self.encode_from_native(key))
@_query_command
def hgetall(self, tr, key:NativeType) -> DictReply:
""" Get the value of a hash field """
return self._query(tr, b'hgetall', self.encode_from_native(key))
@_query_command
def hmget(self, tr, key:NativeType, fields:ListOf(NativeType)) -> ListReply:
""" Get the values of all the given hash fields """
return self._query(tr, b'hmget', self.encode_from_native(key), *map(self.encode_from_native, fields))
@_query_command
def hincrby(self, tr, key:NativeType, field:NativeType, increment) -> int:
""" Increment the integer value of a hash field by the given number
Returns: the value at field after the increment operation. """
assert isinstance(increment, int)
return self._query(tr, b'hincrby', self.encode_from_native(key), self.encode_from_native(field), self._encode_int(increment))
@_query_command
def hincrbyfloat(self, tr, key:NativeType, field:NativeType, increment:(int,float)) -> float:
""" Increment the float value of a hash field by the given amount
Returns: the value at field after the increment operation. """
return self._query(tr, b'hincrbyfloat', self.encode_from_native(key), self.encode_from_native(field), self._encode_float(increment))
# Pubsub
# (subscribe, unsubscribe, etc... should be called through the Subscription class.)
@_command
def start_subscribe(self, tr, *a) -> 'Subscription':
"""
Start a pubsub listener.
::
# Create subscription
subscription = yield from protocol.start_subscribe()
yield from subscription.subscribe(['key'])
yield from subscription.psubscribe(['pattern*'])
while True:
result = yield from subscription.next_published()
print(result)
:returns: :class:`~asyncio_redis.Subscription`
"""
# (Make this a coroutine without using @asyncio.coroutine: that decorator
# wraps the function with @functools.wraps into a generator, after which
# _command can no longer read the original signature for the documentation.)
if False: yield
if self.in_use:
raise Error('Cannot start pubsub listener when a protocol is in use.')
subscription = Subscription(self)
self._in_pubsub = True
self._subscription = subscription
return subscription
@_command
def _subscribe(self, tr, channels:ListOf(NativeType)) -> NoneType:
""" Listen for messages published to the given channels """
self._pubsub_channels |= set(channels)
return self._pubsub_method('subscribe', channels)
@_command
def _unsubscribe(self, tr, channels:ListOf(NativeType)) -> NoneType:
""" Stop listening for messages posted to the given channels """
self._pubsub_channels -= set(channels)
return self._pubsub_method('unsubscribe', channels)
@_command
def _psubscribe(self, tr, patterns:ListOf(NativeType)) -> NoneType:
""" Listen for messages published to channels matching the given patterns """
self._pubsub_patterns |= set(patterns)
return self._pubsub_method('psubscribe', patterns)
@_command
def _punsubscribe(self, tr, patterns:ListOf(NativeType)) -> NoneType: # XXX: unittest
""" Stop listening for messages posted to channels matching the given patterns """
self._pubsub_patterns -= set(patterns)
return self._pubsub_method('punsubscribe', patterns)
@asyncio.coroutine
def _pubsub_method(self, method, params):
if not self._in_pubsub:
raise Error('Cannot call pubsub methods without calling start_subscribe')
# Send
self._send_command([method.encode('ascii')] + list(map(self.encode_from_native, params)))
# Note that we can't use `self._query` here. The reason is that one
# subscribe/unsubscribe command returns a separate answer for every
# parameter. It doesn't fit in the same model of all the other queries
# where one query puts a Future on the queue that is replied with the
# incoming answer.
# Redis returns something like [ 'subscribe', 'channel_name', 1] for
# each parameter, but we can safely ignore those replies.
@_query_command
def publish(self, tr, channel:NativeType, message:NativeType) -> int:
""" Post a message to a channel
(Returns the number of clients that received this message.) """
return self._query(tr, b'publish', self.encode_from_native(channel), self.encode_from_native(message))
@_query_command
def pubsub_channels(self, tr, pattern:(NativeType, NoneType)=None) -> ListReply:
"""
Lists the currently active channels. An active channel is a Pub/Sub
channel with one or more subscribers (not including clients subscribed
to patterns).
"""
return self._query(tr, b'pubsub', b'channels',
(self.encode_from_native(pattern) if pattern else b'*'))
@_query_command
def pubsub_numsub(self, tr, channels:ListOf(NativeType)) -> DictReply:
"""Returns the number of subscribers (not counting clients subscribed
to patterns) for the specified channels. """
return self._query(tr, b'pubsub', b'numsub', *[ self.encode_from_native(c) for c in channels ])
@_query_command
def pubsub_numpat(self, tr) -> int:
""" Returns the number of subscriptions to patterns (that are performed
using the PSUBSCRIBE command). Note that this is not just the count of
clients subscribed to patterns but the total number of patterns all the
clients are subscribed to. """
return self._query(tr, b'pubsub', b'numpat')
# Server
@_query_command
def ping(self, tr) -> StatusReply:
""" Ping the server (Returns PONG) """
return self._query(tr, b'ping')
@_query_command
def echo(self, tr, string:NativeType) -> NativeType:
""" Echo the given string """
return self._query(tr, b'echo', self.encode_from_native(string))
@_query_command
def save(self, tr) -> StatusReply:
""" Synchronously save the dataset to disk """
return self._query(tr, b'save')
@_query_command
def bgsave(self, tr) -> StatusReply:
""" Asynchronously save the dataset to disk """
return self._query(tr, b'bgsave')
@_query_command
def bgrewriteaof(self, tr) -> StatusReply:
""" Asynchronously rewrite the append-only file """
return self._query(tr, b'bgrewriteaof')
@_query_command
def lastsave(self, tr) -> int:
""" Get the UNIX time stamp of the last successful save to disk """
return self._query(tr, b'lastsave')
@_query_command
def dbsize(self, tr) -> int:
""" Return the number of keys in the currently-selected database. """
return self._query(tr, b'dbsize')
@_query_command
def flushall(self, tr) -> StatusReply:
""" Remove all keys from all databases """
return self._query(tr, b'flushall')
@_query_command
def flushdb(self, tr) -> StatusReply:
""" Delete all the keys of the currently selected DB. This command never fails. """
return self._query(tr, b'flushdb')
# @_query_command
# def object(self, subcommand, args):
# """ Inspect the internals of Redis objects """
# raise NotImplementedError
@_query_command
def type(self, tr, key:NativeType) -> StatusReply:
""" Determine the type stored at key """
return self._query(tr, b'type', self.encode_from_native(key))
@_query_command
def config_set(self, tr, parameter:str, value:str) -> StatusReply:
""" Set a configuration parameter to the given value """
return self._query(tr, b'config', b'set', self.encode_from_native(parameter),
self.encode_from_native(value))
@_query_command
def config_get(self, tr, parameter:str) -> ConfigPairReply:
""" Get the value of a configuration parameter """
return self._query(tr, b'config', b'get', self.encode_from_native(parameter))
@_query_command
def config_rewrite(self, tr) -> StatusReply:
""" Rewrite the configuration file with the in memory configuration """
return self._query(tr, b'config', b'rewrite')
@_query_command
def config_resetstat(self, tr) -> StatusReply:
""" Reset the stats returned by INFO """
return self._query(tr, b'config', b'resetstat')
@_query_command
def info(self, tr, section:(NativeType, NoneType)=None) -> InfoReply:
""" Get information and statistics about the server """
if section is None:
return self._query(tr, b'info')
else:
return self._query(tr, b'info', self.encode_from_native(section))
@_query_command
def shutdown(self, tr, save=False) -> StatusReply:
""" Synchronously save the dataset to disk and then shut down the server """
return self._query(tr, b'shutdown', (b'save' if save else b'nosave'))
@_query_command
def client_getname(self, tr) -> NativeType:
""" Get the current connection name """
return self._query(tr, b'client', b'getname')
@_query_command
def client_setname(self, tr, name) -> StatusReply:
""" Set the current connection name """
return self._query(tr, b'client', b'setname', self.encode_from_native(name))
@_query_command
def client_list(self, tr) -> ClientListReply:
""" Get the list of client connections """
return self._query(tr, b'client', b'list')
@_query_command
def client_kill(self, tr, address:str) -> StatusReply:
"""
Kill the connection of a client
`address` should be an "ip:port" string.
"""
return self._query(tr, b'client', b'kill', address.encode('utf-8'))
# LUA scripting
@_command
@asyncio.coroutine
def register_script(self, tr, script:str) -> 'Script':
"""
Register a LUA script.
::
script = yield from protocol.register_script(lua_code)
result = yield from script.run(keys=[...], args=[...])
"""
# The register_script API was made compatible with the redis.py library:
# https://github.com/andymccurdy/redis-py
sha = yield from self.script_load(tr, script)
return Script(sha, script, lambda:self.evalsha)
@_query_command
def script_exists(self, tr, shas:ListOf(str)) -> ListOf(bool):
""" Check existence of scripts in the script cache. """
return self._query(tr, b'script', b'exists', *[ sha.encode('ascii') for sha in shas ])
@_query_command
def script_flush(self, tr) -> StatusReply:
""" Remove all the scripts from the script cache. """
return self._query(tr, b'script', b'flush')
@_query_command
@asyncio.coroutine
def script_kill(self, tr) -> StatusReply:
"""
Kill the script currently in execution. This raises
:class:`~asyncio_redis.exceptions.NoRunningScriptError` when there are no
scripts running.
"""
try:
return (yield from self._query(tr, b'script', b'kill'))
except ErrorReply as e:
if 'NOTBUSY' in e.args[0]:
raise NoRunningScriptError
else:
raise
@_query_command
@asyncio.coroutine
def evalsha(self, tr, sha:str,
keys:(ListOf(NativeType), NoneType)=None,
args:(ListOf(NativeType), NoneType)=None) -> EvalScriptReply:
"""
Evaluates a script cached on the server side by its SHA1 digest.
Scripts are cached on the server side using the SCRIPT LOAD command.
The return type/value depends on the script.
This will raise a :class:`~asyncio_redis.exceptions.ScriptKilledError`
exception if the script was killed.
"""
if not keys: keys = []
if not args: args = []
try:
result = yield from self._query(tr, b'evalsha', sha.encode('ascii'),
self._encode_int(len(keys)),
*map(self.encode_from_native, keys + args))
return result
except ErrorReply:
raise ScriptKilledError
@_query_command
def script_load(self, tr, script:str) -> str:
""" Load script, returns sha1 """
return self._query(tr, b'script', b'load', script.encode('utf-8'))
# Scanning
@_command
def scan(self, tr, match:(NativeType, NoneType)=None) -> Cursor:
"""
Walk through the key space. You can either fetch the items one by one
or in bulk.
::
cursor = yield from protocol.scan(match='*')
while True:
item = yield from cursor.fetchone()
if item is None:
break
else:
print(item)
::
cursor = yield from protocol.scan(match='*')
items = yield from cursor.fetchall()
It's possible to alter the COUNT-parameter, by assigning a value to
``cursor.count``, before calling ``fetchone`` or ``fetchall``. For
instance:
::
cursor.count = 100
Also see: :func:`~asyncio_redis.RedisProtocol.sscan`,
:func:`~asyncio_redis.RedisProtocol.hscan` and
:func:`~asyncio_redis.RedisProtocol.zscan`
Redis reference: http://redis.io/commands/scan
"""
if False: yield
def scanfunc(cursor, count):
return self._scan(tr, cursor, match, count)
return Cursor(name='scan(match=%r)' % match, scanfunc=scanfunc)
@_query_command
def _scan(self, tr, cursor:int, match:(NativeType,NoneType), count:int) -> _ScanPart:
match = b'*' if match is None else self.encode_from_native(match)
return self._query(tr, b'scan', self._encode_int(cursor),
b'match', match,
b'count', self._encode_int(count))
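# Illustrative raw reply for SCAN (per the Redis documentation): a two element
# array such as [b'17', [b'key:1', b'key:2']], where the first element is the
# cursor to pass to the next call (b'0' means the iteration is complete); the
# _ScanPart annotation presumably wraps this (cursor, items) pair.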
@_command
def sscan(self, tr, key:NativeType, match:(NativeType,NoneType)=None) -> SetCursor:
"""
Incrementally iterate set elements
Also see: :func:`~asyncio_redis.RedisProtocol.scan`
"""
if False: yield
name = 'sscan(key=%r match=%r)' % (key, match)
def scan(cursor, count):
return self._do_scan(tr, b'sscan', key, cursor, match, count)
return SetCursor(name=name, scanfunc=scan)
@_command
def hscan(self, tr, key:NativeType, match:(NativeType,NoneType)=None) -> DictCursor:
"""
Incrementally iterate hash fields and associated values
Also see: :func:`~asyncio_redis.RedisProtocol.scan`
"""
if False: yield
name = 'hscan(key=%r match=%r)' % (key, match)
def scan(cursor, count):
return self._do_scan(tr, b'hscan', key, cursor, match, count)
return DictCursor(name=name, scanfunc=scan)
@_command
def zscan(self, tr, key:NativeType, match:(NativeType,NoneType)=None) -> DictCursor:
"""
Incrementally iterate sorted sets elements and associated scores
Also see: :func:`~asyncio_redis.RedisProtocol.scan`
"""
if False: yield
name = 'zscan(key=%r match=%r)' % (key, match)
def scan(cursor, count):
return self._do_scan(tr, b'zscan', key, cursor, match, count)
return ZCursor(name=name, scanfunc=scan)
@_query_command
def _do_scan(self, tr, verb:bytes, key:NativeType, cursor:int, match:(NativeType,NoneType), count:int) -> _ScanPart:
match = b'*' if match is None else self.encode_from_native(match)
return self._query(tr, verb, self.encode_from_native(key),
self._encode_int(cursor),
b'match', match,
b'count', self._encode_int(count))
# Transaction
@_command
@asyncio.coroutine
def watch(self, tr, keys:ListOf(NativeType)) -> NoneType:
"""
Watch keys.
::
# Watch keys for concurrent updates
yield from protocol.watch(['key', 'other_key'])
value = yield from protocol.get('key')
another_value = yield from protocol.get('another_key')
transaction = yield from protocol.multi()
f1 = yield from transaction.set('key', another_value)
f2 = yield from transaction.set('another_key', value)
# Commit transaction
yield from transaction.exec()
# Retrieve results
yield from f1
yield from f2
"""
return self._watch(tr, keys)
@asyncio.coroutine
def _watch(self, tr, keys:ListOf(NativeType)) -> NoneType:
result = yield from self._query(tr, b'watch', *map(self.encode_from_native, keys), _bypass=True)
assert result == b'OK'
@_command
@asyncio.coroutine
def multi(self, tr, watch:(ListOf(NativeType),NoneType)=None) -> 'Transaction':
"""
Start of transaction.
::
transaction = yield from protocol.multi()
# Run commands in transaction
f1 = yield from transaction.set('key', 'value')
f2 = yield from transaction.set('another_key', 'another_value')
# Commit transaction
yield from transaction.exec()
# Retrieve results (you can also use asyncio.tasks.gather)
result1 = yield from f1
result2 = yield from f2
:returns: A :class:`asyncio_redis.Transaction` instance.
"""
# Create transaction object.
if tr != _NoTransaction:
raise Error('Multi calls can not be nested.')
else:
yield from self._transaction_lock.acquire()
tr = Transaction(self)
self._transaction = tr
# Call watch
if watch is not None:
yield from self._watch(tr, watch)
# yield from asyncio.sleep(.015)
# Call multi
result = yield from self._query(tr, b'multi', _bypass=True)
assert result == b'OK'
self._transaction_response_queue = deque()
return tr
@asyncio.coroutine
def _exec(self, tr):
"""
Execute all commands issued after MULTI
"""
if not self._transaction or self._transaction != tr:
raise Error('Not in transaction')
try:
futures_and_postprocessors = self._transaction_response_queue
self._transaction_response_queue = None
# Get transaction answers.
multi_bulk_reply = yield from self._query(tr, b'exec', _bypass=True)
if multi_bulk_reply is None:
# We get None when a transaction failed.
raise TransactionError('Transaction failed.')
else:
assert isinstance(multi_bulk_reply, MultiBulkReply)
for f in multi_bulk_reply.iter_raw():
answer = yield from f
f2, call = futures_and_postprocessors.popleft()
if isinstance(answer, Exception):
f2.set_exception(answer)
else:
if call:
self._pipelined_calls.remove(call)
f2.set_result(answer)
finally:
self._transaction_response_queue = deque()
self._transaction = None
self._transaction_lock.release()
@asyncio.coroutine
def _discard(self, tr):
"""
Discard all commands issued after MULTI
"""
if not self._transaction or self._transaction != tr:
raise Error('Not in transaction')
try:
result = yield from self._query(tr, b'discard', _bypass=True)
assert result == b'OK'
finally:
self._transaction_response_queue = deque()
self._transaction = None
self._transaction_lock.release()
@asyncio.coroutine
def _unwatch(self, tr):
"""
Forget about all watched keys
"""
if not self._transaction or self._transaction != tr:
raise Error('Not in transaction')
result = yield from self._query(tr, b'unwatch') # XXX: should be _bypass???
assert result == b'OK'
class Script:
""" Lua script. """
def __init__(self, sha, code, get_evalsha_func):
self.sha = sha
self.code = code
self.get_evalsha_func = get_evalsha_func
def run(self, keys=[], args=[]):
"""
Returns a coroutine that executes the script.
::
script_reply = yield from script.run(keys=[], args=[])
# If the LUA script returns something, retrieve the return value
result = yield from script_reply.return_value()
This will raise a :class:`~asyncio_redis.exceptions.ScriptKilledError`
exception if the script was killed.
"""
return self.get_evalsha_func()(self.sha, keys, args)
class Transaction:
"""
Transaction context. This is a proxy to a :class:`.RedisProtocol` instance.
Every redis command called on this object will run inside the transaction.
The transaction can be finished by calling either ``discard`` or ``exec``.
More info: http://redis.io/topics/transactions
"""
def __init__(self, protocol):
self._protocol = protocol
def __getattr__(self, name):
"""
Proxy to a protocol.
"""
# Only proxy commands.
if name not in _all_commands:
raise AttributeError(name)
method = getattr(self._protocol, name)
# Wrap the method into something that passes the transaction object as
# first argument.
@wraps(method)
def wrapper(*a, **kw):
if self._protocol._transaction != self:
raise Error('Transaction already finished or invalid.')
return method(self, *a, **kw)
return wrapper
def discard(self):
"""
Discard all commands issued after MULTI
"""
return self._protocol._discard(self)
def exec(self):
"""
Execute transaction.
This can raise a :class:`~asyncio_redis.exceptions.TransactionError`
when the transaction fails.
"""
return self._protocol._exec(self)
def unwatch(self): # XXX: test
"""
Forget about all watched keys
"""
return self._protocol._unwatch(self)
class Subscription:
"""
Pubsub subscription
"""
def __init__(self, protocol):
self.protocol = protocol
self._messages_queue = Queue(loop=protocol._loop) # Pubsub queue
@wraps(RedisProtocol._subscribe)
def subscribe(self, channels):
return self.protocol._subscribe(self, channels)
@wraps(RedisProtocol._unsubscribe)
def unsubscribe(self, channels):
return self.protocol._unsubscribe(self, channels)
@wraps(RedisProtocol._psubscribe)
def psubscribe(self, patterns):
return self.protocol._psubscribe(self, patterns)
@wraps(RedisProtocol._punsubscribe)
def punsubscribe(self, patterns):
return self.protocol._punsubscribe(self, patterns)
@asyncio.coroutine
def next_published(self):
"""
Coroutine which waits for next pubsub message to be received and
returns it.
:returns: instance of :class:`PubSubReply <asyncio_redis.replies.PubSubReply>`
"""
return (yield from self._messages_queue.get())
class HiRedisProtocol(RedisProtocol, metaclass=_RedisProtocolMeta):
"""
Protocol implementation that uses the `hiredis` library for parsing the
incoming data. This will be faster in many cases, but not necessarily
always.
It does not (yet) support streaming of multi-bulk replies, which means that
you won't see the first item of a multi-bulk reply before the whole
response has been parsed.
"""
def __init__(self, *, password=None, db=0, encoder=None,
connection_lost_callback=None, enable_typechecking=True,
loop=None):
super().__init__(password=password,
db=db,
encoder=encoder,
connection_lost_callback=connection_lost_callback,
enable_typechecking=enable_typechecking,
loop=loop)
self._hiredis = None
assert hiredis, "`hiredis` library not available. Please don't use HiRedisProtocol."
def connection_made(self, transport):
super().connection_made(transport)
self._hiredis = hiredis.Reader()
def data_received(self, data):
# Move received data to hiredis parser
self._hiredis.feed(data)
while True:
item = self._hiredis.gets()
if item is not False:
self._process_hiredis_item(item, self._push_answer)
else:
break
def _process_hiredis_item(self, item, cb):
if isinstance(item, (bytes, int)):
cb(item)
elif isinstance(item, list):
reply = MultiBulkReply(self, len(item), loop=self._loop)
for i in item:
self._process_hiredis_item(i, reply._feed_received)
cb(reply)
elif isinstance(item, hiredis.ReplyError):
cb(ErrorReply(item.args[0]))
elif isinstance(item, NoneType):
cb(item)
@asyncio.coroutine
def _reader_coroutine(self):
# We don't need this one.
return
``` |
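The `sscan`, `hscan` and `zscan` methods above hand back cursor objects that drive the incremental `SCAN` family of commands through `_do_scan`. A minimal usage sketch follows; host, port and key names are placeholders, and it assumes the documented `asyncio_redis.Connection.create()` factory and the cursor's `fetchone()` coroutine, which returns `None` once the server-side cursor is exhausted.
```python
import asyncio
import asyncio_redis

@asyncio.coroutine
def dump_set_members():
    # Host, port and key name are placeholders for illustration only.
    connection = yield from asyncio_redis.Connection.create(host='localhost', port=6379)
    try:
        # sscan() returns a SetCursor; fetchone() issues SSCAN round trips as needed.
        cursor = yield from connection.sscan('my-set', match='user:*')
        while True:
            item = yield from cursor.fetchone()
            if item is None:
                break  # server-side cursor exhausted
            print(item)
    finally:
        connection.close()

asyncio.get_event_loop().run_until_complete(dump_set_members())
```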
{
"source": "jkpubsrc/PyPine",
"score": 2
} |
#### File: src/pypine/_Chain.py
```python
import os
import typing
import jk_typing
#import jk_prettyprintobj
from ._INode import _INode
from ._ChainNodeP import _ChainNodeP
from ._Sequence import _Sequence
from .Context import Context
from .AbstractProcessor import AbstractProcessor
from .utils.TreeHelper import TreeHelper
from .utils.Color import Color
class _Chain(_INode):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, *processors):
assert processors
for x in processors:
assert isinstance(x, (AbstractProcessor, _Sequence))
# build processing chain
previousNode = None
for p in processors:
if isinstance(p, _Sequence):
p._prevChainNode = previousNode
previousNode = p
else:
previousNode = _ChainNodeP(previousNode, p)
self.__node = previousNode
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dump(self, th:TreeHelper):
print(Color.BLUE + th.toStr() + "Chain" + Color.RESET)
th = th.descend()
th.rightIsLast = True
self.__node._dump(th)
#
################################################################################################################################
## Public Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def initialize(self, ctx:Context):
self.__node.initialize(ctx)
#
def __call__(self, ctx:Context, f):
yield from self.__node(ctx, f)
#
#
```
#### File: pypine/do/InMemoryFile.py
```python
import sys
import os
import typing
import grp
import pwd
import jk_typing
#import jk_prettyprintobj
from ._CommonDataObjectMixin import _CommonDataObjectMixin
from ..FileTypeInfo import FileTypeInfo
class InMemoryFile(object):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self,
relFilePath:str,
fileTypeInfo:FileTypeInfo,
data:typing.Union[str,bytes,bytearray],
):
self.__relFilePath = relFilePath
self.__fileTypeInfo = fileTypeInfo
if isinstance(data, str):
self.__textData = data
self.__lengthInBytes = None
self.__binaryData = None
else:
self.__textData = None
self.__binaryData = bytes(data)
self.__lengthInBytes = len(self.__binaryData)
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def dataType(self) -> str:
return "file"
#
@property
def fileTypeInfo(self) -> FileTypeInfo:
return self.__fileTypeInfo
#
@property
def baseDirPath(self) -> str:
return None
#
@property
def relFilePath(self) -> str:
return self.__relFilePath
#
@property
def size(self) -> int:
if self.__binaryData:
return self.__lengthInBytes
else:
if self.__lengthInBytes is None:
self.__lengthInBytes = len(self.__textData.encode("utf-8"))
return self.__lengthInBytes
#
@property
def relDirPath(self) -> str:
return os.path.dirname(self.relFilePath)
#
#
# The name of this entry
#
@property
def fileName(self) -> str:
return os.path.basename(self.relFilePath)
#
#
# This is the absolute path of this entry.
#
@property
def fullPath(self) -> str:
return None
#
@property
def gid(self) -> int:
return os.getgid()
#
@property
def uid(self) -> int:
return os.getuid()
#
#
# The name of the owning group
#
@property
def group(self) -> typing.Union[str,None]:
x = grp.getgrgid(self.gid)
if x:
return x.gr_name
else:
return None
#
#
# The name of the owning user
#
@property
def user(self) -> typing.Union[str,None]:
x = pwd.getpwuid(self.uid)
if x:
return x.pw_name
else:
return None
#
@property
def absFilePath(self) -> str:
return None
#
@property
def absDirPath(self) -> str:
return None
#
@property
def isBinary(self) -> bool:
return bool(self.__binaryData)
#
@property
def isText(self) -> bool:
return bool(self.__textData)
#
@property
def isLocal(self) -> bool:
return True
#
@property
def isLocalOnDisk(self) -> bool:
return False
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def clone(self):
if self.__textData is not None:
# this file stores text data
return InMemoryFile(self.__relFilePath, self.__fileTypeInfo, self.__textData)
else:
# this file stores binary data
return InMemoryFile(self.__relFilePath, self.__fileTypeInfo, self.__binaryData)
#
def __str__(self):
return "InMemoryFile<({} :: {})>".format(repr(self.relFilePath), self.fileTypeInfo)
#
def __repr__(self):
return "InMemoryFile<({} :: {})>".format(repr(self.relFilePath), self.fileTypeInfo)
#
def getTimeStamp(self) -> float:
return self.mtime
#
def getTimeStampI(self) -> int:
return int(self.mtime)
#
def getFileSize(self) -> int:
return self.size
#
def getMode(self) -> int:
return 0o644
#
def getUID(self) -> int:
return os.getuid()
#
def getGID(self) -> int:
return os.getgid()
#
def readBinary(self):
if self.__textData is None:
return self.__binaryData
else:
return self.__textData.encode("utf-8")
#
def readText(self):
if self.__textData is None:
raise Exception("Not a text file!")
else:
return self.__textData
#
#
```
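A small usage sketch for `InMemoryFile`: the constructor chooses between the text and the binary storage path based on the type of `data`, and `size` is computed lazily for text payloads. `FileTypeInfo.guessFromFileName()` is used here the same way as in the packing processors; the file paths and extensions are made up for illustration.
```python
from pypine import InMemoryFile, FileTypeInfo

# Text payload: kept as str; the UTF-8 byte length is computed lazily on first .size access.
fText = InMemoryFile("docs/readme.txt", FileTypeInfo.guessFromFileName(".txt"), "Hello PyPine!")
print(fText.isText, fText.size)            # True 13

# Binary payload: stored as immutable bytes, so the length is known immediately.
fBin = InMemoryFile("data/blob.bin", FileTypeInfo.guessFromFileName(".bin"), b"\x00\x01\x02")
print(fBin.isBinary, fBin.readBinary())    # True b'\x00\x01\x02'
```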
#### File: pypine/processors_core/FileWriter.py
```python
import os
import typing
import jk_typing
from ..do.DiskFile import DiskFile
from ..Context import Context
from ..AbstractProcessor import AbstractProcessor
from ..EnumAction import EnumAction
#
# This component stores files.
#
class FileWriter(AbstractProcessor):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, outputDirPath:str):
super().__init__()
self.__outputDirPath = os.path.normpath(os.path.abspath(outputDirPath))
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def processorDetailsHR(self) -> str:
return self.__outputDirPath
#
@property
def processorTypeName(self) -> str:
return "dest"
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def initializeProcessing(self, ctx:Context):
# TODO: clean the target directory by recursively removing all files and directories.
pass
#
def processableDataTypes(self) -> list:
return [ "file" ]
#
def actionIfUnprocessable(self) -> EnumAction:
return EnumAction.Warn
#
def processElement(self, ctx:Context, f):
# TODO: set file modification time and mode on write?
absDirPath = os.path.join(self.__outputDirPath, f.relDirPath)
os.makedirs(absDirPath, exist_ok=True)
ctx.printDetail(self, "Writing: " + f.relFilePath)
absFilePath = os.path.join(absDirPath, f.fileName)
with open(absFilePath, "wb") as fout:
fout.write(f.readBinary())
f2 = DiskFile.fromFile(self.__outputDirPath, absFilePath)
return f2
#
#
```
#### File: pypine/processors_core/RunTask.py
```python
import os
import typing
import jk_typing
from ..FileTypeInfo import FileTypeInfo
from ..do.DiskFile import DiskFile
from ..do.InMemoryFile import InMemoryFile
from ..Context import Context
from ..AbstractProcessor import AbstractProcessor
class RunTask(AbstractProcessor):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, taskName:str):
super().__init__()
self.__taskName = taskName
self.__task = None
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def initializeProcessing(self, ctx:Context):
self.__task = ctx.tasks.getE(self.__taskName)
#
def processElement(self, ctx:Context, f):
return self.__task.run(ctx, self.__taskName, f)
#
#
```
#### File: pypine/pypinectrl_cli/PyPineXModuleInfo.py
```python
import os
import jk_json
import jk_flexdata
import jk_logging
class PyPineXModuleInfo(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, dirPath:str, jInfo:dict):
self.name = os.path.basename(dirPath)
self.pypiorgName = self.name.replace("_", "-")
self.dirPath = dirPath
self.meta = jk_flexdata.FlexObject(jInfo["meta"])
self.compatibility = jk_flexdata.FlexObject(jInfo["compatibility"])
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
@staticmethod
def tryLoad(dirPath:str, log:jk_logging.AbstractLogger = None):
pypinexInfoFilePath = os.path.join(dirPath, "pypinex_info.json")
if not os.path.isfile(pypinexInfoFilePath):
return None
moduleName = os.path.basename(dirPath)
try:
jData = jk_json.loadFromFile(pypinexInfoFilePath)
except Exception as ee:
if log:
log.error(ee)
return None
# check format
try:
if not jData["magic"]["magic"] == "pypinex-info":
raise Exception()
if jData["magic"]["version"] != 1:
raise Exception()
except Exception as ee:
if log:
log.error("Not a valid PyPine extension file!")
return None
# create instance of PyPineXModuleInfo
return PyPineXModuleInfo(dirPath, jData)
#
#
```
#### File: pypine/utils/RelFilePathBuilder.py
```python
import re
import typing
import os
import datetime
import jk_typing
def _toStr0(number:int, lengthInCharacters:int):
s = str(number)
while len(s) < lengthInCharacters:
s = "0" + s
return s
#
_allKeys = [
"%Y",
"%m",
"%d",
"%H",
"%M",
"%S",
"$(year)",
"$(month)",
"$(day)",
"$(hour)",
"$(minute)",
"$(second)",
"$(millis)",
"$(relDirPath)",
"$(relFilePath)",
"$(relFilePathWithoutExt)",
"$(fileName)",
"$(fileExt)",
"$(fileNameWithoutExt)",
]
_sStr = "|".join([
key.replace("$(", "\\$\\(").replace(")", "\\)") for key in _allKeys
])
_varSplitPattern = re.compile("(" + _sStr + ")")
class RelFilePathBuilder(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, filePathPattern:str):
self.__timestamp = datetime.datetime.now()
_year = _toStr0(self.__timestamp.year, 4)
_month = _toStr0(self.__timestamp.month, 2)
_day = _toStr0(self.__timestamp.day, 2)
_hour = _toStr0(self.__timestamp.hour, 2)
_minute = _toStr0(self.__timestamp.minute, 2)
_second = _toStr0(self.__timestamp.second, 2)
_millis = _toStr0(self.__timestamp.microsecond // 1000, 4)
self.__xvars = {
"%Y": _year,
"%m": _month,
"%d": _day,
"%H": _hour,
"%M": _minute,
"%S": _second,
"$(year)": _year,
"$(month)": _month,
"$(day)": _day,
"$(hour)": _hour,
"$(minute)": _minute,
"$(second)": _second,
"$(millis)": _millis,
}
self.__pattern = []
for p in _varSplitPattern.split(filePathPattern):
if not p:
continue
if p in self.__xvars:
self.__pattern.append(self.__xvars[p])
else:
self.__pattern.append(p)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def buildFilePath(self, f) -> str:
xvars2 = {
"$(relDirPath)": f.relDirPath,
"$(relFilePath)": f.relFilePath,
"$(relFilePathWithoutExt)": f.relFilePathWithoutExt,
"$(fileName)": f.fileName,
"$(fileExt)": f.fileExt,
"$(fileNameWithoutExt)": f.fileNameWithoutExt,
}
ret = []
for p in self.__pattern:
if p in xvars2:
ret.append(xvars2[p])
else:
ret.append(p)
s = "".join(ret)
if s.startswith("/"):
return s[1:]
else:
return s
#
#
``` |
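To illustrate how `RelFilePathBuilder` expands its placeholders, here is a sketch with a hypothetical stand-in object that provides only the attributes `buildFilePath()` reads; the import path is derived from the file location above.
```python
import types
from pypine.utils.RelFilePathBuilder import RelFilePathBuilder

# Hypothetical stand-in for a PyPine file object (only the attributes used by buildFilePath()).
f = types.SimpleNamespace(
    relDirPath="css",
    relFilePath="css/site.css",
    relFilePathWithoutExt="css/site",
    fileName="site.css",
    fileExt=".css",
    fileNameWithoutExt="site",
)

builder = RelFilePathBuilder("backup-%Y-%m-%d/$(relDirPath)/$(fileNameWithoutExt).min$(fileExt)")
print(builder.buildFilePath(f))    # e.g. 'backup-2021-03-19/css/site.min.css' (date part is the current date)
```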
{
"source": "jkpubsrc/pypinex-pack",
"score": 2
} |
#### File: src/pypinex_pack/CloseUploadPack.py
```python
import os
import typing
import io
import jk_uploadpack
from pypine import *
class CloseUploadPack(AbstractProcessor):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self):
super().__init__()
#
def initializeProcessing(self, ctx:Context):
self.__up = ctx.localData.get("uploadpack")
if self.__up is None:
raise Exception("No upload pack created!")
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def processingCompleted(self, ctx:Context):
self.__up.close()
f2 = DiskFile.fromFile(
os.path.dirname(self.__up.filePath),
self.__up.filePath,
)
ctx.printVerbose(self, "Archive created: " + f2.absFilePath)
return f2
#
#
```
#### File: src/pypinex_pack/PackBZip2.py
```python
import os
import typing
import bz2
import io
from pypine import *
class PackBZip2(AbstractProcessor):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, **kwargs):
super().__init__()
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def processableDataTypes(self) -> list:
return [ "file", "url" ]
#
def actionIfUnprocessable(self) -> EnumAction:
return EnumAction.Warn
#
def processElement(self, ctx:Context, f):
if f.fileName.endswith(".bz2"):
return f
outbuf = io.BytesIO()
with bz2.BZ2File(filename=outbuf, mode="wb", compresslevel=9) as stream:
rawData = f.readBinary()
stream.write(rawData)
f2 = InMemoryFile(f.relFilePath + ".bz2", FileTypeInfo.guessFromFileName(".bz2"), outbuf.getvalue())
return f2
#
#
```
#### File: pypinex-pack/src/setup.py
```python
from setuptools import setup
def readme():
with open("README.md", "r", encoding="UTF-8-sig") as f:
return f.read()
setup(
author = "<NAME>",
author_email = "<EMAIL>",
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
],
description = "PyPine extension that supports various ways of compressing/uncompressing files: gzip, bzip2, xz, tar and uploadpack.",
include_package_data = True,
install_requires = [
"pypine",
],
keywords = [
"pypine",
"pypinex",
"gzip",
"bzip2",
"xz",
"uploadpack",
"tar",
],
license = "Apache2",
name = "pypinex_pack",
package_data = {
"": [
"pypinex_info.json",
],
},
packages = [
"pypinex_pack",
],
version = "0.2021.3.19",
zip_safe = False,
long_description = readme(),
long_description_content_type="text/markdown",
)
``` |
{
"source": "jkpubsrc/python-jk-rawhtml",
"score": 3
} |
#### File: src/jk_rawhtml/HTML5HeadElement.py
```python
from jk_hwriter import HWriter
from .HTMLElement import *
class HTML5HeadElement(HTMLElement):
def __init__(self, proto, name):
assert name == "head"
super().__init__(proto, "head")
#
def __hasMetaTagWithCharset(self):
for child in self.children:
if isinstance(child, HTMLElement):
if child.name == "meta":
if "charset" in child.attributes:
return True
return False
#
def _serialize(self, w:HWriter):
w.write(self._openingTagData())
if self._proto.bHasClosingTag:
w.incrementIndent()
bRequireExtraCharsetTag = not self.__hasMetaTagWithCharset()
if self.children or bRequireExtraCharsetTag:
w.lineBreak()
if bRequireExtraCharsetTag:
w.writeLn("<meta charset=\"UTF-8\">")
for child in self.children:
if isinstance(child, (int, float, str)):
w.write(htmlEscape(str(child)))
else:
child._serialize(w)
w.lineBreak()
w.decrementIndent()
w.write(self._closingTagData())
else:
if len(self.children) > 0:
raise Exception("HTML tag \"" + self.name + "\" is not allowed to have child elements!")
w.lineBreak()
#
#
```
#### File: src/jk_rawhtml/_HTMLElementProto.py
```python
from .HTMLElement import HTMLElement
from .htmlgeneral import *
class _HTMLElementProto(object):
def __init__(self, name:str, bHasClosingTag=True, tagType=HTML_TAG_TYPE_INLINE_CONTENT, implClass=HTMLElement, extraAttributes:dict=None):
assert isinstance(name, str)
assert isinstance(bHasClosingTag, bool)
self.name = name
self.bHasClosingTag = bHasClosingTag
self.tagType = tagType
self.implClass = implClass
if tagType == HTML_TAG_TYPE_INLINE_CONTENT:
self.bLineBreakOuter = True
self.bLineBreakInner = False
elif tagType == HTML_TAG_TYPE_INLINE_ALL:
self.bLineBreakOuter = False
self.bLineBreakInner = False
elif tagType == HTML_TAG_TYPE_STRUCTURE:
self.bLineBreakOuter = True
self.bLineBreakInner = True
else:
raise Exception("Invalid tag type specified: " + str(tagType))
if extraAttributes:
assert isinstance(extraAttributes, dict)
self.extraAttributes = extraAttributes
else:
self.extraAttributes = None
#
def __call__(self, *args, **attrs):
if self.extraAttributes:
d = dict(self.extraAttributes)
d.update(attrs)
return self.implClass(self, self.name)(**d)
else:
return self.implClass(self, self.name)(**attrs)
#
def __getitem__(self, children):
return self.implClass(self, self.name)[children]
#
#
```
#### File: src/jk_rawhtml/htmlgeneral.py
```python
HTML_TAG_TYPE_STRUCTURE = 1
HTML_TAG_TYPE_INLINE_CONTENT = 2
HTML_TAG_TYPE_INLINE_ALL = 3
_HTML_ESCAPE_TABLE = {
"&": "&",
"\"": """,
"'": "'",
">": ">",
"<": "<",
}
def htmlEscape(text:str):
return "".join(_HTML_ESCAPE_TABLE.get(c, c) for c in text)
#
```
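A quick check of `htmlEscape()` against the escape table above:
```python
from jk_rawhtml.htmlgeneral import htmlEscape

# Only the five characters listed in _HTML_ESCAPE_TABLE are replaced; everything else passes through.
print(htmlEscape('<a href="x">Tom & Jerry</a>'))
# -> &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;
```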
#### File: src/jk_rawhtml/HTMLRawText.py
```python
from jk_hwriter import HWriter
from .htmlgeneral import *
class HTMLRawText(object):
def __init__(self, textOrTextList):
if isinstance(textOrTextList, str):
self.texts1 = [ textOrTextList ]
else:
self.texts1 = list(textOrTextList)
self.texts2 = []
#
def __call__(self, **attrs):
self.texts1.append("".join(attrs))
return self
#
def __getitem__(self, textOrTexts):
if hasattr(type(textOrTexts), "__iter__"):
self.texts2.extend(textOrTexts)
else:
self.texts2.append(textOrTexts)
return self
#
def _serialize(self, w:HWriter):
if self.texts1:
for text in self.texts1:
w.lineBreak()
w.write(text)
w.lineBreak()
for text in self.texts2:
w.write(text)
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-appmonitoring",
"score": 2
} |
#### File: src/jk_appmonitoring/RProcessList.py
```python
import jk_sysinfo
import jk_prettyprintobj
import jk_utils
from .RProcess import RProcess
from .RProcessFilter import RProcessFilter
class RProcessList(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self):
super().__init__()
self._rootProcesses = ()
self._pidsToProcesses = {}
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def rootProcesses(self) -> tuple:
return self._rootProcesses
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dumpVarNames(self) -> list:
return [
"rootProcesses",
]
#
"""
def __calcSummary(self, key:str, parseFunc, jOut:dict):
n = 0
for p in self._pidsToProcesses.values():
n += p[key]
if parseFunc:
jOut[key] = parseFunc(n)
else:
jOut[key] = n
#
"""
################################################################################################################################
## Public Methods
################################################################################################################################
def update(self):
# prepare variables
self._pidsToProcesses.clear()
rootProcesses = []
# retrieve data
jProcessList = jk_sysinfo.get_ps(bAddVMemSize=False)
# create a map of all process IDs
for jProcess in jProcessList:
p = RProcess(jProcess)
self._pidsToProcesses[p.pid] = p
# connect all processes with each other
for p in self._pidsToProcesses.values():
pp = self._pidsToProcesses.get(p.ppid)
if pp is not None:
pp._children.append(p)
p.parent = pp
# select all root processes
for p in self._pidsToProcesses.values():
if p.parent is None:
rootProcesses.append(p)
# finalize
self._rootProcesses = tuple(rootProcesses)
#
#
# Enrich all RProcess objects with more data.
#
def enrichWithMoreData(self):
for p in self._pidsToProcesses.values():
try:
p.enrichWithMoreData()
except Exception as ee:
pass
#
def getAllPIDs(self) -> set:
ret = set()
for p in self._rootProcesses:
p._getAllPIDsRecursively(ret)
return ret
#
#
# Extract a subset of the process tree based on the filter specified.
#
def filter(self, filter:RProcessFilter):
assert isinstance(filter, RProcessFilter)
ret = []
currentCandidates = list(self._rootProcesses)
nextCandidates = []
while True:
for p in currentCandidates:
if filter.match(p):
ret.append(p)
else:
nextCandidates.extend(p._children)
if nextCandidates:
currentCandidates = nextCandidates
nextCandidates = []
else:
break
pl = RProcessList()
pl._rootProcesses = ret
interestingPIDs = pl.getAllPIDs()
for p in self._pidsToProcesses.values():
if p.pid in interestingPIDs:
pl._pidsToProcesses[p.pid] = p
return pl
#
#
# If not yet done, enrich the processes with additional data and then calculate the RSS memory and IO read/write summary.
#
def calcSummary(self, jOut:dict = None) -> dict:
if jOut is None:
jOut = {}
else:
assert isinstance(jOut, dict)
for p in self._rootProcesses:
p.calcSummary(jOut)
return jOut
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-argsparsing",
"score": 3
} |
#### File: src/jk_argparsing/ArgOption.py
```python
from .ArgItemBase import *
class ArgOption(ArgItemBase):
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, shortName:typing.Union[str,None], longName:typing.Union[str,None], description:str):
super().__init__()
if shortName is not None:
assert isinstance(shortName, str)
assert len(shortName) == 1
if longName is not None:
assert isinstance(longName, str)
assert isinstance(description, str)
self.__shortName = shortName
self.__longName = longName
self.__description = description
self.__requiredErrorMessage = None
self.__onOption = None
self._isShortOption = self.isShortOption
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def shortName(self) -> typing.Union[str,None]:
return self.__shortName
#
@property
def longName(self) -> typing.Union[str,None]:
return self.__longName
#
@property
def description(self) -> str:
return self.__description
#
@property
def isRequired(self) -> bool:
return self.__requiredErrorMessage is not None
#
@property
def isShortOption(self) -> bool:
return self.__shortName is not None
#
@property
def onOption(self):
return self.__onOption
#
@onOption.setter
def onOption(self, value):
self.__onOption = value
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _invokeOpt(self, optArgs, parsedArgs):
if self.__onOption is not None:
self.__onOption(self, optArgs, parsedArgs)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
if self.__longName is not None:
s = "--" + self.__longName
#for op in self._optionParameters:
# s += " " + op.displayName
return s
if self.__shortName is not None:
s = "-" + self.__shortName
#for op in self._optionParameters:
# s += " " + op.displayName
return s
return "ArgOption(unknown)"
#
def __repr__(self):
if self.__longName is not None:
return "--" + self.__longName
if self.__shortName is not None:
return "-" + self.__shortName
return "ArgOption(unknown)"
#
def required(self, errorMessage:str):
if errorMessage is None:
raise Exception("No error message specified for required option: " + str(self))
assert isinstance(errorMessage, str)
self.__requiredErrorMessage = errorMessage
return self
#
#
```
#### File: jk_argparsing/textprimitives/columnLayouterL2R.py
```python
def columnLayouterL2R(availableWidth:int, columnComponent):
columnBlocks = columnComponent.columnBlocks
nColumnGap = columnComponent.nColumnGap
# ----
widths = [] # the currently assigned width; initialized with minWidth
remaining = [] # the difference between maxWidth and minWidth per component
for b in columnBlocks:
widths.append(b.minWidth)
remaining.append(b.maxWidth - widths[-1])
currentTotalWidth = sum(widths) + (len(widths) - 1) * nColumnGap # the total width
# expand all blocks from left to right as far as possible
while currentTotalWidth < availableWidth:
temp = currentTotalWidth
for i in range(0, len(columnBlocks)):
n = min((availableWidth - currentTotalWidth), remaining[i])
widths[i] += n
remaining[i] -= n
currentTotalWidth += n
if temp == currentTotalWidth:
# no more changes
break
# now assign preferred widths and order the components to layout itself
for i in range(0, len(columnBlocks)):
columnBlocks[i].layout(widths[i])
#
```
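The layouter above first assigns every column its minimum width and then grows the columns from left to right until either the available width or each column's maximum width is reached. A self-contained sketch with hypothetical stub classes (only the members the layouter touches are implemented):
```python
from jk_argparsing.textprimitives.columnLayouterL2R import columnLayouterL2R

class StubBlock:
    # Minimal block: the layouter only reads minWidth/maxWidth and calls layout().
    def __init__(self, minWidth, maxWidth):
        self.minWidth = minWidth
        self.maxWidth = maxWidth
        self.assignedWidth = None
    def layout(self, availableWidth):
        self.assignedWidth = availableWidth

class StubColumns:
    # Minimal column component: exposes the column blocks and the gap between columns.
    def __init__(self, blocks, gap):
        self.columnBlocks = blocks
        self.nColumnGap = gap

blocks = [StubBlock(5, 30), StubBlock(10, 20)]
columnLayouterL2R(60, StubColumns(blocks, 2))
print([b.assignedWidth for b in blocks])    # [30, 20] -- the left column grows to its maximum first
```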
#### File: jk_argparsing/textprimitives/ITextBlock.py
```python
import typing
from .XLineFragment import XLineFragment
class ITextBlock:
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Properties
################################################################################################################################
@property
def indent(self) -> int:
raise NotImplementedError()
#
@property
def preferredWidth(self) -> typing.Union[int,None]:
raise NotImplementedError()
#
@property
def minWidth(self) -> int:
raise NotImplementedError()
#
@property
def maxWidth(self) -> int:
raise NotImplementedError()
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
#
# Layout this component.
#
# @param int availableWidth The width this component can use for layouting.
#
def layout(self, availableWidth:int):
raise NotImplementedError()
#
#
# Returns a list of lines.
# Call this method after layouting has been peformed.
# Otherwise the situation is undefined: *getLines()* might even throw an Exception or might simply return useless data.
#
# @return XLineFragment[] The list of lines.
#
def getLines(self, bColor:bool) -> list:
raise NotImplementedError()
#
#
```
#### File: jk_argparsing/textprimitives/TextBlockSequence.py
```python
import typing
from .XLineFragment import XLineFragment
from .ITextBlock import ITextBlock
_RESET = "\x1b[0m"
class TextBlockSequence(ITextBlock):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, indent:int, blockGap:int):
self.__indent = indent
self.__blocks = []
self.__nBlockGap = blockGap
# ----
self.__cached_minWidth = None
self.__cached_maxWidth = None
self.__preferredWidth = None
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def nBlocks(self) -> int:
return len(self.__blocks)
#
@property
def nBlockGap(self) -> int:
return self.__nBlockGap
#
@property
def indent(self) -> int:
return self.__indent
#
@property
def preferredWidth(self) -> typing.Union[int,None]:
return self.__preferredWidth
#
@property
def minWidth(self) -> int:
if self.__cached_minWidth is None:
self.__cached_minWidth = 0
for b in self.__blocks:
w = self.__indent + b.minWidth
if w > self.__cached_minWidth:
self.__cached_minWidth = w
return self.__cached_minWidth
#
@property
def maxWidth(self) -> int:
if self.__cached_maxWidth is None:
self.__cached_maxWidth = 0
for b in self.__blocks:
w = self.__indent + b.maxWidth
if w > self.__cached_maxWidth:
self.__cached_maxWidth = w
return self.__cached_maxWidth
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def layout(self, availableWidth:int):
self.__preferredWidth = availableWidth
w = availableWidth - self.__indent
for block in self.__blocks:
block.layout(w)
#
def addBlock(self, block):
self.__blocks.append(block)
self.__cached_maxWidth = None
self.__cached_minWidth = None
self.__preferredWidth = None
#
#
# @return XLineFragment[] Returns a list of lines.
#
def getLines(self, bColor:bool) -> list:
lines = []
for iBlock, block in enumerate(self.__blocks):
if iBlock > 0:
# 2nd block or later => add gap
for i in range(0, self.__nBlockGap):
lines.append(XLineFragment(0, "", 0))
for blockLine in block.getLines(bColor):
blockLine = blockLine.addIndent(self.__indent)
lines.append(blockLine)
return lines
#
#
```
#### File: jk_argparsing/textprimitives/TextPrefixBlock.py
```python
import typing
from .XLineFragment import XLineFragment
from .ITextBlock import ITextBlock
#
# Instances of this class represent a block of text. This is typically a paragraph.
#
class TextPrefixBlock(ITextBlock):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, indent:int, prefix:str, block):
self.__indent = indent
self.__prefix = prefix
self.__block = block
self.__preferredWidth = None
#
################################################################################################################################
## Properties
################################################################################################################################
#
# The preferred width, including the indentation
#
@property
def preferredWidth(self) -> typing.Union[int,None]:
return self.__preferredWidth
#
@property
def maxWidth(self) -> int:
return self.__indent + len(self.__prefix) + self.__block.maxWidth
#
@property
def minWidth(self) -> int:
return self.__indent + len(self.__prefix) + self.__block.minWidth
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def layout(self, availableWidth:int):
w = availableWidth - self.__indent - len(self.__prefix)
self.__block.layout(w)
#
#
# @return XLineFragment[] Returns a list of lines.
#
def getLines(self, bColor:bool) -> list:
templateLine = XLineFragment(self.__indent, self.__prefix, len(self.__prefix))
retLines = []
for line in self.__block.getLines(bColor):
retLines.append(templateLine.append(line))
return retLines
#
#
```
#### File: jk_argparsing/textprimitives/XLineFragment.py
```python
class XLineFragment(object):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, indent:int, text:str, textLength:int):
assert textLength >= 0
self.__indent = indent
self.__text = text
self.__textLength = textLength
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def lineLength(self) -> int:
return self.__indent + self.__textLength
#
@property
def textLength(self) -> int:
return self.__textLength
#
@property
def text(self) -> str:
return self.__text
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
if self.__text:
return " " * self.__indent + self.__text
else:
return ""
#
def __repr__(self):
return "XLineFragment<(indent={},text={},textLength={})>".format(self.__indent, repr(self.__text), self.__textLength)
#
def addIndent(self, indent:int):
return XLineFragment(self.__indent + indent, self.__text, self.__textLength)
#
def join(self, position:int, other):
assert isinstance(position, int)
assert isinstance(other, XLineFragment)
nPadding = position - self.__textLength
# print("!!", position, self.__textLength, nPadding)
if nPadding < 0:
raise Exception("Line too long: " + str(nPadding) + ", " + repr(self.__text))
otherIndent = " " * other.__indent
return XLineFragment(
self.__indent,
self.__text + " " * nPadding + otherIndent + other.__text,
self.__textLength + nPadding + len(otherIndent) + other.__textLength)
#
def append(self, other):
assert isinstance(other, XLineFragment)
otherIndent = " " * other.__indent
return XLineFragment(
self.__indent,
self.__text + otherIndent + other.__text,
self.__textLength + len(otherIndent) + other.__textLength)
#
#
``` |
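`XLineFragment` is used to assemble the help text line by line; `join()` pads the left fragment out to a column position before appending the right one. A short sketch (import path derived from the file location above):
```python
from jk_argparsing.textprimitives.XLineFragment import XLineFragment

left = XLineFragment(2, "--help", 6)              # indent 2, visible text length 6
right = XLineFragment(0, "Show this help", 14)

joined = left.join(12, right)                     # pad "--help" out to column 12, then append
print(str(joined))                                # '  --help      Show this help'
print(joined.lineLength)                          # 2 + 12 + 14 = 28
```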
{
"source": "jkpubsrc/python-module-jk-asyncio-logging",
"score": 2
} |
#### File: src/jk_asyncio_logging/AsyncioFileLogger.py
```python
import jk_asyncio_syncasync
import jk_logging
from .AsyncioLogWrapper import AsyncioLogWrapper
class AsyncioFileLogger(AsyncioLogWrapper):
@staticmethod
def create(filePath, rollOver, bAppendToExistingFile = True, bFlushAfterEveryLogMessage = True, fileMode = None, logMsgFormatter = None):
return AsyncioFileLogger(jk_logging.FileLogger.create(filePath, rollOver, bAppendToExistingFile, bFlushAfterEveryLogMessage, fileMode, logMsgFormatter))
#
async def closed(self) -> bool:
return await jk_asyncio_syncasync.call_sync(self._l.closed)
#
async def isClosed(self) -> bool:
return await jk_asyncio_syncasync.call_sync(self._l.isClosed)
#
#
```
#### File: src/jk_asyncio_logging/AsyncioFilterLogger.py
```python
import jk_logging
from .AsyncioLogWrapper import AsyncioLogWrapper
class AsyncioFilterLogger(AsyncioLogWrapper):
@staticmethod
def create(logger:jk_logging.AbstractLogger, minLogLevel:jk_logging.EnumLogLevel = jk_logging.EnumLogLevel.WARNING):
return AsyncioFilterLogger(jk_logging.FilterLogger.create(logger, minLogLevel))
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-bincontainer",
"score": 3
} |
#### File: src/jk_bincontainer/BinContainer.py
```python
from io import BytesIO
import struct
from Crypto import Random
class BinContainer(object):
__MAGIC = b"BLKJK10\x00"
__RNG = Random.new()
def __init__(self):
self.__blocks = []
#
def toBytes(self) -> bytes:
return bytes(self.toByteArray())
#
def toByteArray(self) -> bytes:
ret = bytearray(BinContainer.__MAGIC)
for binKey, blockType, data in self.__blocks:
ret.extend(binKey)
ret.extend(self.__blockTypeStrToID(blockType))
if blockType == "bin":
for x in self.__pad(data):
ret.extend(x)
else:
raise Exception()
return ret
#
def __bytes__(self):
return bytes(self.toByteArray())
#
def __len__(self):
return len(self.__blocks)
#
def clear(self):
self.__blocks.clear()
#
def writeToFile(self, filePath:str):
assert isinstance(filePath, str)
with open(filePath, "wb") as fout:
fout.write(bytes(self))
#
def loadFromFile(self, filePath:str):
assert isinstance(filePath, str)
with open(filePath, "rb") as fin:
self.loadFromData(fin.read())
#
def dump(self):
print("BinContainer[")
for binKey, blockType, data in self.__blocks:
print("\tid=" + repr(self.__byteKeyToStrKey(binKey)) + ", type=" + repr(blockType) + ", length=" + str(len(data)))
print("]")
#
def loadFromData(self, bytedata):
assert isinstance(bytedata, (bytes, bytearray))
blocks = []
b = BytesIO(bytedata)
if b.read(len(BinContainer.__MAGIC)) != BinContainer.__MAGIC:
raise Exception("Not a binary container.")
alignmentInBytes = 4
while True:
binKey = b.read(4)
if not binKey:
break
assert len(binKey) == 4
binTypeID = b.read(4)
if not binTypeID:
break
assert len(binTypeID) == 4
typeID = self.__blockTypeIDToStr(binTypeID)
orgMsgLenByteArray = b.read(4)
assert len(orgMsgLenByteArray) == 4
nOrgDataLen = struct.unpack("<I", orgMsgLenByteArray)[0]
nPaddingBytes = (alignmentInBytes - nOrgDataLen) % alignmentInBytes
nBlockSize = nOrgDataLen + nPaddingBytes
raw = b.read(nBlockSize)
blocks.append((binKey, typeID, raw[:nOrgDataLen]))
self.__blocks = blocks
#
def __pad(self, rawBinData) -> tuple:
assert isinstance(rawBinData, (bytes, bytearray))
alignmentInBytes = 4
nOrgDataLen = len(rawBinData)
assert nOrgDataLen < 2147483647
orgMsgLenByteArray = struct.pack("<I", nOrgDataLen)
nPaddingBytes = (alignmentInBytes - nOrgDataLen) % alignmentInBytes
paddingData = BinContainer.__RNG.read(nPaddingBytes)
return bytes(orgMsgLenByteArray), bytes(rawBinData), bytes(paddingData)
#
def __unpad(self, rawBinData) -> bytes:
assert isinstance(rawBinData, (bytes, bytearray))
orgMsgLenByteArray = rawBinData[0:4]
assert len(orgMsgLenByteArray) == 4
nOrgDataLen = struct.unpack("<I", orgMsgLenByteArray)[0]
ret = rawBinData[4:4 + nOrgDataLen]
assert len(ret) == nOrgDataLen
return bytes(ret)
#
def __byteKeyToStrKey(self, raw) -> str:
assert isinstance(raw, (bytes, bytearray))
assert len(raw) == 4
return raw.decode("ascii")
#
def __strKeyToByteKey(self, s:str) -> bytes:
assert isinstance(s, str)
binKey = s.encode("ascii")
assert len(binKey) == 4
return binKey
#
def __blockTypeStrToID(self, s:str) -> bytes:
assert isinstance(s, str)
while len(s) < 4:
s += " "
binKey = s.encode("ascii")
assert len(binKey) == 4
return binKey
#
def __blockTypeIDToStr(self, raw) -> str:
assert isinstance(raw, (bytes, bytearray))
assert len(raw) == 4
return raw.decode("ascii").strip()
#
def addBinaryBlock(self, key:str, bytedata):
binKey = self.__strKeyToByteKey(key)
assert isinstance(bytedata, (bytes, bytearray))
bytedata = bytes(bytedata)
self.__blocks.append((binKey, "bin", bytedata))
#
def getBlockByKey(self, key:str) -> tuple:
binKey = self.__strKeyToByteKey(key)
for binKeyStored, blockType, dataStored in self.__blocks:
if binKeyStored == binKey:
return blockType, dataStored
return None
#
def getBlockByIndex(self, index:int) -> tuple:
assert isinstance(index, int)
if (index >= 0) and (index < len(self.__blocks)):
raw = self.__blocks[index]
return self.__byteKeyToStrKey(raw[0]), raw[1], raw[2]
return None
#
def getBlockByKeyE(self, key:str) -> tuple:
binKey = self.__strKeyToByteKey(key)
for binKeyStored, blockType, dataStored in self.__blocks:
if binKeyStored == binKey:
return blockType, dataStored
raise Exception("No such block: " + repr(key))
#
def getBlockByIndexE(self, index:int) -> tuple:
assert isinstance(index, int)
if (index >= 0) and (index < len(self.__blocks)):
raw = self.__blocks[index]
return self.__byteKeyToStrKey(raw[0]), raw[1], raw[2]
raise Exception("No such block: " + str(index))
#
#
``` |
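A round-trip sketch for `BinContainer`: block keys must be exactly four ASCII characters, and payloads are padded to a four-byte alignment on serialization and unpadded again on load. (Import path derived from the file location above.)
```python
from jk_bincontainer.BinContainer import BinContainer

bc = BinContainer()
bc.addBinaryBlock("KEY1", b"hello world")      # key must be exactly 4 ASCII characters
bc.addBinaryBlock("KEY2", bytes(range(16)))

raw = bc.toBytes()                             # magic + (key, type id, length, padded payload) per block

bc2 = BinContainer()
bc2.loadFromData(raw)
blockType, data = bc2.getBlockByKeyE("KEY1")
print(blockType, data)                         # bin b'hello world'
```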
{
"source": "jkpubsrc/python-module-jk-cachefunccalls",
"score": 3
} |
#### File: src/jk_cachefunccalls/cacheCalls.py
```python
import typing
import inspect
import time
import typing
# the cache stores all data for some time
__CACHE = {}
# this is the annotation wrapper that receives arguments and returns the function that does the wrapping
def cacheCalls(seconds:int = 0, dependArgs:typing.Union[typing.List,typing.Tuple] = None):
assert isinstance(seconds, int)
assert seconds > 0
if dependArgs is not None:
assert isinstance(dependArgs, (tuple, list))
for a in dependArgs:
assert isinstance(a, int)
assert a >= 0
else:
dependArgs = ()
# this function is executed for every function definition
def _wrap_the_function(fn):
__CACHE[id(fn)] = [None, 0, None] # lastArgID, lastT, lastResult
#argNames = inspect.getargspec(fn).args
argNames = inspect.getfullargspec(fn).args
bIsMethod = argNames and (argNames[0] == "self")
_nShift = 1 if bIsMethod else 0
#print("method" if bIsMethod else "function")
nIdentifierArgs = [ x + _nShift for x in dependArgs ]
# this function is executed every time the wrapped function is invoked.
def wrapped(*args, **kwargs):
cacheRecord = __CACHE[id(fn)]
tNow = time.time()
extraIdentifier = ""
for i in nIdentifierArgs:
if i < len(args):
extraIdentifier += "|" + str(id(args[i]))
bNeedsInvoke = False
if "_ignoreCache" in kwargs:
bInvalidate = kwargs["_ignoreCache"]
assert isinstance(bInvalidate, bool)
del kwargs["_ignoreCache"]
bNeedsInvoke = bInvalidate
if cacheRecord[1] <= 0:
bNeedsInvoke = True
elif tNow > cacheRecord[1] + seconds:
bNeedsInvoke = True
elif extraIdentifier != cacheRecord[0]:
bNeedsInvoke = True
if bNeedsInvoke:
cacheRecord[0] = extraIdentifier
cacheRecord[1] = tNow
cacheRecord[2] = fn(*args, **kwargs)
return cacheRecord[2]
#
return wrapped
#
return _wrap_the_function
#
```
#### File: python-module-jk-cachefunccalls/testing/test_caching_obj.py
```python
import collections
import typing
import inspect
import time
from jk_cachefunccalls import cacheCalls
from _test_and_evaluate import testAndEvaluate
class MyTestClass(object):
@cacheCalls(seconds=2, dependArgs=[0])
def returnSomething(self, n):
return time.time()
#
#
o = MyTestClass()
testAndEvaluate(o.returnSomething)
``` |
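The test above relies on a helper (`_test_and_evaluate`) that is not part of this snippet. The following self-contained sketch shows the same decorator on a plain function, including the `dependArgs` and `_ignoreCache` behaviour. Note that the cache key is built from the identity (`id()`) of the dependent arguments, which works here because the repeated string literal `"x"` is the same constant object.
```python
from jk_cachefunccalls import cacheCalls

calls = {"n": 0}

@cacheCalls(seconds=2, dependArgs=[0])
def fetch(key):
    calls["n"] += 1            # count how often the function body really runs
    return (key, calls["n"])

print(fetch("x"))                      # ('x', 1) -> computed
print(fetch("x"))                      # ('x', 1) -> served from the cache (same argument, within 2 seconds)
print(fetch("y"))                      # ('y', 2) -> different dependent argument, recomputed
print(fetch("x", _ignoreCache=True))   # ('x', 3) -> cache explicitly bypassed
```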
{
"source": "jkpubsrc/python-module-jk-cmdoutputparsinghelper",
"score": 3
} |
#### File: src/jk_cmdoutputparsinghelper/ColumnDef.py
```python
class ColumnDef(object):
################################################################################################################################
## Constructors
################################################################################################################################
#
# Constructor method.
#
# @param str name (required) The name of the column.
# @param callable valueParser (optional) A value parser.
# @param str typeName (optional) The name of the type.
#
def __init__(self, name:str, valueParser = None, typeName = None):
assert isinstance(name, str)
if valueParser is not None:
assert callable(valueParser)
self.name = name
self.valueParser = valueParser
self.typeName = typeName
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
_typeName = None
if self.typeName is not None:
_typeName = self.typeName
else:
if isinstance(self.valueParser, type):
_typeName = self.valueParser.__name__
else:
_typeName = "???"
return self.name + ":" + _typeName
#
def __repr__(self):
_typeName = None
if self.typeName is not None:
_typeName = self.typeName
else:
if isinstance(self.valueParser, type):
_typeName = self.valueParser.__name__
else:
_typeName = "???"
return self.name + ":" + _typeName
#
#
```
#### File: src/jk_cmdoutputparsinghelper/TextData.py
```python
import typing
from .LineList import LineList
#
# This class represents text. It converts text automatically to a LineList or to string as required.
# So if you access the stored data via <c>text</c> you will receive the text content as a single string.
# If you access the stored data via <c>lines</c> you will receive the text content as a set of text lines.
# Conversion occurs automatically depending on which property you use.
#
# This mechanism provided by this class
# allows you to work efficiently with text data as a string or a string list, depending on the current
# requirements, while avoiding unnecessary intermediate conversions.
#
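# Usage sketch (illustrative; assumes LineList behaves like a list of str):
#   td = TextData("first\nsecond")
#   td.lines    # -> LineList(["first", "second"]); the data is now stored as lines
#   td.text     # -> "first\nsecond"; the data is converted back to a single string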
class TextData(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, data:typing.Union[str,tuple,list,LineList]):
self.__bStoringLines = False
self.__data = ""
if isinstance(data, str):
self.text = data
else:
self.lines = data
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def lines(self) -> LineList:
if not self.__bStoringLines:
# convert to lines
if self.__data:
self.__data = LineList(self.__data.split("\n"))
else:
self.__data = LineList()
self.__bStoringLines = True
return self.__data
#
@lines.setter
def lines(self, value:typing.Union[tuple,list,LineList]):
assert isinstance(value, (tuple, list, LineList))
for x in value:
assert isinstance(x, str)
if isinstance(value, LineList):
self.__data = value
else:
self.__data = LineList(value)
self.__bStoringLines = True
#
@property
def text(self) -> str:
if self.__bStoringLines:
# convert to text
self.__data = "\n".join(self.__data)
self.__bStoringLines = False
return self.__data
#
@text.setter
def text(self, value:str):
assert isinstance(value, str)
self.__data = value
self.__bStoringLines = False
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
if self.__bStoringLines:
return "\n".join(self.__data)
else:
return self.__data
#
def __repr__(self):
if self.__bStoringLines:
return repr("\n".join(self.__data))
else:
return repr(self.__data)
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-commentjson",
"score": 3
} |
#### File: src/jk_commentjson/commentjson.py
```python
try:
import json
except ImportError:
# If python version is 2.5 or less, use simplejson
import simplejson as json
import re
import traceback
import codecs
class JSONLibraryException(Exception):
''' Exception raised when the JSON library in use raises an exception i.e.
the exception is not caused by `commentjson` and only caused by the JSON
library `commentjson` is using.
.. note::
As of now, ``commentjson`` supports only standard library's ``json``
module. It might start supporting other widely-used contributed JSON
libraries in the future.
'''
def __init__(self, json_error=""):
tb = traceback.format_exc()
tb = '\n'.join(' ' * 4 + line_ for line_ in tb.split('\n'))
message = [
'JSON Library Exception\n',
('Exception thrown by JSON library (json): '
'\033[4;37m%s\033[0m\n' % json_error),
'%s' % tb,
]
Exception.__init__(self, '\n'.join(message))
def loads(text, **kwargs):
''' Deserialize `text` (a `str` or `unicode` instance containing a JSON
document with Python or JavaScript like comments) to a Python object.
:param text: serialized JSON string with or without comments.
:param kwargs: all the arguments that `json.loads <http://docs.python.org/
2/library/json.html#json.loads>`_ accepts.
:raises: commentjson.JSONLibraryException
:returns: dict or list.
'''
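# Usage sketch (illustrative):
#   loads('{\n    // a comment\n    "a": 1\n}')   # -> {"a": 1}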
regex = r'\s*(#|\/{2}).*$'
regex_inline1 = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#).*)|)$'
regex_inline2 = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\').*\'),?)(?:\s)*(((#).*)|)$'
lines = text.split('\n')
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
lines[index] = ""
elif re.search(regex_inline2, line):
lines[index] = re.sub(regex_inline2, r'\1', line)
elif re.search(regex_inline1, line):
lines[index] = re.sub(regex_inline1, r'\1', line)
try:
lineNo = 1
for line in lines:
#print(str(lineNo) + "\t" + line)
lineNo += 1
return json.loads('\n'.join(lines), **kwargs)
except Exception as e:
raise JSONLibraryException(str(e))
def dumps(obj, **kwargs):
''' Serialize `obj` to a JSON formatted `str`. Accepts the same arguments
as `json` module in stdlib.
:param obj: a JSON serializable Python object.
:param kwargs: all the arguments that `json.dumps <http://docs.python.org/
2/library/json.html#json.dumps>`_ accepts.
:raises: commentjson.JSONLibraryException
:returns str: serialized string.
'''
try:
return json.dumps(obj, **kwargs)
except Exception as e:
raise JSONLibraryException(str(e))
def _detectByBOM(path, defaultValue):
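# Reads the first bytes of the file and compares them against the known
# UTF-8/UTF-16/UTF-32 byte order marks; returns the matching codec name,
# or defaultValue if no BOM is present.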
with open(path, 'rb') as f:
raw = f.read(4) # will read less if the file is smaller
for enc,boms in \
('utf-8-sig',(codecs.BOM_UTF8,)),\
('utf-16',(codecs.BOM_UTF16_LE,codecs.BOM_UTF16_BE)),\
('utf-32',(codecs.BOM_UTF32_LE,codecs.BOM_UTF32_BE)):
if any(raw.startswith(bom) for bom in boms):
return enc
return defaultValue
def loadFromFile(filePath, **kwargs):
enc = _detectByBOM(filePath, kwargs.get("encoding", 'utf-8'))
with codecs.open(filePath, 'r', enc) as f:
try:
if "encoding" in kwargs:
del kwargs["encoding"]
return loads(f.read(), **kwargs)
except Exception as e:
raise JSONLibraryException(str(e))
def load(fp, **kwargs):
''' Deserialize `fp` (a `.read()`-supporting file-like object containing
a JSON document with Python or JavaScript like comments) to a Python object.
:param fp: a `.read()`-supporting file-like object containing a JSON
document with or without comments.
:param kwargs: all the arguments that `json.load <http://docs.python.org/
2/library/json.html#json.load>`_ accepts.
:raises: commentjson.JSONLibraryException
:returns: dict or list.
'''
try:
return loads(fp.read(), **kwargs)
except Exception as e:
raise JSONLibraryException(str(e))
def dump(obj, fp, **kwargs):
''' Serialize `obj` as a JSON formatted stream to `fp` (a
`.write()`-supporting file-like object). Accepts the same arguments as
`json` module in stdlib.
:param obj: a JSON serializable Python object.
:param fp: a `.read()`-supporting file-like object containing a JSON
document with or without comments.
:param kwargs: all the arguments that `json.dump <http://docs.python.org/
2/library/json.html#json.dump>`_ accepts.
:raises: commentjson.JSONLibraryException
'''
try:
json.dump(obj, fp, **kwargs)
except Exception as e:
raise JSONLibraryException(str(e))
``` |
{
"source": "jkpubsrc/python-module-jk-console",
"score": 4
} |
#### File: python-module-jk-console/examples/colorspectrum1.py
```python
from jk_console import Console
def rangef(start, end, step):
v = start
while v <= end:
yield v
v += step
#
Console.clear()
for _s in rangef(0, 100, 3):
for _h in rangef(0, 100, 0.8):
h = _h/100.0
s = _s/100.0
c = Console.BackGround.hsl1(h, s, 0.5)
print(c + " ", end="")
print(Console.RESET)
print()
```
#### File: python-module-jk-console/examples/effect-2.py
```python
import os
import sys
import time
import datetime
import traceback
from jk_console import *
from jk_console.viewport import *
frame_colBG = ViewPortRGB.parseCSS("#000000")
frame_colFG = ViewPortRGB.parseCSS("#c0c0c0")
chars = ".,;/;,. "
colors = [
ViewPortRGB.parseCSS("#808080"),
ViewPortRGB.parseCSS("#8080c0"),
ViewPortRGB.parseCSS("#8080ff"),
ViewPortRGB.parseCSS("#c0c0ff"),
ViewPortRGB.parseCSS("#ffffff"),
ViewPortRGB.parseCSS("#c0c0ff"),
ViewPortRGB.parseCSS("#8080ff"),
ViewPortRGB.parseCSS("#8080c0"),
]
w = Console.width()
h = Console.height()
mods = [ 0, -1, -1, -2, -2, -1, -1, 0 ]
vp = ViewPort()
cb = None
bForProfiling = True
def init():
global w, h, vp, cb
cb = vp.createBuffer(0, 0, w, h - 1)
rect = Rect(0, 0, cb.width, cb.height)
rect = rect.shrink(right = 1, bottom = 1)
cb.drawRect((rect.x, rect.y, rect.width, rect.height), colBG=frame_colBG, colFG=frame_colFG)
w = cb.width
h = cb.height
#
def drawPattern(cb:ConsoleBuffer, ofs:int):
for iy in range(1, h - 2):
for ix in range(2, w - 2):
j = (iy + ofs) % 8
i = (ix + iy + ofs + mods[j]) % 8
cb.set(ix, iy, chars[i], colFG=colors[i])
#
try:
init()
drawPattern(cb, 0)
i = 1
while not bForProfiling or (i < 200):
#time.sleep(0.05)
t0 = datetime.datetime.now()
drawPattern(cb, i)
t1 = datetime.datetime.now()
vp.render()
t2 = datetime.datetime.now()
#time.sleep(0.001)
td1 = t1 - t0
td2 = t2 - t1
tdS = t2 - t0
pdata = [
"~~ screen-size: ", str(w), "x", str(h), " pixels"
" ~~ rendering: %d.%03ds" % (td1.seconds, (td1.microseconds + 499) // 1000),
" ~~ buffer-to-screen: %d.%03ds" % (td2.seconds, (td2.microseconds + 499) // 1000),
" ~~ total: %d.%03ds (fps %d)" % (tdS.seconds, (tdS.microseconds + 499) // 1000, int(1.0 / (t2 - t0).total_seconds())),
]
if bForProfiling:
pdata.append(" ~~ " + str(199-i))
pdata.append(" ~~")
cb.drawText((2, h - 2), text="".join(pdata), colFG=frame_colFG)
vp.render()
i += 1
#ww = Console.width() - 1
#hh = Console.height() - 1
#if (ww != w) or (hh != h):
# init()
except Exception as e:
os.system("clear")
Console.clear()
print(Console.RESET)
traceback.print_exc()
```
#### File: python-module-jk-console/examples/FloatField.py
```python
import random
class FloatField(object):
def __init__(self, width:int, height:int, defaultValue:float = 0, _data = None):
self.width = width
self.height = height
if _data:
self.data = _data
else:
self.data = self.__createField(width, height, defaultValue)
#
def __createField(self, width:int, height:int, value:float = 0):
rows = []
for y in range(0, height):
cols = []
for x in range(0, width):
cols.append(value)
rows.append(cols)
return rows
#
@property
def maximum(self):
ret = self.data[0][0]
for y in range(0, self.height):
for x in range(0, self.width):
v = self.data[y][x]
if v > ret:
ret = v
return ret
#
@property
def minimum(self):
ret = self.data[0][0]
for y in range(0, self.height):
for x in range(0, self.width):
v = self.data[y][x]
if v < ret:
ret = v
return ret
#
def clone(self):
rows = []
for y in range(0, self.height):
cols = []
for x in range(0, self.width):
cols.append(self.data[y][x])
rows.append(cols)
return FloatField(self.width, self.height, _data = rows)
#
def add(self, value:float, maxValue:float = 1):
if maxValue is None:
for y in range(0, self.height):
for x in range(0, self.width):
self.data[y][x] += value
else:
for y in range(0, self.height):
for x in range(0, self.width):
v = self.data[y][x] + value
if v > maxValue:
v = maxValue
self.data[y][x] = v
return self
#
def subtract(self, value:float, minValue:float = 0):
if minValue is None:
for y in range(0, self.height):
for x in range(0, self.width):
self.data[y][x] -= value
else:
for y in range(0, self.height):
for x in range(0, self.width):
v = self.data[y][x] - value
if v < minValue:
v = minValue
self.data[y][x] = v
return self
#
def fill(self, value:float):
for y in range(0, self.height):
for x in range(0, self.width):
self.data[y][x] = value
return self
#
def fillRandom(self):
for y in range(0, self.height):
for x in range(0, self.width):
self.data[y][x] = random.random()
return self
#
def smooth(self, windowSize:int):
w1 = -windowSize
w2 = windowSize + 1
data2 = self.__createField(self.width, self.height)
for y in range(0, self.height):
for x in range(0, self.width):
sum = 0
count = 0
for iy in range(w1, w2):
yy = y + iy
for ix in range(w1, w2):
xx = x + ix
if (yy >= 0) and (yy < self.height) and (xx >= 0) and (xx < self.width):
sum += self.data[yy][xx]
count += 1
data2[y][x] = sum / count
self.data = data2
return self
#
#
```
#### File: python-module-jk-console/examples/test-mouse-pynput.py
```python
import sys
try:
from pynput.mouse import Listener
except:
print("Running on X required.")
sys.exit(0)
def on_move(x, y):
print("Mouse moved to ({0}, {1})".format(x, y))
def on_click(x, y, button, pressed):
if pressed:
print('Mouse clicked at ({0}, {1}) with {2}'.format(x, y, button))
def on_scroll(x, y, dx, dy):
print('Mouse scrolled at ({0}, {1})({2}, {3})'.format(x, y, dx, dy))
with Listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll) as listener:
listener.join()
```
#### File: jk_console/viewport/Rectangle.py
```python
from typing import Union
class Rectangle(object):
def __init__(self, *args):
if len(args) == 0:
self.__x1 = 0
self.__y1 = 0
self.__x2 = 0
self.__y2 = 0
elif len(args) == 1:
arg = args[0]
if isinstance(arg, (tuple, list)):
assert len(arg) == 4
assert isinstance(arg[0], int)
assert isinstance(arg[1], int)
assert isinstance(arg[2], int)
assert isinstance(arg[3], int)
self.__x1 = arg[0]
self.__y1 = arg[1]
self.__x2 = self.__x1 + arg[2]
self.__y2 = self.__y1 + arg[3]
elif isinstance(arg, Rectangle):
self.__x1 = arg.x1
self.__y1 = arg.y1
self.__x2 = arg.x2
self.__y2 = arg.y2
else:
raise Exception("arg 0 is " + str(type(arg)) + " ???")
elif len(args) == 4:
assert isinstance(args[0], int)
assert isinstance(args[1], int)
assert isinstance(args[2], int)
assert isinstance(args[3], int)
self.__x1 = args[0]
self.__y1 = args[1]
self.__x2 = self.__x1 + args[2]
self.__y2 = self.__y1 + args[3]
else:
raise Exception("args ???")
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
#
@property
def width(self) -> int:
return self.__w
#
@width.setter
def width(self, value:int):
assert isinstance(value, int)
self.__w = value
self.__x2 = self.__x1 + self.__w
#
@property
def height(self) -> int:
return self.__h
#
@height.setter
def height(self, value:int):
assert isinstance(value, int)
self.__h = value
self.__y2 = self.__y1 + self.__h
#
@property
def x(self) -> int:
return self.__x1
#
@x.setter
def x(self, value:int):
assert isinstance(value, int)
self.__x1 = value
self.__w = self.__x2 - self.__x1
#
@property
def y(self) -> int:
return self.__y1
#
@y.setter
def y(self, value:int):
assert isinstance(value, int)
self.__y1 = value
self.__h = self.__y2 - self.__y1
#
@property
def x1(self) -> int:
return self.__x1
#
@x1.setter
def x1(self, value:int):
assert isinstance(value, int)
self.__x1 = value
self.__w = self.__x2 - self.__x1
#
@property
def y1(self) -> int:
return self.__y1
#
@y1.setter
def y1(self, value:int):
assert isinstance(value, int)
self.__y1 = value
self.__h = self.__y2 - self.__y1
#
@property
def x2(self) -> int:
return self.__x2
#
@x2.setter
def x2(self, value:int):
assert isinstance(value, int)
self.__x2 = value
self.__w = self.__x2 - self.__x1
#
@property
def y2(self) -> int:
return self.__y2
#
@y2.setter
def y2(self, value:int):
assert isinstance(value, int)
self.__y2 = value
self.__h = self.__y2 - self.__y1
#
@property
def topLeft(self) -> tuple:
return (self.__x1, self.__y1)
#
@topLeft.setter
def topLeft(self, value:Union[tuple, list]):
assert isinstance(value, (tuple, list))
assert len(value) == 2
self.__x1 = value[0]
self.__y1 = value[1]
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
#
@property
def topRight(self) -> tuple:
return (self.__x2 - 1, self.__y1)
#
@topRight.setter
def topRight(self, value:Union[tuple, list]):
assert isinstance(value, (tuple, list))
assert len(value) == 2
self.__x2 = value[0] + 1
self.__y1 = value[1]
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
#
@property
def bottomRight(self) -> tuple:
return (self.__x2 - 1, self.__y2 - 1)
#
@bottomRight.setter
def bottomRight(self, value:Union[tuple, list]):
assert isinstance(value, (tuple, list))
assert len(value) == 2
self.__x2 = value[0] + 1
self.__y2 = value[1] + 1
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
#
@property
def bottomLeft(self) -> tuple:
return (self.__x1, self.__y2 - 1)
#
@bottomLeft.setter
def bottomLeft(self, value:Union[tuple, list]):
assert isinstance(value, (tuple, list))
assert len(value) == 2
self.__x1 = value[0]
self.__y2 = value[1] + 1
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
#
def isValid(self) -> bool:
return (self.__w > 0) and (self.__h > 0)
#
def area(self) -> int:
return self.__w * self.__h
#
#def clone(self) -> Rectangle:
def clone(self):
return Rectangle(self)
#
def enlarge(self, *args):
if len(args) == 1:
v = args[0]
if isinstance(v, Rectangle):
self.__x1 -= v.x1
self.__y1 -= v.y1
self.__x2 += v.x2
self.__y2 += v.y2
else:
self.__x1 -= v
self.__y1 -= v
self.__x2 += v
self.__y2 += v
elif len(args) == 2:
vh = args[0]
vv = args[1]
self.__x1 -= vh
self.__y1 -= vv
self.__x2 += vh
self.__y2 += vv
elif len(args) == 4:
self.__x1 -= args[0]
self.__y1 -= args[1]
self.__x2 += args[2]
self.__y2 += args[3]
else:
raise Exception("args ???")
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
return self
#
def shrink(self, *args):
if len(args) == 1:
v = args[0]
if isinstance(v, Rectangle):
self.__x1 += v.x1
self.__y1 += v.y1
self.__x2 -= v.x2
self.__y2 -= v.y2
else:
self.__x1 += v
self.__y1 += v
self.__x2 -= v
self.__y2 -= v
elif len(args) == 2:
vh = args[0]
vv = args[1]
self.__x1 += vh
self.__y1 += vv
self.__x2 -= vh
self.__y2 -= vv
elif len(args) == 4:
self.__x1 += args[0]
self.__y1 += args[1]
self.__x2 -= args[2]
self.__y2 -= args[3]
else:
raise Exception("args ???")
self.__w = self.__x2 - self.__x1
self.__h = self.__y2 - self.__y1
return self
#
#def intersect(self, other:Rectangle) -> Rectangle:
def intersect(self, other):
assert isinstance(other, Rectangle)
if (other.__x1 > self.__x2) \
or (other.__y1 > self.__y2) \
or (other.__x2 < self.__x1) \
or (other.__y2 < self.__y1):
# no intersection
return None
x1 = max(self.__x1, other.__x1)
y1 = max(self.__y1, other.__y1)
x2 = min(self.__x2, other.__x2)
y2 = min(self.__y2, other.__y2)
return Rectangle(x1, y1, x2 - x1, y2 - y1)
#
#def unite(self, other:Rectangle) -> Rectangle:
def unite(self, other):
assert isinstance(other, Rectangle)
x1 = min(self.__x1, other.__x1)
y1 = min(self.__y1, other.__y1)
x2 = max(self.__x2, other.__x2)
y2 = max(self.__y2, other.__y2)
return Rectangle(x1, y1, x2 - x1, y2 - y1)
#
@staticmethod
#def intersectMany(other) -> Rectangle:
def intersectMany(other):
assert isinstance(other, (tuple, list))
if len(other) == 0:
raise Exception("args ???")
if len(other) == 1:
assert isinstance(other[0], Rectangle)
return other[0]
rect = other[0]
assert isinstance(rect, Rectangle)
x1 = rect.__x1
y1 = rect.__y1
x2 = rect.__x2
y2 = rect.__y2
for r in other[1:]:
assert isinstance(r, Rectangle)
if (r.__x1 > x2) \
or (r.__y1 > y2) \
or (r.__x2 < x1) \
or (r.__y2 < y1):
# no intersection
return None
x1 = max(x1, r.__x1)
y1 = max(y1, r.__y1)
x2 = min(x2, r.__x2)
y2 = min(y2, r.__y2)
return Rectangle(x1, y1, x2 - x1, y2 - y1)
#
@staticmethod
#def uniteMany(other) -> Rectangle:
def uniteMany(other):
assert isinstance(other, (tuple, list))
x1 = min([ r.__x1 for r in other ])
y1 = min([ r.__y1 for r in other ])
x2 = max([ r.__x2 for r in other ])
y2 = max([ r.__y2 for r in other ])
return Rectangle(x1, y1, x2 - x1, y2 - y1)
#
def shift(self, x:int, y:int):
assert isinstance(x, int)
assert isinstance(y, int)
self.__x1 += x
self.__x2 += x
self.__y1 += y
self.__y2 += y
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-etcpasswd",
"score": 2
} |
#### File: src/jk_etcpasswd/GrpFile.py
```python
import collections
import os
import sys
import codecs
import typing
import jk_typing
from .GrpRecord import GrpRecord
class GrpFile(object):
################################################################
## Constants
################################################################
################################################################
## Constructor
################################################################
@jk_typing.checkFunctionSignature()
def __init__(self,
pwdFile:str = "/etc/group",
shadowFile:str = "/etc/gshadow",
pwdFileContent:str = None,
shadowFileContent:str = None,
bTest:bool = False,
jsonData:dict = None,
):
self.__records = [] # stores GrpRecord objects
self.__recordsByGroupName = {} # stores str->GrpRecord
if jsonData is None:
# regular instantiation
self.__pwdFilePath = pwdFile
self.__shadowFilePath = shadowFile
if pwdFileContent is None:
with codecs.open(pwdFile, "r", "utf-8") as f:
pwdFileContent = f.read()
if shadowFileContent is None:
with codecs.open(shadowFile, "r", "utf-8") as f:
shadowFileContent = f.read()
lineNo = -1
for line in pwdFileContent.split("\n"):
lineNo += 1
if not line:
continue
line = line.rstrip("\n")
items = line.split(":")
if (len(items) != 4) or (items[1] != 'x'):
raise Exception("Line " + str(lineNo + 1) + ": Invalid file format: " + pwdFile)
extraGroups = self.__parseExtraGroups(items[3])
r = GrpRecord(items[0], int(items[2]), extraGroups)
self.__records.append(r)
self.__recordsByGroupName[r.groupName] = r
lineNo = -1
for line in shadowFileContent.split("\n"):
lineNo += 1
if not line:
continue
line = line.rstrip("\n")
items = line.split(":")
if (len(items) != 4) or (len(items[2]) > 0):
raise Exception("Line " + str(lineNo + 1) + ": Invalid file format: " + shadowFile)
r = self.__recordsByGroupName.get(items[0])
if r is None:
raise Exception("Line " + str(lineNo + 1) + ": User \"" + items[0] + "\" not found! Invalid file format: " + shadowFile)
r.groupPassword = items[1]
for extraGroup in self.__parseExtraGroups(items[3]):
if extraGroup not in r.extraGroups:
r.extraGroups.append(extraGroup)
# ----
if bTest:
self._compareDataTo(
pwdFile = pwdFile,
shadowFile = shadowFile,
pwdFileContent = pwdFileContent,
shadowFileContent = shadowFileContent,
)
else:
# deserialization
assert jsonData["grpFormat"] == 1
self.__pwdFilePath = jsonData["grpFilePath"]
self.__shadowFilePath = jsonData["grpShadowFilePath"]
for jRecord in jsonData["grpRecords"]:
r = GrpRecord.createFromJSON(jRecord)
self.__records.append(r)
self.__recordsByGroupName[r.groupName] = r
#
################################################################
## Properties
################################################################
################################################################
## Helper Methods
################################################################
def __parseExtraGroups(self, groupString:typing.Union[str,None]) -> list:
if (groupString is None) or (len(groupString.strip()) == 0):
return []
else:
return groupString.split(",")
#
################################################################
## Public Methods
################################################################
def toJSON(self) -> dict:
ret = {
"grpFormat": 1,
"grpFilePath": self.__pwdFilePath,
"grpShadowFilePath": self.__shadowFilePath,
"grpRecords": [ r.toJSON() for r in self.__records ],
}
return ret
#
def idToNameMap(self) -> typing.Dict[int,str]:
ret = {}
for r in self.__records:
ret[r.groupID] = r.groupName
return ret
#
def nameToIDMap(self) -> typing.Dict[str,int]:
ret = {}
for r in self.__records:
ret[r.groupName] = r.groupID
return ret
#
#
# This method verifies that the data stored in this object reproduces the exact content of the password files in "/etc".
# An exception is raised on error.
#
@jk_typing.checkFunctionSignature()
def _compareDataTo(self, pwdFile:str = None, shadowFile:str = None, pwdFileContent:str = None, shadowFileContent:str = None):
if pwdFileContent is None:
if pwdFile is None:
pwdFile = self.__pwdFilePath
with codecs.open(pwdFile, "r", "utf-8") as f:
pwdFileContent = f.read()
if shadowFileContent is None:
if shadowFile is None:
shadowFile = self.__shadowFilePath
with codecs.open(shadowFile, "r", "utf-8") as f:
shadowFileContent = f.read()
contentPwdFile, contentShadowFile = self.toStringLists()
lineNo = -1
for line in pwdFileContent.split("\n"):
lineNo += 1
if not line:
continue
line = line.rstrip("\n")
if line != contentPwdFile[lineNo]:
print("-- Line read: " + repr(line))
print("-- Line generated: " + repr(contentPwdFile[lineNo]))
raise Exception("Line " + str(lineNo + 1) + ": Lines differ in file: " + pwdFile)
lineNo = -1
for line in shadowFileContent.split("\n"):
lineNo += 1
if not line:
continue
line = line.rstrip("\n")
if line != contentShadowFile[lineNo]:
print("-- Line read: " + repr(line))
print("-- Line generated: " + repr(contentShadowFile[lineNo]))
raise Exception("Line " + str(lineNo + 1) + ": Lines differ in file: " + shadowFile)
#
#
# Write the content to the group files in "/etc".
#
@jk_typing.checkFunctionSignature()
def store(self, pwdFile:str = None, shadowFile:str = None):
if pwdFile is None:
pwdFile = self.__pwdFilePath
if shadowFile is None:
shadowFile = self.__shadowFilePath
contentPwdFile, contentShadowFile = self.toStrings()
with codecs.open(pwdFile, "w", "utf-8") as f:
os.fchmod(f.fileno(), 0o644)
f.write(contentPwdFile)
with codecs.open(shadowFile, "w", "utf-8") as f:
os.fchmod(f.fileno(), 0o640)
f.write(contentShadowFile)
#
def toStrings(self) -> typing.Tuple[str,str]:
contentPwdFile = ""
contentShadowFile = ""
for r in self.__records:
contentPwdFile += r.groupName + ":x:" + str(r.groupID) + ":" + ",".join(sorted(r.extraGroups)) + "\n"
contentShadowFile += r.groupName + ":" + r.groupPassword + "::" + ",".join(sorted(r.extraGroups)) + "\n"
return contentPwdFile, contentShadowFile
#
def toStringLists(self) -> typing.Tuple[list,list]:
contentPwdFile = []
contentShadowFile = []
for r in self.__records:
contentPwdFile.append(r.groupName + ":x:" + str(r.groupID) + ":" + ",".join(r.extraGroups))
contentShadowFile.append(r.groupName + ":" + r.groupPassword + "::" + ",".join(r.extraGroups))
return contentPwdFile, contentShadowFile
#
def get(self, groupNameOrID:typing.Union[str,int]) -> typing.Union[GrpRecord,None]:
if isinstance(groupNameOrID, str):
return self.__recordsByGroupName.get(groupNameOrID, None)
elif isinstance(groupNameOrID, int):
for r in self.__records:
if r.groupID == groupNameOrID:
return r
return None
else:
raise Exception("Invalid data specified for argument 'groupNameOrID': " + repr(groupNameOrID))
#
################################################################
## Public Static Methods
################################################################
@staticmethod
def createFromJSON(j:dict):
assert isinstance(j, dict)
return GrpFile(jsonData=j)
#
#
```
#### File: src/jk_etcpasswd/PwdRecord.py
```python
import os
import sys
import codecs
import typing
#import jk_typing
class PwdRecord(object):
__slots__ = (
"userName",
"userID",
"groupID",
"description",
"homeDirPath",
"shellDirPath",
"secretPwdHash",
"extraShadowData",
)
################################################################
## Constants
################################################################
################################################################
## Constructor
################################################################
def __init__(self, userName:str, userID:int, groupID:int, description:str, homeDirPath:str, shellDirPath:str):
assert isinstance(userName, str)
assert isinstance(userID, int)
assert isinstance(groupID, int)
assert isinstance(description, str)
assert isinstance(homeDirPath, str)
assert isinstance(shellDirPath, str)
self.userName = userName
self.userID = userID
self.groupID = groupID
self.description = description
self.homeDirPath = homeDirPath
self.shellDirPath = shellDirPath
self.secretPwdHash = None
self.extraShadowData = None
#
################################################################
## Properties
################################################################
################################################################
## Helper Methods
################################################################
################################################################
## Public Methods
################################################################
def toJSON(self) -> dict:
ret = {
"userName": self.userName,
"userID": self.userID,
"groupID": self.groupID,
"description": self.description,
"homeDirPath": self.homeDirPath,
"shellDirPath": self.shellDirPath,
"secretPwdHash": self.secretPwdHash,
"extraShadowData": self.extraShadowData,
}
return ret
#
################################################################
## Public Static Methods
################################################################
@staticmethod
def createFromJSON(j:dict):
assert isinstance(j, dict)
ret = PwdRecord(j["userName"], j["userID"], j["groupID"], j["description"], j["homeDirPath"], j["shellDirPath"])
ret.secretPwdHash = j["secretPwdHash"]
ret.extraShadowData = j["extraShadowData"]
return ret
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-flexdata",
"score": 3
} |
#### File: src/jk_flexdata/__init__.py
```python
__author__ = "<NAME>"
__version__ = "0.2022.4.10"
from jk_testing import Assert
from .flexdata import FlexObject, NONE
from .FlexDataSelector import FlexDataSelector
bJSONCfgHelperAvailable = False
try:
import jk_jsoncfghelper2
import json
bJSONCfgHelperAvailable = True
except ImportError:
pass
#
# Load data from a file and check it against the structure <c>checkerName</c> defined in <c>scmgr</c>.
#
# @param str filePath The path of the file to load.
# @param jk_jsoncfghelper2.StructureCheckerManager scmgr The structure checker manager that holds the verification schemas
# @param str structureTypeName The name of the structure type the data should be conform to
# @return FlexObject A <c>FlexObject</c>.
#
def loadFromFile(filePath:str, scmgr = None, structureTypeName:str = None) -> FlexObject:
assert isinstance(filePath, str)
if bJSONCfgHelperAvailable:
if scmgr or structureTypeName:
Assert.isIn(scmgr.__class__.__name__, [ "StructureCheckerManager", "jk_jsoncfghelper2.StructureCheckerManager" ])
Assert.isInstance(structureTypeName, str)
with open(filePath, "r") as f:
data = json.load(f)
assert isinstance(data, dict)
if scmgr or structureTypeName:
checker = scmgr.getE(structureTypeName)
if checker.checkB(scmgr, data):
return FlexObject(data)
else:
raise Exception("Data does not match type " + repr(structureTypeName)) # TODO
else:
return FlexObject(data)
else:
if (scmgr is not None) or (structureTypeName is not None):
raise Exception("As module jk_jsoncfghelper2 is not installed, scmgr and structureTypeName must noe None!")
with open(filePath, "r") as f:
data = json.load(f)
assert isinstance(data, dict)
return FlexObject(data)
#
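# Usage sketch (illustrative; "settings.json" is a hypothetical file containing a JSON object):
#   cfg = loadFromFile("settings.json")
#   # cfg is a FlexObject wrapping the parsed dictionary; schema checking only happens
#   # if scmgr and structureTypeName are provided and jk_jsoncfghelper2 is installed.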
#
# Convert the data and check it against the structure <c>checkerName</c> defined in <c>scmgr</c>.
#
# @param dict data The data.
# @param jk_jsoncfghelper2.StructureCheckerManager scmgr The structure checker manager that holds the verification schemas
# @param str structureTypeName The name of the structure type the data should be conform to
# @return FlexObject A <c>FlexObject</c>.
#
def createFromData(data:dict, scmgr = None, structureTypeName:str = None) -> FlexObject:
assert isinstance(data, dict)
if bJSONCfgHelperAvailable:
if scmgr or structureTypeName:
Assert.isIn(scmgr.__class__.__name__, [ "StructureCheckerManager", "jk_jsoncfghelper2.StructureCheckerManager" ])
Assert.isInstance(structureTypeName, str)
if scmgr or structureTypeName:
checker = scmgr.getE(structureTypeName)
if checker.checkB(scmgr, data):
return FlexObject(data)
else:
raise Exception("Data does not match type " + repr(structureTypeName)) # TODO
else:
return FlexObject(data)
else:
if (scmgr is not None) or (structureTypeName is not None):
raise Exception("As module jk_jsoncfghelper2 is not installed, scmgr and structureTypeName must noe None!")
return FlexObject(data)
#
``` |
{
"source": "jkpubsrc/python-module-jk-git",
"score": 2
} |
#### File: jk_git/impl/GitConfigFileSection.py
```python
import os
import re
import collections
import typing
import jk_typing
import jk_prettyprintobj
class GitConfigFileSection(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, name:str, argument:str = None):
self.__name = name
self.__argument = argument
self.__properties = collections.OrderedDict()
self.__propertiesCachedDict = None
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def name(self) -> str:
return self.__name
#
@property
def argument(self) -> str:
return self.__argument
#
@property
def properties(self) -> typing.Dict[str,str]:
if self.__propertiesCachedDict is None:
self.__propertiesCachedDict = dict(self.__properties)
return self.__propertiesCachedDict
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dumpVarNames(self) -> list:
return [
"name",
"argument",
"properties",
]
#
################################################################################################################################
## Public Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def getProperty(self, key:str) -> typing.Union[str,None]:
return self.__properties.get(key)
#
@jk_typing.checkFunctionSignature()
def setProperty(self, key:str, value:str):
self.__properties[key] = value
self.__propertiesCachedDict = None
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-infodatatree",
"score": 3
} |
#### File: src/jk_infodatatree/value_output_formatting.py
```python
import datetime
import time
import jk_utils
################################################################################################################################
#### Data converters
################################################################################################################################
def _doFormatSecsDiff(v:float) -> str:
nTotalSeconds = int(round(v))
nAbsTotalSeconds = abs(nTotalSeconds)
s = "+" if nTotalSeconds >= 0 else "-"
return s + jk_utils.formatTime(nAbsTotalSeconds)
#
def _doFormatTempC(v:float) -> str:
return str(round(v, 1)) + " °C"
#
def _doFormatFrequency(v:float) -> str:
if v < 0:
return "- Hz"
v = int(v)
if v > 1000:
# kHz
if v > 1000000:
# MHz
if v > 1000000000:
# GHz
return str(round(v / 1000000000, 2)) + " GHz"
else:
# < 1 GHz
return str(round(v / 1000000, 2)) + " MHz"
else:
# < 1 MHz
return str(round(v / 1000, 2)) + " kHz"
else:
# < 1 kHz
return str(v) + " Hz"
#
def _formatBytes(v:float) -> str:
if v < 0:
return "---"
s = jk_utils.formatBytes(v)
return s[:-1] + " " + s[-1] + "B"
#
def _formatDurationSecondsHR(v:float) -> str:
if v < 0:
return "---"
t = int(round(v))
if t == 1:
return "1 second"
else:
return str(t) + " seconds"
#
def _formatDurationHR(v:float) -> str:
if v < 0:
return "---"
nTimeDelta = v
n = nTimeDelta
nDays = int(n / (3600*24))
n = n - nDays * 3600*24
nHours = int(n / 3600)
n = n - nHours * 3600
nMinutes = int(n / 60)
nSeconds = int(round(n - nMinutes * 60))
bIgnoreMinutes = False
bIgnoreSeconds = False
if nTimeDelta > 3600*24:
bIgnoreMinutes = True
bIgnoreSeconds = True
elif nTimeDelta > 3600:
bIgnoreSeconds = True
ret = []
if nDays:
ret.append(str(nDays) + " " + ("day" if nDays == 1 else "days"))
if nHours:
ret.append(str(nHours) + " " + ("hour" if nHours == 1 else "hours"))
if nMinutes and not bIgnoreMinutes:
ret.append(str(nMinutes) + " " + ("minute" if nMinutes == 1 else "minutes"))
if nSeconds and not bIgnoreSeconds:
ret.append(str(nSeconds) + " " + ("second" if nSeconds == 1 else "seconds"))
return ", ".join(ret)
#
def _formatTimeStampEU(t:float) -> str:
if t < 0:
return "---"
dt = datetime.datetime.fromtimestamp(t)
sDate = dt.strftime("%d.%M.%Y")
sTime = dt.strftime("%H:%m:%S")
return sDate + " " + sTime
#
def _formatTimeStampPastHR(t:float) -> str:
if t < 0:
return "---"
dt = datetime.datetime.fromtimestamp(t)
nYear = dt.year
nMonth = dt.month
nDay = dt.day
tNow = datetime.datetime.now()
if dt > tNow:
# specified timestamp is from the future
return "---"
nSecondsToday = tNow.hour * 3600 + tNow.minute * 60 + tNow.second
nDaysAgo = int(((tNow - dt).total_seconds() + 1 - nSecondsToday) / (24*3600))
if nDaysAgo > 7:
sDate = dt.strftime("%-dth of %B")
elif nDaysAgo == 0:
sDate = "today"
elif nDaysAgo == 1:
sDate = "yesterday"
else:
return str(nDaysAgo) + " days ago"
sTime = dt.strftime("%H:%m:%S")
return sDate + " " + sTime
#
def _doFormatBool(v:bool) -> str:
return "yes" if v else "no"
#
def _doFormatInt(v:int) -> str:
return str(v)
#
def _doFormatFloatPercent0(v:float) -> str:
v = v * 100
return str(round(v)) + "%"
#
def _doFormatFloatPercent1(v:float) -> str:
v = v * 100
return str(round(v, 1)) + "%"
#
def _doFormatFloatPercent2(v:float) -> str:
v = v * 100
return str(round(v, 2)) + "%"
#
def _doFormatFloat(v:float) -> str:
return str(v)
#
def _doFormatStr(v:str) -> str:
return v
#
def _formatStrList(data:list) -> str:
return ", ".join(data)
#
def _formatSortedStrList(data:list) -> str:
return ", ".join(sorted(data))
#
def _formatShortenedStrList(data:list) -> str:
d = data[:3]
s = ", ".join(sorted(d))
if len(data) > 3:
s += ", ..."
return s
#
def _formatIntList(data:list) -> str:
return ", ".join([ str(x) for x in data ])
#
def _formatSortedIntList(data:list) -> str:
return ", ".join([ str(x) for x in sorted(data) ])
#
def _formatShortenedIntList(data:list) -> str:
d = data[:3]
d = [ str(x) for x in d ]
s = ", ".join(d)
if len(data) > 3:
s += ", ..."
return s
#
def _formatFloatList(data:list) -> str:
return ", ".join([ str(x) for x in data ])
#
def _formatSortedFloatList(data:list) -> str:
return ", ".join([ str(x) for x in sorted(data) ])
#
def _formatShortenedFloatList(data:list) -> str:
d = data[:3]
d = [ str(x) for x in d ]
s = ", ".join(d)
if len(data) > 3:
s += ", ..."
return s
#
################################################################################################################################
#### Data structure that contains all definitions, descriptions and converters to call
################################################################################################################################
DEFINITIONS = {
"bool": {
"description": {
"valueDataType": "bool",
"text": "Boolean value",
},
"pyDataType": bool,
"notDataType": ( int, float ),
"default": {
"callable": _doFormatBool,
"text": "Outputs 'yes' or 'no'",
"outputExample": "yes",
},
"visFlavors": {
}
},
"int": {
"description": {
"valueDataType": "int",
"text": "Integer value",
},
"pyDataType": int,
"notDataType": ( bool, float ),
"default": {
"callable": _doFormatInt,
"text": "The input value as provided",
"outputExample": "123",
},
"visFlavors": {
}
},
"float": {
"description": {
"valueDataType": "float",
"text": "Float value",
},
"pyDataType": float,
"notDataType": ( int, bool ),
"default": {
"callable": _doFormatFloat,
"text": "The input value as provided",
"outputExample": "3.1415927",
},
"visFlavors": {
"%0": {
"callable": _doFormatFloatPercent0,
"text": "The float value formatted as a percent value with zero decimal digits",
"outputExample": "23%",
},
"%1": {
"callable": _doFormatFloatPercent1,
"text": "The float value formatted as a percent value with one decimal digit",
"outputExample": "23.4%",
},
"%2": {
"callable": _doFormatFloatPercent2,
"text": "The float value formatted as a percent value with two decimal digits",
"outputExample": "23.45%",
},
}
},
"str": {
"description": {
"valueDataType": "str",
"text": "String value",
},
"pyDataType": str,
"default": {
"callable": _doFormatStr,
"text": "The input string as provided",
"outputExample": "abcdef",
},
"visFlavors": {
}
},
"secsdiff": {
"description": {
"valueDataType": "int, float",
"constraint": ">= 0",
"text": "Number of seconds",
},
"pyDataType": ( int, float ),
"default": {
"callable": _doFormatSecsDiff,
"text": "The difference in time",
"outputExample": "+00:02:39",
},
"visFlavors": {
}
},
"tempc": {
"description": {
"valueDataType": "int, float",
"text": "Temperature in °C",
},
"pyDataType": ( int, float ),
"default": {
"callable": _doFormatTempC,
"text": "The temperature value in human readable form",
"outputExample": "37.1 °C",
},
"visFlavors": {
}
},
"freq": {
"description": {
"valueDataType": "int, float",
"constraint": ">= 0",
"text": "Frequency = Number of events per second",
},
"pyDataType": ( int, float ),
"default": {
"callable": _doFormatFrequency,
"text": "The frequency in human readable form",
"outputExample": "3.45 GHz",
},
"visFlavors": {
}
},
"bytes": {
"description": {
"valueDataType": "int",
"constraint": ">= 0",
"text": "Number of bytes",
},
"pyDataType": int,
"default": {
"callable": _formatBytes,
"text": "The number of bytes in human readable form",
"outputExample": "3.45 MB",
},
"visFlavors": {
}
},
"timestamp": {
"description": {
"valueDataType": "int, float",
"constraint": "> 0",
"text": "Number of seconds since Epoch",
},
"pyDataType": ( int, float ),
"default": {
"callable": _formatTimeStampEU,
"text": "Returns time stamp in human readable form",
"outputExample": "23.03.2020 12:03:59",
},
"visFlavors": {
"age": {
"callable": _formatTimeStampPastHR,
"text": "Returns time stamp in human readable form",
"outputExample": "yesterday 12:03:59",
},
}
},
"timestamputc": {
"description": {
"valueDataType": "int, float",
"constraint": "> 0",
"text": "Number of UTC seconds since Epoch",
},
"pyDataType": ( int, float ),
"default": {
"callable": _formatTimeStampEU,
"text": "Returns time stamp in human readable form",
"outputExample": "23.03.2020 12:03:59",
},
"visFlavors": {
"age": {
"callable": _formatTimeStampPastHR,
"text": "Returns time stamp in human readable form",
"outputExample": "yesterday 12:03:59",
},
}
},
"duration": {
"description": {
"valueDataType": "int, float",
"constraint": ">= 0",
"text": "Number of seconds",
},
"pyDataType": ( int, float ),
"default": {
"callable": _formatDurationHR,
"text": "Time spent in human readable form",
"outputExample": "3 hours, 1 minute, 29 seconds",
},
"visFlavors": {
"secs": {
"callable": _formatDurationSecondsHR,
"text": "Number of seconds spent in human readable form",
"outputExample": "129 seconds",
},
}
},
"str[]": {
"description": {
"valueDataType": "str[]",
"text": "List of string values",
},
"pyDataType": ( tuple, list ),
"pySubDataType": str,
"default": {
"callable": _formatStrList,
"text": "String values separated by comma",
"outputExample": "Mon, Tue, Wed, Thur, Fri",
},
"visFlavors": {
"sorted": {
"callable": _formatSortedStrList,
"text": "Sorted string values separated by comma",
"outputExample": "Mon, Tue, Wed, Thur, Fri",
},
"shorten": {
"callable": _formatShortenedStrList,
"text": "Shortened string value list separated by comma",
"outputExample": "Mon, Tue, Wed, ...",
},
},
},
"int[]": {
"description": {
"valueDataType": "int[]",
"text": "List of integer values",
},
"pyDataType": ( tuple, list ),
"pySubDataType": int,
"default": {
"callable": _formatIntList,
"text": "Integer values separated by comma",
"outputExample": "99, 4, 87, 7",
},
"visFlavors": {
"sorted": {
"callable": _formatSortedIntList,
"text": "Sorted integer values separated by comma",
"outputExample": "4, 7, 87, 99",
},
"shorten": {
"callable": _formatShortenedStrList,
"text": "Shortened integer value list separated by comma",
"outputExample": "99, 4, 87, ...",
},
},
},
"float[]": {
"description": {
"valueDataType": "float[]",
"text": "List of float values",
},
"pyDataType": ( tuple, list ),
"pySubDataType": float,
"default": {
"callable": _formatFloatList,
"text": "Float values separated by comma",
"outputExample": "99.9, 4.1, 87, 7.358",
},
"visFlavors": {
"sorted": {
"callable": _formatSortedFloatList,
"text": "Sorted float values separated by comma",
"outputExample": "4.1, 7.358, 87, 99.9",
},
"shorten": {
"callable": _formatShortenedStrList,
"text": "Shortened float value list separated by comma",
"outputExample": "99.9, 4.1, 87, ...",
},
},
},
}
ALL_VALID_DATA_TYPES = list(DEFINITIONS.keys())
ALIASES = {
"uptime": {
"valueDataType": "duration",
"visFlavor": None,
},
"age": {
"valueDataType": "duration",
"visFlavor": None,
},
}
################################################################################################################################
#### Main functions
################################################################################################################################
#
# Core formatting subroutine.
#
def formatValue_plaintext(value, dataType:str, visFlavor:str = None) -> str:
if value is None:
return "---"
assert dataType is not None
aliasStruct = ALIASES.get(dataType)
if aliasStruct is not None:
dataType = aliasStruct["valueDataType"]
if visFlavor is None:
visFlavor = aliasStruct["visFlavor"]
dataStruct = DEFINITIONS[dataType]
# check types
if not isinstance(value, dataStruct["pyDataType"]):
raise Exception("Expected value of type " + dataStruct["description"]["valueDataType"] + " but data value is of non-suitable python type " + repr(type(value)))
if "pySubDataType" in dataStruct:
pySubDataType = dataStruct["pySubDataType"]
for item in value:
if not isinstance(item, pySubDataType):
raise Exception("Expected value of type " + dataStruct["description"]["valueDataType"] + " but item value is of non-suitable python type " + repr(type(item)))
# convert
if visFlavor is None:
return dataStruct["default"]["callable"](value)
else:
if visFlavor in dataStruct["visFlavors"]:
return dataStruct["visFlavors"][visFlavor]["callable"](value)
else:
raise Exception("For value of type " + dataStruct["description"]["valueDataType"] + " this visFlavor value is invalid: " + repr(visFlavor))
#
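# Usage sketches (illustrative):
#   formatValue_plaintext(0.234, "float", "%1")   # -> "23.4%"
#   formatValue_plaintext(True, "bool")           # -> "yes"
#   formatValue_plaintext(None, "int")            # -> "---"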
def generateValueDataTypeDocu() -> str:
outputLines = []
outputLines.append("# Data types")
outputLines.append("")
outputLines.append("The following sections provide an overview about all data types supported by the data value data structure.")
outputLines.append("")
for key in sorted(DEFINITIONS.keys()):
dataStruct = DEFINITIONS[key]
outputLines.append("## Data type: '" + key + "'")
outputLines.append("")
outputLines.append("General information:")
outputLines.append("")
outputLines.append("* *FlexStruct type name:* `" + key + "`")
outputLines.append("* *Input value:* " + dataStruct["description"]["text"])
outputLines.append("* *Expected value type:* `" + dataStruct["description"]["valueDataType"] + "`")
outputLines.append("")
outputLines.append("Default output:")
outputLines.append("")
outputLines.append("* *Description:* " + dataStruct["default"]["text"])
outputLines.append("* *Output example:* \"`" + dataStruct["default"]["outputExample"] + "`\"")
outputLines.append("")
if ("visFlavors" in dataStruct) and dataStruct["visFlavors"]:
outputLines.append("The following visualization flavors exist:")
outputLines.append("")
for visFlavorName in dataStruct["visFlavors"]:
d = dataStruct["visFlavors"][visFlavorName]
outputLines.append("* \"`" + visFlavorName + "`\"")
outputLines.append("\t* *Description:* " + d["text"])
outputLines.append("\t* *Output example:* \"`" + d["outputExample"] + "`\"")
else:
outputLines.append("There are no visualization flavors for this data type.")
outputLines.append("")
return "\n".join(outputLines)
#
``` |
{
"source": "jkpubsrc/python-module-jk-invoke",
"score": 3
} |
#### File: src/jk_invoke/AbstractInvoker.py
```python
import os
import time
import random
from jk_testing import Assert
from jk_utils import ChModValue
from .CommandResult import CommandResult
class AbstractInvoker(object):
def __init__(self):
pass
#
def isSudo(self) -> bool:
return False
#
def runCmd(self, cmd:str, cmdArgs:list = None) -> CommandResult:
raise Exception()
#
def readTextFile(self, absFilePath:str) -> str:
raise Exception()
#
def writeTextFile(self, absFilePath:str, textContent:str):
raise Exception()
#
def _encodeSSHCmdLineArg(self, cmdArg:str) -> str:
return "'" + cmdArg.replace("'", "\\'") + "'"
#
def _encodeSSHCmdLine(self, *args) -> str:
if not args:
raise Exception("No arguments specified!")
ret = [ args[0] ]
for arg in args[1:]:
ret.append(self._encodeSSHCmdLineArg(arg))
return " ".join(ret)
#
def _createRandomFileName(self) -> str:
t = int(time.time()*1000000000)
return "x_" + str(t) + "-" + str(random.randint(0, 1000000000)) + ".tmp"
#
def _createLocalPrivateTempDir(self) -> str:
chmodValue = ChModValue(userW=True, userR=True, userX=True)
t = int(time.time()*1000000000)
while True:
dirPath = "/tmp/dir_" + str(t) + "-" + str(random.randint(0, 1000000000)) + ".tmp"
if not os.path.exists(dirPath):
os.makedirs(dirPath, int(chmodValue), exist_ok=False)
return dirPath
t += 1
#
def _generateLocalPrivateTempFile(self, textContent:str = None, chmodValue:ChModValue = None) -> str:
if chmodValue is None:
chmodValue = ChModValue(userW=True, userR=True)
t = int(time.time()*1000000000)
while True:
filePath = "/tmp/tmp_" + str(t) + "-" + str(random.randint(0, 1000000000)) + ".tmp"
if not os.path.exists(filePath):
with open(filePath, "w") as f:
pass
os.chmod(filePath, int(chmodValue))
if textContent is not None:
with open(filePath, "a") as f:
f.write(textContent)
return filePath
t += 1
#
#
```
#### File: src/jk_invoke/OneSlotPasswordProvider.py
```python
import os
import jk_json
from .simple_encrypter import encryptPwd, decryptPwd, isPwdEncrypted
class OneSlotPasswordProvider(object):
def __init__(self, thePassword:str):
self.__pwd = ""
self.setPassword(thePassword)
#
def __call__(self, machineName:str, loginName:str) -> str:
return self.__pwd
#
def storePwd(self, filePath:str):
with open(filePath, "w") as f:
f.write(encryptPwd(self.__pwd))
#
def loadPwd(self, filePath:str):
with open(filePath, "r") as f:
self.__pwd = decryptPwd(f.read())
#
def setPassword(self, thePassword:str):
assert isinstance(thePassword, str)
assert thePassword
if isPwdEncrypted(thePassword):
thePassword = decryptPwd(thePassword)
self.__pwd = thePassword
#
def getEncryptedPassword(self):
return encryptPwd(self.__pwd)
#
#
```
#### File: src/jk_invoke/SSHConnectionProvider.py
```python
import time
import os
import fabric
from jk_testing import Assert
from jk_utils import ChModValue
from .AbstractInvoker import AbstractInvoker
from .CommandResult import CommandResult
from .CachedPasswordProvider import CachedPasswordProvider
class SSHConnectionProvider(object):
def __init__(self, hostName:str, port:int, userName:str, passwordProvider):
assert isinstance(hostName, str)
assert isinstance(port, int)
assert isinstance(userName, str)
assert callable(passwordProvider)
self.__hostName = hostName
self.__port = port
self.__userName = userName
self.__passwordProvider = passwordProvider if isinstance(passwordProvider, CachedPasswordProvider) else CachedPasswordProvider(passwordProvider)
self.__c = None
#
def connect(self) -> fabric.Connection:
if self.__c is None:
pwd = self.__passwordProvider(self.__hostName, self.__userName)
config = fabric.Config(overrides={'sudo': {'password': <PASSWORD>}})
self.__c = fabric.Connection(self.__hostName, self.__userName, self.__port, config=config, connect_kwargs={"password": <PASSWORD>})
r = self.__c.run(
self._encodeSSHCmdLine("/bin/echo", "foo \" \" bar"),
hide=True)
Assert.isTrue(self.__c.is_connected)
Assert.isEqual(r.exited, 0)
Assert.isEqual(r.stderr, "")
Assert.isEqual(r.stdout, "foo \" \" bar\n")
return self.__c
#
def _encodeSSHCmdLineArg(self, cmdArg:str) -> str:
return "'" + cmdArg.replace("'", "\\'") + "'"
#
def _encodeSSHCmdLine(self, *args) -> str:
if not args:
raise Exception("No arguments specified!")
ret = [ args[0] ]
for arg in args[1:]:
ret.append(self._encodeSSHCmdLineArg(arg))
return " ".join(ret)
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-jsoncfghelper2",
"score": 3
} |
#### File: src/jk_jsoncfghelper2/compile_from_xml.py
```python
import jk_xmlparser
from jk_simplexml import *
from .value_checkers import *
def __xml_getBooleanAttribute(x:HElement, attrName:str, defaultValue:bool) -> bool:
if x.hasAttribute(attrName):
s = x.getAttributeValue(attrName)
if (s == "true") or (s == "1") or (s == "yes"):
return True
elif (s == "false") or (s == "0") or (s == "no"):
return False
else:
raise Exception("Invalid value specified for " + repr(attrName) + ": " + repr(s))
else:
return defaultValue
#
def _compile_null(scmgr:StructureCheckerManager, x:HElement):
required = __xml_getBooleanAttribute(x, "required", True)
return NullValueChecker(scmgr, required=required, nullable=True)
#
def _compile_int(scmgr:StructureCheckerManager, x:HElement):
minValue = None
if x.hasAttribute("minValue"):
minValue = int(x.getAttributeValue("minValue"))
maxValue = None
if x.hasAttribute("maxValue"):
maxValue = int(x.getAttributeValue("maxValue"))
allowedValues = None
if x.hasAttribute("allowedValues"):
allowedValues = [ int(i.strip()) for i in x.getAttributeValue("allowedValues").split(",") ]
required = __xml_getBooleanAttribute(x, "required", True)
nullable = __xml_getBooleanAttribute(x, "nullable", False)
return IntValueChecker(scmgr, minValue=minValue, maxValue=maxValue, required=required, nullable=nullable)
#
def _compile_float(scmgr:StructureCheckerManager, x:HElement):
minValue = None
if x.hasAttribute("minValue"):
minValue = float(x.getAttributeValue("minValue"))
maxValue = None
if x.hasAttribute("maxValue"):
maxValue = float(x.getAttributeValue("maxValue"))
allowedValues = None
if x.hasAttribute("allowedValues"):
allowedValues = [ float(i.strip()) for i in x.getAttributeValue("allowedValues").split(",") ]
required = __xml_getBooleanAttribute(x, "required", True)
nullable = __xml_getBooleanAttribute(x, "nullable", False)
return FloatValueChecker(scmgr, minValue=minValue, maxValue=maxValue, required=required, nullable=nullable)
#
def _compile_bool(scmgr:StructureCheckerManager, x:HElement):
required = __xml_getBooleanAttribute(x, "required", True)
nullable = __xml_getBooleanAttribute(x, "nullable", False)
return BooleanValueChecker(scmgr, required=required, nullable=nullable)
#
def _compile_str(scmgr:StructureCheckerManager, x:HElement):
minLength = None
if x.hasAttribute("minLength"):
minLength = int(x.getAttributeValue("minLength"))
maxLength = None
if x.hasAttribute("maxLength"):
maxLength = int(x.getAttributeValue("maxLength"))
allowedValues = None
if x.hasAttribute("allowedValues"):
allowedValues = [ s.strip() for s in x.getAttributeValue("allowedValues").split(",") ]
required = __xml_getBooleanAttribute(x, "required", True)
nullable = __xml_getBooleanAttribute(x, "nullable", False)
return StringValueChecker(scmgr, minLength=minLength, maxLength=maxLength, allowedValues=allowedValues, required=required, nullable=nullable)
#
def _compile_list(scmgr:StructureCheckerManager, x:HElement):
minLength = None
if x.hasAttribute("minLength"):
minLength = int(x.getAttributeValue("minLength"))
maxLength = None
if x.hasAttribute("maxLength"):
maxLength = int(x.getAttributeValue("maxLength"))
allowedElementTypes = None
if x.hasAttribute("elementTypes"):
allowedElementTypes = [ s.strip() for s in x.getAttributeValue("elementTypes").split(",") ]
required = __xml_getBooleanAttribute(x, "required", True)
nullable = __xml_getBooleanAttribute(x, "nullable", False)
return ListValueChecker(scmgr, minLength=minLength, maxLength=maxLength, allowedElementTypes=allowedElementTypes, required=required, nullable=nullable)
#
def _compile_anydict(scmgr:StructureCheckerManager, x:HElement):
required = __xml_getBooleanAttribute(x, "required", True)
nullable = __xml_getBooleanAttribute(x, "nullable", False)
allowedElementTypes = None
if x.hasAttribute("elementTypes"):
allowedElementTypes = [ s.strip() for s in x.getAttributeValue("elementTypes").split(",") ]
return AnyDictionaryValueChecker(scmgr, required=required, allowedElementTypes=allowedElementTypes, nullable=nullable)
#
def _compile_specificdict(scmgr:StructureCheckerManager, t:AbstractValueChecker, x:HElement):
t = t.cloneObject()
t.required = __xml_getBooleanAttribute(x, "required", True)
t.nullable = __xml_getBooleanAttribute(x, "nullable", False)
return t
#
def _compile_eq(structureName:str, checker:SpecificDictionaryValueChecker, x:HElement):
fieldName = x.getAttributeValue("field")
sFieldValue = x.getAllText().strip()
if fieldName in checker.children:
field = checker.children[fieldName]
v = field.parseValueFromStr(sFieldValue)
return ValueCondition(fieldName, v)
else:
raise Exception("No such field in structure " + structureName + ": " + repr(fieldName))
#
def _compileDef(scmgr:StructureCheckerManager, x:HElement):
#print("Compiling structure: " + x.name)
xStructure = x.getChildElement("STRUCTURE")
xCondition = x.getChildElement("CONDITION")
children = {}
for xChild in xStructure.children:
if not isinstance(xChild, HElement):
continue
name = xChild.name
dataType = xChild.getAttributeValue("dataType")
if dataType is None:
raise Exception("No data type specified for " + x.name + ":" + name)
#print("\t> field ", repr(name), ":", dataType)
if dataType in [ "null" ]:
children[name] = _compile_null(scmgr, xChild)
elif dataType in [ "int", "integer" ]:
children[name] = _compile_int(scmgr, xChild)
elif dataType == "float":
children[name] = _compile_float(scmgr, xChild)
elif dataType in [ "bool", "boolean" ]:
children[name] = _compile_bool(scmgr, xChild)
elif dataType in [ "str", "string" ]:
children[name] = _compile_str(scmgr, xChild)
elif dataType in [ "list" ]:
children[name] = _compile_list(scmgr, xChild)
elif dataType in [ "dict", "dictionary", "obj", "object" ]:
children[name] = _compile_anydict(scmgr, xChild)
else:
t = scmgr.get(dataType)
if t is None:
raise Exception("Unknown data type: " + repr(dataType))
else:
children[name] = _compile_specificdict(scmgr, t, xChild)
checker = SpecificDictionaryValueChecker(scmgr, children, structType=x.name)
if xCondition is not None:
conditions = []
for xChild in xCondition.children:
if not isinstance(xChild, HElement):
continue
name = xChild.name
#print("\t> condition ", repr(name))
if name == "eq":
conditions.append(_compile_eq(x.name, checker, xChild))
else:
raise Exception("Unknown condition: " + repr(name))
checker.conditions = conditions
return checker
#
_xmlParser = jk_xmlparser.XMLDOMParser()
def loadFromXMLFile(filePath:str, scmgr:StructureCheckerManager = None) -> StructureCheckerManager:
assert isinstance(filePath, str)
if scmgr is None:
scmgr = StructureCheckerManager()
with open(filePath, "r") as f:
rawText = f.read()
rawText = rawText.strip()
xRoot = _xmlParser.parseText(rawText)
#xRoot = _xmlParser.parseFile(filePath)
assert isinstance(xRoot, HElement)
for x in xRoot.children:
if isinstance(x, HElement):
# print("Registering: " + x.name)
scmgr.register(x.name, _compileDef(scmgr, x))
return scmgr
#
def loadFromXMLStr(rawText:str, scmgr:StructureCheckerManager = None) -> StructureCheckerManager:
assert isinstance(rawText, str)
if scmgr is None:
scmgr = StructureCheckerManager()
rawText = rawText.strip()
xRoot = _xmlParser.parseText(rawText)
assert isinstance(xRoot, HElement)
for x in xRoot.children:
if isinstance(x, HElement):
# print("Registering: " + x.name)
scmgr.register(x.name, _compileDef(scmgr, x))
return scmgr
#
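# ------------------------------------------------------------------------------
# Illustrative usage sketch (added by the editor, not part of the original file).
# The XML layout below is inferred from _compileDef: a root element whose children
# name the structures, each containing a STRUCTURE element with one child per
# field. The root tag "definitions" and the structure name "Person" are made-up.
#
# _EXAMPLE_XML = """
# <definitions>
#     <Person>
#         <STRUCTURE>
#             <name dataType="str" minLength="1"/>
#             <age dataType="int" minValue="0" required="false"/>
#         </STRUCTURE>
#     </Person>
# </definitions>
# """
#
# scmgr = loadFromXMLStr(_EXAMPLE_XML)
# personChecker = scmgr.get("Person")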
``` |
{
"source": "jkpubsrc/python-module-jk-json",
"score": 3
} |
#### File: src/jk_json/ParserErrorException.py
```python
import typing
from .SourceCodeLocation import *
class ParserErrorException(Exception):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, location:SourceCodeLocation, message, textData:str = None):
assert isinstance(location, SourceCodeLocation)
assert isinstance(message, str)
		if textData:
			assert isinstance(textData, str)
			lines = textData.split("\n")
			self.__textLine = lines[location.lineNo]
		else:
			self.__textLine = None
super().__init__(str(location) + " :: " + message)
self.__location = location
self.__message = message
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def location(self) -> SourceCodeLocation:
return self.__location
#
@property
def textLine(self) -> typing.Union[str,None]:
return self.__textLine
#
@property
def message(self) -> str:
return self.__message
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
#
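# Illustrative sketch (added): how calling code might report a parse failure.
# The parse() call is a hypothetical placeholder; only the exception properties
# used below (location, message, textLine) are defined in this file.
#
# try:
#     ast = parse(sourceText)
# except ParserErrorException as e:
#     print(str(e.location) + " :: " + e.message)
#     if e.textLine is not None:
#         print("    " + e.textLine)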
```
#### File: src/jk_jsonschema/re.py
```python
import re
def _detidy_cb(m):
if m.group(2): return m.group(2)
if m.group(3): return m.group(3)
return ""
#
#
# Compacts a verbose regular expression
#
def compactVerboseRegEx(retext:str) -> str:
assert isinstance(retext, str)
decomment = re.compile(r"""(?#!py/mx decomment Rev:20160225_1800)
# Discard whitespace, comments and the escapes of escaped spaces and hashes.
( (?: \s+ # Either g1of3 $1: Stuff to discard (3 types). Either ws,
| \#.* # or comments,
| \\(?=[\r\n]|$) # or lone escape at EOL/EOS.
)+ # End one or more from 3 discardables.
) # End $1: Stuff to discard.
| ( [^\[(\s#\\]+ # Or g2of3 $2: Stuff to keep. Either non-[(\s# \\.
| \\[^# Q\r\n] # Or escaped-anything-but: hash, space, Q or EOL.
| \( # Or an open parentheses, optionally
(?:\?\#[^)]*(?:\)|$))? # starting a (?# Comment group).
| \[\^?\]? [^\[\]\\]* # Or Character class. Allow unescaped ] if first char.
(?:\\[^Q][^\[\]\\]*)* # {normal*} Zero or more non-[], non-escaped-Q.
(?: # Begin unrolling loop {((special1|2) normal*)*}.
(?: \[(?::\^?\w+:\])? # Either special1: "[", optional [:POSIX:] char class.
| \\Q [^\\]* # Or special2: \Q..\E literal text. Begin with \Q.
(?:\\(?!E)[^\\]*)* # \Q..\E contents - everything up to \E.
(?:\\E|$) # \Q..\E literal text ends with \E or EOL.
) [^\[\]\\]* # End special: One of 2 alternatives {(special1|2)}.
(?:\\[^Q][^\[\]\\]*)* # More {normal*} Zero or more non-[], non-escaped-Q.
)* (?:\]|\\?$) # End character class with ']' or EOL (or \\EOL).
| \\Q [^\\]* # Or \Q..\E literal text start delimiter.
(?:\\(?!E)[^\\]*)* # \Q..\E contents - everything up to \E.
(?:\\E|$) # \Q..\E literal text ends with \E or EOL.
) # End $2: Stuff to keep.
| \\([# ]) # Or g3of3 $6: Escaped-[hash|space], discard the escape.
""", re.VERBOSE | re.MULTILINE)
return re.sub(decomment, _detidy_cb, retext)
#
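# Illustrative example (added): expected behaviour of compactVerboseRegEx on a
# small verbose pattern; the output shown is what the decomment regex above
# should produce, it has not been re-verified against the original tests.
#
# verbose = r"""
#     \d{4}   # year
#     -       # separator
#     \d{2}   # month
# """
# compactVerboseRegEx(verbose)   # -> r"\d{4}-\d{2}"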
```
#### File: src/jk_jsonschema/_SchemaAST.py
```python
class _SchemaAST(object):
def __init__(self,
any_allowedTypes, # list<int>
any_allowedValues, # list<*>
any_mustBeExactValue, # 1tuple<*>
number_multipleValueOf, # int|float
number_maximum, # int|float
number_exclusiveMaximum, # int|float
number_minimum, # int|float
number_exclusiveMinimum, # int|float
string_minLength, # int
string_maxLength, # int
string_pattern, # regex
array_items, # _SchemaAST|_SchemaAST[]
array_additionalItems, # _SchemaAST|_SchemaAST[]
array_maxItems, # int
array_minItems, # int
array_itemsMustBeUnique, # bool
array_mustContainExactValue, # _SchemaAST
array_itemsMustNotBeNone, # bool
array_itemsMustBeNone, # bool
object_minProperties, # int
object_maxProperties, # int
object_properties, # dict<str,_SchemaAST>
object_patternProperties, # list<(str,_SchemaAST)>
object_propertyDependencyObjectMustMatchSchema, # dict<str,_SchemaAST>
object_propertyDependencyOtherPropertyMustExist, # dict<str,str[]>
object_propertyNames, # _SchemaAST
object_requiredProperties, # str[]
object_additionalProperties, # _SchemaAST
op_if, # _SchemaAST
op_then, # _SchemaAST
op_else, # _SchemaAST
op_not, # _SchemaAST
op_allOf, # list<_SchemaAST>
op_anyOf, # list<_SchemaAST>
op_oneOf, # list<_SchemaAST>
always = None # bool
):
self.any_allowedTypes = any_allowedTypes
self.any_allowedValues = any_allowedValues
self.any_mustBeExactValue = any_mustBeExactValue
self.number_multipleValueOf = number_multipleValueOf
self.number_maximum = number_maximum
self.number_exclusiveMaximum = number_exclusiveMaximum
self.number_minimum = number_minimum
self.number_exclusiveMinimum = number_exclusiveMinimum
self.string_minLength = string_minLength
self.string_maxLength = string_maxLength
self.string_pattern = string_pattern
self.array_items = array_items
self.array_additionalItems = array_additionalItems
self.array_maxItems = array_maxItems
self.array_minItems = array_minItems
self.array_itemsMustBeUnique = array_itemsMustBeUnique
self.array_mustContainExactValue = array_mustContainExactValue
self.array_itemsMustNotBeNone = array_itemsMustNotBeNone
self.array_itemsMustBeNone = array_itemsMustBeNone
self.object_minProperties = object_minProperties
self.object_maxProperties = object_maxProperties
self.object_properties = object_properties
self.object_patternProperties = object_patternProperties
self.object_propertyDependencyObjectMustMatchSchema = object_propertyDependencyObjectMustMatchSchema
self.object_propertyDependencyOtherPropertyMustExist = object_propertyDependencyOtherPropertyMustExist
self.object_propertyNames = object_propertyNames
self.object_requiredProperties = object_requiredProperties
self.object_additionalProperties = object_additionalProperties
self.op_if = op_if
self.op_then = op_then
self.op_else = op_else
self.op_not = op_not
self.op_allOf = op_allOf
self.op_anyOf = op_anyOf
self.op_oneOf = op_oneOf
self.always = always
#
def __makeNullIfEmptyListOrDict(self, item):
if item is None:
return None
if len(item) == 0:
return None
else:
return item
#
def __normalize_makeNullIfEmptyListOrDict(self, item):
if item is None:
return None
if isinstance(item, list):
for f in item:
f.normalize()
elif isinstance(item, dict):
newItem = {}
for k, f in item.items():
if f != None:
f.normalize()
newItem[k] = f
item = newItem
if len(item) == 0:
return None
else:
return item
#
def __normalize_makeNullIfEmptyAST(self, ast):
if ast is None:
return None
ast.normalize()
if ast.isNotEmpty():
return ast
else:
return None
#
def normalize(self):
self.any_allowedTypes = self.__makeNullIfEmptyListOrDict(self.any_allowedTypes)
self.any_allowedValues = self.__makeNullIfEmptyListOrDict(self.any_allowedValues)
		if self.object_patternProperties is not None:
			newList = []
			for regex, schema in self.object_patternProperties:
				schema = self.__normalize_makeNullIfEmptyAST(schema)
				if schema is not None:
					newList.append((regex, schema))
			if len(newList) == 0:
				newList = None
			self.object_patternProperties = newList
self.array_items = self.__normalize_makeNullIfEmptyListOrDict(self.array_items)
self.object_properties = self.__normalize_makeNullIfEmptyListOrDict(self.object_properties)
self.object_propertyDependencyObjectMustMatchSchema = self.__normalize_makeNullIfEmptyListOrDict(self.object_propertyDependencyObjectMustMatchSchema)
self.op_allOf = self.__normalize_makeNullIfEmptyListOrDict(self.op_allOf)
self.op_anyOf = self.__normalize_makeNullIfEmptyListOrDict(self.op_anyOf)
self.op_oneOf = self.__normalize_makeNullIfEmptyListOrDict(self.op_oneOf)
		if self.object_propertyDependencyOtherPropertyMustExist is not None:
			newDict = {}
			for key, stringList in self.object_propertyDependencyOtherPropertyMustExist.items():
				if len(stringList) > 0:
					newDict[key] = stringList
			if len(newDict) == 0:
				newDict = None
			self.object_propertyDependencyOtherPropertyMustExist = newDict
self.array_additionalItems = self.__normalize_makeNullIfEmptyAST(self.array_additionalItems)
self.array_mustContainExactValue = self.__normalize_makeNullIfEmptyAST(self.array_mustContainExactValue)
self.object_propertyNames = self.__normalize_makeNullIfEmptyAST(self.object_propertyNames)
self.op_if = self.__normalize_makeNullIfEmptyAST(self.op_if)
self.op_then = self.__normalize_makeNullIfEmptyAST(self.op_then)
self.op_else = self.__normalize_makeNullIfEmptyAST(self.op_else)
self.op_not = self.__normalize_makeNullIfEmptyAST(self.op_not)
#
def isNotEmpty(self):
return \
self.hasAnyTypeConstraint() \
or self.hasStringConstraint() \
or self.hasArrayConstraint() \
or self.hasObjectConstraint() \
or self.hasNumberConstraint() \
or self.hasOpConstraint()
#
def isEmpty(self):
return not \
( \
self.hasAnyTypeConstraint() \
or self.hasStringConstraint() \
or self.hasArrayConstraint() \
or self.hasObjectConstraint() \
or self.hasNumberConstraint() \
or self.hasOpConstraint() \
)
#
def hasAnyTypeConstraint(self):
return (self.any_allowedTypes != None) \
or (self.any_allowedValues != None) \
or (self.any_mustBeExactValue != None) \
or (self.always != None)
#
def hasStringConstraint(self):
return (self.string_minLength != None) \
or (self.string_maxLength != None) \
or (self.string_pattern != None)
#
def hasArrayConstraint(self):
return (self.array_items != None) \
or (self.array_additionalItems != None) \
or (self.array_maxItems != None) \
or (self.array_minItems != None) \
or (self.array_itemsMustBeUnique != None) \
or (self.array_mustContainExactValue != None)
#
def hasObjectConstraint(self):
return (self.object_minProperties != None) \
or (self.object_maxProperties != None) \
or (self.object_properties != None) \
or (self.object_patternProperties != None) \
			or (self.object_propertyDependencyObjectMustMatchSchema != None) \
or (self.object_propertyDependencyOtherPropertyMustExist != None) \
or (self.object_propertyNames != None) \
or (self.object_requiredProperties != None)
#
def hasOpConstraint(self):
return (self.op_if != None) \
or (self.op_not != None) \
or (self.op_allOf != None) \
or (self.op_anyOf != None) \
or (self.op_oneOf != None)
#
def hasNumberConstraint(self):
return (self.number_multipleValueOf != None) \
or (self.number_maximum != None) \
or (self.number_exclusiveMaximum != None) \
or (self.number_minimum != None) \
			or (self.number_exclusiveMinimum != None)
#
#
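# Sentinel schemas (editor note): all 35 constraint parameters are passed as None;
# only the trailing 'always' flag differs, so these stand for "matches anything"
# and "matches nothing" respectively.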
ALWAYS_TRUE = _SchemaAST(
None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None,
True)
ALWAYS_FALSE = _SchemaAST(
None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None,
False)
```
#### File: src/jk_jsonschema/schema_validator.py
```python
import re
from typing import List, Union
import jk_logging
from jk_testing import Assert
from jk_json.tools import *
from ._SchemaAST import _SchemaAST
# ================================================================================================================================
# ================================================================================================================================
# ================================================================================================================================
class ValidatorStackTrace(list):
def __init__(self, errPath:str, errValue, errMsg:str):
assert isinstance(errPath, str)
assert isinstance(errMsg, str)
self.append((errPath, errValue, errMsg))
#
def appendError(self, errPath:str, errValue, errMsg:str):
assert isinstance(errPath, str)
assert isinstance(errMsg, str)
self.append((errPath, errValue, errMsg))
return self
#
#
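# Editor note: a ValidatorStackTrace is simply a list of (path, value, message)
# tuples. The innermost failure is created first (index 0) and each enclosing
# validator appends its own context via appendError() as the failure propagates.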
# ================================================================================================================================
# ================================================================================================================================
# ================================================================================================================================
class ValidationContext(object):
def __init__(self):
self.allProperties = None
#
def derive(self):
return ValidationContext()
#
#
class ValidationContext2(object):
def __init__(self, path:str = "/"):
self.allProperties = None
self.__path = path
#
@property
def path(self):
if self.__path == "/":
return "/"
else:
return self.__path[:-1]
#
def deriveOnIndex(self, index:int):
p = self.__path + str(index) + "/"
return ValidationContext2(p)
#
def deriveOnPropertyName(self, propertyName:str):
p = self.__path + propertyName + "/"
return ValidationContext2(p)
#
#
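# Illustrative sketch (added): how the error path is accumulated during validation.
#
# ctx = ValidationContext2()                     # ctx.path == "/"
# ctx = ctx.deriveOnPropertyName("users")        # ctx.path == "/users"
# ctx = ctx.deriveOnIndex(0)                     # ctx.path == "/users/0"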
class AbstractElementaryValidator(object):
def validate(self, ctx:ValidationContext, jsonData):
raise NotImplementedError()
#
def validate2(self, ctx:ValidationContext2, jsonData):
raise NotImplementedError()
#
def dump(self, prefix = "", writeFunction = print):
raise NotImplementedError()
#
#
# ================================================================================================================================
# ================================================================================================================================
# ================================================================================================================================
class _TypeSpecificValidator(AbstractElementaryValidator):
def __init__(self, typeID):
self.__typeID = typeID
self._validators = []
self._default = True
#
def validate(self, ctx:ValidationContext, jsonData):
for v in self._validators:
if not v.validate(ctx, jsonData):
return False
return self._default
#
def validate2(self, ctx:ValidationContext2, jsonData):
for v in self._validators:
retSuccess, retStackTrace = v.validate2(ctx, jsonData)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "failed for type: " + ALL_TYPES_NAME_LIST[self.__typeID])
if self._default:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, None, "default is: to fail (a)")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + "TypeSpecificValidator[" + ALL_TYPES_NAME_LIST[self.__typeID] + "]:")
prefix += "\t"
for v in self._validators:
v.dump(prefix, writeFunction)
writeFunction(prefix + "default: " + str(self._default).lower())
#
#
class _Validator(AbstractElementaryValidator):
def __init__(self):
self._validators = {}
for typeID in ALL_TYPES_SET:
self._validators[typeID] = _TypeSpecificValidator(typeID)
self._any = []
self._default = True
#
def validate(self, ctx:ValidationContext, jsonData) -> bool:
t = getTypeIDOfValue(jsonData)
if not self._validators[t].validate(ctx, jsonData):
return False
for v in self._any:
if not v.validate(ctx, jsonData):
return False
return self._default
#
def validate2(self, ctx:ValidationContext2, jsonData) -> tuple:
t = getTypeIDOfValue(jsonData)
retSuccess, retStackTrace = self._validators[t].validate2(ctx, jsonData)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "failed in category for type: " + ALL_TYPES_NAME_LIST[t])
for v in self._any:
retSuccess, retStackTrace = v.validate2(ctx, jsonData)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "failed in category for all types")
if self._default:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, None, "default is: to fail (b)")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + "Validator:")
prefix += "\t"
for v in self._validators.values():
v.dump(prefix, writeFunction)
writeFunction(prefix + "any:")
prefix2 = prefix + "\t"
for a in self._any:
a.dump(prefix2, writeFunction)
writeFunction(prefix + "default: " + str(self._default).lower())
#
#
# ================================================================================================================================
# ================================================================================================================================
# ================================================================================================================================
class Validator(object):
def __init__(self, validator:_Validator):
self.__validator = validator
#
#
# Validate the specified JSON data.
#
# @return bool bValidationResult Returns `True` or `False`.
#
def validate(self, jsonData) -> bool:
return self.__validator.validate(ValidationContext(), jsonData)
#
#
# Validate the specified JSON data.
#
# @return bool bValidationResult Returns `True` or `False`.
# @return ValidatorStackTrace stackTrace If validation failed returns a stack trace object. Otherwise `None` is returned.
#
def validate2(self, jsonData) -> tuple:
return self.__validator.validate2(ValidationContext2(), jsonData)
#
#
# Validate the specified JSON data. An exception is thrown on error.
#
def validateE(self, jsonData):
bResult, errStackTrace = self.__validator.validate2(ValidationContext2(), jsonData)
if not bResult:
v = errStackTrace[0][1]
if v is None:
msg = errStackTrace[0][2]
if msg[0].islower():
msg = msg[0].upper() + msg[1:]
else:
msg = errStackTrace[0][2]
#msg = str(v) + " " + msg
raise Exception("JSON VALIDATION ERROR! " + msg)
#
def dump(self, writeFunction = print):
self.__validator.dump("", writeFunction)
#
#
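# Illustrative usage sketch (added): 'validator' stands for a Validator instance,
# typically wrapping a _Validator produced by the compile functions further below.
#
# bOk = validator.validate(jsonData)
# bOk, stackTrace = validator.validate2(jsonData)
# if not bOk:
#     for path, value, msg in stackTrace:
#         print(path, "->", msg)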
# ================================================================================================================================
# ================================================================================================================================
# ================================================================================================================================
class _validate_any_anyOfTheseValues(AbstractElementaryValidator):
def __init__(self, log, allowedValues:list):
Assert.isInstance(allowedValues, list, log=log)
self.__allowedValues = allowedValues
#
def validate(self, ctx:ValidationContext, jsonData):
for v in self.__allowedValues:
if jsonIsEqual(jsonData, v):
return True
return False
#
def validate2(self, ctx:ValidationContext2, jsonData):
for v in self.__allowedValues:
if jsonIsEqual(jsonData, v):
return True, None
return False, ValidatorStackTrace(ctx.path, jsonData, "not found in: " + repr(self.__allowedValues))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__allowedValues))
#
#
class _validate_any_exactValue(AbstractElementaryValidator):
def __init__(self, log, referenceValue):
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return jsonIsEqual(jsonData, self.__referenceValue)
#
def validate2(self, ctx:ValidationContext2, jsonData):
if jsonIsEqual(jsonData, self.__referenceValue):
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "not equal to: " + repr(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the value is a multiple of the argument value.
# int|float
class _validate_number_multipleValueOf(AbstractElementaryValidator):
def __init__(self, log, referenceValue:Union[int,float]):
Assert.isInstance(referenceValue, (int, float), log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return (jsonData % self.__referenceValue) == 0
#
def validate2(self, ctx:ValidationContext2, jsonData):
r = jsonData % self.__referenceValue
if r == 0:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "not divisible by " + repr(self.__referenceValue) + ": " \
+ repr(jsonData) + " % " + repr(self.__referenceValue) + " == " + repr(r))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the value against an upper bound.
# int|float
class _validate_number_maximum(AbstractElementaryValidator):
def __init__(self, log, referenceValue:Union[int,float]):
Assert.isInstance(referenceValue, (int, float), log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return jsonData <= self.__referenceValue
#
def validate2(self, ctx:ValidationContext2, jsonData):
if jsonData <= self.__referenceValue:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "larger than: " + repr(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the value against a lower bound.
# int|float
class _validate_number_minimum(AbstractElementaryValidator):
def __init__(self, log, referenceValue:Union[int,float]):
Assert.isInstance(referenceValue, (int, float), log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return jsonData >= self.__referenceValue
#
def validate2(self, ctx:ValidationContext2, jsonData):
if jsonData >= self.__referenceValue:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "smaller than: " + repr(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the value against an upper bound.
# int|float
class _validate_number_exclusiveMaximum(AbstractElementaryValidator):
def __init__(self, log, referenceValue:Union[int,float]):
Assert.isInstance(referenceValue, (int, float), log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return jsonData < self.__referenceValue
#
def validate2(self, ctx:ValidationContext2, jsonData):
if jsonData < self.__referenceValue:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "larger than or equal to: " + repr(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the value against a lower bound.
# int|float
class _validate_number_exclusiveMinimum(AbstractElementaryValidator):
def __init__(self, log, referenceValue:Union[int,float]):
Assert.isInstance(referenceValue, (int, float), log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return jsonData > self.__referenceValue
#
def validate2(self, ctx:ValidationContext2, jsonData):
if jsonData > self.__referenceValue:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "smaller than or equal to: " + repr(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the size of the python value against a lower bound.
# int
class _validate_item_minLength(AbstractElementaryValidator):
def __init__(self, log, referenceValue:int):
Assert.isInstance(referenceValue, int, log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return len(jsonData) >= self.__referenceValue
#
def validate2(self, ctx:ValidationContext2, jsonData):
if len(jsonData) >= self.__referenceValue:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "length/size is " + str(len(jsonData)) + " and therefore smaller than: " + str(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# Check the size of the python value against an upper bound.
# int
class _validate_item_maxLength(AbstractElementaryValidator):
def __init__(self, log, referenceValue:int):
Assert.isInstance(referenceValue, int, log=log)
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
return len(jsonData) <= self.__referenceValue
#
def validate2(self, ctx:ValidationContext2, jsonData):
if len(jsonData) <= self.__referenceValue:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "length/size is " + str(len(jsonData)) + " and therefore greater than: " + str(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
class _validate_item_mustBeNoneOrEmpty(AbstractElementaryValidator):
def __init__(self, log):
pass
#
def validate(self, ctx:ValidationContext, jsonData):
return (jsonData is None) or (len(jsonData) == 0)
#
def validate2(self, ctx:ValidationContext2, jsonData):
if (jsonData is None) or (len(jsonData) == 0):
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "there is data")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__)
#
#
class _validate_item_mustNotBeNoneNorEmpty(AbstractElementaryValidator):
def __init__(self, log):
pass
#
def validate(self, ctx:ValidationContext, jsonData):
return (jsonData != None) and (len(jsonData) > 0)
#
def validate2(self, ctx:ValidationContext2, jsonData):
if (jsonData != None) and (len(jsonData) > 0):
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "there is no data")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__)
#
#
class _validate_item_mustNotBeNone(AbstractElementaryValidator):
def __init__(self, log):
pass
#
def validate(self, ctx:ValidationContext, jsonData):
return jsonData != None
#
def validate2(self, ctx:ValidationContext2, jsonData):
if jsonData != None:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "there is no data")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__)
#
#
# Check the string against the argument pattern.
# string_pattern
class _validate_string_pattern(AbstractElementaryValidator):
def __init__(self, log, referencePatternStr:str):
Assert.isInstance(referencePatternStr, str, log=log)
self.__referencePatternStr = referencePatternStr
self.__referencePattern = re.compile(referencePatternStr)
#
def validate(self, ctx:ValidationContext, jsonData):
return self.__referencePattern.search(jsonData) != None
#
def validate2(self, ctx:ValidationContext2, jsonData):
if self.__referencePattern.search(jsonData) != None:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "does not match pattern: " + repr(self.__referencePatternStr))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referencePatternStr))
#
#
# Check if all array elements match against the specified validator.
# AbstractElementaryValidator
class _validate_array_checkIfAllItemsMatchValidator(AbstractElementaryValidator):
def __init__(self, log, validator:AbstractElementaryValidator):
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validator = validator
#
def validate(self, ctx:ValidationContext, jsonData):
for item in jsonData:
ctx2 = ctx.derive()
if not self.__validator.validate(ctx2, item):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
i = 0
for item in jsonData:
ctx2 = ctx.deriveOnIndex(i)
retSuccess, retStackTrace = self.__validator.validate2(ctx2, item)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "an item does not validate")
i += 1
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidator:")
self.__validator.dump(prefix + "\t\t", writeFunction)
#
#
# Check if at least one array element matches the specified validator.
# AbstractElementaryValidator
class _validate_array_checkIfAnyItemMatchesValidator(AbstractElementaryValidator):
def __init__(self, log, validator:AbstractElementaryValidator):
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validator = validator
#
def validate(self, ctx:ValidationContext, jsonData):
for item in jsonData:
ctx2 = ctx.derive()
if self.__validator.validate(ctx2, item):
return True
return False
#
	def validate2(self, ctx:ValidationContext2, jsonData):
		retStackTrace = None
		i = 0
		for item in jsonData:
			ctx2 = ctx.deriveOnIndex(i)
			retSuccess, retStackTrace = self.__validator.validate2(ctx2, item)
			if retSuccess:
				return True, None
			i += 1
		if retStackTrace is None:
			return False, ValidatorStackTrace(ctx.path, jsonData, "no item validates")
		return False, retStackTrace.appendError(ctx.path, None, "no item validates")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidator:")
self.__validator.dump(prefix + "\t\t", writeFunction)
#
#
# Check if all array elements match against the specified list of validators.
# AbstractElementaryValidator[]
class _validate_array_checkAllItemsMatchListOfValidators(AbstractElementaryValidator):
def __init__(self, log, validators:List[AbstractElementaryValidator]):
Assert.isInstance(validators, list, log=log)
for validator in validators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validators = validators
self.__count = len(validators)
#
def validate(self, ctx:ValidationContext, jsonData):
#if len(jsonData) != self.__count:
# return False
for jsonItem, validator in zip(jsonData, self.__validators):
ctx2 = ctx.derive()
if not validator.validate(ctx2, jsonItem):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
#if len(jsonData) != self.__count:
# return False, ValidatorStackTrace(ctx.path, jsonData, str(self.__count) + " items expected, " + str(len(jsonData)) + " found")
i = 0
for jsonItem, validator in zip(jsonData, self.__validators):
ctx2 = ctx.deriveOnIndex(i)
retSuccess, retStackTrace = validator.validate2(ctx2, jsonItem)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "validator at index " + str(i) + " failed.")
i += 1
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidators:")
for v in self.__validators:
v.dump(prefix + "\t\t", writeFunction)
#
#
# Check if the leading array elements match the specified list of validators and all remaining elements match the extra validator.
# AbstractElementaryValidator[], AbstractElementaryValidator
class _validate_array_checkAllItemsMatchListOfValidatorsWithExtraValidator(AbstractElementaryValidator):
def __init__(self, log, validators:List[AbstractElementaryValidator], extraValidator:AbstractElementaryValidator):
Assert.isInstance(validators, list, log=log)
for validator in validators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
Assert.isInstance(extraValidator, AbstractElementaryValidator, log=log)
self.__validators = validators
self.__extraValidator = extraValidator
self.__count = len(validators)
#
def validate(self, ctx:ValidationContext, jsonData):
if len(jsonData) < self.__count:
return False
extraJsonData = jsonData[self.__count:]
jsonData = jsonData[0:self.__count]
for jsonItem, validator in zip(jsonData, self.__validators):
ctx2 = ctx.derive()
if not validator.validate(ctx2, jsonItem):
return False
for jsonItem in extraJsonData:
ctx2 = ctx.derive()
if not self.__extraValidator.validate(ctx2, jsonItem):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
#if len(jsonData) < self.__count:
# return False, ValidatorStackTrace(ctx.path, jsonData, "at least " + str(self.__count) + " items expected, " + str(len(jsonData)) + " found")
extraJsonData = jsonData[self.__count:]
jsonData = jsonData[0:self.__count]
i = 0
for jsonItem, validator in zip(jsonData, self.__validators):
ctx2 = ctx.deriveOnIndex(i)
retSuccess, retStackTrace = validator.validate2(ctx2, jsonItem)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "validator failed at index " + str(i))
i += 1
for jsonItem in extraJsonData:
ctx2 = ctx.deriveOnIndex(i)
retSuccess, retStackTrace = self.__extraValidator.validate2(ctx2, jsonItem)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "extra validator failed at index " + str(i))
i += 1
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidators:")
for v in self.__validators:
v.dump(prefix + "\t\t", writeFunction)
writeFunction(prefix + "\textraValidator:")
self.__extraValidator.dump(prefix + "\t\t", writeFunction)
#
#
# Check if the leading array elements match the specified list of validators and the remaining elements match the extra list of validators.
# AbstractElementaryValidator[], AbstractElementaryValidator[]
class _validate_array_checkAllItemsMatchListOfValidatorsWithExtraListOfValidators(AbstractElementaryValidator):
def __init__(self, log, validators:List[AbstractElementaryValidator], extraValidators:List[AbstractElementaryValidator]):
Assert.isInstance(validators, list, log=log)
for validator in validators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
Assert.isInstance(extraValidators, list, log=log)
for validator in extraValidators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validators = validators
self.__extraValidators = extraValidators
self.__count = len(validators)
self.__extraCount = len(self.__extraValidators)
#
def validate(self, ctx:ValidationContext, jsonData):
if len(jsonData) < self.__count:
return False
extraJsonData = jsonData[self.__count:]
jsonData = jsonData[0:self.__count]
if len(extraJsonData) < self.__extraCount:
return False
for jsonItem, validator in zip(jsonData, self.__validators):
ctx2 = ctx.derive()
if not validator.validate(ctx2, jsonItem):
return False
for jsonItem, validator in zip(extraJsonData, self.__extraValidators):
ctx2 = ctx.derive()
if not validator.validate(ctx2, jsonItem):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
#if len(jsonData) < self.__count:
# return False, ValidatorStackTrace(ctx.path, jsonData, "at least " + str(self.__count) + " items expected, " + str(len(jsonData)) + " found")
extraJsonData = jsonData[self.__count:]
jsonData = jsonData[0:self.__count]
i = 0
for jsonItem, validator in zip(jsonData, self.__validators):
ctx2 = ctx.deriveOnIndex(i)
retSuccess, retStackTrace = validator.validate2(ctx2, jsonItem)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "validator failed at index " + str(i))
i += 1
for jsonItem, validator in zip(extraJsonData, self.__extraValidators):
ctx2 = ctx.deriveOnIndex(i)
retSuccess, retStackTrace = validator.validate2(ctx2, jsonItem)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "extra validator failed at index " + str(i))
i += 1
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidators:")
for v in self.__validators:
v.dump(prefix + "\t\t", writeFunction)
writeFunction(prefix + "\textraValidators:")
for v in self.__extraValidators:
v.dump(prefix + "\t\t", writeFunction)
#
#
# Check if no array element is equal to another array element.
class _validate_array_itemsMustBeUnique(AbstractElementaryValidator):
def __init__(self, log):
pass
#
def validate(self, ctx:ValidationContext, jsonData):
n = len(jsonData)
if n < 2:
return True
for i in range(0, n - 1):
v1 = jsonData[i]
t1 = getTypeIDOfValue(v1)
for j in range(i + 1, n):
v2 = jsonData[j]
t2 = getTypeIDOfValue(v2)
if t1 != t2:
continue
if jsonIsEqual(v1, v2):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
n = len(jsonData)
if n < 2:
return True, None
for i in range(0, n - 1):
v1 = jsonData[i]
t1 = getTypeIDOfValue(v1)
for j in range(i + 1, n):
v2 = jsonData[j]
t2 = getTypeIDOfValue(v2)
if t1 != t2:
continue
if jsonIsEqual(v1, v2):
return False, ValidatorStackTrace(ctx.path, jsonData, "item at " + int(i) + " and " + int(j) + " are equal")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__)
#
#
# Check if at least one array element matches the reference.
# (any)
class _validate_array_mustContainExactValue(AbstractElementaryValidator):
def __init__(self, log, referenceValue):
self.__referenceValue = referenceValue
#
def validate(self, ctx:ValidationContext, jsonData):
for v in jsonData:
if jsonIsEqual(v, self.__referenceValue):
return True
return False
#
def validate2(self, ctx:ValidationContext2, jsonData):
for v in jsonData:
if jsonIsEqual(v, self.__referenceValue):
return True, None
return False, ValidatorStackTrace(ctx.path, jsonData, "no such item: " + repr(self.__referenceValue))
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ": " + repr(self.__referenceValue))
#
#
# AbstractElementaryValidator
class _validate_not(AbstractElementaryValidator):
def __init__(self, log, validator:AbstractElementaryValidator):
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validator = validator
#
def validate(self, ctx:ValidationContext, jsonData):
return not self.__validator.validate(ctx, jsonData)
#
def validate2(self, ctx:ValidationContext2, jsonData):
if not self.__validator.validate(ctx, jsonData):
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "validator validated successfully")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidator:")
self.__validator.dump(prefix + "\t\t", writeFunction)
#
#
# AbstractElementaryValidator, AbstractElementaryValidator, AbstractElementaryValidator
class _validate_if(AbstractElementaryValidator):
def __init__(self, log, validatorIf:AbstractElementaryValidator, validatorThen:AbstractElementaryValidator, validatorElse:AbstractElementaryValidator):
Assert.isInstance(validatorIf, AbstractElementaryValidator, log=log)
if validatorThen:
Assert.isInstance(validatorThen, AbstractElementaryValidator, log=log)
if validatorElse:
Assert.isInstance(validatorElse, AbstractElementaryValidator, log=log)
self.__validatorIf = validatorIf
self.__validatorThen = validatorThen
self.__validatorElse = validatorElse
#
def validate(self, ctx:ValidationContext, jsonData):
if self.__validatorIf.validate(ctx, jsonData):
if self.__validatorThen:
return self.__validatorThen.validate(ctx, jsonData)
else:
return True
else:
if self.__validatorElse:
return self.__validatorElse.validate(ctx, jsonData)
else:
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
if self.__validatorIf.validate(ctx, jsonData):
if self.__validatorThen:
				retSuccess, retStackTrace = self.__validatorThen.validate2(ctx, jsonData)
if retSuccess:
return True, None
else:
return False, retStackTrace.appendError(ctx.path, None, "then-validator failed.")
else:
				return True, None
else:
if self.__validatorElse:
				retSuccess, retStackTrace = self.__validatorElse.validate2(ctx, jsonData)
if retSuccess:
return True, None
else:
return False, retStackTrace.appendError(ctx.path, None, "else-validator failed.")
else:
				return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidatorIF:")
self.__validatorIf.dump(prefix + "\t\t", writeFunction)
if self.__validatorThen:
writeFunction(prefix + "\tvalidatorTHEN:")
self.__validatorThen.dump(prefix + "\t\t", writeFunction)
if self.__validatorElse:
writeFunction(prefix + "\tvalidatorELSE:")
self.__validatorElse.dump(prefix + "\t\t", writeFunction)
#
#
# Check if all validators evaluate to true
# AbstractElementaryValidator[]
class _validate_allOf(AbstractElementaryValidator):
def __init__(self, log, validators:List[AbstractElementaryValidator]):
Assert.isInstance(validators, list, log=log)
for validator in validators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validators = validators
#
def validate(self, ctx:ValidationContext, jsonData):
for v in self.__validators:
if not v.validate(ctx, jsonData):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for v in self.__validators:
retSuccess, retStackTrace = v.validate2(ctx, jsonData)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, "validator failed")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidators:")
for v in self.__validators:
v.dump(prefix + "\t\t", writeFunction)
#
#
# Check if at least one validator evaluates to true
# AbstractElementaryValidator[]
class _validate_anyOf(AbstractElementaryValidator):
def __init__(self, log, validators:List[AbstractElementaryValidator]):
Assert.isInstance(validators, list, log=log)
for validator in validators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validators = validators
#
def validate(self, ctx:ValidationContext, jsonData):
for v in self.__validators:
if v.validate(ctx, jsonData):
return True
return False
#
def validate2(self, ctx:ValidationContext2, jsonData):
for v in self.__validators:
retSuccess, retStackTrace = v.validate2(ctx, jsonData)
if retSuccess:
return True, None
return False, retStackTrace.appendError(ctx.path, None, "no validator succeeded")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidators:")
for v in self.__validators:
v.dump(prefix + "\t\t", writeFunction)
#
#
# Check if exactly one validator evaluates to true
# AbstractElementValidator[]
class _validate_oneOf(AbstractElementaryValidator):
def __init__(self, log, validators:List[AbstractElementaryValidator]):
Assert.isInstance(validators, list, log=log)
for validator in validators:
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validators = validators
#
def validate(self, ctx:ValidationContext, jsonData):
n = 0
for v in self.__validators:
if v.validate(ctx, jsonData):
if n == 0:
n += 1
else:
return False
return n == 1
#
def validate2(self, ctx:ValidationContext2, jsonData):
n = 0
for v in self.__validators:
if v.validate(ctx, jsonData):
if n == 0:
n += 1
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "another validator already succeeded")
if n == 1:
return True, None
else:
return False, ValidatorStackTrace(ctx.path, jsonData, "no validator succeeded")
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidators:")
for v in self.__validators:
v.dump(prefix + "\t\t", writeFunction)
#
#
# Check if all property names match against the specified validator.
# AbstractElementaryValidator
class _validate_object_allPropertyNamesMatchValidator(AbstractElementaryValidator):
def __init__(self, log, validator:AbstractElementaryValidator):
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validator = validator
#
def validate(self, ctx:ValidationContext, jsonData):
for propertyName in jsonData.keys():
if not self.__validator.validate(ctx, propertyName):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for propertyName in jsonData.keys():
retSuccess, retStackTrace = self.__validator.validate2(ctx, propertyName)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, propertyName, "property name failed to validate")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidator:")
self.__validator.dump(prefix + "\t\t", writeFunction)
#
#
class _validate_object_otherPropertyMustExist(AbstractElementaryValidator):
def __init__(self, log, propertyNameToExpectedPropertyNamesMap:dict):
Assert.isInstance(propertyNameToExpectedPropertyNamesMap, dict, log=log)
for key, value in propertyNameToExpectedPropertyNamesMap.items():
Assert.isInstance(key, str, log=log)
Assert.isInstance(value, list, log=log)
for item in value:
Assert.isInstance(item, str, log=log)
self.__propertyNameToExpectedPropertyNamesMap = propertyNameToExpectedPropertyNamesMap
#
def validate(self, ctx:ValidationContext, jsonData):
for propertyName, expectedOtherProperties in self.__propertyNameToExpectedPropertyNamesMap.items():
if propertyName in jsonData:
for p in expectedOtherProperties:
if not p in jsonData:
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for propertyName, expectedOtherProperties in self.__propertyNameToExpectedPropertyNamesMap.items():
if propertyName in jsonData:
for p in expectedOtherProperties:
if not p in jsonData:
return False, ValidatorStackTrace(ctx.path, jsonData, repr(propertyName) + " exists, so expecting " + repr(expectedOtherProperties)
+ " from where " + repr(p) + " did not exist")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tpropertyNameToExpectedPropertyNamesMap:")
for propertyName, expectedOtherProperties in self.__propertyNameToExpectedPropertyNamesMap.items():
writeFunction(prefix + "\t\t" + repr(propertyName) + ": " + repr(expectedOtherProperties))
#
#
class _validate_object_objectMustMatchSchema(AbstractElementaryValidator):
def __init__(self, log, propertyNameToSchemaMap:dict):
Assert.isInstance(propertyNameToSchemaMap, dict, log=log)
for key, value in propertyNameToSchemaMap.items():
Assert.isInstance(key, str, log=log)
Assert.isInstance(value, AbstractElementaryValidator, log=log)
self.__propertyNameToSchemaMap = propertyNameToSchemaMap
#
	def validate(self, ctx:ValidationContext, jsonData):
		for propertyName, validator in self.__propertyNameToSchemaMap.items():
			if propertyName in jsonData:
				if not validator.validate(ctx, jsonData):
					return False
		return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for propertyName, validator in self.__propertyNameToSchemaMap.items():
if propertyName in jsonData:
retSuccess, retStackTrace = validator.validate2(ctx, jsonData)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, repr(propertyName) + " exists, so expecting property related schema to match object but that failed")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tpropertyNameToSchemaMap:")
for propertyName, validator in self.__propertyNameToSchemaMap.items():
writeFunction(prefix + "\t\t" + repr(propertyName) + ":")
validator.dump(prefix + "\t\t\t", writeFunction)
#
#
# dict<str,_SchemaAST>
# modifies: ctx.allProperties
class _validate_object_propertiesMustMatchSchema(AbstractElementaryValidator):
def __init__(self, log, propertyNameToSchemaMap:dict):
Assert.isInstance(propertyNameToSchemaMap, dict, log=log)
for key, value in propertyNameToSchemaMap.items():
Assert.isInstance(key, str, log=log)
Assert.isInstance(value, AbstractElementaryValidator, log=log)
self.__propertyNameToSchemaMap = propertyNameToSchemaMap
#
	def validate(self, ctx:ValidationContext, jsonData):
		for propertyName, validator in self.__propertyNameToSchemaMap.items():
			if propertyName in jsonData:
				ctx.allProperties.remove(propertyName)
				ctx2 = ctx.derive()
				v = jsonData[propertyName]
				if not validator.validate(ctx2, v):
					return False
		return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for propertyName, validator in self.__propertyNameToSchemaMap.items():
if propertyName in jsonData:
ctx.allProperties.remove(propertyName)
ctx2 = ctx.deriveOnPropertyName(propertyName)
v = jsonData[propertyName]
retSuccess, retStackTrace = validator.validate2(ctx2, v)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, repr(propertyName) + " exists, so expecting property related schema to match property value but that failed")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tpropertyNameToSchemaMap:")
for propertyName, validator in self.__propertyNameToSchemaMap.items():
writeFunction(prefix + "\t\t" + repr(propertyName) + ":")
validator.dump(prefix + "\t\t\t", writeFunction)
#
#
# list<(regexstr,_SchemaAST)>
# modifies: ctx.allProperties
class _validate_object_regexPropertiesMustMatchSchema(AbstractElementaryValidator):
def __init__(self, log, propertyNamePatternAndValidatorList:list):
Assert.isInstance(propertyNamePatternAndValidatorList, list, log=log)
for propertyNamePattern, validator in propertyNamePatternAndValidatorList:
Assert.isInstance(propertyNamePattern, str, log=log)
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__recordList = \
[ (p, re.compile(p), v) for (p, v) in propertyNamePatternAndValidatorList ]
#
def validate(self, ctx:ValidationContext, jsonData):
for propertyName in jsonData:
for propertyNamePattern, rePattern, validator in self.__recordList:
if rePattern.search(propertyName):
ctx.allProperties.remove(propertyName)
ctx2 = ctx.derive()
v = jsonData[propertyName]
retSuccess = validator.validate(ctx2, v)
if not retSuccess:
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for propertyName in jsonData:
for propertyNamePattern, rePattern, validator in self.__recordList:
if rePattern.search(propertyName):
ctx.allProperties.remove(propertyName)
ctx2 = ctx.deriveOnPropertyName(propertyName)
v = jsonData[propertyName]
retSuccess, retStackTrace = validator.validate2(ctx2, v)
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, None, repr(propertyName) + " matches " + propertyNamePattern + " but validation of property value failed")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\trecordList:")
for propertyNamePattern, rePattern, validator in self.__recordList:
writeFunction(prefix + "\t\t" + repr(propertyNamePattern) + ":")
validator.dump(prefix + "\t\t\t", writeFunction)
#
#
# Check that certain properties exist
# str[]
class _validate_object_mustHaveProperties(AbstractElementaryValidator):
def __init__(self, log, propertyNames:list):
Assert.isInstance(propertyNames, list, log=log)
for key in propertyNames:
Assert.isInstance(key, str, log=log)
self.__propertyNames = propertyNames
#
def validate(self, ctx:ValidationContext, jsonData):
for propertyName in self.__propertyNames:
if propertyName not in jsonData:
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
for propertyName in self.__propertyNames:
if propertyName not in jsonData:
return False, ValidatorStackTrace(ctx.path, jsonData, repr(propertyName) + " does not exist")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tpropertyNames: " + repr(self.__propertyNames))
#
#
# modifies: ctx.allProperties
class _validate_object_collectAllPropertyNames(AbstractElementaryValidator):
def __init__(self, log):
pass
#
def validate(self, ctx:ValidationContext, jsonData):
ctx.allProperties = set(jsonData.keys())
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
ctx.allProperties = set(jsonData.keys())
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
#
#
# Check if all additional properties match against the specified validator.
# AbstractElementaryValidator
class _validate_object_additionalPropertiesMatchValidator(AbstractElementaryValidator):
def __init__(self, log, validator:AbstractElementaryValidator):
Assert.isInstance(validator, AbstractElementaryValidator, log=log)
self.__validator = validator
#
def validate(self, ctx:ValidationContext, jsonData):
if ctx.allProperties is None:
			raise Exception("ctx.allProperties has not been collected; _validate_object_collectAllPropertyNames must run first")
for propertyName in ctx.allProperties:
ctx2 = ctx.derive()
if not self.__validator.validate(ctx2, jsonData[propertyName]):
return False
return True
#
def validate2(self, ctx:ValidationContext2, jsonData):
if ctx.allProperties is None:
			raise Exception("ctx.allProperties has not been collected; _validate_object_collectAllPropertyNames must run first")
for propertyName in ctx.allProperties:
ctx2 = ctx.deriveOnPropertyName(propertyName)
retSuccess, retStackTrace = self.__validator.validate2(ctx2, jsonData[propertyName])
if not retSuccess:
return False, retStackTrace.appendError(ctx.path, jsonData, "property failed to validate")
return True, None
#
def dump(self, prefix = "", writeFunction = print):
writeFunction(prefix + self.__class__.__name__ + ":")
writeFunction(prefix + "\tvalidator:")
self.__validator.dump(prefix + "\t\t", writeFunction)
#
#
# TODO: check argument types as well, not only the number of arguments
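# Editor note: each registry entry is a tuple of
#   (attribute name on _SchemaAST, validator class, number of constructor arguments after 'log');
# __compileAccordingToDefinitions below unpacks exactly this shape.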
ANY_VALIDATORS = (
	("any_allowedValues", _validate_any_anyOfTheseValues, 1),
)
NUMBER_VALIDATORS = (
("number_minimum", _validate_number_minimum, 1),
("number_exclusiveMinimum", _validate_number_exclusiveMinimum, 1),
("number_maximum", _validate_number_maximum, 1),
("number_exclusiveMaximum", _validate_number_exclusiveMaximum, 1),
("number_multipleValueOf", _validate_number_multipleValueOf, 1),
)
STRING_VALIDATORS = (
("string_minLength", _validate_item_minLength, 1),
("string_maxLength", _validate_item_maxLength, 1),
("string_pattern", _validate_string_pattern, 1),
)
ARRAY_VALIDATORS = (
("array_minItems", _validate_item_minLength, 1),
("array_maxItems", _validate_item_maxLength, 1),
("array_mustContainExactValue", _validate_array_mustContainExactValue, 1),
("array_itemsMustBeUnique", _validate_array_itemsMustBeUnique, 0),
)
OPS_VALIDATORS = (
("op_not", _validate_not, 1),
("op_anyOf", _validate_anyOf, 1),
("op_allOf", _validate_allOf, 1),
("op_oneOf", _validate_oneOf, 1),
)
EXCLUSIVE_OPS = [
"op_if", "op_not", "op_anyOf", "op_oneOf", "op_allOf"
]
OBJECT_VALIDATORS = (
("object_requiredProperties", _validate_object_mustHaveProperties, 1),
("object_minProperties", _validate_item_minLength, 1),
("object_maxProperties", _validate_item_maxLength, 1),
("object_propertyDependencyObjectMustMatchSchema", _validate_object_objectMustMatchSchema, 1),
("object_propertyDependencyOtherPropertyMustExist", _validate_object_otherPropertyMustExist, 1),
("object_propertyNames", _validate_object_allPropertyNamesMatchValidator, 1),
("object_properties", _validate_object_propertiesMustMatchSchema, 1), # must be placed before object_additionalProperties
("object_patternProperties", _validate_object_regexPropertiesMustMatchSchema, 1), # must be placed before object_additionalProperties
("object_additionalProperties", _validate_object_additionalPropertiesMatchValidator, 1), # must be placed after object_properties and after object_patternProperties
)
def compileAstList(astList:list, log:jk_logging.AbstractLogger):
Assert.isInstance(log, jk_logging.AbstractLogger, log=log)
if astList is None:
return None
Assert.isInstance(astList, list)
return [ compileAst(x, log) for x in astList ]
#
def tryCompile(something, log:jk_logging.AbstractLogger):
if something is None:
return None
elif isinstance(something, _SchemaAST):
return compileAst(something, log)
elif isinstance(something, (tuple, list)):
return [ tryCompile(item, log) for item in something ]
elif isinstance(something, dict):
return { key: tryCompile(value, log) for (key, value) in something.items() }
else:
return something
#
def __compileAccordingToDefinitions(ast:_SchemaAST, typeIDorTypeIDs:Union[None,int,List[int]], listOfValidatorDefs:list, ret:_Validator, log:jk_logging.AbstractLogger):
for (astVarName, validatorClass, nArgs) in listOfValidatorDefs:
bAppendValidator = False
if nArgs == 0:
a = getattr(ast, astVarName, None)
if a != None:
v = validatorClass(log)
bAppendValidator = True
elif nArgs == 1:
a = getattr(ast, astVarName, None)
if a != None:
a = tryCompile(a, log)
v = validatorClass(log, a)
bAppendValidator = True
else:
a = getattr(ast, astVarName, None)
if a != None:
a = tryCompile(a, log)
v = validatorClass(log, *a)
bAppendValidator = True
if bAppendValidator:
if typeIDorTypeIDs is None:
ret._any.append(v)
elif isinstance(typeIDorTypeIDs, (tuple, list)):
for typeID in typeIDorTypeIDs:
ret._validators[typeID]._validators.append(v)
else:
ret._validators[typeIDorTypeIDs]._validators.append(v)
#
def compileAst(ast:_SchemaAST, log:jk_logging.AbstractLogger):
Assert.isInstance(log, jk_logging.AbstractLogger, log=log)
if ast is None:
return None
Assert.isInstance(ast, _SchemaAST, log=log)
ret = _Validator()
# -------- boolean --------
if ast.always != None:
# this overrides everything
ret._default = ast.always
return ret
# -------- ops --------
lastOpVarName = None
for astOpVarName in EXCLUSIVE_OPS:
a = getattr(ast, astOpVarName, None)
if a != None:
if lastOpVarName:
raise Exception("Can't compile " + astOpVarName + " as " + lastOpVarName + " has already been defined!")
lastOpVarName = astOpVarName
if ast.op_if:
ret._any.append(_validate_if(log,
compileAst(ast.op_if, log),
compileAst(ast.op_then, log),
compileAst(ast.op_else, log)))
__compileAccordingToDefinitions(ast, None, OPS_VALIDATORS, ret, log)
# -------- any --------
__compileAccordingToDefinitions(ast, None, ANY_VALIDATORS, ret, log)
if ast.any_mustBeExactValue:
ret._any.append(_validate_any_exactValue(log, ast.any_mustBeExactValue[0]))
# -------- number --------
__compileAccordingToDefinitions(ast, (TYPE_INT, TYPE_FLOAT), NUMBER_VALIDATORS, ret, log)
# -------- string --------
__compileAccordingToDefinitions(ast, TYPE_STR, STRING_VALIDATORS, ret, log)
# -------- array --------
__compileAccordingToDefinitions(ast, TYPE_ARRAY, ARRAY_VALIDATORS, ret, log)
if ast.array_itemsMustBeNone:
ret._validators[TYPE_ARRAY]._validators.append(_validate_item_mustBeNoneOrEmpty(log))
elif ast.array_itemsMustNotBeNone:
ret._validators[TYPE_ARRAY]._validators.append(_validate_item_mustNotBeNone(log))
elif ast.array_items != None:
if isinstance(ast.array_items, _SchemaAST):
ret._validators[TYPE_ARRAY]._validators.append(_validate_array_checkIfAllItemsMatchValidator(log,
compileAst(ast.array_items, log)))
else:
# assert isinstance(ast.array_items, list)
Assert.isInstance(ast.array_items, list, log=log)
if ast.array_additionalItems != None:
if isinstance(ast.array_additionalItems, bool):
if not ast.array_additionalItems:
ret._validators[TYPE_ARRAY]._validators.append(_validate_array_checkAllItemsMatchListOfValidatorsWithExtraValidator(log,
compileAstList(ast.array_items, log),
[]))
elif isinstance(ast.array_additionalItems, _SchemaAST):
ret._validators[TYPE_ARRAY]._validators.append(_validate_array_checkAllItemsMatchListOfValidatorsWithExtraValidator(log,
compileAstList(ast.array_items, log),
compileAst(ast.array_additionalItems, log)))
else:
# assert isinstance(ast.array_additionalItems, list)
Assert.isInstance(ast.array_additionalItems, list, log=log)
ret._validators[TYPE_ARRAY]._validators.append(_validate_array_checkAllItemsMatchListOfValidatorsWithExtraListOfValidators(log,
compileAstList(ast.array_items, log),
compileAstList(ast.array_additionalItems, log)))
else:
ret._validators[TYPE_ARRAY]._validators.append(_validate_array_checkAllItemsMatchListOfValidators(log,
compileAstList(ast.array_items, log)))
# -------- objects --------
if ast.object_properties or ast.object_patternProperties:
ret._validators[TYPE_OBJECT]._validators.append(_validate_object_collectAllPropertyNames(log))
__compileAccordingToDefinitions(ast, TYPE_OBJECT, OBJECT_VALIDATORS, ret, log)
# -------- types --------
if ast.any_allowedTypes:
# only let the specified types pass.
typeIDs = set(ast.any_allowedTypes)
extraTypeIDs = ALL_TYPES_SET.difference(typeIDs)
for typeID in extraTypeIDs:
v = ret._validators[typeID]
v._validators.clear()
v._default = False
# --------
return ret
#
```
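The compiler above is table-driven: each `*_VALIDATORS` entry names an AST attribute, the validator class to instantiate, and the number of constructor arguments, and `__compileAccordingToDefinitions()` applies whichever entries are present on the AST. The following standalone sketch illustrates that dispatch pattern; all names here are hypothetical stand-ins, not jk_jsonschema classes.
```python
# Hypothetical stand-ins illustrating the (astAttributeName, validatorClass, nArgs)
# dispatch pattern used by __compileAccordingToDefinitions() above.

class MinimumValidator:
	def __init__(self, limit):
		self.__limit = limit
	def validate(self, value):
		return value >= self.__limit

class UniqueItemsValidator:
	def validate(self, value):
		return len(value) == len(set(value))

class Ast:
	def __init__(self, **kwargs):
		self.__dict__.update(kwargs)

DEFINITIONS = (
	("number_minimum", MinimumValidator, 1),
	("array_itemsMustBeUnique", UniqueItemsValidator, 0),
)

def compileValidators(ast):
	ret = []
	for attrName, validatorClass, nArgs in DEFINITIONS:
		a = getattr(ast, attrName, None)
		if a is None:
			continue
		ret.append(validatorClass() if nArgs == 0 else validatorClass(a))
	return ret

validators = compileValidators(Ast(number_minimum=3))
print(all(v.validate(5) for v in validators))		# True
print(all(v.validate(1) for v in validators))		# False
```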
#### File: python-module-jk-json/testing/parse-test-case-schemas.py
```python
import os
import jk_json
import jk_jsonschema
import jk_logging
import jk_console
jk_logging.COLOR_LOG_MESSAGE_FORMATTER.setOutputMode(jk_logging.COLOR_LOG_MESSAGE_FORMATTER.EnumOutputMode.FULL)
LOGGER = jk_logging.ConsoleLogger.create(logMsgFormatter=jk_logging.COLOR_LOG_MESSAGE_FORMATTER)
#BASE_DIR = os.path.abspath("JSON-Schema-Test-Suite/tests/draft7")
#BASE_DIR = os.path.abspath("tests")
BASE_DIR = os.path.abspath("tests/SUCCEEDED")
if os.path.isdir(BASE_DIR):
LOGGER.info("Using base directory for test cases: " + BASE_DIR)
else:
raise Exception("No such directory: " + BASE_DIR)
def runTests(schema, jTests, log):
nSucceeded = 0
nFailed = 0
for jtest in jTests:
description = jtest["description"]
jsonData = jtest["data"]
bValid = jtest["valid"]
blog = jk_logging.BufferLogger.create()
bForward = False
log2 = blog.descend("TEST: " + description)
try:
log2.notice("JSON data:")
for line in jk_json.dumps(jsonData, indent="\t").split("\n"):
log2.notice("\t" + line)
bResult, stackTrace = schema.validate2(jsonData)
s = "Result calculated: " + str(bResult).upper() + " --- Result expected: " + str(bValid).upper()
if bResult == bValid:
log2.success(s)
nSucceeded += 1
else:
log2.error(s)
if stackTrace:
for s in stackTrace:
log2.error(str(s))
nFailed += 1
bForward = True
except Exception as ee:
log2.error(ee)
bForward = True
if bForward:
blog.forwardTo(log)
return nSucceeded, nFailed
#
nFilesSucceeded = 0
nFilesFailed = 0
nTestsSucceeded = 0
nTestsFailed = 0
for entryName in os.listdir(BASE_DIR):
fullPath = os.path.join(BASE_DIR, entryName)
if not os.path.isfile(fullPath):
continue
LOGGER.notice("################################################################################################################################")
LOGGER.notice("################################################################################################################################")
log = LOGGER.descend("TEST-FILE: " + entryName)
try:
jsonData = jk_json.loadFromFile(fullPath, bStrict=False)
bHadError = False
for jpart in jsonData:
jschema = jpart["schema"]
jtests = jpart["tests"]
description = jpart["description"]
blog = jk_logging.BufferLogger.create()
bForward = False
log2 = blog.descend("TEST-GROUP: " + description)
try:
log2.notice("Schema:")
for line in jk_json.dumps(jschema, indent="\t").split("\n"):
log2.notice("\t" + line)
schema = jk_jsonschema.SchemaParser.parse(jschema, log2)
schema.dump(writeFunction=log2.notice)
nSucceeded, nFailed = runTests(schema, jtests, log2)
if nFailed > 0:
bHadError = True
bForward = True
nTestsSucceeded += nSucceeded
nTestsFailed += nFailed
except Exception as ee:
nTestsFailed += 1
log2.error(ee)
bHadError = True
bForward = True
if bForward:
blog.forwardTo(log)
if bHadError:
nFilesFailed += 1
else:
nFilesSucceeded += 1
except Exception as ee:
nFilesFailed += 1
log.error(ee)
def printValue(text, outlength, value, mode):
if mode > 0:
s = jk_console.Console.ForeGround.STD_GREEN
elif mode < 0:
s = jk_console.Console.ForeGround.STD_RED
else:
s = jk_console.Console.ForeGround.STD_DARKGRAY
while len(text) < outlength:
text = " " + text
s += text
s += ": "
s += str(value)
s += jk_console.Console.RESET
print(s)
#
print()
print("#### S T A T S ####")
print()
printValue("tests succeeded", 20, nTestsSucceeded, int(nTestsSucceeded > 0))
printValue("tests failed", 20, nTestsFailed, - int(nTestsFailed > 0))
printValue("files succeeded", 20, nFilesSucceeded, int(nFilesSucceeded > 0))
printValue("files failed", 20, nFilesFailed, - int(nFilesFailed > 0))
``` |
{
"source": "jkpubsrc/python-module-jk-licenses",
"score": 3
} |
#### File: src/jk_licenses/LicenseMgr.py
```python
import os
import typing
import jk_json
from .License import License
from .VariableDef import VariableDef
class LicenseMgr(object):
def __init__(self, licenseDirs:list = None):
if licenseDirs is None:
licenseDirs = []
licenseDirs.append(os.path.join(os.path.dirname(__file__), "licenses"))
self.__licenseDirs = tuple(licenseDirs)
self.__licenses = None
self.__licensesByMainID = None
#
@property
def dirPaths(self) -> tuple:
return self.__licenseDirs
#
@property
def allLicenseIDs(self) -> typing.Sequence[str]:
if self.__licenses is None:
self.scan()
return sorted(self.__licenses.keys())
#
@property
def mainLicenseIDs(self) -> typing.Sequence[str]:
if self.__licensesByMainID is None:
self.scan()
return sorted(self.__licensesByMainID.keys())
#
@property
def licenses(self) -> typing.Sequence[License]:
if self.__licensesByMainID is None:
self.scan()
mainLicenseIDs = [ l.licenseID for l in self.__licenses.values() ]
for licenseID in sorted(self.__licensesByMainID.keys()):
yield self.__licensesByMainID[licenseID]
#
def scan(self):
self.__licenses = {}
self.__licensesByMainID = {}
for dirPath in self.__licenseDirs:
if os.path.isdir(dirPath):
for entry in os.listdir(dirPath):
if entry.endswith(".json"):
fullPath = os.path.join(dirPath, entry)
self.__loadLicense(fullPath)
#
def __loadLicense(self, fullPath:str):
licenseRawFilePath = fullPath[:-5] + ".txt"
if not os.path.isfile(licenseRawFilePath):
licenseRawFilePath = None
jLicenseInfo = jk_json.loadFromFile(fullPath)
if "identifiers" in jLicenseInfo:
identifiers = jLicenseInfo["identifiers"]
else:
identifiers = []
if "identifier" in jLicenseInfo:
mainIdentifier = jLicenseInfo["identifier"]
identifiers.insert(0, mainIdentifier)
mainIdentifier = identifiers[0]
name = jLicenseInfo["name"]
url = jLicenseInfo.get("url")
classifier = jLicenseInfo.get("classifier")
variableDefs = {}
if "variables" in jLicenseInfo:
for jVarDefs in jLicenseInfo["variables"]:
varName = jVarDefs["name"]
varType = jVarDefs.get("type", "str")
assert varType in [ "bool", "str", "int" ]
varDescr = jVarDefs.get("description")
variableDefs[varName] = VariableDef(varName, varType, varDescr)
#
lic = License(mainIdentifier, identifiers, name, url, classifier, licenseRawFilePath, variableDefs)
self.__licensesByMainID[lic.licenseID] = lic
self.__licenses[lic.licenseID] = lic
for licenseID in lic.licenseIDs:
self.__licenses[licenseID] = lic
#
def getLicense(self, identifier:str):
if self.__licenses is None:
self.scan()
return self.__licenses[identifier]
#
def createLicenseMap(self) -> typing.Dict[str,list]:
if self.__licenses is None:
self.scan()
ret = {}
for license in self.__licensesByMainID.values():
licenseIDSet = []
for licenseID in license.licenseIDs:
if licenseID == license.licenseID:
continue
licenseIDSet.append(licenseID)
ret[license.licenseID] = licenseIDSet
return ret
#
def createAlternativeLicenseIDMap(self) -> typing.Dict[str,str]:
if self.__licenses is None:
self.scan()
ret = {}
for license in self.__licensesByMainID.values():
for licenseID in license.licenseIDs:
if licenseID == license.licenseID:
continue
ret[licenseID] = license.licenseID
return ret
#
#
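# ----------------------------------------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). Because this file uses relative imports it can be
# exercised via "python3 -m jk_licenses.LicenseMgr"; it assumes the bundled "licenses" directory contains at
# least one *.json license definition.
if __name__ == "__main__":
	_mgr = LicenseMgr()
	print("main license IDs:", _mgr.mainLicenseIDs)				# first access triggers the lazy scan()
	print("alias map:", _mgr.createAlternativeLicenseIDMap())		# alternative identifier -> main identifier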
``` |
{
"source": "jkpubsrc/python-module-jk-logging",
"score": 3
} |
#### File: python-module-jk-logging/examples/example_simple_1d.py
```python
from jk_logging import *
def foo(a, b):
return a / b
def bar(a, log):
with log.descend("Processing foo() ...") as log2:
return foo(a, 0)
LOG = ConsoleLogger.create(logMsgFormatter=COLOR_LOG_MESSAGE_FORMATTER)
try:
with LOG:
# ... do something ...
with LOG.descend("Now doing something ...") as log2:
log2.notice("Just something unimportant.")
pass
# ... do something more ...
with LOG.descend("Now invoking bar() ...") as log2:
bar(6, log2)
# ...
except ExceptionInChildContextException as e:
pass
```
#### File: src/jk_logging/BufferLogger.py
```python
import datetime
import json
from os import stat
from .EnumLogLevel import EnumLogLevel
from .impl.LogStats import LogStats
from .AbstractLogger import AbstractLogger
from .impl.Converter import Converter
from .impl.JSONDict import JSONDict
#
# This logger will buffer log messages in an internal array. Later this data can be forwarded to
# other loggers, e.g. in order to store them on disk.
#
# NOTE: This implementation additionally collects statistics while logging.
#
class BufferLogger(AbstractLogger):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, idCounter = None, parentID:int = None, indentLevel:int = 0, logItemList = None, logStats:LogStats = None, extraProperties:JSONDict = None):
super().__init__(idCounter)
self._indentationLevel = indentLevel
if logItemList is None:
self.__list = []
else:
self.__list = logItemList
if parentID is None:
parentID = self._idCounter.next()
self._parentLogEntryID = parentID
self.__logStats = LogStats() if (logStats == None) else logStats
self.__extraProperties = JSONDict() if extraProperties is None else extraProperties
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def stats(self) -> LogStats:
return self.__logStats
#
#
# Extra properties attached to this logger. They have no relevance for the logging process itself,
# but they will be part of the JSON serialization of the buffer.
#
@property
def extraProperties(self) -> JSONDict:
return self.__extraProperties
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _logi(self, logEntryStruct:list, bNeedsIndentationLevelAdaption:bool) -> list:
self.__logStats.increment(logEntryStruct[5])
if bNeedsIndentationLevelAdaption:
logEntryStruct = list(logEntryStruct)
logEntryStruct[2] = self._indentationLevel
self.__list.append(logEntryStruct)
return logEntryStruct
#
def _descend(self, logEntryStruct:list) -> AbstractLogger:
self.__logStats.increment(logEntryStruct[5])
nextID = logEntryStruct[1]
newList = logEntryStruct[7]
return BufferLogger(
idCounter=self._idCounter,
parentID=nextID,
indentLevel=self._indentationLevel + 1,
logItemList=newList,
logStats=self.__logStats,
extraProperties=self.__extraProperties,
)
#
"""
def __getJSONData(self, items):
ret = []
for item in items:
item2 = list(item)
item2[5] = int(item2[5])
if item2[0] == "txt":
pass
elif item2[0] == "ex":
pass
elif item2[0] == "desc":
item2[7] = self.__getJSONData(item2[7])
else:
raise Exception("Implementation Error!")
ret.append(item2)
return ret
#
def __stackTraceElementToPrettyJSONData(self, stackTraceItem):
return {
"file": stackTraceItem[0],
"line": stackTraceItem[1],
"module": stackTraceItem[2],
"sourcecode": stackTraceItem[3],
}
#
def __getPrettyJSONData(self, items):
ret = []
for item in items:
item2 = list(item)
t = datetime.datetime.fromtimestamp(item2[4])
jsonLogEntry = {
"type": item2[0],
"id": item2[1],
"indent": item2[2],
"timestamp": {
"t": item2[4],
"year": t.year,
"month": t.month,
"day": t.day,
"hour": t.hour,
"minute": t.minute,
"second": t.second,
"ms": t.microsecond // 1000,
"us": t.microsecond % 1000,
},
"loglevel": str(item2[5]),
"logleveln": int(item2[5]),
}
if item2[0] == "txt":
jsonLogEntry["text"] = item2[6]
elif item2[0] == "ex":
jsonLogEntry["exception"] = item2[6]
jsonLogEntry["text"] = item2[7]
jsonLogEntry["stacktrace"] = [ self.__stackTraceElementToPrettyJSONData(x) for x in item2[8] ] if item2[8] else None
elif item2[0] == "desc":
jsonLogEntry["text"] = item2[6]
jsonLogEntry["children"] = self.__getPrettyJSONData(item2[7])
else:
raise Exception("Implementation Error!")
ret.append(jsonLogEntry)
return ret
#
"""
################################################################################################################################
## Public Methods
################################################################################################################################
def hasData(self):
return len(self.__list) > 0
#
"""
#
# Return a list of strings that contains the data stored in this logger.
#
# @return string[] Returns an array of strings ready to be written to the console or a file.
#
def getBufferDataAsStrList(self):
ret = []
for logEntryStruct in self.__list:
...
return ret
"""
"""
#
# Return a single string that contains the data stored in this logger.
#
# @return string Returns a single string ready to be written to the console or a file.
#
def getBufferDataAsStr(self):
s = ''
for logEntryStruct in self.__list:
...
return s
"""
#
# Forward the log data stored in this logger to another logger.
#
# @param AbstractLogger logger Another logger that will receive the log data.
# @param bool bClear Clear buffer after forwarding all log data.
#
def forwardTo(self, logger, bClear = False):
assert isinstance(logger, AbstractLogger)
logger._logiAll(self.__list, True)
if bClear:
self.__list = []
#
#
# Forward the log data stored in this logger to another logger.
#
# @param AbstractLogger logger Another logger that will receive the log data.
# @param str text The title for the descend section to create.
# @param bool bClear Clear buffer after forwarding all log data.
#
def forwardToDescended(self, logger, text:str, bClear = False):
assert isinstance(logger, AbstractLogger)
log2 = logger.descend(text)
log2._logiAll(self.__list, True)
if bClear:
self.__list = []
#
#def clear(self):
# NOTE: This method has been removed as it is not possible to clear only part of a stats object
# self.__list = []
#
def toJSON(self):
#return self.__getJSONData(self.__list)
ret = {
"magic": {
"magic": "jk-logging-compact",
"version": 1,
},
"logData": [
Converter.RAW_TO_COMPACTJSON.logEntry_to_json(x) for x in self.__list
],
}
if self.__extraProperties:
ret["extraProperties"] = self.__extraProperties
return ret
#
def toJSONPretty(self):
#return self.__getPrettyJSONData(self.__list)
ret = {
"magic": {
"magic": "jk-logging-verbose",
"version": 1,
},
"logData": [
Converter.RAW_TO_PRETTYJSON.logEntry_to_json(x) for x in self.__list
]
}
if self.__extraProperties:
ret["extraProperties"] = self.__extraProperties
return ret
#
def __str__(self):
return "<BufferLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
def __repr__(self):
return "<BufferLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
################################################################################################################################
## Static Methods
################################################################################################################################
"""
@staticmethod
def __convertRawLogData(items:list, outLogStats:dict):
ret = []
for item in items:
item = list(item)
item[5] = EnumLogLevel.parse(item[5])
iLogLevel = int(item[5])
outLogStats[iLogLevel] = outLogStats.get(iLogLevel, 0) + 1
if item[0] == "txt":
pass
elif item[0] == "ex":
pass
elif item[0] == "desc":
item[7] = BufferLogger.__convertRawLogData(item[7], outLogStats)
else:
raise Exception("Implementation Error!")
ret.append(item)
return ret
#
"""
@staticmethod
def create(jsonData = None):
appendData = None
extraProperties = None
if jsonData is not None:
if isinstance(jsonData, list):
# seems to be raw data
appendData = jsonData
elif isinstance(jsonData, dict):
if jsonData["magic"]["magic"] == "jk-logging-verbose":
appendData = [
Converter.PRETTYJSON_TO_RAW.json_to_logEntry(x) for x in jsonData["logData"]
]
extraProperties = jsonData.get("extraProperties")
elif jsonData["magic"]["magic"] == "jk-logging-compact":
appendData = [
Converter.COMPACTJSON_TO_RAW.json_to_logEntry(x) for x in jsonData["logData"]
]
extraProperties = jsonData.get("extraProperties")
else:
raise Exception("jsonData is of invalid format!")
else:
raise Exception("jsonData is invalid")
# ----
if extraProperties is not None:
extraProperties = JSONDict(**extraProperties)
# ----
logger = BufferLogger(extraProperties=extraProperties)
if appendData is not None:
logger._logiAll(appendData, True)
return logger
#
#
```
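A minimal sketch of the buffer-then-forward workflow described above (assuming the jk_logging package is installed; the method names used here are the ones exercised elsewhere in this repository):
```python
import json

import jk_logging

# Buffer everything first; nothing is printed at this point.
blog = jk_logging.BufferLogger.create()
blog.notice("step 1")
blog2 = blog.descend("processing ...")
blog2.info("step 2")

# Forward the buffered entries to a console logger in one go ...
clog = jk_logging.ConsoleLogger.create()
blog.forwardTo(clog)

# ... and/or serialize the buffer (compact JSON form).
print(json.dumps(blog.toJSON(), indent="\t"))
```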
#### File: src/jk_logging/ConsoleLogger.py
```python
import sys
from .EnumLogLevel import *
from .AbstractLogger import *
from .fmt.LogMessageFormatter import *
#
# This logger will write log messages to the console (stdout or stderr).
#
class ConsoleLogger(AbstractLogger):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, idCounter = None, parentID = None, indentationLevel = 0, printToStdErr = False, logMsgFormatter = None, printFunction = None):
super().__init__(idCounter)
self._indentationLevel = indentationLevel
if parentID is None:
parentID = self._idCounter.next()
self._parentLogEntryID = parentID
self.__logMsgFormatter = DEFAULT_LOG_MESSAGE_FORMATTER if logMsgFormatter is None else logMsgFormatter
self.__printFunction = printFunction
# use the custom print function if one was provided, otherwise print to stderr or stdout
self.__print = printFunction if printFunction is not None else (self.__eprint if printToStdErr else print)
self.__printToStdErr = printToStdErr
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def logMsgFormatter(self) -> AbstractLogMessageFormatter:
return self.__logMsgFormatter
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def __eprint(self, *args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
#
def _logi(self, logEntryStruct, bNeedsIndentationLevelAdaption):
if bNeedsIndentationLevelAdaption:
logEntryStruct = list(logEntryStruct)
logEntryStruct[2] = self._indentationLevel
lineOrLines = self.__logMsgFormatter.format(logEntryStruct)
if isinstance(lineOrLines, str):
self.__print(lineOrLines)
else:
for line in lineOrLines:
self.__print(line)
#
def _descend(self, logEntryStruct):
nextID = logEntryStruct[1]
return ConsoleLogger(self._idCounter, nextID, self._indentationLevel + 1, self.__printToStdErr, self.__logMsgFormatter, self.__printFunction)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
return "<ConsoleLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
def __repr__(self):
return "<ConsoleLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def create(printToStdErr = False, logMsgFormatter = None, printFunction = None):
return ConsoleLogger(printToStdErr = printToStdErr, logMsgFormatter = logMsgFormatter, printFunction = printFunction)
#
#
```
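A minimal sketch (assuming jk_logging is installed) that routes console output to stderr instead of stdout:
```python
import jk_logging

elog = jk_logging.ConsoleLogger.create(printToStdErr=True)
elog.error("this line is written to stderr")
```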
#### File: jk_logging/fmt/HTMLLogMessageFormatter.py
```python
import typing
from ..EnumLogLevel import EnumLogLevel
from .AbstractTimeStampFormatter import AbstractTimeStampFormatter
from .AbstractLogMessageFormatter import AbstractLogMessageFormatter
from .DefaultTimeStampFormatter import DefaultTimeStampFormatter
from ..EnumExtensitivity import EnumExtensitivity
#
# This formatter renders log messages as HTML, e.g. for embedding log output in web pages.
#
class HTMLLogMessageFormatter(AbstractLogMessageFormatter):
################################################################################################################################
## Nested Classes
################################################################################################################################
################################################################################################################################
## Constants
################################################################################################################################
LOG_LEVEL_TO_COLOR_MAP = {
EnumLogLevel.TRACE: "#a0a0a0",
EnumLogLevel.DEBUG: "#a0a0a0",
EnumLogLevel.NOTICE: "#a0a0a0",
EnumLogLevel.STDOUT: "#404040",
EnumLogLevel.INFO: "#404040",
EnumLogLevel.WARNING: "#804040",
EnumLogLevel.ERROR: "#800000",
EnumLogLevel.STDERR: "#900000",
EnumLogLevel.EXCEPTION: "#900000",
EnumLogLevel.SUCCESS: "#009000",
}
#STACKTRACE_COLOR = "\033[38;2;204;102;0m"
#STACKTRACE_COLOR = "#800000"
STACKTRACE_COLOR = "#700000"
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self,
bIncludeIDs:bool = False,
fillChar:str = " ",
bLinesWithBRTag:bool = False,
extensitivity:EnumExtensitivity = EnumExtensitivity.FULL,
timeStampFormatter = None
):
assert isinstance(bIncludeIDs, bool)
self.__includeIDs = bIncludeIDs
assert isinstance(fillChar, str)
self.__fillChar = fillChar
self.__indentBuffer = fillChar
assert isinstance(extensitivity, EnumExtensitivity)
self.__outputMode = extensitivity
assert isinstance(bLinesWithBRTag, bool)
self.__bLinesWithBRTag = bLinesWithBRTag
if timeStampFormatter is None:
timeStampFormatter = DefaultTimeStampFormatter()
else:
assert callable(timeStampFormatter)
self.__timeStampFormatter = timeStampFormatter
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def timeStampFormatter(self) -> typing.Union[AbstractTimeStampFormatter,None]:
return self.__timeStampFormatter
#
@property
def outputMode(self) -> EnumExtensitivity:
return self.__outputMode
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
#
# REMOVED: Instances of this class must be read-only and must not be changable at runtime.
#
#def setOutputMode(self, outputMode:typing.Union[EnumExtensitivity,None]):
# if outputMode is None:
# outputMode = EnumExtensitivity.FULL
# self.__outputMode = outputMode
#
#
# Create and return a string representation of the specified log entry.
#
# @param list logEntryStruct A log entry structure. See <c>AbstractLogger._logi()</c> for a detailed description.
# @return str Returns the string representation of the log message.
#
def format(self, logEntryStruct):
term = "</span>"
if self.__bLinesWithBRTag:
term += "</br>"
sID = str(logEntryStruct[1]) if (logEntryStruct[1] != None) else "-"
indentationLevel = logEntryStruct[2]
while indentationLevel > len(self.__indentBuffer):
self.__indentBuffer += self.__fillChar
sIndent = self.__indentBuffer[0:indentationLevel*len(self.__fillChar)]
sParentID = str(logEntryStruct[3]) if (logEntryStruct[3] != None) else "-"
sTimeStamp = "[" + self.__timeStampFormatter(logEntryStruct[4]) + "]"
sLogType = AbstractLogMessageFormatter.LOG_LEVEL_TO_STR_MAP[logEntryStruct[5]]
if self.__includeIDs:
s3 = "(" + sParentID + "|" + sID + ") " + sTimeStamp + " "
else:
s3 = sTimeStamp + " "
s1 = sIndent + "<span style=\"color:" + HTMLLogMessageFormatter.LOG_LEVEL_TO_COLOR_MAP[logEntryStruct[5]] + "\">" + s3
s2 = sIndent + "<span style=\"color:" + HTMLLogMessageFormatter.STACKTRACE_COLOR + "\">" + s3
if logEntryStruct[0] == "txt":
sLogMsg = logEntryStruct[6]
if sLogMsg is None:
sLogMsg = ""
return s1 + sLogType + ": " + sLogMsg + term
elif logEntryStruct[0] == "ex":
sExClass = logEntryStruct[6]
sLogMsg = logEntryStruct[7]
ret = []
if logEntryStruct[8] != None:
if self.__outputMode == EnumExtensitivity.FULL:
for (stPath, stLineNo, stModuleName, stLine) in logEntryStruct[8]:
ret.append(s2 + "STACKTRACE: " + stPath + ":" + str(stLineNo) + " " + stModuleName + " # " + stLine + term)
elif self.__outputMode == EnumExtensitivity.SHORTED:
stPath, stLineNo, stModuleName, stLine = logEntryStruct[8][-1]
ret.append(s2 + "STACKTRACE: " + stPath + ":" + str(stLineNo) + " " + stModuleName + " # " + stLine + term)
if sLogMsg is None:
sLogMsg = ""
ret.append(s1 + sLogType + ": " + sExClass + ": " + sLogMsg + term)
return ret
elif logEntryStruct[0] == "desc":
sLogMsg = logEntryStruct[6]
if sLogMsg is None:
sLogMsg = ""
return s1 + sLogType + ": " + sLogMsg + term
else:
raise Exception()
#
#
HTML_LOG_MESSAGE_FORMATTER = HTMLLogMessageFormatter()
```
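A minimal sketch that renders log output as HTML by handing this formatter to a ConsoleLogger (assuming jk_logging is installed and the formatter is importable from the fmt subpackage as laid out above):
```python
import jk_logging
from jk_logging.fmt.HTMLLogMessageFormatter import HTMLLogMessageFormatter

htmlFormatter = HTMLLogMessageFormatter(bLinesWithBRTag=True)
log = jk_logging.ConsoleLogger.create(logMsgFormatter=htmlFormatter)

log.notice("hello")						# prints an HTML <span> line
log2 = log.descend("doing work ...")
log2.success("done")
```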
#### File: jk_logging/impl/Converter_prettyJSON_to_raw.py
```python
from abc import abstractclassmethod
import os
import datetime
import typing
import json
from ..EnumLogLevel import EnumLogLevel
class Converter_prettyJSON_to_raw(object):
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def __json_to_timeStamp(self, jTimeStamp:dict) -> float:
assert isinstance(jTimeStamp, dict)
return jTimeStamp["t"]
#
def __json_to_stackTraceElement(self, jStackTraceElement:dict) -> tuple:
assert isinstance(jStackTraceElement, dict)
return (
jStackTraceElement["file"],
jStackTraceElement["line"],
jStackTraceElement["module"],
jStackTraceElement["code"],
)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def json_to_logEntry(self, jLogEntry:dict) -> list:
assert isinstance(jLogEntry, dict)
sType = jLogEntry["type"]
rawLogEntry = [
sType,
0, # jLogEntry["id"],
jLogEntry["indent"],
self.__json_to_timeStamp(jLogEntry["timeStamp"]),
EnumLogLevel.parse(jLogEntry["logLevel"][0]),
]
if sType == "txt":
rawLogEntry.append(jLogEntry["text"])
assert len(rawLogEntry) == 7
elif sType == "ex":
rawLogEntry.append(jLogEntry["exception"])
rawLogEntry.append(jLogEntry["text"])
stackTraceList = None
if "stacktrace" in jLogEntry:
stackTraceList = [
self.__json_to_stackTraceElement(x) for x in jLogEntry["stacktrace"]
]
rawLogEntry.append(stackTraceList)
assert len(rawLogEntry) == 9
elif sType == "desc":
rawLogEntry.append(jLogEntry["text"])
children = None
if "children" in jLogEntry:
children = [
self.json_to_logEntry(x) for x in jLogEntry["children"]
]
rawLogEntry.append(children)
assert len(rawLogEntry) == 8
else:
raise Exception("Implementation Error!")
return rawLogEntry
#
################################################################################################################################
## Static Methods
################################################################################################################################
#
```
#### File: jk_logging/impl/Converter_raw_to_compactJSON.py
```python
import os
import datetime
import typing
import json
class Converter_raw_to_compactJSON(object):
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def logEntry_to_json(self, rawLogEntry:typing.Union[tuple,list]) -> list:
sType = rawLogEntry[0]
jsonLogEntry = [
sType,
# rawLogEntry[1], # logEntryID
# rawLogEntry[2], # indentationLevel
rawLogEntry[4], # timeStamp
int(rawLogEntry[5]), # logLevel
]
if sType == "txt":
assert len(rawLogEntry) == 7
# nothing more to convert
jsonLogEntry.append(rawLogEntry[6]) # logMsg
elif sType == "ex":
assert len(rawLogEntry) == 9
# nothing more to convert
jsonLogEntry.append(rawLogEntry[6]) # exClass
jsonLogEntry.append(rawLogEntry[7]) # exMsg
jsonLogEntry.append(rawLogEntry[8]) # exStackTrace
elif sType == "desc":
assert len(rawLogEntry) == 8
# convert list of nested elements
jsonLogEntry.append(rawLogEntry[6]) # logMsg
nestedList = None
if rawLogEntry[7] is not None:
nestedList = [
self.logEntry_to_json(x) for x in rawLogEntry[7]
]
jsonLogEntry.append(nestedList)
else:
raise Exception("Implementation Error!")
return jsonLogEntry
#
################################################################################################################################
## Static Methods
################################################################################################################################
#
```
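A sketch of the raw log-entry layout these converters operate on (assuming jk_logging is installed and the Converter facade exposes the converter singletons the way BufferLogger above uses them):
```python
import time

from jk_logging.EnumLogLevel import EnumLogLevel
from jk_logging.impl.Converter import Converter

# Raw "txt" entry: [ type, entryID, indentationLevel, parentEntryID, timeStamp, logLevel, text ]
rawEntry = ["txt", 1, 0, 0, time.time(), EnumLogLevel.INFO, "hello"]

jCompact = Converter.RAW_TO_COMPACTJSON.logEntry_to_json(rawEntry)
print(jCompact)			# [ "txt", <timeStamp>, <int log level>, "hello" ]

rawAgain = Converter.COMPACTJSON_TO_RAW.json_to_logEntry(jCompact)
print(rawAgain)
```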
#### File: src/jk_logging/JSONFileLogger.py
```python
import os
import json
from .EnumLogLevel import *
from .AbstractLogger import *
from .BufferLogger import BufferLogger
#
# This logger buffers log messages like BufferLogger and additionally persists the whole buffer
# to a JSON file after every log call.
#
class JSONFileLogger(BufferLogger):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, idCounter = None, parentID = None, indentLevel = 0, logItemList = None, rootParent = None, filePath = None):
super().__init__(idCounter, parentID, indentLevel, logItemList)
if rootParent is not None:
assert isinstance(rootParent, JSONFileLogger)
assert isinstance(filePath, str)
self.__filePath = filePath
self.__filePathTmp = filePath + ".tmp"
self.__rootParent = rootParent
#
################################################################################################################################
## Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def _logi(self, logEntryStruct, bNeedsIndentationLevelAdaption):
super()._logi(logEntryStruct, bNeedsIndentationLevelAdaption)
if self.__rootParent is None:
self._saveLogData()
else:
self.__rootParent._saveLogData()
#
def _saveLogData(self):
with open(self.__filePathTmp, "w") as f:
json.dump(self.getDataAsJSON(), f)
if os.path.isfile(self.__filePath):
os.unlink(self.__filePath)
os.rename(self.__filePathTmp, self.__filePath)
#
def _descend(self, logEntryStruct):
nextID = logEntryStruct[1]
newList = logEntryStruct[7]
return JSONFileLogger(
self._idCounter,
nextID,
self._indentationLevel + 1,
newList,
self.__rootParent if self.__rootParent else self,
self.__filePath,
)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
return "<JSONFileLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
def __repr__(self):
return "<JSONFileLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def __convertRawLogData(items):
ret = []
for item in items:
item = list(item)
item[5] = EnumLogLevel.parse(item[5])
if item[0] == "txt":
pass
elif item[0] == "ex":
pass
elif item[0] == "desc":
item[7] = JSONFileLogger.__convertRawLogData(item[7])
else:
raise Exception("Implementation Error!")
ret.append(item)
return ret
#
@staticmethod
def create(filePath:str):
assert isinstance(filePath, str)
if os.path.isfile(filePath):
with open(filePath, "r") as f:
jsonRawData = json.load(f)
jsonRawData = JSONFileLogger.__convertRawLogData(jsonRawData)
else:
jsonRawData = None
return JSONFileLogger(None, None, 0, jsonRawData, None, filePath)
#
#
```
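A minimal usage sketch (assuming jk_logging is installed; the file name is arbitrary). Every log call rewrites the JSON file via a temporary file and a rename, so the file on disk is always a complete document:
```python
from jk_logging.JSONFileLogger import JSONFileLogger

log = JSONFileLogger.create("run.log.json")
log.notice("starting")
log2 = log.descend("phase 1 ...")
log2.success("phase 1 done")

# Re-opening the same path later continues with the previously persisted entries.
log = JSONFileLogger.create("run.log.json")
log.info("resumed")
```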
#### File: src/jk_logging/____JSONListLogger.py
```python
import os
import json
from .EnumLogLevel import *
from .AbstractLogger import *
from .BufferLogger import BufferLogger
"""
NOTE: This class is essentially the same as BufferLogger/BufferLogger2.
#
# This logger will buffer log messages in an internal array. Later this data can be forwarded to
# other loggers, e.g. in order to store them on disk.
#
class JSONListLogger(BufferLogger):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, idCounter = None, parentID = None, indentLevel = 0, logItemList = None, rootParent = None):
super().__init__(idCounter, parentID, indentLevel, logItemList)
if rootParent is not None:
assert isinstance(rootParent, JSONListLogger)
self.__rootParent = rootParent
#
################################################################################################################################
## Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def _descend(self, logEntryStruct):
nextID = logEntryStruct[1]
newList = logEntryStruct[7]
return JSONListLogger(
self._idCounter,
nextID,
self._indentationLevel + 1,
newList,
self.__rootParent if self.__rootParent else self,
)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
return "<JSONListLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
def __repr__(self):
return "<JSONListLogger(" + hex(id(self)) + ", indent=" + str(self._indentationLevel) + ",parentID=" + str(self._parentLogEntryID) + ")>"
#
def toJSON(self) -> list:
return self.getDataAsJSON()
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def __convertRawLogData(items):
ret = []
for item in items:
item = list(item)
item[5] = EnumLogLevel.parse(item[5])
if item[0] == "txt":
pass
elif item[0] == "ex":
pass
elif item[0] == "desc":
item[7] = JSONListLogger.__convertRawLogData(item[7])
else:
raise Exception("Implementation Error!")
ret.append(item)
return ret
#
@staticmethod
def create():
return JSONListLogger(None, None, 0, None, None)
#
#
"""
```
#### File: src/jk_logging/LoggerInstanceManager.py
```python
from ._inst import instantiate
class LoggerInstanceManager(object):
#
# Initialization method
#
# @param dict cfgs The configuration describing how to instantiate the loggers
#
def __init__(self, cfgs):
self.__loggerCfgs = cfgs
self.__loggerCategoryMaps = {}
self.__loggerCategories = list(cfgs.keys())
self.__loggerCategories.sort()
self.__loggerCategories = tuple(self.__loggerCategories)
#
#
# Returns a list of all categories configured.
#
@property
def categories(self):
return self.__loggerCategories
#
def __getLoggCfg(self, loggerCategory, loggerID):
cfg = self.__loggerCfgs.get(loggerCategory, None)
if cfg is None:
raise Exception("No logger configuration specified for category: " + loggerCategory)
if loggerID is None:
if "static" in cfg:
cfg = cfg["static"]
sStyle = "static"
else:
sStyle = cfg.get("style", None)
if sStyle is None:
raise Exception("Missing type for logger configuration in category: " + loggerCategory)
if sStyle != "static":
raise Exception("Logger configuration for category '" + loggerCategory + "' is not 'static'!")
else:
if "dynamic" in cfg:
cfg = cfg["dynamic"]
sStyle = "dynamic"
else:
sStyle = cfg.get("style", None)
if sStyle is None:
raise Exception("Missing type for logger configuration in category: " + loggerCategory)
if sStyle != "dynamic":
raise Exception("Logger configuration for category '" + loggerCategory + "' is not 'dynamic'!")
cfg = cfg.copy()
if sStyle == "dynamic":
adaptationMap = {
"category": loggerCategory,
"id": loggerID,
}
else:
adaptationMap = {
"category": loggerCategory,
}
cfg = self.____adaptValue(cfg, adaptationMap)
return cfg
#
def ____adaptValue(self, value, adaptationMap):
if isinstance(value, str):
return self.____adaptStr(value, adaptationMap)
elif isinstance(value, (tuple, list)):
i = 0
for v in value:
value[i] = self.____adaptValue(v, adaptationMap)
i += 1
elif isinstance(value, dict):
for key in value:
v = value[key]
value[key] = self.____adaptValue(v, adaptationMap)
return value
#
def ____adaptStr(self, value, adaptationMap):
assert isinstance(value, str)
assert value != None
for key in adaptationMap:
s = "$(" + key + ")"
while True:
pos = value.find(s)
if pos < 0:
break
value = value[0:pos] + adaptationMap[key] + value[pos+len(s):]
return value
#
def getCreateLogger(self, loggerCategory, loggerID = None):
assert isinstance(loggerCategory, str)
loggerCategoryEntry = self.__loggerCategoryMaps.get(loggerCategory, None)
if loggerCategoryEntry is None:
loggerCategoryEntry = [None, {}]
self.__loggerCategoryMaps[loggerCategory] = loggerCategoryEntry
if loggerID is None:
if loggerCategoryEntry[0] != None:
return loggerCategoryEntry[0]
else:
cfg = self.__getLoggCfg(loggerCategory, loggerID)
loggerCategoryEntry[0] = instantiate(cfg)
return loggerCategoryEntry[0]
else:
loggerID = str(loggerID)
logger = loggerCategoryEntry[1].get(loggerID, None)
if logger != None:
return logger
else:
cfg = self.__getLoggCfg(loggerCategory, loggerID)
logger = instantiate(cfg)
loggerCategoryEntry[1][loggerID] = logger
return logger
#
def createLogger(self, loggerCategory, loggerID = None):
assert isinstance(loggerCategory, str)
loggerCategoryEntry = self.__loggerCategoryMaps.get(loggerCategory, None)
if loggerCategoryEntry is None:
loggerCategoryEntry = [None, {}]
self.__loggerCategoryMaps[loggerCategory] = loggerCategoryEntry
if loggerID is None:
if loggerCategoryEntry[0] != None:
raise Exception("A logger for category " + str(loggerCategory) + " already exists!")
else:
cfg = self.__getLoggCfg(loggerCategory, loggerID)
loggerCategoryEntry[0] = instantiate(cfg)
return loggerCategoryEntry[0]
else:
loggerID = str(loggerID)
logger = loggerCategoryEntry[1].get(loggerID, None)
if logger != None:
raise Exception("A logger for category " + str(loggerCategory) + " with ID " + loggerID + " already exists!")
else:
cfg = self.__getLoggCfg(loggerCategory, loggerID)
logger = instantiate(cfg)
loggerCategoryEntry[1][loggerID] = logger
return logger
#
def getLogger(self, loggerCategory, loggerID = None):
assert isinstance(loggerCategory, str)
loggerCategoryEntry = self.__loggerCategoryMaps.get(loggerCategory, None)
if loggerCategoryEntry is None:
return None
if loggerID is None:
return loggerCategoryEntry[0]
else:
loggerID = str(loggerID)
return loggerCategoryEntry[1].get(loggerID, None)
#
def closeLogger(self, loggerCategory, loggerID):
assert isinstance(loggerCategory, str)
loggerCategoryEntry = self.__loggerCategoryMaps.get(loggerCategory, None)
if loggerCategoryEntry is None:
raise Exception("No such log category: " + loggerCategory)
if loggerID is None:
if loggerCategoryEntry[0] is None:
raise Exception("There is no logger for category " + str(loggerCategory) + "!")
else:
loggerCategoryEntry[0].close()
loggerCategoryEntry[0] = None
else:
loggerID = str(loggerID)
logger = loggerCategoryEntry[1].get(loggerID, None)
if logger != None:
logger.close()
loggerCategoryEntry[1][loggerID] = None
else:
raise Exception("There is no logger for category " + str(loggerCategory) + " with ID " + loggerID + "!")
#
def closeAllLoggers(self):
for loggerCategory in self.__loggerCategoryMaps:
loggerCategoryEntry = self.__loggerCategoryMaps[loggerCategory]
if loggerCategoryEntry[0] != None:
loggerCategoryEntry[0].close()
loggerCategoryEntry[0] = None
for loggerID in loggerCategoryEntry[1]:
logger = loggerCategoryEntry[1][loggerID]
logger.close()
loggerCategoryEntry[1].clear()
#
#
```
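The manager above rewrites string values of the selected configuration, replacing `$(category)` and, for dynamic loggers, `$(id)` placeholders before handing the configuration to `instantiate()`. A standalone sketch of that substitution, independent of the library itself:
```python
def adaptStr(value:str, adaptationMap:dict) -> str:
	# Replace every occurrence of "$(key)" by its mapped value, as ____adaptStr() above does.
	for key, replacement in adaptationMap.items():
		value = value.replace("$(" + key + ")", replacement)
	return value

print(adaptStr("/var/log/$(category)/$(id).log", { "category": "jobs", "id": "42" }))
# -> /var/log/jobs/42.log
```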
#### File: src/jk_logging/MulticastLogger.py
```python
from .EnumLogLevel import *
from .AbstractLogger import *
#
# This logger will broadcast log messages to additional loggers.
#
class MulticastLogger(AbstractLogger):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, idCounter = None, loggerList = None, indentationLevel = 0, parentLogEntryID = 0):
super().__init__(idCounter)
self._indentationLevel = indentationLevel
self._parentLogEntryID = parentLogEntryID
self.__loggerList = []
if loggerList is not None:
if isinstance(loggerList, AbstractLogger):
self.__loggerList.append(loggerList)
elif isinstance(loggerList, (tuple, list)):
for item in loggerList:
if isinstance(item, AbstractLogger):
self.__loggerList.append(item)
else:
raise Exception("Invalid object found in logger list: " + str(type(item)))
else:
raise Exception("Invalid logger list: " + str(type(loggerList)))
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def loggers(self) -> tuple:
return tuple(self.__loggerList)
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _logi(self, logEntryStruct, bNeedsIndentationLevelAdaption):
for logger in self.__loggerList:
logger._logi(logEntryStruct, True)
#
def _descend(self, logEntryStruct):
nextID = logEntryStruct[1]
newList = []
for logger in self.__loggerList:
newList.append(logger._descend(logEntryStruct))
return MulticastLogger(self._idCounter, newList, self._indentationLevel + 1, nextID)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def addLogger(self, logger):
assert isinstance(logger, AbstractLogger)
self.__loggerList.append(logger)
#
def removeLogger(self, logger):
assert isinstance(logger, AbstractLogger)
self.__loggerList.remove(logger)
#
def removeAllLoggers(self):
self.__loggerList = []
#
def clear(self):
for logger in self.__loggerList:
logger.clear()
#
def __str__(self):
return "<MulticastLogger(" + hex(id(self)) + ", " + str(self.__loggerList) + ")>"
#
def __repr__(self):
return "<MulticastLogger(" + hex(id(self)) + ", " + str(self.__loggerList) + ")>"
#
def close(self):
for logger in self.__loggerList:
logger.close()
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def create(*argv):
return MulticastLogger(loggerList = argv)
#
#
```
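A minimal sketch (assuming jk_logging is installed) that broadcasts every log entry to a console logger and an in-memory buffer at the same time:
```python
import jk_logging
from jk_logging.MulticastLogger import MulticastLogger

clog = jk_logging.ConsoleLogger.create()
blog = jk_logging.BufferLogger.create()
mlog = MulticastLogger.create(clog, blog)

mlog.notice("printed on the console AND kept in the buffer")
mlog2 = mlog.descend("sub task ...")
mlog2.success("done")

print(blog.toJSON()["logData"])		# the buffered copy of the same entries
```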
#### File: jk_logging/_prepared/LogEntryStruct.py
```python
import os
import typing
import datetime
from .StackTraceStructList import StackTraceStructList
#from .LogEntryStructList import LogEntryStructList
class LogEntryStruct:
__slots__ = (
"_sType", # str "desc", "txt", "ex"
"_logEntryID", # int
"_indentationLevel", # int
"_parentLogEntryID", # int|None
"_timeStamp", # float
"_logLevel", # EnumLogLevel
"_logMsg", # str
"_exClass", # str
"_exMsg", # str
"_exStackTrace", # StackTraceStructList
"_nestedList", # LogEntryStructList
)
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self,
sType:str,
logEntryID:int,
indentationLevel:int,
parentLogEntryID:typing.Union[int,None],
timeStamp:float,
logLevel:str,
logMsg:str,
exClass:str,
exMsg:str,
exStackTrace:StackTraceStructList,
nestedList:list, # LogEntryStructList
):
self._sType = sType
self._logEntryID = logEntryID
self._indentationLevel = indentationLevel
self._parentLogEntryID = parentLogEntryID
self._timeStamp = timeStamp
self._logLevel = logLevel
self._logMsg = logMsg
self._exClass = exClass
self._exMsg = exMsg
self._exStackTrace = exStackTrace
self._nestedList = nestedList
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def sType(self) -> str:
return self._sType
#
@property
def logEntryID(self) -> int:
return self._logEntryID
#
@property
def indentationLevel(self) -> int:
return self._indentationLevel
#
@property
def parentLogEntryID(self) -> typing.Union[int,None]:
return self._parentLogEntryID
#
@property
def timeStamp(self) -> float:
return self._timeStamp
#
@property
def logLevel(self) -> str:
return self._logLevel
#
@property
def logMsg(self) -> str:
return self._logMsg
#
@property
def exClass(self) -> str:
return self._exClass
#
@property
def exMsg(self) -> str:
return self._exMsg
#
@property
def exStackTrace(self) -> StackTraceStructList:
return self._exStackTrace
#
@property
def nestedList(self) -> list: # LogEntryStructList
return self._nestedList
#
################################################################################################################################
## Helper Methods
################################################################################################################################
@staticmethod
def __timeStamp_to_prettyJSONDict(t:float) -> dict:
assert isinstance(t, (int,float))
dt = datetime.datetime.fromtimestamp(t)
return {
"t": t,
"year": dt.year,
"month": dt.month,
"day": dt.day,
"hour": dt.hour,
"minute": dt.minute,
"second": dt.second,
"ms": dt.microsecond // 1000,
"us": dt.microsecond % 1000,
}
#
@staticmethod
def __prettyJSONDict_to_timeStamp(jData:dict) -> float:
assert isinstance(jData, dict)
t = jData["t"]
assert isinstance(t, (int,float))
return t
#
@staticmethod
def __jsonToTimeStamp(jData:typing.Union[float,dict]) -> float:
if isinstance(jData, (int, float)):
return jData
assert isinstance(jData, dict)
t = jData["t"]
assert isinstance(t, (int,float))
return t
#
################################################################################################################################
## Public Methods
################################################################################################################################
def toJSONPretty(self) -> dict:
ret = {
"type": self._sType,
"id": self._logEntryID,
"parentID": self._parentLogEntryID,
"indent": self._indentationLevel,
"timeStamp": self.__timeStamp_to_prettyJSONDict(self._timeStamp),
"logLevel": str(self._logLevel),
"logLevelN": int(self._logLevel),
}
if self._sType == "txt":
ret["text"] = self._logMsg
elif self._sType == "ex":
ret["exception"] = self._exClass
ret["text"] = self._exMsg
ret["stacktrace"] = self._exStackTrace.toJSONPretty() if self._exStackTrace else None
elif self._sType == "desc":
ret["text"] = self._logMsg
ret["children"] = self._nestedList.toJSONPretty()
else:
raise Exception("Implementation Error!")
return ret
#
def toJSON(self) -> list:
if self._sType == "txt":
return [
"txt",
self._logEntryID,
self._indentationLevel,
self._parentLogEntryID,
self._timeStamp,
int(self._logLevel),
self._logMsg,
]
elif self._sType == "ex":
return [
"ex",
self._logEntryID,
self._indentationLevel,
self._parentLogEntryID,
self._timeStamp,
int(self._logLevel),
self._exClass,
self._exMsg,
self._exStackTrace.toJSON(),
]
elif self._sType == "desc":
return [
"desc",
self._logEntryID,
self._indentationLevel,
self._parentLogEntryID,
self._timeStamp,
int(self._logLevel),
self._logMsg,
self._nestedList.toJSON(),
]
else:
raise Exception("Implementation Error!")
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def fromJSONAny(data:typing.Union[dict,list,tuple]):
if isinstance(data, LogEntryStruct):
return data
if isinstance(data, dict):
sType = data["type"]
if sType == "txt":
# 7 entries
return LogEntryStruct(
sType = data["type"],
logEntryID = data["id"],
indentationLevel = data["indent"],
parentLogEntryID = data["parentID"],
timeStamp = LogEntryStruct.__jsonToTimeStamp(data["timeStamp"]),
logLevel = data["logLevelN"],
logMsg = data["text"],
exClass = None,
exMsg = None,
exStackTrace = None,
nestedList = None,
)
elif sType == "ex":
# 9 entries
return LogEntryStruct(
sType = data["type"],
logEntryID = data["id"],
indentationLevel = data["indent"],
parentLogEntryID = data["parentID"],
timeStamp = LogEntryStruct.__jsonToTimeStamp(data["timeStamp"]),
logLevel = data["logLevelN"],
logMsg = None,
exClass = data["exception"],
exMsg = data["text"],
exStackTrace = StackTraceStructList.fromJSONAny(data["stacktrace"]),
nestedList = None,
)
elif sType == "desc":
# 8 entries
return LogEntryStruct(
sType = data["type"],
logEntryID = data["id"],
indentationLevel = data["indent"],
parentLogEntryID = data["parentID"],
timeStamp = LogEntryStruct.__jsonToTimeStamp(data["timeStamp"]),
logLevel = data["logLevelN"],
logMsg = data["text"],
exClass = None,
exMsg = None,
exStackTrace = None,
nestedList = LogEntryStructList.fromJSONAny(data["children"]),
)
else:
raise Exception("Data Error!")
if isinstance(data, (list,tuple)):
sType = data[0]
logEntryID = data[1]
indent = data[2]
parentID = data[3]
timeStamp = LogEntryStruct.__jsonToTimeStamp(data[4])
logLevelN = data[5]
if sType == "txt":
assert len(data) == 7
# 7 entries
return LogEntryStruct(
sType = sType,
logEntryID = logEntryID,
indentationLevel = indent,
parentLogEntryID = parentID,
timeStamp = timeStamp,
logLevel = logLevelN,
logMsg = data[6],
exClass = None,
exMsg = None,
exStackTrace = None,
nestedList = None,
)
elif sType == "ex":
assert len(data) == 9
# 9 entries
return LogEntryStruct(
sType = sType,
logEntryID = logEntryID,
indentationLevel = indent,
parentLogEntryID = parentID,
timeStamp = timeStamp,
logLevel = logLevelN,
logMsg = None,
exClass = data[6],
exMsg = data[7],
exStackTrace = StackTraceStructList.fromJSONAny(data[8]),
nestedList = None,
)
elif sType == "desc":
assert len(data) == 8
# 8 entries
return LogEntryStruct(
sType = sType,
logEntryID = logEntryID,
indentationLevel = indent,
parentLogEntryID = parentID,
timeStamp = timeStamp,
logLevel = logLevelN,
logMsg = data[6],
exClass = None,
exMsg = None,
exStackTrace = None,
nestedList = LogEntryStructList.fromJSONAny(data[7]),
)
else:
raise Exception("Data Error!")
raise TypeError("Data is of type " + str(type(data)))
#
#
class LogEntryStructList(list):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def toJSONPretty(self) -> list:
ret = []
for x in self:
assert isinstance(x, LogEntryStruct)
ret.append(x.toJSONPretty())
return ret
#
def toJSON(self) -> list:
ret = []
for x in self:
assert isinstance(x, LogEntryStruct)
ret.append(x.toJSON())
return ret
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def fromJSONAny(data:typing.Union[list,tuple]):
assert isinstance(data, (list, tuple))
return LogEntryStructList(
LogEntryStruct.fromJSONAny(x) for x in data
)
#
#
```
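The compact list form produced by `toJSON()` and the verbose dict form produced by `toJSONPretty()` can both be read back via `fromJSONAny()`. The following round-trip sketch is illustrative only: the import path and the plain integer standing in for an `EnumLogLevel` value are assumptions, not taken from the file above.
```python
# Round-trip sketch for LogEntryStruct (illustrative; import path and log level value are assumed).
import time
from jk_logging._prepared.LogEntryStruct import LogEntryStruct

entry = LogEntryStruct(
	sType="txt",
	logEntryID=1,
	indentationLevel=0,
	parentLogEntryID=None,
	timeStamp=time.time(),
	logLevel=20,          # a plain int stands in for an EnumLogLevel value here
	logMsg="Hello, log!",
	exClass=None,
	exMsg=None,
	exStackTrace=None,
	nestedList=None,
)

compact = entry.toJSON()                        # ["txt", id, indent, parentID, timeStamp, level, msg]
restored = LogEntryStruct.fromJSONAny(compact)  # accepts the list form, the dict form and LogEntryStruct itself
assert restored.logMsg == "Hello, log!"
print(entry.toJSONPretty())                     # verbose dict with an expanded timestamp
```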
#### File: src/jk_logging/SimpleFileLogger.py
```python
import os
from .EnumLogLevel import *
from .AbstractLogger import *
from .fmt.LogMessageFormatter import *
#
# This logger writes log data to a log file.
#
class SimpleFileLogger(AbstractLogger):
################################################################################################################################
## Constants
################################################################################################################################
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, filePath, idCounter = None, parentID = None, indentationLevel = 0,
bAppendToExistingFile = True, bFlushAfterEveryLogMessage = True, fileMode = 0o0600, logMsgFormatter = None):
super().__init__(idCounter)
self._indentationLevel = indentationLevel
if parentID is None:
parentID = self._idCounter.next()
self._parentLogEntryID = parentID
self.__logMsgFormatter = DEFAULT_LOG_MESSAGE_FORMATTER if logMsgFormatter is None else logMsgFormatter
assert isinstance(self.__logMsgFormatter, AbstractLogMessageFormatter)
self.__filePath = filePath
self.__bAppendToExistingFile = bAppendToExistingFile
self.__bFlushAfterEveryLogMessage = bFlushAfterEveryLogMessage
if not bAppendToExistingFile:
if os.path.isfile(filePath):
os.unlink(filePath)
self.__f = open(filePath, "a")
if fileMode is not None:
os.chmod(filePath, fileMode)
self.__fileMode = fileMode
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def logMsgFormatter(self) -> AbstractLogMessageFormatter:
return self.__logMsgFormatter
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _logi(self, logEntryStruct, bNeedsIndentationLevelAdaption):
if self.__f.closed:
raise Exception("Logger already closed.")
if bNeedsIndentationLevelAdaption:
logEntryStruct = list(logEntryStruct)
logEntryStruct[2] = self._indentationLevel
lineOrLines = self.__logMsgFormatter.format(logEntryStruct)
if isinstance(lineOrLines, str):
self.__f.write(lineOrLines + "\n")
else:
for line in lineOrLines:
self.__f.write(line + "\n")
if self.__bFlushAfterEveryLogMessage:
self.__f.flush()
#
# TODO: provide a more efficient implementation. Currently <c>_logiAll()</c> delegates to <c>_logi()</c> for every entry, so the closed-state check and the flush are performed once per entry instead of once per batch.
def _logiAll(self, logEntryStructList, bNeedsIndentationLevelAdaption):
if self.__f.closed:
raise Exception("Logger already closed.")
#for logEntryStruct in logEntryStructList:
# lineOrLines = self.__logMsgFormatter.format(logEntryStruct)
# if isinstance(lineOrLines, str):
# self.__f.write(lineOrLines + "\n")
# else:
# for line in lineOrLines:
# self.__f.write(line + "\n")
super()._logiAll(logEntryStructList, bNeedsIndentationLevelAdaption)
#if self.__bFlushAfterEveryLogMessage:
# self.__f.flush()
#
def _descend(self, logEntryStruct):
nextID = logEntryStruct[1]
return SimpleFileLogger(self.__filePath, self._idCounter, nextID, self._indentationLevel + 1,
self.__bAppendToExistingFile, self.__bFlushAfterEveryLogMessage, self.__fileMode, self.__logMsgFormatter)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def close(self):
if not self.__f.closed:
self.__f.flush()
self.__f.close()
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def create(filePath, bAppendToExistingFile = True, bFlushAfterEveryLogMessage = True, fileMode = 0o0600, logMsgFormatter = None):
assert isinstance(filePath, str)
assert isinstance(bAppendToExistingFile, bool)
assert isinstance(bFlushAfterEveryLogMessage, bool)
assert isinstance(fileMode, int)
if logMsgFormatter is not None:
assert isinstance(logMsgFormatter, AbstractLogMessageFormatter)
return SimpleFileLogger(filePath, None, None, 0, bAppendToExistingFile, bFlushAfterEveryLogMessage, fileMode, logMsgFormatter)
#
#
```
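A minimal usage sketch for the logger above. It assumes `SimpleFileLogger` is exported at package level just like `ConsoleLogger` and `BufferLogger` are in the test scripts below; the file name and messages are made up.
```python
# Usage sketch (illustrative): write a small, indented log to a file and close it.
import jk_logging

log = jk_logging.SimpleFileLogger.create(
	"app.log",
	bAppendToExistingFile=True,         # keep content from earlier runs
	bFlushAfterEveryLogMessage=True,    # flush after every entry
	fileMode=0o600,                     # chmod applied to the log file
)
log.info("application started")

log2 = log.descend("processing input ...")  # nested entries are written with one extra indentation level
log2.notice("42 records processed")

log.close()                                 # flushes and closes the underlying file handle
```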
#### File: python-module-jk-logging/testing/test3.py
```python
import os
import time
import traceback
import sys
import json
import abc
from jk_logging import *
def logException(blog, text):
try:
raise Exception(text)
except Exception as ee:
blog.error(ee)
blog = BufferLogger.create()
clog = ConsoleLogger.create()
flog = FileLogger.create("test-output-%Y-%m-%d-%H-%M.log.txt", "minute")
log = MulticastLogger.create(clog, blog, flog)
log.error("This is a test.")
log2 = log.descend("Descending ...")
logException(log2, "This is an exception")
print(json.dumps(blog.getDataAsJSON(), indent = 4))
blog.forwardTo(clog)
log.close()
```
#### File: python-module-jk-logging/testing/test6.py
```python
import os
import time
import traceback
import sys
import abc
import jk_logging
if os.path.isfile("test6-log.json"):
os.unlink("test6-log.json")
def doLogTest(log):
log.trace("This is a test for TRACE.")
log.debug("This is a test for DEBUG.")
log.notice("This is a test for NOTICE.")
log.info("This is a test for INFO.")
log.warning("This is a test for WARNING.")
log.error("This is a test for ERROR.")
log.success("This is a test for SUCCESS.")
log2 = log.descend("Nested log messages ...")
log2.notice("Some log data.")
log3 = log2.descend("Even deeper nested log messages ...")
log3.notice("Some other log data.")
log2 = log.descend("Frequent log messages ...")
log2.info("Some more log data.")
#
print()
print("-- JSONFileLogger --")
jlog = jk_logging.JSONFileLogger.create("test6-log.json")
doLogTest(jlog.descend("TEST"))
print()
print("-- forwarding to ConsoleLogger --")
clog = jk_logging.ConsoleLogger.create()
jlog.forwardTo(clog)
print()
print("-- reconstructing and forwarding to ConsoleLogger --")
clog = jk_logging.ConsoleLogger.create()
jlog2 = jk_logging.JSONFileLogger.create("test6-log.json")
jlog2.forwardTo(clog)
``` |
{
"source": "jkpubsrc/python-module-jk-mediawikiapi",
"score": 3
} |
#### File: src/jk_mediawikiapi/MWAPIException.py
```python
class MWAPIException(Exception):
def __init__(self, jsonResponse:dict):
self.errCode = jsonResponse["error"]["code"]
self.errMsg = jsonResponse["error"]["info"]
s = jsonResponse["error"]["code"] + ": " + jsonResponse["error"]["info"]
super().__init__(s)
#
#
```
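`MWAPIException` expects the standard MediaWiki API error envelope (`{"error": {"code": ..., "info": ...}}`). A short sketch with a made-up payload; the import path is assumed from the file location above.
```python
# Illustrative only: the payload mimics a MediaWiki API error response.
from jk_mediawikiapi.MWAPIException import MWAPIException

jsonResponse = {
	"error": {
		"code": "badtoken",
		"info": "Invalid CSRF token.",
	}
}

try:
	raise MWAPIException(jsonResponse)
except MWAPIException as e:
	print(e.errCode)   # badtoken
	print(e.errMsg)    # Invalid CSRF token.
	print(str(e))      # badtoken: Invalid CSRF token.
```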
#### File: src/jk_mediawikiapi/MWUserInfo.py
```python
import typing
import jk_prettyprintobj
from .MWPageContent import MWPageContent
from .MWTimestamp import MWTimestamp
from .MWPageRevision import MWPageRevision
from .MWNamespaceInfo import MWNamespaceInfo
class MWUserInfo(jk_prettyprintobj.DumpMixin):
def __init__(self,
userID:int,
name:str,
groups:typing.List[str],
implicitGroups:typing.List[str],
tRegistration:MWTimestamp,
rights:typing.List[str],
nEditCount:int,
):
assert isinstance(userID, int)
self.userID = userID
assert isinstance(name, str)
self.name = name
assert isinstance(groups, (list, tuple))
for x in groups:
assert isinstance(x, str)
self.groups = groups
assert isinstance(implicitGroups, (list, tuple))
for x in implicitGroups:
assert isinstance(x, str)
self.implicitGroups = implicitGroups
assert isinstance(tRegistration, MWTimestamp)
self.tRegistration = tRegistration
assert isinstance(rights, (list, tuple))
for x in rights:
assert isinstance(x, str)
self.rights = rights
assert isinstance(nEditCount, int)
self.nEditCount = nEditCount
#
def _dumpVarNames(self) -> list:
return [
"userID",
"name",
"groups",
"implicitGroups",
"tRegistration",
"rights",
"nEditCount",
]
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-mediawiki",
"score": 3
} |
#### File: jk_mediawiki/impl/lang_support_php.py
```python
import re
import os
import sys
from jk_utils import TypedValue
from jk_utils.tokenizer import RegExBasedTokenizer, Token
def tokenValueToPHP(dataType:str, value):
if dataType == "bool":
return "true" if value else "false"
elif dataType == "str2":
return "\"" + PHP.encodeString(value) + "\""
elif dataType == "str1":
return "\'" + PHP.encodeString(value) + "\'"
elif dataType == "int":
return str(value)
elif dataType == "op":
return value
elif dataType == "word":
return value
elif dataType == "magic":
return value
else:
raise Exception("Implementation Error! (" + repr(dataType) + ", " + repr(value) + ")")
#
#### Add a "toPHP()" method to TypedValue
def __toPHP(someVar):
return tokenValueToPHP(someVar.dataType, someVar.value)
#
setattr(TypedValue, "toPHP", __toPHP)
#
# This tokenizer parses a PHP file.
#
class PHPTokenizer(RegExBasedTokenizer):
def __init__(self):
super().__init__([
( "phpintro", "<\\?php" ),
( "phpoutro", "\\?>" ),
( "str1", r"'", r"[^']*", r"'" ),
( "str2", r"\"", r"[^\"]*", r"\"" ),
( "int_1", r"[+-]?[1-9][0-9]*" ),
( "int_2", r"0" ),
( "varref", r"\$", r"[a-zA-Z_][a-zA-Z0-9_]*", None ),
( "commentx", "#=#" ),
( "comment_1", "#[^\n]*" ),
( "comment_2", "//[^\n]*" ),
( "comment_3", "/*[.*?]*/" ),
( "lparen1", "\\(" ),
( "rparen1", "\\)" ),
( "lparen2", "\\[" ),
( "rparen2", "\\]" ),
( "lparen3", "\\{" ),
( "rparen3", "\\}" ),
( "semicolon", r";" ),
( "bool_1", r"true" ),
( "bool_2", r"false" ),
( "null", r"null" ),
( "word", r"[a-zA-Z_][a-zA-Z0-9_]*" ),
])
i = 1
for op in [ "===", "!==", "<<=", ">>=", "<=>",
"<>", "||", "&&", "==", "!=", "+=", "-=", "*=", "/=", "%=", "<=", ">=", "^=", "=>", "++", "--", ">>", "<<", "??", "->",
"^", "!", "%", "+", "-", "*", "/", ".", ",", "?", ":", "~", "@", "&", "|", "=" ]:
self.addTokenPattern("op_" + str(i), re.escape(op))
i += 1
self.compile()
self.registerTypeParsingDelegate("int", "1", self.__parseInt)
self.registerTypeParsingDelegate("int", "2", self.__parseInt)
self.registerTypeParsingDelegate("str1", None, PHP.decodeString)
self.registerTypeParsingDelegate("str2", None, PHP.decodeString)
self.registerTypeParsingDelegate("bool", "1", self.__parseBool)
self.registerTypeParsingDelegate("bool", "2", self.__parseBool)
self.registerTypeParsingDelegate("null", None, self.__parseNull)
#
def __parseNull(self, rawTokenText):
return None
#
def __parseBool(self, rawTokenText):
return rawTokenText == "true"
#
def __parseInt(self, rawTokenText):
return int(rawTokenText)
#
def tokenize(self, text, bEmitWhiteSpaces = False, bEmitNewLines = False, bEmitComments = False):
for token in super().tokenize(text, bEmitWhiteSpaces, bEmitNewLines):
if (token.type == "comment") and not bEmitComments:
continue
yield token
#
#
class PHP(object):
_REPL1 = {
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
"e": "\x1B",
"f": "\f",
}
_REPL2 = {
"\x00": "\\0",
"\x01": "\\x01",
"\x02": "\\x02",
"\x03": "\\x03",
"\x04": "\\x04",
"\x05": "\\x05",
"\x06": "\\x06",
"\x07": "\\x07",
"\x08": "\\x08",
"\t": "\\t", # 0x09
"\n": "\\n", # 0x0a
"\v": "\\v", # 0x0b
"\f": "\\f", # 0x0c
"\r": "\\r", # 0x0d
"\x0e": "\\x0e",
"\x0f": "\\x0f",
"\x10": "\\x10",
"\x11": "\\x11",
"\x12": "\\x12",
"\x13": "\\x13",
"\x14": "\\x14",
"\x15": "\\x15",
"\x16": "\\x16",
"\x17": "\\x17",
"\x18": "\\x18",
"\x19": "\\x19",
"\x1a": "\\x1a",
"\x1b": "\\e",
"\x1c": "\\x1c",
"\x1d": "\\x1d",
"\x1e": "\\x1e",
"\x1f": "\\x1f",
"\"": "\\\"",
"\\": "\\\\",
}
_RE_OCTAL = re.compile("[0-7]{1,3}")
_RE_HEX = re.compile("x[0-9A-Fa-f]{1,2}")
_RE_UNICODE = re.compile("u{[0-9A-Fa-f]+}")
"""
@staticmethod
def encode(someVar):
if someVar.dataType == "bool":
if someVar.value:
return "true"
else:
return "false"
elif someVar.dataType == "str":
return PHP.encodeString(someVar.value)
elif someVar.dataType == "int":
return str(someVar.value)
elif someVar.dataType == "const":
return someVar.value
else:
raise Exception("Implementation Error!")
#
"""
"""
@staticmethod
def parse(text):
if text is None:
return None
if (text == "true") or (text == "false"):
return TypedValue("bool", text == "true")
patternStr = re.compile(r"^(?P<d>[\"'])(?P<v>.*)(?P=d)$")
matchResult = patternStr.match(text)
if matchResult:
return TypedValue("str", PHP.decodeString(matchResult.group(2)))
patternConst = re.compile(r"^(?P<v>[a-zA-Z_][a-zA-Z0-9_]*)$")
matchResult = patternConst.match(text)
if matchResult:
return TypedValue("const", matchResult.group(1))
patternInt = re.compile(r"^(?P<v>[+-]?[1-9][0-9]*)$")
matchResult = patternInt.match(text)
if matchResult:
return TypedValue("int", int(matchResult.group(1)))
if text.startswith("array(") and text.endswith(")"):
text = text[6:]
text = text[:-1]
return None
#
"""
#
# Creates text from a given string that can be inserted directly into a PHP source code file to represent a string literal.
#
@staticmethod
def encodeString(someString):
ret = ""
for c in someString:
ret += PHP._REPL2.get(c, c)
return ret
#
#
# Parses (= decodes) a PHP source code string.
#
# See: http://php.net/manual/en/language.types.string.php
#
@staticmethod
def decodeString(someString):
ret = ""
bMasked = False
i = 0
imax = len(someString)
while i < imax:
c = someString[i]
if bMasked:
result = PHP._RE_UNICODE.match(someString, i)
if result:
clip = result.group(0)
i += len(clip)
ret += chr(int(clip[2:-1], 16))    # \u{...}: hex code point
else:
result = PHP._RE_HEX.match(someString, i)
if result:
clip = result.group(0)
i += len(clip)
ret += chr(int(clip[1:], 16))    # \xN or \xNN: hex escape
else:
result = PHP._RE_OCTAL.match(someString, i)
if result:
clip = result.group(0)
i += len(clip)
ret += chr(int(clip, 8))    # \N, \NN or \NNN: octal escape
else:
# fallback
repl = PHP._REPL1.get(c, None)
if repl is None:
ret += c
else:
ret += repl
i += 1
bMasked = False
else:
if c == "\\":
bMasked = True
else:
ret += c
i += 1
return ret
#
#
```
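The string helpers and the tokenizer above are easiest to understand with a few concrete values. A sketch, assuming the module is importable under the path shown in the file header:
```python
# Illustrative sketch of PHP.encodeString / PHP.decodeString and PHPTokenizer.
from jk_mediawiki.impl.lang_support_php import PHP, PHPTokenizer

# control characters and quotes become PHP escape sequences
print(PHP.encodeString('say "hi"\n'))                      # say \"hi\"\n

# simple, hex, octal and \u{...} escapes are decoded again
print(PHP.decodeString(r"tab:\t A:\x41 smile:\u{1F600}"))

# the tokenizer yields Token objects for a LocalSettings.php style snippet
for token in PHPTokenizer().tokenize('<?php $wgSitename = "MyWiki";'):
	print(token.type, repr(token.value))
```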
#### File: jk_mediawiki/impl/WikiCronProcessFilter.py
```python
import os
import typing
import jk_typing
from .ProcessFilter import ProcessFilter
class WikiCronProcessFilter(ProcessFilter):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, userName:str, wikiInstDirPath:typing.Union[str,None], source:typing.Callable):
# {
# 'ppid': 21827,
# 'pid': 21841,
# 'tty': 'pts/7',
# 'stat': 'S',
# 'uid': 1000,
# 'gid': 1000,
# 'cmd': 'php',
# 'args': '/srv/wikis/srv/wikis/infowiki/infowiki/maintenance/runJobs.php --wait',
# 'user': 'woodoo',
# 'group': 'woodoo'
# }
super().__init__(
source = source,
userName = userName,
cmdExact="php",
#argEndsWith="runJobs.php",
argExact=os.path.join(wikiInstDirPath, "maintenance", "runJobs.php") if wikiInstDirPath else None
)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
#
```
#### File: jk_mediawiki/lsfile/MediaWikiLocalSettingsArrayAppend.py
```python
import os
from jk_utils import *
from jk_utils.tokenizer import *
from ..impl.lang_support_php import *
class MediaWikiLocalSettingsArrayAppend(object):
# ================================================================================================================================
# ==== Constructor Methods
def __init__(self, changedFlag:ChangedFlag, lineNo:int, colNo:int, bIsActive:bool, varName:str, value):
assert isinstance(changedFlag, ChangedFlag)
assert isinstance(lineNo, int)
assert isinstance(colNo, int)
assert isinstance(bIsActive, bool)
assert isinstance(varName, str)
assert isinstance(value, TypedValue)
self.__changedFlag = changedFlag
self.__lineNo = lineNo
self.__colNo = colNo
self.__bIsActive = bIsActive
self.__varName = varName
self.__value = value
#
# ================================================================================================================================
# ==== Properties
@property
def lineNo(self) -> int:
return self.__lineNo
#
@property
def colNo(self) -> int:
return self.__colNo
#
@property
def varName(self) -> str:
return self.__varName
#
@property
def value(self):
return self.__value
#
@property
def isActive(self) -> bool:
return self.__bIsActive
#
@property
def isCommentedOut(self) -> bool:
return not self.__bIsActive
#
# ================================================================================================================================
# ==== Methods
def setValue(self, value):
assert isinstance(value, TypedValue)
self.__value = value
self.__changedFlag.setChanged(True)
#
def toPHP(self):
ret = "" if self.__bIsActive else "#=# "
ret += "$" + self.__varName
ret += "[] = "
ret += self.__value.toPHP()
ret += ";"
return ret
#
def __str__(self):
return self.toPHP()
#
def __repr__(self):
return self.toPHP()
#
def activate(self):
if not self.__bIsActive:
self.__bIsActive = True
self.__changedFlag.setChanged(True)
#
def deactivate(self):
if self.__bIsActive:
self.__bIsActive = False
self.__changedFlag.setChanged(True)
#
# ================================================================================================================================
# ==== Static Methods
@staticmethod
def parseFromDict(changedFlag:ChangedFlag, dataMap:dict):
assert isinstance(changedFlag, ChangedFlag)
assert isinstance(dataMap, dict)
lineNo = dataMap["lineNo"]
colNo = dataMap["colNo"]
bIsActive = dataMap["active"]
varName = dataMap["varName"]
varType = dataMap["varType"]
assert varType == "value"
value = dataMap["value"]
assert isinstance(value, TypedValue)
return MediaWikiLocalSettingsArrayAppend(changedFlag, lineNo, colNo, bIsActive, varName, value)
#
#
```
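An appended-array line such as `$wgExtensionFunctions[] = 'efMyExtension';` maps onto this class as sketched below. `ChangedFlag` and `TypedValue` are the jk_utils helpers the module already imports; the concrete line/column numbers and values are made up.
```python
# Illustrative sketch: round-tripping one appended-array line through the class above.
from jk_utils import ChangedFlag, TypedValue
from jk_mediawiki.lsfile.MediaWikiLocalSettingsArrayAppend import MediaWikiLocalSettingsArrayAppend

changedFlag = ChangedFlag(False)
entry = MediaWikiLocalSettingsArrayAppend(
	changedFlag, 12, 1, True, "wgExtensionFunctions", TypedValue("str1", "efMyExtension")
)

print(entry.toPHP())        # $wgExtensionFunctions[] = 'efMyExtension';
entry.deactivate()          # comment the line out on the next save
print(entry.toPHP())        # #=# $wgExtensionFunctions[] = 'efMyExtension';
print(changedFlag.value)    # True - the settings file now counts as modified
```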
#### File: jk_mediawiki/lsfile/MediaWikiLocalSettingsFile.py
```python
import os
import codecs
import re
import shutil
from jk_utils import *
from jk_utils.tokenizer import *
import jk_console
from ..impl.lang_support_php import *
from .MediaWikiLocalSettingsVariableAssignment import MediaWikiLocalSettingsVariableAssignment
from .MediaWikiLocalSettingsComplexVariableAssignment import MediaWikiLocalSettingsComplexVariableAssignment
from .MediaWikiLocalSettingsArrayAppend import MediaWikiLocalSettingsArrayAppend
#
# This class represents the "LocalSettings.php" file in a MediaWiki installation.
#
# During loading the file content is parsed into a sequence of entries. Each entry is a 2-tuple containing the following data:
# 0) An identifier specifying the type of the entry: "arrayAppend", "varAssign", "varAssignComplex" or "other"
# 1) Either the parsed assignment object (<c>MediaWikiLocalSettingsArrayAppend</c>, <c>MediaWikiLocalSettingsVariableAssignment</c>,
# <c>MediaWikiLocalSettingsComplexVariableAssignment</c>) or, for "other" entries, the raw token.
#
class MediaWikiLocalSettingsFile(object):
################################################################################################################################
## Constants
################################################################################################################################
__VALUE_PATTERN = TokenPatternAlternatives([
TokenPattern("str1"),
TokenPattern("str2"),
TokenPattern("int"),
TokenPattern("bool"),
TokenPattern("null"),
TokenPattern("word"),
])
__OPTIONAL_SPACE_OR_NEWLINE = TokenPatternOptional(TokenPatternAlternatives([
TokenPattern("SPACE"),
TokenPattern("NEWLINE"),
]))
__VALUE_LIST_PATTERN = TokenPatternSequence([
__VALUE_PATTERN.derive(assignToVarTyped = "value", bVarIsArray = True),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternOptional(TokenPatternRepeat(TokenPatternSequence([
TokenPattern("op", ","),
__OPTIONAL_SPACE_OR_NEWLINE,
__VALUE_PATTERN.derive(assignToVarTyped = "value", bVarIsArray = True),
__OPTIONAL_SPACE_OR_NEWLINE,
])))
])
__PARSING_DEFAULTS = {
"active": True,
}
__STMT_VARIABLE_APPENDING = TokenPatternSequence([
TokenPatternOptional(TokenPatternSequence([
TokenPattern("commentx").setTag("active", False),
__OPTIONAL_SPACE_OR_NEWLINE,
])),
TokenPattern("varref", assignToVar = "varName"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("lparen2"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("rparen2"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("op", "="),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternAlternatives([
TokenPatternSequence([
TokenPattern("word", "array").setTag("varType", "array"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("lparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternOptional(__VALUE_LIST_PATTERN),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("rparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
]),
__VALUE_PATTERN.derive(assignToVarTyped = "value").setTag("varType", "value"),
]),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("semicolon"),
])
# $someVar = value
# $someVar[value] = value
# $someVar = array(value)
# $someVar[value] = array(value)
__STMT_VARIABLE_ASSIGNMENT = TokenPatternSequence([
TokenPatternOptional(TokenPatternSequence([
TokenPattern("commentx").setTag("active", False),
__OPTIONAL_SPACE_OR_NEWLINE,
])),
TokenPattern("varref", assignToVar = "varName"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternOptional(TokenPatternRepeat(TokenPatternSequence([
TokenPattern("lparen2"),
__OPTIONAL_SPACE_OR_NEWLINE,
__VALUE_PATTERN.derive(assignToVarTyped = "index", bVarIsArray = True),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("rparen2"),
__OPTIONAL_SPACE_OR_NEWLINE,
]))),
TokenPattern("op", "="),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternAlternatives([
TokenPatternSequence([
TokenPattern("word", "array").setTag("varType", "array"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("lparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternOptional(__VALUE_LIST_PATTERN),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("rparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
]),
TokenPatternSequence([
TokenPattern("word", "dirname"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("word", "__DIR__").setTag("varType", "dirValue"),
__OPTIONAL_SPACE_OR_NEWLINE,
]),
TokenPatternSequence([
TokenPattern("word", "dirname"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("word", "__FILE__").setTag("varType", "fileValue"),
__OPTIONAL_SPACE_OR_NEWLINE,
]),
TokenPatternSequence([
TokenPattern("word", "dirname"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("lparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("word", "__FILE__").setTag("varType", "dirValue"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("rparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
]),
TokenPatternSequence([
TokenPattern("word", "dirname"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("lparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("word", "__DIR__").setTag("varType", "parentDirValue"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("rparen1"),
__OPTIONAL_SPACE_OR_NEWLINE,
]),
__VALUE_PATTERN.derive(assignToVarTyped = "value").setTag("varType", "value"),
]),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("semicolon"),
])
__STMT_VARIABLE_ASSIGNMENT_2 = TokenPatternSequence([
TokenPatternOptional(TokenPatternSequence([
TokenPattern("commentx").setTag("active", False),
__OPTIONAL_SPACE_OR_NEWLINE,
])),
TokenPattern("varref", assignToVar = "varName"),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("op", "="),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPatternRepeat(
TokenPatternAlternatives([
TokenPattern("SPACE"),
TokenPattern("varref").derive(assignToVarTyped = "x", bVarIsArray = True),
TokenPattern("op", ".").derive(assignToVarTyped = "x", bVarIsArray = True),
TokenPattern("str1").derive(assignToVarTyped = "x", bVarIsArray = True),
TokenPattern("str2").derive(assignToVarTyped = "x", bVarIsArray = True),
]),
),
__OPTIONAL_SPACE_OR_NEWLINE,
TokenPattern("semicolon"),
])
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self):
self.__data = None
self.__changedFlag = ChangedFlag(False)
self.__filePath = None
self.__magicVarValues = None
#
################################################################################################################################
## Properties
################################################################################################################################
@property
def isChanged(self):
return self.__changedFlag.value
#
@property
def isLoaded(self):
return self.__data is not None
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
#
# For debugging purposes only: Write the internal state of this object to STDOUT.
#
def dump(self, onlyLineNumbers:list = None):
if onlyLineNumbers is not None:
assert isinstance(onlyLineNumbers, (set, tuple, list))
onlyLineNumbers = set(onlyLineNumbers)
print("MediaWikiLocalSettingsFile")
print("\t__bChanged: " + str(self.__changedFlag))
print("\t__filePath: " + str(self.__filePath))
if self.__data != None:
table = jk_console.SimpleTable()
if onlyLineNumbers:
bFirst = True
bLastWasPoints = False
for (b, data) in self.__data:
if data.lineNo in onlyLineNumbers:
if bFirst:
bFirst = False
if data.lineNo > 1:
table.addRow("...", "...", "...")
table.addRow(str(b), MediaWikiLocalSettingsFile.__getType(data), str(data))
bLastWasPoints = False
else:
if not bLastWasPoints:
table.addRow("...", "...", "...")
bLastWasPoints = True
bFirst = False
else:
for (b, data) in self.__data:
table.addRow(str(b), MediaWikiLocalSettingsFile.__getType(data), str(data))
print("\t__lines:")
table.print(prefix="\t\t")
#
#
# Load a LocalSettings.php file.
#
# The heart of this method is a parser that identifies PHP variable assignments. As it cannot handle every construct PHP syntax allows,
# this parser will only recognize variable assignments similar to these examples:
# * <c>$someVarName = 123;</c>
# * <c>$someVarName = "abc";</c>
# * <c>$someVarName = MY_CONSTANT;</c>
# * <c>$someVarName = true;</c>
# * <c>$someVarName = null;</c>
# * <c>$someVarName = array();</c>
# * <c>$someVarName[123] = 5;</c>
# * <c>$someVarName[123] = array();</c>
# * <c>$someVarName["xyz"][123] = array('abc', false, null);</c>
# * <c>$someVarName[] = 123;</c>
#
# The data for loading can either be specified directly (parameter: <c>rawText</c>), by exact file path (parameter: <c>filePath</c>) or by
# specifying the installation directory (parameter: <c>dirPath</c>). <c>rawText</c> takes precedence over <c>filePath</c>, which in turn
# takes precedence over <c>dirPath</c>.
#
# @param str dirPath The MediaWiki installation directory path.
# @param str filePath The file path of the MediaWiki "LocalSettings.php" file.
# @param str rawText The raw file content of a "LocalSettings.php" file.
#
def load(self, dirPath = None, filePath = None, rawText:str = None): # TODO: add logging
if rawText is not None:
assert isinstance(rawText, str)
filePath = None
elif filePath is not None:
assert isinstance(filePath, str)
# TODO: add logging
with codecs.open(filePath, "r", "utf-8") as f:
rawText = f.read()
elif dirPath is not None:
assert isinstance(dirPath, str)
filePath = os.path.join(dirPath, "LocalSettings.php")
# TODO: add logging
with codecs.open(filePath, "r", "utf-8") as f:
rawText = f.read()
else:
raise Exception("At least one of the following arguments must be specified: 'rawText' or 'filePath'!")
if (dirPath is None) and (filePath is not None):
dirPath = os.path.dirname(filePath)
self.__magicVarValues = {
"__FILE__": filePath,
"__DIR__": dirPath,
"dirname(__DIR__)": os.path.dirname(dirPath) if dirPath else None,
}
tokens = list(PHPTokenizer().tokenize(rawText, bEmitWhiteSpaces = True, bEmitComments = True, bEmitNewLines = True))
#for t in tokens:
# print(t)
# resultDataList will receive 2-tuples where
# the first item indicates the entry type - either "arrayAppend", "varAssignComplex", "varAssign" or "other" - and
# the second item will either be a token or a MediaWikiLocalSettingsValue.
resultDataList = []
pos = 0
while pos < len(tokens):
(bResult, n, data) = MediaWikiLocalSettingsFile.__STMT_VARIABLE_APPENDING.tryMatch(tokens, pos, MediaWikiLocalSettingsFile.__PARSING_DEFAULTS)
if bResult:
assert n > 0
# interpret pattern encountered and store it
resultDataList.append( ( "arrayAppend", MediaWikiLocalSettingsArrayAppend.parseFromDict(self.__changedFlag, data) ) )
#print("--arrayAppend--") # DEBUG
#for i in range(0, n): # DEBUG
# print("\t", tokens[pos+i]) # DEBUG
# advance
pos += n
continue
(bResult, n, data) = MediaWikiLocalSettingsFile.__STMT_VARIABLE_ASSIGNMENT.tryMatch(tokens, pos, MediaWikiLocalSettingsFile.__PARSING_DEFAULTS)
if bResult:
assert n > 0
# interpret pattern encountered and store it
resultDataList.append( ( "varAssign", MediaWikiLocalSettingsVariableAssignment.parseFromDict(self.__changedFlag, data) ) )
#print("--varAssign--") # DEBUG
#for i in range(0, n): # DEBUG
# print("\t", tokens[pos+i]) # DEBUG
# advance
pos += n
continue
(bResult, n, data) = MediaWikiLocalSettingsFile.__STMT_VARIABLE_ASSIGNMENT_2.tryMatch(tokens, pos, MediaWikiLocalSettingsFile.__PARSING_DEFAULTS)
if bResult:
assert n > 0
# interpret pattern encountered and store it
resultDataList.append( ( "varAssignComplex", MediaWikiLocalSettingsComplexVariableAssignment.parseFromDict(self.__changedFlag, data) ) )
#print("--varAssignComplex--") # DEBUG
#for i in range(0, n): # DEBUG
# print("\t", tokens[pos+i]) # DEBUG
# advance
pos += n
continue
resultDataList.append( ( "other", tokens[pos] ) )
pos += 1
#for b, t in resultDataList:
# print(str(b) + "\t\t" + str(t))
#sys.exit(0)
self.__data = resultDataList
self.__filePath = filePath
self.__changedFlag.setChanged(False)
#
#
# Write the file (and all changes applied). If the data has not been loaded from a file calling this method will fail.
# In that case use <c>toStr()</c> instead.
#
# Before writing to the file a backup file of "LocalSettings.php" named "LocalSettings.php.sav" is created.
#
def save(self):
if not self.__changedFlag.value:
return
if self.__data is None:
raise Exception("Not loaded!")
if self.__filePath is None:
raise Exception("Data was originally not loaded from a file!")
shutil.copy2(self.__filePath, self.__filePath + ".sav")
with codecs.open(self.__filePath, "w", "utf-8") as f:
f.write(self.toStr())
self.__changedFlag.setChanged(False)
#
#
# (Re)Generate PHP data from the parsed text.
#
# @return str Returns the text.
#
def toStr(self) -> str:
if self.__data is None:
raise Exception("Not loaded!")
ret = []
for stype, item in self.__data:
if stype == "other":
if item.type == "varref":
ret.append("$" + item.value)
elif item.type in [ "bool", "str1", "str2", "int", "word" ]:
ret.append(tokenValueToPHP(item.type, item.value))
else:
assert isinstance(item.value, str)
ret.append(item.value)
else:
ret.append(item.toPHP())
return "".join(ret)
#
#
# (Re)Generate PHP data from the parsed text.
#
# @return list Returns a list of lines.
#
def toLines(self) -> list:
if self.__data is None:
raise Exception("Not loaded!")
ret = []
buffer = []
for stype, item in self.__data:
if stype == "other":
if item.type == "NEWLINE":
ret.append("".join(buffer))
buffer.clear()
elif item.type == "varref":
buffer.append("$" + item.value)
elif item.type in [ "bool", "str1", "str2", "int", "word" ]:
buffer.append(tokenValueToPHP(item.type, item.value))
else:
assert isinstance(item.value, str)
buffer.append(item.value)
else:
buffer.append(item.toPHP())
if buffer:
ret.append("".join(buffer))
else:
ret.append("")
return ret
#
#
# Get a variable value.
# This method will resolve the value: If it contains magic constants or simple expressions the syntax will be evaluated and the resulting value returned.
#
# @return value This data or <c>None</c> if the variable does not exist.
#
def getVarValue(self, varName:str):
assert isinstance(varName, str)
item = self.getVar(varName)
if item is not None:
if isinstance(item, MediaWikiLocalSettingsComplexVariableAssignment):
# type: MediaWikiLocalSettingsComplexVariableAssignment
return item.getValue(self.getVarValueE)
else:
# type: TypeValue, MediaWikiLocalSettingsVariableAssignment, MediaWikiLocalSettingsArrayAppend
v = item.value
if isinstance(v, TypedValue):
if v.dataType == "magic":
# this is a "magic" variable. return the replacement value.
return self.__magicVarValues[v.value]
else:
return v.value
elif isinstance(v, list):
ret = []
for d in v:
ret.append(d.value)
return ret
else:
raise Exception("Implementation Error!")
return None
#
#
# Get a variable value.
# This method will resolve the value: If it contains magic constants or simple expressions the syntax will be evaluated and the resulting value returned.
#
# @return value This data.
#
def getVarValueE(self, varName:str):
assert isinstance(varName, str)
item = self.getVarValue(varName)
if item is not None:
return item
raise Exception("No such variable: " + repr(varName))
#
#
# Get a variable-like object.
#
# @return someObject This object returned is either of type:
# * TypeValue - if it is a constant
# * MediaWikiLocalSettingsVariableAssignment - if it is a constant assigned to a variable
# * MediaWikiLocalSettingsComplexVariableAssignment - if it is a complex variable assignment
# * MediaWikiLocalSettingsArrayAppend - if it is a value appended to an array
#
def getVar(self, varName:str):
assert isinstance(varName, str)
for stype, item in self.__data:
if stype in [ "arrayAppend", "varAssign", "varAssignComplex" ]:
if item.varName == varName:
return item
return None
#
def getIndexedVar1(self, varName, indexValue1):
assert isinstance(varName, str)
assert isinstance(indexValue1, TypedValue)
for stype, item in self.__data:
if stype == "varAssign":
if item.varName == varName:
v = item.indexValue
if (v != None) and (v == indexValue1):
return item
return None
#
def getIndexedVar2(self, varName, indexValue1, indexValue2):
assert isinstance(varName, str)
assert isinstance(indexValue1, TypedValue)
assert isinstance(indexValue2, TypedValue)
for stype, item in self.__data:
if stype == "varAssign":
if item.varName == varName:
v = item.indexValue
if (v != None) and (v == indexValue1) and (v == indexValue2):
return item
return None
#
def activateWiki(self):
v = self.getVar("wgReadOnly")
if v is None:
return
else:
v.deactivate()
#
def deactivateWiki(self, text):
v = self.getVar("wgReadOnly")
if v is None:
self.__data.append( ( "other", Token("NEWLINE", "\n", -1, -1) ) )
self.__data.append( ( "other", Token("NEWLINE", "\n", -1, -1) ) )
self.__data.append( ( "other", Token("NEWLINE", "\n", -1, -1) ) )
self.__data.append( ( "other", Token("NEWLINE", "\n", -1, -1) ) )
self.__data.append( ( "varAssign", MediaWikiLocalSettingsVariableAssignment(self.__changedFlag, -1, -1, True, "wgReadOnly", None, TypedValue("str1", text)) ) )
self.__data.append( ( "other", Token("NEWLINE", "\n", -1, -1) ) )
self.__data.append( ( "other", Token("NEWLINE", "\n", -1, -1) ) )
self.__changedFlag.setChanged(True)
else:
v.setValue(TypedValue("str1", text))
v.activate() # set this line to state "active" if it is commented out
#
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def __getType(something):
tName = something.__class__.__name__
return tName
#
#
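# ----
# A minimal usage sketch for MediaWikiLocalSettingsFile (illustrative only; the
# LocalSettings.php snippet and variable names below are made up).
if __name__ == "__main__":
	_demoText = (
		"<?php\n"
		"$wgSitename = \"MyWiki\";\n"
		"$wgEnableUploads = true;\n"
	)
	_ls = MediaWikiLocalSettingsFile()
	_ls.load(rawText = _demoText)                      # parse without touching the file system
	print(_ls.getVarValue("wgSitename"))               # MyWiki
	_ls.deactivateWiki("Maintenance in progress.")     # injects/activates $wgReadOnly
	print("\n".join(_ls.toLines()))                    # regenerated PHP including the new assignment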
``` |
{
"source": "jkpubsrc/python-module-jk-php-version-parser",
"score": 2
} |
#### File: src/jk_php_version_parser/_ComposerTokenPattern.py
```python
import typing
import jk_typing
import jk_version
from .ComposerToken import ComposerToken
class _ComposerTokenPattern(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, tokenType:str, text:str = None):
self.__tokenType = tokenType
self.__text = text
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def __str__(self):
return "ComposerTokenPattern<{}, {}>".format(
repr(self.__tokenType).replace("'", "\""),
repr(self.__text).replace("'", "\"")
)
#
def __repr__(self):
return "ComposerTokenPattern<{}, {}>".format(
repr(self.__tokenType).replace("'", "\""),
repr(self.__text).replace("'", "\"")
)
#
def tryMatch(self, token:ComposerToken) -> bool:
assert isinstance(token, ComposerToken)
if self.__tokenType != token.tokenType:
return False
if (self.__text is not None) and (self.__text != token.text):
return False
return True
#
#
```
#### File: python-module-jk-php-version-parser/testing/test_parsing.py
```python
import jk_testing
import jk_json
import jk_version
import jk_php_version_parser
PARSER = jk_php_version_parser.ComposerVersionParser()
def _parseAndCompareJSON(ctx:jk_testing.TestContext, textToParse:str, jsonExpected):
x = PARSER.parse(textToParse)
assert isinstance(x, jk_version.BaseVersionConstraint)
jsonReceived = x.toJSON()
linesReceived = jk_json.dumps(jsonReceived, indent="\t", sort_keys=True).split("\n")
linesExpected = jk_json.dumps(jsonExpected, indent="\t", sort_keys=True).split("\n")
if jsonReceived == jsonExpected:
ctx.log.debug("--input---" + "-" * 90)
ctx.log.debug(textToParse)
ctx.log.debug("--result--" + "-" * 90)
for line in linesReceived:
ctx.log.debug(line)
ctx.log.debug("-" * 100)
return
ctx.log.warn("--input---" + "-" * 90)
ctx.log.warn(textToParse)
ctx.log.warn("--result--" + "-" * 90)
for line in linesReceived:
ctx.log.warn(line)
ctx.log.warn("--expected" + "-" * 90)
for line in linesExpected:
ctx.log.warn(line)
ctx.log.warn("-" * 100)
raise jk_testing.AssertionException("received != expected!")
#
#
# Successes
#
@jk_testing.TestCase()
def test_parsing_elementar_1(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">= 1.2.3",
[ ">=", "1.2.3" ]
)
#
@jk_testing.TestCase()
def test_parsing_elementar_caret(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "^1.2.3",
[
"and",
[
[ ">=", "1.2.3" ],
[ "<", "2.0.0" ],
],
]
)
#
@jk_testing.TestCase()
def test_parsing_elementar_tilde(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "~1.2.3",
[
"and",
[
[ ">=", "1.2.3" ],
[ "<", "1.3.0" ],
],
]
)
#
@jk_testing.TestCase()
def test_parsing_multi_1(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=1.2.3 <1.4",
[
"and",
[
[ ">=", "1.2.3" ],
[ "<", "1.4" ],
],
]
)
#
@jk_testing.TestCase()
def test_parsing_multi_or(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "1.2.3 || 1.2.5",
[
"or",
[
[ "==", "1.2.3", ],
[ "==", "1.2.5", ],
],
]
)
#
#
# ----
# Test Cases https://getcomposer.org/doc/articles/versions.md
# ----
#
@jk_testing.TestCase()
def test_parsing_composer_1(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "1.3.2",
[ "==", "1.3.2", ],
)
#
@jk_testing.TestCase()
def test_parsing_composer_2(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=1.3.2",
[ ">=", "1.3.2", ],
)
#
@jk_testing.TestCase()
def test_parsing_composer_3(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "<1.3.2",
[ "<", "1.3.2", ],
)
#
@jk_testing.TestCase()
def test_parsing_composer_4a(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=1.3.0 <1.4.0",
[
"and",
[
[ ">=", "1.3.0", ],
[ "<", "1.4.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_4b(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "1.3.*",
[
"and",
[
[ ">=", "1.3.0", ],
[ "<", "1.4.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_5a(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=1.3.2 <1.4.0",
[
"and",
[
[ ">=", "1.3.2", ],
[ "<", "1.4.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_5b(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "~1.3.2",
[
"and",
[
[ ">=", "1.3.2", ],
[ "<", "1.4.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_6a(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=1.3.0 <2.0.0",
[
"and",
[
[ ">=", "1.3.0", ],
[ "<", "2.0.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_6b(ctx:jk_testing.TestContext):
x = PARSER.parse("~1.3")
_parseAndCompareJSON(ctx, "~1.3",
[
"and",
[
[ ">=", "1.3", ],
[ "<", "2.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_7a(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=1.3.2 <2.0.0",
[
"and",
[
[ ">=", "1.3.2", ],
[ "<", "2.0.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_7b(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "^1.3.2",
[
"and",
[
[ ">=", "1.3.2", ],
[ "<", "2.0.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_8a(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, ">=0.3.2 <0.4.0",
[
"and",
[
[ ">=", "0.3.2", ],
[ "<", "0.4.0", ],
]
]
)
#
@jk_testing.TestCase()
def test_parsing_composer_8b(ctx:jk_testing.TestContext):
_parseAndCompareJSON(ctx, "^0.3.2",
[
"and",
[
[ ">=", "0.3.2", ],
[ "<", "0.4.0", ],
]
]
)
#
#
# Errors
#
# TODO
################################################################################################################################
testDriver = jk_testing.TestDriver()
results = testDriver.runTests([
(test_parsing_elementar_1, True),
(test_parsing_elementar_caret, True),
(test_parsing_elementar_tilde, True),
(test_parsing_multi_1, True),
(test_parsing_multi_or, True),
(test_parsing_composer_1, True),
(test_parsing_composer_2, True),
(test_parsing_composer_3, True),
(test_parsing_composer_4a, True),
(test_parsing_composer_4b, True),
(test_parsing_composer_5a, True),
(test_parsing_composer_5b, True),
(test_parsing_composer_6a, True),
(test_parsing_composer_6b, True),
(test_parsing_composer_7a, True),
(test_parsing_composer_7b, True),
(test_parsing_composer_8a, True),
(test_parsing_composer_8b, True),
])
reporter = jk_testing.TestReporterHTML()
reporter.report(results, webbrowserType="chromium")
``` |
{
"source": "jkpubsrc/python-module-jk-pypiorgapi",
"score": 3
} |
#### File: src/jk_pypiorgapi/_CachedValue.py
```python
import time
class _CachedValue(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, valueProviderCallback, keepSeconds:int = 60, autoRenewKeep:bool = True):
assert callable(valueProviderCallback)
assert isinstance(keepSeconds, int)
assert isinstance(autoRenewKeep, bool)
# ----
self.__keepSeconds = keepSeconds
self.__autoRenewKeep = autoRenewKeep
self.__valueProviderCallback = valueProviderCallback
self.__value = None
self.__t = 0
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def __call__(self, *args, **kwargs):
t = time.time()
if (t - self.__t > self.__keepSeconds):
self.__value = self.__valueProviderCallback(*args, **kwargs)
if self.__autoRenewKeep:
self.__t = t
return self.__value
#
def invalidate(self):
self.__value = None
self.__t = 0
#
#
```
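`_CachedValue` re-invokes its provider callback only when the cached result is older than `keepSeconds`. A small sketch; the provider function and the timings are made up, and the private module path is assumed from the file header.
```python
# Illustrative sketch of the caching wrapper above.
import time
from jk_pypiorgapi._CachedValue import _CachedValue

def fetchPackageList(log=None):
	print("fetching ...")               # stands in for an expensive download
	return ["alpha", "beta", "gamma"]

cached = _CachedValue(fetchPackageList, keepSeconds=2)

print(cached(None))    # provider called ("fetching ..." printed)
print(cached(None))    # served from the cache
time.sleep(3)
print(cached(None))    # older than keepSeconds, provider called again
cached.invalidate()    # drop the cached value immediately
```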
#### File: src/jk_pypiorgapi/PyPiOrgAPI.py
```python
import re
import json
import typing
import urllib.parse
from bs4 import BeautifulSoup
from ._CachedValue import _CachedValue
from .URLFile import URLFile
class PyPiOrgAPI(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, keepSeconds:int = 120):
self.__listAllPackages = _CachedValue(self.__listAllPackagesCallback, keepSeconds=keepSeconds)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def __listAllPackagesCallback(self, log) -> list:
url = URLFile("https://pypi.org/simple/")
allPackages = []
for line in url.readText().split("\n"):
#m = re.match("<a\s+href=\"(/simple/[^/]+/)\">([^<]+)</a>", line)
m = re.match(r"\s+<a\shref=\"(/simple/.+?/)\">(.+?)</a>", line)
if m:
g1 = m.groups(1)
g2 = m.groups(2)
if g1 != g2:
if log:
log.warn("Differing names: {} and {}".format(repr(g1), repr(g2)))
else:
allPackages.append(g1)
return allPackages
#
def __saveBS4Tree(self, bs4data, fileName:str):
with open(fileName, "w") as fout:
for line in bs4data.prettify().split("\n"):
m = re.match("^\s+", line)
if m:
s = line[:m.end()]
fout.write(("\t" * len(s)) + line[len(s):] + "\n")
#
def __parsePackageSearchResultLI(self, xLI) -> list:
packageName = xLI.a.h3.find("span", { "class": "package-snippet__name" }).text.strip()
packageVersion = xLI.a.h3.find("span", { "class": "package-snippet__version" }).text.strip()
packageDescription = xLI.a.find("p", { "class": "package-snippet__description" }).text.strip()
return (packageName, packageVersion, packageDescription)
#
def __parsePackageSearchResultPage(self, baseURL:URLFile, xPage) -> tuple:
xDiv = xPage.find("div", { "class": "left-layout__main" })
xForm = xDiv.find("form", { "action": "/search/" })
#self.__saveBS4Tree(xForm, "xForm.html")
sResults = xForm.div.div.p.strong.text.strip()
sResults = sResults.replace(".", "")
sResults = sResults.replace(",", "")
if sResults.endswith("+"):
sResults = sResults[:-1]
nCountResults = int(sResults)
xPagination = xForm.find("div", { "class": "button-group--pagination" })
nMaxPage = -1
if xPagination:
#self.__saveBS4Tree(xPagination, "xPagination.html")
for xA in xPagination.findChildren("a", recursive=False):
sHREF = xA.get("href")
if sHREF:
m = re.search(r"page=([0-9]+)$", sHREF)
if m:
nPage = int(m.group(1))
if nPage > nMaxPage:
nMaxPage = nPage
packageList = []
xUL = xForm.find("ul")
for xChildLI in xUL.findChildren("li", recursive=False):
n, v, d = self.__parsePackageSearchResultLI(xChildLI)
packageList.append((n, v, d))
return nCountResults, nMaxPage, packageList
#
################################################################################################################################
## Public Methods
################################################################################################################################
#
# Retrieves a list of PyPi package names from https://pypi.org and returns it.
#
# Please note that this value is typically cached for 120 seconds. Retrieving this list takes quite some time as about 16 MByte of data need
# to be transferred. Therefore caching is mandatory here. Additionally you should not download that list too often.
#
# @return str[] Returns a list of PyPi package names.
# This method should always return a list. This method will only return <c>None</c>
# if data from the server could not be retrieved.
#
def listAllPackages(self, log = None) -> typing.Union[list,None]:
return self.__listAllPackages(log)
#
#
# This method retrieves information about a package and returns it
#
# @return dict Returns the raw JSON data.
# This method should always return a dictionary. This method will only return <c>None</c>
# if data from the server could not be retrieved, either because of a network error or
# because the specified package does not exist.
#
def getPackageInfoJSON(self, packageName:str, log = None) -> typing.Union[dict,None]:
url = URLFile("https://pypi.org/pypi/" + packageName +"/json")
return url.readJSON()
#
#
# Returns an iterator with all search results.
#
# @return int nResultNo The result index number. This is a counter starting at zero, enumerating all results.
# @return int nMaxResults The number of results to be expected. (Don't rely on this value: if it is very large not all
# results can necessarily be iterated, and it may change during the iteration.)
# @return str pkgName The name of the package.
# @return str pkgVersion The version of the package.
# @return str pkgDescription The description of the package.
#
def iteratePackagesByClassifier(self, searchTerm:str, classifiers:list, log = None) -> typing.Iterator[tuple]:
nPage = 1
nMaxPage = -1
nResultNo = 0
while True:
surl = "https://pypi.org/search/?q=" + urllib.parse.quote_plus(searchTerm) + "&page=" + str(nPage)
if classifiers:
surl += "&" + "&".join([ urllib.parse.quote_plus(c) for c in classifiers ])
#log.notice("Retrieving: " + surl)
url = URLFile(surl)
xPage = BeautifulSoup(url.readText(), "lxml")
#self.__saveBS4Tree(xPage, "out.html")
nCountResults, nMaxPage, packageList = self.__parsePackageSearchResultPage(url, xPage)
for n, v, d in packageList:
yield nResultNo, nCountResults, n, v, d
nResultNo += 1
if (nMaxPage < 0) or (nPage >= nMaxPage):
break
nPage += 1
#
#
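# Illustrative usage sketch (not part of the original file); it assumes network
# access to pypi.org and that the dependencies used above (bs4, URLFile) are
# available. The package name "requests" is only an example.
if __name__ == "__main__":
	api = PyPiOrgAPI(keepSeconds=120)

	# raw JSON metadata of a single package (None if it cannot be retrieved)
	info = api.getPackageInfoJSON("requests")
	if info:
		print(info["info"]["version"])

	# iterate search results; each item is
	# (resultNo, countResults, name, version, description)
	for nResultNo, nCountResults, pkgName, pkgVersion, pkgDescription in api.iteratePackagesByClassifier("http client", []):
		print(nResultNo, pkgName, pkgVersion)
		if nResultNo >= 4:
			break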
``` |
{
"source": "jkpubsrc/python-module-jk-pythonplugins",
"score": 2
} |
#### File: src/jk_pythonplugins/PluginWrapper.py
```python
import os
import sys
import importlib
import inspect
import time
import types
import jk_logging
#
# This is a wrapper around a python source code file that should be available to the rest of the python program as a plugin.
#
# The wrapped python source code must contain exactly one class. The class may define a constructor, but it must not require any arguments.
#
class PluginWrapper(object):
# ================================================================
# == Constructors/Destructors
# ================================================================
#
# Initialization method
#
# @param str moduleFilePath The absolute path to the (existing) python file
#
def __init__(self, moduleFilePath, initCallback):
if not moduleFilePath.endswith(".py"):
raise Exception("Not a python source code file: " + moduleFilePath)
if not os.path.isabs(moduleFilePath):
raise Exception("Not an absolute path: " + moduleFilePath)
self.__initCallback = initCallback
self.__moduleName = os.path.basename(moduleFilePath)
self.__moduleName = self.__moduleName[:-3]
self.__moduleDirPath = os.path.dirname(moduleFilePath)
self.__moduleFilePath = moduleFilePath
self.__modificationTimeStamp = -1
self.__bIsLoaded = False
self.__moduleInstance = None
self.__class = None
self.__classInstance = None
self.__classMethodNames = None
self.__extraData = None
#
# ================================================================
# == Properties
# ================================================================
#
# The absolute path of the python source code file this object wraps.
#
@property
def filePath(self):
return self.__moduleFilePath
#
#
# The name of this plugin. This name is derived from the file name, not the class implementing this plugin.
#
@property
def name(self):
return self.__moduleName
#
#
# Has the underlying source code file been changed?
#
@property
def isChanged(self):
mt = self.__getModificationTimeStamp(self.__moduleFilePath)
return mt != self.__modificationTimeStamp
#
#
# Is this plugin still in use?
#
@property
def isInUse(self):
if self.__moduleInstance is None:
return False
return sys.getrefcount(self.__moduleInstance) > 3
#
#
# Is the underlying plugin class loaded?
#
def isLoaded(self):
return self.__bIsLoaded
#
#
# Provides extra data about the plugin. This data is provided by the initialization callback.
#
@property
def extraData(self):
return self.__extraData
#
# ================================================================
# == Helper Methods
# ================================================================
def __getModificationTimeStamp(self, filePath):
if os.path.isfile(filePath):
try:
return os.path.getmtime(filePath)
except:
return -1
else:
return -1
#
# ================================================================
# == Public Methods
# ================================================================
#
# Load the source code file. For this to succeed the current state must be "UNLOADED".
# An exception is thrown on error.
#
# @param AbstractLogger log A logger to receive debug output (or <c>None</c> if not needed)
# @param bool bAcceptIsChangedAlsoOnError If <c>True</c> even a failed loading attempt will make the wrapper remember the file.
# Successive calls to <c>isChanged</c> will then return <c>False</c>. But if <c>False</c>
# is specified here <c>load()</c> will not remember the file if loading fails.
# In that case successive calls to <c>isChanged</c> will return <c>True</c>.
#
def load(self, log, bAcceptIsChangedAlsoOnError = False):
if self.__bIsLoaded:
log.debug("Module already loaded, nothing to do.")
return
mt = self.__getModificationTimeStamp(self.__moduleFilePath)
if mt < 0:
raise Exception("No such file: " + self.__moduleFilePath)
if bAcceptIsChangedAlsoOnError:
self.__modificationTimeStamp = mt
if not self.__moduleDirPath in sys.path:
log.debug("Adding module directory to sys.path ...")
sys.path.append(self.__moduleDirPath)
log.debug("Loading and parsing module ...")
self.__moduleInstance = importlib.import_module(self.__moduleName)
#print(sys.getrefcount(self.__moduleInstance))
log.debug("Scanning module for classes ...")
countClassesFound = 0
for name, element in inspect.getmembers(self.__moduleInstance):
if inspect.isclass(element) and (element.__module__ == self.__moduleInstance.__name__):
log.debug("Found: " + element.__name__)
self.__class = element
countClassesFound += 1
if countClassesFound == 0:
log.debug("No classes found in module: " + self.__moduleFilePath)
self.unload(log)
raise Exception("No classes found in module: " + self.__moduleFilePath)
if countClassesFound > 1:
log.debug("Multiple classes found in module: " + self.__moduleFilePath)
self.unload(log)
raise Exception("Multiple classes found in module: " + self.__moduleFilePath)
self.__classInstance = self.__class()
if self.__initCallback:
try:
self.__extraData = self.__initCallback(self.__classInstance)
except Exception as e:
log.error("Initialization callback failed:")
log.error(e)
self.unload(log)
raise Exception("Initialization callback failed for class in module: " + self.__moduleFilePath)
log.debug("Module loaded.")
self.__bIsLoaded = True
self.__modificationTimeStamp = mt
#for key in sys.modules:
# print(key + " -- " + str(sys.modules[key]))
#
#
# Removes all traces of the loaded python class.
#
# This method is idempotent. You may invoke it even if the underlying python source code is not yet loaded.
#
# @param AbstractLogger log A logger to receive debug output (or <c>None</c> if not needed)
#
def unload(self, log):
log.debug("Unloading module ...")
self.__classMethodNames = None
self.__classInstance = None
self.__class = None
self.__extraData = None
if self.__moduleInstance:
del sys.modules[self.__moduleName]
self.__moduleInstance = None
#self.__moduleInstance = importlib.reload(self.__moduleInstance)
self.__bIsLoaded = False
#
#
# Invoke a method.
# The underlying plugin class must be loaded for this to work.
#
# @param str methodName The name of the method to invoke
# @param list args Arguments
# @param dict kwargs Arguments
# @return mixed Returns the data the invoked method provides
#
def invoke(self, methodName, *args, **kwargs):
if not self.__bIsLoaded:
raise Exception("Plugin not loaded!")
m = getattr(self.__classInstance, methodName, None)
if m:
return m(*args, **kwargs)
else:
raise Exception("No such plugin method: " + methodName)
#
#
# Get a list of all methods available in the (loaded) plugin class.
# The underlying plugin class must be loaded for this to work.
#
# @return str[] Returns a tuple of strings.
#
def getMethodNames(self):
if not self.__bIsLoaded:
raise Exception("Plugin not loaded!")
if not self.__classMethodNames:
self.__classMethodNames = [x for x, y in self.__class.__dict__.items() if not x.startswith("_") and (type(y) == types.FunctionType)]
return tuple(self.__classMethodNames)
#
#
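# Illustrative usage sketch (not part of the original file); the plugin path
# used here is hypothetical. The referenced file must contain exactly one
# class with a no-argument constructor, as described above.
if __name__ == "__main__":
	log = jk_logging.ConsoleLogger.create()

	plugin = PluginWrapper("/tmp/plugins/MyPlugin.py", initCallback=None)
	plugin.load(log)
	print(plugin.name, plugin.getMethodNames())
	#result = plugin.invoke("run")		# invoke a plugin method by name
	plugin.unload(log)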
``` |
{
"source": "jkpubsrc/python-module-jk-simpleexec",
"score": 2
} |
#### File: src/jk_simpleexec/TextDataProcessingPolicy.py
```python
import typing
import jk_prettyprintobj
#
# This class defines the defaults for postprocessing received text data.
#
class TextDataProcessingPolicy(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self,
bRemoveLeadingEmptyLines:bool = None,
bRemoveTrailingEmptyLines:bool = None,
bRightTrimLines:bool = None,
):
self.bRightTrimLines = bRightTrimLines
self.bRemoveLeadingEmptyLines = bRemoveLeadingEmptyLines
self.bRemoveTrailingEmptyLines = bRemoveTrailingEmptyLines
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dumpVarNames(self) -> list:
return [
"bRightTrimLines",
"bRemoveLeadingEmptyLines",
"bRemoveTrailingEmptyLines",
]
#
################################################################################################################################
## Public Methods
################################################################################################################################
def clone(self):
return TextDataProcessingPolicy(
self.bRemoveLeadingEmptyLines,
self.bRemoveTrailingEmptyLines,
self.bRightTrimLines,
)
#
def override(self, overrides):
if overrides is None:
return self
assert isinstance(overrides, TextDataProcessingPolicy)
ret = self.clone()
for attrName in [ "bRemoveLeadingEmptyLines", "bRemoveTrailingEmptyLines", "bRightTrimLines" ]:
v = getattr(overrides, attrName)
if v is not None:
setattr(ret, attrName, v)
return ret
#
#
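# Illustrative usage sketch (not part of the original file): a default policy
# is partially overridden; fields left at None in the override keep the
# defaults.
if __name__ == "__main__":
	defaults = TextDataProcessingPolicy(
		bRemoveLeadingEmptyLines=True,
		bRemoveTrailingEmptyLines=True,
		bRightTrimLines=True,
	)
	overrides = TextDataProcessingPolicy(bRightTrimLines=False)

	effective = defaults.override(overrides)
	print(effective.bRightTrimLines)			# False
	print(effective.bRemoveLeadingEmptyLines)	# True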
``` |
{
"source": "jkpubsrc/python-module-jk-simpleusermgr",
"score": 3
} |
#### File: src/jk_simpleusermgr/UserMgr.py
```python
import os
import json
from jk_utils.file_rw import loadBinaryFile, writePrivateBinaryFile, writePrivateJSONFile
from jk_utils import ChModValue
from .User import User
class UserMgr(object):
def __init__(self, dirPath:str):
assert isinstance(dirPath, str)
self.__dirPath = dirPath
if not os.path.isdir(self.__dirPath):
os.mkdir(self.__dirPath)
os.chmod(self.__dirPath, ChModValue("rwx------").toInt())
self.__users = {}
allKeys = []
for entry in os.scandir(self.__dirPath):
if entry.is_dir():
allKeys.append(entry.name)
for userName in allKeys:
userDirPath = os.path.join(self.__dirPath, userName)
userDataFilePath = os.path.join(userDirPath, "data.json")
if os.path.isfile(userDataFilePath):
user = self._instantiateUserObject(userName, userDirPath)
self.__users[userName] = user
#
def __len__(self):
return len(self.__users)
#
def getAllUserNames(self) -> list:
return [ key for key in sorted(self.__users.keys()) ]
#
def getAllUsers(self) -> list:
return [ self.__users[key] for key in sorted(self.__users.keys()) ]
#
@property
def allUserNames(self) -> list:
return [ key for key in sorted(self.__users.keys()) ]
#
@property
def allUsers(self) -> list:
return [ self.__users[key] for key in sorted(self.__users.keys()) ]
#
def getUser(self, userName:str) -> User:
assert isinstance(userName, str)
u = self.__users.get(userName)
return u
#
def getUserE(self, userName:str) -> User:
assert isinstance(userName, str)
u = self.__users.get(userName)
if u is None:
raise Exception("No such user: " + repr(userName))
return u
#
def hasUser(self, userName:str) -> bool:
assert isinstance(userName, str)
u = self.__users.get(userName)
return u is not None
#
def getCreateUser(self, userName:str) -> User:
u = self.getUser(userName)
if u is None:
u = self.createUser(userName)
return u
#
def createUser(self, userName:str) -> User:
assert isinstance(userName, str)
if userName in self.__users:
raise Exception("User already exists: " + userName)
userDirPath = os.path.join(self.__dirPath, userName)
if not os.path.isdir(userDirPath):
os.mkdir(userDirPath)
os.chmod(userDirPath, ChModValue("rwx------").toInt())
userDataFilePath = os.path.join(self.__dirPath, userName, "data.json")
writePrivateJSONFile(self._generateInitialUserDataStructure(), userDataFilePath, bPretty=True)
u = self._instantiateUserObject(userName, userDirPath)
self.__users[userName] = u
return u
#
#
# This method creates a raw data structure for user objects.
# You can override this method if you would like to extend this data structure.
#
def _generateInitialUserDataStructure(self) -> dict:
return {
"version": 1,
"privileges": []
}
#
#
# This method creates a user object.
# You can override this method if you would like to return a subclass of <c>User</c>.
#
def _instantiateUserObject(self, userName:str, userDirPath:str) -> User:
return User(userName, userDirPath)
#
#
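# Illustrative usage sketch (not part of the original file); the directory
# path is hypothetical and will be created (and chmod'ed to rwx------) if it
# does not exist yet.
if __name__ == "__main__":
	mgr = UserMgr("/tmp/simpleusermgr-demo")
	u = mgr.getCreateUser("alice")		# creates the user on the first call
	print(mgr.getAllUserNames())		# ['alice']
	print(u.privileges)					# () for a freshly created user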
```
#### File: src/jk_simpleusermgr/User.py
```python
import os
import json
class User(object):
def __init__(self, userName:str, dirPath:str):
assert isinstance(userName, str)
dataFilePath = None
if dirPath is not None:
assert isinstance(dirPath, str)
dataFilePath = os.path.join(dirPath, "data.json")
self.__name = userName
self.__dirPath = dirPath
self.__dataFilePath = dataFilePath
if (dataFilePath is not None) and os.path.isfile(dataFilePath):
with open(dataFilePath, "r") as f:
self.__data = json.load(f)
assert self.__data["version"] == 1
else:
raise Exception("User data file not found: " + repr(dataFilePath))
#
@property
def dirPath(self) -> str:
return self.__dirPath
#
@property
def hasKeyPair(self) -> bool:
return False
#
@property
def privileges(self) -> tuple:
ret = self.__data.get("privileges", [])
assert isinstance(ret, list)
return tuple(ret)
#
@property
def name(self) -> str:
return self.__name
#
def __str__(self):
return self.__name
#
def hasPrivilege(self, privilege:str) -> bool:
assert isinstance(privilege, str)
ret = self.__data.get("privileges", [])
assert isinstance(ret, list)
return privilege in ret
#
def __eq__(self, value):
if isinstance(value, User):
return value.__name == self.__name
else:
return NotImplemented
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-simplexml",
"score": 3
} |
#### File: src/jk_simplexml/HAbstractElement.py
```python
class HAbstractElement(object):
def isDeepEqualTo(self, obj) -> bool:
raise NotImplementedError()
#
def isShallowEqualTo(self, obj) -> bool:
raise NotImplementedError()
#
def deepClone(self):
raise NotImplementedError()
#
#
```
#### File: src/jk_simplexml/HText.py
```python
from .HAbstractElement import HAbstractElement
class HText(HAbstractElement):
def __init__(self, text:str):
self.text = text
self.tag = None
#
def isDeepEqualTo(self, obj) -> bool:
if isinstance(obj, HText):
return obj.text == self.text
else:
return False
#
def isShallowEqualTo(self, obj) -> bool:
if isinstance(obj, HText):
return obj.text == self.text
else:
return False
#
def deepClone(self):
return HText(self.text)
#
def toPlainText(self, HWriter) -> str:
raise NotImplementedError()
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-svg",
"score": 2
} |
#### File: src/jk_svg/AbstractSVGElement.py
```python
import typing
import sys
import inspect
import random
import jk_typing
import jk_hwriter
from .BoundingBox import BoundingBox
from ._AttrMixinStyle import _AttrMixinStyle
from ._AttrMixinClass import _AttrMixinClass
def _toStr(v):
if isinstance(v, float):
s = list("{:.14f}".format(v)) # NOTE: A higher precision makes little sense as floating point numbers are typically stored as IEEE 754 doubles.
while (s[-1] == "0") and (s[-2] != "."):
del s[-1]
return "".join(s)
elif isinstance(v, int):
return str(v)
elif isinstance(v, str):
# TODO: perform valid XML encoding of text
return v
else:
return str(v)
#
class AbstractSVGElement(_AttrMixinStyle, _AttrMixinClass):
_SORT_ATTRIBUTES_KEY_MAP = {
"x": "_1_x",
"y": "_1_y",
"width": "_2_w",
"height": "_3_h",
"x1": "_4_x1",
"y1": "_4_y1",
"x2": "_5_x2",
"y2": "_5_y2",
"cx": "_6_cx",
"cy": "_6_cy",
"r": "_7_r",
"rx": "_8_rx",
"ry": "_8_ry",
}
################################################################################################################################
## Constructor
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __init__(self, tagName:str = None):
if not tagName:
raise Exception("tagName missing")
self._tagName = tagName # str | the name of the SVG element
self._attributes = {} # str->any | the attributes of this SVG element
self._children = [] # AbstractSVGElement[] | nested elements
self._moveCallback = None # callable |
self._textContent = None # str | NOTE: element text content will be ignored if we have children.
self._maskRef = None # str | if set: stores a reference to a mask
self._comment = None # str | if set: a comment that is to be placed before a component in the output
for clazz in inspect.getmro(self.__class__):
if clazz.__name__.startswith("_AttrMixin"):
if hasattr(clazz, "_init" + clazz.__name__):
getattr(clazz, "_init" + clazz.__name__)(self)
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def id(self) -> typing.Union[str,None]:
return self._attributes.get("id")
#
@id.setter
def id(self, v:typing.Union[str,None]):
if v is not None:
assert isinstance(v, str)
self._attributes["id"] = v
#
@property
def comment(self) -> typing.Union[str,None]:
return self._comment
#
@comment.setter
def comment(self, v:typing.Union[str,None]):
if v is not None:
assert isinstance(v, str)
self._comment = v
#
@property
def mask(self) -> typing.Union[str,None]:
return self._maskRef
#
@mask.setter
def mask(self, v:typing.Union[str,None]):
if v is not None:
assert isinstance(v, str)
self._maskRef = v
#
@property
def tagName(self) -> str:
return self._tagName
#
@property
def attributes(self) -> dict:
return self._attributes
#
@property
def children(self) -> list:
return self._children
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def __sortAttributes(self, attributesDict:dict) -> list:
sortedKeys = sorted(attributesDict.keys(), key=lambda x: AbstractSVGElement._SORT_ATTRIBUTES_KEY_MAP.get(x, x))
return [ (k,attributesDict[k]) for k in sortedKeys ]
#
@jk_typing.checkFunctionSignature()
def _toSVG(self, w:jk_hwriter.HWriter, bPretty:bool = True, extraChildren:list = None):
# ---- get children list
if extraChildren:
children = list(extraChildren)
children.extend(self._children)
else:
children = self._children
# ---- prepare attributes
self.cleanAttributes()
attributesMap = dict(self._attributes)
if self._maskRef:
attributesMap["mask"] = "url(#" + self._maskRef + ")"
sortedAttributes = self.__sortAttributes(attributesMap)
# ---- write comment
if bPretty:
if self._comment:
w.writeLn("<!-- " + self._comment + " -->")
# ---- determine output mode
bPrettyCompact = bPretty and not self._children
# ---- write regular output
if bPrettyCompact:
# pretty, but no children
w.write("<" + self._tagName)
if sortedAttributes:
for key, value in sortedAttributes:
w.write("\t" + key + "=\"" + _toStr(value) + "\"")
w.write("\t")
if self._textContent:
w.writeLn(">" + _toStr(self._textContent) + "</" + self.tagName + ">")
else:
w.writeLn("/>")
elif bPretty:
# pretty, possibly with children
if sortedAttributes:
w.writeLn("<" + self._tagName)
w.incrementIndent()
for key, value in sortedAttributes:
w.writeLn(key + "=\"" + _toStr(value) + "\"")
w.write(">" if (children or self._textContent) else "/>")
w.decrementIndent()
else:
w.write("<" + self._tagName)
w.write(">" if (children or self._textContent) else "/>")
if children:
w.writeLn()
w.incrementIndent()
for c in children:
c._toSVG(w, True)
w.decrementIndent()
w.writeLn("</" + self._tagName + ">")
elif self._textContent:
w.writeLn(_toStr(self._textContent) + "</" + self.tagName + ">")
else:
w.writeLn()
else:
# optimized output for non-humans
w.write("<" + self._tagName)
for key, value in sortedAttributes:
w.write(" " + key + "=\"" + _toStr(value) + "\"")
if children:
w.write(">")
for c in children:
c._toSVG(w, False)
w.write("</" + self._tagName + ">")
elif self._textContent:
w.write(">" + _toStr(self._textContent) + "</" + self.tagName + ">")
else:
w.write("/>")
#
################################################################################################################################
## Public Methods
################################################################################################################################
def generateUniqueID(self, prefix:str) -> str:
while True:
ret = prefix + str(random.randint(1, 999999))
bFound = False
for c in self._children:
if c.id == ret:
bFound = True
break
if not bFound:
return ret
#
def getBoundingPoints(self):
for c in self._children:
yield from c.getBoundingPoints()
#
def move(self, dx:float, dy:float):
if self._moveCallback:
self._moveCallback(dx, dy)
for c in self._children:
c.move(dx, dy)
#
def toSVG(self, bPretty:bool = True) -> str:
w = jk_hwriter.HWriter()
self._toSVG(w, bPretty)
return str(w)
#
def cleanAttributes(self):
for key in list(self._attributes.keys()):
a = self._attributes[key]
if (a is None) or (a is ""):
del self._attributes[key]
#
def getBoundingBox(self) -> typing.Union[BoundingBox,None]:
allX = []
allY = []
for px, py in self.getBoundingPoints():
allX.append(px)
allY.append(py)
if allX:
return BoundingBox(min(allX), min(allY), max(allX), max(allY))
else:
return None
#
def __enter__(self, *args):
return self
#
def __exit__(self, *args):
pass
#
#
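# Illustrative sketch (not part of the original file): serialization demo via
# a minimal throw-away subclass. Real code would use the concrete element
# classes of this package (SVGRect, SVGEllipse, SVGGroup, ...).
if __name__ == "__main__":
	class _DemoRect(AbstractSVGElement):
		def __init__(self):
			super().__init__("rect")
		#
	#

	r = _DemoRect()
	r.attributes["x"] = 10
	r.attributes["y"] = 20
	r.attributes["width"] = 100.5
	r.attributes["height"] = 50
	print(r.toSVG())			# pretty-printed output
	print(r.toSVG(False))		# compact single-line output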
```
#### File: src/jk_svg/_AttrMixinCXCY.py
```python
import typing
import jk_typing
import jk_hwriter
class _AttrMixinCXCY:
################################################################################################################################
## Constructor
################################################################################################################################
def _init_AttrMixinCXCY(self):
self._moveCallback = self.__move
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def _xy(self) -> tuple:
return self._attributes.get("cx", 0), self._attributes.get("cy", 0)
#
@_xy.setter
def _xy(self, value:tuple):
assert isinstance(value, (list,tuple))
assert len(value) >= 2
assert isinstance(value[0], (int,float))
assert isinstance(value[1], (int,float))
self._attributes["cx"] = value[0]
self._attributes["cy"] = value[1]
#
@property
def cx(self) -> float:
return self._attributes.get("cx", 0)
#
@cx.setter
def cx(self, v:float):
self._attributes["cx"] = v
#
@property
def cy(self) -> float:
return self._attributes.get("cy", 0)
#
@cy.setter
def cy(self, v:float):
self._attributes["cy"] = v
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def __move(self, dx:float, dy:float):
self.cx += dx
self.cy += dy
#
################################################################################################################################
## Public Methods
################################################################################################################################
def setCenter(self, cx, cy):
self.cx = cx
self.cy = cy
#
def getCenter(self) -> list:
return [ self.cx, self.cy ]
#
#
```
#### File: src/jk_svg/_AttrMixinWidthHeight.py
```python
import typing
import jk_typing
import jk_hwriter
class _AttrMixinWidthHeight:
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def width(self) -> float:
return self._attributes.get("width", 0)
#
@width.setter
def width(self, v:float):
self._attributes["width"] = v
#
@property
def height(self) -> float:
return self._attributes.get("height", 0)
#
@height.setter
def height(self, v:float):
self._attributes["height"] = v
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def setSize(self, width, height):
self.width = width
self.height = height
#
def getSize(self) -> list:
return [ self.width, self.height ]
#
#
```
#### File: src/jk_svg/SVGDefs.py
```python
import typing
import jk_typing
import jk_hwriter
from .AbstractSVGElement import AbstractSVGElement
from ._GroupElementsMixin import _GroupElementsMixin
from .SVGGroup import SVGGroup
from .SVGMask import SVGMask
class SVGDefs(AbstractSVGElement, _GroupElementsMixin):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self):
super().__init__("defs")
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def createGroup(self):
ret = SVGGroup()
self._children.append(ret)
return ret
#
def createMask(self):
ret = SVGMask()
self._children.append(ret)
return ret
#
#
```
#### File: src/jk_svg/SVGEllipse.py
```python
import typing
import jk_typing
import jk_hwriter
from .AbstractSVGElement import AbstractSVGElement
from ._AttrMixinCXCY import _AttrMixinCXCY
from ._AttrMixinRXRY import _AttrMixinRXRY
class SVGEllipse(AbstractSVGElement, _AttrMixinCXCY, _AttrMixinRXRY):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self):
super().__init__("ellipse")
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def getBoundingPoints(self):
yield from super().getBoundingPoints()
yield self.cx - self.rx, self.cy
yield self.cx + self.rx, self.cy
yield self.cx, self.cy - self.ry
yield self.cx, self.cy + self.ry
#
def setBounds(self, x, y, width, height):
self.rx = width / 2
self.cx = x + self.rx
self.ry = height / 2
self.cy = y + self.ry
#
def getBounds(self) -> list:
x = self.cx - self.rx
y = self.cy - self.ry
w = self.rx * 2
h = self.ry * 2
return [ x, y, w, h ]
#
#
```
#### File: src/jk_svg/SVGGroup.py
```python
import typing
import jk_typing
import jk_hwriter
from .AbstractSVGElement import AbstractSVGElement
from ._GroupElementsMixin import _GroupElementsMixin
from .Transformer import Transformer
from .SVGMask import SVGMask
class SVGGroup(AbstractSVGElement, _GroupElementsMixin):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self):
super().__init__("g")
self.__transformer = Transformer(self._attributes.get("transform"))
self.__transformer._connectedSVGControl = self
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def transform(self) -> Transformer:
return self.__transformer
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def createGroup(self):
ret = SVGGroup()
self._children.append(ret)
return ret
#
def createMask(self):
ret = SVGMask()
self._children.append(ret)
return ret
#
#
```
#### File: src/jk_svg/SVGRect.py
```python
import typing
import jk_typing
import jk_hwriter
from .AbstractSVGElement import AbstractSVGElement
from ._AttrMixinXY import _AttrMixinXY
from ._AttrMixinWidthHeight import _AttrMixinWidthHeight
from .BoundingBox import BoundingBox
class SVGRect(AbstractSVGElement, _AttrMixinXY, _AttrMixinWidthHeight):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self):
super().__init__("rect")
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def getBoundingPoints(self):
yield from super().getBoundingPoints()
x1, y1 = self.x, self.y
x2, y2 = x1 + self.width, y1 + self.height
yield x1, y1
yield x1, y2
yield x2, y1
yield x2, y2
#
def setBounds(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
#
def getBounds(self) -> list:
return [ self.x, self.y, self.width, self.height ]
#
#
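# Illustrative usage sketch (not part of the original file): bounds handling
# of a rectangle element.
if __name__ == "__main__":
	r = SVGRect()
	r.setBounds(10, 20, 100, 50)
	print(r.getBounds())			# [10, 20, 100, 50]
	print(r.getBoundingBox())		# bounding box spanning (10, 20) .. (110, 70)
	print(r.toSVG(bPretty=False))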
``` |
{
"source": "jkpubsrc/python-module-jk-sysinfo",
"score": 2
} |
#### File: python-module-jk-sysinfo/examples/collector_async.py
```python
import os
import sys
import re
import time
import json
import jk_logging
import jk_sysinfo
#import jk_json
#import jk_flexdata
import jk_utils
from jk_trioscheduler import *
from fabric import Connection
import jk_pwdinput
REMOTE_HOST = "127.0.0.1"
REMOTE_PORT = 22
REMOTE_LOGIN = "<login>"
REMOTE_PASSWORD = jk_pwdinput.readpwd("Password for " + REMOTE_LOGIN + "@" + REMOTE_HOST + ": ")
c = Connection(host=REMOTE_HOST, user=REMOTE_LOGIN, port=REMOTE_PORT, connect_kwargs={"password": REMOTE_PASSWORD})
SYSTEM_ID = "nbxxxxxxxx"
#PRETTY = False
PRETTY = True
RETRIEVERS = (
( "lsb_release_a", jk_sysinfo.get_lsb_release_a),
( "lshw", jk_sysinfo.get_lshw),
( "mobo", jk_sysinfo.get_motherboard_info),
( "bios", jk_sysinfo.get_bios_info),
( "proccpu", jk_sysinfo.get_proc_cpu_info),
( "cpu", jk_sysinfo.get_cpu_info),
( "sensors", jk_sysinfo.get_sensors),
( "sysload", jk_sysinfo.get_proc_load_avg),
( "mem", jk_sysinfo.get_proc_meminfo),
( "lsblk", jk_sysinfo.get_lsblk),
( "reboot", jk_sysinfo.get_needs_reboot),
( "mounts", jk_sysinfo.get_mount),
( "df", jk_sysinfo.get_df),
( "net_info", jk_sysinfo.get_net_info),
( "uptime", jk_sysinfo.get_uptime),
)
print("Now repeatedly retrieving data ...")
class GlobalAppObject(object):
def __init__(self):
self.n = 0
self.log = jk_logging.ConsoleLogger.create()
#
#
async def resultReporter(taskReport:TaskReport):
log2 = taskReport.ctx.app.log.descend(str(taskReport))
taskReport.ctx.log.forwardTo(log2)
#
async def prepareTaskContext(ctx:TaskContext):
# print("Preparing: " + ctx.identifier)
ctx.log = jk_logging.BufferLogger.create()
#
async def retrieveData(ctx:TaskContext, theData):
data = {}
tAll = time.time()
for key, retriever in RETRIEVERS:
t = time.time()
try:
v = retriever(c)
except Exception as ee:
print("ERROR @ " + key)
v = None
dt = time.time() - t
#if isinstance(v, (list, tuple)):
# v = [ jk_flexdata.createFromData(x) for x in v ]
#else:
# v = jk_flexdata.createFromData(v)
data[key] = {
"_duration": dt,
"data": v,
"_t": t,
}
dtAll = time.time() - tAll
data["_t"] = tAll
data["_duration"] = dtAll
print("Duration:", dtAll)
if PRETTY:
s = json.dumps(data, indent="\t")
else:
s = json.dumps(data)
with jk_utils.file_rw.openWriteText("data/" + SYSTEM_ID + ".json", bSafeWrite=True) as f:
f.write(s)
#
async def setup(scheduler):
scheduler.setApp(GlobalAppObject())
await scheduler.defineRepeatingTask(
identifier="*",
rescheduleEveryNSeconds=20,
timeOut=18,
theCallback=retrieveData,
theData=None,
prepareTaskContextCallback=None,
bReportTermination=False)
#
Scheduler().run(setup, resultReporter)
```
#### File: src/bin/sysinfo.py
```python
import os
import sys
import re
import jk_console
import jk_json
import jk_flexdata
from jk_typing import *
import jk_sysinfo
import jk_sysinfo.entity
"""
from fabric import Connection
import jk_pwdinput
REMOTE_HOST = "<ipaddress>"
REMOTE_PORT = 22
REMOTE_LOGIN = "<login>"
REMOTE_PASSWORD = jk_pwdinput.readpwd("Password for " + REMOTE_LOGIN + "@" + REMOTE_HOST + ": ")
c = Connection(host=REMOTE_HOST, user=REMOTE_LOGIN, port=REMOTE_PORT, connect_kwargs={"password": REMOTE_PASSWORD})
"""
c = None
x = jk_sysinfo.get_etc_os_release(c)
os_release = jk_flexdata.createFromData(jk_sysinfo.get_etc_os_release(c))
bIsRPi = os_release.distribution == "raspbian"
data_lsb_release_a = jk_flexdata.createFromData(jk_sysinfo.get_lsb_release_a(c)) # static
data_lshw = jk_flexdata.createFromData(jk_sysinfo.get_lshw(c)) # static
if bIsRPi:
data_proccpu_extra = jk_flexdata.createFromData(jk_sysinfo.get_proc_cpu_info(c)[1]) # static
data_mobo_json = {
"vendor": "Raspberry Pi Foundation",
}
if data_proccpu_extra.hardware:
data_mobo_json["soc"] = data_proccpu_extra.hardware
data_mobo_json["serial"] = data_proccpu_extra.serial
data_mobo_json["name"] = data_proccpu_extra.model
data_mobo = jk_flexdata.createFromData(data_mobo_json)
else:
data_mobo = jk_flexdata.createFromData(jk_sysinfo.get_motherboard_info(c)) # static
data_bios = jk_flexdata.createFromData(jk_sysinfo.get_bios_info(c)) # static
data_proccpu = [ jk_flexdata.createFromData(x) for x in jk_sysinfo.get_proc_cpu_info(c)[0] ] # static, (runtime)
data_cpu = jk_flexdata.createFromData(jk_sysinfo.get_cpu_info(c)) # static
if bIsRPi:
t = jk_sysinfo.get_vcgencmd_measure_temp(c)
v = jk_sysinfo.get_vcgencmd_measure_volts(c)
data_sensors_json = {
"coretemp": {
"device": "coretemp",
"sensorData": {
"temp1": {
"sensor": "temp",
"value": t["cpu"]["temp"],
}
}
},
"corevolts": {
"device": "corevolts",
"sensorData": {
"temp1": {
"sensor": "volt",
"value": v["cpu"]["volt"],
}
}
},
"ramolts": {
"device": "ramvolts",
"sensorData": {
"volt1": {
"sensor": "volt",
"value": v["ram"]["volt"],
}
}
}
}
data_sensors = jk_flexdata.createFromData(data_sensors_json) # runtime
else:
data_sensors = jk_flexdata.createFromData(jk_sysinfo.get_sensors(c)) # runtime
data_sysload = jk_flexdata.createFromData(jk_sysinfo.get_proc_load_avg(c)) # runtime
data_mem = jk_flexdata.createFromData(jk_sysinfo.get_proc_meminfo(c)) # runtime
data_lsblk = jk_flexdata.createFromData(jk_sysinfo.get_lsblk(c)) # runtime
data_reboot = jk_flexdata.createFromData(jk_sysinfo.get_needs_reboot(c)) # runtime
data_mounts = jk_flexdata.createFromData(jk_sysinfo.get_mount(c)) # runtime
data_df = jk_flexdata.createFromData(jk_sysinfo.get_df(c)) # runtime
data_net_info = jk_flexdata.createFromData(jk_sysinfo.get_net_info(c)) # runtime
data_uptime = jk_flexdata.createFromData(jk_sysinfo.get_uptime(c)) # runtime
if not data_reboot:
print(jk_console.Console.ForeGround.RED + "WARNING: Packet 'needrestart' not installed." + jk_console.Console.RESET)
################################################################
print("\n#### bios ####\n")
print("static")
print("\tvendor:", data_bios.vendor)
print("\tversion:", data_bios.version)
print("\tdate:", data_bios.date)
print("-")
################################################################
print("\n#### motherboard ####\n")
print("static")
print("\tvendor:", data_mobo.vendor)
print("\tname:", data_mobo.name)
print("\tversion:", data_mobo.version)
print("-")
################################################################
print("\n#### busses and bus devices ####\n")
print("static")
def printPCIStruct(data:jk_flexdata.FlexObject, indent:str=""):
print(indent + data["class"].upper()
+ " " + (data.product if data.product else "-")
+ " (" + data.vendor + ")"
+ " " + (data.description if data.description else "-")
)
if data.children:
for c in data.children:
printPCIStruct(c, indent=indent + "\t")
#
bridge = data_lshw._findR(_class="bridge")
printPCIStruct(bridge, indent="\t")
print("-")
################################################################
print("\n#### system ####\n")
print("static")
print("\thostname:", data_lshw.id) # hostname
print("\tos distribution:", data_lsb_release_a.distribution)
print("\tos version:", data_lsb_release_a.version)
print("\tis LTS version:", data_lsb_release_a.lts)
print("runtime")
print("\tprocesses:", data_sysload.processes_total)
print("\tsystem load:", data_sysload.load1, "/", data_sysload.load5, "/", data_sysload.load15)
days, hours, minutes, seconds, milliseconds = jk_sysinfo.convertSecondsToHumanReadableDuration(data_uptime.uptimeInSeconds)
print("\tuptime:", days, "day(s),", hours, "hour(s),", minutes, "minute(s),", seconds, "second(s)")
if data_reboot.needsReboot:
updatesRequired = set()
if data_reboot.updateMicroCodeOrABI:
updatesRequired.add("CPU or ABI")
if data_reboot.updateKernel:
updatesRequired.add("kernel")
if not updatesRequired:
raise Exception("Reboot required but no reason could be determined!")
print(jk_console.Console.ForeGround.ORANGE + "\tUpdate required:", ",".join(updatesRequired) + jk_console.Console.RESET)
print("-")
################################################################
print("\n#### cpu ####\n")
print("static")
print("\tvendor:", data_proccpu[0].vendor_id)
print("\tmodel:", data_proccpu[0].model_name)
print("\tspeed:", jk_sysinfo.formatFrequencyRangeS(data_cpu.freq_min * 1000000, data_cpu.freq_max * 1000000))
print("\tcpu family:", data_proccpu[0].cpu_family)
print("\tcores:", len(data_proccpu), "(hyperthreading)" if ("ht" in data_proccpu[0].flags) else "")
if "cache_size" in data_proccpu[0]._keys():
print("\tcpu cache size:", data_proccpu[0].cache_size)
if data_proccpu[0].bugs:
print("\tbugs:", ", ".join(data_proccpu[0].bugs))
print("-")
################################################################
print("\n#### memory ####\n")
print("runtime")
mem = data_lshw._findR(id="memory")
assert mem.units == "bytes"
#print("size:", jk_sysinfo.formatBytesS(int(mem.size)))
print("\tmem total:", jk_sysinfo.formatBytesS(data_mem.MemTotal * 1024))
print("\tmem available:", jk_sysinfo.formatBytesS(data_mem.MemAvailable * 1024))
print("\tmem free:", jk_sysinfo.formatBytesS(data_mem.MemFree * 1024))
print("\tmem buffers:", jk_sysinfo.formatBytesS(data_mem.Buffers * 1024))
print("\tmem cached:", jk_sysinfo.formatBytesS(data_mem.Cached * 1024))
print("\tswap total:", jk_sysinfo.formatBytesS(data_mem.SwapTotal * 1024))
print("\tswap free:", jk_sysinfo.formatBytesS(data_mem.SwapFree * 1024))
print("\tswap cached:", jk_sysinfo.formatBytesS(data_mem.SwapCached * 1024))
print("-")
################################################################
print("\n#### display ####\n")
print("static")
for display in data_lshw._findAllR(id="display"):
print("\tvendor:", display.vendor)
print("\tproduct:", display.product)
print("\tdriver:", display.configuration.driver)
print("-")
################################################################
print("\n#### storage ####\n")
print("static")
#for storage in data_lshw._findAllR(id="storage"):
# print("\tvendor:", storage.vendor)
# print("\tproduct:", storage.product)
# print("\tdescription:", storage.description)
# print("\tdriver:", storage.configuration.driver)
# print("-")
#for storage in data_lshw._findAllR(id="cdrom"):
# print("\tvendor:", storage.vendor)
# print("\tproduct:", storage.product)
# print("\tdescription:", storage.description)
# print("-")
data_lsblk_disks = jk_sysinfo.filter_lsblk_devtree(data_lsblk._toDict(), type="disk")
diskTable = jk_console.SimpleTable()
diskTable.addRow(
"device",
"model",
"vendor",
"serial",
"firmwareRevision",
"formFactor",
"diskGranularity",
"rotationRate",
"transport",
"size",
"uuid",
"readOnly",
"rotational",
"hotplug",
"NCQ",
"TRIM",
).hlineAfterRow = True
for jDisk in data_lsblk_disks:
devicePath = jDisk["dev"]
data_hdparam_I = jk_sysinfo.get_hdparm_I(devPath=devicePath)
di = jk_sysinfo.entity.DriveInfo(jDisk, data_hdparam_I)
diskTable.addRow(
di.devicePath,
di.model,
di.vendor,
di.serial,
di.firmwareRevision,
di.formFactor,
di.diskGranularity,
di.nominalMediaRotationRate,
di.transport,
di.size,
di.uuid if di.uuid else "-",
di.isReadOnly,
di.isRotational,
di.isHotplug,
di.isNCQSupported,
di.isTRIMSupported,
)
diskTable.print(prefix="\t")
print("-")
################################################################
print("\n#### multimedia ####\n")
print("static")
for multimedia in data_lshw._findAllR(id="multimedia"):
print("\tvendor:", multimedia.vendor)
print("\tproduct:", multimedia.product)
print("\tdriver:", multimedia.configuration.driver)
print("-")
################################################################
print("\n#### network (hardware) ####\n")
print("static")
for network in list(data_lshw._findAllR(id="network")) + list(data_lshw._findAllR(id=re.compile("^network:"))):
#jk_json.prettyPrint(network._toDict())
print("\tvendor:", network.vendor)
print("\tproduct:", network.product)
print("\tdevice:", network.logicalname) # network device name
if network.configuration.link:
print("\thas_link:", network.configuration.link == "yes")
else:
print("\thas_link:", "unknown")
if network.capabilities.tp:
# regular twisted pair network
if network.maxSpeedInBitsPerSecond:
speed, unit = jk_sysinfo.formatBitsPerSecond(network.maxSpeedInBitsPerSecond)
print("\tspeed maximum:", speed, unit) # general speed in bits/s
if network.configuration.speed:
print("\tspeed current:", network.configuration.speed) # current speed
if network.configuration.duplex:
print("\tduplex:", network.configuration.duplex)
elif network.configuration.wireless:
# regular wireless network
print("\twireless standard:", network.configuration.wireless) # "IEEE 802.11"
else:
raise Exception("Unknown network type")
print("\tdescription:", network.description)
print("\tdriver:", network.configuration.driver)
print("\tmac_addr:", network.serial)
print("-")
################################################################
print("\n#### sensors ####\n")
def formatSensorData(data:jk_flexdata.FlexObject) -> str:
if data._isEmpty():
return "n/a"
if data.sensor == "volt":
return str(data.value) + "V"
if data.sensor == "fan":
return str(data.value) + " rpm"
elif data.sensor == "temp":
if data.crit and data.max:
return jk_sysinfo.formatTemperatureGraphC(data.value, data.crit) + " (max: " + str(data.max) + ", crit: " + str(data.crit) + ")"
#return str(data.value) + " °C (max: " + str(data.max) + ", crit: " + str(data.crit) + ")"
else:
return jk_sysinfo.formatTemperatureGraphC(data.value)
#return str(data.value) + " °C"
else:
raise Exception("Unknown: " + repr(data.sensor))
#
print("runtime")
for data in data_sensors._values():
#jk_json.prettyPrint(data._toDict())
for sensorItemName, sensorItemStruct in data.sensorData._items():
print("\t" + data.device + "." + sensorItemName + ": " + formatSensorData(sensorItemStruct))
print("-")
################################################################
print("\n#### network (os) ####\n")
print("runtime")
table = jk_console.SimpleTable()
table.addRow(
"ifname",
"loop",
"wlan",
"mac",
"mtu",
"rx pkgs",
"rx dropped",
"rx errors",
"tx pkgs",
"tx dropped",
"tx errors",
).hlineAfterRow = True
for networkInterface, networkInterfaceData in data_net_info._items():
table.addRow(
networkInterface,
networkInterfaceData.is_loop,
networkInterfaceData.is_wlan,
networkInterfaceData.mac_addr,
networkInterfaceData.mtu,
networkInterfaceData.rx_packets,
networkInterfaceData.rx_dropped,
networkInterfaceData.rx_errors,
networkInterfaceData.tx_packets,
networkInterfaceData.tx_dropped,
networkInterfaceData.tx_errors,
)
table.print(prefix="\t")
print("-")
################################################################
print("\n#### drives ####\n")
print("runtime")
@checkFunctionSignature()
def printDevice(data_lsblk:jk_flexdata.FlexObject, data_mounts:jk_flexdata.FlexObject, data_df:jk_flexdata.FlexObject, indent:str=""):
if data_lsblk.mountpoint and data_lsblk.mountpoint.startswith("/snap"):
return
s = indent + data_lsblk.dev
if data_lsblk.mountpoint:
s += " @ "
s += data_lsblk.mountpoint
sAdd = " :: "
else:
sAdd = " :: "
if data_lsblk.uuid:
s += sAdd + repr(data_lsblk.uuid)
sAdd = " ~ "
if data_lsblk.fstype:
s += sAdd + data_lsblk.fstype
sAdd = " ~ "
print(s)
indent += "\t"
if data_mounts and data_lsblk.mountpoint:
data_df_2 = data_df._get(data_lsblk.mountpoint)
#jk_json.prettyPrint(data_mounts._toDict())
#jk_json.prettyPrint(data_df._toDict())
if data_df_2:
print(indent
+ "total:", jk_sysinfo.formatBytesS(data_df_2.spaceTotal)
+ ", used:", jk_sysinfo.formatBytesS(data_df_2.spaceUsed)
+ ", free:", jk_sysinfo.formatBytesS(data_df_2.spaceFree)
+ ", filled:", jk_sysinfo.formatPercentGraphC(data_df_2.spaceUsed, data_df_2.spaceTotal), jk_sysinfo.formatPercent(data_df_2.spaceUsed, data_df_2.spaceTotal)
)
#jk_json.prettyPrint(data_df_2._toDict())
else:
print("Not found: " + data_lsblk.mountpoint)
if data_lsblk.children:
for c in data_lsblk.children:
printDevice(c, data_mounts, data_df, indent)
#
# TODO: drive models
#print(data_lsblk._keys())
for d in data_lsblk.deviceTree:
printDevice(d, data_mounts, data_df, "\t")
# TODO: list logical drives
print("-")
################################################################
print()
```
#### File: src/jk_sysinfo/get_dpkg_list.py
```python
import re
from jk_cachefunccalls import cacheCalls
from .parsing_utils import *
from .invoke_utils import run
from jk_version import Version
#
# Returns:
#
# {
# ...
# "netcat-openbsd": "1.105-7ubuntu1",
# "netpbm": "2:10.0-15.3",
# "nettle-dev", "3.2-1ubuntu0.16.04.1",
# ...
# }
#
def parse_dpkg_list(stdout:str, stderr:str, exitcode:int) -> dict:
"""
...
netcat-openbsd\t1.105-7ubuntu1
netpbm\t2:10.0-15.3
nettle-dev\t3.2-1ubuntu0.16.04.1
...
"""
if exitcode != 0:
raise Exception("dpkg-query failed with exit code " + str(exitcode))
lines = stdout.strip().split("\n")
ret = {}
for line in lines:
name, version = line.split("\t")
p = name.find(":")
if p > 0:
name = name[:p]
ret[name] = version
return ret
#
#
# Returns:
#
# {
# ...
# "netcat-openbsd": "1.105-7ubuntu1",
# "netpbm": "2:10.0-15.3",
# "nettle-dev", "3.2-1ubuntu0.16.04.1",
# ...
# }
#
@cacheCalls(seconds=3, dependArgs=[0])
def get_dpkg_list(c = None) -> dict:
stdout, stderr, exitcode = run(c, "/usr/bin/dpkg-query -W")
return parse_dpkg_list(stdout, stderr, exitcode)
#
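# Illustrative usage sketch (not part of the original file); it assumes a
# Debian based system with /usr/bin/dpkg-query available. With c=None the
# command runs locally; a fabric Connection may be passed to query a remote
# host instead.
if __name__ == "__main__":
	packages = get_dpkg_list()
	print(len(packages), "packages installed")
	print(packages.get("bash"))		# version string, e.g. "5.0-6ubuntu1.1"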
```
#### File: src/jk_sysinfo/get_etc_group.py
```python
import os
from jk_cachefunccalls import cacheCalls
import jk_etcpasswd
from .parsing_utils import *
from .invoke_utils import run
def parse_etc_group(stdoutGroup:str, stdoutGShadow:str) -> dict:
grpFile = jk_etcpasswd.GrpFile("/etc/group", "/etc/gshadow", stdoutGroup, stdoutGShadow)
return grpFile.toJSON()
#
def get_etc_group(c = None) -> dict:
if os.geteuid() != 0:
raise Exception("Must be root!")
stdoutGroup, stderrGroup, exitcodeGroup = run(c, "cat /etc/group")
assert exitcodeGroup == 0
assert not stderrGroup
stdoutGShadow, stderrGShadow, exitcodeGShadow = run(c, "cat /etc/gshadow")
assert exitcodeGShadow == 0
assert not stderrGShadow
return parse_etc_group(stdoutGroup, stdoutGShadow)
#
```
#### File: src/jk_sysinfo/get_lsb_release_a.py
```python
from jk_cachefunccalls import cacheCalls
from .parsing_utils import *
from .invoke_utils import run
_parserColonKVP = ParseAtFirstDelimiter()
#
# Returns:
#
# {
# "distribution": "ubuntu",
# "lts": true,
# "version": "16.04.6"
# }
#
def parse_lsb_release_a(stdout:str, stderr:str, exitcode:int) -> dict:
if exitcode != 0:
raise Exception("lsb_release failed with exit code " + str(exitcode))
ret = _parserColonKVP.parseLines(stdout.split("\n"))
return {
"version": ret["Description"].split()[1],
"lts": ret["Description"].endswith(" LTS"),
"distribution": ret["Distributor ID"].lower(),
}
#
#
# Returns:
#
# {
# "distribution": "ubuntu",
# "lts": true,
# "version": "16.04.6"
# }
#
@cacheCalls(seconds=3, dependArgs=[0])
def get_lsb_release_a(c = None) -> dict:
stdout, stderr, exitcode = run(c, "/usr/bin/lsb_release -a")
return parse_lsb_release_a(stdout, stderr, exitcode)
#
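# Illustrative usage sketch (not part of the original file); it assumes an
# LSB compliant distribution with /usr/bin/lsb_release installed. With c=None
# the command runs locally; a fabric Connection targets a remote host.
if __name__ == "__main__":
	info = get_lsb_release_a()
	print(info["distribution"], info["version"], "(LTS)" if info["lts"] else "")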
```
#### File: src/jk_sysinfo/get_mount.py
```python
import re
from jk_cachefunccalls import cacheCalls
from .parsing_utils import *
from .invoke_utils import run
#
# Returns:
#
# {
# "/sys": {
# "dev": null,
# "fstype": "sysfs",
# "fstype2": "sysfs",
# "mountPoint": "/sys",
# "options": [
# "rw",
# "nosuid",
# "nodev",
# "noexec",
# "relatime"
# ]
# },
# "/proc": {
# "dev": null,
# "fstype": "proc",
# "fstype2": "proc",
# "mountPoint": "/proc",
# "options": [
# "rw",
# "nosuid",
# "nodev",
# "noexec",
# "relatime"
# ]
# },
# "/dev": {
# "dev": null,
# "fstype": "devtmpfs",
# "fstype2": "udev",
# "mountPoint": "/dev",
# "options": [
# "rw",
# "nosuid",
# "relatime",
# "size=15913668k",
# "nr_inodes=3978417",
# "mode=755"
# ]
# },
# ...,
# "/run": {
# "dev": null,
# "fstype": "tmpfs",
# "fstype2": "tmpfs",
# "mountPoint": "/run",
# "options": [
# "rw",
# "nosuid",
# "noexec",
# "relatime",
# "size=3187068k",
# "mode=755"
# ]
# },
# ...,
# "/mounts/net/nbxxxxxxxx": {
# "dev": "[email protected]:/home/xxxxxxxx",
# "fstype": "fuse.sshfs",
# "fstype2": null,
# "mountPoint": "/mounts/net/nbxxxxxxxx",
# "options": [
# "rw",
# "nosuid",
# "nodev",
# "relatime",
# "user_id=1000",
# "group_id=1000",
# "allow_other"
# ]
# },
# ...
# }
#
def parse_mount(stdout:str, stderr:str, exitcode:int) -> dict:
"""
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
udev on /dev type devtmpfs (rw,nosuid,relatime,size=8018528k,nr_inodes=2004632,mode=755)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime,size=1608252k,mode=755)
/dev/sdb1 on / type ext4 (rw,relatime,errors=remount-ro,data=ordered)
...
"""
if exitcode != 0:
raise Exception("mount failed with exit code " + str(exitcode))
lines = stdout.strip().split("\n")
ret = {}
for line in lines:
m = re.match(r"^(.+?) on (.+?) type ([^\s]+) \(([^\s]+)\)$", line)
if m is None:
raise Exception("Failed to parse line: " + repr(line))
groups = m.groups()
devicePathOrFileSystem = groups[0]
mountPoint = groups[1]
fstype = groups[2]
mountOptions = groups[3].split(",")
isDevicePath = devicePathOrFileSystem.find("/") >= 0
devicePath = devicePathOrFileSystem if isDevicePath else None
fstype2 = None if isDevicePath else devicePathOrFileSystem
ret[mountPoint] = {
"dev": devicePath,
"fstype": fstype,
"fstype2": fstype2,
"options": mountOptions,
"mountPoint": mountPoint,
}
return ret
#
#
# Returns:
#
# {
# "/sys": {
# "dev": null,
# "fstype": "sysfs",
# "fstype2": "sysfs",
# "mountPoint": "/sys",
# "options": [
# "rw",
# "nosuid",
# "nodev",
# "noexec",
# "relatime"
# ]
# },
# "/proc": {
# "dev": null,
# "fstype": "proc",
# "fstype2": "proc",
# "mountPoint": "/proc",
# "options": [
# "rw",
# "nosuid",
# "nodev",
# "noexec",
# "relatime"
# ]
# },
# "/dev": {
# "dev": null,
# "fstype": "devtmpfs",
# "fstype2": "udev",
# "mountPoint": "/dev",
# "options": [
# "rw",
# "nosuid",
# "relatime",
# "size=15913668k",
# "nr_inodes=3978417",
# "mode=755"
# ]
# },
# ...,
# "/run": {
# "dev": null,
# "fstype": "tmpfs",
# "fstype2": "tmpfs",
# "mountPoint": "/run",
# "options": [
# "rw",
# "nosuid",
# "noexec",
# "relatime",
# "size=3187068k",
# "mode=755"
# ]
# },
# ...,
# "/mounts/net/nbxxxxxxxx": {
# "dev": "[email protected]:/home/xxxxxxxx",
# "fstype": "fuse.sshfs",
# "fstype2": null,
# "mountPoint": "/mounts/net/nbxxxxxxxx",
# "options": [
# "rw",
# "nosuid",
# "nodev",
# "relatime",
# "user_id=1000",
# "group_id=1000",
# "allow_other"
# ]
# },
# ...
# }
#
@cacheCalls(seconds=3, dependArgs=[0])
def get_mount(c = None) -> dict:
stdout, stderr, exitcode = run(c, "/bin/mount")
return parse_mount(stdout, stderr, exitcode)
#
```
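A short illustration (not from the repository): `parse_mount()` only needs the text of `mount`, so it can be exercised offline with two of the sample lines quoted in the docstring above.
```python
# Illustrative sketch: feed two sample lines from the docstring above through parse_mount().
from jk_sysinfo.get_mount import parse_mount

sample = (
    "sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)\n"
    "/dev/sdb1 on / type ext4 (rw,relatime,errors=remount-ro,data=ordered)\n"
)
mounts = parse_mount(sample, "", 0)
assert mounts["/"]["dev"] == "/dev/sdb1"        # left side contains "/" -> treated as a device path
assert mounts["/sys"]["fstype2"] == "sysfs"     # pseudo file system -> "dev" stays None
print(mounts)
```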
#### File: src/jk_sysinfo/get_proc_cpu_info.py
```python
import typing
from jk_cachefunccalls import cacheCalls
from jk_cmdoutputparsinghelper import ValueParser_ByteWithUnit
from .parsing_utils import *
from .invoke_utils import run
#import jk_json
_parserColonKVP = ParseAtFirstDelimiter(delimiter=":", valueCanBeWrappedInDoubleQuotes=False, keysReplaceSpacesWithUnderscores=True)
#
# Returns:
#
# [
# {
# "<key>": "<value>",
# ...
# },
# ...
# ]
#
def parse_proc_cpu_info(stdout:str, stderr:str, exitcode:int) -> typing.Tuple[list,dict]:
"""
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 1000.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 800.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 800.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 1100.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
initial apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
"""
if exitcode != 0:
raise Exception("Reading /proc/cpuinfo failed with exit code " + str(exitcode))
cpuInfos = splitAtEmptyLines(stdout.split("\n"))
retExtra = {}
ret = []
for group in cpuInfos:
d = _parserColonKVP.parseLines(group)
if "processor" not in d:
for k, v in d.items():
retExtra[k.lower()] = v
continue
if "cache_size" in d:
d["cache_size_kb"] = ValueParser_ByteWithUnit.parse(d["cache_size"]) // 1024
del d["cache_size"]
if "bogomips" in d:
d["bogomips"] = float(d["apicid"])
elif "BogoMIPS" in d:
d["bogomips"] = float(d["BogoMIPS"])
del d["BogoMIPS"]
if "bugs" in d:
d["bugs"] = d["bugs"].split()
if "flags" in d:
d["flags"] = sorted(d["flags"].split())
elif "Features" in d:
d["flags"] = sorted(d["Features"].split())
del d["Features"]
# bool
for key in [ "fpu", "fpu_exception", "wp" ]:
if key in d:
d[key.lower()] = d[key] == "yes"
if key != key.lower():
del d[key]
# int
for key in [ "CPU_architecture", "CPU_revision", "physical_id", "initial_apicid", "cpu_cores", "core_id", "clflush_size", "cache_alignment", "apicid" ]:
if key in d:
d[key.lower()] = int(d[key])
if key != key.lower():
del d[key]
# float
for key in [ "cpu_MHz" ]:
if key in d:
d[key.lower()] = float(d[key])
if key != key.lower():
del d[key]
# str
for key in [ "CPU_implementer", "CPU_part", "CPU_variant" ]:
if key in d:
d[key.lower()] = d[key]
if key != key.lower():
del d[key]
d["processor"] = int(d["processor"])
if "siblings" in d:
d["siblings"] = int(d["siblings"])
#jk_json.prettyPrint(d)
ret.append(d)
return ret, retExtra
#
#
# Returns:
#
# [
# {
# "<key>": "<value>",
# ...
# },
# ...
# ]
#
@cacheCalls(seconds=3, dependArgs=[0])
def get_proc_cpu_info(c = None) -> typing.Tuple[list,dict]:
stdout, stderr, exitcode = run(c, "cat /proc/cpuinfo")
return parse_proc_cpu_info(stdout, stderr, exitcode)
#
```
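A hedged usage sketch (not part of the repository): it assumes a Linux host with a readable `/proc/cpuinfo` and the installed package; the key names follow the parser's space-to-underscore and lower-casing rules and can differ per architecture, hence the defensive lookups.
```python
# Hedged usage sketch -- Linux host with readable /proc/cpuinfo assumed;
# key names may vary by architecture, hence the defensive .get() calls.
from jk_sysinfo.get_proc_cpu_info import get_proc_cpu_info

cpuRecords, extraInfo = get_proc_cpu_info()     # cached for 3 seconds via @cacheCalls
for cpu in cpuRecords:
    print(cpu["processor"], cpu.get("model_name"), cpu.get("cpu_mhz"))
print(extraInfo)    # architecture-wide entries without a "processor" key (often empty on x86)
```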
#### File: src/jk_sysinfo/get_proc_load_avg.py
```python
from jk_cachefunccalls import cacheCalls
from .parsing_utils import *
from .invoke_utils import run
#
# Returns:
#
# {
# "load1": 0.13,
# "load15": 0.29,
# "load5": 0.3,
# "processes_total": 1052,
# "processes_runnable": 1
# }
#
def parse_proc_load_avg(stdout:str, stderr:str, exitcode:int) -> dict:
"""
0.12 0.22 0.25 1/1048 16989
"""
if exitcode != 0:
raise Exception("Reading /proc/loadavg failed with exit code " + str(exitcode))
ret = stdout.strip().split(" ")
return {
"load1": float(ret[0]),
"load5": float(ret[1]),
"load15": float(ret[2]),
"processes_runnable": int(ret[3].split("/")[0]),
"processes_total": int(ret[3].split("/")[1]),
}
#
#
# Returns:
#
# {
# "load1": 0.13,
# "load15": 0.29,
# "load5": 0.3,
# "processes_total": 1052,
# "processes_runnable": 1
# }
#
def get_proc_load_avg(c = None) -> dict:
stdout, stderr, exitcode = run(c, "cat /proc/loadavg")
return parse_proc_load_avg(stdout, stderr, exitcode)
#
```
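A minimal sketch (not from the repository): the parser only needs the raw text of `/proc/loadavg`, so it can be verified without touching the local system.
```python
# Minimal sketch: parse the sample line quoted in the docstring above.
from jk_sysinfo.get_proc_load_avg import parse_proc_load_avg

data = parse_proc_load_avg("0.12 0.22 0.25 1/1048 16989", "", 0)
assert data["load5"] == 0.22
assert data["processes_runnable"] == 1
assert data["processes_total"] == 1048
print(data)
```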
#### File: src/jk_sysinfo/get_ps_local.py
```python
import os
import re
import pwd
import grp
import typing
import resource
# TODO: eliminate the use of the modules pwd and grp, as with them get_ps() cannot be executed remotely.
# NOTE: we might be able to implement this using data from get_user_info().
from jk_cachefunccalls import cacheCalls
import jk_cmdoutputparsinghelper
from .parsing_utils import *
from .invoke_utils import run
_PAGESIZE = resource.getpagesize()
_PAGESIZE_KB = _PAGESIZE / 1024
_parserColonKVP = ParseAtFirstDelimiter(delimiter="=", valueCanBeWrappedInDoubleQuotes=True)
def _enrichWithVMSize(jProgramData:dict, pid:int):
procPath = "/proc/" + str(jProgramData["pid"]) + "/statm"
try:
with open(procPath, "r") as f:
components = f.read().strip().split(" ")
assert len(components) == 7
jProgramData["vmsizeKB"] = int(components[0]) * _PAGESIZE_KB
except:
jProgramData["vmsizeKB"] = -1
#
def get_process_info_local(pid:int, userIDToNameMap:typing.Dict[int,str] = None, grpIDToNameMap:typing.Dict[int,str] = None):
dirPath = "/proc/" + str(pid)
#### /proc/????/status ####
_fullContent = None
try:
with open(os.path.join(dirPath, "status"), "r") as f:
_fullContent = f.read()
except:
return
_pre_map = {}
for line in _fullContent.split("\n"):
if not line:
continue
m = re.match("^([a-zA-Z_]+):\s+(.+)$", line)
if m is None:
print("ERROR: Failed to parse line: " + line)
return
_pre_map[m.group(1)] = m.group(2)
xxProcName = _pre_map["Name"]
_sStateTemp = _pre_map["State"]
m = re.match("^(.*)\s+\((.*)\)$", _sStateTemp)
if m:
xxState = m.group(1)
else:
print(_sStateTemp)
xxState = "?"
xxPPid = int(_pre_map["PPid"])
_listUIDTemp = re.split(r"(\s+)", _pre_map["Uid"])
_listGIDTemp = re.split(r"(\s+)", _pre_map["Gid"])
_listUID = [ int(_listUIDTemp[0]), int(_listUIDTemp[2]), int(_listUIDTemp[4]), int(_listUIDTemp[6]) ] # real, effective, saved, fs
_listGID = [ int(_listGIDTemp[0]), int(_listGIDTemp[2]), int(_listGIDTemp[4]), int(_listGIDTemp[6]) ] # real, effective, saved, fs
xxVMSizeKB = None
if "VmSize" in _pre_map:
_vmSizeTemp = re.split(r"(\s+)", _pre_map["VmSize"])
assert len(_vmSizeTemp) == 3
assert _vmSizeTemp[2] == "kB"
xxVMSizeKB = int(_vmSizeTemp[0])
xxUID = _listUID[1]
xxGID = _listGID[1]
xxUserName = userIDToNameMap.get(xxUID, None) if userIDToNameMap else None
xxGroupName = grpIDToNameMap.get(xxGID, None) if grpIDToNameMap else None
#### /proc/????/cmdline ####
xxCmd = None
xxArgs = None
with open(os.path.join(dirPath, "cmdline"), "r") as f:
_cmdLine = f.read().rstrip("\x00").split("\x00")
if _cmdLine:
xxArgs = _cmdLine[1:]
xxCmd = _cmdLine[0]
if not xxCmd:
assert xxProcName
xxCmd = "[" + xxProcName + "]"
#### /proc/????/cwd ####
try:
# NOTE: if we don't own this we can only read this as root
xxCWD = os.readlink(os.path.join(dirPath, "cwd"))
except:
xxCWD = None
#### /proc/????/exe ####
try:
# NOTE: if we don't own this we can only read this as root
xxEXE = os.readlink(os.path.join(dirPath, "exe"))
except:
xxEXE = None
# ----------------
# sometimes the command line arguments are separated by spaces instead of "\x00"; compensate for this, though the result might not be perfect;
if xxCmd and xxEXE:
if xxCmd.startswith(xxEXE + " "):
xxArgs = xxCmd[len(xxEXE)+1:].strip().split(" ")
xxCmd = xxEXE
assert xxCmd is not None
assert xxArgs is not None
# ----------------
ret = {
"args": " ".join(xxArgs),
"argsv": xxArgs,
"cmd": xxCmd,
"pid": pid,
"ppid": xxPPid,
"stat": xxState,
}
if xxUserName is not None:
ret["user"] = xxUserName
if xxGroupName is not None:
ret["group"] = xxGroupName
if xxCWD is not None:
ret["cwd"] = xxCWD
if xxEXE is not None:
ret["exe"] = xxEXE
ret["vmsizeKB"] = xxVMSizeKB if xxVMSizeKB is not None else -1
# ----------------
return ret
#
#
# Returns:
#
# [
# {
# "args": "splash",
# "cmd": "/sbin/init",
# "pid": 1,
# "ppid": 0,
# "stat": "Ss",
# "tty": null,
# "uid": 0,
# "user": "root"
# },
# {
# "cmd": "[kthreadd]",
# "pid": 2,
# "ppid": 0,
# "stat": "S",
# "tty": null,
# "uid": 0,
# "user": "root",
# "cwd": ....
# },
# {
# "cmd": "[ksoftirqd/0]",
# "pid": 3,
# "ppid": 2,
# "stat": "S",
# "tty": null,
# "uid": 0,
# "user": "root",
# "cwd": ....
# },
# ...
# {
# "cmd": "bash",
# "pid": 20144,
# "ppid": 14839,
# "stat": "Ss+",
# "tty": "pts/3",
# "uid": 1000,
# "user": "xxxxxxxx"
# "gid": 1000,
# "group": "xxxxxxxx",
# "cwd": ....
# },
# {
# "args": "--spawner :1.9 /org/gtk/gvfs/exec_spaw/4",
# "cmd": "/usr/lib/gvfs/gvfsd-computer",
# "pid": 20292,
# "ppid": 1,
# "stat": "Sl",
# "tty": null,
# "uid": 1000,
# "user": "xxxxxxxx"
# "gid": 1000,
# "group": "xxxxxxxx",
# "cwd": ....
# },
# ...
# {
# "args": "/usr/share/code/resources/app/extensions/json-language-features/server/dist/jsonServerMain --node-ipc --clientProcessId=15491",
# "cmd": "/usr/share/code/code",
# "pid": 29554,
# "ppid": 15491,
# "stat": "Sl",
# "tty": null,
# "uid": 1000,
# "user": "xxxxxxxx"
# "gid": 1000,
# "group": "xxxxxxxx",
# "cwd": ....
# },
# ...
# ]
#
def get_ps_local(c = None, bAddVMemSize:bool = False, userIDToNameMap:typing.Dict[int,str] = None, grpIDToNameMap:typing.Dict[int,str] = None) -> typing.List[dict]:
if c is not None:
raise Exception("This process analysis requires fast I/O and therefore can only be run locally!")
if c is None:
if userIDToNameMap is None:
userIDToNameMap = {}
for entry in pwd.getpwall():
userIDToNameMap[entry.pw_uid] = entry.pw_name
if grpIDToNameMap is None:
grpIDToNameMap = {}
for entry in grp.getgrall():
grpIDToNameMap[entry.gr_gid] = entry.gr_name
# ----
ret = []
for fe in os.scandir("/proc"):
if fe.is_dir(follow_symlinks=False) and re.match("^[1-9][0-9]*$", fe.name):
ret.append(get_process_info_local(int(fe.name), userIDToNameMap, grpIDToNameMap))
# ----
return ret
#
```
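A hedged usage sketch (not part of the repository): this is Linux-only and must run locally, since `get_ps_local()` rejects a remote connection object; entries can be `None` for processes that vanished while `/proc` was being scanned.
```python
# Hedged usage sketch -- Linux only, local execution only.
from jk_sysinfo.get_ps_local import get_ps_local

processes = [p for p in get_ps_local() if p is not None]   # skip processes that vanished mid-scan
largest = max(processes, key=lambda p: p.get("vmsizeKB", -1))
print(largest["pid"], largest["cmd"], largest["vmsizeKB"])
```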
#### File: jk_sysinfo/OLD/get_os_data.py
```python
import collections
from .invoke_utils import run
# NOTE: parse_etc_os_release() is referenced below but not defined in this legacy module;
# it is expected to be importable from the package.
# NOTE: "class" is a Python keyword and may not be used as a namedtuple field name,
# so the field is exposed as "osClass" here.
OSInfo = collections.namedtuple("OSInfo", [
"osClass",
"distribution",
])
#
# Returns:
#
# {
# "distribution": "ubuntu",
# "lts": true,
# "version": "16.04.6"
# }
#
def get_etc_os_release(c = None) -> dict:
stdout, stderr, exitcode = run(c, "cat /etc/os-release")
return parse_etc_os_release(stdout, stderr, exitcode)
#
```
#### File: python-module-jk-sysinfo/testing/test_get_lshw2.py
```python
import jk_sysinfo
import jk_json
from jk_simplexml import *
#jk_sysinfo.enableDebugging()
result = jk_sysinfo.get_lshw()
def _process(data):
sClass = data["class"]
del data["class"]
identifier = data["id"]
del data["id"]
if "children" in data:
children = data["children"]
del data["children"]
else:
children = None
x = HElement(sClass)
x.setAttributeValue("identifier", identifier)
_addAttributesToXML(x, data)
if children:
#xC = x.createChildElement("children")
#for child in children:
# xC.add(_process(child))
for child in children:
x.add(_process(child))
return x
#
def _addAttributesToXML(x:HElement, dictData:dict):
for k, v in dictData.items():
if k == "logicalname":
xLN = x.createChildElement("logicalnames")
if isinstance(v, list):
for logicalName in v:
xLN.createChildElement("logicalname").setChildText(logicalName)
else:
xLN.createChildElement("logicalname").setChildText(v)
continue
if isinstance(v, list):
raise Exception(k)
if isinstance(v, dict):
x2 = x.createChildElement(k)
_addAttributesToXML(x2, v)
else:
x.setAttributeValue(k, str(v))
#
xRoot = _process(result)
xmlWriteSettings = XMLWriteSettings()
xmlWriteSettings.writeXmlHeader = True
with open("test_get_lshw2.xml", "w") as f:
f.write(HSerializer.toXMLStr(xRoot, xmlWriteSettings))
``` |
{
"source": "jkpubsrc/python-module-jk-testing",
"score": 3
} |
#### File: python-module-jk-testing/examples/examples.py
```python
from jk_testing import *
@TestCase()
def testCaseZero(ctx):
ctx.log.notice("We're doing some special tests here ...")
#
@TestCase(
runAfter="testCaseZero",
description="This is test A",
)
def testCaseA(ctx):
ctx.log.notice("We're doing some tests here ...")
#
@TestCase(
runBefore="testCaseA",
)
def testCaseB(ctx):
ctx.log.notice("We're doing some other tests here ...")
#
@TestCase(
requires="testCaseB",
providesVariable="xx",
)
def testCaseC(ctx):
ctx.log.notice("We're doing some special tests here ...")
return {
"xx": "abc"
}
#
@TestCase(
RaisesException(FileNotFoundError, filename="xxx.xx"),
requiresVariable="xx",
)
def testCaseD(ctx):
ctx.log.notice("We're doing some special tests here ...")
with open("xxx.xx", "r") as f:
pass
#
@TestCase(
RaisesException(FileNotFoundError, filename="xxx.xx"),
requiresVariable="xx",
)
def testCaseD2(ctx):
pass
#
@TestCase()
def testCaseE(ctx):
ctx.log.notice("äöüßÄÖÜ ...")
with open("xxx.xx", "r") as f:
pass
#
testDriver = TestDriver()
testDriver.data["abc"] = "abc"
results = testDriver.runTests([
(testCaseZero, False),
(testCaseA, True),
(testCaseB, False),
(testCaseC, False),
(testCaseD2, True),
(testCaseD, True),
(testCaseE, True),
])
reporter = TestReporterHTML()
reporter.report(results)
```
#### File: src/jk_testing/Assert.py
```python
import re
from .AssertionException import AssertionException
class _Assert(object):
def __init__(self, log):
# the log may be either a callable or a logger-like object; both are stored as-is
self.__log = log
#
def isIn(self, value, valueList, message = None):
Assert.l_isIn(self.__log, value, valueList, message)
#
def isNotIn(self, value, valueList, message = None):
Assert.l_isNotIn(self.__log, value, valueList, message)
#
def raisesException(self, function, arguments, message = None):
Assert.l_raisesException(self.__log, function, arguments, message)
#
def isCallable(self, value, message = None):
Assert.l_isCallable(self.__log, value, message)
#
def isInstance(self, value, typeOrTypes, message = None):
Assert.l_isInstance(self.__log, value, typeOrTypes, message)
#
def isRegExMatch(self, value, regexPattern, message = None):
Assert.l_isRegExMatch(self.__log, value, regexPattern, message)
#
def isEqual(self, value, otherValue, message = None):
Assert.l_isEqual(self.__log, value, otherValue, message)
#
def isGreater(self, value, otherValue, message = None):
Assert.l_isGreater(self.__log, value, otherValue, message)
#
def isGreaterOrEqual(self, value, otherValue, message = None):
Assert.l_isGreaterOrEqual(self.__log, value, otherValue, message)
#
def isSmaller(self, value, otherValue, message = None):
Assert.l_isSmaller(self.__log, value, otherValue, message)
#
def isSmallerOrEqual(self, value, otherValue, message = None):
Assert.l_isSmallerOrEqual(self.__log, value, otherValue, message)
#
def isNotEqual(self, value, otherValue, message = None):
Assert.l_isNotEqual(self.__log, value, otherValue, message)
#
def isNone(self, value, message = None):
Assert.l_isNone(self.__log, value, message)
#
def isNotNone(self, value, message = None):
Assert.l_isNotNone(self.__log, value, message)
#
def isNotNoneOrEmpty(self, value, message = None):
Assert.l_isNotNoneOrEmpty(self.__log, value, message)
#
def isTrue(self, value, message = None):
Assert.l_isTrue(self.__log, value, message)
#
def isFalse(self, value, message = None):
Assert.l_isFalse(self.__log, value, message)
#
#
class Assert(object):
@staticmethod
def createCustomAssert(log):
return _Assert(log)
#
"""
@staticmethod
def getAllBaseClasses(cls):
# TODO: convert this to an iteration
c = list(cls.__bases__)
for base in c:
c.extend(getAllBaseClasses(base))
return c
#
"""
@staticmethod
def isIn(value, valueList, message = None, log = None, identifier:str = None):
bSuccess = value in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isIn(log, value, valueList, message = None, identifier:str = None):
bSuccess = value in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNotIn(value, valueList, message = None, log = None, identifier:str = None):
bSuccess = value not in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNotIn(log, value, valueList, message = None, identifier:str = None):
bSuccess = value not in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def raisesException(function, arguments, message = None, log = None, identifier:str = None):
bSuccess = True
try:
function(*arguments)
bSuccess = False
except Exception as ee:
pass
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "No exception was raised!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_raisesException(log, function, arguments, message = None, identifier:str = None):
bSuccess = True
try:
function(*arguments)
bSuccess = False
except Exception as ee:
pass
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "No exception was raised!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isCallable(value, message = None, log = None, identifier:str = None):
if callable(value):
return
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is not a callable but of type " + str(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isCallable(log, value, message = None, identifier:str = None):
if callable(value):
return
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is not a callable but of type " + str(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isInstance(value, typeOrTypes, message = None, log = None, identifier:str = None):
if isinstance(value, typeOrTypes):
return
if issubclass(type(value), typeOrTypes):
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is of type " + str(type(value)) + " and not of type " + str(typeOrTypes)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isInstance(log, value, typeOrTypes, message = None, identifier:str = None):
if isinstance(value, typeOrTypes):
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is of type " + str(type(value)) + " and not of type " + str(typeOrTypes)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isEqual(value, otherValue, message = None, log = None, identifier:str = None):
if value == otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " and not " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isEqual(log, value, otherValue, message = None, identifier:str = None):
if value == otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " and not " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isRegExMatch(value, regexPattern, message = None, log = None, identifier:str = None):
m = re.match(regexPattern, value)
if m:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which does not match " + repr(regexPattern) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isRegExMatch(log, value, regexPattern, message = None, identifier:str = None):
m = re.match(regexPattern, value)
if m:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which does not match " + repr(regexPattern) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNotEqual(value, otherValue, message = None, log = None, identifier:str = None):
if value != otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which is not expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNotEqual(log, value, otherValue, message = None, identifier:str = None):
if value != otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which is not expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isGreater(value, otherValue, message = None, log = None, identifier:str = None):
if value > otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not greater than " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isGreater(log, value, otherValue, message = None, identifier:str = None):
if value > otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not greater than " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isGreaterOrEqual(value, otherValue, message = None, log = None, identifier:str = None):
if value >= otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not greater or equal to " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isGreaterOrEqual(log, value, otherValue, message = None, identifier:str = None):
if value >= otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not greater or equal to " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isSmaller(value, otherValue, message = None, log = None, identifier:str = None):
if value < otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not smaller than " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isSmaller(log, value, otherValue, message = None, identifier:str = None):
if value < otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not smaller than " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isSmallerOrEqual(value, otherValue, message = None, log = None, identifier:str = None):
if value <= otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not smaller or equal to " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isSmallerOrEqual(log, value, otherValue, message = None, identifier:str = None):
if value <= otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not smaller or equal to " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNone(value, message = None, log = None, identifier:str = None):
if value is None:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is not None as expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNone(log, value, message = None, identifier:str = None):
if value is None:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is not None as expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNotNone(value, message = None, log = None, identifier:str = None):
if value is not None:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is None which is not expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNotNone(log, value, message = None, identifier:str = None):
if value is not None:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is None which is not expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNotNoneOrEmpty(value, message = None, log = None, identifier:str = None):
if value:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is None or empty which is not expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNotNoneOrEmpty(log, value, message = None, identifier:str = None):
if value:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is None or empty which is not expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isTrue(value, message = None, log = None, identifier:str = None):
if value is True:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is not true as expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isTrue(log, value, message = None, identifier:str = None):
if value is True:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is not true as expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isFalse(value, message = None, log = None, identifier:str = None):
if value is False:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is not false as expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isFalse(log, value, message = None, identifier:str = None):
if value is False:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is not false as expected: " + repr(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
#
```
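A minimal usage sketch (not from the repository); the imports go straight to the submodules visible in this file layout rather than assuming package-level re-exports.
```python
# Minimal usage sketch for the Assert helpers; module paths follow the file layout above.
from jk_testing.Assert import Assert
from jk_testing.AssertionException import AssertionException

Assert.isEqual(2 + 2, 4)
Assert.isInstance("abc", str, message="unexpected type")
try:
    Assert.isTrue(False, identifier="demo")
except AssertionException as ex:
    print("caught:", ex)
```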
#### File: src/jk_testing/SingleLookAtQueue.py
```python
class SingleLookAtQueue(object):
def __init__(self):
self.__data = []
self.__lookedAt = set()
#
def add(self, item):
itemID = id(item)
if itemID not in self.__lookedAt:
self.__data.append(item)
self.__lookedAt.add(itemID)
#
def addAll(self, items):
for item in items:
itemID = id(item)
if itemID not in self.__lookedAt:
self.__data.append(item)
self.__lookedAt.add(itemID)
#
def retrieve(self):
if self.__data:
ret = self.__data[0]
del self.__data[0]
return ret
else:
return None
#
def isNotEmpty(self):
return len(self.__data) > 0
#
def isEmpty(self):
return len(self.__data) == 0
#
#
```
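An illustrative sketch (not from the repository): the queue deduplicates by object identity, so an object is handed out at most once no matter how often it is added.
```python
# Illustrative sketch: items are remembered by id(), so re-adding the same object is a no-op.
from jk_testing.SingleLookAtQueue import SingleLookAtQueue

q = SingleLookAtQueue()
item = {"name": "a"}
q.add(item)
q.add(item)                     # ignored -- this exact object was queued before
q.addAll([item, {"name": "b"}])
while q.isNotEmpty():
    print(q.retrieve())         # each distinct object is returned exactly once
```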
#### File: src/jk_testing/TestCollectionVisualizer.py
```python
import os
import sys
import subprocess
from .TestCaseInstance import *
from .NodeMatrix import NodeMatrix
class TestCollectionVisualizer(object):
IMAGE_VIEWER_CANDIDATES = [
"/usr/bin/viewnior", # viewnior: https://siyanpanayotov.com/project/viewnior
"/usr/bin/nomacs", # nomacs: https://nomacs.org/
"/usr/bin/geeqie", # geeqie: http://geeqie.org/
"/usr/bin/mirageiv", # mirageiv: http://mirageiv.sourceforge.net/
"/usr/bin/eom", # eye of mate
"/usr/bin/eog", # eye of gnome
"/usr/bin/feh", # feh: https://feh.finalrewind.org/
"/usr/bin/display", # fallback
]
def __init__(self):
pass
#
#
# Determine the color a node should have during visualization
#
# @return str bgColor The background color selected (or <c>None</c>)
# @return str textColor The text color selected (or <c>None</c>)
# @return str lineColor The foreground color selected (or <c>None</c>)
#
def __calcNodeColorCallback(self, testCaseInstance:TestCaseInstance):
if testCaseInstance.isRoot:
# test is root
# bgColor: light blue
# textColor: dark blue
# lineColor: dark blue
return "#f0f0ff", "#f0f0ff", "#d0d0f0"
if testCaseInstance.processingState == EnumProcessingState.NOT_PROCESSED:
if testCaseInstance.enabledState == EnumEnabledState.ENABLED_BY_USER:
# test enabled by user
# bgColor: light green
# textColor: dark green
# lineColor: dark green
return "#e0ffe0", "#40a040", "#40a040"
elif testCaseInstance.enabledState == EnumEnabledState.ENABLED_IN_CONSEQUENCE:
# test enabled in consequence
# bgColor: light grayish green
# textColor: dark grayish green
# lineColor: dark grayish green
return "#e0f0e0", "#80a080", "#80a080"
elif testCaseInstance.enabledState == EnumEnabledState.DISABLED:
# test disabled
# bgColor: light gray
# textColor: gray
# lineColor: gray
return "#e0e0e0", "#909090", "#909090"
else:
raise Exception()
elif testCaseInstance.processingState == EnumProcessingState.SUCCEEDED:
# succeeded
# bgColor: dark green
# textColor: white
# lineColor: white
return "#008000", "#ffffff", "#ffffff"
elif testCaseInstance.processingState == EnumProcessingState.FAILED:
# test failed
# bgColor: dark red
# textColor: white
# lineColor: white
return "#800000", "#ffffff", "#ffffff"
elif testCaseInstance.processingState == EnumProcessingState.FAILED_CRITICALLY:
# test failed
# bgColor: dark red
# textColor: white
# lineColor: white
return "#800000", "#ffffff", "#ffffff"
else:
raise Exception()
#
def createSVG(self, collection):
return collection._nodeMatrix.convertTo("svg", nodeColoringCallback=self.__calcNodeColorCallback)
#
def visualize(self, collection, imageViewerPath:str = None):
if imageViewerPath is None:
for filePath in TestCollectionVisualizer.IMAGE_VIEWER_CANDIDATES:
if os.path.isfile(filePath):
imageViewerPath = filePath
break
else:
assert isinstance(imageViewerPath, str)
if imageViewerPath is None:
raise Exception("Autodetection of suitable image viewer failed!")
elif os.path.isfile(imageViewerPath):
resultFilePath = collection._nodeMatrix.convertTo("svg", nodeColoringCallback=self.__calcNodeColorCallback)
subprocess.Popen(["nohup", imageViewerPath, resultFilePath], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
raise Exception("No such image viewer: " + str(imageViewerPath))
#
#
```
#### File: src/jk_testing/TestReporterHTML.py
```python
import os
import sys
import shutil
import http.server
import socketserver
import webbrowser
import urllib
import posixpath
import jk_logging
import jk_json
from jinja2 import Environment, FileSystemLoader, select_autoescape, Template
from .TestResultCollection import *
from .TestResult import *
class ResultsHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, rootDirPath:str, request, client_address, server):
self.__rootDirPath = os.path.realpath(rootDirPath)
super().__init__(request, client_address, server)
#
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
try:
path = urllib.parse.unquote(path, errors='surrogatepass')
except UnicodeDecodeError:
path = urllib.parse.unquote(path)
path = posixpath.normpath(path)
words = path.split('/')
words = filter(None, words)
path = self.__rootDirPath
for word in words:
if os.path.dirname(word) or word in (os.curdir, os.pardir):
# Ignore components that are not a simple file/directory name
continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
#
#
class ResultsHTTPServer(socketserver.TCPServer):
allow_reuse_address = True
def __init__(self, rootDirPath:str):
super().__init__(("", 9096), ResultsHTTPRequestHandler)
self.__rootDirPath = rootDirPath
#
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(self.__rootDirPath, request, client_address, self)
#
#
class TestReporterHTML(object):
def __init__(self):
#self.__staticFilesDir = os.path.join(os.getcwd(), "files")
self.__staticFilesDir = os.path.join(os.path.dirname(__file__), "data", "html_default", "files")
#self.__templateDir = os.path.join(os.getcwd(), "templates")
self.__templateDir = os.path.join(os.path.dirname(__file__), "data", "html_default", "templates")
self.__env = Environment(
loader=FileSystemLoader(self.__templateDir),
autoescape=select_autoescape(["html", "xml"])
)
self.__templateTestCase = self.__env.get_template("testcase.html")
self.__templateOverview = self.__env.get_template("index.html")
#
def report(self,
testResultCollection:TestResultCollection,
outDirPath:str="results",
showInWebBrowser:bool=True,
serveWithWebServer:bool=False,
webbrowserType:str=None
):
if not os.path.isabs(outDirPath):
outDirPath = os.path.abspath(outDirPath)
if not os.path.isdir(outDirPath):
os.makedirs(outDirPath)
shutil.rmtree(outDirPath)
# ----
shutil.copytree(self.__staticFilesDir, outDirPath)
# ----
for testResult in testResultCollection.testResults:
self.__writeResultFile(testResult, outDirPath)
self.__writeOverviewFile(testResultCollection, outDirPath)
# ----
if showInWebBrowser:
httpd = ResultsHTTPServer(outDirPath)
print("Running web server. For results see: http://localhost:9096/")
webbrowser.get(webbrowserType).open("http://localhost:9096/", new=1)
httpd.serve_forever()
elif serveWithWebServer:
httpd = ResultsHTTPServer(outDirPath)
print("Running web server. For results see: http://localhost:9096/")
httpd.serve_forever()
#
def __writeResultFile(self, testResult:TestResult, outDirPath:str):
jsonTestRecord = {
"id": "test_" + testResult.name,
"file": "test_" + testResult.name + ".html",
"name": testResult.name,
"timeStamp": testResult.timeStamp,
"enabledState": str(testResult.enabledState),
"processingState": str(testResult.processingState),
"duration": testResult.duration,
"logBuffer": testResult.logBuffer.toJSONPretty()["logData"],
"description": testResult.description,
}
content = self.__templateTestCase.render(testRecord=jsonTestRecord)
filePath = os.path.join(outDirPath, "test_" + testResult.name + ".html")
print("Writing to: " + filePath)
with open(filePath, "w") as f:
f.write(content)
#
def __writeOverviewFile(self, testResultCollection:TestResultCollection, outDirPath:str):
jsonTestRecords = []
for testResult in testResultCollection.testResults:
jsonTestRecords.append({
"id": "test_" + testResult.name,
"file": "test_" + testResult.name + ".html",
"name": testResult.name,
"timeStamp": testResult.timeStamp,
"enabledState": str(testResult.enabledState),
"processingState": str(testResult.processingState),
"duration": testResult.duration,
"logBuffer": testResult.logBuffer.toJSONPretty()["logData"],
"description": testResult.description,
})
tempFilePath = testResultCollection.createSVG()
with open(tempFilePath, "r") as f:
svgLines = f.readlines()
os.unlink(tempFilePath)
while (len(svgLines) > 0) and not svgLines[0].startswith("<svg"):
del svgLines[0]
content = self.__templateOverview.render(
svg="".join(svgLines),
testRecords=jsonTestRecords,
summary={
"countTestsPerformed": testResultCollection.countTestsSucceeded + testResultCollection.countTestsFailed,
"countTestsNotYetPerformed": testResultCollection.countTestsNotYetPerformed,
"countTestsSucceeded": testResultCollection.countTestsSucceeded,
"countTestsFailed": testResultCollection.countTestsFailed,
"totalTestRuntime": testResultCollection.totalTestRuntime,
"totalTestDuration": testResultCollection.totalTestDuration,
}
)
filePath = os.path.join(outDirPath, "index.html")
print("Writing: " + filePath)
with open(filePath, "w") as f:
f.write(content)
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-tokenizingparsing",
"score": 2
} |
#### File: python-module-jk-tokenizingparsing/examples/test_matching3.py
```python
import jk_json
import jk_tokenizingparsing
from jk_tokenizingparsing.tokenmatching import *
TEXT = """CREATE TABLE archive (
ar_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
ar_namespace INTEGER NOT NULL default 0,
ar_title TEXT NOT NULL default '',
ar_comment BLOB NOT NULL default '', -- Deprecated in favor of ar_comment_id
ar_comment_id INTEGER NOT NULL DEFAULT 0, -- ("DEFAULT 0" is temporary, signaling that ar_comment should be used)
ar_user INTEGER NOT NULL default 0, -- Deprecated in favor of ar_actor
ar_user_text TEXT NOT NULL DEFAULT '', -- Deprecated in favor of ar_actor
ar_actor INTEGER NOT NULL DEFAULT 0, -- ("DEFAULT 0" is temporary, signaling that ar_user/ar_user_text should be used)
ar_timestamp BLOB NOT NULL default '',
ar_minor_edit INTEGER NOT NULL default 0,
ar_rev_id INTEGER NOT NULL,
ar_text_id INTEGER NOT NULL DEFAULT 0,
ar_deleted INTEGER NOT NULL default 0,
ar_len INTEGER ,
ar_page_id INTEGER ,
ar_parent_id INTEGER default NULL,
ar_sha1 BLOB NOT NULL default '',
ar_content_model BLOB DEFAULT NULL,
ar_content_format BLOB DEFAULT NULL
)"""
TOKENS = jk_tokenizingparsing.Serializer.deserializeTokens([
{"type":"w","text":"CREATE","lineNo":0,"charPos":0,"endLineNo":0,"endCharPos":6},
{"type":"w","text":"TABLE","lineNo":0,"charPos":7,"endLineNo":0,"endCharPos":12},
{"type":"w","text":"archive","lineNo":0,"charPos":13,"endLineNo":0,"endCharPos":20},
{"type":"dbo","text":"(","lineNo":0,"charPos":21,"endLineNo":0,"endCharPos":22},
{"type":"w","text":"ar_id","lineNo":1,"charPos":1,"endLineNo":1,"endCharPos":6},
{"type":"w","text":"INTEGER","lineNo":1,"charPos":7,"endLineNo":1,"endCharPos":14},
{"type":"w","text":"NOT","lineNo":1,"charPos":16,"endLineNo":1,"endCharPos":19},
{"type":"w","text":"NULL","lineNo":1,"charPos":20,"endLineNo":1,"endCharPos":24},
{"type":"w","text":"PRIMARY","lineNo":1,"charPos":25,"endLineNo":1,"endCharPos":32},
{"type":"w","text":"KEY","lineNo":1,"charPos":33,"endLineNo":1,"endCharPos":36},
{"type":"w","text":"AUTOINCREMENT","lineNo":1,"charPos":37,"endLineNo":1,"endCharPos":50},
{"type":"dc","text":",","lineNo":1,"charPos":50,"endLineNo":1,"endCharPos":51},
{"type":"w","text":"ar_namespace","lineNo":2,"charPos":1,"endLineNo":2,"endCharPos":13},
{"type":"w","text":"INTEGER","lineNo":2,"charPos":14,"endLineNo":2,"endCharPos":21},
{"type":"w","text":"NOT","lineNo":2,"charPos":22,"endLineNo":2,"endCharPos":25},
{"type":"w","text":"NULL","lineNo":2,"charPos":26,"endLineNo":2,"endCharPos":30},
{"type":"w","text":"default","lineNo":2,"charPos":31,"endLineNo":2,"endCharPos":38},
{"type":"i","text":"0","lineNo":2,"charPos":39,"endLineNo":2,"endCharPos":40},
{"type":"dc","text":",","lineNo":2,"charPos":40,"endLineNo":2,"endCharPos":41},
{"type":"w","text":"ar_title","lineNo":3,"charPos":1,"endLineNo":3,"endCharPos":9},
{"type":"w","text":"TEXT","lineNo":3,"charPos":10,"endLineNo":3,"endCharPos":14},
{"type":"w","text":"NOT","lineNo":3,"charPos":16,"endLineNo":3,"endCharPos":19},
{"type":"w","text":"NULL","lineNo":3,"charPos":20,"endLineNo":3,"endCharPos":24},
{"type":"w","text":"default","lineNo":3,"charPos":25,"endLineNo":3,"endCharPos":32},
{"type":"s","text":"","lineNo":3,"charPos":34,"endLineNo":3,"endCharPos":35},
{"type":"dc","text":",","lineNo":3,"charPos":35,"endLineNo":3,"endCharPos":36},
{"type":"w","text":"ar_comment","lineNo":4,"charPos":1,"endLineNo":4,"endCharPos":11},
{"type":"w","text":"BLOB","lineNo":4,"charPos":12,"endLineNo":4,"endCharPos":16},
{"type":"w","text":"NOT","lineNo":4,"charPos":17,"endLineNo":4,"endCharPos":20},
{"type":"w","text":"NULL","lineNo":4,"charPos":21,"endLineNo":4,"endCharPos":25},
{"type":"w","text":"default","lineNo":4,"charPos":26,"endLineNo":4,"endCharPos":33},
{"type":"s","text":"","lineNo":4,"charPos":35,"endLineNo":4,"endCharPos":36},
{"type":"dc","text":",","lineNo":4,"charPos":36,"endLineNo":4,"endCharPos":37},
{"type":"w","text":"ar_comment_id","lineNo":5,"charPos":1,"endLineNo":5,"endCharPos":14},
{"type":"w","text":"INTEGER","lineNo":5,"charPos":15,"endLineNo":5,"endCharPos":22},
{"type":"w","text":"NOT","lineNo":5,"charPos":24,"endLineNo":5,"endCharPos":27},
{"type":"w","text":"NULL","lineNo":5,"charPos":28,"endLineNo":5,"endCharPos":32},
{"type":"w","text":"DEFAULT","lineNo":5,"charPos":33,"endLineNo":5,"endCharPos":40},
{"type":"i","text":"0","lineNo":5,"charPos":41,"endLineNo":5,"endCharPos":42},
{"type":"dc","text":",","lineNo":5,"charPos":42,"endLineNo":5,"endCharPos":43},
{"type":"w","text":"ar_user","lineNo":6,"charPos":1,"endLineNo":6,"endCharPos":8},
{"type":"w","text":"INTEGER","lineNo":6,"charPos":9,"endLineNo":6,"endCharPos":16},
{"type":"w","text":"NOT","lineNo":6,"charPos":18,"endLineNo":6,"endCharPos":21},
{"type":"w","text":"NULL","lineNo":6,"charPos":22,"endLineNo":6,"endCharPos":26},
{"type":"w","text":"default","lineNo":6,"charPos":27,"endLineNo":6,"endCharPos":34},
{"type":"i","text":"0","lineNo":6,"charPos":35,"endLineNo":6,"endCharPos":36},
{"type":"dc","text":",","lineNo":6,"charPos":36,"endLineNo":6,"endCharPos":37},
{"type":"w","text":"ar_user_text","lineNo":7,"charPos":1,"endLineNo":7,"endCharPos":13},
{"type":"w","text":"TEXT","lineNo":7,"charPos":14,"endLineNo":7,"endCharPos":18},
{"type":"w","text":"NOT","lineNo":7,"charPos":20,"endLineNo":7,"endCharPos":23},
{"type":"w","text":"NULL","lineNo":7,"charPos":24,"endLineNo":7,"endCharPos":28},
{"type":"w","text":"DEFAULT","lineNo":7,"charPos":29,"endLineNo":7,"endCharPos":36},
{"type":"s","text":"","lineNo":7,"charPos":38,"endLineNo":7,"endCharPos":39},
{"type":"dc","text":",","lineNo":7,"charPos":39,"endLineNo":7,"endCharPos":40},
{"type":"w","text":"ar_actor","lineNo":8,"charPos":1,"endLineNo":8,"endCharPos":9},
{"type":"w","text":"INTEGER","lineNo":8,"charPos":10,"endLineNo":8,"endCharPos":17},
{"type":"w","text":"NOT","lineNo":8,"charPos":19,"endLineNo":8,"endCharPos":22},
{"type":"w","text":"NULL","lineNo":8,"charPos":23,"endLineNo":8,"endCharPos":27},
{"type":"w","text":"DEFAULT","lineNo":8,"charPos":28,"endLineNo":8,"endCharPos":35},
{"type":"i","text":"0","lineNo":8,"charPos":36,"endLineNo":8,"endCharPos":37},
{"type":"dc","text":",","lineNo":8,"charPos":37,"endLineNo":8,"endCharPos":38},
{"type":"w","text":"ar_timestamp","lineNo":9,"charPos":1,"endLineNo":9,"endCharPos":13},
{"type":"w","text":"BLOB","lineNo":9,"charPos":14,"endLineNo":9,"endCharPos":18},
{"type":"w","text":"NOT","lineNo":9,"charPos":19,"endLineNo":9,"endCharPos":22},
{"type":"w","text":"NULL","lineNo":9,"charPos":23,"endLineNo":9,"endCharPos":27},
{"type":"w","text":"default","lineNo":9,"charPos":28,"endLineNo":9,"endCharPos":35},
{"type":"s","text":"","lineNo":9,"charPos":37,"endLineNo":9,"endCharPos":38},
{"type":"dc","text":",","lineNo":9,"charPos":38,"endLineNo":9,"endCharPos":39},
{"type":"w","text":"ar_minor_edit","lineNo":10,"charPos":1,"endLineNo":10,"endCharPos":14},
{"type":"w","text":"INTEGER","lineNo":10,"charPos":15,"endLineNo":10,"endCharPos":22},
{"type":"w","text":"NOT","lineNo":10,"charPos":23,"endLineNo":10,"endCharPos":26},
{"type":"w","text":"NULL","lineNo":10,"charPos":27,"endLineNo":10,"endCharPos":31},
{"type":"w","text":"default","lineNo":10,"charPos":32,"endLineNo":10,"endCharPos":39},
{"type":"i","text":"0","lineNo":10,"charPos":40,"endLineNo":10,"endCharPos":41},
{"type":"dc","text":",","lineNo":10,"charPos":41,"endLineNo":10,"endCharPos":42},
{"type":"w","text":"ar_rev_id","lineNo":11,"charPos":1,"endLineNo":11,"endCharPos":10},
{"type":"w","text":"INTEGER","lineNo":11,"charPos":11,"endLineNo":11,"endCharPos":18},
{"type":"w","text":"NOT","lineNo":11,"charPos":20,"endLineNo":11,"endCharPos":23},
{"type":"w","text":"NULL","lineNo":11,"charPos":24,"endLineNo":11,"endCharPos":28},
{"type":"dc","text":",","lineNo":11,"charPos":28,"endLineNo":11,"endCharPos":29},
{"type":"w","text":"ar_text_id","lineNo":12,"charPos":1,"endLineNo":12,"endCharPos":11},
{"type":"w","text":"INTEGER","lineNo":12,"charPos":12,"endLineNo":12,"endCharPos":19},
{"type":"w","text":"NOT","lineNo":12,"charPos":21,"endLineNo":12,"endCharPos":24},
{"type":"w","text":"NULL","lineNo":12,"charPos":25,"endLineNo":12,"endCharPos":29},
{"type":"w","text":"DEFAULT","lineNo":12,"charPos":30,"endLineNo":12,"endCharPos":37},
{"type":"i","text":"0","lineNo":12,"charPos":38,"endLineNo":12,"endCharPos":39},
{"type":"dc","text":",","lineNo":12,"charPos":39,"endLineNo":12,"endCharPos":40},
{"type":"w","text":"ar_deleted","lineNo":13,"charPos":1,"endLineNo":13,"endCharPos":11},
{"type":"w","text":"INTEGER","lineNo":13,"charPos":12,"endLineNo":13,"endCharPos":19},
{"type":"w","text":"NOT","lineNo":13,"charPos":21,"endLineNo":13,"endCharPos":24},
{"type":"w","text":"NULL","lineNo":13,"charPos":25,"endLineNo":13,"endCharPos":29},
{"type":"w","text":"default","lineNo":13,"charPos":30,"endLineNo":13,"endCharPos":37},
{"type":"i","text":"0","lineNo":13,"charPos":38,"endLineNo":13,"endCharPos":39},
{"type":"dc","text":",","lineNo":13,"charPos":39,"endLineNo":13,"endCharPos":40},
{"type":"w","text":"ar_len","lineNo":14,"charPos":1,"endLineNo":14,"endCharPos":7},
{"type":"w","text":"INTEGER","lineNo":14,"charPos":8,"endLineNo":14,"endCharPos":15},
{"type":"dc","text":",","lineNo":14,"charPos":16,"endLineNo":14,"endCharPos":17},
{"type":"w","text":"ar_page_id","lineNo":15,"charPos":1,"endLineNo":15,"endCharPos":11},
{"type":"w","text":"INTEGER","lineNo":15,"charPos":12,"endLineNo":15,"endCharPos":19},
{"type":"dc","text":",","lineNo":15,"charPos":20,"endLineNo":15,"endCharPos":21},
{"type":"w","text":"ar_parent_id","lineNo":16,"charPos":1,"endLineNo":16,"endCharPos":13},
{"type":"w","text":"INTEGER","lineNo":16,"charPos":14,"endLineNo":16,"endCharPos":21},
{"type":"w","text":"default","lineNo":16,"charPos":23,"endLineNo":16,"endCharPos":30},
{"type":"w","text":"NULL","lineNo":16,"charPos":31,"endLineNo":16,"endCharPos":35},
{"type":"dc","text":",","lineNo":16,"charPos":35,"endLineNo":16,"endCharPos":36},
{"type":"w","text":"ar_sha1","lineNo":17,"charPos":1,"endLineNo":17,"endCharPos":8},
{"type":"w","text":"BLOB","lineNo":17,"charPos":9,"endLineNo":17,"endCharPos":13},
{"type":"w","text":"NOT","lineNo":17,"charPos":14,"endLineNo":17,"endCharPos":17},
{"type":"w","text":"NULL","lineNo":17,"charPos":18,"endLineNo":17,"endCharPos":22},
{"type":"w","text":"default","lineNo":17,"charPos":23,"endLineNo":17,"endCharPos":30},
{"type":"s","text":"","lineNo":17,"charPos":32,"endLineNo":17,"endCharPos":33},
{"type":"dc","text":",","lineNo":17,"charPos":33,"endLineNo":17,"endCharPos":34},
{"type":"w","text":"ar_content_model","lineNo":18,"charPos":1,"endLineNo":18,"endCharPos":17},
{"type":"w","text":"BLOB","lineNo":18,"charPos":18,"endLineNo":18,"endCharPos":22},
{"type":"w","text":"DEFAULT","lineNo":18,"charPos":23,"endLineNo":18,"endCharPos":30},
{"type":"w","text":"NULL","lineNo":18,"charPos":31,"endLineNo":18,"endCharPos":35},
{"type":"dc","text":",","lineNo":18,"charPos":35,"endLineNo":18,"endCharPos":36},
{"type":"w","text":"ar_content_format","lineNo":19,"charPos":1,"endLineNo":19,"endCharPos":18},
{"type":"w","text":"BLOB","lineNo":19,"charPos":19,"endLineNo":19,"endCharPos":23},
{"type":"w","text":"DEFAULT","lineNo":19,"charPos":24,"endLineNo":19,"endCharPos":31},
{"type":"w","text":"NULL","lineNo":19,"charPos":32,"endLineNo":19,"endCharPos":36},
{"type":"dbc","text":")","lineNo":20,"charPos":1,"endLineNo":20,"endCharPos":2},
{"type":"eos","text":"","lineNo":20,"charPos":2,"endLineNo":20,"endCharPos":2}
])
TABLE_DEF = TPSeq(
TP("w", "CREATE", bIgnoreCase=True),
TP("w", "TABLE", bIgnoreCase=True),
TPAlt(
TP("w", emitName="tableName"),
TP("s", emitName="tableName")
)
)
COL_DEF = TPSeq(
TP("w", emitName="name"),
TPAlt(
TP("w", "INTEGER", bIgnoreCase=True),
TP("w", "BLOB", bIgnoreCase=True),
TP("w", "TEXT", bIgnoreCase=True),
emitName="type"
),
TPOptional(
TPSeq(
TP("w", "NOT", bIgnoreCase=True),
TP("w", "NULL", bIgnoreCase=True),
emitName="nullable",
emitValue=True
)
),
TPOptional(
TPSeq(
TP("w", "DEFAULT", bIgnoreCase=True),
TPAlt(
TP("w", "NULL", bIgnoreCase=True),
TP("i"),
TP("s"),
emitName="defaultValue"
)
)
),
TPOptional(
TPSeq(
TP("w", "PRIMARY", bIgnoreCase=True,
emitName="pk",
emitValue=True),
TP("w", "KEY", bIgnoreCase=True),
TP("w", "AUTOINCREMENT", bIgnoreCase=True,
emitName="autoincr",
emitValue=True),
)
)
)
def __tryEat_SQL_COLUMN_DEF(ts:jk_tokenizingparsing.TokenStream):
m = COL_DEF.match(ts)
if m:
# print(m.values())
return m.values()
else:
return None
#
def __tryEat_SQL_CREATE_TABLE(ts:jk_tokenizingparsing.TokenStream):
m = TABLE_DEF.match(ts)
if m:
return { "table": m["tableName"], "columns": [] }
else:
return None
#
def parse(ts:jk_tokenizingparsing.TokenStream):
retTable = __tryEat_SQL_CREATE_TABLE(ts)
if retTable is None:
raise jk_tokenizingparsing.ParserErrorException(ts.location, "P0001: Syntax error", "parse-1", ts.getTextPreview(20))
m = TP("dbo").match(ts)
if not m:
raise jk_tokenizingparsing.ParserErrorException(ts.location, "P0001: Syntax error", "parse-2", ts.getTextPreview(20))
while True:
if ts.isEOS:
raise jk_tokenizingparsing.ParserErrorException(ts.location, "P0001: Syntax error", "parse-3", ts.getTextPreview(20))
dictData = __tryEat_SQL_COLUMN_DEF(ts)
if dictData:
col = {
"name": dictData["name"][0],
"type": dictData["type"][0],
"nullable": dictData.get("nullable", False),
"pk": dictData.get("pk", False),
"autoincr": dictData.get("autoincr", False)
}
retTable["columns"].append(col)
else:
raise jk_tokenizingparsing.ParserErrorException(ts.location, "P0002: Syntax error", "parse-5", ts.getTextPreview(20))
m = TP("dbc").match(ts)
if m:
return retTable
m = TP("dc").match(ts)
if m:
continue
raise jk_tokenizingparsing.ParserErrorException(ts.location, "P0001: Syntax error", "parse-4", ts.getTextPreview(20))
#
try:
ts = jk_tokenizingparsing.TokenStream(TOKENS)
parsingResult = parse(ts)
jk_json.prettyPrint(parsingResult)
except jk_tokenizingparsing.ParserErrorException as e:
e.print()
```
#### File: python-module-jk-tokenizingparsing/examples/test_matching5.py
```python
import timeit
from jk_tokenizingparsing import *
from jk_tokenizingparsing.tokenmatching import *
tokens = [
Token("w", "someVar", None, None, None, None, None),
Token("d", "=", None, None, None, None, None),
Token("d", "[", None, None, None, None, None),
Token("s", "gallia", None, None, None, None, None),
Token("d", ",", None, None, None, None, None),
Token("s", "est", None, None, None, None, None),
Token("d", ",", None, None, None, None, None),
Token("s", "omnis", None, None, None, None, None),
Token("d", ",", None, None, None, None, None),
Token("s", "divisa", None, None, None, None, None),
Token("d", ",", None, None, None, None, None),
Token("s", "in", None, None, None, None, None),
Token("d", ",", None, None, None, None, None),
Token("s", "partres", None, None, None, None, None),
Token("d", ",", None, None, None, None, None),
Token("s", "tres", None, None, None, None, None),
Token("d", "]", None, None, None, None, None),
Token("eos", "", None, None, None, None, None),
]
ts = TokenStream(tokens)
x = TPSeq(
TP("w", "someVar", emitName="varName"),
TP("d", "="),
TP("d", "["),
TPRepeat(
TP("s", emitName="value"),
TP("d", ",")
),
TP("d", "]")
)
def avg(values):
return sum(values) / len(values)
#
m = x.match(ts)
print(m)
print(m.values())
print()
nRepeat = 100000
timeValues = timeit.repeat(stmt="ts.reset(); x.match(ts)", globals={
"x": x,
"ts": ts,
}, repeat=1, number=nRepeat)
n = avg(timeValues)
print("%.4f µs per run = %d runs per sec" % ( n * 1000000 / nRepeat, nRepeat/n ) )
```
#### File: src/jk_tokenizingparsing/Convert4HexToUnicode.py
```python
import binascii
from .AbstractTextConverter import AbstractTextConverter
class Convert4HexToUnicode(AbstractTextConverter):
def __init__(self):
super().__init__("convert4HexToUnicode")
#
def __removeTrailingZeros(self, byteData):
if len(byteData) == 0:
return byteData
if byteData[0] != 0:
return byteData
for i in range(0, len(byteData)):
if byteData[i] != 0:
return byteData[i:]
return bytearray()
#
def convertText(self, text:str):
try:
binData = self.__removeTrailingZeros(binascii.unhexlify(text))
return binData.decode("utf-8")
except:
raise Exception("Not valid unicode: " + repr(text))
#
#
```
#### File: src/jk_tokenizingparsing/TokenizerPattern.py
```python
import re
import collections
from .Token import *
from .ParserErrorException import *
from .TokenizerAction import *
from ._TokenizerPatternTuple import _TokenizerPatternTuple
class EnumPattern(object):
EXACTCHAR = 1
ANYCHAR = 2
EXACTSTRING = 3
REGEX = 4
#
#
# A tokenization pattern. Use the static factory methods provided by `TokenizerPattern` to construct patterns.
#
class TokenizerPattern(object):
_PREFIXES_POSTFIXES = [
"$", "^"
]
#
# Is the character encountered equal to the specified character?
#
@staticmethod
def exactChar(data):
assert isinstance(data, str)
assert len(data) == 1
return _TokenizerPatternTuple(EnumPattern.EXACTCHAR, "EXACTCHAR", data, 1)
#
#
# Are the characters encountered exactly the specified sequence?
#
@staticmethod
def exactSequence(data):
assert isinstance(data, str)
assert len(data) > 0
return _TokenizerPatternTuple(EnumPattern.EXACTSTRING, "EXACTSTRING", data, len(data))
#
#
# Is the character encountered any of the specified characters?
#
@staticmethod
def anyOfTheseChars(data):
assert isinstance(data, str)
assert len(data) > 0
return _TokenizerPatternTuple(EnumPattern.ANYCHAR, "ANYCHAR", data, -1)
#
#
# Do the characters encountered match the specified regular expression?
#
@staticmethod
def regEx(regExPattern):
if (len(regExPattern) == 0) \
or regExPattern.startswith("$") \
or regExPattern.startswith("^") \
or regExPattern.endswith("$") \
or regExPattern.endswith("^"):
raise Exception("Unsuitable regular expression!")
return _TokenizerPatternTuple(EnumPattern.REGEX, "REGEX", re.compile(regExPattern), -1)
#
#
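# Usage sketch (illustrative only, not part of the original module): building a few
# patterns with the static factory methods above. How these patterns are wired into a
# tokenizer specification is not shown here and depends on the rest of the library.
#
# pEquals = TokenizerPattern.exactChar("=")
# pArrow = TokenizerPattern.exactSequence("=>")
# pSpace = TokenizerPattern.anyOfTheseChars(" \t")
# pIdentifier = TokenizerPattern.regEx("[a-zA-Z_][a-zA-Z0-9_]*")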
```
#### File: jk_tokenizingparsing/tokenmatching/TPOptional.py
```python
from jk_testing import Assert
import typing
from ..TokenStream import TokenStream
from ._Triplet import _Triplet
from .AbstractTokenPattern import AbstractTokenPattern
class TPOptional(AbstractTokenPattern):
def __init__(self, tp:AbstractTokenPattern, emitName:str = None, emitValue = None):
Assert.isInstance(tp, AbstractTokenPattern)
if emitName is not None:
Assert.isInstance(emitName, str)
self.__tp = tp
self.emitName = emitName
self.emitValue = emitValue
#
def doMatch(self, ts:TokenStream):
assert isinstance(ts, TokenStream)
m, mD = self.__tp.doMatch(ts)
if m:
if self.emitName:
return True, _Triplet(self.emitName, self.emitValue, mD)
else: # optimization
return True, mD
else:
return True, _Triplet(None, None, [])
#
#
```
#### File: jk_tokenizingparsing/tokenmatching/TP.py
```python
from jk_testing import Assert
import typing
from ..TokenStream import TokenStream, Token
from .AbstractTokenPattern import AbstractTokenPattern, _Triplet
class TP(AbstractTokenPattern):
def __init__(self, type_:str, text_:str = None, bIgnoreCase:bool = False, emitName:str = None, emitValue = None):
self.type = type_
if text_ is None:
self.text = None
self.bIgnoreCase = False
else:
self.text = text_.lower() if bIgnoreCase else text_
self.bIgnoreCase = bIgnoreCase
if emitName is not None:
Assert.isInstance(emitName, str)
self.emitName = emitName
self.emitValue = emitValue
#
def doMatch(self, ts:TokenStream):
assert isinstance(ts, TokenStream)
t = ts.peek()
if self.bIgnoreCase:
if (
((self.type) and (t.type != self.type))
or
((self.text is not None) and (t.text.lower() != self.text))
):
return False, None
else:
if (
((self.type) and (t.type != self.type))
or
((self.text is not None) and (t.text != self.text))
):
return False, None
ts.skip()
return True, _Triplet( self.emitName, self.emitValue, t )
#
#
```
#### File: jk_tokenizingparsing/tokenmatching/TPRepeat.py
```python
from jk_testing import Assert
import typing
from ..TokenStream import TokenStream
from ._Triplet import _Triplet
from .AbstractTokenPattern import AbstractTokenPattern
class TPRepeat(AbstractTokenPattern):
def __init__(self, tp:AbstractTokenPattern, delimiterPattern:AbstractTokenPattern, emitName:str = None, emitValue = None):
assert isinstance(tp, AbstractTokenPattern)
if delimiterPattern:
assert isinstance(delimiterPattern, AbstractTokenPattern)
if emitName is not None:
Assert.isInstance(emitName, str)
self.__tokenPattern = tp
self.__delimiterPattern = delimiterPattern
self.emitName = emitName
self.emitValue = emitValue
#
def doMatch(self, ts:TokenStream):
assert isinstance(ts, TokenStream)
sequenceMatched = []
while True:
m, mD = self.__tokenPattern.doMatch(ts)
if m:
sequenceMatched.append(mD)
else:
break
if self.__delimiterPattern:
m, mD = self.__delimiterPattern.doMatch(ts)
if m:
continue
else:
break
if sequenceMatched:
return True, _Triplet(self.emitName, self.emitValue, sequenceMatched)
else:
return False, None
#
#
```
#### File: jk_tokenizingparsing/tokenmatching/TPSeq.py
```python
from jk_testing import Assert
import typing
from ..TokenStream import TokenStream
from .AbstractTokenPattern import AbstractTokenPattern, _Triplet
class TPSeq(AbstractTokenPattern):
def __init__(self, *args, emitName:str = None, emitValue = None):
assert len(args) > 0
for a in args:
Assert.isInstance(a, AbstractTokenPattern)
self.__tokenPatterns = args
if emitName is not None:
Assert.isInstance(emitName, str)
self.emitName = emitName
self.emitValue = emitValue
#
def doMatch(self, ts:TokenStream):
assert isinstance(ts, TokenStream)
mark = ts.mark()
mDs = []
for tp in self.__tokenPatterns:
m, mD = tp.doMatch(ts)
if m:
mDs.append(mD)
else:
mark.resetToMark()
return False, None
return True, _Triplet(self.emitName, self.emitValue, mDs)
#
#
```
#### File: jk_tokenizingparsing/tokenmatching/TPUnordSeq.py
```python
from jk_testing import Assert
import typing
from ..TokenStream import TokenStream
from .AbstractTokenPattern import AbstractTokenPattern, _Triplet
from .TPOptional import TPOptional
class TPUnordSeq(AbstractTokenPattern):
def __init__(self, *args, emitName:str = None, emitValue = None):
assert len(args) > 0
for a in args:
Assert.isInstance(a, AbstractTokenPattern)
if isinstance(a, TPOptional):
raise Exception("An TPOptional element should not be part of a TPUnordSeq element as this breaks matching!")
if emitName is not None:
Assert.isInstance(emitName, str)
self.__tokenPatterns = args
if emitName is not None:
assert isinstance(emitName, str)
self.emitName = emitName
self.emitValue = emitValue
#
def doMatch(self, ts:TokenStream):
assert isinstance(ts, TokenStream)
mark = ts.mark()
mDs = []
allTPs = list(self.__tokenPatterns)
while allTPs:
bFound = False
for tp in allTPs:
m, mD = tp.doMatch(ts)
if m:
mDs.append(mD)
allTPs.remove(tp)
bFound = True
break
if not bFound:
mark.resetToMark()
return False, None
return True, _Triplet(self.emitName, self.emitValue, mDs)
#
#
```
#### File: src/jk_tokenizingparsing/TokenStream.py
```python
from .Token import *
from .TokenizerBase import *
from .SourceCodeLocation import *
from .TokenStreamMark import TokenStreamMark
class TokenStream(object):
# ////////////////////////////////////////////////////////////////
# // Constructors/Destructors
# ////////////////////////////////////////////////////////////////
def __init__(self, tokens):
self.__tokens = list(tokens)
assert len(self.__tokens) > 0
self.__pos = 0
self.__maxpos = len(self.__tokens) - 1
self.__eosToken = self.__tokens[self.__maxpos]
assert self.__eosToken.type == "eos"
#
# ////////////////////////////////////////////////////////////////
# // Properties
# ////////////////////////////////////////////////////////////////
@property
def location(self):
t = self.__tokens[self.__pos]
return SourceCodeLocation.fromToken(t)
@property
def isEOS(self):
return self.__pos == self.__maxpos
#
@property
def position(self):
return self.__pos
#
@property
def length(self):
return len(self.__tokens)
#
# ////////////////////////////////////////////////////////////////
# // Methods
# ////////////////////////////////////////////////////////////////
def reset(self):
self.__pos = 0
#
def getTextPreview(self, length:int, bInsertSpacesBetweenTokens:bool = False) -> str:
nAvailable = self.__maxpos - self.__pos
if nAvailable < length:
extra = "..."
else:
nAvailable = length
extra = ""
mid = " " if bInsertSpacesBetweenTokens else ""
s = mid.join([ c.text for i, c in zip(range(0, nAvailable), self.__tokens[self.__pos:]) ])
return s + extra
#
"""
def getTokenPostview(self, length:int):
nAvailable = self.__maxpos - self.__pos
if nAvailable < length:
extra = " ..."
else:
nAvailable = length
extra = ""
s = " ".join([ (c.type + ":" + repr(c.text)) for i, c in zip(range(0, nAvailable), self.__tokens[self.__pos:]) ]) #### !!!SYNC!!!! ####
return s + extra
#
"""
def getTokenPreview(self, length:int) -> str:
nAvailable = self.__maxpos - self.__pos
if nAvailable < length:
extra = " ..."
else:
nAvailable = length
extra = ""
s = " ".join([
(c.type + ":" + repr(c.text) + "@" + str(c.lineNo) + ":" + str(c.charPos))
for i, c in zip(range(0, nAvailable), self.__tokens[self.__pos:])
]) #### !!!SYNC!!!! ####
return s + extra
#
def mark(self):
return TokenStreamMark(self)
#
def _setPosition(self, pos):
self.__pos = pos
#
def multiPeek(self, nCount):
if nCount <= 0:
raise Exception("Invalid nCount value: " + str(nCount))
if self.__pos + nCount > self.__maxpos:
ret = []
for i in range(0, nCount):
j = self.__pos + i
if j < self.__maxpos:
ret.append(self.__tokens[j])
else:
ret.append(self.__eosToken)
return ret
else:
return self.__tokens[self.__pos:self.__pos + nCount]
#
def peek(self):
return self.__tokens[self.__pos]
#
def read(self):
t = self.__tokens[self.__pos]
if self.__pos < self.__maxpos:
self.__pos += 1
return t
#
def skip(self, n = 1):
if n < 0:
raise Exception("Invalid n: " + str(n))
if self.__pos + n > self.__maxpos:
raise Exception("Skipping too far!")
self.__pos += n
#
def skipAll(self, tokenType, tokenText):
assert isinstance(tokenType, str)
n = 0
while True:
t = self.__tokens[self.__pos]
if t.type == "eos":
return n
if t.type != tokenType:
return n
if tokenText != None:
if t.text != tokenText:
return n
n += 1
self.__pos += 1
#
def __iter__(self):
return iter(self.__tokens[self.__pos:])
#
#
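# Usage sketch (illustrative only, not part of the original module): the Token constructor
# signature used here mirrors the one in examples/test_matching5.py; the stream must end
# with an "eos" token.
#
# tokens = [
#     Token("w", "x", None, None, None, None, None),
#     Token("eos", "", None, None, None, None, None),
# ]
# ts = TokenStream(tokens)
# print(ts.peek().type)
# mark = ts.mark()
# ts.skip()
# mark.resetToMark()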
``` |
{
"source": "jkpubsrc/python-module-jk-treetaggerwrapper",
"score": 3
} |
#### File: src/jk_treetaggerwrapper/PoolOfThreadedTreeTaggers.py
```python
import time
import threading
import contextlib
import re
import treetaggerwrapper as ttpw
from .ObservableEvent import ObservableEvent
from .ReplaceException import ReplaceException
#
# This class wraps around tree taggers. It is best suited for use in concurrent, multithreaded environments.
#
class PoolOfThreadedTreeTaggers(object):
_SPLIT_PATTERN = re.compile(r"^([^\t]+)\t(.+)\s+(.+)\s+([^\s]+)$")
_SPLIT_PATTERN_2 = re.compile(r"^<([^\s]+)\s+text=\"(.+)\"\s*/>$")
class _LangSpecificCache(object):
def __init__(self, langID:str):
self.langID = langID
self.idleInstances = []
self.lastAccess = time.time()
self.countUsedInstances = 0
self.langLock = threading.Lock()
#
def touch(self):
self.lastAccess = time.time()
#
#
################################################################
#### Constructors / Destructors
################################################################
def __init__(self, treeTaggerInstallationPath:str):
self.__treeTaggerInstallationPath = treeTaggerInstallationPath
self.__unused = {}
self.__onTaggerCreated = ObservableEvent("onTaggerCreated")
self.__mainLock = threading.Lock()
#
################################################################
#### Events
################################################################
#
# This property returns an event object. Whenever a new TreeTagger process is created, this event is fired.
#
@property
def onTaggerCreated(self):
return self.__onTaggerCreated
#
################################################################
#### Methods
################################################################
#
# This method must be used together with the "with" statement to retrieve and use a <c>treetaggerwrapper</c> object.
#
# Have a look at <c>tagText()</c>: That method might be exactly what you are looking for, as <c>tagText()</c> is implemented like this:
#
# <code>
# def tagText(self, langID:str, text:str) -> str:
# with self._useTagger(langID) as tagger:
# return tagger.tag_text(text)
# </code>
#
@contextlib.contextmanager
def _useTagger(self, langID:str):
assert isinstance(langID, str)
with self.__mainLock:
langIDCache = self.__unused.get(langID, None)
if langIDCache is None:
langIDCache = PoolOfThreadedTreeTaggers._LangSpecificCache(langID)
self.__unused[langID] = langIDCache
langIDCache.touch()
if langIDCache.idleInstances:
with langIDCache.langLock:
tagger = langIDCache.idleInstances[-1]
del langIDCache.idleInstances[-1]
langIDCache.countUsedInstances += 1
else:
tagger = ttpw.TreeTagger(
TAGLANG=langID,
TAGOPT="-prob -threshold 0.7 -token -lemma -sgml -quiet",
TAGDIR=self.__treeTaggerInstallationPath)
self.__onTaggerCreated.fire(self, langID)
with langIDCache.langLock:
langIDCache.countUsedInstances += 1
try:
yield tagger
finally:
with langIDCache.langLock:
langIDCache.countUsedInstances -= 1
langIDCache.idleInstances.append(tagger)
#
#
# Convenience method that grabs a free instance of a suitable <c>TreeTagger</c>, tags the data, returns the tree tagger instance
# to the pool and then returns the tagging result to the caller.
#
def tagText(self, langID:str, text:str) -> str:
assert isinstance(text, str)
with self._useTagger(langID) as tagger:
return tagger.tag_text(text)
#
def __parseSpecial(self, text, item, gID, gContent, bWithConfidence):
if gID == "repdns":
pos = gContent.rfind(".")
if pos > 0:
lastPartC = gContent[pos + 1]
if lastPartC.isupper():
raise ReplaceException(gContent, gContent[:pos + 1] + " " + gContent[pos:])
else:
if bWithConfidence:
return (
gContent,
"§DNS§",
gContent,
1,
)
else:
return (
gContent,
"§DNS§",
gContent,
)
elif gID == "repemail":
if bWithConfidence:
return (
gContent,
"§EMAIL§",
gContent,
1,
)
else:
return (
gContent,
"§EMAIL§",
gContent,
)
elif gID == "repurl":
if bWithConfidence:
return (
gContent,
"§URL§",
gContent,
1,
)
else:
return (
gContent,
"§URL§",
gContent,
)
else:
print()
print(text)
print("No suitable pattern: " + item)
print()
if bWithConfidence:
return (
None,
None,
None,
None,
)
else:
return (
None,
None,
None,
)
#
#
# Convenience method that grabs a free instance of a suitable <c>TreeTagger</c>, tags the data, returns the tree tagger instance
# to the pool and then returns the tagging result to the caller.
#
# @param bool bWithConfidence A boolean value indicating whether to add the last confidence value or ignore it in the output
# @return list Returns a list of 3- respectively 4-tuples, where each tuple consists of these entries:
# * str : The token tagged (= the input data)
# * str : The type of that token (with extra token types: "§EMAIL§", "§URL§")
# * str : The lemma as a string or <c>None</c> if tagging failed
# * float : The confidence value
#
def tagText2(self, langID:str, text:str, bWithConfidence:bool = True, bWithNullsInsteadOfUnknown:bool = True) -> list:
assert isinstance(bWithConfidence, bool)
assert isinstance(bWithNullsInsteadOfUnknown, bool)
if not bWithNullsInsteadOfUnknown:
raise NotImplementedError("bWithNullsInsteadOfUnknown = False")
assert isinstance(langID, str)
assert isinstance(text, str)
n = 100
while True:
try:
n -= 1
return self.__tagText2(langID, text, bWithConfidence, bWithNullsInsteadOfUnknown)
except ReplaceException as ee:
if n == 0:
raise Exception("Endless loop! " + repr(text))
text = text.replace(ee.pattern, ee.replacement, 1)
#
def __tagText2(self, langID:str, text:str, bWithConfidence:bool = True, bWithNullsInsteadOfUnknown:bool = True) -> list:
if bWithConfidence:
with self._useTagger(langID) as tagger:
ret = []
for item in tagger.tag_text(text):
result = PoolOfThreadedTreeTaggers._SPLIT_PATTERN_2.match(item)
if result != None:
special = self.__parseSpecial(
text,
item,
result.group(1),
result.group(2),
bWithConfidence
)
if special[0] != None:
del ret[-1]
ret.append(special)
else:
ret.append(special)
else:
result = PoolOfThreadedTreeTaggers._SPLIT_PATTERN.match(item)
if result != None:
g3 = result.group(3)
if bWithNullsInsteadOfUnknown:
ret.append((
result.group(1),
result.group(2),
None if g3 == "<unknown>" else g3,
float(result.group(4)),
))
else:
ret.append((
result.group(1),
result.group(2),
g3,
float(result.group(4)),
))
else:
print()
print(text)
print("No suitable pattern: " + item)
print()
ret.append((
None,
None,
None,
None,
))
return ret
else:
with self._useTagger(langID) as tagger:
ret = []
for item in tagger.tag_text(text):
result = PoolOfThreadedTreeTaggers._SPLIT_PATTERN_2.match(item)
if result != None:
special = self.__parseSpecial(
text,
item,
result.group(1),
result.group(2),
bWithConfidence
)
if special[0] != None:
del ret[-1]
ret.append(special)
else:
ret.append(special)
else:
result = PoolOfThreadedTreeTaggers._SPLIT_PATTERN.match(item)
if result != None:
g3 = result.group(3)
if bWithNullsInsteadOfUnknown:
ret.append((
result.group(1),
result.group(2),
None if g3 == "<unknown>" else g3,
))
else:
ret.append((
result.group(1),
result.group(2),
g3,
))
else:
print()
print(text)
print("No suitable pattern: " + item)
print()
ret.append((
None,
None,
None,
))
return ret
#
#
# Retrieve statistical information about the tagging instances maintained in the background.
#
def getStats(self):
with self.__mainLock:
allTuples = list(self.__unused.items())
ret = {}
for langID, langIDCache in allTuples:
ret[langID] = {
"idle": len(langIDCache.idleInstances),
"inUse": langIDCache.countUsedInstances
}
return ret
#
#
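# Usage sketch (illustrative only, not part of the original module): the installation
# path and language id below are placeholders, and a working TreeTagger installation
# is required for any of these calls to succeed.
#
# pool = PoolOfThreadedTreeTaggers("/opt/treetagger")
# print(pool.tagText("en", "This is a short sentence."))
# for token, tokenType, lemma, confidence in pool.tagText2("en", "Another short sentence."):
#     print(token, tokenType, lemma, confidence)
# print(pool.getStats())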
``` |
{
"source": "jkpubsrc/python-module-jk-trioinput",
"score": 3
} |
#### File: python-module-jk-trioinput/examples/test1.py
```python
import trio
import jk_console
import jk_console.demo
import jk_trioinput
import jk_json
async def inputLoop(history, cancel_scope):
try:
while True:
s = await jk_trioinput.readConsoleInput("> ", 0, jk_console.Console.height()-2, syntaxHighlighter=None, history=history)
s = s.strip()
if not s:
cancel_scope.cancel()
jk_console.Console.printAt(0, 5, "INPUT: " + repr(s) + " " * 20)
except:
cancel_scope.cancel()
#
async def run():
jk_console.Console.clear()
print("This is a demo to test asynchroneous input from the console.")
history = jk_trioinput.ConsoleInputHistory()
async with trio.open_nursery() as nursery:
nursery.start_soon(inputLoop, history, nursery.cancel_scope)
print(jk_console.Console.RESET)
print()
return
#
trio.run(run)
```
#### File: src/jk_trioinput/input.py
```python
import re
import sys
import os
import json
import trio
import jk_json
import jk_console
from .TextBufferWithCursor import TextBufferWithCursor
from .TextRenderer import TextRenderer
from .ConsoleInputHistory import ConsoleInputHistory
#
# This method reads user input, echoing it at the current cursor position. This way it works quite similarly to <c>input()</c>.
#
async def readConsoleInput(outputText:str = None, cx:int = None, cy:int = None, syntaxHighlighter = None,
arrowColor:str = jk_console.Console.ForeGround.STD_YELLOW,
defaultTextColor:str = jk_console.Console.ForeGround.STD_WHITE,
history = None) -> str:
cx2, cy2 = jk_console.Console.getCursorPosition()
if cx is None:
cx = cx2
if cy is None:
cy = cy2
if outputText:
jk_console.Console.printAt(cx, cy, outputText, True)
cx += len(outputText)
width, height = jk_console.Console.getSize()
#textRenderer = TextRenderer(cx, cy, width - 1, syntaxHighlighter)
textRenderer = TextRenderer(cx, cy, syntaxHighlighter=syntaxHighlighter, arrowColor=arrowColor, defaultTextColor=defaultTextColor)
if history is not None:
history.resetCursor()
buf = TextBufferWithCursor()
textRenderer.render(str(buf), buf.cursorPos)
temp = ""
while True:
bChanged = False
key = None
#with trio.move_on_after(2):
key = await trio.to_thread.run_sync(jk_console.Console.Input.readKey)
if key is None:
# timeout
continue
if key == jk_console.Console.Input.KEY_CURSOR_UP:
if history is not None:
s = history.prev()
if s is not None:
buf.initialize(s)
textRenderer.render(str(buf), buf.cursorPos)
elif key == jk_console.Console.Input.KEY_CURSOR_DOWN:
if history is not None:
s = history.next()
if s is not None:
buf.initialize(s)
textRenderer.render(str(buf), buf.cursorPos)
else:
buf.initialize(temp)
history.resetCursor()
elif key == jk_console.Console.Input.KEY_CURSOR_LEFT:
buf.moveCursorLeft()
elif key == jk_console.Console.Input.KEY_CURSOR_RIGHT:
buf.moveCursorRight()
elif key == jk_console.Console.Input.KEY_CTRL_CURSOR_RIGHT:
buf.jumpCursorRight()
elif key == jk_console.Console.Input.KEY_CTRL_CURSOR_LEFT:
buf.jumpCursorLeft()
elif key == jk_console.Console.Input.KEY_DELETE:
if buf.deleteUnderCursor():
bChanged = True
elif key == jk_console.Console.Input.KEY_BACKSPACE:
if buf.moveCursorLeft():
if buf.deleteUnderCursor():
bChanged = True
elif key == jk_console.Console.Input.KEY_TAB:
buf.insertAtCursor(" ")
bChanged = True
elif key == jk_console.Console.Input.KEY_HOME:
buf.moveCursorToStart()
elif key == jk_console.Console.Input.KEY_HOME_NUMPAD:
buf.moveCursorToStart()
elif key == jk_console.Console.Input.KEY_END:
buf.moveCursorToEnd()
elif key == jk_console.Console.Input.KEY_END_NUMPAD:
buf.moveCursorToEnd()
elif key == jk_console.Console.Input.KEY_ENTER:
print()
break
elif key == jk_console.Console.Input.KEY_CTRL_C:
raise KeyboardInterrupt()
else:
if len(key) == 1:
buf.insertAtCursor(key)
bChanged = True
if bChanged:
temp = str(buf)
textRenderer.render(str(buf), buf.cursorPos)
s = str(buf)
if history is not None:
history.append(s)
return s
#
```
#### File: src/jk_trioinput/TextBufferWithCursor.py
```python
import jk_console
class TextBufferWithCursor(object):
def __init__(self):
self.__buffer = []
self.__cpos = 0
#
def initialize(self, text:str):
self.__buffer = list(text)
self.__cpos = len(text)
#
@property
def isEmpty(self):
return len(self.__buffer) == 0
#
@property
def cursorPos(self):
return self.__cpos
#
def deleteUnderCursor(self):
if self.__cpos < len(self.__buffer):
del self.__buffer[self.__cpos]
return True
return False
#
def _classifyChar(self, c:str):
if c in "!\"§$%&/()=?'²³{[]}\\`+*~#'-.:,;µ|<>@":
return "d"
elif c in " \t":
return "_"
else:
return "w"
#
def _strToClassList(self, s:list):
return [ self._classifyChar(c) for c in s ]
#
def _findNextStopRight(self, pos:int):
s = self._strToClassList(self.__buffer)
cref = s[pos]
if cref == "d":
cref == "x"
while True:
pos += 1
if pos >= len(s):
return len(s)
if s[pos] != cref:
if s[pos] == "_":
# move through this space section
cref = "_"
else:
return pos
#
def _findNextStopLeft(self, pos:int):
s = self._strToClassList(reversed(self.__buffer))
pos = len(s) - pos
cref = s[pos]
if cref == "d":
cref == "x"
while True:
pos += 1
if pos >= len(s):
return 0
if s[pos] != cref:
if cref == "_":
# move through this word section
cref = s[pos]
else:
return len(s) - pos
#
def moveCursorLeft(self):
if self.__cpos > 0:
self.__cpos -= 1
return True
else:
return False
#
def moveCursorRight(self):
if self.__cpos < len(self.__buffer):
self.__cpos += 1
return True
else:
return False
#
def jumpCursorRight(self):
if self.__cpos < len(self.__buffer):
n = self._findNextStopRight(self.__cpos)
if n < 0:
return False
self.__cpos = n
return True
else:
return False
#
def jumpCursorLeft(self):
if self.__cpos > 0:
n = self._findNextStopLeft(self.__cpos)
self.__cpos = n
return True
else:
return False
#
def moveCursorToStart(self):
self.__cpos = 0
#
def moveCursorToEnd(self):
self.__cpos = len(self.__buffer)
#
def insertAtCursor(self, c:str, bMoveCursor:bool = True):
self.__buffer.insert(self.__cpos, c)
if bMoveCursor:
self.__cpos += 1
#
def printToConsoleAt(self, cx:int, cy:int, width:int = -1, syntaxHighlighter = None):
s = "".join(self.__buffer)
if syntaxHighlighter:
buf = []
for color, chunk in syntaxHighlighter(s):
buf.append(color)
buf.append(chunk)
s = "".join(buf)
jk_console.Console.printAt(cx, cy, s + " ", True)
jk_console.Console.moveCursorTo(cx + self.__cpos, cy)
#
def __str__(self):
return "".join(self.__buffer)
#
def __repr__(self):
return "".join(self.__buffer)
#
def __len__(self):
return len(self.__buffer)
#
def reset(self):
self.__buffer.clear()
self.__cpos = 0
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-triologging",
"score": 2
} |
#### File: src/jk_triologging/_inst.py
```python
import jk_logging
from .TrioLogWrapper import TrioLogWrapper
from .TrioBufferLogger import TrioBufferLogger
from .TrioConsoleLogger import TrioConsoleLogger
from .TrioFileLogger import TrioFileLogger
from .TrioFilterLogger import TrioFilterLogger
from .TrioMulticastLogger import TrioMulticastLogger
from .TrioNamedMulticastLogger import TrioNamedMulticastLogger
from .TrioNullLogger import TrioNullLogger
from .TrioStringListLogger import TrioStringListLogger
def instantiate(cfg) -> TrioLogWrapper:
l = jk_logging.instantiate(cfg)
if isinstance(l, jk_logging.BufferLogger):
return TrioBufferLogger(l)
elif isinstance(l, jk_logging.ConsoleLogger):
return TrioConsoleLogger(l)
elif isinstance(l, jk_logging.FileLogger):
return TrioFileLogger(l)
elif isinstance(l, jk_logging.FilterLogger):
return TrioFilterLogger(l)
elif isinstance(l, jk_logging.MulticastLogger):
return TrioMulticastLogger(l)
elif isinstance(l, jk_logging.NamedMulticastLogger):
return TrioNamedMulticastLogger(l)
elif isinstance(l, jk_logging.NullLogger):
return TrioNullLogger(l)
elif isinstance(l, jk_logging.StringListLogger):
return TrioStringListLogger(l)
else:
return TrioLogWrapper(l)
#
```
#### File: src/jk_triologging/TrioFileLogger.py
```python
import trio
import jk_logging
from .TrioLogWrapper import TrioLogWrapper
class TrioFileLogger(TrioLogWrapper):
@staticmethod
def create(filePath, rollOver, bAppendToExistingFile = True, bFlushAfterEveryLogMessage = True, fileMode = None, logMsgFormatter = None):
return TrioFileLogger(jk_logging.FileLogger.create(filePath, rollOver, bAppendToExistingFile, bFlushAfterEveryLogMessage, fileMode, logMsgFormatter))
#
async def closed(self) -> bool:
return await trio.to_thread.run_sync(self._l.closed)
#
async def isClosed(self) -> bool:
return await trio.to_thread.run_sync(self._l.isClosed)
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-typo3",
"score": 2
} |
#### File: src/jk_typo3/Typo3ConfigurationFileParser.py
```python
import re
import os
import sys
from jk_utils import TypedValue
from jk_utils.tokenizer import Token, Stack
import jk_php_tokenizer
from .TokenStream import TokenStream
class Typo3ConfigurationFileParser(object):
__TOKENIZER = jk_php_tokenizer.PHPTokenizer()
def parseText(self, rawText:str) -> dict:
tokenStream = TokenStream(Typo3ConfigurationFileParser.__TOKENIZER.tokenize(rawText))
ret = self._tryRead_S(tokenStream)
if ret is None:
tokenStream.dump()
raise Exception("Syntax error at: " + tokenStream.location() + ". Expected: PHP intro signature")
assert isinstance(ret, dict)
if tokenStream.hasMoreTokens():
tokenStream.dump()
raise Exception("Syntax error at: " + tokenStream.location() + ". Excessive tokens!")
return ret
#
def parseFile(self, filePath:str):
with open(filePath, "r") as f:
return self.parseText(f.read())
#
#
# S -> phpintro "return" ASSOCARRAY semicolon
#
def _tryRead_S(self, ts:TokenStream):
t = ts.tryEat("phpintro")
if t is None:
return None
t = ts.tryEat("word", "return")
if t is None:
ts.dump()
raise Exception("Syntax error at: " + ts.location() + ". Expected: 'return'")
bSuccess, ret = self._tryRead_ASSOCARRAY(ts)
if not bSuccess:
ts.dump()
raise Exception("Syntax error at: " + ts.location() + ". Expected: associative array")
t = ts.tryEat("semicolon")
if t is None:
ts.dump()
raise Exception("Syntax error at: " + ts.location() + ". Expected: ';'")
return ret
#
#
# EMTPYARRAY -> "[" "]"
#
def _tryRead_EMTPYARRAY(self, ts:TokenStream):
m = ts.mark()
# check start condition: "["
t = ts.tryEat("lparen2")
if t is None:
return False, None
# check termination condition: "["
t = ts.tryEat("rparen2")
if t is None:
m.resetToMark()
return False, None
return True, []
#
#
# ASSOCARRAY -> "[" ASSOCARRAY_COMPONENT+ "]"
#
def _tryRead_ASSOCARRAY(self, ts:TokenStream):
m = ts.mark()
# check start condition: "["
t = ts.tryEat("lparen2")
if t is None:
return False, None
ret = {}
bExpectTermination = False
while True:
# check EOS
if ts.isEOS():
ts.dump()
raise Exception("Unexpected EOS!")
# check termination condition: "]"
t = ts.tryEat("rparen2")
if t is None:
if bExpectTermination:
raise Exception("Syntax error at: " + ts.location() + ". Expected: ']'")
else:
return True, ret
# read array item
bSuccess, key, value = self._tryRead_ASSOCARRAY_ELEMENT(ts)
if bSuccess:
ret[key] = value
else:
if len(ret) == 0:
m.resetToMark()
return False, None
else:
raise Exception("Syntax error at: " + ts.location() + ". Expected: associative array element, ',' or ']'")
# read comma
t = ts.tryEat("op", ",")
if t is None:
bExpectTermination = True
#
#
# STDARRAY -> "[" STDARRAY_COMPONENT+ "]"
#
def _tryRead_STDARRAY(self, ts:TokenStream):
m = ts.mark()
# check start condition: "["
t = ts.tryEat("lparen2")
if t is None:
return False, None
ret = []
bExpectTermination = False
while True:
# check EOS
if ts.isEOS():
ts.dump()
raise Exception("Unexpected EOS!")
# check termination condition: "]"
t = ts.tryEat("rparen2")
if t is None:
if bExpectTermination:
raise Exception("Syntax error at: " + ts.location() + ". Expected: ']'")
else:
return True, ret
# read array item
bSuccess, value = self._tryRead_VALUE(ts)
if bSuccess:
ret.append(value)
else:
if len(ret) == 0:
m.resetToMark()
return False, None
else:
raise Exception("Syntax error at: " + ts.location() + ". Expected: standard array element, ',' or ']'")
# read comma
t = ts.tryEat("op", ",")
if t is None:
bExpectTermination = True
#
#
# ASSOCARRAY_ELEMENT -> str1 "=>" VALUE
#
def _tryRead_ASSOCARRAY_ELEMENT(self, ts:TokenStream):
m = ts.mark()
t = ts.tryEat("str1")
if t is None:
return False, None, None
key = t.value
t = ts.tryEat("op", "=>")
if t is None:
m.resetToMark()
return False, None, None
b, ret = self._tryRead_VALUE(ts)
if not b:
ts.dump()
raise Exception("Syntax error at: " + ts.location() + ". Expected: some value")
return True, key, ret
#
#
# VALUE -> int
# | str1
# | str2
# | bool
# | null
# | ASSOCARRAY
# | STDARRAY
#
def _tryRead_VALUE(self, ts:TokenStream):
t = ts.tryEat("str1")
if t is not None:
return True, t.value
t = ts.tryEat("str2")
if t is not None:
return True, t.value
t = ts.tryEat("int")
if t is not None:
return True, int(t.value)
t = ts.tryEat("bool")
if t is not None:
return True, t.value == "true"
t = ts.tryEat("null")
if t is not None:
return True, None
bSuccess, ret = self._tryRead_EMTPYARRAY(ts)
if bSuccess:
return True, ret
bSuccess, ret = self._tryRead_ASSOCARRAY(ts)
if bSuccess:
return True, ret
bSuccess, ret = self._tryRead_STDARRAY(ts)
if bSuccess:
return True, ret
return False, None
#
#
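# Usage sketch (illustrative only, not part of the original module): parses a tiny
# LocalConfiguration.php-style snippet into a nested Python dict. The token types
# ("phpintro", "str1", "op", ...) are assumed to be produced by jk_php_tokenizer for
# this input.
#
# parser = Typo3ConfigurationFileParser()
# cfg = parser.parseText("<?php return ['DB' => ['host' => 'localhost', 'port' => 3306]];")
# print(cfg["DB"]["port"])    # -> 3306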
```
#### File: src/jk_typo3/Typo3LocalConfigurationFile.py
```python
import os
import codecs
import re
import shutil
import jk_console
from .Typo3ConfigurationFileParser import Typo3ConfigurationFileParser
#
# This class represents the "LocalConfiguration.php" file in a Typo3 installation.
#
# During loading the file content is parsed with <c>Typo3ConfigurationFileParser</c>. The parsed configuration is stored internally
# as a (nested) dictionary which can be accessed through the <c>data</c> property.
#
class Typo3LocalConfigurationFile(object):
# ================================================================================================================================
# ==== Constructor Methods
#
# Constructor method.
#
def __init__(self):
self.__data = None
self.__filePath = None
#
# ================================================================================================================================
# ==== Properties
@property
def isLoaded(self):
return self.__data != None
#
@property
def data(self) -> dict:
return self.__data
#
# ================================================================================================================================
# ==== Methods
"""
#
# For debugging purposes only: Write the internal state of this object to STDOUT.
#
def dump(self, onlyLineNumbers:list = None):
if onlyLineNumbers is not None:
assert isinstance(onlyLineNumbers, (set, tuple, list))
onlyLineNumbers = set(onlyLineNumbers)
print("Typo3LocalConfigurationFile")
print("\t__bChanged: " + str(self.__changedFlag))
print("\t__filePath: " + str(self.__filePath))
if self.__data != None:
table = jk_console.SimpleTable()
if onlyLineNumbers:
bFirst = True
bLastWasPoints = False
for (b, data) in self.__data:
if data.lineNo in onlyLineNumbers:
if bFirst:
bFirst = False
if data.lineNo > 1:
table.addRow("...", "...", "...")
table.addRow(str(b), Typo3LocalConfigurationFile.__getType(data), str(data))
bLastWasPoints = False
else:
if not bLastWasPoints:
table.addRow("...", "...", "...")
bLastWasPoints = True
bFirst = False
else:
for (b, data) in self.__data:
table.addRow(str(b), Typo3LocalConfigurationFile.__getType(data), str(data))
print("\t__lines:")
table.print(prefix="\t\t")
#
"""
#
# Load a LocalConfiguration.php file.
#
# @param str dirPath The Typo3 installation directory path.
# @param str filePath The file path of the Typo3 "LocalConfiguration.php" file.
# @param str rawText The raw file content of a "LocalConfiguration.php" file.
#
def load(self, dirPath = None, filePath = None, rawText:str = None) -> dict:
if rawText is not None:
assert isinstance(rawText, str)
filePath = None
elif filePath is not None:
assert isinstance(filePath, str)
with codecs.open(filePath, "r", "utf-8") as f:
rawText = f.read()
elif dirPath is not None:
assert isinstance(dirPath, str)
filePath = os.path.join(dirPath, "public/typo3conf/LocalConfiguration.php")
with codecs.open(filePath, "r", "utf-8") as f:
rawText = f.read()
else:
raise Exception("At least one of the following arguments must be specified: 'dirPath', 'filePath' or 'rawText'!")
self.__data = Typo3ConfigurationFileParser().parseText(rawText)
assert isinstance(self.__data, dict)
self.__filePath = filePath
return self.__data
#
# ================================================================================================================================
# ==== Static Methods
@staticmethod
def __getType(something):
tName = something.__class__.__name__
return tName
#
#
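# Usage sketch (illustrative only, not part of the original module): "/var/www/typo3-site"
# is a placeholder for a Typo3 installation directory that contains
# public/typo3conf/LocalConfiguration.php.
#
# lc = Typo3LocalConfigurationFile()
# data = lc.load(dirPath="/var/www/typo3-site")
# print(lc.isLoaded)
# print(sorted(data.keys()))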
``` |
{
"source": "jkpubsrc/python-module-jk-uploadpack",
"score": 4
} |
#### File: src/jk_uploadpack/helpers.py
```python
import hashlib
#
# Consumer that iterates through a bytes iterator and produces the hash value.
#
def sha256_bytesiter(bytesiter) -> str:
hasher = hashlib.sha256()
for block in bytesiter:
hasher.update(block)
return hasher.hexdigest()
#
#
# Iterator that reads from a file.
#
def file_read_blockiter(f, blocksize=65536):
block = f.read(blocksize)
while len(block) > 0:
yield block
block = f.read(blocksize)
#
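# Usage sketch (illustrative only, not part of the original module): combines the two
# helpers above to hash a file block by block. "example.bin" is a placeholder path.
#
# with open("example.bin", "rb") as f:
#     digest = sha256_bytesiter(file_read_blockiter(f))
# print(digest)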
```
#### File: src/jk_uploadpack/Packer.py
```python
import time
import os
import typing
import sys
import json
import tarfile
import io
import jk_utils
from .SrcFileInfo import SrcFileInfo
from .UPFile import UPFile
from .UPFileGroup import UPFileGroup
from .UPStoredBlob import UPStoredBlob
class Packer(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, outFilePath:str, compression:str = None):
assert isinstance(outFilePath, str)
assert outFilePath
self.__outFilePath = outFilePath
# ----
if compression is None:
if outFilePath.endswith(".gz"):
self.__compression = "gz"
elif outFilePath.endswith(".xz"):
self.__compression = "xz"
elif outFilePath.endswith(".bz2"):
self.__compression = "bz2"
else:
self.__compression = None
else:
assert isinstance(compression, str)
assert compression in [ "gz", "gzip", "xz", "bz2", "bzip2" ]
if compression == "bzip2":
compression = "bz2"
if compression == "gzip":
compression = "gz"
self.__compression = compression
# ----
self.__totalSizeLogical = 0
self.__totalSizeUncompressed = 0
self.__totalSizeCompressed = 0
self.__fileGroups = {}
self.__filesByID = []
self.__filesByHash = {}
self.__t = tarfile.open(outFilePath, ("w:" + self.__compression) if self.__compression else "w")
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def filePath(self) -> str:
return self.__outFilePath
#
@property
def closed(self) -> bool:
return self.__t is None
#
@property
def isClosed(self) -> bool:
return self.__t is None
#
@property
def totalSizeLogical(self) -> int:
return self.__totalSizeLogical
#
@property
def totalSizeUncompressed(self) -> int:
return self.__totalSizeUncompressed
#
@property
def totalSizeCompressed(self) -> int:
return self.__totalSizeCompressed
#
@property
def fileGroupIdentifiers(self) -> list:
return sorted(self.__fileGroups.keys())
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def __enter__(self):
return self
#
def __exit__(self, _extype, _exobj, _stacktrace):
self.close()
#
def _registerFile(self, filePath:str) -> typing.Tuple[SrcFileInfo,UPStoredBlob]:
assert filePath is not None
if self.__t is None:
raise Exception("Upload pack is already closed!")
srcFI = SrcFileInfo.fromFile(filePath)
sf = self.__filesByHash.get(srcFI.hashID)
if sf is None:
fileID = len(self.__filesByID)
tarInfo = tarfile.TarInfo("parts/{}".format(fileID))
tarInfo.size = srcFI.size
tarInfo.mtime = srcFI.mtime # TODO
tarInfo.uid = 1000 # TODO
tarInfo.gid = 1000 # TODO
tarInfo.mode = srcFI.mode # TODO
with open(srcFI.srcFilePath, "rb") as fin:
self.__t.addfile(tarInfo, fin)
sf = UPStoredBlob(fileID, srcFI.size)
self.__filesByID.append(sf)
self.__filesByHash[srcFI.hashID] = sf
self.__totalSizeUncompressed += srcFI.size
self.__totalSizeLogical += srcFI.size
return srcFI, sf
#
def _registerRaw(self, raw:typing.Union[bytes,bytearray,io.BytesIO]) -> typing.Tuple[SrcFileInfo,UPStoredBlob]:
assert raw is not None
if self.__t is None:
raise Exception("Upload pack is already closed!")
srcFI = SrcFileInfo.fromRaw(raw)
sf = self.__filesByHash.get(srcFI.hashID)
if sf is None:
fileID = len(self.__filesByID)
tarInfo = tarfile.TarInfo("parts/{}".format(fileID))
tarInfo.size = srcFI.size
tarInfo.mtime = srcFI.mtime # TODO
tarInfo.uid = 1000 # TODO
tarInfo.gid = 1000 # TODO
tarInfo.mode = srcFI.mode # TODO
if isinstance(raw, (bytes,bytearray)):
self.__t.addfile(tarInfo, io.BytesIO(raw))
else:
self.__t.addfile(tarInfo, raw)
sf = UPStoredBlob(fileID, srcFI.size)
self.__filesByID.append(sf)
self.__filesByHash[srcFI.hashID] = sf
self.__totalSizeUncompressed += srcFI.size
self.__totalSizeLogical += srcFI.size
return srcFI, sf
#
################################################################################################################################
## Public Methods
################################################################################################################################
def fileGroup(self, identifier:str) -> UPFileGroup:
assert isinstance(identifier, str)
assert identifier
fg = self.__fileGroups.get(identifier)
if fg is None:
fg = UPFileGroup(self, identifier)
self.__fileGroups[identifier] = fg
return fg
#
def __createMetaJSON(self) -> dict:
_jFileGroups = {}
for upFG in self.__fileGroups.values():
_jFileGroups[upFG.identifier] = upFG.toJSON()
return {
"magic": {
"magic": "upload-pack",
"version": 1,
},
"fileGroups": _jFileGroups,
}
#
def close(self):
if self.__t is None:
return
rawData = json.dumps(self.__createMetaJSON()).encode("utf-8")
tarInfo = tarfile.TarInfo("meta.json")
tarInfo.size = len(rawData)
tarInfo.mtime = time.time()
tarInfo.uid = os.getuid()
tarInfo.gid = os.getgid()
tarInfo.mode = jk_utils.ChModValue("rwxrwxr-x").toInt()
self.__totalSizeUncompressed += tarInfo.size
self.__t.addfile(tarInfo, io.BytesIO(rawData))
self.__t.close()
self.__t = None
self.__totalSizeCompressed = os.lstat(self.__outFilePath).st_size
#
#
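# Usage sketch (illustrative only, not part of the original module): creates an upload
# pack with one file group. Individual files are added through the UPFileGroup object
# returned by fileGroup(); that API is defined in UPFileGroup.py and is not shown here.
#
# with Packer("backup.up.tar.gz") as p:
#     fg = p.fileGroup("documents")
#     # ... register files via fg here ...
# print(p.totalSizeCompressed)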
```
#### File: src/jk_uploadpack/SrcFileInfo.py
```python
import io
import stat
import os
import typing
import hashlib
import jk_typing
import jk_utils
from .helpers import sha256_bytesiter, file_read_blockiter
#
# This class represents a source file to store. It provides essential information about that file for storing.
#
class SrcFileInfo(object):
__DEFAULT_MODE = jk_utils.ChModValue("rwxrwxr-x").toInt()
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
def __init__(self, size:int, hashID:str, srcFilePath:typing.Union[str,None], mode:int, mtime:float):
assert isinstance(size, int)
assert size >= 0
assert isinstance(hashID, str)
assert hashID
if srcFilePath is not None:
assert isinstance(srcFilePath, str)
assert srcFilePath
assert isinstance(mode, int)
assert isinstance(mtime, (int, float))
self.mode = mode
self.size = size
self.mtime = mtime
self.hashID = hashID
self.srcFilePath = srcFilePath
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
@staticmethod
def fromFile(filePath:str):
statStruct = os.lstat(filePath)
mode = statStruct.st_mode
size = statStruct.st_size
uid = statStruct.st_uid
gid = statStruct.st_gid
mtime = float(statStruct.st_mtime)
hashAlg = hashlib.sha256()
with open(filePath, "rb") as fin:
for chunk in iter(lambda: fin.read(4096), b""):
hashAlg.update(chunk)
hashDigest = hashAlg.hexdigest()
hashID = "sha256:{}:{}".format(hashDigest, size)
return SrcFileInfo(size, hashID, filePath, mode, mtime)
#
@staticmethod
def fromRaw(raw:typing.Union[bytes,bytearray,io.BytesIO]):
mode = SrcFileInfo.__DEFAULT_MODE
size = len(raw)
uid = 1000
gid = 1000
mtime = 0
hashAlg = hashlib.sha256()
hashAlg.update(raw)
hashDigest = hashAlg.hexdigest()
hashID = "sha256:{}:{}".format(hashDigest, size)
return SrcFileInfo(size, hashID, None, mode, mtime)
#
#
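# Usage sketch (illustrative only, not part of the original module): "example.txt" is a
# placeholder path.
#
# info = SrcFileInfo.fromFile("example.txt")
# print(info.size, info.mode, info.hashID)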
``` |
{
"source": "jkpubsrc/python-module-jk-utils",
"score": 4
} |
#### File: python-module-jk-utils/examples/test_async_runner.py
```python
import jk_console
from jk_utils import AsyncRunner
asyncRunner = AsyncRunner(debugLogPrintFunction = print)
asyncRunner.start()
def doSomething(data):
print()
print("#### --------------------------------")
print("#### Doing something: " + str(data))
print("#### --------------------------------")
print()
#
asyncRunner.rescheduleCallable(doSomething, "abc", 5, doSomething, autoRescheduleDelay=5)
print()
print("Enter 's' to schedule or enter 'r' to reschedule some activity. Any other intput will terminate the program.")
print()
n = 1
while True:
keyChar = input()
if keyChar == "s":
asyncRunner.removeScheduledCallable(doSomething)
print("Scheduling: " + str(n))
asyncRunner.scheduleCallable(doSomething, "Activity " + str(n), 2)
n += 1
elif keyChar == "r":
print("(Re)scheduling: " + str(n))
asyncRunner.rescheduleCallable(doSomething, "Activity " + str(n), 2, doSomething)
n += 1
else:
break
```
#### File: src/jk_utils/AmountOfBytes.py
```python
import typing
import re
from .showCapacityProgress import formatBytes
#
# This class represents a specific amount of bytes. An object of this class is intended to either represent an existing amount of data specified by users,
# by a configuration file or represent data that requires human readable formatting.
#
class AmountOfBytes(object):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, value):
self.__n = AmountOfBytes.__parseBytesFromAny(value)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def __repr__(self):
return "AmountOfBytes<(" + AmountOfBytes.__bytesToStrAuto(self.__n) + ")>"
#
def __str__(self):
return AmountOfBytes.__bytesToStrAuto(self.__n)
#
#
# Use this method to retrieve formatted output of the data.
#
def toStr(self, bShort:bool = True, magnitude:str = None) -> str:
if magnitude in "KBMGT":
return AmountOfBytes.__bytesToStrFixed(self.__n, magnitude)
elif bShort:
return formatBytes(self.__n)
else:
return AmountOfBytes.__bytesToStrAuto(self.__n)
#
def __float__(self):
return float(self.__n)
#
def __int__(self):
return self.__n
#
def __add__(self, other):
if isinstance(other, AmountOfBytes):
return AmountOfBytes(self.__n + other.__n)
elif isinstance(other, int):
return AmountOfBytes(self.__n + other)
elif isinstance(other, float):
return self.__n + other
else:
raise TypeError(repr(other))
#
def __sub__(self, other):
if isinstance(other, AmountOfBytes):
return AmountOfBytes(self.__n - other.__n)
elif isinstance(other, int):
return AmountOfBytes(self.__n - other)
elif isinstance(other, float):
return self.__n - other
else:
raise TypeError(repr(other))
#
def __mul__(self, other):
if isinstance(other, AmountOfBytes):
raise TypeError(repr(other))
elif isinstance(other, int):
return AmountOfBytes(self.__n * other)
elif isinstance(other, float):
return self.__n * other
else:
raise TypeError(repr(other))
#
def __div__(self, other):
if isinstance(other, AmountOfBytes):
raise TypeError(repr(other))
elif isinstance(other, int):
return self.__n / other
elif isinstance(other, float):
return self.__n / other
else:
raise TypeError(repr(other))
#
def __eq__(self, other):
if isinstance(other, AmountOfBytes):
return self.__n == other.__n
elif isinstance(other, int):
return self.__n == other
else:
return False
#
def __ne__(self, other):
if isinstance(other, AmountOfBytes):
return self.__n != other.__n
elif isinstance(other, int):
return self.__n != other
else:
return False
#
def __gt__(self, other):
if isinstance(other, AmountOfBytes):
return self.__n > other.__n
elif isinstance(other, (int, float)):
return self.__n > other
else:
return False
#
def __ge__(self, other):
if isinstance(other, AmountOfBytes):
return self.__n >= other.__n
elif isinstance(other, (int, float)):
return self.__n >= other
else:
return False
#
def __lt__(self, other):
if isinstance(other, AmountOfBytes):
return self.__n < other.__n
elif isinstance(other, (int, float)):
return self.__n < other
else:
return False
#
def __le__(self, other):
if isinstance(other, AmountOfBytes):
return self.__n <= other.__n
elif isinstance(other, (int, float)):
return self.__n <= other
else:
return False
#
################################################################################################################################
## Static Helper Methods
################################################################################################################################
@staticmethod
def __parseBytesFromAny(v:typing.Union[int,str]) -> int:
if isinstance(v, int):
assert v >= 0
return v
vText = v.strip().upper()
m = re.match("^([0-9.]+)\s*([KMGT]B?)$", vText)
if m:
try:
n = float(m.group(1))
except:
raise Exception("Value does not specify bytes: " + repr(vText))
sFactor = m.group(2)
if sFactor.startswith("K"):
n *= 1024
elif sFactor.startswith("M"):
n *= 1024*1024
elif sFactor.startswith("G"):
n *= 1024*1024*1024
elif sFactor.startswith("T"):
n *= 1024*1024*1024*1024
else:
pass
return int(n)
else:
try:
return int(vText)
except Exception as ee:
raise Exception("Value does not specify bytes: " + repr(vText))
#
@staticmethod
def __bytesToStrAuto(n:int) -> str:
if (n >= 1024*1024*1024*1024) and ((n % (1024*1024*1024*1024)) == 0):
return str(n // (1024*1024*1024*1024)) + "T"
elif (n >= 1024*1024*1024) and ((n % (1024*1024*1024)) == 0):
return str(n // (1024*1024*1024)) + "G"
elif (n >= 1024*1024) and ((n % (1024*1024)) == 0):
return str(n // (1024*1024)) + "M"
elif (n >= 1024) and ((n % 1024) == 0):
return str(n // 1024) + "K"
else:
return str(n)
#
@staticmethod
def __bytesToStrFixed(n:int, mag:str) -> str:
if mag == "T":
v = round(n / (1024*1024*1024*1024), 2)
elif mag == "G":
v = round(n / (1024*1024*1024), 2)
elif mag == "M":
v = round(n / (1024*1024), 2)
elif mag == "K":
v = round(n / 1024, 1)
else:
v = n
return str(v) + " " + mag
#
################################################################################################################################
## Public Static Methods
################################################################################################################################
@staticmethod
def parseFromStr(v:str):
assert isinstance(v, str)
return AmountOfBytes(v)
#
@staticmethod
def parse(v:typing.Union[int,str]):
assert isinstance(v, (int,str))
return AmountOfBytes(v)
#
#
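# Usage sketch (illustrative only, not part of the original module):
#
# a = AmountOfBytes.parse("1.5G")
# b = AmountOfBytes.parse(4096)
# print(int(a))                        # -> 1610612736
# print(a.toStr(magnitude="M"))        # -> "1536.0 M"
# print((a + b).toStr(bShort=False))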
```
#### File: jk_utils/async/queues.py
```python
import asyncio
import collections
from asyncio.coroutines import coroutine
class QueueEmpty(Exception):
"""Exception raised when Queue.get_nowait() is called on a Queue object
which is empty.
"""
pass
class QueueFull(Exception):
"""Exception raised when the Queue.put_nowait() method is called on a Queue
object which is full.
"""
pass
class QueueFinished(Exception):
"""Exception raised when the Queue.put_nowait() method is called on a Queue
object which is already finished.
"""
pass
class QueueStopped():
pass
class Queue:
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "yield from put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
STOPPED = QueueStopped()
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = asyncio.events.get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
# Futures.
self._getters = collections.deque()
# Futures.
self._putters = collections.deque()
self._bFinished = False
self._finished = asyncio.locks.Event(loop=self._loop)
self._init(maxsize)
# These three are overridable in subclasses.
def _init(self, maxsize):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def _wakeup_next(self, waiters):
# Wake up the next waiter (if any) that isn't cancelled.
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
def __repr__(self):
return '<{} at {:#x} {}>'.format(
type(self).__name__, id(self), self._format())
def __str__(self):
return '<{} {}>'.format(type(self).__name__, self._format())
def _format(self):
result = 'maxsize={!r}'.format(self._maxsize)
if getattr(self, '_queue', None):
result += ' _queue={!r}'.format(list(self._queue))
if self._getters:
result += ' _getters[{}]'.format(len(self._getters))
if self._putters:
result += ' _putters[{}]'.format(len(self._putters))
return result
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return not self._queue
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
@coroutine
def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
This method is a coroutine.
"""
while self.full():
putter = self._loop.create_future()
self._putters.append(putter)
try:
yield from putter
except:
putter.cancel() # Just in case putter is not done yet.
if not self.full() and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._putters)
raise
return self.put_nowait(item)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
if self._bFinished:
raise QueueFinished
if self.full():
raise QueueFull
self._put(item)
self._bFinished = False
self._finished.clear()
self._wakeup_next(self._getters)
@coroutine
def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
This method is a coroutine.
"""
while self.empty():
if self._bFinished:
				return Queue.STOPPED
getter = self._loop.create_future()
self._getters.append(getter)
try:
yield from getter
except:
getter.cancel() # Just in case getter is not done yet.
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._getters)
raise
return self.get_nowait()
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
if self.empty():
if self._bFinished:
				return Queue.STOPPED
raise QueueEmpty
item = self._get()
self._wakeup_next(self._putters)
return item
def completed(self):
"""Indicate that producing data is completed.
"""
self._finished.set()
self._bFinished = True
for i in range(0, len(self._getters)):
self._wakeup_next(self._getters)
@coroutine
def join(self):
"""Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
yield from self._finished.wait()
```
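A minimal producer/consumer sketch for the `Queue` class above, written in the same legacy generator-based coroutine style the class itself uses; `completed()` signals the end of production and consumers receive the `Queue.STOPPED` sentinel once the queue has drained.
```python
# Producer/consumer sketch for the Queue above (legacy generator-based
# coroutine style, matching the class implementation).
import asyncio

@asyncio.coroutine
def producer(q):
	for i in range(3):
		yield from q.put(i)
	q.completed()                      # no more items will be produced

@asyncio.coroutine
def consumer(q):
	while True:
		item = yield from q.get()
		if item is Queue.STOPPED:      # sentinel: queue is finished and drained
			break
		print("consumed", item)

loop = asyncio.get_event_loop()
q = Queue(maxsize=2, loop=loop)
loop.run_until_complete(asyncio.gather(producer(q), consumer(q)))
```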
#### File: jk_utils/async/TabularWriterMediaWiki.py
```python
class TabularWriterMediaWiki(object):
def __init__(self, columns:list):
self.__columns = columns
print("{| class=\"wikitable\"")
print("|-")
print("!" + " !! ".join(columns))
self.__proto = [ "" for x in columns ]
#
def print(self, column:str, text:str):
outList = list(self.__proto)
outList[self.__columns.index(column)] = text
print("|-")
print(("|" + " || ".join(outList)).replace(" ", " "))
#
def close(self):
print("|}")
#
#
```
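A short usage sketch for the writer above; each `print()` call emits one table row with text in a single column, and `close()` terminates the wikitable markup.
```python
# Writes a small two-column MediaWiki table to stdout.
w = TabularWriterMediaWiki(["Name", "Value"])
w.print("Name", "alpha")
w.print("Value", "42")
w.close()
```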
#### File: src/jk_utils/Bytes.py
```python
import binascii
#
# This is a convenience wrapper around an array of bytes.
#
class Bytes(object):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, data):
if isinstance(data, (bytes,bytearray)):
self.__data = bytes(data)
elif isinstance(data, str):
self.__data = binascii.unhexlify(data.encode("ascii"))
else:
raise Exception()
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def hexStr(self):
return binascii.hexlify(self.__data).decode("ascii")
#
def __repr__(self):
return "Bytes<(" + self.__str__() + ")>"
#
def __bytes__(self):
return self.__data
#
def __str__(self):
return binascii.hexlify(self.__data).decode("ascii")
#
def __add__(self, other):
assert isinstance(other, Bytes)
return Bytes(self.__data + other.__data)
#
def __len__(self):
return len(self.__data)
#
def __eq__(self, other):
if isinstance(other, Bytes):
return self.__data == other.__data
elif isinstance(other, (bytes, bytearray)):
return self.__data == other
else:
return False
#
def __ne__(self, other):
if isinstance(other, Bytes):
return self.__data != other.__data
elif isinstance(other, (bytes, bytearray)):
return self.__data != other
else:
			return True
#
#
```
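A small round-trip sketch for the `Bytes` wrapper above; it can be constructed from raw bytes or from a hex string and converted back in either direction.
```python
# Round-trip sketch for the Bytes convenience wrapper.
b1 = Bytes("48656c6c6f")          # hex string for b"Hello"
b2 = Bytes(b" world")             # raw bytes are accepted as well

print(bytes(b1))                  # b'Hello'
print((b1 + b2).hexStr())         # '48656c6c6f20776f726c64'
print(len(b1 + b2))               # 11
print(b1 == b"Hello")             # True -- comparison against raw bytes works too
```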
#### File: src/jk_utils/CmdLineParser.py
```python
class CmdLineParser(object):
@staticmethod
def parseCmdLine(text):
IN_SPACE = 1
IN_SPACE_EXPECTS_SPACE = 2
IN_WORD = 3
IN_WORD_NEXT_MASKED = 4
IN_STR = 5
IN_STR_NEXT_MASKED = 6
ret = []
mode = IN_SPACE
buffer = []
charNo = 0
for c in text:
charNo += 1
if mode == IN_SPACE:
if (c == " ") or (c == "\t"):
continue
elif c == "\"":
mode = IN_STR
else:
buffer.append(c)
mode = IN_WORD
elif mode == IN_SPACE_EXPECTS_SPACE:
if (c == " ") or (c == "\t"):
mode = IN_SPACE
else:
raise Exception("Space character expected at position " + str(charNo) + ": " + c + " (" + str(ord(c)) + ")")
elif mode == IN_WORD:
if (c == " ") or (c == "\t"):
ret.append("".join(buffer))
buffer.clear()
mode = IN_SPACE
elif c == "\\":
mode = IN_WORD_NEXT_MASKED
elif c == "\"":
raise Exception("Unexpected character at position " + str(charNo) + ": " + c + " (" + str(ord(c)) + ")")
else:
buffer.append(c)
elif mode == IN_WORD_NEXT_MASKED:
buffer.append(c)
mode = IN_WORD
elif mode == IN_STR:
if c == "\"":
ret.append("".join(buffer))
buffer.clear()
mode = IN_SPACE_EXPECTS_SPACE
elif c == "\\":
mode = IN_STR_NEXT_MASKED
else:
buffer.append(c)
elif mode == IN_STR_NEXT_MASKED:
buffer.append(c)
mode = IN_STR
else:
raise Exception()
if mode == IN_WORD:
ret.append("".join(buffer))
elif mode == IN_STR:
raise Exception("Unterminated string at position " + str(charNo) + ": " + c + " (" + str(ord(c)) + ")")
elif mode == IN_STR_NEXT_MASKED:
raise Exception("Unterminated string at position " + str(charNo) + ": " + c + " (" + str(ord(c)) + ")")
return ret
#
#
```
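A usage sketch for the parser above: whitespace separates words, double quotes group them, and a backslash escapes the following character.
```python
# Splits a command line into arguments, honoring quotes and escapes.
args = CmdLineParser.parseCmdLine('convert "my file.png" -resize 50% out\\ put.png')
print(args)   # ['convert', 'my file.png', '-resize', '50%', 'out put.png']
```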
#### File: src/jk_utils/DataMatrix.py
```python
class DataMatrix(object):
# ================================================================================================================================
# ==== Constructor / Destructor
# ================================================================================================================================
def __init__(self, nCols:int, nRows:int):
assert isinstance(nCols, int)
assert nCols > 0
assert isinstance(nRows, int)
assert nRows > 0
self.__rows = []
for nRow in range(0, nRows):
self.__rows.append([ None ] * nCols)
self.__nRows = nRows
self.__nCols = nCols
#
# ================================================================================================================================
# ==== Properties
# ================================================================================================================================
#
# Get the number of rows in this matrix.
#
# @return int Returns the number of rows.
#
@property
def nRows(self) -> int:
return self.__nRows
#
#
	# Get the number of columns in this matrix.
#
	# @return int		Returns the number of columns.
#
@property
def nColumns(self) -> int:
return self.__nCols
#
#
# Get the size of this matrix.
#
	# @return tuple<int,int>		Returns the number of columns and rows (in this order).
#
@property
def size(self) -> tuple:
return self.__nCols, self.__nRows
#
# ================================================================================================================================
# ==== Methods
# ================================================================================================================================
#
	# Append a matrix to the right of this matrix. For this to succeed both matrices must have the same number of rows.
#
# @param DataMatrix matrix Another data matrix of the same height.
#
def appendMatrixToRight(self, matrix):
assert isinstance(matrix, DataMatrix)
assert matrix.__nRows == self.__nRows
for rowIndex in range(self.__nRows):
self.__rows[rowIndex].extend(matrix.__rows[rowIndex])
self.__nCols += matrix.__nCols
#
#
	# Append a matrix to the bottom of this matrix. For this to succeed both matrices must have the same number of columns.
#
# @param DataMatrix matrix Another data matrix of the same width.
#
def appendMatrixBelow(self, matrix):
assert isinstance(matrix, DataMatrix)
assert matrix.__nCols == self.__nCols
self.__rows.extend([ list(r) for r in matrix.__rows])
self.__nRows += matrix.__nRows
#
#
# Remove a specific row.
#
# @param int rowIndex The index of the row to remove.
#
def removeRow(self, rowIndex:int):
assert isinstance(rowIndex, int)
if rowIndex < 0:
rowIndex = self.__nRows + rowIndex
del self.__rows[rowIndex]
self.__nRows -= 1
#
#
# Get the value of a specific cell.
#
def get(self, rowIndex:int, colIndex:int):
if rowIndex < 0:
rowIndex = self.__nRows + rowIndex
if colIndex < 0:
colIndex = self.__nCols + colIndex
try:
return self.__rows[rowIndex][colIndex]
except IndexError as ee:
self.dump()
raise IndexError(str(ee) + " :: " + repr((rowIndex, colIndex)))
#
#
# Set the value of a specific cell.
#
def set(self, rowIndex:int, colIndex:int, value):
if rowIndex < 0:
rowIndex = self.__nRows + rowIndex
if colIndex < 0:
colIndex = self.__nCols + colIndex
try:
self.__rows[rowIndex][colIndex] = value
except IndexError as ee:
self.dump()
raise IndexError(str(ee) + " :: " + repr((rowIndex, colIndex)))
#
#
# Add a row to the top of this matrix.
#
# @param object content (optional) An arbitrary object. The new row will be filled with references to the specified object.
#
def addRowAtTop(self, content = None):
data = [ content for i in range(0, self.__nCols) ]
self.__rows.insert(0, data)
self.__nRows += 1
#
#
# Add a row at the bottom of this matrix.
#
# @param object content (optional) An arbitrary object. The new row will be filled with references to the specified object.
#
def addRowAtBottom(self, content = None):
data = [ content for i in range(0, self.__nCols) ]
self.__rows.append(data)
self.__nRows += 1
#
#
# Add a column to the right of this matrix.
#
# @param object content (optional) An arbitrary object. The new column will be filled with references to the specified object.
#
def addColumnAtRight(self, content = None):
for nRowIndex in range(0, self.__nRows):
self.__rows[nRowIndex].append(content)
self.__nCols += 1
#
#
# Add a column to the left of this matrix.
#
# @param object content (optional) An arbitrary object. The new column will be filled with references to the specified object.
#
def addColumnAtLeft(self, content = None):
for nRowIndex in range(0, self.__nRows):
self.__rows[nRowIndex].insert(0, content)
self.__nCols += 1
#
#
# Get a copy of the data row by row as a two-dimensional array where the rows are returned as an immutable tuple.
#
def getRowTuple(self) -> tuple:
return tuple([ tuple(row) for row in self.__rows ])
#
#
# Get a copy of the data row by row as a two-dimensional array where the rows are returned as a list.
#
def getRowList(self) -> list:
return [ list(row) for row in self.__rows ]
#
def dump(self):
print(self.__nRows, "x", self.__nCols)
for nRowIndex in range(0, self.__nRows):
print("\t", nRowIndex, "::", len(self.__rows[nRowIndex]), str(self.__rows[nRowIndex]))
#
#
```
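A short usage sketch for `DataMatrix`; cells are addressed by `(rowIndex, colIndex)` and negative indices count from the end.
```python
# Builds a 3x2 matrix, sets a few cells and appends a row of zeros.
m = DataMatrix(nCols=3, nRows=2)
m.set(0, 0, "a")
m.set(1, 2, "z")             # last column of the second row
m.addRowAtBottom(content=0)  # new bottom row filled with 0

print(m.size)                # (3, 3)
print(m.get(-1, 0))          # 0 -- negative indices count from the end
print(m.getRowList())        # [['a', None, None], [None, None, 'z'], [0, 0, 0]]
```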
#### File: src/jk_utils/EnumBase.py
```python
import enum
#
# This is a base class for enumerations.
#
class EnumBase(enum.Enum):
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
#
#
# Get an integer representation of this enumeration item.
#
def __int__(self):
return self._value_
#
#
# Get a string representation of this enumeration item.
#
def __str__(self):
return self.fullname
#
#
# Get a string representation of this enumeration item.
#
def __repr__(self):
return self.fullname
#
#
# Get an integer representation of this enumeration item.
#
def toJSON(self):
return self._value_
#
#
# Get a list of all states this enumeration contains.
#
@classmethod
def allStates(cls):
ret = []
for key in cls.__dict__["_value2member_map_"]:
enumItem = cls.__dict__["_value2member_map_"][key]
ret.append(enumItem)
return ret
#
#
# This method converts a string or integer representing an enumeration value to an actual enumeration value.
#
# @param mixed data Either a string or an integer to parse. (A member of this enumeration is
# accepted as well and passed through to the caller as in this case there
# is no need to parse anything.)
# @param bool bRaiseExceptionOnError If <c>True</c> (which is the default) an exception is thrown if a
# value has been specified that could not be parsed. (Please note that
	# an exception is always thrown if the specified value is neither a string nor
	# an integer nor of the enumeration type itself.)
#
@classmethod
def parse(cls, data, bRaiseExceptionOnError:bool = True):
if isinstance(data, int):
if data in cls.__dict__["_value2member_map_"]:
return cls.__dict__["_value2member_map_"][data]
else:
if bRaiseExceptionOnError:
raise Exception("Not a member of enumeration '" + str(cls.__name__) + "': " + repr(data))
else:
return None
elif isinstance(data, str):
for key in cls.__dict__["_value2member_map_"]:
enumItem = cls.__dict__["_value2member_map_"][key]
if str(enumItem) == data:
return enumItem
if data in cls.__dict__["_member_names_"]:
return cls.__dict__[data]
else:
if bRaiseExceptionOnError:
raise Exception("Not a member of enumeration '" + str(cls.__name__) + "': " + repr(data))
else:
return None
elif cls == type(data):
return data
else:
raise Exception("Unrecognized enumeration value type: " + repr(data))
#
#
```
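A sketch of a concrete enumeration built on `EnumBase`; each member is declared as a `(value, fullname)` tuple that is consumed by the custom `__new__()`.
```python
# Concrete enumeration based on EnumBase.
class Color(EnumBase):
	RED = 1, "red"
	GREEN = 2, "green"
#

print(int(Color.RED))                     # 1
print(str(Color.GREEN))                   # 'green'
print(Color.parse("red") is Color.RED)    # True -- matched via the full name
print(Color.parse(2) is Color.GREEN)      # True -- matched via the value
```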
#### File: src/jk_utils/hex.py
```python
__HEXCODE = "0123456789abcdef"
def byteToHex(someValue:int) -> str:
assert isinstance(someValue, int)
someValue = someValue & 255
return __HEXCODE[int(someValue / 16)] + __HEXCODE[someValue % 16]
def byteArrayToHexStr(someByteArray) -> str:
assert isinstance(someByteArray, (bytes, bytearray))
ret = ""
for someValue in someByteArray:
assert isinstance(someValue, int)
someValue = someValue & 255
ret += __HEXCODE[int(someValue / 16)] + __HEXCODE[someValue % 16]
return ret
def hexStrToByteArray(someHexArray:str) -> bytearray:
if (len(someHexArray) % 2) != 0:
raise Exception("Not a valid hex string!")
someHexArray = someHexArray.lower()
dataArray = bytearray()
for offset in range(0, len(someHexArray), 2):
charA = someHexArray[offset]
charB = someHexArray[offset + 1]
pA = __HEXCODE.find(charA)
pB = __HEXCODE.find(charB)
if (pA < 0) or (pB < 0):
raise Exception("Not a valid hex string!")
dataArray.append(pA * 16 + pB)
return dataArray
def hexToByte(someHexString:str, offset:int) -> int:
someHexString = someHexString.lower()
charA = someHexString[offset]
charB = someHexString[offset + 1]
pA = __HEXCODE.find(charA)
pB = __HEXCODE.find(charB)
if (pA < 0) or (pB < 0):
raise Exception("Not a valid hex string!")
return pA * 16 + pB
```
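A round-trip sketch for the helpers above; the import path is an assumption and may differ from the actual package layout.
```python
# Round-trip between raw bytes and their hex representation;
# the import path below is an assumption.
from jk_utils.hex import byteArrayToHexStr, hexStrToByteArray

s = byteArrayToHexStr(b"\x00\xffAB")
print(s)                        # '00ff4142'
print(hexStrToByteArray(s))     # bytearray(b'\x00\xffAB')
```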
#### File: src/jk_utils/oop.py
```python
def singleton(clazz):
assert clazz
assert type(clazz) == type
clazz.instance = clazz()
clazz.INSTANCE = clazz.instance
return clazz
#
```
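A usage sketch for the decorator above: it instantiates the class once at definition time and exposes the instance via the `instance`/`INSTANCE` class attributes.
```python
# The decorated class is instantiated exactly once.
@singleton
class Config(object):
	def __init__(self):
		self.verbose = False
#

print(Config.instance is Config.INSTANCE)   # True
print(Config.INSTANCE.verbose)              # False
```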
#### File: src/jk_utils/pathutils.py
```python
import os
import collections
def setFileExt(filePath:str, newFileExt:str):
if not newFileExt.startswith("."):
newFileExt = "." + newFileExt
pos = filePath.rfind("/")
if pos > 0:
dirPath = filePath[:pos+1]
filePath = filePath[pos+1:]
else:
dirPath = ""
pos = filePath.rfind(".")
if pos > 0:
filePath = filePath[:pos] + newFileExt
else:
filePath += newFileExt
return dirPath + filePath
#
def getFileExt(filePath:str):
pos = filePath.rfind("/")
if pos > 0:
dirPath = filePath[:pos+1]
filePath = filePath[pos+1:]
else:
dirPath = ""
pos = filePath.rfind(".")
if pos > 0:
		return filePath[pos:]
else:
return None
#
def makeAbsDirPathAndCheckDirExists(baseDir:str, dirPath:str):
if baseDir:
if not os.path.isdir(baseDir):
raise Exception("Base directory does not exist: " + repr(baseDir))
if not os.path.isabs(dirPath):
if baseDir is None:
dirPath = os.path.abspath(dirPath)
else:
dirPath = os.path.abspath(os.path.join(baseDir, dirPath))
if not os.path.isdir(dirPath):
raise Exception("Directory does not exist: " + repr(dirPath))
return dirPath
#
def makeAbsFilePathAndCheckFileExists(baseDir:str, filePath:str):
if baseDir:
if not os.path.isdir(baseDir):
raise Exception("Base directory does not exist: " + repr(baseDir))
if not os.path.isabs(filePath):
if baseDir is None:
filePath = os.path.abspath(filePath)
else:
filePath = os.path.abspath(os.path.join(baseDir, filePath))
if not os.path.isfile(filePath):
raise Exception("File does not exist: " + repr(filePath))
return filePath
#
def makeAbsFilePathAndCheckBaseDirExists(baseDir:str, filePath:str):
if baseDir:
if not os.path.isdir(baseDir):
raise Exception("Base directory does not exist: " + repr(baseDir))
if not os.path.isabs(filePath):
if baseDir is None:
filePath = os.path.abspath(filePath)
else:
filePath = os.path.abspath(os.path.join(baseDir, filePath))
baseDir = os.path.dirname(filePath)
	if not os.path.isdir(baseDir):
raise Exception("Base directory of file does not exist: " + repr(filePath))
return filePath
#
```
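A short sketch for `setFileExt()`, which replaces an existing file extension or appends one if the file name has none.
```python
# Replacing and appending file extensions.
print(setFileExt("data/report.txt", "md"))    # 'data/report.md'
print(setFileExt("data/README", ".bak"))      # 'data/README.bak'
```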
#### File: src/jk_utils/reflection.py
```python
import inspect
def isStaticMethod(theClass, methodName):
try:
value = getattr(theClass, methodName)
except:
# Such a method does not exist
return False
assert getattr(theClass, methodName) == value
for cls in inspect.getmro(theClass):
if inspect.isroutine(value):
if methodName in cls.__dict__:
binded_value = cls.__dict__[methodName]
if isinstance(binded_value, staticmethod):
return True
return False
#
```
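A usage sketch for `isStaticMethod()`; it returns `True` only if the named attribute is registered as a static method somewhere in the class's MRO.
```python
# Distinguishing static methods from regular instance methods.
class Demo:
	@staticmethod
	def build():
		pass
	def run(self):
		pass
#

print(isStaticMethod(Demo, "build"))     # True
print(isStaticMethod(Demo, "run"))       # False
print(isStaticMethod(Demo, "missing"))   # False -- no such attribute
```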
#### File: src/jk_utils/TextTable.py
```python
from .MutableString import *
from .TextCanvas import *
#
# This object represents a table cell
#
class TextTableCell(object):
def __init__(self, x, y, colSpan, rowSpan, textLines):
assert isinstance(textLines, list)
assert isinstance(colSpan, int)
assert isinstance(rowSpan, int)
assert x >= 0
assert y >= 0
assert colSpan >= 1
assert rowSpan >= 1
self.__x = x
self.__y = y
self.__textLines = textLines
self.__maxTextWidth = 0
for line in textLines:
assert isinstance(line, str)
if len(line) > self.__maxTextWidth:
self.__maxTextWidth = len(line)
self.__colSpan = colSpan
self.__rowSpan = rowSpan
#
#
	# Tests if the specified position is covered by this cell. This method is especially useful for cells that span multiple grid positions,
	# as it will return the correct result even in that case.
	#
	# @return bool		Returns <c>True</c> or <c>False</c> depending on whether or not the specified coordinates are covered by this cell.
#
def coversXY(self, x, y):
return (x >= self.__x) and (x < self.__x + self.__colSpan) and (y >= self.__y) and (y < self.__y + self.__rowSpan)
#
def __repr__(self):
s = ""
for line in self.__textLines:
s += "\n"
s += line
s = s[1:]
if len(s) > 20:
s = s[:20]
s += "..."
return "{ " + str(self.__x) + "," + str(self.__y) + " - " + str(self.__colSpan) + "," + str(self.__rowSpan) + " - " + repr(s) + " }"
#
def __str__(self):
return self.__repr__()
#
#
# The text lines stored in this cell
#
@property
def textLines(self):
return self.__textLines
#
#
# The width the cell requires
#
@property
def innerWidth(self):
return self.__maxTextWidth
#
#
# The height the cell requires
#
@property
def innerHeight(self):
return len(self.__textLines)
#
@property
def x(self):
return self.__x
#
@property
def y(self):
return self.__y
#
@property
def rowSpan(self):
return self.__rowSpan
#
@property
def colSpan(self):
return self.__colSpan
#
#
#
# This class represents a table, which is essentially a grid of cells. Text is organized in lines.
# After text has been added to the table, it can be painted to an instance of <c>TextCanvas</c>,
# which in turn can be printed to STDOUT.
#
class TextTable(object):
class __RowCreator(object):
def __init__(self, table, rowNo):
self.table = table
self.rowNo = rowNo
self.__pos = 0
#
def createCell(self, textLines, colSpan = 1, rowSpan = 1):
while self.table.getCell(self.__pos, self.rowNo) != None:
self.__pos += 1
self.table.setCell(self.__pos, self.rowNo, textLines, colSpan, rowSpan)
self.__pos += colSpan
return self
#
#
def __init__(self):
self.__rows = []
self.__countColumns = 0
self.__headingRows = set()
#
def setHeadingRow(self, rowNo):
self.__headingRows.add(rowNo)
#
def ensureSize(self, w, h):
while len(self.__rows) < h:
self.__rows.append([])
self.__countColumns = max(w, self.__countColumns)
for row in self.__rows:
while len(row) < self.__countColumns:
row.append(None)
#
def numberOfRows(self):
return len(self.__rows)
#
def numberOfColumns(self):
return self.__countColumns
#
def setCell(self, x, y, textLines, colSpan = 1, rowSpan = 1):
if isinstance(textLines, str):
textLines = [ textLines ]
elif isinstance(textLines, list):
pass
else:
raise Exception("Invalid data for text lines specified!")
self.ensureSize(x + colSpan, y + rowSpan)
self.__rows[y][x] = TextTableCell(x, y, colSpan, rowSpan, textLines)
		# clear all covered grid positions except the anchor position of this cell
		for ix in range(x, x + colSpan):
			for iy in range(y, y + rowSpan):
				if (ix != x) or (iy != y):
					self.__rows[iy][ix] = None
#
@staticmethod
	def __calcSpans(valueList, ofs, count):
		n = 0
		for i in range(ofs, ofs + count):
			n += valueList[i]
return n
#
def _calcRowHeights(self):
rowHeights = []
numberOfRows = len(self.__rows)
# get height of all cells that do not span across multiple cells
multiRowCells = []
for i in range(0, numberOfRows):
rowHeights.append(0)
row = self.__rows[i]
for cell in row:
if cell is None:
continue
if cell.rowSpan == 1:
h = cell.innerHeight
if h > rowHeights[i]:
rowHeights[i] = h
else:
# collect the cells we need to deal with later
multiRowCells.append(cell)
# now make room for all cells spanning across multiple cells
for cell in multiRowCells:
currentHeightAvailable = TextTable.__calcSpans(rowHeights, cell.y, cell.rowSpan) # get cell height
currentHeightAvailable += cell.rowSpan - 1 # add intermediate borders
# currentHeightAvailable is now the amount of height this cell can cover (without outer border)
heightRequired = cell.innerHeight
# heightRequired will contain the height the cell will need
while currentHeightAvailable < heightRequired:
				# expand all rows covered by this cell by 1
				for i in range(0, cell.rowSpan):
					rowHeights[cell.y + i] += 1
				currentHeightAvailable += cell.rowSpan
return rowHeights
#
def _calcColumnWidths(self, extraHGapLeft, extraHGapRight):
columnWidths = []
numberOfRows = len(self.__rows)
# get width of all cells that do not span across multiple cells
#print("countColumns=" + str(self.__countColumns))
for i in range(0, self.__countColumns):
columnWidths.append(0)
#print("columnWidths=" + str(columnWidths))
#print("numberOfRows=" + str(numberOfRows))
multiRowCells = []
for i in range(0, numberOfRows):
row = self.__rows[i]
#print(str(i) + "\t" + str(row))
j = -1
for cell in row:
j += 1
if cell is None:
continue
if cell.colSpan == 1:
w = cell.innerWidth + extraHGapLeft + extraHGapRight
if w > columnWidths[j]:
columnWidths[j] = w
else:
# collect the cells we need to deal with later
multiRowCells.append(cell)
# now make room for all cells spanning across multiple cells
for cell in multiRowCells:
currentWidthAvailable = TextTable.__calcSpans(columnWidths, cell.x, cell.colSpan) # get cell width
currentWidthAvailable += cell.colSpan - 1 # add intermediate borders
# currentWidthAvailable is now the amount of height this cell can cover (without outer border)
widthRequired = cell.innerWidth
# widthRequired will contain the width the cell will need
while currentWidthAvailable < widthRequired:
				# expand all columns covered by this cell by 1
				for i in range(0, cell.colSpan):
					columnWidths[cell.x + i] += 1
				currentWidthAvailable += cell.colSpan
return columnWidths
#
def paintToTextCanvas(self, textCanvas = None, extraHGapLeft = 1, extraHGapRight = 1, bCompact = False):
if textCanvas is None:
textCanvas = TextCanvas()
if bCompact:
self.__paintToTextCanvasNoBorders(textCanvas, extraHGapLeft, extraHGapRight)
else:
self.__paintToTextCanvasWithBorders(textCanvas, extraHGapLeft, extraHGapRight)
return textCanvas
#
def __paintToTextCanvasWithBorders(self, textCanvas, extraHGapLeft, extraHGapRight):
columnWidths = self._calcColumnWidths(extraHGapLeft, extraHGapRight)
xPositions = [ 0 ]
for w in columnWidths:
xPositions.append(xPositions[-1] + w + 1)
rowHeights = self._calcRowHeights()
yPositions = [ 0 ]
for h in rowHeights:
yPositions.append(yPositions[-1] + h + 1)
#print("columnWidths=" + str(columnWidths))
#print("xPositions=" + str(xPositions))
#print("rowHeights=" + str(rowHeights))
#print("yPositions=" + str(yPositions))
textCanvas.ensureSize(xPositions[-1] + 1, yPositions[-1] + 1)
for row in self.__rows:
for cell in row:
if cell is None:
continue
x1 = xPositions[cell.x]
y1 = yPositions[cell.y]
x2 = xPositions[cell.x + cell.colSpan]
y2 = yPositions[cell.y + cell.rowSpan]
textCanvas.drawRectangle(
x1, y1, x2, y2,
bDoubleBorderTop = (cell.y - 1) in self.__headingRows)
textCanvas.drawTextLines(x1 + 1 + extraHGapLeft, y1 + 1, cell.textLines)
return textCanvas
#
def __paintToTextCanvasNoBorders(self, textCanvas, extraHGapLeft, extraHGapRight):
columnWidths = self._calcColumnWidths(extraHGapLeft, extraHGapRight)
xPositions = [ 0 ]
for w in columnWidths:
xPositions.append(xPositions[-1] + w + 1)
rowHeights = self._calcRowHeights()
yPositions = [ 0 ]
for h in rowHeights:
yPositions.append(yPositions[-1] + h)
#print("columnWidths=" + str(columnWidths))
#print("xPositions=" + str(xPositions))
#print("rowHeights=" + str(rowHeights))
#print("yPositions=" + str(yPositions))
textCanvas.ensureSize(xPositions[-1] + 1, yPositions[-1] + 1)
for row in self.__rows:
for cell in row:
if cell is None:
continue
x1 = xPositions[cell.x]
y1 = yPositions[cell.y]
x2 = xPositions[cell.x + cell.colSpan]
y2 = yPositions[cell.y + cell.rowSpan]
textCanvas.drawTextLines(x1 + 1 + extraHGapLeft, y1 + 1, cell.textLines)
return textCanvas
#
def createRow(self, bIsHeading = False):
n = len(self.__rows)
if bIsHeading:
self.__headingRows.add(n)
self.ensureSize(self.__countColumns, n + 1)
return TextTable.__RowCreator(self, n)
#
#
# Get the cell at position (x, y). This method is useful especially for cells that span across multiple cells as this method
# will return the apropriate table cell even in this case.
#
def getCell(self, x, y):
if (y >= len(self.__rows)) or (x >= self.__countColumns):
return None
iy = y
while iy >= 0:
ix = x
while ix >= 0:
cell = self.__rows[iy][ix]
if cell != None:
if cell.coversXY(x, y):
return cell
ix -= 1
iy -= 1
return None
#
def print(self, bCompact = False, printFunction = None):
textCanvas = self.paintToTextCanvas(extraHGapLeft = 1, extraHGapRight = 1, bCompact = bCompact)
textCanvas.print(printFunction = printFunction)
#
def dump(self, printFunction = None):
if printFunction is None:
printFunction = print
else:
assert callable(printFunction)
printFunction("TextTable(")
printFunction("\tcountColumns=" + str(self.__countColumns))
printFunction("\tcountRows=" + str(len(self.__rows)))
printFunction("\theadingRows=" + str(self.__headingRows))
printFunction("\trows=[")
i = 0
for row in self.__rows:
printFunction("\t\t" + str(i) + "\t" + str(row))
i += 1
printFunction("\t]")
printFunction(")")
#
#
```
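A minimal usage sketch for `TextTable`; this assumes the companion `TextCanvas` class imported at the top of the module is available.
```python
# Builds a small table with a heading row and prints it to stdout.
t = TextTable()
t.createRow(bIsHeading=True).createCell("Name").createCell("Size")
t.createRow().createCell("backup.tar").createCell("1.4 G")
t.createRow().createCell("notes.txt").createCell("2 K")
t.print()
```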
#### File: src/jk_utils/Timer.py
```python
import time
import threading
from .ObservableEvent import ObservableEvent
#
# TODO: Make the timing more accurate by using a more precise sleep time!
#
class Timer(object):
def __init__(self, delaySeconds:int):
self.__bKeepRunning = True
self.__delaySeconds = delaySeconds
self.__onTimerEvent = ObservableEvent("timerTick")
self.__onErrorEvent = ObservableEvent("timerError", bCatchExceptions=True)
self.__thread = threading.Thread(target=self.__runTimer, daemon=True)
self.__thread.start()
#
def __del__(self):
self.__bKeepRunning = False
#
@property
def onTimer(self):
return self.__onTimerEvent
#
@property
def onError(self):
return self.__onErrorEvent
#
def __runTimer(self):
while self.__bKeepRunning:
time.sleep(self.__delaySeconds)
try:
self.__onTimerEvent(self)
except Exception as ee:
self.__onErrorEvent(ee)
self.__thread = None
#
def terminate(self):
self.__bKeepRunning = False
#
#
```
#### File: jk_utils/tokenizer2/RegExBasedTokenizer.py
```python
import re
import collections
from .Token import Token
#
# RegEx based tokenizer: This tokenizer uses regular expressions in order to create tokens.
#
class RegExBasedTokenizer(object):
#
# Initialization method
#
# @param str[] patternDefs A list of pattern definitions. Each pattern definition should be a tuple or list of one of the following structures:
# * 2 items
# * (required) the token type to use on match
# * (required) the pattern to match
# * 4 items
# * (required) the token type to use on match
# * (optional) if not None a pattern to match but that will not be part of the token content
# * (required) the pattern to match (which will be the content of the token)
# * (optional) if not None a pattern to match but that will not be part of the token content
#
def __init__(self, patternDefs):
assert isinstance(patternDefs, (tuple, list))
assert len(patternDefs) > 0
p = ""
for patternDef in patternDefs:
assert isinstance(patternDef, (tuple, list))
if len(p) > 0:
p += "|"
if len(patternDef) == 2:
p += '(?P<' + patternDef[0] + '>' + patternDef[1] + ')'
elif len(patternDef) == 4:
if patternDef[1] != None:
p += '(?:' + patternDef[1] + ')'
p += '(?P<' + patternDef[0] + '>' + patternDef[2] + ')'
if patternDef[3] != None:
p += '(?:' + patternDef[3] + ')'
else:
raise Exception("Invalid pattern definition: " + str(patternDef))
self.__rawPatterns = p
self.__compiledPatterns = None
self.__typeParsingDelegates = {}
#
@property
def isCompiled(self):
return self.__compiledPatterns != None
#
def compile(self):
rawPatterns = self.__rawPatterns
rawPatterns += "|(?P<NEWLINE>\n)"
rawPatterns += "|(?P<SPACE>[\t ]+)"
rawPatterns += "|(?P<ERROR>.)"
try:
self.__compiledPatterns = re.compile(rawPatterns)
except Exception as e:
print("==== COMPILE ERROR")
print("==== " + e.msg)
print("==== " + rawPatterns)
s = ""
for i in range(0, e.colno):
s += " "
s += "^"
print(s)
raise
#
def addTokenPattern(self, tokenType:str, patternRegExStr):
assert isinstance(tokenType, str)
bErr = False
if isinstance(patternRegExStr, (tuple, list)):
if len(patternRegExStr) == 1:
if isinstance(patternRegExStr[0], str):
patternRegExStr = patternRegExStr[0]
else:
bErr = True
elif len(patternRegExStr) == 3:
pass
else:
bErr = True
elif isinstance(patternRegExStr, str):
pass
else:
bErr = True
if bErr:
raise Exception("Invalid patternRegExStr specified!")
if len(self.__rawPatterns) > 0:
self.__rawPatterns += "|"
if isinstance(patternRegExStr, str):
self.__rawPatterns += '(?P<' + tokenType + '>' + patternRegExStr + ')'
else:
if patternRegExStr[0] != None:
self.__rawPatterns += '(?:' + patternRegExStr[0] + ')'
self.__rawPatterns += '(?P<' + tokenType + '>' + patternRegExStr[1] + ')'
if patternRegExStr[2] != None:
self.__rawPatterns += '(?:' + patternRegExStr[2] + ')'
self.__compiledPatterns = None
#
def registerTypeParsingDelegate(self, tokenTypeMain, tokenTypeExtra, delegate):
assert isinstance(tokenTypeMain, str)
assert isinstance(tokenTypeExtra, (type(None), str))
assert callable(delegate)
if tokenTypeExtra is None:
tokenType = tokenTypeMain
else:
tokenType = tokenTypeMain + "_" + tokenTypeExtra
self.__typeParsingDelegates[tokenType] = delegate
#
#
# @return Token[] tokens Returns token objects according to the pattern defined during initialization.
#
def tokenize(self, text, bEmitWhiteSpaces = False, bEmitNewLines = False):
if not self.isCompiled:
self.compile()
lineNo = 1
line_start = 0
for mo in self.__compiledPatterns.finditer(text):
tokenType = mo.lastgroup
value = mo.group(tokenType)
columnNo = mo.start() - line_start + 1
if tokenType == 'NEWLINE':
if bEmitNewLines:
yield Token(tokenType, value, lineNo, columnNo)
line_start = mo.end()
lineNo += 1
elif tokenType == 'SPACE':
if bEmitWhiteSpaces:
yield Token(tokenType, value, lineNo, columnNo)
elif tokenType == 'ERROR':
raise RuntimeError("Tokenization error encountered at " + str(lineNo) + ":" + str(columnNo) + "!")
else:
d = self.__typeParsingDelegates.get(tokenType, None)
if d != None:
value = d(value)
pos = tokenType.find("_")
# tokenTypeExtra = None
#if pos >= 0:
#tokenTypeExtra = tokenType[pos + 1:]
#tokenType = tokenType[0:pos]
# s = "" if tokenTypeExtra == None else tokenTypeExtra
# print("---- " + tokenTypeMain + ":" + s + " ---- \"" + tokenText + "\"")
if pos >= 0:
tokenType = tokenType[0:pos]
yield Token(tokenType, value, lineNo, columnNo)
if tokenType == 'newline':
line_start = mo.end()
lineNo += 1
#
#
	# Override this method to perform correct value parsing. This method is called for every token in order to convert its raw text into a typed value.
#
def _parseValue(self, tokenTypeMain, tokenTypeExtra, tokenText):
return tokenText
#
#
```
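A small usage sketch for the tokenizer above: two token patterns plus a parsing delegate that converts integer tokens to `int` values; whitespace is skipped unless explicitly requested.
```python
# A tiny tokenizer for integers and identifiers.
t = RegExBasedTokenizer([
	( "INT", r"[0-9]+" ),
	( "WORD", r"[A-Za-z_][A-Za-z0-9_]*" ),
])
t.registerTypeParsingDelegate("INT", None, int)   # INT token values become ints

for token in t.tokenize("foo 42 bar"):
	print(token)
```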
#### File: jk_utils/weakref/WeakRefObservableEvent.py
```python
import weakref
class WeakRefObservableEvent(object):
def __init__(self, name = None):
self.__name = name
self.__listeners = tuple()
#
@property
def name(self):
return self.__name
#
@property
def listeners(self):
return self.__listeners
#
def __len__(self):
return len(self.__listeners)
#
def removeAllListeners(self):
self.__listeners = tuple()
#
def __str__(self):
if self.__name:
ret = repr(self.__name)[1:][:-1] + "("
else:
ret = "Event("
if len(self.__listeners) == 0:
ret += "no listener)"
elif len(self.__listeners) == 1:
ret += "1 listener)"
else:
ret += str(len(self.__listeners)) + " listeners)"
return ret
#
def __repr__(self):
if self.__name:
ret = repr(self.__name)[1:][:-1] + "("
else:
ret = "Event("
if len(self.__listeners) == 0:
ret += "no listener)"
elif len(self.__listeners) == 1:
ret += "1 listener)"
else:
ret += str(len(self.__listeners)) + " listeners)"
return ret
#
def add(self, theCallable:callable):
assert theCallable != None
self.__listeners += (weakref.ref(theCallable),)
return self
#
def __iadd__(self, theCallable:callable):
assert theCallable != None
self.__listeners += (weakref.ref(theCallable),)
return self
#
def remove(self, theCallable:callable) -> bool:
assert theCallable != None
		# listeners are stored as weak references, so compare against the referents
		for n, ref in enumerate(self.__listeners):
			if ref() is theCallable:
				self.__listeners = self.__listeners[:n] + self.__listeners[n + 1:]
				return True
		return False
#
def _removeAt(self, n:int):
self.__listeners = self.__listeners[:n] + self.__listeners[n + 1:]
#
def __isub__(self, theCallable:callable):
assert theCallable != None
		self.remove(theCallable)
return self
#
def __call__(self, *argv, **kwargs):
n = 0
for listener in self.__listeners:
o = listener()
if o:
o(*argv, **kwargs)
n += 1
else:
self._removeAt(n)
#
def fire(self, *argv, **kwargs):
n = 0
for listener in self.__listeners:
o = listener()
if o:
o(*argv, **kwargs)
n += 1
else:
self._removeAt(n)
#
#
```
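A usage sketch showing the weak-reference behavior of the event class above: listeners are held via `weakref.ref`, so they are dropped automatically once the last strong reference to the callable disappears.
```python
# Listeners vanish automatically once they are garbage collected.
def onChanged(value):
	print("changed to", value)
#

ev = WeakRefObservableEvent("changed")
ev += onChanged
ev.fire(42)       # prints: changed to 42

del onChanged     # the only strong reference is gone ...
ev.fire(43)       # ... so nothing is printed and the dead reference is purged
```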
#### File: python-module-jk-utils/testing/test_getFolderSize_performance.py
```python
import os
import time
import timeit
import jk_utils
DIRECTORY_TO_SCAN = os.path.abspath("..")
NUMBER_OF_REPEATS = 200
def avg(arr) -> float:
return sum(arr) / len(arr)
#
print("Measuring variant 1 ...")
vals1 = []
for i in range(0, NUMBER_OF_REPEATS):
t = time.time()
jk_utils.fsutils.__old_getFolderSize(DIRECTORY_TO_SCAN)
duration = time.time() - t
if i > 0:
vals1.append(duration)
print("Measuring variant 2 ...")
vals2 = []
for i in range(0, NUMBER_OF_REPEATS):
t = time.time()
jk_utils.fsutils.getFolderSize(DIRECTORY_TO_SCAN)
duration = time.time() - t
if i > 0:
vals2.append(duration)
print("Measuring variant 2 using timeit ...")
duration2timeit = timeit.timeit(stmt=lambda: jk_utils.fsutils.getFolderSize(DIRECTORY_TO_SCAN), number=NUMBER_OF_REPEATS) / NUMBER_OF_REPEATS
print("Variant 1:", round(avg(vals1) * 1000, 6), "ms")
print("Variant 2:", round(avg(vals2) * 1000, 6), "ms")
print("Variant 2 timeit:", round(duration2timeit * 1000, 6), "ms")
"""
Typical results:
Measuring variant 1 ...
Measuring variant 2 ...
Measuring variant 2 using timeit ...
Variant 1: 8.257395 ms
Variant 2: 4.799469 ms
Variant 2 timeit: 4.815246 ms
"""
``` |
{
"source": "jkpubsrc/python-module-jk-vcard",
"score": 2
} |
#### File: src/jk_vcard/VCFParser.py
```python
import jk_typing
from jk_utils.tokenizer2 import TokenizationError
from jk_version import Version
from .QuotedPrintable import QuotedPrintable
from .VCFTokenizer import VCFTokenizer
from .VCFToken import VCFToken
from .VCardItem import VCardItem
from .VCard import VCard
class VCFParser(object):
@staticmethod
def parseText(text:str, bDebug:bool = False) -> list:
tokens = VCFTokenizer.tokenize(text)
return VCFParser.parseTokens(tokens, bDebug)
#
@staticmethod
def parseTokens(tokens, bDebug:bool = False) -> list:
ret = []
# group tokens by begin/end
buffer = []
version = None
orgLines = []
for token in tokens:
orgLines.append(token.orgLine)
if token.key == "VERSION":
version = Version(token.values[0])
elif (token.key == "BEGIN") and (token.values[0] == "VCARD"):
assert not buffer
assert version is None
elif (token.key == "END") and (token.values[0] == "VCARD"):
assert version
ret.append(
VCard(version, buffer, orgLines)
)
buffer = []
orgLines = []
version = None
else:
preferredEncoding = None
if token.hasParam("ENCODING"):
# this token is encoded; decode it;
sEncoding = token.getParam("ENCODING")
sCharset = token.getParam("CHARSET")
preferredEncoding = sEncoding
# TODO: support more encoding
if sEncoding == "QUOTED-PRINTABLE":
for i in range(0, len(token.values)):
token.values[i] = QuotedPrintable.decode(token.values[i], sCharset)
token.removeParam("ENCODING")
token.removeParam("CHARSET")
else:
raise TokenizationError("Unexpected encoding: " + repr(sEncoding), token.lineNo, 1)
buffer.append(
VCardItem(token.key, token.params, token.values, preferredEncoding)
)
assert not buffer
assert version is None
return ret
#
#
```
#### File: src/jk_vcard/VCFToken.py
```python
class VCFToken(object):
def __init__(self, key:str, params:list, values:list, lineNo:int, orgLine:str):
self.key = key
self.params = params
self.lineNo = lineNo
self.values = values
self.orgLine = orgLine
#
def hasParam(self, paramName:str) -> bool:
for p, v in self.params:
if p == paramName:
return True
return False
#
def getParam(self, paramName:str, defaultValue = None):
for p, v in self.params:
if p == paramName:
return v
return defaultValue
#
def removeParam(self, paramName:str) -> bool:
i = 0
for p, v in self.params:
if p == paramName:
del self.params[i]
return True
i += 1
return False
#
def __str__(self):
return "VCFToken< key=" + repr(self.key) + ", params=" + repr(self.params) + ", values=" + repr(self.values) + " >"
#
def __repr__(self):
return self.__str__()
#
#
``` |
{
"source": "jkpubsrc/python-module-jk-xmlparser",
"score": 3
} |
#### File: src/jk_xmlparser/TagSpecial.py
```python
import jk_tokenizingparsing
class TagSpecial(object):
def __init__(self, location:jk_tokenizingparsing.SourceCodeLocation, name:str):
self.locationFrom = location.clone()
self.locationTo = None
self.name = name
self.items = []
#
def __str__(self):
return "TagSpecial<" + self.name + " @ " + str(self.locationFrom) + ">"
#
def __repr__(self):
return self.__str__()
#
#
```
#### File: src/jk_xmlparser/XMLDOMParser.py
```python
import re
import xml
import xml.sax
#from jk_testing import Assert
from jk_simplexml import *
from jk_tokenizingparsing import Token
from .XMLDOMParseException import XMLDOMParseException
from .TagBegin import TagBegin
from .TagComment import TagComment
from .TagEnd import TagEnd
from .TagSpecial import TagSpecial
from .XMLTokenizerImpl import XMLTokenizerImpl
class SAXConverter(xml.sax.handler.ContentHandler):
def __init__(self, sourceID):
super().__init__()
self.__sourceID = sourceID
self.__rootElement = None
self.__stack = []
#
def getRootElement(self) -> HElement:
return self.__rootElement
#
def startDocument(self):
# self._out.write("<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n")
pass
#
def startElement(self, name, attrs):
# for parsing with line and column numbers: https://stackoverflow.com/questions/15477363/xml-sax-parser-and-line-numbers-etc
x = HElement(name, [ HAttribute(k, v) for k, v in attrs.items() ])
if self.__stack:
self.__stack[-1].children.append(x)
self.__stack.append(x)
if self.__rootElement is None:
self.__rootElement = x
#
def endElement(self, name):
del self.__stack[-1]
#
def characters(self, content):
xElement = self.__stack[-1]
if xElement.children:
xLast = xElement.children[-1]
if isinstance(xLast, HText):
xLast.text += content
else:
xElement.children.append(HText(content))
else:
xElement.children.append(HText(content))
#
def ignorableWhitespace(self, content):
xElement = self.__stack[-1]
if xElement.children:
xLast = xElement.children[-1]
if isinstance(xLast, HText):
xLast.text += content
else:
xElement.children.append(HText(content))
else:
xElement.children.append(HText(content))
#
def processingInstruction(self, target, data):
#self._out.write("<?%s %s?>" % (target, data))
pass
#
#
class XMLDOMParser(object):
def __init__(self):
pass
#
def parseFile(self, filePath:str, logFunction = None) -> HElement:
with open(filePath, "r") as f:
textData = f.read()
return self.parseText(textData, filePath, logFunction = logFunction)
#
def parseText(self, textData:str, sourceID = None, logFunction = None) -> HElement:
saxConverter = SAXConverter(sourceID)
xml.sax.parseString(textData, saxConverter)
return saxConverter.getRootElement()
#
#
``` |
{
"source": "jkpubsrc/python-module-thaniya-client",
"score": 3
} |
#### File: src/thaniya_client/AbstractBackupConnector.py
```python
import typing
import string
import random
import os
import jk_utils
from .ThaniyaBackupContext import ThaniyaBackupContext
#
# This class represents a channel to a backup repository. Typically this is a client for a backup server.
# The regular way this class is used follows this workflow:
# * instantiate a subclass of this class and pass it on to the backup driver;
# * the backup driver invokes `initialize()`; this method should connect to a backup server and prepare everything for backup
# * the backup is performed by writing to the directory returned by `targetDirPath`;
# * the backup driver invokes `deinitialize()` in order to tear down the connection;
#
class AbstractBackupConnector(object):
@property
def needsToBeRoot(self) -> bool:
return False
#
def initialize(self, ctx:ThaniyaBackupContext, targetDirPath:str, nExpectedNumberOfBytesToWrite:int, parameters:dict):
raise NotImplementedError()
#
def deinitialize(self, ctx:ThaniyaBackupContext, bError:bool, statsContainer:dict):
raise NotImplementedError()
#
@property
def isReady(self) -> bool:
raise NotImplementedError()
#
@property
def targetDirPath(self) -> str:
raise NotImplementedError()
#
def dump(self):
raise NotImplementedError()
#
#
```
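A purely illustrative sketch of a minimal connector subclass following the workflow described above; it simply "backs up" into an already existing local directory. The class name and its behavior are assumptions for illustration, not part of the package.
```python
# Hypothetical minimal connector: writes backups directly into a local directory.
class LocalDirBackupConnector(AbstractBackupConnector):

	def __init__(self):
		self.__targetDirPath = None
	#

	def initialize(self, ctx:ThaniyaBackupContext, targetDirPath:str, nExpectedNumberOfBytesToWrite:int, parameters:dict):
		self.__targetDirPath = targetDirPath		# nothing to mount: the target directory is used directly
	#

	def deinitialize(self, ctx:ThaniyaBackupContext, bError:bool, statsContainer:dict):
		self.__targetDirPath = None					# nothing to tear down
	#

	@property
	def isReady(self) -> bool:
		return self.__targetDirPath is not None
	#

	@property
	def targetDirPath(self) -> str:
		return self.__targetDirPath
	#

	def dump(self):
		print("LocalDirBackupConnector:", self.__targetDirPath)
	#
#
```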
#### File: src/thaniya_client/BackupClient_ThaniyaSSH.py
```python
import sys
import os
import json
import urllib.request
import urllib.parse
import requests
import subprocess
from furl import furl
import jk_json
import jk_mounting
from .AbstractBackupConnector import AbstractBackupConnector
from .ThaniyaServerConnector import ThaniyaServerConnector
#
# Objects of this class are instantiated by ShiroiBackup.
#
class BackupClient_ThaniyaSSH(AbstractBackupConnector):
SSHFS_PATH = "/usr/bin/sshfs"
SUDO_PATH = "/usr/bin/sudo"
UMOUNT_PATH = "/bin/umount"
def __init__(self, connector:ThaniyaServerConnector, initStructure:dict):
assert isinstance(connector, ThaniyaServerConnector)
assert isinstance(initStructure, dict)
if not os.path.isfile(BackupClient_ThaniyaSSH.SSHFS_PATH):
raise Exception("sshfs is required but it is not installed!")
self.__connector = connector
self._sessionID = initStructure["sessionID"]
self._ssh = initStructure["ssh"]
self._ssh_dirPathData = initStructure.get("ssh_dirPathData")
self._ssh_dirPathExtra = initStructure.get("ssh_dirPathExtra")
self._ssh_dirPathRoot = initStructure.get("ssh_dirPathRoot")
self._ssh_hostAddress = initStructure.get("ssh_hostAddress")
self._ssh_login = initStructure.get("ssh_login")
self._ssh_password = initStructure.get("ssh_password")
self._ssh_port = initStructure.get("ssh_port")
self._ssh_relPathData = initStructure.get("ssh_relPathData")
self._ssh_relPathExtra = initStructure.get("ssh_relPathExtra")
self.__mountPoint = None
#
@property
def isReady(self) -> bool:
return bool(self.__mountPoint)
#
@property
def targetDirPath(self) -> str:
if self.__mountPoint:
return os.path.join(self.__mountPoint, self._ssh_relPathData)
else:
return None
#
@property
def mountPoint(self) -> str:
return self.__mountPoint
#
def initialize(self, backupDir:str):
self.__backupDir = backupDir
#
def mountSSH(self, dirPath:str):
assert isinstance(dirPath, str)
assert os.path.isdir(dirPath)
dirPath = os.path.abspath(dirPath)
mounter = jk_mounting.Mounter()
mip = mounter.getMountInfoByMountPoint(self._ssh_dirPathRoot)
if mip is not None:
raise Exception("Directory " + repr(self._ssh_dirPathRoot) + " already used by mount!")
cmd = [
BackupClient_ThaniyaSSH.SSHFS_PATH,
"-p", str(self._ssh_port), "-o", "password_stdin", "-o", "reconnect", self._ssh_login + "@" + self._ssh_hostAddress + ":" + self._ssh_dirPathRoot, dirPath
]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write((self._ssh_password + "\n").encode("utf-8"))
(stdout, stderr) = p.communicate(timeout=3)
if p.returncode != 0:
returnCode1 = p.returncode
stdOutData1 = stdout.decode("utf-8")
stdErrData1 = stderr.decode("utf-8")
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write(("yes\n" + self._ssh_password + "\n").encode("utf-8"))
(stdout, stderr) = p.communicate(timeout=3)
if p.returncode != 0:
returnCode2 = p.returncode
stdOutData2 = stdout.decode("utf-8")
stdErrData2 = stderr.decode("utf-8")
print("Mount attempt 1:")
print("\treturnCode =", returnCode1)
print("\tstdOutData =", repr(stdOutData1))
print("\tstdErrData =", repr(stdErrData1))
print("Mount attempt 2:")
print("\treturnCode =", returnCode2)
print("\tstdOutData =", repr(stdOutData2))
print("\tstdErrData =", repr(stdErrData2))
raise Exception("Failed to mount device!")
self.__mountPoint = dirPath
#
def umount(self, throwExceptionOnError:bool = True):
assert isinstance(throwExceptionOnError, bool)
if self.__mountPoint:
cmd = [
BackupClient_ThaniyaSSH.SUDO_PATH,
BackupClient_ThaniyaSSH.UMOUNT_PATH,
self.__mountPoint,
]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(timeout=10)
if p.returncode != 0:
returnCode = p.returncode
stdOutData = stdout.decode("utf-8")
stdErrData = stderr.decode("utf-8")
print("Unmount attempt:")
print("\treturnCode =", returnCode)
print("\tstdOutData =", repr(stdOutData))
print("\tstdErrData =", repr(stdErrData))
if throwExceptionOnError:
raise Exception("Failed to umount device!")
else:
self.__mountPoint = None
#
def __del__(self):
self.umount(throwExceptionOnError=False)
#
def onBackupCompleted(self, bError:bool):
self.umount(throwExceptionOnError=True)
#
def dump(self):
print("BackupClient_ThaniyaSSH")
for key in [ "_sessionID",
"_ssh", "_ssh_dirPathData", "_ssh_dirPathExtra", "_ssh_dirPathRoot", "_ssh_hostAddress", "_ssh_login", "_ssh_password",
"_ssh_port", "_ssh_relPathData", "_ssh_relPathExtra",
"mountPoint" ]:
print("\t" + key, "=", getattr(self, key))
#
#
```
#### File: src/thaniya_client/BackupConnector_CIFSMount.py
```python
import sys
import time
import os
import subprocess
import string
import random
import jk_utils
import jk_mounting
from .AbstractBackupConnector import AbstractBackupConnector
from .ThaniyaIO import ThaniyaIO
from .ThaniyaBackupContext import ThaniyaBackupContext
from .utils.temp import writeTempFile
class BackupConnector_CIFSMount(AbstractBackupConnector):
SMB_CLIENT_PATH = "/usr/bin/smbclient"
MOUNT_PATH = "/bin/mount"
SUDO_PATH = "/usr/bin/sudo"
UMOUNT_PATH = "/bin/umount"
def __init__(self):
self.__targetDirPath = None
self.__bIsMounted = False
#
def initialize(self, ctx:ThaniyaBackupContext, targetDirPath:str, nExpectedNumberOfBytesToWrite:int, parameters:dict):
self.__targetDirPath = targetDirPath
self._cifs_hostAddress = parameters.get("cifs_hostAddress")
self._cifs_login = parameters.get("cifs_login")
self._cifs_password = parameters.get("cifs_password")
#self._cifs_port = parameters.get("cifs_port", 445)
self._cifs_version = parameters.get("cifs_version")
self._cifs_shareName = parameters.get("cifs_shareName")
self._mountCIFS(
self.__targetDirPath,
self._cifs_hostAddress,
#self._cifs_port,
self._cifs_shareName,
self._cifs_login,
self._cifs_password,
self._cifs_version)
self.__bIsMounted = True
#
def deinitialize(self, ctx:ThaniyaBackupContext, bError:bool, statsContainer:dict):
if self.__bIsMounted:
# let's do 5 unmount attempts.
for i in range(0, 4):
time.sleep(1)
try:
self._umount(self.__targetDirPath)
return
except Exception as ee:
pass
time.sleep(1)
self._umount(self.__targetDirPath)
#
@property
def isReady(self) -> bool:
return self.__bIsMounted
#
@property
def targetDirPath(self) -> str:
return self.__targetDirPath
#
def dump(self):
print("BackupConnector_CIFSMount")
#for key in [ "_sessionID",
# "mountPoint" ]:
# print("\t" + key, "=", getattr(self, key))
#
def _mountCIFS(self,
localDirPath:str,
cifsHostAddress:str,
#cifsPort:int,
cifsShareName:str,
cifsLogin:str,
cifsPassword:str,
cifsVersion:str) -> bool:
assert isinstance(localDirPath, str)
assert os.path.isdir(localDirPath)
localDirPath = os.path.abspath(localDirPath)
mounter = jk_mounting.Mounter()
mip = mounter.getMountInfoByMountPoint(localDirPath)
if mip is not None:
raise Exception("Directory " + repr(localDirPath) + " already used by mount!")
credentialFilePath = writeTempFile("rw-------",
"username=" + cifsLogin + "\npassword=" + cifsPassword + "\n"
)
try:
options = [
"user=",
cifsLogin,
",credentials=",
credentialFilePath,
",rw",
]
if cifsVersion:
options.append(",vers=")
options.append(cifsVersion)
cmd = [
BackupConnector_CIFSMount.MOUNT_PATH,
"-t", "cifs",
"-o", "".join(options),
"//" + cifsHostAddress + "/" + cifsShareName,
localDirPath,
]
# print(" ".join(cmd))
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write((cifsPassword + "\n").encode("utf-8"))
(stdout, stderr) = p.communicate(timeout=3)
if p.returncode != 0:
returnCode1 = p.returncode
stdOutData1 = stdout.decode("utf-8")
stdErrData1 = stderr.decode("utf-8")
print("Mount attempt 1:")
print("\tcmd =", cmd)
print("\treturnCode =", returnCode1)
print("\tstdOutData =", repr(stdOutData1))
print("\tstdErrData =", repr(stdErrData1))
raise Exception("Failed to mount device!")
return False
else:
return True
finally:
try:
os.unlink(credentialFilePath)
except:
pass
#
def _umount(self, localDirPath:str, throwExceptionOnError:bool = True) -> bool:
assert isinstance(localDirPath, str)
assert isinstance(throwExceptionOnError, bool)
cmd = [
BackupConnector_CIFSMount.UMOUNT_PATH,
localDirPath,
]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(timeout=10)
if p.returncode != 0:
returnCode = p.returncode
stdOutData = stdout.decode("utf-8")
stdErrData = stderr.decode("utf-8")
print("Unmount attempt:")
print("\treturnCode =", returnCode)
print("\tstdOutData =", repr(stdOutData))
print("\tstdErrData =", repr(stdErrData))
if throwExceptionOnError:
raise Exception("Failed to umount device!")
return False
else:
return True
#
#
```
#### File: src/thaniya_client/TargetDirectoryStrategy_StaticDir.py
```python
from .AbstractTargetDirectoryStrategy import AbstractTargetDirectoryStrategy
class TargetDirectoryStrategy_StaticDir(AbstractTargetDirectoryStrategy):
def __init__(self, extraSubDirPath:str = None):
if extraSubDirPath is not None:
assert isinstance(extraSubDirPath, str)
assert extraSubDirPath
assert extraSubDirPath[0] != "/"
self.__extraSubDirPath = extraSubDirPath
#
def selectEffectiveTargetDirectory(self, baseTargetDirPath:str):
assert isinstance(baseTargetDirPath, str)
if self.__extraSubDirPath is not None:
baseTargetDirPath += "/" + self.__extraSubDirPath
return baseTargetDirPath
#
#
```
#### File: src/thaniya_client/ThaniyaBackupDriver.py
```python
import os
import typing
import time
import datetime
import json
import sys
import shutil
import jk_utils
import jk_logging
#from jk_testing import Assert
import jk_json
from jk_typing import checkFunctionSignature
from .constants import *
from .TargetDirectoryStrategy_StaticDir import TargetDirectoryStrategy_StaticDir
from .AbstractBackupConnector import AbstractBackupConnector
from .ThaniyaBackupContext import ThaniyaBackupContext
from .AbstractThaniyaTask import AbstractThaniyaTask
from .ThaniyaIO import ThaniyaIO
from .ProcessingContext import ProcessingContext, ProcessingFallThroughError
from .AbstractTargetDirectoryStrategy import AbstractTargetDirectoryStrategy
class ThaniyaBackupDriver(object):
# ================================================================================================================================
# ==== Constructor/Destructor
# ================================================================================================================================
#
# Constructor method.
#
# @param AbstractBackupConnector backupConnector An object that is used to connect to a backup repository/backup server later.
# @param dict backupConnectorParameters A dictionary that holds various parameters required to connect to the backup repository/backup server.
# @param str mountDirPath The local mount point to mount the remote directory at.
# @param AbstractTargetDirectoryStrategy targetDirStrategy A strategy that decides about which target directory to use exactly.
#
@checkFunctionSignature()
def __init__(self,
backupConnector:AbstractBackupConnector,
backupConnectorParameters:dict,
mountDirPath:str,
targetDirStrategy:typing.Union[AbstractTargetDirectoryStrategy,None] = None,
):
# verify arguments
if not os.path.isdir(mountDirPath):
raise Exception("Not a directory: " + repr(mountDirPath))
if not os.path.isabs(mountDirPath):
raise Exception("Not an absolute path: " + repr(mountDirPath))
if mountDirPath == "/":
raise Exception("Unsuitable path: " + repr(mountDirPath))
if targetDirStrategy is None:
targetDirStrategy = TargetDirectoryStrategy_StaticDir()
# accept arguments
if mountDirPath.endswith("/"):
mountDirPath = mountDirPath[:-1]
self.__mountDirPath = mountDirPath
self.__mountDirPath2 = mountDirPath + "/"
self.__backupConnector = backupConnector
self.__backupConnectorParameters = backupConnectorParameters
self.__targetDirStrategy = targetDirStrategy
if backupConnector.needsToBeRoot:
if os.geteuid() != 0:
raise Exception("Need to be root to use backup connector " + repr(backupConnector.__class__.__name__) + "!")
#
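# Hedged construction sketch: the connector object and its parameter dictionary below are
# assumptions for illustration only; the keyword arguments themselves are taken from the
# constructor above.
#
#   driver = ThaniyaBackupDriver(
#       backupConnector=someBackupConnector,                # any AbstractBackupConnector implementation
#       backupConnectorParameters={ ... },                  # connector specific settings
#       mountDirPath="/mnt/thaniya",                        # must exist, be absolute and must not be "/"
#       targetDirStrategy=TargetDirectoryStrategy_StaticDir("myhost"),
#   )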
# ================================================================================================================================
# ==== Helper Methods
# ================================================================================================================================
@checkFunctionSignature()
def __getDirTreeSize(self, dirPath:str, log:jk_logging.AbstractLogger) -> int:
assert dirPath
assert os.path.isabs(dirPath)
assert os.path.isdir(dirPath)
nestedLog = log.descend("Calculating size of directory: " + repr(dirPath))
try:
n = jk_utils.fsutils.getFolderSize(dirPath)
except Exception as ee:
nestedLog.exception(ee)
raise
nestedLog.notice("Size of " + repr(dirPath) + ": " + jk_utils.formatBytes(n))
return n
#
@checkFunctionSignature()
def __getBufferLogger(self, log:jk_logging.MulticastLogger) -> jk_logging.BufferLogger:
for logger in log.loggers:
if isinstance(logger, jk_logging.BufferLogger):
return logger
raise Exception("No buffer logger found in list of loggers!")
#
@checkFunctionSignature()
def __writeLogToFiles(
self,
bufferLogger:jk_logging.BufferLogger,
textFilePath:str,
jsonFilePath:str,
fileMode:typing.Union[int,str,jk_utils.ChModValue,None] = None,
):
bAppendToExistingFile = False
logMsgFormatter = None
jsonLogData = bufferLogger.getDataAsPrettyJSON()
with open(jsonFilePath, "w") as f:
json.dump(jsonLogData, f, indent="\t")
fileLogger = jk_logging.FileLogger.create(
textFilePath,
"none",
bAppendToExistingFile,
False,
fileMode,
logMsgFormatter,
)
bufferLogger.forwardTo(fileLogger)
#
@checkFunctionSignature()
def __analyseLogMessages(self, log:jk_logging.MulticastLogger) -> jk_logging.DetectionLogger:
for logger in log.loggers:
if isinstance(logger, jk_logging.BufferLogger):
detectionLogger = jk_logging.DetectionLogger.create(jk_logging.NullLogger.create())
logger.forwardTo(detectionLogger)
return detectionLogger
raise Exception("No buffer logger in list of loggers!")
#
# ================================================================================================================================
# ==== Public Methods
# ================================================================================================================================
#
# Invoke this method to perform a backup.
#
@checkFunctionSignature()
def performBackup(self,
backupTasks:list,
bSimulate:bool
):
for x in backupTasks:
assert isinstance(x, AbstractThaniyaTask)
#Assert.isInstance(x, AbstractThaniyaTask)
mainLog = jk_logging.MulticastLogger.create(
jk_logging.ConsoleLogger.create(logMsgFormatter=jk_logging.COLOR_LOG_MESSAGE_FORMATTER),
jk_logging.BufferLogger.create()
)
bError = False
try:
statsContainer = {
"tStart": time.time(),
"tEnd": None,
"success": None,
"expectedBytesToWrite": None,
"totalBytesWritten": None,
"avgWritingSpeed": None,
"simulate": bSimulate,
}
effectiveTargetDirPath = None
with ProcessingContext("Performing backup simulation" if bSimulate else "Performing backup", None, mainLog) as ctxMain:
# --------------------------------------------------------------------------------------------------------------------------------
# >>>> estimate the number of bytes we will likely have to write for this backup
with ProcessingContext(
text="Calculating disk space required",
targetDirPath=None,
log=ctxMain.log,
bMeasureDuration=True,
statsContainer=statsContainer,
statsDurationKey="d0_calcDiskSpace"
) as ctx:
nExpectedBytesToWrite = 0
for job in backupTasks:
assert isinstance(job, AbstractThaniyaTask)
#Assert.isInstance(job, AbstractThaniyaTask)
nestedCtx = ctx.descend(job.logMessageCalculateSpaceRequired)
with nestedCtx.log as nestedLog:
nExpectedBytesToWrite += job.calculateSpaceRequired(nestedCtx)
ctx.log.info("Estimated total size of backup: " + jk_utils.formatBytes(nExpectedBytesToWrite))
statsContainer["expectedBytesToWrite"] = nExpectedBytesToWrite
# --------------------------------------------------------------------------------------------------------------------------------
# >>>> now connect to the backup repository
with ProcessingContext(
text="Connecting to backup repository and preparing backup",
targetDirPath=None,
log=ctxMain.log,
bMeasureDuration=True,
statsContainer=statsContainer,
statsDurationKey="d1_connectAndPrepare"
) as ctx:
# check if there is a suitable directory where we can mount the remote file system
ThaniyaIO.checkThatDirExists(ctx, self.__mountDirPath)
ThaniyaIO.ensureDirMode(ctx, self.__mountDirPath, jk_utils.ChModValue("rwx------"))
# mount the remote file system
self.__backupConnector.initialize(ctx, self.__mountDirPath, nExpectedBytesToWrite, self.__backupConnectorParameters)
if not self.__backupConnector.isReady:
raise Exception("Backup client unexpectedly not ready for writing!")
# select the target directory where we will store the data. the variable "effectiveTargetDirPath"
# will receive the directory selected by the target directory strategy. we will write data there.
effectiveTargetDirPath = self.__targetDirStrategy.selectEffectiveTargetDirectory(self.__mountDirPath)
ctx.log.info("Selected target directory: " + repr(effectiveTargetDirPath))
# verify that we have the correct directory: the "effectiveTargetDirPath" must be located somewhere within
# the mounted directory tree.
if effectiveTargetDirPath.endswith("/"):
effectiveTargetDirPath2 = effectiveTargetDirPath
else:
effectiveTargetDirPath2 = effectiveTargetDirPath + "/"
assert effectiveTargetDirPath2[:len(self.__mountDirPath2)] == self.__mountDirPath2
ctx.log.notice("Creating subdirectories if necessary ...")
ThaniyaIO.ensureDirExists(ctx, effectiveTargetDirPath, jk_utils.ChModValue("rwx------"))
# check that the target directory fits our requirements: it must be empty.
bIsEmpty, contentEntries = ThaniyaIO.checkIfDirIsEmpty(ctx, effectiveTargetDirPath)
if not bIsEmpty:
print(contentEntries)
if STATS_JSON_FILE_NAME in contentEntries:
# target directory already seems to contain a backup
ctx.log.warn("Target directory already seems to contain a backup: " + effectiveTargetDirPath2)
ctx.log.warn("Overwriting this backup.")
else:
raise Exception("Backup directory contains various non-backup files or directories!")
# now we are ready. but before we begin doing something let's write the backup stats first.
jk_json.saveToFilePretty(statsContainer, os.path.join(effectiveTargetDirPath, STATS_JSON_FILE_NAME))
# ----
ctx.log.notice("Done.")
# --------------------------------------------------------------------------------------------------------------------------------
# >>>> Writing the backup data
if not bSimulate:
with ProcessingContext(
text="Writing the backup data",
targetDirPath=effectiveTargetDirPath,
log=ctxMain.log,
bMeasureDuration=True,
statsContainer=statsContainer,
statsDurationKey="d2_backup"
) as ctx:
for job in backupTasks:
assert isinstance(job, AbstractThaniyaTask)
#Assert.isInstance(job, AbstractThaniyaTask)
nestedCtx = ctx.descend(job.logMessagePerformBackup)
with nestedCtx.log as nestedLog:
job.performBackup(nestedCtx)
nTotalBytesWritten = self.__getDirTreeSize(effectiveTargetDirPath, ctx.log)
fDuration = ctx.duration
if (nTotalBytesWritten > 0) and (fDuration > 0):
fAvgWritingSpeed = nTotalBytesWritten/fDuration
sAvgWritingSpeed = jk_utils.formatBytesPerSecond(fAvgWritingSpeed)
else:
fAvgWritingSpeed = None
sAvgWritingSpeed = "n/a"
ctx.log.info("Total bytes written: " + jk_utils.formatBytes(nTotalBytesWritten))
ctx.log.info("Average writing speed: " + sAvgWritingSpeed)
statsContainer["totalBytesWritten"] = nTotalBytesWritten
statsContainer["avgWritingSpeed"] = fAvgWritingSpeed
except ProcessingFallThroughError as ee:
bError = True
except Exception as ee:
bError = True
mainLog.error(ee)
# --------------------------------------------------------------------------------------------------------------------------------
# >>>> Finish
try:
# detecting errors
detectionLogger = self.__analyseLogMessages(mainLog)
if detectionLogger.hasError() or detectionLogger.hasStdErr() or detectionLogger.hasException():
bError = True
# writing final status log message
if bError:
mainLog.error("Backup terminated erroneously.")
else:
mainLog.success("Backup successfully completed.")
if effectiveTargetDirPath is not None:
# let's try to write the backup stats before termination.
statsContainer["tEnd"] = time.time()
statsContainer["success"] = not bError
jk_json.saveToFilePretty(statsContainer, os.path.join(effectiveTargetDirPath, STATS_JSON_FILE_NAME))
# let's try to write the backup log before termination.
bufferLogger = self.__getBufferLogger(mainLog)
self.__writeLogToFiles(
bufferLogger,
os.path.join(effectiveTargetDirPath, PLAINTEXT_LOG_FILE_NAME),
os.path.join(effectiveTargetDirPath, JSON_LOG_FILE_NAME)
)
except ProcessingFallThroughError as ee:
bError = True
except Exception as ee:
bError = True
mainLog.error(ee)
# terminate connection
try:
with ProcessingContext("Terminating connection", None, mainLog) as ctxMain:
self.__backupConnector.deinitialize(ctxMain, bError, statsContainer)
except ProcessingFallThroughError as ee:
bError = True
except Exception as ee:
bError = True
mainLog.error(ee)
#
#
# Perform a test of the connector.
#
@checkFunctionSignature()
def testConnector(self):
mainLog = jk_logging.MulticastLogger.create(
jk_logging.ConsoleLogger.create(logMsgFormatter=jk_logging.COLOR_LOG_MESSAGE_FORMATTER),
jk_logging.BufferLogger.create()
)
N_EXPECTED_BYTES_TO_WRITE = 1000
bError = False
try:
statsContainer = {
"tStart": time.time(),
"tEnd": None,
"success": None,
"expectedBytesToWrite": None,
"totalBytesWritten": None,
"avgWritingSpeed": None,
"simulate": True,
}
effectiveTargetDirPath = None
with ProcessingContext("Performing connector test", None, mainLog) as ctxMain:
# --------------------------------------------------------------------------------------------------------------------------------
# >>>> connect to the backup repository
with ProcessingContext(
text="Connecting to backup repository and preparing backup",
targetDirPath=None,
log=ctxMain.log,
bMeasureDuration=True,
statsContainer=statsContainer,
statsDurationKey="d1_connectAndPrepare"
) as ctx:
# check if there is a suitable directory where we can mount the remote file system
ThaniyaIO.checkThatDirExists(ctx, self.__mountDirPath)
ThaniyaIO.ensureDirMode(ctx, self.__mountDirPath, jk_utils.ChModValue("rwx------"))
# mount the remote file system
self.__backupConnector.initialize(ctx, self.__mountDirPath, N_EXPECTED_BYTES_TO_WRITE, self.__backupConnectorParameters)
if not self.__backupConnector.isReady:
raise Exception("Backup client unexpectedly not ready for writing!")
# select the target directory where we will store the data. the variable "effectiveTargetDirPath"
# will receive the directory selected by the target directory strategy. we will write data there.
effectiveTargetDirPath = self.__targetDirStrategy.selectEffectiveTargetDirectory(self.__mountDirPath)
ctx.log.info("Selected target directory: " + repr(effectiveTargetDirPath))
# verify that we have the correct directory: the "effectiveTargetDirPath" must be located somewhere within
# the mounted directory tree.
if effectiveTargetDirPath.endswith("/"):
effectiveTargetDirPath2 = effectiveTargetDirPath
else:
effectiveTargetDirPath2 = effectiveTargetDirPath + "/"
assert effectiveTargetDirPath2[:len(self.__mountDirPath2)] == self.__mountDirPath2
ctx.log.notice("Creating subdirectories if necessary ...")
ThaniyaIO.ensureDirExists(ctx, effectiveTargetDirPath, jk_utils.ChModValue("rwx------"))
# check that the target directory fits our requirements: it must be empty.
bIsEmpty, contentEntries = ThaniyaIO.checkIfDirIsEmpty(ctx, effectiveTargetDirPath)
if not bIsEmpty:
if STATS_JSON_FILE_NAME in contentEntries:
# target directory already seems to contain a backup
ctx.log.info("Directory already seems to contain a backup: " + effectiveTargetDirPath2)
else:
raise Exception("Backup directory contains various non-backup files or directories!")
# now we are ready. but before we begin doing something let's write the backup stats first.
jk_json.saveToFilePretty(statsContainer, os.path.join(effectiveTargetDirPath, STATS_JSON_FILE_NAME))
# ----
ctx.log.notice("Done.")
# --------------------------------------------------------------------------------------------------------------------------------
except ProcessingFallThroughError as ee:
bError = True
except Exception as ee:
bError = True
if not ee.__class__.__name__.endswith("_ExceptionInChildContextException"):
mainLog.error(ee)
# --------------------------------------------------------------------------------------------------------------------------------
# >>>> Finish
try:
# detecting errors
detectionLogger = self.__analyseLogMessages(mainLog)
if detectionLogger.hasError() or detectionLogger.hasStdErr() or detectionLogger.hasException():
bError = True
# writing final status log message
if bError:
mainLog.error("Backup terminated erroneously.")
else:
mainLog.success("Backup successfully completed.")
if effectiveTargetDirPath is not None:
# let's try to write the backup stats before termination.
statsContainer["tEnd"] = time.time()
statsContainer["success"] = not bError
jk_json.saveToFilePretty(statsContainer, os.path.join(effectiveTargetDirPath, STATS_JSON_FILE_NAME))
# write log
bufferLogger = self.__getBufferLogger(mainLog)
self.__writeLogToFiles(
bufferLogger,
os.path.join(effectiveTargetDirPath, PLAINTEXT_LOG_FILE_NAME),
os.path.join(effectiveTargetDirPath, JSON_LOG_FILE_NAME)
)
except ProcessingFallThroughError as ee:
bError = True
except Exception as ee:
bError = True
mainLog.error(ee)
# terminate connection
try:
with ProcessingContext("Terminating connection", None, mainLog) as ctxMain:
self.__backupConnector.deinitialize(ctxMain, bError, statsContainer)
except ProcessingFallThroughError as ee:
bError = True
except Exception as ee:
bError = True
mainLog.error(ee)
#
#
```
#### File: src/thaniya_client/ThaniyaTask_BackupDir.py
```python
import os
import jk_pathpatternmatcher2
import jk_utils
from jk_typing import checkFunctionSignature
from .AbstractThaniyaTask import AbstractThaniyaTask
from .ThaniyaBackupContext import ThaniyaBackupContext
from .EnumTarPathMode import EnumTarPathMode
from .ThaniyaTar import ThaniyaTar
class ThaniyaTask_BackupDir(AbstractThaniyaTask):
@checkFunctionSignature()
def __init__(self, sourceDirPath:str):
assert sourceDirPath
if not os.path.isdir(sourceDirPath):
raise Exception("No such directory: " + sourceDirPath)
if not os.path.isabs(sourceDirPath):
raise Exception("Not an absolute path: " + sourceDirPath)
self.__sourceDirPath = sourceDirPath
self.__targetFileName = sourceDirPath.replace("/", "-") + ".tar"
if self.__targetFileName.startswith("-"):
self.__targetFileName = self.__targetFileName[1:]
#
@property
def logMessageCalculateSpaceRequired(self) -> str:
return "Calculating backup size of directory: " + repr(self.__sourceDirPath)
#
@checkFunctionSignature()
def calculateSpaceRequired(self, ctx:ThaniyaBackupContext) -> int:
nErrors, nSize = ThaniyaTar.tarCalculateSize(
ctx=ctx,
walker=jk_pathpatternmatcher2.walk(self.__sourceDirPath)
)
ctx.log.info("I/O expected: " + jk_utils.formatBytes(nSize))
return nSize
#
@property
def logMessagePerformBackup(self) -> str:
return "Performing backup of directory: " + repr(self.__sourceDirPath)
#
@checkFunctionSignature()
def performBackup(self, ctx:ThaniyaBackupContext):
ThaniyaTar.tar(
ctx=ctx,
outputTarFilePath=ctx.absPath(self.__targetFileName),
walker=jk_pathpatternmatcher2.walk(self.__sourceDirPath),
pathMode = EnumTarPathMode.RELATIVE_PATH_WITH_BASE_DIR
)
ctx.log.info("Backup performed.")
#
#
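# Hedged usage sketch: a task constructed like this would produce an "etc.tar" archive in
# the effective target directory once ThaniyaBackupDriver.performBackup() runs it (the
# "driver" instance is assumed to be configured as shown in ThaniyaBackupDriver.py):
#
#   task = ThaniyaTask_BackupDir("/etc")
#   driver.performBackup(backupTasks=[ task ], bSimulate=False)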
```
#### File: src/thaniya_client/ThaniyaTask_BackupWordPress.py
```python
import os
import signal
import subprocess
import jk_pathpatternmatcher2
import jk_utils
import jk_json
from .AbstractThaniyaTask import AbstractThaniyaTask
from .ThaniyaBackupContext import ThaniyaBackupContext
from .EnumTarPathMode import EnumTarPathMode
from .ThaniyaTar import ThaniyaTar
"""
#
# This backup task addresses a system WordPress installations.
#
class ThaniyaTask_BackupWordPress(AbstractThaniyaTask):
_FULL_BACKUP = False # if True all directories are considered for backup, even caching directories.
#
# Configuration parameters:
#
# @param str mediaWikiDirPath (required) The absolute directory path where the MediaWiki installation can be found.
# The final directory name in the path must be the same as the site name of the Wiki.
# Additionally there must be a cron script named "<sitename>cron.sh".
# @param str userName (required) The name of the user account under which NGINX, PHP and the Wiki cron process are executed.
#
def __init__(self,
mediaWikiDirPath:str,
userName:str,
):
self.__mwHelper = jk_mediawiki.MediaWikiLocalUserInstallationMgr(mediaWikiDirPath, userName)
if not ThaniyaTask_BackupMediaWiki_User._FULL_BACKUP:
self.__ignoreDirPathPatterns = [
os.path.join(self.__mwHelper.wikiDirPath, "BAK"),
os.path.join(self.__mwHelper.wikiDirPath, "cache"),
os.path.join(self.__mwHelper.wikiDirPath, "images", "thumb"),
os.path.join(self.__mwHelper.wikiDirPath, "images", "graphviz"),
]
else:
self.__ignoreDirPathPatterns = None
#
def calculateSpaceRequired(self, ctx:ThaniyaBackupContext) -> int:
with ctx.log as nestedLog:
# process root directory
nErrorsWikiRoot, nSizeWikiRoot = ThaniyaTar.tarCalculateSize(
ctx,
jk_pathpatternmatcher2.walk(
self.__mwHelper.wikiDirPath,
ignoreDirPathPatterns = self.__ignoreDirPathPatterns
)
)
nestedLog.info("I/O expected: " + jk_utils.formatBytes(nSizeWikiRoot))
# process database directory
nErrorsDBRoot, nSizeDBRoot = ThaniyaTar.tarCalculateSize(
ctx,
jk_pathpatternmatcher2.walk(
self.__mwHelper.wikiDBDirPath
)
)
nestedLog.info("I/O expected: " + jk_utils.formatBytes(nSizeDBRoot))
return nSizeWikiRoot + nSizeDBRoot
#
def performBackup(self, ctx:ThaniyaBackupContext):
with ctx.log as nestedLog:
# shut down various processes
bIsRunning = self.__mwHelper.isCronScriptRunning()
if bIsRunning:
self.__mwHelper.stopCronScript(nestedLog.descend("Stopping cron process(es) ..."))
else:
nestedLog.notice("No cron process(es) need to be stopped and later restarted as they are not running.")
# process root directory
ThaniyaTar.tar(
ctx=ctx,
outputTarFilePath=ctx.absPath(self.__mwHelper.wikiDirName + "-wiki.tar"),
walker=jk_pathpatternmatcher2.walk(
self.__mwHelper.wikiDirPath,
ignoreDirPathPatterns = self.__ignoreDirPathPatterns
),
pathMode=EnumTarPathMode.RELATIVE_PATH_WITH_BASE_DIR,
)
# process database directory
ThaniyaTar.tar(
ctx=ctx,
outputTarFilePath=ctx.absPath(self.__mwHelper.wikiDirName + "-sqlite.tar"),
walker=jk_pathpatternmatcher2.walk(
self.__mwHelper.wikiDBDirPath,
ignoreDirPathPatterns = self.__ignoreDirPathPatterns
),
pathMode=EnumTarPathMode.RELATIVE_PATH_WITH_BASE_DIR,
)
# restart processes
if bIsRunning:
self.__mwHelper.startCronScript(nestedLog.descend("Restarting cron process(es) ..."))
#
#
"""
``` |
{
"source": "jkpubsrc/Thaniya",
"score": 2
} |
#### File: src/thaniya_client/AbstractTargetDirectoryStrategy.py
```python
from .ILocalBackupOriginInfo import ILocalBackupOriginInfo
#
# This strategy builds the subdirectory part of the path backups are written to.
#
class AbstractTargetDirectoryStrategy(object):
#
# This method is invoked in order to receive a valid target directory.
#
# @param ILocalBackupOriginInfo originInfo Information about the origin of this backup.
# @return str Returns a path which indicates where to write the backup to. This return value will be made part of the absolute path to write data to.
#
def selectEffectiveTargetDirectory(self, originInfo:ILocalBackupOriginInfo) -> str:
raise NotImplementedError()
#
#
```
#### File: src/thaniya_client/BackupConnectorMixin_mountCIFS.py
```python
import os
import subprocess
import typing
import jk_mounting
import jk_typing
from .AbstractBackupConnector import AbstractBackupConnector
from .ThaniyaIO import ThaniyaIO
from .ThaniyaBackupContext import ThaniyaBackupContext
#
# NOTE: For this connector to work the user running this program needs to have <c>sudo</c> rights for invoking <c>umount</c>.
#
class BackupConnectorMixin_mountCIFS(object):
#SMB_CLIENT_PATH = "/usr/bin/smbclient"
MOUNT_PATH = "/bin/mount"
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
def _mountCIFS(self,
ctx:ThaniyaBackupContext,
localDirPath:str,
cifsHostAddress:str,
#cifsPort:int,
cifsShareName:str,
cifsLogin:str,
cifsPassword:str,
cifsVersion:str) -> bool:
assert isinstance(localDirPath, str)
assert os.path.isdir(localDirPath)
localDirPath = os.path.abspath(localDirPath)
mounter = jk_mounting.Mounter()
mip = mounter.getMountInfoByMountPoint(localDirPath)
if mip is not None:
raise Exception("Directory " + repr(localDirPath) + " already used by mount!")
credentialFilePath = ctx.privateTempDir.writeTextFile(
"username=" + cifsLogin + "\npassword=" + cifsPassword + "\n"
)
options = [
"user=",
cifsLogin,
",credentials=",
credentialFilePath,
",rw",
]
if cifsVersion:
options.append(",vers=")
options.append(cifsVersion)
cmd = [
BackupConnectorMixin_mountCIFS.MOUNT_PATH,
"-t", "cifs",
"-o", "".join(options),
"//" + cifsHostAddress + "/" + cifsShareName,
localDirPath,
]
# print(" ".join(cmd))
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write((cifsPassword + "\n").encode("utf-8"))
(stdout, stderr) = p.communicate(timeout=3)
if p.returncode != 0:
returnCode1 = p.returncode
stdOutData1 = stdout.decode("utf-8")
stdErrData1 = stderr.decode("utf-8")
print("Mount attempt:")
print("\tcmd =", cmd)
print("\treturnCode =", returnCode1)
print("\tstdOutData =", repr(stdOutData1))
print("\tstdErrData =", repr(stdErrData1))
raise Exception("Failed to mount device!")
return False
else:
return True
#
#
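# Hedged illustration of the command assembled above (host, share, login and mount point
# are placeholders; the credentials file is created in the private temp dir):
#
#   /bin/mount -t cifs -o user=alice,credentials=/path/to/credfile,rw,vers=3.0 //192.168.1.10/backup /mnt/thaniya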
```
#### File: src/thaniya_client/ILocalBackupOriginInfo.py
```python
import typing
import datetime
import socket
import jk_utils
#
# Provides various pieces of information about the origin of a backup.
# An instance of this class is used to feed information into various strategy implementations, such as the target directory strategies.
#
class ILocalBackupOriginInfo:
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Public Properties
################################################################################################################################
#
# Time stamp of the start of this backup.
#
@property
def backupDateTime(self) -> datetime.datetime:
raise NotImplementedError()
#
#
# Time stamp of the start of this backup in seconds since Epoch.
#
@property
def backupEpochTime(self) -> int:
raise NotImplementedError()
#
#
# The current host name we currently run this backup on.
#
@property
def localHostName(self) -> str:
raise NotImplementedError()
#
#
# The current user performing this backup.
#
@property
def currentUserName(self) -> str:
raise NotImplementedError()
#
#
# A string that identifies the type of backup. (There might be a variety of independent backups from different hosts.)
#
@property
def backupIdentifier(self) -> typing.Union[None,str]:
raise NotImplementedError()
#
################################################################################################################################
## Public Methods
################################################################################################################################
#
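# Hedged sketch of how a target directory strategy might consume this interface (the
# strategy class below is purely illustrative and not part of this package):
#
#   class TargetDirectoryStrategy_DateAndHost(AbstractTargetDirectoryStrategy):
#       def selectEffectiveTargetDirectory(self, originInfo:ILocalBackupOriginInfo) -> str:
#           return originInfo.localHostName + "/" + originInfo.backupDateTime.strftime("%Y-%m-%d-%H-%M")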
```
#### File: src/thaniya_client/ProcessingContext.py
```python
import time
import datetime
import typing
import jk_utils
import jk_logging
from jk_typing import checkFunctionSignature
from .ProcessingFallThroughError import ProcessingFallThroughError
from .ThaniyaBackupContext import ThaniyaBackupContext
from .BD2 import BD2
"""
def thaniya_context(description):
def thaniya_context2(func):
def wrapped(*wargs, **kwargs):
print(func)
print(func.__annotations__)
kwargs["c"] = "cc"
return 'I got a wrapped up {} for you'.format(str(func(*wargs, **kwargs)))
return wrapped
return thaniya_context2
#
"""
"""
class MeasureDuration(object):
def __init__(self, taskName:str, log:jk_logging.AbstractLogger):
self.__taskName = taskName
self.__log = log
self.__t0 = None
#
def __enter__(self):
self.__t0 = time.time()
d0 = datetime.datetime.fromtimestamp(self.__t0)
self.__log.info("Starting " + self.__taskName + " at: " + str(d0) + " (" + str(int(self.__t0)) + ")")
#
def __exit__(self, ex_type, exception, ex_traceback):
t1 = time.time()
d1 = datetime.datetime.fromtimestamp(t1)
self.__log.info("Terminating " + self.__taskName + " at: " + str(d1) + " (" + str(int(t1)) + ")")
fDurationSeconds = t1 - self.__t0
self.__log.info("Time spent on " + self.__taskName + ": " + jk_utils.formatTime(fDurationSeconds))
#
#
"""
#
# This context is similar to ThaniyaBackupContext, but it comprises a major processing step and measures performance.
#
class ProcessingContext(object):
################################################################################################################################
## Constructor
################################################################################################################################
#
# @param str text The text to write to the log on descend.
# @param BD2 bd2 The shared backup driver state object; it provides the logger, the stats container and the error flag used by this context.
# @param bool bMeasureDuration If <c>True</c> after completing this context time measurement information is written to the log.
# @param str statsDurationKey The name of the variable under which to store the stats data if processing is completed.
#
@checkFunctionSignature()
def __init__(self,
text:str,
bd2:BD2,
bMeasureDuration:bool,
statsDurationKey:typing.Union[str,None],
):
self.__bd2 = bd2
self.__text = text
self.__nestedLog = None
self.__bMeasureDuration = bMeasureDuration
self.__statsDurationKey = statsDurationKey
self.__t0 = None
self.__t1 = None
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def log(self) -> jk_logging.AbstractLogger:
return self.__nestedLog
#
@property
def bd2(self) -> BD2:
return self.__bd2
#
#
# The duration measured from the time when the context is entered.
#
@property
def duration(self) -> float:
if self.__t0 is None:
return -1
else:
if self.__t1 is None:
return time.time() - self.__t0
else:
return self.__t1 - self.__t0
#
################################################################################################################################
## Public Methods
################################################################################################################################
def __enter__(self):
self.__nestedLog = self.__bd2.log.descend(self.__text)
self.__t0 = time.time()
# d0 = datetime.datetime.fromtimestamp(self.__t0)
# self.__nestedLog.info("Starting this activity at: " + str(d0) + " (" + str(int(self.__t0)) + ")")
return ThaniyaBackupContext(self.__bd2, self.__nestedLog)
#
def __exit__(self, ex_type, ex_value, ex_traceback):
self.__t1 = time.time()
#d1 = datetime.datetime.fromtimestamp(t1)
fDurationSeconds = self.__t1 - self.__t0
if ex_type:
self.__bd2.setErrorFlag()
if (ex_type != jk_logging.ExceptionInChildContextException) and (ex_type != ProcessingFallThroughError):
self.__nestedLog.error(ex_value)
raise ProcessingFallThroughError()
# NOTE: we skip fall through errors as they already have been logged
#if exception and (exception is not jk_logging.AbstractLogger._EINSTANCE):
if ex_value and not isinstance(ex_value, ProcessingFallThroughError) and not ex_type.__name__.endswith("ExceptionInChildContextException"): # TODO: simplify!
self.__nestedLog.error(ex_value)
if self.__bMeasureDuration:
if ex_value:
#self.__nestedLog.error("Terminating with error at: " + str(d1) + " (" + str(int(t1)) + ")")
#self.__nestedLog.error("Time spent: " + jk_utils.formatTime(fDurationSeconds))
self.__nestedLog.notice("Terminating with error after: " + jk_utils.formatTime(fDurationSeconds))
else:
#self.__nestedLog.success("Terminating without error at: " + str(d1) + " (" + str(int(t1)) + ")")
#self.__nestedLog.success("Time spent: " + jk_utils.formatTime(fDurationSeconds))
self.__nestedLog.notice("Terminating with success after: " + jk_utils.formatTime(fDurationSeconds))
if self.__bMeasureDuration:
if not ex_value:
if (self.__bd2.statsContainer is not None) and self.__statsDurationKey:
self.__bd2.statsContainer.setValue(self.__statsDurationKey, fDurationSeconds)
if ex_value and not isinstance(ex_value, ProcessingFallThroughError) and not ex_value.__class__.__name__.endswith("ExceptionInChildContextException"): # TODO: simplify!
raise ProcessingFallThroughError()
#
#
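# Hedged usage sketch (a configured BD2 instance "bd2" is assumed; the stats key is
# illustrative and mirrors the keys used by the backup driver):
#
#   with ProcessingContext("Writing the backup data", bd2, True, "d2_backup") as ctx:
#       ctx.log.info("...")
#   # on a clean exit the measured duration is stored in bd2.statsContainer under "d2_backup";
#   # on an exception the error flag is set and a ProcessingFallThroughError is re-raised.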
```
#### File: thaniya_client/server/ThaniyaServerAPIConnectorV1.py
```python
import requests
import json
import typing
import jk_typing
import jk_json
from thaniya_common.utils import APIPassword
from .Auth_Basic_SHA2_256 import Auth_Basic_SHA2_256
class ThaniyaServerAPIConnectorV1(object):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, hostName:str, port:int):
self.__hostName = hostName
self.__port = port
self.__sid = None
self.__basUrl = "http://" + hostName + ":" + str(port) + "/api/v1/"
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def __performPOSTRequest(self, apiCmd:str, jData:dict):
assert isinstance(apiCmd, str)
assert isinstance(jData, dict)
targetURL = self.__basUrl + apiCmd
print("=" * 160)
print("==== " + targetURL)
if self.__sid:
jData["sid"] = self.__sid
for line in json.dumps(jData, indent="\t", sort_keys=True).split("\n"):
print("¦>> " + line)
r = requests.post(targetURL, json=jData)
if r.status_code != 200:
raise Exception("errResponse")
jData = r.json()
for line in jk_json.dumps(jData, indent="\t", sort_keys=True).split("\n"):
print("<<¦ " + line)
if not isinstance(jData, dict):
raise Exception("errResponse")
if "success" not in jData:
jk_json.prettyPrint(jData)
raise Exception("errResponse")
return jData["data"]
#
################################################################################################################################
## Public Methods
################################################################################################################################
def noop(self):
self.__performPOSTRequest("noop", {})
#
def noopAuth(self):
self.__performPOSTRequest("noopAuth", {})
#
def authenticate(self, userName:str, password:typing.Union[APIPassword,str]):
if isinstance(password, str):
password = APIPassword(password)
elif isinstance(password, APIPassword):
pass
else:
raise TypeError(str(type(password)))
a = Auth_Basic_SHA2_256(userName, password)
r = self.__performPOSTRequest("auth1", {
"userName": userName,
"authMethod": a.identifier,
})
r = a.onServerResponse(r)
r = self.__performPOSTRequest("auth2", r)
self.__sid = r["sid"]
#
def uploadStats(self) -> dict:
return self.__performPOSTRequest("uploadStats", {})
#
@jk_typing.checkFunctionSignature()
def allocateSlot(self, estimatedTotalBytesToUpload:int) -> dict:
return self.__performPOSTRequest("allocateSlot", {
"estimatedTotalBytesToUpload": estimatedTotalBytesToUpload
})
#
@jk_typing.checkFunctionSignature()
def uploadCompleted(self, uploadSlotID:str, bSuccess:bool):
return self.__performPOSTRequest("uploadCompleted", {
"slotID": uploadSlotID,
"success": bSuccess,
})
#
#
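# Hedged usage sketch (host, port and credentials are placeholders; the key carrying the
# slot identifier in the allocateSlot() response is an assumption):
#
#   c = ThaniyaServerAPIConnectorV1("backup.example.org", 8080)
#   c.authenticate("alice", "my-api-password")
#   r = c.allocateSlot(estimatedTotalBytesToUpload=50 * 1024 * 1024)
#   ... upload the data ...
#   c.uploadCompleted(r["slotID"], bSuccess=True)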
```
#### File: src/thaniya_client/ThaniyaBackupContext.py
```python
import os
import jk_logging
from jk_testing import Assert
from .utils.PrivateTempDir import PrivateTempDir
from .ProcessingFallThroughError import ProcessingFallThroughError
#
# This context provides access to important components and data required to perform backup tasks.
#
class ThaniyaBackupContext(object):
################################################################################################################################
## Constructor
################################################################################################################################
def __init__(self, bd2, log:jk_logging.AbstractLogger):
#Assert.isInstance(bd2, "BD2") # extend Assert to support string class names
Assert.isInstance(log, jk_logging.AbstractLogger)
self.__bd2 = bd2
self.__log = log
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def privateTempDir(self) -> PrivateTempDir:
return self.__bd2.privateTempDir
#
#
# Returns <c>True</c> if there has been any error, either specified explicitly by setting the flag <c>bError</c> or by writing an error message to the log.
#
@property
def hasError(self) -> bool:
return self.__bd2.hasError
#
"""
@property
def duration(self) -> float:
return self.__processingContext.duration
#
"""
@property
def log(self) -> jk_logging.AbstractLogger:
return self.__log
#
@property
def targetDirPath(self) -> str:
return self.__bd2.effectiveTargetDirPath
#
################################################################################################################################
## Public Methods
################################################################################################################################
#
# Perform log descending.
#
# @param str text The text to write to the logs.
# @return ThaniyaBackupContext Returns a new backup context, now filling a different section of the log.
#
def descend(self, text:str):
return ThaniyaBackupContext(self.__bd2, self.__log.descend(text))
#
#
# Get the absolute path of a file relative to the target directory.
#
def absPath(self, fileName:str) -> str:
if os.path.isabs(fileName):
assert fileName.startswith(self.__bd2.effectiveTargetDirPath + os.path.sep)
return fileName
else:
return os.path.join(self.__bd2.effectiveTargetDirPath, fileName)
#
def __enter__(self):
return self
#
def __exit__(self, ex_type, ex_value, ex_traceback):
if ex_type:
self.__bd2.setErrorFlag()
if (ex_type != jk_logging.ExceptionInChildContextException) and (ex_type != ProcessingFallThroughError):
self.__log.error(ex_value)
raise ProcessingFallThroughError()
#
#
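# Hedged usage note: inside a backup task ctx.absPath("etc.tar") resolves to
# "<effectiveTargetDirPath>/etc.tar"; an already absolute path is only accepted if it lies
# below the effective target directory, otherwise the assert above fires.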
```
#### File: src/thaniya_client/ThaniyaClientCfg.py
```python
import os
import json
import jk_utils
import jk_json
import jk_typing
from thaniya_common.cfg import *
class _Magic(CfgComponent_Magic):
MAGIC = "thaniya-client-cfg"
VERSION = 1
VALID_VERSIONS = [ 1 ]
#
class _GeneralV1(AbstractCfgComponent):
__VALID_KEYS = [
CfgKeyValueDefinition("tempBaseDir", str, True),
]
def __init__(self):
super().__init__(_GeneralV1.__VALID_KEYS)
self._tempBaseDir = None # str
#
#
class _ServerV1(AbstractCfgComponent):
VALID_KEYS = [
CfgKeyValueDefinition( key="host", pyType=str, nullable=False ),
CfgKeyValueDefinition( key="port", pyType=int, nullable=False ),
CfgKeyValueDefinition( key="login", pyType=str, nullable=False ),
CfgKeyValueDefinition( key="apiPassword", pyType=str, nullable=False ),
]
def __init__(self):
super().__init__()
self._host = None # str
self._port = None # int
self._login = None # str
self._apiPassword = None # str
#
#
class ThaniyaClientCfg(AbstractAppCfg):
################################################################################################################################
## Constructor Method
################################################################################################################################
def __init__(self):
super().__init__(
_Magic,
False,
{
"general": _GeneralV1(),
"server": _ServerV1(),
}
)
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def general(self) -> AbstractCfgComponent:
return self._groups["general"]
#
@property
def server(self) -> AbstractCfgComponent:
return self._groups["server"]
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def load():
cfgFilePathCandidates = [
os.path.join(jk_utils.users.getUserHome(), ".config/thaniya/cfg-client.jsonc"),
"/etc/thaniya/cfg-client.jsonc",
]
for cfgFilePath in cfgFilePathCandidates:
if os.path.isfile(cfgFilePath):
ret = ThaniyaClientCfg.loadFromFile(cfgFilePath)
# ensure that this file will always be private
iFileMode = jk_utils.ChModValue(userR=True, userW=True).toInt()
os.chmod(cfgFilePath, iFileMode)
return ret
raise Exception("No configuration file found!")
#
@staticmethod
def loadFromFile(filePath:str):
assert isinstance(filePath, str)
jData = jk_json.loadFromFile(filePath)
ret = ThaniyaClientCfg()
ret._loadFromJSON(jData)
return ret
#
#
# Write the this configuration to the local configuration file at "~/.config/thaniya/cfg-client.jsonc".
# This method sets file and directory mode to "private" mode: Only the current user will have access!
#
# @return str The path of the configuration file written.
#
def writeToLocal(self, bForceWrite:bool = False) -> str:
cfgFilePath = os.path.join(jk_utils.users.getUserHome(), ".config/thaniya/cfg-client.jsonc")
if os.path.isfile(cfgFilePath):
if not bForceWrite:
raise Exception("Configuration file already exists and 'bForceWrite' was not specified: " + cfgFilePath)
iDirMode = jk_utils.ChModValue(userR=True, userW=True, userX=True).toInt()
iFileMode = jk_utils.ChModValue(userR=True, userW=True).toInt()
dirPath = os.path.dirname(cfgFilePath)
os.makedirs(dirPath, iDirMode, exist_ok=True)
os.chmod(dirPath, iDirMode)
jk_json.saveToFilePretty(self.toJSON(), cfgFilePath)
os.chmod(cfgFilePath, iFileMode)
return cfgFilePath
#
#
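# Hedged example of what ~/.config/thaniya/cfg-client.jsonc might look like. The exact
# envelope (placement of magic/version) is produced by AbstractAppCfg and is not visible
# here, so the outer structure below is an assumption; the group and key names are taken
# from the component definitions above:
#
#   {
#       "magic": "thaniya-client-cfg",
#       "version": 1,
#       "general": { "tempBaseDir": "/tmp" },
#       "server": {
#           "host": "backup.example.org",
#           "port": 8080,
#           "login": "alice",
#           "apiPassword": "..."
#       }
#   }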
```
#### File: thaniya_common/cfg/CfgComponent_Defs.py
```python
import typing
import re
import jk_prettyprintobj
from .CfgKeyValueDefinition import CfgKeyValueDefinition
class CfgComponent_Defs(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self):
self.__definitions = {}
self.__replacements = {} # forward resolution of variables
self.__replacementsReverse = {} # backward resolution of variable
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dump(self, ctx:jk_prettyprintobj.DumpCtx):
ctx.dumpVar("definitions", self.__definitions)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def loadFromJSON(self, jData:dict):
assert isinstance(jData, dict)
for key, value in jData.items():
assert isinstance(key, str)
assert re.fullmatch("[A-Z][A-Z_]*", key)
assert isinstance(value, str)
self.__definitions = jData
self.__replacements = {
"$(" + key + ")":value for key, value in jData.items()
}
self.__replacementsReverse = {
value:key for key, value in self.__replacements.items()
}
#
def resolveValue(self, text:str) -> str:
for key, value in self.__replacements.items():
text = text.replace(key, value)
m = re.search(r"\$\(([a-zA-Z_]+)\)", text)  # search the whole text for any unresolved variable
if m:
raise Exception("Invalid variable: " + repr(m.group(1)))
return text
#
def simplifyValue(self, text:str) -> str:
for key, value in self.__replacementsReverse.items():
text = text.replace(key, value)
return text
#
def toJSON(self) -> dict:
return self.__definitions
#
################################################################################################################################
## Static Methods
################################################################################################################################
#
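# Usage sketch derived directly from the methods above:
#
#   defs = CfgComponent_Defs()
#   defs.loadFromJSON({ "BASE": "/srv/thaniya" })
#   defs.resolveValue("$(BASE)/volumes")        # -> "/srv/thaniya/volumes"
#   defs.simplifyValue("/srv/thaniya/volumes")  # -> "$(BASE)/volumes"
#   defs.resolveValue("$(UNDEFINED)/x")         # -> raises "Invalid variable: 'UNDEFINED'"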
```
#### File: thaniya_archive_httpd/inc/main_bp.py
```python
import random
import datetime
import flask
import flask_login
import werkzeug.security
import jk_logging
from thaniya_server_archive import AppRuntimeArchiveHttpd
from thaniya_server.utils import SVGDiskSpaceGraphGenerator
from thaniya_common.utils import APIPassword
def init_blueprint(app:flask.Flask, appRuntime:AppRuntimeArchiveHttpd, log:jk_logging.AbstractLogger):
assert isinstance(appRuntime, AppRuntimeArchiveHttpd)
main_bp = flask.Blueprint("main", __name__)
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/index.html")
#<EMAIL>
def index():
if flask_login.current_user.is_anonymous:
return flask.render_template(
"anonymous/index.jinja2",
rndid = random.randint(0, 999999),
htmlHeadTitle = "Thaniya Archive - /",
)
else:
return flask.render_template(
"authenticated/index.jinja2",
rndid = random.randint(0, 999999),
user = flask_login.current_user,
htmlHeadTitle = "Thaniya Archive - /",
visNavPath = "/ Index",
)
#
# --------------------------------------------------------------------------------------------------------------------------------
#<EMAIL>("/testpages/index.jinja2")
#def testpages_index():
# return flask.render_template("testpages/index.html", rndid=random.randint(0, 999999))
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/overview")
@flask_login.login_required
def overview():
fsInfoList = []
for fs in appRuntime.fileSystemCollection.filesystems:
fs.update()
gen = SVGDiskSpaceGraphGenerator(fs)
fsInfoList.append(gen)
return flask.render_template(
"authenticated/overview.jinja2",
rndid = random.randint(0, 999999),
user = flask_login.current_user,
htmlHeadTitle = "Thaniya Archive - /overview",
visNavPath = "/ Overview",
fsInfoList = fsInfoList,
volumeList = appRuntime.backupVolumeMgr.listVolumes(appRuntime.log),
archiveList = appRuntime.archiveMgr.archives,
)
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/backups-own")
@flask_login.login_required
def backups_own():
# group backups by date
backupGroupList = []
_lastDate = None
_currentGroup = None
for backup in appRuntime.backupMgr.getBackups(
actingUser = flask_login.current_user,
):
if backup.lBackupStartDate != _lastDate:
_currentGroup = []
_lastDate = backup.lBackupStartDate
backupGroupList.append((
datetime.datetime(_lastDate[0], _lastDate[1], _lastDate[2], 0, 0, 0, 0),
_currentGroup
))
_currentGroup.append(backup)
# 'backupGroupList' now contains tuples consisting of datetime and Backup[]
# ----
return flask.render_template(
"authenticated/backups_own.jinja2",
rndid = random.randint(0, 999999),
user = flask_login.current_user,
htmlHeadTitle = "Thaniya Archive - /backups-own",
visNavPath = "/ Own Backups",
backupGroupList = backupGroupList,
)
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/profile")
@flask_login.login_required
def profile():
return flask.render_template(
"authenticated/profile.jinja2",
rndid = random.randint(0, 999999),
user = flask_login.current_user,
htmlHeadTitle = "Thaniya Archive - /profile",
visNavPath = "/ Profile",
)
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/profile_chpasswd_own")
@flask_login.login_required
def profile_chpasswd_own():
return flask.render_template(
"authenticated/profile_chpasswd_own.jinja2",
rndid = random.randint(0, 999999),
user = flask_login.current_user,
htmlHeadTitle = "Thaniya Archive - /chpasspwd-own",
visNavPath = "/ Change own login password",
)
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/profile_chuploadpasswd_own")
@flask_login.login_required
def profile_chuploadpasswd_own():
return flask.render_template(
"authenticated/profile_chuploadpasswd_own.jinja2",
rndid = random.randint(0, 999999),
user = flask_login.current_user,
htmlHeadTitle = "Thaniya Archive - /chuploadpasspwd-own",
visNavPath = "/ Change backup upload password",
newBackupUploadPwd = APIPassword.generate(),
)
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/do_chpasswd_own", methods=["POST"])
@flask_login.login_required
def do_chpasswd_own_post():
#print("current_user =", flask_login.current_user)
# get data
user = flask_login.current_user
password_old = flask.request.form.get("password_old")
password_new_1 = flask.request.form.get("password_new_1")  # assumption: field names follow the "password_new_*" convention implied by the variable names
password_new_2 = flask.request.form.get("password_new_2")
# check if both new passwords match
if password_new_1 != password_new_2:
flask.flash("Please provide the same new password twice!", "error")
return flask.redirect(flask.url_for("main.profile_chpasswd_own"))
# check if new password is different
if password_old == password_new_1:
flask.flash("Please provide a new password!", "error")
return flask.redirect(flask.url_for("main.profile_chpasswd_own"))
# check if the new password is of sufficient length
if len(password_new_1) < 3:
flask.flash("Please provide a new password of sufficient length!", "error")
return flask.redirect(flask.url_for("main.profile_chpasswd_own"))
# take the user-supplied password, hash it, and compare it to the hashed password in the database
if not werkzeug.security.check_password_hash(user.passwordHash, password_old):
flask.flash("Please provide your old password. The one you specified is not correct.", "error")
return flask.redirect(flask.url_for("main.profile_chpasswd_own"))
if not appRuntime.userMgr.hasPrivilege(user, "privChangeOwnLoginPwd"):
flask.flash("Insufficient privileges.", "error")
return flask.redirect(flask.url_for("main.profile_chpasswd_own"))
# if the above check passes, then we know the user has the right credentials
#print("-- User check: okay. Allowing password change.")
user.passwordHash = werkzeug.security.generate_password_hash(password_new_1)  # assumption: hashing mirrors the check_password_hash() calls above
try:
user.store()
flask.flash("Your password has been changed successfully.", "message")
except Exception as ee:
flask.flash("Failed to store the modifications to your user account.", "error")
return flask.redirect(flask.url_for("main.profile"))
#
# --------------------------------------------------------------------------------------------------------------------------------
@main_bp.route("/do_chuploadpasswd_own", methods=["POST"])
@flask_login.login_required
def do_chuploadpasswd_own():
#print("current_user =", flask_login.current_user)
# get data
user = flask_login.current_user
login_password = flask.request.form.get("login_password")
upload_password = flask.request.form.get("upload_password")
# take the user-supplied password, hash it, and compare it to the hashed password in the database
if not werkzeug.security.check_password_hash(user.passwordHash, login_password):
flask.flash("Please provide your valid login password. The one you specified is not correct.", "error")
return flask.redirect(flask.url_for("main.profile_chuploadpasswd_own"))
if not appRuntime.userMgr.hasPrivilege(user, "privChangeOwnBackupUploadPwd"):
flask.flash("Insufficient privileges.", "error")
return flask.redirect(flask.url_for("main.profile_chuploadpasswd_own"))
# if the above check passes, then we know the user has the right credentials
#print("-- User check: okay. Allowing password change.")
user.uploadPwd = APIPassword(upload_password)  # assumption: the upload password is wrapped in APIPassword, as hinted by APIPassword.generate() above
try:
user.store()
flask.flash("Your backup upload password has been changed successfully.", "message")
except Exception as ee:
flask.flash("Failed to store the modifications to your user account.", "error")
return flask.redirect(flask.url_for("main.profile"))
#
# --------------------------------------------------------------------------------------------------------------------------------
app.register_blueprint(main_bp)
#
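# Hedged usage note: the surrounding HTTP application is expected to call
# init_blueprint(app, appRuntime, log) once during startup, after the Flask app and the
# AppRuntimeArchiveHttpd instance have been created.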
```
#### File: thaniya_server_archive/archive/ArchiveManager.py
```python
import os
import typing
import datetime
import jk_typing
import jk_utils
import jk_logging
import jk_prettyprintobj
from thaniya_common.utils import IOContext
from ..volumes.BackupVolumeID import BackupVolumeID
from ..volumes.BackupVolumeInfo import BackupVolumeInfo
from .ArchiveDataStore import ArchiveDataStore
class ArchiveManager(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self):
self.__ioContext = IOContext()
self.__archives = []
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def archives(self) -> typing.Tuple[ArchiveDataStore]:
return tuple(self.__archives)
#
@property
def backupVolumesUsed(self) -> typing.List[BackupVolumeID]:
return [
a.identifier for a in self.__archives
]
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dumpVarNames(self) -> list:
return [
"archives",
]
#
################################################################################################################################
## Public Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def register(self, volume:BackupVolumeInfo, bReadWrite:bool, log:jk_logging.AbstractLogger) -> ArchiveDataStore:
assert volume.isValid
assert volume.isActive
_s = " (readonly) ..." if bReadWrite else " ..."
if ArchiveDataStore.hasDataStore(volume):
with log.descend("Opening existing archive" + _s) as log2:
a = ArchiveDataStore(self.__ioContext, volume, bReadWrite, log2)
else:
with log.descend("Creating new archive" + _s) as log2:
a = ArchiveDataStore(self.__ioContext, volume, bReadWrite, log2)
self.__archives.append(a)
return a
#
@jk_typing.checkFunctionSignature()
def get(self, backupVolumeID:BackupVolumeID) -> typing.Union[ArchiveDataStore,None]:
for a in self.__archives:
if a.identifier == backupVolumeID:
return a
return None
#
@jk_typing.checkFunctionSignature()
def hasArchive(self, backupVolumeID:BackupVolumeID) -> bool:
for a in self.__archives:
if a.identifier == backupVolumeID:
return True
return False
#
#
# Invoke this method to get an archive for uploading a backup.
# This method will check if ...
# * there is an archive,
# * there is enough disk space available in that archive,
# * and provide a temporary upload directory from that archive.
#
# @param str|BackupVolumeID archiveID (optional) If an archive ID is specified, the framework will try to pick exactly *this* archive.
# The reason why this option is required is that uploads that might already have been performed partially
# should go to exactly the same archive again. We try to be idempotent in upload operations, so this
# information might be provided.
#
# @return ArchiveDataStore archive The archive if a suitable archive is found. <c>None</c> is returned otherwise.
# @return str tempDirPath A prepared temporary upload directory if a suitable archive is found. <c>None</c> is returned otherwise.
#
@jk_typing.checkFunctionSignature()
def getArchiveForIncomingBackup(self,
dt:datetime.datetime,
systemName:str,
backupUserName:str,
backupIdentifier:str,
nMaxSizeOfBackup:int,
archiveID:typing.Union[str,BackupVolumeID,None]
) -> typing.Tuple[ArchiveDataStore,str]:
if archiveID is not None:
if isinstance(archiveID, str):
backupVolumeID = BackupVolumeID.parseFromHexStr(archiveID)
for a in self.__archives:
if a.identifier == backupVolumeID:
tempDirPath = a.tryToPrepareForBackup(
dt=dt,
systemName=systemName,
backupUserName=backupUserName,
backupIdentifier=backupIdentifier,
nMaxSizeOfBackup=nMaxSizeOfBackup
)
if tempDirPath:
return a, tempDirPath
else:
for a in self.__archives:
tempDirPath = a.tryToPrepareForBackup(
dt=dt,
systemName=systemName,
backupUserName=backupUserName,
backupIdentifier=backupIdentifier,
nMaxSizeOfBackup=nMaxSizeOfBackup
)
if tempDirPath:
return a, tempDirPath
return None, None
#
#
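# Hedged usage sketch ("volume" and "log" are placeholders for a valid BackupVolumeInfo
# and a jk_logging logger):
#
#   mgr = ArchiveManager()
#   mgr.register(volume, bReadWrite=True, log=log)
#   a, tempDirPath = mgr.getArchiveForIncomingBackup(
#       dt=datetime.datetime.now(),
#       systemName="host1",
#       backupUserName="alice",
#       backupIdentifier="etc-backup",
#       nMaxSizeOfBackup=50 * 1024 * 1024,
#       archiveID=None,
#   )
#   if a is None:
#       ...  # no archive could accommodate the backup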
```
#### File: thaniya_server_archive/archive/BackupDataFile.py
```python
import typing
import re
import time
import os
import random
import jk_utils
import jk_json
#
# This class represents a backup data file. Backup data files form the main data content of a backup.
#
class BackupDataFile(object):
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, fe):
self.__fileName = fe.name
self.__filePath = fe.path
self.__size = jk_utils.AmountOfBytes.parse(fe.stat().st_size)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
@property
def filePath(self) -> str:
return self.__filePath
#
@property
def fileName(self) -> str:
return self.__fileName
#
@property
def sizeInBytes(self) -> jk_utils.AmountOfBytes:
return self.__size
#
################################################################################################################################
## Static Methods
################################################################################################################################
#
```
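A small sketch of how `BackupDataFile` objects might be built from a directory scan; the constructor above only needs an `os.DirEntry`-like object. The import path is an assumption.
```python
# Hypothetical sketch: wrap every regular file of a backup directory as a BackupDataFile.
import os
from thaniya_server_archive.archive.BackupDataFile import BackupDataFile		# assumed import path

def listBackupDataFiles(dirPath:str) -> list:
	ret = []
	for fe in os.scandir(dirPath):
		if fe.is_file():
			ret.append(BackupDataFile(fe))
	return ret
```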
#### File: thaniya_server_archive/archive/ProcessingRuleSet.py
```python
import os
import typing
import jk_typing
from .FileNamePattern import FileNamePattern
from .ProcessingRule import ProcessingRule
class ProcessingRuleSet(object):
################################################################################################################################
## Constructors
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __init__(self, processingRules:list):
self.__processingRules = processingRules
#
################################################################################################################################
## Public Property
################################################################################################################################
@property
def processingRules(self) -> list:
return self.__processingRules
#
################################################################################################################################
## Helper Method
################################################################################################################################
################################################################################################################################
## Public Method
################################################################################################################################
#
# Tries to find a suitable processing rule.
#
def tryMatch(self, filePathOrName:str) -> typing.Union[ProcessingRule,None]:
assert isinstance(filePathOrName, str)
fileName = os.path.basename(filePathOrName)
for rule in self.__processingRules:
assert isinstance(rule, ProcessingRule)
if rule.fileNamePattern.match(fileName):
return rule
return None
#
#
```
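A usage sketch for `tryMatch()`. Building the rule set itself requires `ProcessingRule`/`FileNamePattern` instances whose constructors are not part of this file, so the sketch takes an existing `ProcessingRuleSet` as input.
```python
# Hypothetical sketch: route incoming backup files to their processing rule, if any.
def splitFilesByRule(ruleSet, filePaths:list):
	matched = []
	unmatched = []
	for filePath in filePaths:
		rule = ruleSet.tryMatch(filePath)
		if rule is None:
			unmatched.append(filePath)
		else:
			matched.append((filePath, rule))
	return matched, unmatched
```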
#### File: src/thaniya_server_ctrl/_CLICmd_slot_list.py
```python
import os
import jk_utils
import jk_argparsing
import jk_console
from thaniya_server.app.CLICmdBase import CLICmdBase
from thaniya_server.app.CLICmdParams import CLICmdParams
from thaniya_server_upload.slots import UploadSlot
from thaniya_server_upload.slots import UploadSlotManager
from .AppRuntimeServerCtrl import AppRuntimeServerCtrl
class _CLICmd_slot_list(CLICmdBase):
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, appRuntime:AppRuntimeServerCtrl):
self.__appRuntime = appRuntime
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def cmdName(self) -> str:
return "slot-list"
#
#
# The short description.
#
@property
def shortDescription(self) -> str:
return "List all backup upload slots. (sudo)"
#
@property
def usesOutputWriter(self) -> bool:
return True
#
################################################################################################################################
## Public Methods
################################################################################################################################
def registerCmd(self, ap:jk_argparsing.ArgsParser) -> jk_argparsing.ArgCommand:
		argCmd = super().registerCmd(ap)
		return argCmd
#
def _runImpl(self, p:CLICmdParams) -> bool:
with p.out.section("List backup upload slots") as out:
table = jk_console.SimpleTable()
table.addRow(
"systemAccountName",
"state",
"backupUser",
"isAllocated",
"isInUseByClient",
"allocationTime",
"lastUsedTime",
"completionTime",
).hlineAfterRow = True
for slot in sorted(self.__appRuntime.uploadSlotMgr.slots, key=lambda x: x.systemAccountName):
assert isinstance(slot, UploadSlot)
table.addRow(
slot.systemAccountName,
str(slot.state),
slot.backupUser.name if slot.backupUser else "-",
"yes" if slot.isAllocated else "no",
"yes" if slot.isInUseByClient else "no",
str(slot.allocationTime) if slot.allocationTime else "-",
str(slot.lastUsedByClientTime) if slot.lastUsedByClientTime else "-",
str(slot.completionTime) if slot.completionTime else "-",
)
p.out.printTable(table)
#
#
```
#### File: src/thaniya_server_ctrl/TextDiskSpaceGraphGenerator.py
```python
import typing
import jk_typing
import jk_console
from jk_appmonitoring import RDiskSpacePart
from jk_appmonitoring import RFileSystem
_TEXT_COLOR_OTHER = jk_console.Console.ForeGround.STD_DARKGRAY
_TEXT_COLOR_RESERVED = jk_console.Console.ForeGround.STD_PURPLE
_TEXT_COLOR_FREE = jk_console.Console.ForeGround.STD_LIGHTGREEN
_TEXT_COLORS_USED = (
jk_console.Console.ForeGround.STD_LIGHTCYAN,
jk_console.Console.ForeGround.STD_CYAN,
jk_console.Console.ForeGround.STD_BLUE,
jk_console.Console.ForeGround.STD_LIGHTBLUE,
)
class _TextBarPart(RDiskSpacePart):
def __init__(self, part:RDiskSpacePart, textColor:str, character:str, bSmall:bool):
super().__init__(
part.name,
part.dirPath,
part.diskSpaceUsed,
part.fsSizeTotal,
part.partType,
)
self.textColor = textColor
self.character = character
self.bSmall = bSmall
#
#
class TextDiskSpaceGraphGenerator(object):
################################################################################################################################
## Constructor
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __init__(self, filesystem:RFileSystem, bGenerateWithTitle:bool = False, bGenerateWithXMLDeclaration:bool = False):
self.__bGenerateWithTitle = bGenerateWithTitle
self.__bGenerateWithXMLDeclaration = bGenerateWithXMLDeclaration
self.__name = filesystem.name
self.__mountPoint = filesystem.mountPoint
self.__devicePath = filesystem.devicePath
# ----
self.__fsSizeTotal = filesystem.fsSizeTotal
self.__fsSizeUsedPercent = filesystem.fsSizeUsedPercent
# ----
self.__usages = [] # _TextBarPart[]
availableColors = list(_TEXT_COLORS_USED)
for u in filesystem.usages:
if u.partType == "free":
c = "█"
col = _TEXT_COLOR_FREE
bSmall = True
elif u.partType == "reserved":
#c = "▒"
c = "█"
col = _TEXT_COLOR_RESERVED
bSmall = True
elif u.partType == "usedOther":
#c = "▒"
c = "█"
col = _TEXT_COLOR_OTHER
bSmall = True
else:
if not availableColors:
raise Exception("Too many sections!")
c = "█"
col = availableColors[0]
del availableColors[0]
bSmall = False
self.__usages.append(_TextBarPart(u, col, c, bSmall))
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def mountPoint(self) -> str:
return self.__mountPoint
#
@property
def name(self) -> str:
return self.__name
#
@property
def devicePath(self) -> str:
return self.__devicePath
#
@property
def title(self) -> str:
return "{:s} | Total disk space: {:.1f}G ({:.1f}% used)".format(
self.__name,
self.__fsSizeTotal / (1024*1024*1024),
self.__fsSizeUsedPercent,
)
#
################################################################################################################################
## Helper Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __rect(self, width:int, color:str, character:str) -> str:
return color + character * width + jk_console.Console.RESET
#
################################################################################################################################
## Public Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def toStr(self) -> str:
width = jk_console.Console.width() - 3 - 2
ret = []
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# >>>> add header
ret.append(
(
" "
+ jk_console.Console.BOLD
+ jk_console.Console.ForeGround.STD_LIGHTCYAN
+ "{:s} | Total disk space: {:.1f}G ({:.1f}% used)"
+ jk_console.Console.RESET
).format(
self.__name,
self.__fsSizeTotal / (1024*1024*1024),
self.__fsSizeUsedPercent,
)
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ret.append("")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# >>>> calculate the bars
# first calculate all widths
barSizes = []
for u in self.__usages:
w = int(round(width * u.diskSpaceUsedFraction))
barSizes.append(w)
# identify the largest bar
iLargest = 0
iLargestW = barSizes[0]
for i in range(1, len(barSizes)):
if barSizes[i] > iLargestW:
iLargestW = barSizes[i]
iLargest = i
# if a section is invisible: take space from largest bar
for i in range(0, len(barSizes)):
if barSizes[i] == 0:
barSizes[i] = 1
barSizes[iLargest] -= 1
# now generate the color bars
_temp = [ " " ]
for u, barSize in zip(self.__usages, barSizes):
_temp.append(self.__rect(barSize, u.textColor, u.character))
ret.append("".join(_temp))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ret.append("")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# >>>> add legend
_temp = ""
for u in self.__usages:
_temp2 = u.textColor + ("●" if u.bSmall else "🞉") + jk_console.Console.RESET + " "
_temp2 += "{:s}: {:.1f}G ({:.1f}%)".format(
u.name,
u.diskSpaceUsed / (1024*1024*1024),
u.diskSpaceUsedFraction * 100,
)
if _temp and (len(_temp) + 2 + len(_temp2) >= width):
ret.append(_temp)
_temp = ""
_temp += " " + _temp2
if _temp:
ret.append(_temp)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return "\n".join(ret)
#
#
```
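A sketch of how the generator above might be driven; how the `RFileSystem` instance from `jk_appmonitoring` is populated is assumed to happen elsewhere.
```python
# Hypothetical sketch: print a colored text bar for a monitored file system.
# Assumes TextDiskSpaceGraphGenerator from the file above is importable in this scope.
def printDiskUsage(filesystem):
	gen = TextDiskSpaceGraphGenerator(filesystem)
	print(gen.toStr())
```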
#### File: thaniya_server/api/APIFlaskBlueprint.py
```python
import typing
import functools
import jk_exceptionhelper
import jk_typing
import flask
import flask_login
from ..utils.APIError import APIError
from .APIMethodContext import APIMethodContext
from ..session.MemorySessionManager import MemorySessionManager
from ..usermgr.BackupUser import BackupUser
from ..usermgr.BackupUserManager import BackupUserManager
class APIFlaskBlueprint(flask.Blueprint):
################################################################################################################################
## Constructor
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __init__(self, app:flask.Flask, import_name:str, appRuntime, sessionMgr:MemorySessionManager, userMgr:BackupUserManager, url_prefix:str):
super().__init__("api", import_name, url_prefix=url_prefix)
self.app = app
self.appRuntime = appRuntime
self.apiSessionMgr = sessionMgr
self.userMgr = userMgr
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def __getPeerFingerprint(self, request:flask.Request, remoteAddr:str) -> str:
userAgent = request.environ.get("HTTP_USER_AGENT", "")
accept = request.environ.get("HTTP_ACCEPT", "")
acceptLanguage = request.environ.get("HTTP_ACCEPT_LANGUAGE", "")
return "|".join([remoteAddr, userAgent, accept, acceptLanguage])
#
def __getRemoteAddr(self, request:flask.Request) -> str:
s = request.environ.get("HTTP_X_FORWARDED_FOR")
if s:
return s
s = request.environ.get("REMOTE_ADDR")
if s:
return s
s = request.remote_addr
if s:
return s
		raise Exception("Unable to determine the remote address of the client!")
#
################################################################################################################################
## Public Methods
################################################################################################################################
def apiCall(self, bAuthRequired:bool = False):
def my_decorator(f):
@functools.wraps(f)
def decorated_function(*args, **kwargs):
jResponse = None
try:
request = flask.request
remoteAddr = self.__getRemoteAddr(request)
peerFingerprint = self.__getPeerFingerprint(request, remoteAddr)
# print some debugging output
print("=" * 160)
print("==== base_url:", request.base_url)
print("==== peerFingerprint:", peerFingerprint)
for k, v in request.headers:
print("==", k, ":", v)
# retrieve the data sent by the client
if request.method == "GET":
jData = request.args.to_dict()
elif request.method == "POST":
jData = request.get_json()
if (jData is None) or not isinstance(jData, dict):
raise APIError("errRequest")
else:
raise APIError("errRequest")
# print incoming data sent by the client
print("¦>> {")
for key, value in jData.items():
print("¦>>\t", key, "=", repr(value))
print("¦>> }")
# let's isolate the session ID and get user name and session data
sessionData = None
userName = None
if "sid" in jData:
sid = jData["sid"]
if not isinstance(sid, str):
raise APIError("errRequest")
del jData["sid"]
userName, sessionData = self.apiSessionMgr.get(sid, peerFingerprint, True)
if userName is None: # None indicates that the session does not exist or has timed out
# let's assume the client did not send a session ID;
# this way *if* we have to deal with a session ID we always will have a valid one from now on;
sid = None
else:
sid = None
# if we have a valid session ID, get the user object
user = None
if sid is None:
if bAuthRequired:
raise APIError("errNotAuth")
else:
user = self.userMgr.getUserE(userName)
# call the API method
ctx = APIMethodContext(self.appRuntime, request, remoteAddr, peerFingerprint, jData, user, sid, sessionData)
newArgs = ( ctx, ) + args
ret = f(*newArgs, **kwargs)
# create the success response data
jResponse = {
"success": True,
"data": ret
}
except APIError as ee:
# something failed; create the error response data
jResponse = {
"errID": ee.errID
}
except Exception as ee:
# something failed seriously and unexpectedly; create the error response data
eobj = jk_exceptionhelper.analyseException(ee)
eobj.dump()
jResponse = {
"errID": "errUnexpected"
}
# print the data we are going to return
print("<<¦ {")
for key, value in jResponse.items():
print("<<¦ \t", key, "=", repr(value))
print("<<¦ }")
# return the data
return flask.jsonify(jResponse)
#
return decorated_function
#
return my_decorator
#
#
```
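A sketch of how an endpoint might be registered on an `APIFlaskBlueprint` instance. The route path and response fields are made-up examples; only the decorator and the injected `ctx` argument (an `APIMethodContext`) come from the code above.
```python
# Hypothetical sketch: registering an API endpoint on an APIFlaskBlueprint instance.
def registerPingAPI(blueprint):

	@blueprint.route("/ping", methods=[ "POST" ])
	@blueprint.apiCall(bAuthRequired=True)
	def apiPing(ctx, *args, **kwargs):
		# whatever is returned here ends up in the "data" field of the JSON response
		return {
			"pong": True,
		}
	#

	return apiPing
```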
#### File: thaniya_server/app/OutputWriter.py
```python
import jk_console
from .CLIForm import CLIForm
from .IOutputWriter import IOutputWriter
FG = jk_console.Console.ForeGround
SECTION_COLOR = FG.STD_LIGHTCYAN
SUBSECTION_COLOR = FG.STD_LIGHTCYAN
class _PrintSubSection(IOutputWriter):
def __init__(self, printer, prefix:str, title:str, bColor:bool = True, color:str = None):
assert printer
assert isinstance(prefix, str)
assert isinstance(title, str)
assert isinstance(bColor, bool)
if color is None:
if bColor:
				color = SUBSECTION_COLOR
if color is None:
color = ""
colorReset = ""
else:
assert isinstance(color, str)
colorReset = jk_console.Console.RESET
self.__color = color
self.__bColor = bColor
self.__printer = printer
self._print = printer.print
self._print(color + prefix + title + colorReset)
self.__prefix = prefix
#
def __enter__(self):
return self
#
def _dataToStr(self, *args):
return self.__prefix + "⸽ " + " ".join([ str(a) for a in args ])
#
def __exit__(self, exClazz, exObj, exStackTrace):
self._print(self.__prefix + "⸌┈")
self._print()
#
#
class _PrintSection(IOutputWriter):
def __init__(self, printer, title:str, bColor:bool = True, color:str = None):
assert printer
assert isinstance(title, str)
if color is None:
if bColor:
color = SECTION_COLOR
if color is None:
color = ""
colorReset = ""
else:
assert isinstance(color, str)
colorReset = jk_console.Console.RESET
self.__color = color
self.__bColor = bColor
self.__printer = printer
self._print = printer.print
self._print()
self._print(color + ">"*120 + colorReset)
self._print(color + ">>>>>>>> " + title + " " + colorReset)
self._print(color + ">"*120 + colorReset)
self._print()
#
def __enter__(self):
return self
#
def subsection(self, *args, color:str = None) -> _PrintSubSection:
assert len(args) > 0
title = " ".join([str(a) for a in args])
if color is None:
color = self.__color
return _PrintSubSection(self.__printer, " ", title, self.__bColor, color)
#
def _dataToStr(self, *args):
return " " + " ".join([ str(a) for a in args ])
#
def __exit__(self, exClazz, exObj, exStackTrace):
self._print()
#
#
class OutputWriter(IOutputWriter):
################################################################################################################################
## Constructor Methods
################################################################################################################################
def __init__(self, bColor:bool = True):
self.__bLastLineWasEmpty = False
self.__bHadOutput = False
self._print = self.print
self.__bColor = bColor
self.__buffer = []
self.__bAutoFlush = False
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def autoFlush(self) -> bool:
return self.__bAutoFlush
#
@autoFlush.setter
def autoFlush(self, value:bool):
assert isinstance(value, bool)
self.__bAutoFlush = value
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def __printToBuffer(self, *args):
s = " ".join([ str(a) for a in args ])
self.__buffer.append(s)
if self.__bAutoFlush:
self.flush()
#
################################################################################################################################
## Public Methods
################################################################################################################################
def print(self, *args):
text = " ".join([ str(a) for a in args ])
text = text.rstrip()
if len(text) == 0:
if not self.__bLastLineWasEmpty:
self.__buffer.append("")
self.__bLastLineWasEmpty = True
else:
self.__buffer.append(text)
self.__bLastLineWasEmpty = False
self.__bHadOutput = True
if self.__bAutoFlush:
self.flush()
#
def _dataToStr(self, *args):
return " ".join([ str(a) for a in args ])
#
def section(self, *args, color:str = None) -> _PrintSection:
assert len(args) > 0
title = " ".join([str(a) for a in args])
return _PrintSection(self, title, self.__bColor, color)
#
def __enter__(self):
self.print()
return self
#
def __exit__(self, exClazz, exObj, exStackTrace):
self.print()
self.flush()
#
def flush(self):
for line in self.__buffer:
print(line)
self.__buffer.clear()
#
#
```
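A sketch of typical use: the writer buffers lines, `section(...)` produces a visual block, and leaving the outer `with` block flushes the buffer to stdout. The section title and messages are made-up examples.
```python
# Hypothetical sketch: buffered, sectioned console output using the OutputWriter above.
def demoOutput():
	out = OutputWriter(bColor=False)
	with out:
		with out.section("Backup overview"):
			out.print("2 archives found")
			out.print("1 upload slot in use")
	# leaving the outer 'with' block calls flush(), which prints the buffered lines
```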
#### File: thaniya_server/flask/FlaskFilter_formatBytes.py
```python
import jk_utils
from .AbstractFlaskTemplateFilter import AbstractFlaskTemplateFilter
#
# ...
#
class FlaskFilter_formatBytes(AbstractFlaskTemplateFilter):
def __call__(self, value:float):
if value is None:
return ""
return jk_utils.formatBytes(float(value))
#
#
```
#### File: thaniya_server/flask/FlaskFilter_formatPercent.py
```python
import jk_utils
from .AbstractFlaskTemplateFilter import AbstractFlaskTemplateFilter
#
# ...
#
class FlaskFilter_formatPercent(AbstractFlaskTemplateFilter):
def __call__(self, value:float):
if value is None:
return ""
return str(round(value * 100, 1)) + "%"
#
#
```
#### File: thaniya_server/flask/FlaskFilter_tagsToStr.py
```python
from .AbstractFlaskTemplateFilter import AbstractFlaskTemplateFilter
#
# ...
#
class FlaskFilter_tagsToStr(AbstractFlaskTemplateFilter):
def __call__(self, tags:list):
if tags:
return ", ".join(tags)
else:
return ""
#
#
```
#### File: thaniya_server/utils/SVGDiskSpaceGraphGenerator.py
```python
import typing
import jk_typing
from jk_svg import *
from jk_appmonitoring import RDiskSpacePart
from jk_appmonitoring import RFileSystem
_CSS_COLOR_OTHER = "#406040"
_CSS_COLOR_RESERVED = "#604040"
_CSS_COLOR_FREE = "#40ff00"
_CSS_COLORS_USED = (
"#00ffe0",
"#00c0ff",
"#0080ff",
"#006080",
)
class _SVGBarPart(RDiskSpacePart):
def __init__(self, part:RDiskSpacePart, cssColor:str, bSmall:bool):
super().__init__(
part.name,
part.dirPath,
part.diskSpaceUsed,
part.fsSizeTotal,
part.partType,
)
self.cssColor = cssColor
self.bSmall = bSmall
#
#
class SVGDiskSpaceGraphGenerator(object):
################################################################################################################################
## Constructor
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __init__(self, filesystem:RFileSystem, bGenerateWithTitle:bool = False, bGenerateWithXMLDeclaration:bool = False):
self.__bGenerateWithTitle = bGenerateWithTitle
self.__bGenerateWithXMLDeclaration = bGenerateWithXMLDeclaration
self.__name = filesystem.name
self.__mountPoint = filesystem.mountPoint
self.__devicePath = filesystem.devicePath
# ----
self.__fsSizeTotal = filesystem.fsSizeTotal
self.__fsSizeUsedPercent = filesystem.fsSizeUsedPercent
# ----
self.__usages = [] # _SVGBarPart[]
availableColors = list(_CSS_COLORS_USED)
for u in filesystem.usages:
if u.partType == "free":
col = _CSS_COLOR_FREE
bSmall = True
elif u.partType == "reserved":
col = _CSS_COLOR_RESERVED
bSmall = True
elif u.partType == "usedOther":
col = _CSS_COLOR_OTHER
bSmall = True
else:
if not availableColors:
raise Exception("Too many sections!")
col = availableColors[0]
del availableColors[0]
bSmall = False
self.__usages.append(_SVGBarPart(u, col, bSmall))
# ----
self.textCSSColor = "#ffffff"
self.textFontFamily = "Arial"
self.boxFrameCSSColor = None
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def mountPoint(self) -> str:
return self.__mountPoint
#
@property
def name(self) -> str:
return self.__name
#
@property
def devicePath(self) -> str:
return self.__devicePath
#
@property
def title(self) -> str:
return "{:s} | Total disk space: {:.1f}G ({:.1f}% used)".format(
self.__name,
self.__fsSizeTotal / (1024*1024*1024),
self.__fsSizeUsedPercent,
)
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def toSVG(self, width:int) -> str:
assert width > 0
# ----
svg = SVGGraphic()
y = 0
with svg.createGroup() as g:
if self.__bGenerateWithTitle:
with g.createText() as text:
text.x = 0
text.y = y + 12
text.textContent = "{:s} | Total disk space: {:.1f}G ({:.1f}% used)".format(
self.__name,
self.__fsSizeTotal / (1024*1024*1024),
self.__fsSizeUsedPercent,
)
text.style = "font-size:16px;"
y += 25
x = 0
for u in self.__usages:
with g.createRect() as rect:
w = width * u.diskSpaceUsedFraction
rect.setBounds(x, y, w, 20)
rect.style = "fill:" + u.cssColor
x += w
if self.boxFrameCSSColor:
with g.createRect() as rect:
rect.setBounds(0, y, width, 20)
rect.style = "fill:none;stroke:{};stroke-width:1px".format(boxFrameCSSColor)
y += 25
ymax = y
yTextHeight = 20
with svg.createGroup() as g:
x = 0
for u in self.__usages:
with g.createCircle() as circle:
circle.setCenter(x + 10, y + 15)
if u.bSmall:
circle.r = 6
else:
circle.r = 10
circle.style = "fill:" + u.cssColor
if not u.bSmall:
with g.createCircle() as circle:
circle.setCenter(x + 10, y + 15)
circle.r = 3
circle.style = "fill:#000" # TODO: this is a colored circle with a second black middle circle; make the middle circle transparent => this here should be a ring!
with g.createText() as text:
text.x = x + 30
text.y = y + 20
text.textContent = "{:s}: {:.1f}G ({:.1f}%)".format(
u.name,
u.diskSpaceUsed / (1024*1024*1024),
u.diskSpaceUsedFraction * 100,
)
ymax = y + yTextHeight
x += 280
if x > width - 280:
x = 0
y += yTextHeight
svg.cssStyleLines = [
"text {",
"\tfill:{};".format(self.textCSSColor) if self.textCSSColor else "",
"\tfont-family:{};".format(self.textFontFamily) if self.textFontFamily else "",
"\tfont-size:12px;",
"\tstroke:none;",
"}"
]
svg.attributes["viewBox"] = "0 0 {}px {}px".format(width, ymax)
svg.attributes["width"] = "{}px".format(width)
svg.attributes["height"] = "{}px".format(ymax)
return svg.toSVG(bPretty=True, bWithXMLDeclaration=self.__bGenerateWithXMLDeclaration)
#
#
```
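A sketch analogous to the text variant: render the SVG for a monitored file system and write it to disk. The output path and width are made-up examples.
```python
# Hypothetical sketch: write the generated SVG to a file.
# Assumes SVGDiskSpaceGraphGenerator from the file above is importable in this scope.
def writeDiskUsageSVG(filesystem, outFilePath:str = "/tmp/diskusage.svg", width:int = 800):
	gen = SVGDiskSpaceGraphGenerator(filesystem, bGenerateWithTitle=True)
	with open(outFilePath, "w") as f:
		f.write(gen.toSVG(width))
```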
#### File: src/thaniya_server_sudo/SudoScriptRunner.py
```python
import typing
import re
import os
import subprocess
import sys
import jk_prettyprintobj
import jk_typing
import jk_utils
import jk_logging
from .SudoScriptResult import SudoScriptResult
class SudoScriptRunner(object):
################################################################################################################################
## Constructor
################################################################################################################################
@jk_typing.checkFunctionSignature()
def __init__(self, scriptDirPath:str):
assert sys.version_info[0] == 3
self.__scriptDirPath = scriptDirPath
self.__scriptNames = set()
for fe in os.scandir(scriptDirPath):
if fe.is_file():
m = re.match("^([a-zA-Z_0-9-]+)\.sh$", fe.name)
if m is not None:
self.__scriptNames.add(m.group(1))
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def scriptDirPath(self) -> str:
return self.__scriptDirPath
#
#
# Returns a list of scripts that can be run as superuser.
#
@property
def scriptNames(self) -> typing.List[str]:
return sorted(self.__scriptNames)
#
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
@jk_typing.checkFunctionSignature()
def run0(self, scriptName:str, stdin:str = None, timeout:float = None) -> SudoScriptResult:
return self.run(scriptName, [], stdin, timeout)
#
@jk_typing.checkFunctionSignature()
def run0E(self,
scriptName:str,
stdin:str = None,
timeout:float = None,
exErrMsg:str = None,
log:jk_logging.AbstractLogger = None
) -> SudoScriptResult:
return self.runE(scriptName, [], stdin, timeout, exErrMsg, log)
#
@jk_typing.checkFunctionSignature()
def run1(self, scriptName:str, argument1:str, stdin:str = None, timeout:float = None) -> SudoScriptResult:
assert argument1
return self.run(scriptName, [ argument1 ], stdin, timeout)
#
@jk_typing.checkFunctionSignature()
def run1E(self,
scriptName:str,
argument1:str,
stdin:str = None,
timeout:float = None,
exErrMsg:str = None,
log:jk_logging.AbstractLogger = None
) -> SudoScriptResult:
assert argument1
return self.runE(scriptName, [ argument1 ], stdin, timeout, exErrMsg, log)
#
@jk_typing.checkFunctionSignature()
def run2(self, scriptName:str, argument1:str, argument2:str, stdin:str = None, timeout:float = None) -> SudoScriptResult:
assert argument1
assert argument2
return self.run(scriptName, [ argument1, argument2 ], stdin, timeout)
#
@jk_typing.checkFunctionSignature()
def run2E(self,
scriptName:str,
argument1:str,
argument2:str,
stdin:str = None,
timeout:float = None,
exErrMsg:str = None,
log:jk_logging.AbstractLogger = None
) -> SudoScriptResult:
assert argument1
assert argument2
return self.runE(scriptName, [ argument1, argument2 ], stdin, timeout, exErrMsg, log)
#
@jk_typing.checkFunctionSignature()
def run3(self, scriptName:str, argument1:str, argument2:str, argument3:str, stdin:str = None, timeout:float = None) -> SudoScriptResult:
assert argument1
assert argument2
assert argument3
return self.run(scriptName, [ argument1, argument2, argument3 ], stdin, timeout)
#
@jk_typing.checkFunctionSignature()
def run3E(self,
scriptName:str,
argument1:str,
argument2:str,
argument3:str,
stdin:str = None,
timeout:float = None,
exErrMsg:str = None,
log:jk_logging.AbstractLogger = None
) -> SudoScriptResult:
assert argument1
assert argument2
assert argument3
return self.runE(scriptName, [ argument1, argument2, argument3 ], stdin, timeout, exErrMsg, log)
#
#
# Run the specified script as <c>root</c>.
	# An error result is returned if the script did not run properly.
#
# @param str scriptName (required) The name of the script to run. An exception is raised if the specified script does not exist.
# @param str[] arguments (required) A list of script arguments. (This may be an empty list, but a list *must* be specified.)
# @param str stdin (optional) Data that should be piped to the script.
# @param float timeout (optional) A timeout value in seconds. If the script has not terminated within the specified time an exception is raised.
#
@jk_typing.checkFunctionSignature()
def run(self, scriptName:str, arguments:list, stdin:str = None, timeout:float = None) -> SudoScriptResult:
if scriptName not in self.__scriptNames:
raise Exception("No such script: " + scriptName)
# ----
if stdin is None:
p_stdin = None
p_input = None
else:
p_stdin = subprocess.PIPE
p_input = stdin
cmdPath = os.path.join(self.__scriptDirPath, scriptName + ".sh")
cmdArgs = [
"/usr/bin/sudo",
cmdPath,
]
cmdArgs.extend(arguments)
		if sys.version_info[1] <= 6:
			p = subprocess.Popen(
				cmdArgs,
				universal_newlines=True,	# in python 3.7 this is replaced by argument named 'text' (which indicates that text input is required)
				shell=False,
				stdin=p_stdin,
				stdout=subprocess.PIPE,
				stderr=subprocess.PIPE)
		else:
			p = subprocess.Popen(
				cmdArgs,
				text=True,
				shell=False,
				stdin=p_stdin,
				stdout=subprocess.PIPE,
				stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(input=p_input, timeout=timeout)
returnCode = p.wait()
return SudoScriptResult(cmdPath, arguments, stdout, stderr, returnCode)
#
#
# Run the specified script as <c>root</c>.
# An exception is raised if the script did not run properly.
#
# @param str scriptName (required) The name of the script to run. An exception is raised if the specified script does not exist.
# @param str[] arguments (required) A list of script arguments. (This may be an empty list, but a list *must* be specified.)
# @param str stdin (optional) Data that should be piped to the script.
# @param float timeout (optional) A timeout value in seconds. If the script has not terminated within the specified time an exception is raised.
	# @param str exErrMsg (optional) A text error message to use for the exception if an error occurred. If you specify <c>None</c> here a default
# error message is generated.
	# @param AbstractLogger log (optional) If an error occurs and a logger has been specified, debugging data from attempting to run the external
# script will be written to this logger.
@jk_typing.checkFunctionSignature()
def runE(self,
scriptName:str,
arguments:list,
stdin:str = None,
timeout:float = None,
exErrMsg:str = None,
log:jk_logging.AbstractLogger = None
) -> SudoScriptResult:
r = self.run(scriptName, arguments, stdin, timeout)
if r.isError:
if log:
r.dump(printFunc=log.notice)
if exErrMsg is None:
if r.errorID:
exErrMsg = "Error encountered: {}".format(r.errorID)
else:
exErrMsg = "Error encountered: (unknown)"
raise Exception(exErrMsg)
return r
#
#
```
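A sketch of how the runner might be used. The script directory and script name are made-up examples; `SudoScriptRunner` only requires that a matching `<name>.sh` exists in the configured directory.
```python
# Hypothetical sketch: run a privileged helper script with one argument.
# Assumes SudoScriptRunner from the file above is importable in this scope.
def mountVolume(volumePath:str):
	runner = SudoScriptRunner("/opt/thaniya/sudo-scripts")		# assumed script directory
	# run1E() raises an exception (optionally with a custom message) if the script fails
	result = runner.run1E(
		"mount_volume",											# assumed script name: mount_volume.sh
		volumePath,
		timeout=30,
		exErrMsg="Failed to mount volume " + volumePath,
	)
	return result
```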
#### File: thaniya_server/testing/test-jobprocessing-standard.py
```python
from _testprg import *
from thaniya_server.jobs import *
import time
"""
This file allows testing the job queue and job processing. A job queue is generated, a job is inserted, and a job processor picks it up
and processes the job. But after three seconds job processing is interrupted, so that you will see an <c>InterruptedException</c> error message.
"""
class MyJobProcessor(AbstractJobProcessor):
def __init__(self):
super().__init__("noop")
#
def processJob(self, ctx:JobProcessingCtx, job:Job):
print("BEGIN noop")
for i in range(0, 5):
ctx.checkForTermination()
time.sleep(1)
print("END noop")
#
#
with testPrg(__file__, hasTempDir=True) as (ioCtx, dataDirPath, tempDirPath, log):
jobProcessingEngine = JobProcessingEngine(log, dirPath = tempDirPath)
jobProcessingEngine.register(MyJobProcessor())
jobQueue = JobQueue(
ioContext=ioCtx,
dirPath=tempDirPath,
bResetJobStateOnLoad=True,
log=log
)
jobProcessingEngine.start(jobQueue)
job = jobQueue.scheduleJob("noop", 0, { "foo": "bar" })
while True:
time.sleep(1)
print(job.state)
if job.state not in [ EnumJobState.READY, EnumJobState.PROCESSING ]:
break
jobProcessingEngine.terminate()
print()
print()
print()
print("=" * 120)
jobQueue.dump()
print("=" * 120)
```
#### File: thaniya_server_upload/slots/IUploadSlotContext.py
```python
from thaniya_server_sudo import SudoScriptRunner
from thaniya_server.usermgr import BackupUserManager
from thaniya_server.sysusers import SystemAccountManager
class IUploadSlotContext:
@property
def backupUserManager(self) -> BackupUserManager:
raise NotImplementedError()
#
@property
def sudoScriptRunner(self) -> SudoScriptRunner:
raise NotImplementedError()
#
@property
def systemAccountManager(self) -> SystemAccountManager:
raise NotImplementedError()
#
#
``` |
{
"source": "jkraenzle/appresponse_awsipranges",
"score": 3
} |
#### File: jkraenzle/appresponse_awsipranges/awsipranges.py
```python
import argparse
import getpass
import ipaddress
import json
import os
import requests
from typing import Any, IO
import yaml
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# ---- YAML helper functions -----
# Define YAML Loader, as default Loader is not safe
class YAMLLoader(yaml.SafeLoader):
"""YAML Loader with `!include` constructor."""
def __init__(self, stream: IO) -> None:
"""Initialise Loader."""
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(loader: YAMLLoader, node: yaml.Node) -> Any:
"""Include file referenced at node."""
filename = os.path.abspath(os.path.join(loader._root, loader.construct_scalar(node)))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, YAMLLoader)
yaml.add_constructor('!include', construct_include, YAMLLoader)
def yamlread (fn):
try:
if fn != None:
with open(fn) as fh:
yamlresult = yaml.load (fh, YAMLLoader)
else:
yamlresult = None
except FileNotFoundError:
yamlresult = None
return yamlresult
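# Hypothetical example (not part of the original script): thanks to the '!include'
# constructor registered above, a filter file passed via --regionfilter or
# --servicefilter can simply pull in another YAML file, e.g.
#
#   # regionfilter.yaml
#   !include common_regions.yaml
#
# and is then read with:   region_filter = yamlread("regionfilter.yaml")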
# -----
AWSIPRANGESURL = "https://ip-ranges.amazonaws.com/ip-ranges.json"
AWSIPRANGESSYNCTIMEFILE = "awssynctime.yaml"
def aws_ipranges ():
result = requests.get (AWSIPRANGESURL)
if result.status_code in [200, 201, 204]:
result_json = result.json()
return result_json
else:
print("Failed to pull AWS IP ranges from %s" % AWSIPRANGESURL)
return None
def appresponse_authenticate (hostname, username, password):
credentials = {"username":username, "password":password}
payload = {"generate_refresh_token":False, "user_credentials":credentials}
headers = {"Content-Type":"application/json"}
result = requests.post ('https://' + hostname + '/api/mgmt.aaa/2.0/token', data=json.dumps(payload), headers=headers, verify=False)
if result.status_code not in [200, 201, 204]:
print("Status code was %s" % result.status_code)
print("Error: %s" % result.content)
return None
else:
token_json = result.json ()
access_token = token_json ["access_token"]
return access_token
def appresponse_awsipranges_to_hostgroups (result_json, region_filter=None, service_filter=None, prepend=None):
awsipprefixes = result_json["prefixes"]
awsiprange_hostgroups = {}
for awsipprefix in awsipprefixes:
region = awsipprefix["region"]
if (region_filter != None) and (region not in region_filter):
continue
service = awsipprefix["service"]
if (service_filter != None) and (service not in service_filter):
continue
prefix = awsipprefix["ip_prefix"]
if region in awsiprange_hostgroups.keys ():
awsiprange_hostgroups[region].append(prefix)
else:
values = [prefix]
awsiprange_hostgroups[region] = values
awsipv6prefixes = result_json["ipv6_prefixes"]
for awsipv6prefix in awsipv6prefixes:
region = awsipv6prefix["region"]
if (region_filter != None) and (region not in region_filter):
continue
service = awsipprefix["service"]
if (service_filter != None) and (service not in service_filter):
continue
ipv6_prefix = awsipv6prefix["ipv6_prefix"]
if region in awsiprange_hostgroups.keys ():
awsiprange_hostgroups[region].append(ipv6_prefix)
else:
values = [ipv6_prefix]
awsiprange_hostgroups[region] = values
hostgroups = []
for awsiprange_hostgroup in awsiprange_hostgroups:
if prepend != None:
hostgroup_name = prepend + awsiprange_hostgroup
else:
hostgroup_name = awsiprange_hostgroup
hostgroup = {
# "created":,
"desc": "Created by script",
"enabled": True,
"hosts": awsiprange_hostgroups [awsiprange_hostgroup],
#"id": ,
#"in_speed":,
#"in_speed_unit":,
#"last_modified":,
#"last_modified_username":,
#"member_hostgroups":,
#"member_hostgroups_names":,
"name": hostgroup_name #,
#"out_speed":,
#"out_speed_unit":,
}
hostgroups.append (hostgroup)
return hostgroups
def iprange_to_ipv4subnets (range):
range_strs = range.split('-')
count = len(range_strs)
if count == 1:
ip = ipaddress.IPv4Address(range_strs[0])
subnets = [str(ip) + '/32']
return subnets
elif count == 2:
startip = ipaddress.IPv4Address(range_strs[0])
endip = ipaddress.IPv4Address(range_strs[1])
subnets = [str(subnet) for subnet in ipaddress.summarize_address_range(startip,endip)]
return subnets
return None
def iprange_to_ipv6subnets (range):
range_strs = range.split('-')
count = len(range_strs)
if count == 1:
ip = ipaddress.IPv6Address(range_strs[0])
subnets = [str(ip)]
return subnets
if count == 2:
startip = ipaddress.IPv6Address(range_strs[0])
endip = ipaddress.IPv6Address(range_strs[1])
subnets = [str(subnet) for subnet in ipaddress.summarize_address_range(startip,endip)]
return subnets
return None
def appresponse_hostgroups_get (hostname, access_token):
bearer = "Bearer " + access_token
headers = {"Authorization":bearer}
result = requests.get('https://' + hostname + '/api/npm.classification/3.2/hostgroups', headers=headers,
verify=False)
if result.status_code in [200, 201, 204]:
result_json = result.json ()
else:
return None
hostgroups = result_json ['items']
return hostgroups
def appresponse_hostgroups_merge (hostname, access_token, hostgroups):
# Create headers for authentication
bearer = "Bearer " + access_token
headers = {"Authorization":bearer}
# Place hostgroups in proper format
payload = {}
payload ['items'] = hostgroups
# Submit
result = requests.post('https://' + hostname + '/api/npm.classification/3.2/hostgroups/merge', headers=headers,
data=json.dumps(payload), verify=False)
if result.status_code in [200, 201, 204]:
return result
else:
return None
def appresponse_existing_hosts_convert (ranges):
converted_hosts = []
i = 0
for range in ranges:
if '.' in range:
ipv4_subnets = iprange_to_ipv4subnets(range)
if ipv4_subnets != None:
converted_hosts.extend(ipv4_subnets)
elif ':' in range:
ipv6_subnets = iprange_to_ipv6subnets(range)
if ipv6_subnets != None:
converted_hosts.extend(ipv6_subnets)
i+=1
return converted_hosts
def appresponse_hostname_form (hostgroup_name, prepend):
if prepend != None:
return prepend + hostgroup_name
else:
return hostgroup_name
def appresponse_hostgroups_compare (existing_hostgroups, new_hostgroups):
hostgroups_created = []
hostgroup_ranges_removed = {}
hostgroup_ranges_added = {}
	for new_hostgroup in new_hostgroups:
		found_name = False
		new_hostgroup_name = new_hostgroup['name']
		for existing_hostgroup in existing_hostgroups:
			if new_hostgroup_name == existing_hostgroup['name']:
found_name = True
if 'hosts' in existing_hostgroup:
hosts_to_compare = appresponse_existing_hosts_convert (existing_hostgroup['hosts'])
else:
hosts_to_compare = []
if set(new_hostgroup['hosts']) == set(hosts_to_compare):
break
else:
removed_ranges = set(hosts_to_compare) - set(new_hostgroup['hosts'])
if len(removed_ranges) != 0:
hostgroup_ranges_removed[new_hostgroup_name] = removed_ranges
added_ranges = set(new_hostgroup['hosts']) - set(hosts_to_compare)
if len(added_ranges) != 0:
hostgroup_ranges_added[new_hostgroup_name] = added_ranges
if found_name == True:
break
if found_name == False:
hostgroups_created.append (new_hostgroup_name)
return hostgroups_created, hostgroup_ranges_removed, hostgroup_ranges_added
def main ():
# Parse the arguments
parser = argparse.ArgumentParser (description="Automated conversion of documented AWS IP ranges to Host Groups")
parser.add_argument('--hostname')
parser.add_argument('--username')
parser.add_argument('--password')
parser.add_argument('--regionfilter', help="YAML file containing list of regions to include in Host Groups")
parser.add_argument('--servicefilter', help="YAML file containing list of services to include in Host Groups")
parser.add_argument('--hostgroupprepend', help="String prepended to the AWS regions to form the Host Group names")
	parser.add_argument('--ignoresynctime', action='store_true', help="Do not store the sync time from the AWS IP range JSON that is used to check for updates. This flag is useful for testing.")
	parser.add_argument('--checkforupdates', action='store_true', help="Check whether the AWS IP range JSON has changed since the last run")
args = parser.parse_args ()
# Pull latest AWS IP Range file
	awsresult = aws_ipranges ()
	if awsresult == None:
		# aws_ipranges() prints its own error message; there is nothing to compare or upload
		return
# Validate the argument --checkforupdates
if args.checkforupdates != None:
if isinstance(args.checkforupdates, bool):
if args.checkforupdates == True:
oldsynctime = yamlread(args.hostname + AWSIPRANGESSYNCTIMEFILE)
if oldsynctime != None and oldsynctime['syncToken'] == awsresult ['syncToken']:
					# Shortcut the rest of the script if there are no updates to the IP ranges on AWS
					print("AWS has not updated its IP ranges on %s. No Host Group definitions will be updated." % AWSIPRANGESURL)
					print("If other configurations have changed, please run again without --checkforupdates.")
return
else:
print ("The value for --checkforupdates is not recognized.")
# Assuming there is a new update or the user has not requested to check for updates, validate the other arguments
# and confirm that the script can authenticate to the AppResponse appliance
if args.hostname == None:
print ("Please specify a hostname using --hostname")
return
if args.username == None:
print ("Please specify a username using --username")
return
if args.password == None:
print ("Please provide the password for account %s" % args.username)
password = getpass.getpass ()
else:
		password = args.password
access_token = appresponse_authenticate (args.hostname, args.username, password)
# Pull existing Host Groups from appliance for comparison
# The script allows filtering, so it will compare existing Host Groups to new definitions to provide details on changes
existing_hostgroups = appresponse_hostgroups_get (args.hostname, access_token)
# If there is no difference in the Host Groups after the filters are applied, do not bother to upload them to the appliance
shortcut = False
# Read filters from files specified in arguments; filters set to None implies nothing to filter
regionfilter = yamlread (args.regionfilter)
servicefilter = yamlread (args.servicefilter)
# Convert and filter AWS IP ranges to Host Group definitions
hostgroups = appresponse_awsipranges_to_hostgroups (awsresult, regionfilter, servicefilter, args.hostgroupprepend)
# Check to see if there are differences
new_hostgroups, hostgroup_prefixes_removed, hostgroup_prefixes_added = appresponse_hostgroups_compare (existing_hostgroups, hostgroups)
if len(new_hostgroups) == 0 and len(hostgroup_prefixes_removed) == 0 and len(hostgroup_prefixes_added) == 0:
# Flag the lack of differences so there is no attempt to upload the Host Groups
shortcut = True
print ("The set of Host Groups chosen to update have the same definitions on the appliance.")
print ("There are no Host Group definitions to push.")
if len(new_hostgroups) > 0:
print ("The new Host Groups are:")
for new_hostgroup in new_hostgroups:
print ("\t%s" % new_hostgroup)
# Get the intersection of the sets
added_and_removed = hostgroup_prefixes_added.keys () & hostgroup_prefixes_removed.keys ()
just_added = hostgroup_prefixes_added.keys () - added_and_removed
just_removed = hostgroup_prefixes_removed.keys () - added_and_removed
if len(added_and_removed) > 0:
for changed_hostgroup in added_and_removed:
print ("The Host Group %s had prefixes added and removed." % changed_hostgroup)
print ("Added:")
print ("\t%s" % hostgroup_prefixes_added[changed_hostgroup])
print ("Removed:")
print ("\t%s" % hostgroup_prefixes_removed[changed_hostgroup])
if len(just_added) > 0:
for changed_hostgroup in just_added:
print ("The Host Group %s had prefixes added." % changed_hostgroup)
print ("Added:")
print ("\t%s" % hostgroup_prefixes_added[changed_hostgroup])
if len(just_removed) > 0:
for changed_hostgroup in just_removed:
print ("The Host Group %s had prefixes removed." % changed_hostgroup)
print ("Removed:")
print ("\t%s" % hostgroup_prefixes_removed[changed_hostgroup])
if shortcut == False:
# Merge converted Host Group definitions into appliance
result = appresponse_hostgroups_merge (args.hostname, access_token, hostgroups)
if result.status_code in [200, 201, 204]:
#resulting_hostgroups = result.json ()
#print (resulting_hostgroups)
print ("Host Group definitions updated.")
# Write YAML file to keep track of last publication pull
if isinstance(args.ignoresynctime, bool) and args.ignoresynctime == True:
print ("The script is not saving a sync time.")
else:
synctime_dict = {'syncToken':awsresult['syncToken'], 'createDate':awsresult['createDate']}
with open(args.hostname + AWSIPRANGESSYNCTIMEFILE, 'w') as yaml_file:
yaml.dump(synctime_dict, yaml_file, default_flow_style=False)
else:
print ("Host Group definitions not updated.")
return
if __name__ == "__main__":
main ()
``` |
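The script above is driven via its CLI, but the helpers can also be reused directly. A minimal sketch with made-up region/service filters and prepend string, assuming `aws_ipranges` and `appresponse_awsipranges_to_hostgroups` from the file above are importable:
```python
# Hypothetical sketch: build Host Group definitions from the AWS ranges without the CLI.
# The filter values and the prepend string are examples only.
def buildAWSHostGroups():
	awsresult = aws_ipranges()
	if awsresult is None:
		return None
	return appresponse_awsipranges_to_hostgroups(
		awsresult,
		region_filter=["us-east-1", "us-west-2"],
		service_filter=["AMAZON"],
		prepend="AWS-",
	)
```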
{
"source": "jkraenzle/appresponse_utilities",
"score": 2
} |
#### File: jkraenzle/appresponse_utilities/appresponse_utilities.py
```python
from typing import Any, IO
import yaml
import os
import glob
import sys
import requests
import time
import argparse
import json
import getpass
from datetime import datetime
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Avoid warnings for insecure certificates
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
APPRESPONSE_UTILITIES_ACTIONS = [ "list_backups", \
"pull_backup", \
"delete_backup"]
APPRESPONSE_UTILITIES_SCRIPT_TIMEOUT = 60
##### YAML FUNCTIONS #####
# Define YAML Loader, as default Loader is not safe
class YAMLLoader(yaml.SafeLoader):
"""YAML Loader with `!include` constructor."""
def __init__(self, stream: IO) -> None:
"""Initialise Loader."""
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(loader: YAMLLoader, node: yaml.Node) -> Any:
"""Include file referenced at node."""
filename = os.path.abspath(os.path.join(loader._root, loader.construct_scalar(node)))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, YAMLLoader)
yaml.add_constructor('!include', construct_include, YAMLLoader)
def yamlread (fn):
try:
if fn != None:
with open(fn) as fh:
yamlresult = yaml.load (fh, YAMLLoader)
else:
yamlresult = None
except FileNotFoundError:
yamlresult = None
return yamlresult
# -----
##### REST API INTEGRATION #####
# Run REST APIs to appliance and return result
# Assume 'payload' is JSON formatted
def appresponse_rest_api (action, path, appliance, access_token, version, payload = None, data = None, additional_headers = None):
url = "https://" + appliance + path
bearer = "Bearer " + access_token
headers = {"Authorization":bearer}
if additional_headers != None:
headers.update (additional_headers)
if (action == "GET"):
r = requests.get (url, headers=headers, verify=False)
elif (action == "POST"):
if payload != None:
r = requests.post (url, headers=headers, data=json.dumps (payload), verify=False)
else:
r = requests.post (url, headers=headers, data=data, verify=False)
elif (action == "PUT"):
r = requests.put (url, headers=headers, data=json.dumps (payload), verify=False)
elif (action == "DELETE"):
r = requests.delete (url, headers=headers, verify=False)
if (r.status_code not in [200, 201, 202, 204]):
print ("Status code was %s" % r.status_code)
print ("Error: %s" % r.content)
result = None
else:
if (("Content-Type" in r.headers.keys ()) and ("application/json" in r.headers ["Content-Type"])):
result = json.loads (r.content)
elif (("Content-Type" in r.headers.keys ()) and ("application/x-gzip" in r.headers ["Content-Type"])):
result = r.content
else:
result = r.text
return result
##### BACKUP #####
def appresponse_backups_list (appliance, access_token, version):
backup_list = appresponse_rest_api("GET", "/api/npm.backup/1.0/backups", appliance, access_token, version)
return backup_list ["items"]
# REST API Python wrapper to create backup on appliance
def appresponse_backup_create(appliance, access_token, version):
# Kick off backup and give time to process
payload = {"description": "Automated Backup"}
backup_in_process = appresponse_rest_api ("POST", "/api/npm.backup/1.0/backups", appliance, access_token, version, payload)
# If backup creation failed, return upstream showing the failure
if (backup_in_process == None):
return None, None
# Get backup id and sleep so there's time for backup to initially create
backup_id = backup_in_process ["id"]
time.sleep (5)
# Keep checking if backup has completed
backup_complete = False
while (backup_complete == False):
backups = appresponse_backups_list(appliance, access_token, version)
found = False
for backup in backups:
if (backup ["id"] == backup_id):
found = True
if (backup ["status"] == "completed"):
backup_complete = True
# If backup "id" is not found on appliance
if (found == False):
print ("Error starting backup on %s" % appliance)
return None, None
elif (backup_complete == False):
time.sleep (2)
return backup_id, backup
def appresponse_backup_delete (appliance, access_token, version, backup):
try:
result = appresponse_rest_api("DELETE", "/api/npm.backup/1.0/backups/items/" + str(backup['id']), appliance, access_token, version)
except:
result = None
return result
# REST API Python wrapper to download and store automated backup
def appresponse_backup_download_and_store (appliance, access_token, version, backup, path=None):
backup_file = appresponse_rest_api ("GET", "/api/npm.backup/1.0/backups/items/" + backup['id'] + "/file", appliance, access_token, version)
if (backup_file != None):
# Create folders and filenames for store
backup_time_str = "Unknown"
if 'backup_time' in backup:
backup_timestamp = backup['backup_time']
dt = datetime.fromtimestamp(backup_timestamp)
backup_time_str = dt.strftime("%Y%m%d%I%M%S")
backup_filename = appliance + '.' + backup_time_str + ".backup.tgz"
if path != None:
try:
if not os.path.exists(path):
os.mkdir(path)
except:
print("WARNING")
print("Path provided does not exist and could not be created.")
print("Defaulting to local folder.")
path = None
if path != None:
			# Use os.path.join so the path separator is handled correctly regardless of how 'path' was given
			backup_filename = os.path.join(path, backup_filename)
try:
with open(backup_filename, "wb") as backup_f:
backup_f.write (backup_file)
return backup_filename
except:
return None
else:
return None
# REST API Python wrapper to download and delete automated backup
def appresponse_backup_download_and_delete (appliance, access_token, version, backup, path, delete_after_download=True):
backup_filename = appresponse_backup_download_and_store(appliance, access_token, version, backup, path)
	delete_status = None
	if delete_after_download == None or delete_after_download == True:
delete_status = appresponse_backup_delete(appliance, access_token, version, backup)
return delete_status, backup_filename
# REST API Python wrapper to create and pull backup from appliance
def appresponse_backup_get (appliance, access_token, version, path, delete_after_download=True):
backup_id, backup = appresponse_backup_create (appliance, access_token, version)
if (backup_id != None):
empty_result,filename = appresponse_backup_download_and_delete (appliance, access_token, version, backup, path, delete_after_download)
return True,filename
else:
		return False, None
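# Hypothetical usage sketch (not part of the original module): pull a fresh backup
# from an appliance after authenticating. Hostname, credentials, version and path are examples.
#
#   access_token = appresponse_authenticate("appresponse.example.com", "admin", password, "11.12")
#   success, filename = appresponse_backup_get("appresponse.example.com", access_token, "11.12",
#       path="./backups/", delete_after_download=True)
#   if success:
#       print("Backup stored as %s" % filename)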
def appresponse_backup_upload (appliance, access_token, version, backup_file):
data = backup_file.read ()
backup = appresponse_rest_api ("POST", "/api/npm.backup/1.0/backups/upload", appliance, access_token, version, additional_headers={'Content-Type': 'application/octet-stream'}, data=data)
return backup
def appresponse_backup_restore (appliance, access_token, version, id):
backup_restore_status = appresponse_rest_api("POST", "/api/npm.backup/1.0/backups/items/" + id + "/restore", appliance, access_token, version)
return backup_restore_status
def appresponse_backup_restore_status (appliance, access_token, version):
backup_restore_status = appresponse_rest_api("GET", "/api/npm.backup/1.0/restore_status", appliance, access_token, version)
return backup_restore_status
def appresponse_backup_space_create (hostname, access_token, version, delete_options, store_options):
# Set backup options related to locally storing and/or deleting existing backups; verify that they make sense
download_and_store_existing_backups = store_options['download_and_store_existing_backups']
delete_all_existing_backups_on_appliance = delete_options['delete_all_existing_backups_on_appliance']
delete_oldest_backup = delete_options['delete_oldest_backup']
do_not_delete_existing_backups = delete_options['do_not_delete_existing_backups']
if do_not_delete_existing_backups == True and (delete_all_existing_backups_on_appliance == True or delete_oldest_backup == True):
print("WARNING")
print("Configuration file has conflicting settings, and is set to not delete any backups from appliance(s) and configured with deletion options.")
print("Resulting configuration will not delete any files.")
print("Please correct configuration file for subsequent runs.")
delete_all_existing_backups_on_appliance = delete_oldest_backup = False
elif delete_all_existing_backups_on_appliance == True and delete_oldest_backup == True:
print("WARNING")
print("Configuration file is set to delete all backups and oldest backups. Resulting configuration will delete only oldest files from appliance(s).")
print("Please correct configuration file for subsequent runs.")
delete_all_existing_backups_on_appliance = False
print("Checking backup space availability on AppResponse %s." % hostname)
# Check the current list of primary AppResponse backups (list_backups)
backups_list = appresponse_backups_list (hostname, access_token, version)
	# If there are already two, delete the oldest, since the AppResponse appliance only allows two stored backups at a time (delete_backup)
if len(backups_list) > 0:
if download_and_store_existing_backups == True:
for backup in backups_list:
filename = appresponse_backup_download_and_store(hostname, access_token, version, backup, store_options['path'])
print("Downloaded %s from %s to store locally." % (filename, hostname))
if delete_all_existing_backups_on_appliance == True:
for backup in backups_list:
delete_status = appresponse_backup_delete(hostname, access_token, version, backup)
if delete_status != None and delete_status != "":
print(delete_status)
print("Deletion of backup %s from hostname %s failed." % (str(backup['id']), hostname))
return False
else:
if delete_oldest_backup == True:
if len(backups_list) == 2:
if do_not_delete_existing_backups == True:
print("AppResponse %s has no available space and flag is set to not delete on-AppResponse backups." % hostname)
return False
else:
# Get ID of oldest backup
timestamp_0 = backups_list[0]['backup_time']
timestamp_1 = backups_list[1]['backup_time']
if timestamp_0 < timestamp_1:
backup_to_delete = backups_list[0]
else:
backup_to_delete = backups_list[1]
print("Deleting oldest backup to create available space on AppResponse %s." % hostname)
delete_status = appresponse_backup_delete(hostname, access_token, version, backup_to_delete)
if delete_status != None and delete_status != "":
print(delete_status)
return False
return True
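# Locally archived backups are expected to be named <hostname>.<timestamp>.backup.tgz (the layout the
# parsing below relies on when grouping files per appliance and finding the oldest one).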
def appresponse_backup_clean_locally (store_options):
if store_options['number_of_archived_backups'] != None:
num_backups_to_keep = store_options['number_of_archived_backups']
if not isinstance(num_backups_to_keep, int):
print("WARNING")
print("Configuration file has an invalid setting for the number of archived backups")
print("Setting is %s." % str(num_backups_to_keep))
return False
else:
num_backups_to_keep = 0
# Get the list of backups and break them out into a list per appliance
backups_list = []
appliances_dict = {}
if 'path' in store_options:
backups_list = glob.glob(store_options['path'] + "*.backup.tgz")
for backup in backups_list:
hostname = backup.rsplit('.',3)[0]
if hostname not in appliances_dict:
appliances_dict[hostname] = []
appliances_dict[hostname].append(backup)
# Iterate over appliances and remove oldest
cleanup_succeeded = True
for appliance in appliances_dict:
appliance_backups_list = appliances_dict[appliance]
oldest_timestamp = None
oldest_backup = None
while len(appliance_backups_list) > num_backups_to_keep:
for backup in appliance_backups_list:
backup_timestamp = int(backup.rsplit('.', 3)[1])
if oldest_timestamp == None or oldest_timestamp > backup_timestamp:
oldest_timestamp = backup_timestamp
oldest_backup = backup
try:
print("Removing backup %s." % oldest_backup)
appliance_backups_list.remove(oldest_backup)
os.remove (oldest_backup)
oldest_timestamp = None
oldest_backup = None
except:
print("WARNING")
print("Exception while removing backup %s from local disk" % oldest_backup)
cleanup_succeeded = False
return cleanup_succeeded
##### GENERAL FUNCTIONS
# REST API Python wrapper to authenticate to the server (Login)
# URL: https://<appliance>/api/mgmt.aaa/1.0/token ; pre-version 11.6
# URL: https://<appliance>/api/mgmt.aaa/2.0/token ; version 11.6 or later
# Header: Content-Type:application/json
# Body: {"user_credentials":{"username":<username>, "password":<password>},"generate_refresh_token":"true"}
def appresponse_authenticate (appliance, username, password, version):
if (version in ["11.4", "11.5"]):
url = "https://" + appliance + "/api/mgmt.aaa/1.0/token"
else:
url = "https://" + appliance + "/api/mgmt.aaa/2.0/token"
credentials = {"username":username, "password":password}
payload = {"user_credentials":credentials, "generate_refresh_token":False}
headers = {"Content-Type":"application/json"}
r = requests.post(url, data=json.dumps(payload), headers=headers, verify=False)
if (r.status_code != 201):
print ("Status code was %s" % r.status_code)
print ("Error %s" % r.content)
return None
else:
result = json.loads(r.content)
return result["access_token"]
# REST API Python wrapper to request version information
# URL: https://<appliance>/api/common/1.0/info
# Header: Authorization: Bearer <access_token>
def appresponse_version_get (appliance, access_token, version):
url = "https://" + appliance + "/api/common/1.0/info"
r = requests.get (url, verify=False)
result = json.loads(r.content)
version_str = result["sw_version"]
return version_str
def appresponse_authentication_check(hostname, username, password):
# Login to source and destination AppResponses to confirm the passwords are correct before proceeding
version = appresponse_version_get(hostname, username, password)
access_token = appresponse_authenticate(hostname, username, password, version)
return access_token, version
##### HELPER FUNCTIONS #####
# Helper function to get list of hostnames from input
def hostnamelist_get (hostnamelist):
hostnamelist_f = open (hostnamelist, 'r')
output = []
for row in hostnamelist_f:
hostname = row.rstrip()
output.append (hostname)
hostnamelist_f.close ()
return output
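# The YAML configuration read by backup_credentials_get is expected to contain some or all of the
# top-level keys referenced below; an illustrative sketch (values are placeholders):
#   hostname: <single appliance>        # or use 'list' to point at a file of hostnames, one per line
#   list: <path to hostname file>
#   username: <account name>
#   key: <password; normally omitted and prompted for interactively>
#   delete_options: { ... }             # keys as used in appresponse_backup_space_create
#   store_options: { ... }              # e.g. path, number_of_archived_backups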
def backup_credentials_get (filename):
credentials = yamlread (filename)
hostname = None
if 'hostname' in credentials:
hostname = credentials['hostname']
    hostname_list = None
    if 'list' in credentials:
        hostname_list = credentials['list']
username = None
if 'username' in credentials:
username = credentials['username']
# Allow for testing, but the expectation is that this is not included in YAML
key = None
if 'key' in credentials:
key = credentials['key']
# Include options to handle what to do with existing backups and how to store locally
delete_options = None
if 'delete_options' in credentials:
delete_options = credentials['delete_options']
store_options = None
if 'store_options' in credentials:
store_options = credentials['store_options']
    hostnamelist = None
    if hostname_list != None:
        try:
            hostnamelist = hostnamelist_get(hostname_list)
        except:
            print("Failed to read file %s to load the list of hostnames specified by the 'list' setting in the configuration file." % hostname_list)
            hostnamelist = None
return hostname, hostnamelist, username, key, delete_options, store_options
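# The restore workflow configuration read by backup_restore_credentials_get uses the keys
# src_hostname, src_username, src_key, dst_hostname, dst_username, dst_key, delete_options and
# store_options; the *_key values are normally omitted from the YAML (see the comment below).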
def backup_restore_credentials_get (filename):
credentials = yamlread (filename)
src_hostname = None
if 'src_hostname' in credentials:
src_hostname = credentials['src_hostname']
src_username = None
if 'src_username' in credentials:
src_username = credentials['src_username']
dst_hostname = None
if 'dst_hostname' in credentials:
dst_hostname = credentials['dst_hostname']
dst_username = None
if 'dst_username' in credentials:
dst_username = credentials['dst_username']
# Allow for testing, but the expectation is that this is not included in YAML
src_key = None
if 'src_key' in credentials:
src_key = credentials['src_key']
dst_key = None
if 'dst_key' in credentials:
dst_key = credentials['dst_key']
# Include options to handle what to do with existing backups and how to store locally
delete_options = None
if 'delete_options' in credentials:
delete_options = credentials['delete_options']
store_options = None
if 'store_options' in credentials:
store_options = credentials['store_options']
    return src_hostname, src_username, src_key, dst_hostname, dst_username, dst_key, delete_options, store_options
def run_action(hostnamelist, username, key, action, actionfile):
# Check inputs for required data and prep variables
if (hostnamelist == None or hostnamelist == ""):
print ("Please specify a hostname using --hostname or a set of hostnames using --hostnamelist")
return
if (username == None or username == ""):
print ("Please specify a username using --username")
return
if (action == None or action == ""):
print ("Please specify an action using --action")
return
    # Check that action exists in set of known actions
    if not action in APPRESPONSE_UTILITIES_ACTIONS:
        print ("Action %s is unknown" % action)
        return
    if (key == None or key == ""):
        print ("Please provide key for account %s" % username)
        key = getpass.getpass ()
key = getpass.getpass ()
for hostname in hostnamelist:
# Loop through hosts, applying 'action'
version = appresponse_version_get (hostname, username, key)
access_token = appresponse_authenticate (hostname, username, key, version)
if (access_token == None or access_token == ""):
print ("Failed to login to %s. Terminating action ..." % hostname)
return
# ACTION - list_backups
if (action == "list_backups"):
backups_list = appresponse_backups_list (hostname, access_token, version)
print (backups_list)
# ACTION - pull_backup
elif (action == "pull_backup"):
            backup,filename = appresponse_backup_get (hostname, access_token, version, "")  # "" stores the downloaded backup in the current working directory
if (backup == True):
print ("Backup for %s was successful!" % (hostname))
else:
print ("Backup for %s was unsuccessful!" % (hostname))
# ACTION - delete backup
elif (action == "delete_backup"):
if (actionfile == None or actionfile == ""):
print ("Please specify an ID for the filename on the appliance that you would like to delete in --actionfile parameter")
else:
backup_to_delete = None
backups_list = appresponse_backups_list (hostname, access_token, version)
for backup in backups_list:
if actionfile == backup['id']:
backup_to_delete = backup
break
backup = appresponse_backup_delete (hostname, access_token, version, backup_to_delete)
# ACTION - upload_backup
elif (action == "upload_backup"):
            if (actionfile == None or actionfile == ""):
                print ("Please specify a filename for backup upload in --actionfile parameter")
            else:
                backup = None
                with open(actionfile, 'rb') as backup_file:
                    backup = appresponse_backup_upload (hostname, access_token, version, backup_file)
                print (backup)
return
def backup_from_yaml(config):
print("------------------------------------------------------------------------")
print("")
print("Step 1 of 3: Confirming accounts and pre-requisites ...")
print("")
hostname, hostnamelist, username, key, delete_options, store_options = backup_credentials_get(config)
if hostname != None and hostnamelist != None:
print("Please specify 'hostname' or 'list' in the configuration file, but not both.")
print("Terminating script ...")
return
elif hostname != None and hostnamelist == None:
hostnamelist = []
hostnamelist.append(hostname)
elif hostname == None and hostnamelist == None:
print("Please specify 'hostname' or 'list' in the configruation file.")
print("If 'list' is specified, please ensure file exists and permissions are set appropriately.")
print("Terminating script ...")
return
# Login to source and destination AppResponses to confirm the keys are correct before proceeding
if key == None or key == "":
print("Please provide key for account %s on the AppResponse appliances." % username)
key = getpass.getpass()
num_hostnames = len(hostnamelist)
hostnames_to_backup = []
for hostname in hostnamelist:
try:
access_token, version = appresponse_authentication_check(hostname, username, key)
except:
access_token = None
if access_token == None:
print("WARNING")
print("Authentication failed to AppResponse %s. Removing from backup list ..." % hostname)
else:
hostnames_to_backup.append({"hostname":hostname,"access_token":access_token,"version":version})
num_backups_to_take = len(hostnames_to_backup)
print("Backing up %d of the %d specified AppResponse appliances." % (num_backups_to_take, num_hostnames))
print("")
print("Step 2 of 3: Taking backups from %d AppResponse appliances" % num_backups_to_take)
print("")
backup_in_progress = 1
backup_success = 0
for appliance in hostnames_to_backup:
hostname = appliance['hostname']
access_token = appliance['access_token']
version = appliance['version']
print("Starting backup %d of %d ..." % (backup_in_progress, num_backups_to_take))
status = appresponse_backup_space_create (hostname, access_token, version, delete_options, store_options)
# Create, download, and delete a backup of the AppResponse at a current time (pull_backup)
backup_status,backup_filename = appresponse_backup_get(hostname, access_token, version, store_options['path'], delete_options['delete_automated_backup'])
if backup_status == False:
print("AppResponse %s backup failed. Continuing to next appliance ..." % hostname)
backup_in_progress+=1
continue
else:
backup_success+=1
print("Backup file %s created and downloaded for AppResponse %s" % (backup_filename, hostname))
backup_in_progress+=1
print("")
print("Step 3 of 3: Cleaning up after script execution.")
print("")
cleanup_status = appresponse_backup_clean_locally(store_options)
if cleanup_status == False:
print("Cleanup failed. Terminating script ...")
return
print("Backup from %d of %d configured AppResponse appliances has been completed. %d%% success!" % (backup_success, num_hostnames, int(backup_success/num_hostnames * 100)))
print("")
print("------------------------------------------------------------------------")
return
def main():
# set up arguments in appropriate variables
parser = argparse.ArgumentParser (description="Python utilities to automate information collection or \
configuration tasks within AppResponse environments")
parser.add_argument('--hostname', help="Hostname or IP address of the AppResponse appliance")
parser.add_argument('--list', help="File containing hostnames or IP addresses, one per line")
parser.add_argument('--username', help="Username for the appliance")
parser.add_argument('--key', help="Key for the username")
parser.add_argument('--action', help="Action to perform: %s" % APPRESPONSE_UTILITIES_ACTIONS)
parser.add_argument('--actionfile', help="Settings file associated with action")
parser.add_argument('--backupfromconfig', help="Run full workflow from YAML config")
args = parser.parse_args()
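    # Illustrative invocations (script name and values are placeholders; adjust to your environment):
    #   python <this_script>.py --hostname 10.1.1.1 --username admin --action list_backups
    #   python <this_script>.py --list appliances.txt --username admin --action pull_backup
    #   python <this_script>.py --backupfromconfig backup_config.yaml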
if args.backupfromconfig != None:
backup_from_yaml(args.backupfromconfig)
else:
hostnamelist = []
        if args.hostname != None and args.list != None:
            print("Please use --hostname or --list, but not both, to specify the hostnames to backup.")
        elif args.hostname != None:
            hostnamelist.append(args.hostname)
elif args.list != None:
try:
hostnamelist = hostnamelist_get(args.list)
except:
hostnamelist = None
run_action(hostnamelist, args.username, args.key, args.action, args.actionfile)
if __name__ == "__main__":
main ()
``` |
{
"source": "jkraenzle/Riverbed-Community-Toolkit",
"score": 2
} |
#### File: Ansible-101-Playbook/modules/list_sources.py
```python
DOCUMENTATION = """
---
module: list_sources
short_description: Show the list of available packet sources on a Riverbed AppResponse appliance
options:
host:
description:
- Hostname or IP Address of the AppResponse appliance.
required: True
username:
description:
- Username used to login to the AppResponse appliance.
required: True
password:
description:
- Password used to login to the AppResponse appliance
required: True
output_file:
description:
- File name for write output
required: False
"""
EXAMPLES = """
#Usage Example
- name: Get sources from the AppResponse
list_sources:
host: 192.168.1.1
username: admin
password: <PASSWORD>
register: results
- name: List sources available on the AppResponse
debug: var=results
- name: Get sources from the AppResponse and write to output file
list_sources:
host: 192.168.1.1
username: admin
password: <PASSWORD>
output_file: test.txt
register: results
- name: Display status on writing to output file
debug: var=results
"""
RETURN = r'''
output:
  description: Available packet sources on the AppResponse
returned: success
type: list
msg:
description: Status of writing to the output file
returned: success
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from collections import namedtuple
from steelscript.appresponse.core.app import AppResponseApp
from steelscript.appresponse.core.appresponse import AppResponse
from steelscript.common.api_helpers import APIVersion
from steelscript.common.datautils import Formatter
from steelscript.common.service import UserAuth
from steelscript.common.exceptions import RvbdHTTPException
IFG = namedtuple('IFG', 'type get_id get_items')
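# IFG abstracts the pre/post-2.0 npm.packet_capture API differences: 'type' names the job attribute
# ('mifg_id' or 'vifgs'), 'get_id' extracts that attribute from a capture job, and 'get_items' is the
# capture method that lists the corresponding interface groups (see main() below).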
class PacketCaptureApp(AppResponseApp):
def __init__(self,output_file=None):
        super(PacketCaptureApp, self).__init__()
self.output_file = output_file
self.first_line = True
def console(self, source_type, data, headers):
if self.output_file is not None:
f = open(self.output_file, "a+")
f.write('')
if not self.first_line:
f.write("\n")
f.write(source_type + "\n")
f.write('-' * len(source_type) + "\n")
f.close()
self.first_line = False
if data:
Formatter.print_table(data, headers,self.output_file)
def main(self,module):
try:
# handle new packet capture version
version = APIVersion(self.appresponse.versions['npm.packet_capture'])
if version < APIVersion('2.0'):
ifg = IFG('mifg_id',
lambda job: job.data.config.mifg_id,
self.appresponse.capture.get_mifgs)
else:
ifg = IFG('vifgs',
lambda job: job.data.config.vifgs,
self.appresponse.capture.get_vifgs)
# Show Interfaces and VIFGs (skip if MIFG appliance)
if ifg.type == 'vifgs':
total = []
# Show interfaces
headers = ['name', 'description', 'status', 'bytes_total',
'packets_dropped', 'packets_total']
data = []
for iface in self.appresponse.capture.get_interfaces():
data.append([
iface.name,
iface.data.config.description,
iface.status,
iface.stats.bytes_total.total,
iface.stats.packets_dropped.total,
iface.stats.packets_total.total,
])
if self.output_file is not None:
self.console('Interfaces', data, headers)
total.append(headers)
total.append(data)
headers = ['id', 'name', 'enabled', 'filter', 'bytes_received',
'packets_duped', 'packets_received']
data = []
for vifg in self.appresponse.capture.get_vifgs():
data.append([
vifg.data.id,
vifg.data.config.name,
vifg.data.config.enabled,
vifg.data.config.filter,
vifg.data.state.stats.bytes_received.total,
vifg.data.state.stats.packets_duped.total,
vifg.data.state.stats.packets_received.total,
])
if self.output_file is not None:
self.console('VIFGs', data, headers)
total.append(headers)
total.append(data)
# Show capture jobs
headers = ['id', 'name', ifg.type, 'filter', 'state',
'start_time', 'end_time', 'size']
data = []
for job in self.appresponse.capture.get_jobs():
data.append([job.id, job.name,
ifg.get_id(job),
getattr(job.data.config, 'filter', None),
job.data.state.status.state,
job.data.state.status.packet_start_time,
job.data.state.status.packet_end_time,
job.data.state.status.capture_size])
if self.output_file is not None:
self.console('Capture Jobs', data, headers)
total.append(headers)
total.append(data)
# Show clips
headers = ['id', 'job_id', 'start_time', 'end_time', 'filters']
data = []
for clip in self.appresponse.clips.get_clips():
data.append([clip.id, clip.data.config.job_id,
clip.data.config.start_time,
clip.data.config.end_time,
getattr(clip.data.config, 'filters',
dict(items=None))['items']])
if self.output_file is not None:
self.console('Clips', data, headers)
total.append(headers)
total.append(data)
# Show files
headers = ['type', 'id', 'link_type', 'format',
'size', 'created', 'modified']
data = []
for obj in self.appresponse.fs.get_files():
data.append([obj.data.type, obj.id, obj.data.link_type,
obj.data.format, obj.data.size,
obj.data.created, obj.data.modified])
if self.output_file is not None:
self.console('Uploaded Files/PCAPs', data, headers)
total.append(headers)
total.append(data)
if self.output_file is None:
module.exit_json(changed=False,output=total)
else:
result="Successfully wrote output to '{}'".format(self.output_file)
module.exit_json(changed=False, msg=result)
except RvbdHTTPException as e:
results = "Error getting list of sources from AppResponse appliance"
module.fail_json(changed=False,msg=results,reason=str(e))
def main():
fields = {
"host": {"required":True, "type": "str"},
"username": {"required":True, "type": "str"},
"password": {"required":True, "type": "str", "no_log":True},
"output_file": {"required": False, "type": "str"}
}
module = AnsibleModule(argument_spec=fields)
my_ar = AppResponse(module.params['host'], UserAuth(module.params['username'], module.params['password']))
t = PacketCaptureApp(module.params['output_file'])
t.appresponse = my_ar
t.main(module)
if __name__ == '__main__':
main()
```
#### File: 001-NetIM-GMC-Azure-Bandwidth-Cost/scripts/azureBandwidthCost.py
```python
import json
import sys
import time
import requests
import adal
from pprint import pprint
print(sys.argv)
if len(sys.argv) == 1:
print("Arguments Not Given")
sys.exit(2)
elif len(sys.argv) < 8:
    print("Wrong number of arguments; expected: deviceName metricClass gmcPath subscriptionId tenant clientId clientSecret")
sys.exit(2)
arg = sys.argv
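# Positional arguments as used below:
#   arg[1] = deviceName, arg[2] = metricClass, arg[3] = gmcPath (output directory for the .mtr file),
#   arg[4] = subscriptionId, arg[5] = tenant ID, arg[6] = client ID, arg[7] = client secret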
TENANT = arg[5]
CLIENT_ID = arg[6]
CLIENT_SECRET = arg[7]
def authenticate_client_key():
"""
Authenticate using service principal w/ key.
"""
authority_host_uri = 'https://login.microsoftonline.com'
tenant = TENANT
authority_uri = authority_host_uri + '/' + tenant
resource_uri = 'https://management.core.windows.net/'
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
context = adal.AuthenticationContext(authority_uri, api_version=None)
mgmt_token = context.acquire_token_with_client_credentials(resource_uri, client_id, client_secret)
return mgmt_token['accessToken']
if __name__== "__main__":
# print ('Number of arguments:', len(sys.argv))
# print ('Argument List:', arg)
deviceName = arg[1]
metricClass = arg[2]
gmcPath = arg[3]
subscriptionId = arg[4]
endpoint = "https://management.azure.com/%2Fsubscriptions%2F" + subscriptionId + "/providers/Microsoft.CostManagement/query"
api_version = '?api-version=2019-10-01'
url = endpoint + api_version
auth_token = authenticate_client_key()
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer ' + auth_token }
data = {
"type": "Usage",
"timeframe": "MonthToDate",
"dataset": {
"granularity": "Monthly",
"aggregation": {
"totalCost": {
"name": "PreTaxCost",
"function": "Sum"
}
},
"grouping": [
{
"type": "Dimension",
"name": "ServiceName"
}
],
"filter": {
"dimensions": {
"name": "ServiceName",
"operator": "In",
"values": [
"bandwidth"
]
}
}
}
}
resp = requests.post(url, data = json.dumps(data), headers=headers)
if resp.status_code != 200:
print ("Error while requesting POST")
print(resp.status_code, resp.reason)
sys.exit(2)
#print(resp.status_code, json.loads(resp.text))
result = json.loads(resp.text)
basename = "azureBandwidthCost"
print(gmcPath)
filename = gmcPath + basename + time.strftime('%Y%m%d%H%M%S') + '.mtr'
timestamp = int(time.time()*1000)
pretaxcost = int(result['properties']['rows'][0][0])
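    # The .mtr file written below is laid out for the NetIM GMC metric import used by this example:
    # a [SampleDataHeader] line naming the metric class and its columns, a [TargetInfoHeader] line,
    # and a [TI]...[SI][SD] data row carrying the device name, timestamp and the queried cost values.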
with open(filename, 'a') as f:
f.write('[SampleDataHeader][name={}]'.format(basename))
f.write('[metricClass={}]timestamp,pretaxcost,billingmonth,servicename,currency\n'.format(metricClass))
f.write('[TargetInfoHeader]HEADERNAME,SYSNAME\n')
f.write('[TI]{},{}[SI][SD]{},{},{},{},{}\n'.format(basename, deviceName,timestamp, pretaxcost , result['properties']['rows'][0][1], result['properties']['rows'][0][2], result['properties']['rows'][0][3]))
f.close()
```
#### File: Ansible-101-Playbook/modules/netprofiler_bootstrap.py
```python
DOCUMENTATION = """
---
module: netprofiler_bootstrap
short_description: Configure the initial NetProfiler appliance settings.
options:
username:
description:
- Username used to login to the NetProfiler appliance
required: True
password:
description:
- Password used to login to the NetProfiler appliance
required: True
reset:
description:
- True, if performing a factory-reset on the device during bootstrap; false by default
required: False
virtual:
description:
- True, if on customer equipment; false, if hardware provided by Riverbed
required: True
base_module:
description:
- Parameters for configuration of the NetProfiler base appliance
required: True
suboptions:
hostname:
description:
- Hostname of the NetProfiler appliance.
required: True
ip:
description:
- IP address of the management interface (primary) of the NetProfiler appliance
required: True
mask:
description:
- Mask of the management interface (primary) of the NetProfiler appliance
required: True
gateway:
description:
- Gateway for the NetProfiler management interface (primary)
required: True
terminal_ip:
description:
- IP address of the terminal server connected to the NetProfiler base module
required: False
terminal_port:
description:
- Port of the terminal server connected to the NetProfiler base module
required: False
terminal_username:
description:
- Username for login to the terminal server
required: False
terminal_password:
description:
- Password for login to the terminal server
required: False
expansion_modules:
description:
- Parameters for configuration of the NetProfiler expansion modules
required: False
type: list
sample:
- hostname: expansion1
ip: 10.1.1.2
mask: 255.255.255.0
gateway: 10.1.1.1
dispatcher_module:
description:
- Parameters for configuration of the NetProfiler dispatcher module
required: False
suboptions:
hostname:
description:
- Hostname of the NetProfiler dispatcher module
required: True
ip:
description:
- IP address of the management interface (primary) of the NetProfiler dispatcher module
required: True
mask:
description:
- Mask of the management interface (primary) of the NetProfiler dispatcher module
required: True
gateway:
description:
- Gateway for the NetProfiler dispatcher module management interface (primary)
required: True
terminal_ip:
description:
- IP address of the terminal server connected to the NetProfiler dispatcher module
required: False
terminal_port:
description:
- Port number of terminal server connected to the NetProfiler dispatcher module
required: False
terminal_username:
description:
- Username for login to the terminal server
required: False
terminal_password:
description:
- Password for login to the terminal server
required: False
"""
EXAMPLES = """
#Usage Example 1
- name: Bootstrap the NetProfiler appliance using terminal server connected to console port
netprofiler_bootstrap:
#Usage Example 2
- name: Bootstrap the NetProfiler appliance using SSH to DHCP IP address
  netprofiler_bootstrap:
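    # Illustrative values only (drawn from the BOOTSTRAP_TEST_* defaults defined later in this module);
    # replace them with settings appropriate for your environment.
    username: mazu
    password: "{{ netprofiler_password }}"
    reset: false
    virtual: true
    base_module:
      hostname: netprofiler
      ip: 10.1.150.222
      mask: 255.255.255.0
      gateway: 10.1.150.1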
"""
RETURN = r'''
output:
description: Result of bootstrap operation
returned: always
type: str
sample: Bootstrap complete.
'''
# Bootstrap Globals
## Command-line session details
BOOTSTRAP_CONNECTION_TERMINAL = 'TERMINAL'
BOOTSTRAP_CONNECTION_SSH = 'SSH'
BOOTSTRAP_TERMINAL_PASSWORD_PROMPT = u'Password: '
BOOTSTRAP_TERMINAL_PASSWORD_REQUEST = u'Enter Terminal Password: '
BOOTSTRAP_LOGIN_PROMPT_REGEX = u'.* login: '
BOOTSTRAP_CLI_PROMPT_REGEX = u'.*\$ '
BOOTSTRAP_ENABLE_PROMPT_REGEX = u'.* # '
BOOTSTRAP_CONFIG_PROMPT_REGEX = u'.* \(config\) # '
BOOTSTRAP_PASSWORD_PROMPT_REGEX = u'[pP]assword: '
BOOTSTRAP_PROMPT_REGEX_LIST = [BOOTSTRAP_LOGIN_PROMPT_REGEX, BOOTSTRAP_CLI_PROMPT_REGEX, BOOTSTRAP_ENABLE_PROMPT_REGEX, BOOTSTRAP_CONFIG_PROMPT_REGEX]
BOOTSTRAP_SSH_COMMAND = "/usr/bin/ssh"
BOOTSTRAP_SSH_ARGS = ["-o StrictHostKeyChecking no", "-o UserKnownHostsFile /dev/null"]
BOOTSTRAP_CONSOLE_COMMAND = "/usr/bin/ssh"
BOOTSTRAP_CONSOLE_ARGS = ["-o StrictHostKeyChecking no", "-o UserKnownHostsFile /dev/null"]
## Commands for factory reset
BOOTSTRAP_BASE_PRODUCTCODE = u'-BASE'
BOOTSTRAP_EXP_PRODUCTCODE = u'-EXP'
BOOTSTRAP_DP_PRODUCTCODE = u'-DP'
BOOTSTRAP_HARDWARE_PRODUCTCODE = u'SCNP-04280'
BOOTSTRAP_BASE_HARDWARE_PRODUCTCODE = BOOTSTRAP_HARDWARE_PRODUCTCODE + BOOTSTRAP_BASE_PRODUCTCODE
BOOTSTRAP_EXP_HARDWARE_PRODUCTCODE = BOOTSTRAP_HARDWARE_PRODUCTCODE + BOOTSTRAP_EXP_PRODUCTCODE
BOOTSTRAP_DP_HARDWARE_PRODUCTCODE = BOOTSTRAP_HARDWARE_PRODUCTCODE + BOOTSTRAP_DP_PRODUCTCODE
BOOTSTRAP_VIRTUAL_PRODUCTCODE = u'SNCP-VE'
BOOTSTRAP_BASE_VIRTUAL_PRODUCTCODE = BOOTSTRAP_VIRTUAL_PRODUCTCODE + BOOTSTRAP_BASE_PRODUCTCODE
BOOTSTRAP_EXP_VIRTUAL_PRODUCTCODE = BOOTSTRAP_VIRTUAL_PRODUCTCODE + BOOTSTRAP_EXP_PRODUCTCODE
BOOTSTRAP_DP_VIRTUAL_PRODUCTCODE = BOOTSTRAP_VIRTUAL_PRODUCTCODE + BOOTSTRAP_DP_PRODUCTCODE
BOOTSTRAP_BASE_FACTORY_RESET = u'sudo /usr/mazu/bin/cascade-setup --auto --mblade-ip {} --mblade-mask {} --product-code {}'
BOOTSTRAP_EXP_FACTORY_RESET = u'sudo /usr/mazu/bin/cascade-setup --auto --mgmt-ip {} --mblade-ip {} --mblade-mask {} --product-code {}' # mgmt-ip is BASE IP
BOOTSTRAP_DP_FACTORY_RESET = u'sudo /usr/mazu/bin/cascade-setup --auto --mgmt-ip {} --mblade-ip {} --mblade-mask {} --product-code {}' # mgmt-ip is BASE IP
BOOTSTRAP_DP_10GNICADDON_ENABLE = 'mazu-reconfig --set --user-addon-nic true'
BOOTSTRAP_SKIP_SETUP_ON_LOGIN = u"psql mazu postgres -h pghost -c \"UPDATE settings set value=1 where key='ui_mode'\""
## Commands for configuration
BOOTSTRAP_CONFIG_MGMTIP = u'mazu-reconfig --set --network {}'
BOOTSTRAP_CONFIG_GATEWAY = u'mazu-reconfig --set --gateway {}'
BOOTSTRAP_CONFIG_INTERFACE = u'mazu-reconfig --set --interface {} primary autoneg on'
BOOTSTRAP_CONFIG_DNS = u'mazu-reconfig --set --dns true {} {}' # {suffix1,suffix2} {server1}
BOOTSTRAP_CONFIG_NTP = u'mazu-reconfig --set --ntp {}'
BOOTSTRAP_CONFIG_SNMP = u'mazu-reconfig --set --snmp {}'
BOOTSTRAP_CONFIG_SMTP_SETTING = u"psql mazu postgres -h pghost -c 'UPDATE settings set value={} where key = '{}'; UPDATE 1" #smtp_from, smtp_server, smtp_port, smtp_user, smtp_auth, smtp_pass, smtp_tls_enabled
BOOTSTRAP_CONFIG_SYSLOG = u'mazu-reconfig --set --syslog-forward {}'
BOOTSTRAP_CONFIG_INTERNALADDRESSES = u'mazu-reconfig --set --internal-addresses {}'
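# Convert a dotted-decimal subnet mask to its prefix length, e.g. "255.255.255.0" -> 24.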
def prefix_len_from_mask(mask):
    try:
        prefix_len = sum([bin(int(x)).count('1') for x in mask.split('.')])
    except:
        raise
    return prefix_len
class BaseModule(object):
def __init__(self, hostname=None, ip=None, mask=None, gateway=None):
self.hostname = hostname
self.ip = ip
self.mask = mask
self.gateway = gateway
self.child = None
class ExpansionModule(object):
def __init__(self, hostname=None, ip=None, mask=None, gateway=None):
self.hostname = hostname
self.ip = ip
self.mask = mask
self.gateway = gateway
self.child = None
class DispatcherModule(object):
def __init__(self, hostname=None, ip=None, mask=None, gateway=None):
self.hostname = hostname
self.ip = ip
self.mask = mask
self.gateway = gateway
self.child = None
class BootstrapApp(object):
    def __init__(self, username=None, password=None, reset=None, virtual=True, base_module=None, expansion_modules=None, dispatcher_module=None):
try:
# Import required Class libraries and confirm what version of pexpect library is available
import pexpect
import sys
self.pexpect_version = {}
version_split = pexpect.__version__.split('.')
version_split_len = len(version_split)
if version_split_len == 1:
self.pexpect_version['major'] = int(version_split[0])
if version_split_len >= 2:
self.pexpect_version['major'] = int(version_split[0])
self.pexpect_version['minor'] = int(version_split[1])
except:
raise
# Store state
self.username = username
self.password = password
self.reset = reset
self.virtual = virtual
self.base_module = base_module
self.expansion_modules = expansion_modules
self.dispatcher_module = dispatcher_module
# Try to make connections to the base module and each configured expansion and dispatcher
if self.base_module != None:
try:
self.base_module.child = self.login(ip=self.base_module.ip, username=self.username, password=self.password)
except:
raise
if self.expansion_modules != None:
i = 0
for expansion_module in self.expansion_modules:
if expansion_module != None:
try:
expansion_module.child = self.login(ip=expansion_module.ip, username=self.username, password=self.password)
except:
raise
self.expansion_modules[i] = expansion_module
i += 1
if self.dispatcher_module != None:
try:
                self.dispatcher_module.child = self.login(ip=self.dispatcher_module.ip, username=self.username, password=self.password)
except:
raise
def wait(self, count=0, limit=5):
import time
if count >= limit:
return
self.child.sendline()
time.sleep(30)
self.wait(count+1, limit)
#
#def reconnect(self, ip=None, password=<PASSWORD>):
# self.base_module.child = self.login(ip=self.base_module.ip)
    def login(self, ip=None, port=22, username='mazu', password=None, terminal=False, terminal_username=None, terminal_password=None, timeout=120):
        import pexpect
        import sys
if ip == None or password == None or (terminal == True and (terminal_username == None or terminal_password == None)):
raise Exception("Missing key information for login")
if terminal == False:
ssh_session = None
if ip != None:
command = BOOTSTRAP_SSH_COMMAND
args = BOOTSTRAP_SSH_ARGS + ["{}@{}".format(username, ip), "-p {}".format(str(port))]
try:
if self.pexpect_version['major'] <= 3:
ssh_session = pexpect.spawn(command, args=args, timeout=timeout)
else:
ssh_session = pexpect.spawn(command, args=args, timeout=timeout, encoding='utf-8')
except pexpect.EOF:
raise RuntimeError("Receiving unexpected pexpect.EOF")
except pexpect.TIMEOUT:
raise RuntimeError("Receiving unexpected pexpect.TIMEOUT")
except NameError as e:
raise
except:
raise Exception("Failed SSH login using args '{}' with message '{}'".format(args, sys.exc_info()))
if ssh_session == None:
raise RuntimeError("SSH login failed to '{}'".format(ip))
ssh_session.expect(BOOTSTRAP_PASSWORD_PROMPT_REGEX, timeout=timeout)
ssh_session.sendline(password)
ssh_session.sendline()
ssh_session.expect(BOOTSTRAP_CLI_PROMPT_REGEX)
if u'Password: ' in ssh_session.after:
raise RuntimeError("Incorrect password for '{}'".format(ip))
return ssh_session
else:
terminal_session = None
if ip != None:
command = BOOTSTRAP_CONSOLE_COMMAND
args = BOOTSTRAP_CONSOLE_ARGS + ["{}@{}".format(terminal_username, ip), "-p {}".format(port)]
try:
if self.pexpect_version['major'] <= 3:
terminal_session = pexpect.spawn(command, args=args, timeout=timeout)
else:
terminal_session = pexpect.spawn(command, args=args, timeout=timeout, encoding='utf-8')
except pexpect.EOF:
raise RuntimeError("Receiving unexpected pexpect.EOF")
except pexpect.TIMEOUT:
raise RuntimeError("Receiving unexpected pexpect.TIMEOUT")
except NameError as e:
raise
except:
raise Exception("Failed terminal login using args '{}' with message '{}'".format(args, sys.exc_info()))
if terminal_session == None:
raise RuntimeError("Terminal login failed to '{}'".format(ip))
terminal_session.expect(BOOTSTRAP_TERMINAL_PASSWORD_PROMPT)
terminal_session.sendline(terminal_password)
terminal_session.sendline()
terminal_session.expect(BOOTSTRAP_PROMPT_REGEX_LIST, timeout=timeout)
if u'login: ' in terminal_session.after:
terminal_session.sendline(username)
terminal_session.expect(BOOTSTRAP_PASSWORD_PROMPT_REGEX, timeout=timeout)
terminal_session.sendline(password)
terminal_session.sendline()
terminal_session.expect(BOOTSTRAP_CLI_PROMPT_REGEX)
if u'Password' in terminal_session.after:
raise Exception("Failed NetProfiler login through terminal server for '{}'".format(username))
return terminal_session
def configure_networking(self):
### Need to understand parameters related to setting interface speed/duplex/auto and MTU
import pexpect
# Set the IP address and gateway of the base module
if self.base_module != None and self.base_module.child != None:
            ip_with_prefix_len = self.base_module.ip + '/' + str(prefix_len_from_mask(self.base_module.mask))
command = BOOTSTRAP_CONFIG_MGMTIP.format(ip_with_prefix_len)
try:
self.base_module.child.sendline(command)
except:
raise
self.base_module.child.expect(BOOTSTRAP_CLI_PROMPT_REGEX)
command = BOOTSTRAP_CONFIG_GATEWAY.format(self.base_module.gateway)
try:
self.base_module.child.sendline(command)
except:
raise
self.base_module.child.expect(BOOTSTRAP_CLI_PROMPT_REGEX)
else:
raise RuntimeError('ERROR: Unexpected condition. Base module settings are not available.')
        if self.expansion_modules != None:
            for expansion_module in self.expansion_modules:
                if expansion_module != None and expansion_module.child != None:
                    ip_with_prefix_len = expansion_module.ip + '/' + str(prefix_len_from_mask(expansion_module.mask))
                    command = BOOTSTRAP_CONFIG_MGMTIP.format(ip_with_prefix_len)
                    try:
                        expansion_module.child.sendline(command)
                    except:
                        raise
                    command = BOOTSTRAP_CONFIG_GATEWAY.format(expansion_module.gateway)
                    try:
                        expansion_module.child.sendline(command)
                    except:
                        raise
if self.dispatcher_module != None and self.dispatcher_module.child != None:
            ip_with_prefix_len = self.dispatcher_module.ip + '/' + str(prefix_len_from_mask(self.dispatcher_module.mask))
command = BOOTSTRAP_CONFIG_MGMTIP.format(ip_with_prefix_len)
try:
self.dispatcher_module.child.sendline(command)
except:
raise
command = BOOTSTRAP_CONFIG_GATEWAY.format(self.dispatcher_module.gateway)
try:
self.dispatcher_module.child.sendline(command)
except:
raise
return
def run(self):
# Configure devices' IP addresses and gateway and other network configuration settings
self.configure_networking()
#self.configure_dns()
#self.configure_ntp()
#self.configure_snmp()
#self.configure_smtp()
#self.configure_syslog()
return True, "Bootstrap complete."
BOOTSTRAP_TEST_USERNAME = 'mazu'
BOOTSTRAP_TEST_RESET = False
BOOTSTRAP_TEST_VIRTUAL = True
BOOTSTRAP_TEST_BASE_HOSTNAME = 'netprofiler'
BOOTSTRAP_TEST_BASE_IP = '10.1.150.222'
BOOTSTRAP_TEST_BASE_MASK = '255.255.255.0'
BOOTSTRAP_TEST_BASE_GATEWAY = '10.1.150.1'
def test():
# Check that the dependencies are present to avoid an exception in execution
try:
import pexpect
    except ImportError:
        print("The pexpect Python module could not be imported during the execution of the NetProfiler Bootstrap Ansible module")
        return
# Check that other dependencies are also present
try:
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import sys
import time
from getpass import getpass
    except ImportError as e:
        print("Required Python modules could not be imported.")
        return
try:
print("Enter password for NetProfiler appliance '{}'".format(BOOTSTRAP_TEST_BASE_HOSTNAME))
password = getpass()
# Initialize connection to appliance
bootstrap = BootstrapApp(
username=BOOTSTRAP_TEST_USERNAME,
password=password,
reset=BOOTSTRAP_TEST_RESET,
virtual=BOOTSTRAP_TEST_VIRTUAL,
base_module=BaseModule(hostname=BOOTSTRAP_TEST_BASE_HOSTNAME,
ip=BOOTSTRAP_TEST_BASE_IP,
mask=BOOTSTRAP_TEST_BASE_MASK,
gateway=BOOTSTRAP_TEST_BASE_GATEWAY),
expansion_modules=[],
dispatcher_module=None
)
# Run
success, msg = bootstrap.run()
except pexpect.TIMEOUT as e:
print("pexpect.TIMEOUT: Unexpected timeout waiting for prompt or command: {}".format(e))
print("Failure")
return
except pexpect.EOF as e:
print("pexpect.EOF: Unexpected program termination: {}".format(e))
print("Failure")
return
# Does not seem to be supported in earlier versions of pexpect
#except pexpect.exceptions.ExceptionPexpect as e:
# print("pexpect.exceptions.{0}: {1}".format(type(e).__name__, e))
# print("Failure")
# return
except RuntimeError as e:
print("RuntimeError: {}".format(e))
print("Failure")
return
except:
print("Unexpected error: {}".format(sys.exc_info()))
print("Failure")
return
print("Success")
return
if __name__ == '__main__':
# main()
# Comment out main() and remove comments from test() to be able to execute Python code directly using <python bootstrap.py>.
# This allows the code to be executed separately without being executed as an Ansible module.
# Edit the global BOOTSTRAP_TEST* parameters that are specified above the test () function to specify the parameters to use in the test.
# The passwords will be requested at the command line upon execution.
# Specifying terminal=True connects to the bootstrapped system using a terminal server, while specifying terminal=False connects directly to the DHCP IP over SSH.
test()
``` |
{
"source": "jkraenzle/steelscript-netprofiler",
"score": 2
} |
#### File: steelscript-netprofiler/examples/identity_report.py
```python
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.filters import TimeFilter, TrafficFilter
from steelscript.netprofiler.core.report import (IdentityReport,
TrafficOverallTimeSeriesReport,
TrafficSummaryReport)
from steelscript.common.datautils import Formatter
from steelscript.common.timeutils import string_to_datetime
import sys
import imp
import datetime
import optparse
import itertools
AGGREGATION = {'total': lambda x: sum(x),
'avg' : lambda x: sum(x) / len(x),
'peak' : lambda x: max(x),
'min' : lambda x: min(x)}
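# Each report column below is paired with the aggregation function applied to its values when the
# --aggregate option collapses a login period's time series into a single row (see generate_traffic()).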
# Columns for Time Series Report
TCOLUMNS = [
('time', AGGREGATION['min']),
('total_bytes', AGGREGATION['total']),
('avg_bytes', AGGREGATION['avg']),
('network_rtt', AGGREGATION['peak']),
('response_time', AGGREGATION['peak']),
('server_delay', AGGREGATION['peak']),
('avg_conns_rsts', AGGREGATION['avg']),
('avg_pkts_rtx', AGGREGATION['avg']),
('avg_rsec_jitter', AGGREGATION['avg']),
('avg_vqual_mos', AGGREGATION['min']),
]
# Columns for Traffic Summary Report
SCOLUMNS = [
('total_bytes', AGGREGATION['total']),
('avg_bytes', AGGREGATION['avg']),
('network_rtt', AGGREGATION['peak']),
('response_time', AGGREGATION['peak']),
('server_delay', AGGREGATION['peak']),
('avg_conns_rsts', AGGREGATION['avg']),
('avg_pkts_rtx', AGGREGATION['avg']),
('avg_rsec_jitter', AGGREGATION['avg']),
('avg_vqual_mos', AGGREGATION['min']),
]
def format_time(value):
"""Convenience function to translate timestamp to ISO format"""
t = datetime.datetime.fromtimestamp(value)
return t.isoformat(' ')
class IdentityApp(NetProfilerApp):
def add_options(self, parser):
super(IdentityApp, self).add_options(parser)
group = optparse.OptionGroup(parser, 'Identity Report Options')
group.add_option('-n', '--identity-name', dest='identity_name',
help='Login name to use for search')
group.add_option('-T', '--traffic-filter', dest='trafficexpr', default=None,
help='Traffic filter to narrow down IP address '
'search space')
group.add_option('--time0', dest='time0', default=None,
help='Start time for report')
group.add_option('--time1', dest='time1', default=None,
help='End time for report')
group.add_option('-r', '--timerange', dest='timerange', default=None,
help='Optional time range in place of t0 and t1')
group.add_option('-b', '--backsearch', dest='backsearch', default='24',
help='Hours to look backwards to find possible identity match '
'defaults to the maximum of "24" hours')
group.add_option('--resolution', default='auto',
help='Time resolution to use for report queries, '
'defaults to "auto", may be one of the following: '
'("1min", "15min", "hour", "6hour", "day")')
group.add_option('--timeseries-report', dest='timeseries_report', default=False,
action='store_true',
help='Run time series traffic reports for hosts during found '
'login times.')
group.add_option('--aggregate', dest='aggregate', default=False,
action='store_true',
help='Set to group timeseries data into single row per timeperiod.')
group.add_option('--summary-report', dest='summary_report', default=False,
action='store_true',
help='Run summary reports for hosts during found login times.')
        group.add_option('--groupby-application', dest='groupby_application', default=False,
                         action='store_true',
                         help='Group summary report results by application.')
        group.add_option('--groupby-interface', dest='groupby_interface', default=False,
                         action='store_true',
                         help='Group summary report results by interface.')
group.add_option('--csv', dest='csv', default=False, action='store_true',
help='Print results in CSV table.')
group.add_option('--tsv', dest='tsv', default=False, action='store_true',
help='Print results in TSV (tab-separated-values) table.')
group.add_option('--testfile', dest='testfile', default=None,
help='Optional test file with identity events to use in place of '
'actual netprofiler queries.')
group.add_option('--usecache', dest='usecache', default=False, action='store_true',
help='Use internal cache to help with large traffic query sets')
parser.add_option_group(group)
def validate_args(self):
""" Check that either both t0 and t1 are used or time range
"""
super(IdentityApp, self).validate_args()
if self.options.timerange and (self.options.time0 or
self.options.time1):
self.parser.error('timerange and t0/t1 are mutually exclusive, '
'choose only one.')
elif (not self.options.timerange and
not self.options.time0 and
not self.options.time1):
self.parser.error('A timerange must be chosen.')
elif not self.options.identity_name:
self.parser.error('An identity_name must be chosen.')
elif int(self.options.backsearch) > 24:
self.parser.error('Time for back search cannot exceed "24" hours.')
elif self.options.timeseries_report and self.options.summary_report:
self.parser.error('Only one report type may be selected at a time.')
def identity_report(self, timefilter=None, trafficexpr=None, testfile=None):
""" Run IdentityReport and return data
"""
identity = self.options.identity_name
if not testfile:
# run report against all users
print('Running IdentityReport ...')
report = IdentityReport(self.netprofiler)
report.run(timefilter=timefilter, trafficexpr=trafficexpr)
print('Report complete, gathering data ...')
data = report.get_data()
if identity not in (x[1] for x in data):
print('Running report farther back to find identity ...')
delta = datetime.timedelta(hours=int(self.options.backsearch))
timefilter.start = timefilter.start - delta
report.run(timefilter=timefilter, trafficexpr=trafficexpr)
data = report.get_data()
if not data:
print("Empty data results.")
legend = report.get_legend()
report.delete()
else:
print('Reading from testfile {testfile} ...'
''.format(testfile=testfile))
try:
#TODO: Figure out new importlib
f, path, desc = imp.find_module(testfile)
test = imp.load_module(testfile, f, path, desc)
data = test.data
legend = self.netprofiler.get_columns(test.legend)
except ImportError:
print('Error importing test file {testfile}\nEnsure it is in '
'the PYTHONPATH, and contains a valid data object.'
''.format(testfile=testfile))
sys.exit(1)
finally:
f.close()
return legend, data
def traffic_report(self, host, timefilter, report_type):
""" Generate average statistics for host given `timefilter` time period
`report_type` is one of ('timeseries', 'summary')
"""
print('Running {0} report for {1} over {2}/{3}'
''.format(report_type,
host,
timefilter.start,
timefilter.end))
texpr = TrafficFilter('host %s' % host)
if report_type == 'timeseries':
columns = [c[0] for c in TCOLUMNS]
report = TrafficOverallTimeSeriesReport(self.netprofiler)
report.run(columns,
timefilter=timefilter,
trafficexpr=texpr,
resolution=self.options.resolution)
elif report_type == 'summary':
columns = [c[0] for c in SCOLUMNS]
report = TrafficSummaryReport(self.netprofiler)
if self.options.groupby_application:
columns.insert(0, 'app_name')
groupby = 'app'
elif self.options.groupby_interface:
columns.insert(0, 'interface_alias')
columns.insert(0, 'interface')
groupby = 'ifc'
else:
groupby = 'hos'
report.run(groupby,
columns,
timefilter=timefilter,
trafficexpr=texpr,
resolution=self.options.resolution)
else:
raise RuntimeError('unknown report type: %s' % report_type)
print('Report complete, gathering data ...')
data = report.get_data()
if not data:
print("Empty data results.")
elif len(data) == 10000:
print('WARNING: data size exceeds max length of 10000 rows')
legend = report.get_legend()
report.delete()
return legend, data
def analyze_login_data(self, all_data, legend_columns, single_ip=True):
""" Identify periods user logged into each host
`single-ip` indicates that a user may only have one IP at a time.
Logins indicating a different IP address will mean previous
IP address has been released.
"""
logins = {} # scratch lookup table
current_ip = None # last ip of user
activity = [] # sequence of events
identity = self.options.identity_name
legend_keys = [c.key for c in legend_columns]
# all_data contains a time-ordered list of logins for all users
# the sort order is latest to earliest, so we want to parse this in the
# reverse order
for login in all_data[::-1]:
login = dict(zip(legend_keys, login))
time = login['time']
host = login['host_dns'].strip('|')
user = login['username']
if single_ip:
if current_ip and (host == current_ip or user == identity):
# ip changes to new user or user gets assigned new ip
start = logins.pop(current_ip)
duration = time - start
activity.append((current_ip, start, time, duration))
current_ip = None
if user == identity:
logins[host] = time
current_ip = host
else:
if host in logins:
# new login to existing host
start = logins.pop(host)
duration = time - start
activity.append((host, start, time, duration))
current_ip = None
if user == identity:
logins[host] = time
current_ip = host
activity.sort(key=lambda x: x[1])
legend = ['Host IP', 'Login Time', 'Logout Time', 'Duration']
return legend, activity
def generate_traffic(self, activity, legend_keys, report_type):
""" Generate traffic data during the time the user was logged-in.
"""
cache = {}
combined_activity = []
for event in activity:
# handle dns names in host along with IP address
host = event[0].split('|', 1)[0]
timefilter = TimeFilter(string_to_datetime(event[1]),
string_to_datetime(event[2]))
# if event occurs in less than a minute, add extra minute to report
while len(timefilter.profiler_minutes()) == 1:
timefilter.end += datetime.timedelta(minutes=1)
# normalize times to minute increments
mins = timefilter.profiler_minutes()
tf = TimeFilter(mins[0], mins[-1])
if self.options.usecache and report_type == 'timeseries':
# only consider a hit when whole time period is covered
minutes = tf.profiler_minutes(astimestamp=True)
if host in cache and all(t in cache[host] for t in minutes):
data = [cache[host][t] for t in minutes]
else:
legend, data = self.traffic_report(host, tf, report_type)
# store results in cache by host->times->data
cache.setdefault(host, {}).update((int(x[0]), x) for x in data)
else:
legend, data = self.traffic_report(host, tf, report_type)
if data:
if self.options.aggregate and report_type == 'timeseries':
# generate running averages over data samples received
# first convert empty strings to zeros, then run averages
columns = map(lambda c: [0 if x == '' else x for x in c],
itertools.izip(*data))
aggmap = [x[1] for x in TCOLUMNS]
aggregates = [aggmap[i](x) for i, x in enumerate(columns)]
combined_activity.append(list(event) + aggregates)
elif report_type == 'timeseries' or report_type == 'summary':
# create entry for each element in report
for row in data:
r = ['--' if x == '' else x for x in row]
combined_activity.append(list(event) + r)
else:
raise RuntimeError('unknown report type: %s' % report_type)
else:
# populate result with blanks
combined_activity.append(list(event) + ['--'] * len(legend))
traffic_legend = [c.key for c in legend]
legend = legend_keys + traffic_legend
return legend, combined_activity
def main(self):
""" Setup query and run report with default column set
"""
if self.options.timerange:
timefilter = TimeFilter.parse_range(self.options.timerange)
else:
timefilter = TimeFilter(self.options.time0, self.options.time1)
if self.options.trafficexpr:
trafficexpr = TrafficFilter(self.options.trafficexpr)
else:
trafficexpr = None
legend_columns, all_data = self.identity_report(timefilter=timefilter,
trafficexpr=trafficexpr,
testfile=self.options.testfile)
legend, activity = self.analyze_login_data(all_data, legend_columns)
if activity and self.options.timeseries_report:
headers, tbl_data = self.generate_traffic(activity, legend, 'timeseries')
elif activity and self.options.summary_report:
headers, tbl_data = self.generate_traffic(activity, legend, 'summary')
else:
headers = ('Host IP', 'Login Time', 'Logout Time', 'Duration')
tbl_data = [(x[0], format_time(x[1]), format_time(x[2]), x[3])
for x in activity]
if self.options.csv:
Formatter.print_csv(tbl_data, headers)
elif self.options.tsv:
Formatter.print_csv(tbl_data, headers, delim='\t')
else:
Formatter.print_table(tbl_data, headers)
if __name__ == '__main__':
IdentityApp().run()
```
#### File: steelscript-netprofiler/examples/import_hostgroups.py
```python
import csv
import sys
import string
import optparse
from collections import defaultdict
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup
from steelscript.commands.steel import prompt_yn
from steelscript.common.exceptions import RvbdException
# This script will take a file with subnets and SiteNames
# and create a HostGroupType on the target NetProfiler.
# If the HostGroupType already exists, it will be deleted,
# before creating a new one with the same name.
#
# See the EXAMPLE text below for the format of the input
# file. Note that multiple SiteNames with different
# IP address spaces can be included.
EXAMPLE_WARN = """
Invalid file format
Ensure file has correct header.
example file:
subnet SiteName
10.143.58.64/26 CZ-Prague-HG
10.194.32.0/23 MX-SantaFe-HG
10.170.55.0/24 KR-Seoul-HG
10.234.9.0/24 ID-Surabaya-HG
10.143.58.63/23 CZ-Prague-HG
"""
class HostGroupImport(NetProfilerApp):
def add_options(self, parser):
super(HostGroupImport, self).add_options(parser)
group = optparse.OptionGroup(parser, "HostGroup Options")
group.add_option('--hostgroup', action='store',
help='Name of hostgroup to overwrite')
group.add_option('-i', '--input-file', action='store',
help='File path to hostgroup file')
parser.add_option_group(group)
def validate_args(self):
"""Ensure all arguments are present."""
super(HostGroupImport, self).validate_args()
if not self.options.input_file:
self.parser.error('Host group file is required, specify with '
'"-i" or "--input-file"')
if not self.options.hostgroup:
self.parser.error('Hostgroup name is required, specify with '
'"--hostgroup"')
def validate(self, name):
valid = set(string.letters + string.digits + '.-_')
return all(c in valid for c in name)
def import_file(self):
"""Process the input file and load into dict."""
groups = defaultdict(list)
with open(self.options.input_file, 'rb') as f:
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
reader = csv.reader(f, dialect)
header = reader.next()
if header != ['subnet', 'SiteName']:
print(EXAMPLE_WARN)
for i, row in enumerate(reader):
cidr, group = row
if not self.validate(group):
print('Invalid group name on line {0}: {1}'
''.format(i+2, group))
sys.exit()
groups[group].append(cidr)
return groups
def update_hostgroups(self, groups):
"""Replace existing HostGroupType with contents of groups dict."""
# First find any existing HostGroupType
try:
hgtype = HostGroupType.find_by_name(self.netprofiler,
self.options.hostgroup)
hgtype.config = []
hgtype.groups = {}
print('Existing HostGroupType "{0}" found.'
''.format(self.options.hostgroup))
except RvbdException:
print('No existing HostGroupType found, creating a new one.')
hgtype = HostGroupType.create(self.netprofiler,
self.options.hostgroup)
# Add new values
for group, cidrs in groups.items():
hg = HostGroup(hgtype, group)
hg.add(cidrs)
# Save to NetProfiler
hgtype.save()
print ('HostGroupType "%s" configuration saved.'
% self.options.hostgroup)
def main(self):
"""Confirm overwrite then update hostgroups."""
confirm = ('The contents of hostgroup {0} will be overwritten '
'by the file {1}, are you sure?'
''.format(self.options.hostgroup, self.options.input_file))
if not prompt_yn(confirm):
print('Okay, aborting.')
sys.exit()
groups = self.import_file()
self.update_hostgroups(groups)
print('Successfully updated {0} on {1}'.format(self.options.hostgroup,
self.netprofiler.host))
if __name__ == '__main__':
HostGroupImport().run()
```
#### File: steelscript-netprofiler/examples/top_ports.py
```python
from steelscript.netprofiler.core import *
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.filters import TimeFilter, TrafficFilter
import pprint
class TopPortsApp(NetProfilerApp):
def main(self):
# Create and run a traffic summary report of all server ports in use
# by hosts in 10/8
report = TrafficSummaryReport(self.netprofiler)
# Run the report
report.run(
groupby=self.netprofiler.groupbys.port,
columns=[self.netprofiler.columns.key.protoport,
self.netprofiler.columns.key.protoport_name,
self.netprofiler.columns.value.avg_bytes,
self.netprofiler.columns.value.network_rtt],
sort_col=self.netprofiler.columns.value.avg_bytes,
timefilter=TimeFilter.parse_range("last 15 m"),
trafficexpr=TrafficFilter("host 10/8")
)
# Retrieve and print data
data = report.get_data()
printer = pprint.PrettyPrinter(2)
printer.pprint(data[:20])
TopPortsApp().run()
```
#### File: steelscript-netprofiler/examples/top_ports_time.py
```python
import optparse
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.filters import TimeFilter
from steelscript.netprofiler.core.report import \
TrafficSummaryReport, TrafficTimeSeriesReport
from steelscript.common.datautils import Formatter
class TopPortsTime(NetProfilerApp):
def add_options(self, parser):
super(TopPortsTime, self).add_options(parser)
group = optparse.OptionGroup(parser, "Report Options")
group.add_option(
'--timefilter',
default='last 15 min',
help=('Time range to analyze (defaults to "last 15 min") '
'other valid formats are: "4/21/13 4:00 to 4/21/13 5:00" '
'or "16:00:00 to 21:00:04.546"'))
group.add_option(
'-N',
default=10,
help=('Top N to report on'))
parser.add_option_group(group)
def main(self):
netprof = self.netprofiler
timefilter = TimeFilter.parse_range(self.options.timefilter)
# Create and run a traffic summary report of all server ports in use
report = TrafficSummaryReport(netprof)
# Run the report
report.run(
groupby=netprof.groupbys.port,
columns=[netprof.columns.key.protoport,
netprof.columns.key.protocol,
netprof.columns.key.port,
netprof.columns.value.avg_bytes],
sort_col=netprof.columns.value.avg_bytes,
timefilter=timefilter)
# Retrieve and print data
ports_data = report.get_data()[:int(self.options.N)]
report.delete()
# Now create a new report using the ports_data
report = TrafficTimeSeriesReport(netprof)
# The format the query_columns for 'ports' is:
# 'ports' = [{'name': 'tcp/80'},
# {'name': 'tcp/443'},
# {'name': 'icmp/0'}]
# For most protocols, this works just fine from the report data,
# but for icmp the result from data is 'icmp/0/0' -- where the two
# zeros are type and code. This doesn't work as input to
# NetProfiler, which expects type and code to be packed into a single
# 16-bit number (type << 8 | code).
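# For illustration with assumed values (not from a real report): an ICMP
# echo request is type 8, code 0, so it packs to 8 << 8 | 0 = 2048 and the
# resulting query column entry would be {'name': 'icmp/2048'}.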
query_columns = []
for (protoport, protocol, port, avgbytes) in ports_data:
if protoport.startswith('icmp'):
protoport = 'icmp/%s' % (port)
query_columns.append({'name': protoport})
# Run the report
report.run(columns=[netprof.columns.key.time,
netprof.columns.value.avg_bytes],
resolution='1 min',
query_columns_groupby='ports',
query_columns=query_columns,
timefilter=timefilter)
# Get the data!
data = report.get_data()
Formatter.print_table(
data, padding=1,
headers=(['time'] + [q['name'] for q in query_columns]))
if __name__ == "__main__":
TopPortsTime().run()
```
#### File: steelscript-netprofiler/examples/traffic_timeseries.py
```python
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.report import TrafficOverallTimeSeriesReport
from steelscript.netprofiler.core.filters import TimeFilter, TrafficFilter
from steelscript.common.datautils import Formatter
import optparse
class TrafficTimeSeriesApp(NetProfilerApp):
def add_options(self, parser):
super(TrafficTimeSeriesApp, self).add_options(parser)
group = optparse.OptionGroup(parser, "Report Parameters")
group.add_option('--centricity', dest='centricity', default='host',
help='"host" vs "interface" centricity (default "host")')
group.add_option('--columns', dest='columns',
help='Comma-separated list of column names and/or '
'ID numbers, required')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Filter Options")
group.add_option('--timefilter', dest='timefilter', default='last 1 hour',
help='Time range to analyze (defaults to "last 1 hour") '
'other valid formats are: "4/21/13 4:00 to 4/21/13 5:00" '
'or "16:00:00 to 21:00:04.546"')
group.add_option('--trafficexpr', dest='trafficexpr', default=None,
help='Traffic Expression to apply to report (default None)')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Output options")
group.add_option('--csv', dest='as_csv', default=False, action='store_true',
help='Return values in CSV format instead of tabular')
parser.add_option_group(group)
def validate_args(self):
""" Ensure columns are included
"""
super(TrafficTimeSeriesApp, self).validate_args()
if self.options.centricity == 'host':
self.centricity = 'hos'
elif self.options.centricity == 'interface':
self.centricity = 'int'
elif self.options.centricity not in ['hos', 'int']:
self.parser.error('Centricity option must be either "int" or "hos".')
else:
self.centricity = self.options.centricity
if not self.options.columns:
self.parser.error('Comma-separated list of columns is required.')
def print_data(self, data, header):
if self.options.as_csv:
Formatter.print_csv(data, header)
else:
Formatter.print_table(data, header)
def main(self):
self.timefilter = TimeFilter.parse_range(self.options.timefilter)
if self.options.trafficexpr:
self.trafficexpr = TrafficFilter(self.options.trafficexpr)
else:
self.trafficexpr = None
with TrafficOverallTimeSeriesReport(self.netprofiler) as report:
report.run(columns=self.options.columns.split(','),
timefilter=self.timefilter,
trafficexpr=self.trafficexpr,
centricity=self.centricity)
data = report.get_data()
legend = [c.label for c in report.get_legend()]
self.print_data(data, legend)
if __name__ == '__main__':
TrafficTimeSeriesApp().run()
```
#### File: appfwk/datasources/netprofiler_live.py
```python
import logging
import pandas as pd
from steelscript.appfwk.apps.datasource.models import DatasourceTable,\
TableQueryBase, TableField, Column
from steelscript.appfwk.apps.devices.forms import fields_add_device_selection
from steelscript.appfwk.apps.jobs import QueryComplete
from steelscript.appfwk.apps.datasource.forms import \
fields_add_time_selection
from steelscript.appfwk.libs.fields import Function
from steelscript.appfwk.apps.devices.models import Device
from steelscript.appfwk.apps.datasource.forms import IDChoiceField
from steelscript.netprofiler.core._constants import EPHEMERAL_COLID
from steelscript.netprofiler.core.report import LiveReport
from steelscript.appfwk.apps.devices.devicemanager import DeviceManager
import steelscript.appfwk.apps.report.modules.yui3 as yui3
logger = logging.getLogger(__name__)
def netprofiler_live_templates(form, id, field_kwargs):
"""Query netprofiler for available live templates. """
netprofiler_device = form.get_field_value('netprofiler_device', id)
if netprofiler_device == '':
choices = [('', '<No netprofiler device>')]
else:
netprofiler = DeviceManager.get_device(netprofiler_device)
choices = [(t['id'], t['name'])
for t in netprofiler.api.templates.get_live_templates()]
field_kwargs['choices'] = choices
field_kwargs['label'] = 'Live Template'
class NetProfilerLiveConfigTable(DatasourceTable):
class Meta:
proxy = True
_query_class = 'NetProfilerLiveConfigQuery'
def post_process_table(self, field_options):
fields_add_device_selection(self, keyword='netprofiler_device',
label='NetProfiler', module='netprofiler',
enabled=True)
fields_add_time_selection(self, show_end=True, show_duration=False)
func = Function(netprofiler_live_templates, self.options)
TableField.create('template_id', label='Template',
obj=self,
field_cls=IDChoiceField,
parent_keywords=['netprofiler_device'],
dynamic=True,
pre_process_func=func)
self.add_column('template_id', 'Template ID', datatype='string',
iskey=True)
self.add_column('widget_id', 'Widget ID', datatype='integer',
iskey=True)
self.add_column('title', 'Title', datatype='string')
self.add_column('widget_type', 'Type', datatype='string')
self.add_column('visualization', 'Visualization', datatype='string')
self.add_column('datasource', 'Data Source', datatype='string')
class NetProfilerLiveConfigQuery(TableQueryBase):
def run(self):
criteria = self.job.criteria
profiler = DeviceManager.get_device(criteria.netprofiler_device)
widget_config = profiler.api.templates.get_config(criteria.template_id)
recs = []
for w in widget_config:
dict0 = {'template_id': str(criteria.template_id)}
dict1 = dict((k, w[k]) for k in ['widget_id', 'title'])
dict2 = dict((k, w['config'][k]) for k in
['widget_type', 'visualization', 'datasource'])
recs.append(dict((k, v) for d in [dict0, dict1, dict2]
for k, v in d.items()))
return QueryComplete(pd.DataFrame(recs))
class NetProfilerLiveTable(DatasourceTable):
class Meta:
proxy = True
_query_class = 'NetProfilerLiveQuery'
TABLE_OPTIONS = {'netprofiler_id': None,
'template_id': None,
'query_id': None,
'widget_id': None
}
class NetProfilerLiveQuery(TableQueryBase):
def run(self):
# For each of the widgets, get all the data
profiler = DeviceManager.get_device(self.table.options.netprofiler_id)
lr = LiveReport(profiler, template_id=self.table.options.template_id)
# Figure out columns by querying the widget
# cols = lr.get_columns(self.table.options.widget_id)
# Find the query object
query_idx = lr.get_query_names().index(self.table.options.query_id)
# refresh the columns of the table
self._refresh_columns(profiler, report=lr, query=lr.queries[query_idx])
data = lr.get_data(index=query_idx)
col_names = [col.label if col.ephemeral else col.key
for col in lr.queries[query_idx].columns]
df = pd.DataFrame(columns=col_names, data=data)
return QueryComplete(df)
def _refresh_columns(self, profiler, report, query):
# Delete columns
for col in self.table.get_columns():
col.delete()
cols = []
for col in query.columns:
if col.id >= EPHEMERAL_COLID:
cols.append(col)
if not cols:
cols = report.get_columns(self.table.options.widget_id)
if query.is_time_series:
# 98 is the column id for 'time'
cols = [profiler.columns[98]] + cols
for col in cols:
if (col.json['type'] == 'float' or
col.json['type'] == 'reltime' or
col.json['rate'] == 'opt'):
data_type = 'float'
elif col.json['type'] == 'time':
data_type = 'time'
elif col.json['type'] == 'int':
data_type = 'integer'
else:
data_type = 'string'
col_name = col.label if col.ephemeral else col.key
Column.create(self.table, col_name, col.label,
datatype=data_type, iskey=col.iskey)
def add_widgets_to_live_report(report, template_id, widget_query_ids,
netprofiler_name=None):
if netprofiler_name:
netprofiler_id = Device.objects.filter(name=netprofiler_name)[0].id
else:
netprofiler_id = Device.objects.\
filter(enabled=True, module='netprofiler')[0].id
profiler = DeviceManager.get_device(netprofiler_id)
lr = LiveReport(profiler, template_id)
for wid, qid in widget_query_ids.items():
q = [q for q in lr.queries if q.id.endswith(qid)][0]
t = NetProfilerLiveTable.create(
'live-{0}-{1}'.format(template_id, wid),
netprofiler_id=netprofiler_id,
template_id=template_id,
query_id=q.id,
widget_id=wid,
cacheable=False)
if q.is_time_series:
widget_cls = yui3.TimeSeriesWidget
t.add_column('time', 'Time', datatype='time', iskey=True)
else:
widget_cls = yui3.TableWidget
widget_title = 'Template %s Widget %s' % (template_id, wid)
report.add_widget(widget_cls, t, widget_title, width=12)
```
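The `add_widgets_to_live_report` helper above is intended to be called from an App Framework report definition. Below is a minimal sketch of such a call, assuming the usual appfwk report helpers (`Report.create`, `add_section`) and the module import path shown; the template id and the widget-to-query mapping are placeholder values, not taken from this repository.
```python
# Hypothetical report config using the helper defined above (placeholder ids).
from steelscript.appfwk.apps.report.models import Report
from steelscript.netprofiler.appfwk.datasources.netprofiler_live import \
    add_widgets_to_live_report

report = Report.create('NetProfiler Live Template 151')
report.add_section()

# One NetProfilerLiveTable and widget is created per entry; time-series
# queries get a TimeSeriesWidget, everything else a TableWidget.
add_widgets_to_live_report(report, template_id=151,
                           widget_query_ids={1: 'query0', 2: 'query1'})
```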
#### File: appfwk/datasources/netprofiler.py
```python
import os
import time
import json
import types
import logging
import datetime
import threading
from collections import namedtuple
import pandas
from django import forms
from django.conf import settings
from steelscript.netprofiler.core.services import \
Service, ServiceLocationReport
from steelscript.netprofiler.core.report import \
Report, SingleQueryReport, TrafficTimeSeriesReport, MultiQueryReport
from steelscript.netprofiler.core.filters import TimeFilter, TrafficFilter
from steelscript.common.timeutils import (parse_timedelta,
timedelta_total_seconds)
from steelscript.appfwk.apps.datasource.models import \
DatasourceTable, Column, TableQueryBase, Table
from steelscript.appfwk.apps.datasource.models import TableField
from steelscript.appfwk.apps.devices.forms import fields_add_device_selection
from steelscript.appfwk.apps.devices.devicemanager import DeviceManager
from steelscript.appfwk.apps.datasource.forms import \
fields_add_time_selection, fields_add_resolution
from steelscript.appfwk.libs.fields import Function
from steelscript.netprofiler.core.hostgroup import HostGroupType
from steelscript.appfwk.apps.jobs import QueryComplete, QueryError
logger = logging.getLogger(__name__)
lock = threading.Lock()
def _post_process_combine_filterexprs(form, id, criteria, params):
exprs = []
if ('netprofiler_filterexpr' in criteria and
criteria.netprofiler_filterexpr != ''):
exprs.append(criteria.netprofiler_filterexpr)
field = form.get_tablefield(id)
for parent in field.parent_keywords:
expr = criteria[parent]
if expr is not None and expr != '':
exprs.append(expr)
if len(exprs) == 0:
val = ""
elif len(exprs) == 1:
val = exprs[0]
else:
val = "(" + ") and (".join(exprs) + ")"
criteria['netprofiler_filterexpr'] = val
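# Illustration: a base expression 'app web' plus one parent expression
# 'host 10/8' combine above to '(app web) and (host 10/8)' (example values).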
def netprofiler_hostgroup_types(form, id, field_kwargs, params):
""" Query netprofiler for all hostgroup types. """
netprofiler_device = form.get_field_value('netprofiler_device', id)
if netprofiler_device == '':
choices = [('', '<No netprofiler device>')]
else:
netprofiler = DeviceManager.get_device(netprofiler_device)
choices = []
for hgt in netprofiler.api.host_group_types.get_all():
choices.append((hgt['name'], hgt['name']))
field_kwargs['label'] = 'HostGroupType'
field_kwargs['choices'] = choices
def netprofiler_hostgroups(form, id, field_kwargs, params):
""" Query netprofiler for groups within a given hostgroup. """
netprofiler_device = form.get_field_value('netprofiler_device', id)
if netprofiler_device == '':
choices = [('', '<No netprofiler device>')]
else:
netprofiler = DeviceManager.get_device(netprofiler_device)
if params is not None and 'hostgroup_type' in params:
hgt = HostGroupType.find_by_name(netprofiler,
params['hostgroup_type'])
else:
hostgroup_type = form.get_field_value('hostgroup_type', id)
hgt = HostGroupType.find_by_name(netprofiler,
hostgroup_type)
choices = [(group, group) for group in hgt.groups.keys()]
field_kwargs['label'] = 'HostGroup'
field_kwargs['choices'] = choices
def get_netprofiler_apps(netprofiler, force=False):
apps = None
app_cache = os.path.join(settings.DATA_CACHE,
'%s-applications.json' % netprofiler.host)
if os.path.exists(app_cache) and force is False:
logger.debug('loading apps from app cache %s' % app_cache)
with open(app_cache) as f:
apps = json.load(f)
else:
logger.debug('app cache not found, loading apps from netprofiler')
apps = netprofiler.conn.json_request(
'GET', '/api/profiler/1.9/applications?enabled=true'
)
apps.sort(key=lambda x: x['name'])
# save for later
with open(app_cache, 'w') as f:
json.dump(apps, f)
logger.debug('app cache saved')
return apps
def netprofiler_application_choices(form, id, field_kwargs, params):
# let's get all the applications and store them
netprofiler_device = form.get_field_value('netprofiler_device', id)
if netprofiler_device == '':
choices = [('', '<No netprofiler device>')]
else:
netprofiler = DeviceManager.get_device(netprofiler_device)
apps = get_netprofiler_apps(netprofiler)
# now we've got the apps return just name and id
choices = [(x['name'], x['name']) for x in apps]
field_kwargs['label'] = 'Application'
field_kwargs['choices'] = choices
def add_netprofiler_application_field(report, section, app=None):
""" Attach fields for dynamic Application dropdowns to add as filter
expressions to the report.
This can be added for each section in a report where the added filter
expression is desired.
The optional ``app`` argument can be either a single string or a list
of strings. If a single string, no 'Application' field is shown and the
filter expression is hardcoded to the given application. If a list, the
choices of the 'Application' dropdown will be fixed to those in the list.
"""
# add default filter expr to extend against
filterexpr = TableField.create(keyword='netprofiler_filterexpr')
section.fields.add(filterexpr)
if app is None:
# use all available apps
field = TableField.create(
keyword='application',
label='Application',
obj=report,
field_cls=forms.ChoiceField,
field_kwargs={'widget_attrs': {'class': 'form-control'}},
parent_keywords=['netprofiler_device'],
dynamic=True,
pre_process_func=Function(netprofiler_application_choices)
)
section.fields.add(field)
elif type(app) in (list, tuple):
# add apps field that uses given list
field = TableField.create(
keyword='application',
label='Application',
obj=report,
field_cls=forms.ChoiceField,
field_kwargs={'choices': zip(app, app),
'widget_attrs': {'class': 'form-control'}},
parent_keywords=['netprofiler_device'],
)
section.fields.add(field)
else:
# single app given, so no field; just hardcode the filter
NetProfilerTable.extend_filterexpr(
section, keyword='application',
template='app {}'.format(app)
)
# we're done here
return
NetProfilerTable.extend_filterexpr(
section, keyword='app_filterexpr',
template='app {application}'
)
def add_netprofiler_hostgroup_field(report, section, hg_type=None):
""" Attach fields for dynamic HostGroup dropdowns to add as filter
expressions to the report.
This can be added for each section in a report where the added filter
expression is desired.
The optional ``hg_type`` argument can be either a single string or a list
of strings for each HostGroupType. If a single string, the
'HostGroupType' field will be hidden and automatically filter HostGroups
to the given HostGroupType. If a list, the elements of the HostGroupType
list will be fixed to those in the list; this can be helpful if certain
HostGroupTypes may be sensitive or not applicable to the report.
"""
# add default filter expr to extend against
filterexpr = TableField.create(keyword='netprofiler_filterexpr')
section.fields.add(filterexpr)
# defaults if we are using hostgroup type field
hg_template = '{hostgroup_type}'
hg_parent = ['hostgroup_type']
hg_params = None
if hg_type is None:
# add hostgroup types field that queries netprofiler
field = TableField.create(
keyword='hostgroup_type',
label='HostGroup Type',
obj=report,
field_cls=forms.ChoiceField,
field_kwargs={'widget_attrs': {'class': 'form-control'}},
parent_keywords=['netprofiler_device'],
dynamic=True,
pre_process_func=Function(netprofiler_hostgroup_types)
)
section.fields.add(field)
elif type(hg_type) in (list, tuple):
# add hostgroup types field that uses given list
field = TableField.create(
keyword='hostgroup_type',
label='HostGroup Type',
obj=report,
field_cls=forms.ChoiceField,
field_kwargs={'choices': zip(hg_type, hg_type),
'widget_attrs': {'class': 'form-control'}},
parent_keywords=['netprofiler_device'],
)
section.fields.add(field)
else:
# no field, hardcode the given value
hg_template = hg_type
hg_parent = None
hg_params = {'hostgroup_type': hg_type}
# add hostgroup field
field = TableField.create(
keyword='hostgroup',
label='HostGroup',
obj=report,
field_cls=forms.ChoiceField,
field_kwargs={'widget_attrs': {'class': 'form-control'}},
parent_keywords=hg_parent,
dynamic=True,
pre_process_func=Function(netprofiler_hostgroups, params=hg_params)
)
section.fields.add(field)
NetProfilerTable.extend_filterexpr(
section, keyword='hg_filterexpr',
template='hostgroup %s:{hostgroup}' % hg_template
)
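# Illustrative calls (report/section are appfwk objects defined elsewhere;
# the hostgroup type names below are hypothetical):
#   add_netprofiler_hostgroup_field(report, section)
#       -> dropdowns for both HostGroupType and HostGroup
#   add_netprofiler_hostgroup_field(report, section, 'ByLocation')
#       -> HostGroupType fixed; only the HostGroup dropdown is shown
#   add_netprofiler_hostgroup_field(report, section, ['ByLocation', 'ByFunction'])
#       -> HostGroupType dropdown limited to the listed types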
class NetProfilerTable(DatasourceTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
_query_class = 'NetProfilerQuery'
TABLE_OPTIONS = {'groupby': None,
'realm': None,
'interface': None}
# default field parameters
FIELD_OPTIONS = {'duration': 60,
'durations': ('15 min', '1 hour',
'2 hours', '4 hours', '12 hours',
'1 day', '1 week', '4 weeks'),
'resolution': 'auto',
'resolutions': (('auto', 'Automatic'),
'1min', '15min', 'hour', '6hour'),
}
def post_process_table(self, field_options):
resolution = field_options['resolution']
if resolution != 'auto':
if isinstance(resolution, int):
res = resolution
else:
res = int(timedelta_total_seconds(parse_timedelta(resolution)))
resolution = Report.RESOLUTION_MAP[res]
field_options['resolution'] = resolution
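# For example (illustrative): resolution='15min' parses to a 900-second
# timedelta, and Report.RESOLUTION_MAP[900] yields the NetProfiler
# resolution token for 15 minutes.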
fields_add_device_selection(self, keyword='netprofiler_device',
label='NetProfiler', module='netprofiler',
enabled=True)
duration = field_options['duration']
if isinstance(duration, int):
duration = "%d min" % duration
fields_add_time_selection(self,
initial_duration=duration,
durations=field_options['durations'])
fields_add_resolution(self,
initial=field_options['resolution'],
resolutions=field_options['resolutions'],
special_values=['auto'])
self.fields_add_filterexpr()
def fields_add_filterexpr(self, keyword='netprofiler_filterexpr',
initial=None):
field = TableField(keyword=keyword,
label='NetProfiler Filter Expression',
help_text=('Traffic expression using NetProfiler '
'Advanced Traffic Expression syntax'),
initial=initial,
required=False)
field.save()
self.fields.add(field)
def fields_add_filterexprs_field(self, keyword):
field = self.fields.get(keyword='netprofiler_filterexpr')
field.post_process_func = Function(
function=_post_process_combine_filterexprs
)
parent_keywords = set(field.parent_keywords or [])
parent_keywords.add(keyword)
field.parent_keywords = list(parent_keywords)
field.save()
return field
@classmethod
def extend_filterexpr(cls, obj, keyword, template):
field = obj.fields.get(keyword='netprofiler_filterexpr')
field.post_process_func = Function(
function=_post_process_combine_filterexprs
)
TableField.create(
keyword=keyword, obj=obj, hidden=True,
post_process_template=template)
parent_keywords = set(field.parent_keywords or [])
parent_keywords.add(keyword)
field.parent_keywords = list(parent_keywords)
field.save()
class NetProfilerTimeSeriesTable(NetProfilerTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
TABLE_OPTIONS = {'groupby': 'time',
'realm': 'traffic_overall_time_series',
'interface': None,
'limit': None}
class NetProfilerGroupbyTable(NetProfilerTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
TABLE_OPTIONS = {'groupby': None,
'realm': 'traffic_summary',
'interface': None,
'limit': None}
class NetProfilerQuery(TableQueryBase):
def _prepare_report_args(self):
class Args(object):
pass
args = Args()
criteria = self.job.criteria
if criteria.netprofiler_device == '':
logger.debug('%s: No netprofiler device selected' % self.table)
self.job.mark_error("No NetProfiler Device Selected")
return False
args.profiler = DeviceManager.get_device(criteria.netprofiler_device)
args.columns = [col.name for col
in self.table.get_columns(synthetic=False)]
args.sortcol = None
if self.table.sortcols is not None:
args.sortcol = self.table.sortcols[0]
args.timefilter = TimeFilter(start=criteria.starttime,
end=criteria.endtime)
logger.info("Running NetProfiler table %d report for timeframe %s" %
(self.table.id, str(args.timefilter)))
if ('datafilter' in criteria) and (criteria.datafilter is not None):
args.datafilter = criteria.datafilter.split(',')
else:
args.datafilter = None
args.trafficexpr = TrafficFilter(
self.job.combine_filterexprs(exprs=criteria.netprofiler_filterexpr)
)
# Incoming criteria.resolution is a timedelta
logger.debug('NetProfiler report got criteria resolution %s (%s)' %
(criteria.resolution, type(criteria.resolution)))
if criteria.resolution != 'auto':
rsecs = int(timedelta_total_seconds(criteria.resolution))
args.resolution = Report.RESOLUTION_MAP[rsecs]
else:
args.resolution = 'auto'
logger.debug('NetProfiler report using resolution %s (%s)' %
(args.resolution, type(args.resolution)))
args.limit = (self.table.options.limit
if hasattr(self.table.options, 'limit') else None)
if getattr(self.table.options, 'interface', False):
args.centricity = 'int'
else:
args.centricity = 'hos'
return args
def _wait_for_data(self, report, minpct=0, maxpct=100):
criteria = self.job.criteria
done = False
logger.info("Waiting for report to complete")
while not done:
time.sleep(0.5)
with lock:
s = report.status()
logger.debug('Status: %s' % str(s))
pct = int(float(s['percent']) * ((maxpct - minpct)/100.0) + minpct)
self.job.mark_progress(progress=pct)
done = (s['status'] == 'completed')
# Retrieve the data
with lock:
data = report.get_data()
tz = criteria.starttime.tzinfo
# Update criteria
query = report.get_query_by_index(0)
criteria.starttime = (datetime.datetime
.utcfromtimestamp(query.actual_t0)
.replace(tzinfo=tz))
criteria.endtime = (datetime.datetime
.utcfromtimestamp(query.actual_t1)
.replace(tzinfo=tz))
self.job.safe_update(actual_criteria=criteria)
return data
def run(self):
""" Main execution method
"""
args = self._prepare_report_args()
with lock:
report = SingleQueryReport(args.profiler)
report.run(
realm=self.table.options.realm,
groupby=args.profiler.groupbys[self.table.options.groupby],
centricity=args.centricity,
columns=args.columns,
timefilter=args.timefilter,
trafficexpr=args.trafficexpr,
data_filter=args.datafilter,
resolution=args.resolution,
sort_col=args.sortcol,
sync=False,
limit=args.limit
)
data = self._wait_for_data(report)
if self.table.rows > 0:
data = data[:self.table.rows]
logger.info("Report %s returned %s rows" % (self.job, len(data)))
return QueryComplete(data)
#
# Template-based MultiQueryReports
#
class NetProfilerTemplateTable(NetProfilerTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
_query_class = 'NetProfilerTemplateQuery'
TABLE_OPTIONS = {'template_id': None}
class NetProfilerTemplateQuery(NetProfilerQuery):
# Used by Table to actually run a query
def run(self):
""" Main execution method. """
args = self._prepare_report_args()
with lock:
report = MultiQueryReport(args.profiler)
report.run(template_id=self.table.options.template_id,
timefilter=args.timefilter,
trafficexpr=args.trafficexpr,
resolution=args.resolution)
data = self._wait_for_data(report)
headers = report.get_legend()
# create dataframe with all of the default headers
df = pandas.DataFrame(data, columns=[h.key for h in headers])
# now filter down to the columns requested by the table
columns = [col.name for col in self.table.get_columns(synthetic=False)]
df = df[columns]
logger.info("Report %s returned %s rows" % (self.job, len(df)))
return QueryComplete(df)
#
# Traffic Time Series
#
# Time-series report with per-column criteria, as opposed to just an overall time series
#
class NetProfilerTrafficTimeSeriesTable(NetProfilerTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
TABLE_OPTIONS = {'base': None,
'groupby': None,
'col_criteria': None,
'interface': None,
'top_n': None,
'include_other': False}
_query_class = 'NetProfilerTrafficTimeSeriesQuery'
@classmethod
def process_options(cls, table_options):
# handle direct id's, table references, or table classes
# from tables option and transform to simple table id value
table_options['base'] = Table.to_ref(table_options['base'])
return table_options
def post_process_table(self, field_options):
super(NetProfilerTrafficTimeSeriesTable, self).post_process_table(
field_options)
if self.options.top_n is None:
# If not top-n, the criteria field 'query_columns' must
# either be a string or an array of column definitions
# (a string is simply parsed as json to the latter).
#
# This criteria field must resolve to an array of
# field definitions, one per column to be queried
#
# An array of column definitions looks like the following:
# [ {'name': <name>, 'label': <name>, 'json': <json>},
# {'name': <name>, 'label': <name>, 'json': <json>},
# ... ]
#
# Each element corresponds to a column requested. <name> is
# used as the Column.name, <label> is for the Column.label
# and json is what is passed on to NetProfiler in the POST
# to create the report
#
TableField.create(keyword='query_columns',
label='Query columns',
obj=self)
TSQ_Tuple = namedtuple('TSQ_Tuple', ['groupby', 'columns', 'parser'])
class NetProfilerTrafficTimeSeriesQuery(NetProfilerQuery):
# Dictionary of config for running time-series/top-n queries for a
# requested groupby. The components are:
#
# groupby: the groupby to use for the time-series query, usually
# just the plural form of the standard NetProfiler groupby
#
# columns: the key column(s) to ask for as part of the query
#
# parser: the name of the row parsing function that takes a row
# and converts the row/keys into the necessary form
# as required by the time-series groupby report (in run())
#
CONFIG = {
'port':
TSQ_Tuple('ports', ['protoport_parts'], 'parse_port'),
'application':
TSQ_Tuple('applications', ['app_name', 'app_raw'], 'parse_app'),
'host_group':
TSQ_Tuple('host_groups', ['group_name'], 'parse_host_group'),
'host_pair_protoport':
TSQ_Tuple('host_pair_ports', ['hostpair_protoport_parts'],
'parse_hostpair_protoport'),
}
@classmethod
def parse_app(cls, row):
app_name = row[0]
app_raw = row[1]
return {'name': app_name,
'label': app_name,
'json': {'code': app_raw}}
@classmethod
def parse_port(cls, row):
proto, port = row[0].split('|')
return {'name': '%s%s' % (proto, port),
'label': '%s/%s' % (proto, port),
'json': {'name': '%s/%s' % (proto, port)}}
@classmethod
def parse_host_group(cls, row):
group_name = row[0]
return {'name': group_name,
'label': group_name,
'json': {'name': group_name}}
@classmethod
def parse_hostpair_protoport(cls, row):
srv_ip, srv_name, cli_ip, cli_name, proto, port = row[0].split('|')
if not srv_name:
srv_name = srv_ip
if not cli_name:
cli_name = cli_ip
return {'name': '%s%s%s%s' % (srv_name, cli_name, proto, port),
'label': '%s - %s - %s/%s' % (srv_name, cli_name, proto, port),
'json': {'port': {'name': '%s/%s' % (proto, port)},
'server': {'ipaddr': '%s' % srv_ip},
'client': {'ipaddr': '%s' % cli_ip}}}
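# Illustration with a made-up key '10.0.0.1|srv.example.com|10.0.0.2||tcp|443':
# the empty client name falls back to the client IP, giving label
# 'srv.example.com - 10.0.0.2 - tcp/443' and a json dict filtering on that
# port, server IP and client IP.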
# Run a SingleQueryReport based on the requested groupby and
# return a list of column definitions that will be passed
# on to the TrafficTimeSeriesReport query_columns argument
def run_top_n(self, config, args, base_col, minpct, maxpct):
columns = config.columns + [base_col.name]
with lock:
report = SingleQueryReport(args.profiler)
report.run(
realm='traffic_summary',
centricity=args.centricity,
groupby=args.profiler.groupbys[self.table.options.groupby],
columns=columns,
timefilter=args.timefilter,
trafficexpr=args.trafficexpr,
resolution=args.resolution,
sort_col=base_col.name,
sync=False
)
rows = self._wait_for_data(report, minpct=minpct, maxpct=maxpct)
if not rows:
msg = ('Error computing top-n columns for TimeSeries report, '
'no columns were found.')
logger.error(msg)
return []
defs = []
parser = getattr(self, config.parser)
for row in rows[:int(self.table.options.top_n)]:
defs.append(parser(row))
return defs
# This is the main run method and will run up to 3 reports
#
# 1. Top-N report -- if table.options.top_n is specified, this report
# drives what columns are requested
#
# 2. TrafficTimeSeriesReport - a time-series report with one column
# per requested criteria.
#
# 3. Other report -- a time-series report showing all traffic, used to
# compute "other" if table.options.include_other
#
def run(self):
args = self._prepare_report_args()
base_table = Table.from_ref(self.table.options.base)
base_col = base_table.get_columns()[0]
# only calculate other when we aren't filtering data
include_other = self.table.options.include_other
if self.job.criteria.netprofiler_filterexpr:
include_other = False
if self.table.options.groupby not in self.CONFIG:
raise ValueError('not supported for groupby=%s' %
self.table.options.groupby)
config = self.CONFIG[self.table.options.groupby]
# num_reports / cur_report are used to compute min/max pct
num_reports = (1 +
(1 if self.table.options.top_n else 0) +
(1 if include_other else 0))
cur_report = 0
if self.table.options.top_n:
# Run a top-n report to drive the criteria for each column
query_column_defs = self.run_top_n(config, args, base_col,
minpct=0,
maxpct=(100/num_reports))
cur_report += 1
else:
query_column_defs = self.job.criteria.query_columns
if isinstance(query_column_defs, types.StringTypes):
query_column_defs = json.loads(query_column_defs)
query_columns = [col['json'] for col in query_column_defs]
if not query_columns:
msg = 'Unable to compute query columns for job %s' % self.job
logger.error(msg)
return QueryError(msg)
with lock:
report = TrafficTimeSeriesReport(args.profiler)
columns = [args.columns[0], base_col.name]
logger.info("Query Columns: %s" % str(query_columns))
if self.table.options.groupby == 'host_group':
host_group_type = 'ByLocation'
else:
host_group_type = None
report.run(
centricity=args.centricity,
columns=columns,
timefilter=args.timefilter,
trafficexpr=args.trafficexpr,
resolution=args.resolution,
sync=False,
host_group_type=host_group_type,
query_columns_groupby=config.groupby,
query_columns=query_columns
)
data = self._wait_for_data(report,
minpct=cur_report * (100/num_reports),
maxpct=(cur_report + 1) * (100/num_reports))
cur_report += 1
df = pandas.DataFrame(data,
columns=(['time'] + [col['name'] for
col in query_column_defs]))
# Create ephemeral columns for all the data based
# on the related base table
for col in query_column_defs:
Column.create(self.job.table, col['name'], col['label'],
ephemeral=self.job, datatype=base_col.datatype,
formatter=base_col.formatter)
if include_other:
# Run a separate timeseries query with no column filters
# to get "totals" then use that to compute an "other" column
with lock:
report = SingleQueryReport(args.profiler)
report.run(
realm='traffic_overall_time_series',
centricity=args.centricity,
groupby=args.profiler.groupbys['time'],
columns=columns,
timefilter=args.timefilter,
trafficexpr=args.trafficexpr,
resolution=args.resolution,
sync=False
)
totals = self._wait_for_data(report,
minpct=cur_report * (100/num_reports),
maxpct=(cur_report + 1) * (100/num_reports))
df = df.set_index('time')
df['subtotal'] = df.sum(axis=1)
totals_df = (pandas.DataFrame(totals, columns=['time', 'total'])
.set_index('time'))
df = df.merge(totals_df, left_index=True, right_index=True)
df['other'] = df['total'] - df['subtotal']
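# Illustrative numbers: if total traffic is 100 and the charted columns sum
# to a subtotal of 80, the computed 'other' column is 20.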
colnames = ['time'] + [col['name'] for col in query_column_defs] + ['other']
# Drop the extraneous total and subtotal columns
df = (df.reset_index().ix[:, colnames])
Column.create(self.job.table, 'other', 'Other',
ephemeral=self.job, datatype=base_col.datatype,
formatter=base_col.formatter)
logger.info("Report %s returned %s rows" % (self.job, len(df)))
return QueryComplete(df)
#
# Service reports
#
class NetProfilerServiceByLocTable(DatasourceTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
_query_class = 'NetProfilerServiceByLocQuery'
# rgb - red/yellow/green, if True return string values
# instead of numbers
TABLE_OPTIONS = {'rgb': True}
FIELD_OPTIONS = {'duration': '15min',
'durations': ('15 min', '1 hour',
'2 hours', '4 hours', '12 hours',
'1 day', '1 week', '4 weeks'),
}
def post_process_table(self, field_options):
fields_add_device_selection(self, keyword='netprofiler_device',
label='NetProfiler', module='netprofiler',
enabled=True)
duration = field_options['duration']
fields_add_time_selection(self,
initial_duration=duration,
durations=field_options['durations'])
class NetProfilerServiceByLocQuery(TableQueryBase):
def run(self):
""" Main execution method
"""
criteria = self.job.criteria
if criteria.netprofiler_device == '':
logger.debug('%s: No netprofiler device selected' % self.table)
self.job.mark_error("No NetProfiler Device Selected")
return False
profiler = DeviceManager.get_device(criteria.netprofiler_device)
report = ServiceLocationReport(profiler)
tf = TimeFilter(start=criteria.starttime,
end=criteria.endtime)
logger.info(
'Running NetProfilerServiceByLocTable %d report for timeframe %s' %
(self.table.id, str(tf)))
with lock:
report.run(timefilter=tf, sync=False)
done = False
logger.info("Waiting for report to complete")
while not done:
time.sleep(0.5)
with lock:
s = report.status()
self.job.mark_progress(progress=int(s['percent']))
done = (s['status'] == 'completed')
# Retrieve the data
with lock:
data = report.get_data()
query = report.get_query_by_index(0)
tz = criteria.starttime.tzinfo
# Update criteria
criteria.starttime = (datetime.datetime
.utcfromtimestamp(query.actual_t0)
.replace(tzinfo=tz))
criteria.endtime = (datetime.datetime
.utcfromtimestamp(query.actual_t1)
.replace(tzinfo=tz))
self.job.safe_update(actual_criteria=criteria)
if len(data) == 0:
return QueryComplete(None)
# Add ephemeral columns for everything
Column.create(self.job.table, 'location', 'Location',
ephemeral=self.job, datatype='string')
for k in data[0].keys():
if k == 'location':
continue
Column.create(self.job.table, k, k,
ephemeral=self.job, datatype='string',
formatter='rvbd.formatHealth')
df = pandas.DataFrame(data)
if self.job.table.options.rgb:
state_map = {Service.SVC_NOT_AVAILABLE: 'gray',
Service.SVC_DISABLED: 'gray',
Service.SVC_INIT: 'gray',
Service.SVC_NORMAL: 'green',
Service.SVC_LOW: 'yellow',
Service.SVC_MED: 'yellow',
Service.SVC_HIGH: 'red',
Service.SVC_NODATA: 'gray'}
df = df.replace(state_map.keys(),
state_map.values())
return QueryComplete(df)
class NetProfilerHostPairPortTable(NetProfilerTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
TABLE_OPTIONS = {'groupby': 'host_pair_protoport',
'realm': 'traffic_summary',
'interface': None,
'limit': None,
'sort_col': 'in_avg_bytes'}
_query_class = 'NetProfilerHostPairPortQuery'
class NetProfilerHostPairPortQuery(NetProfilerQuery):
def run(self):
""" Main execution method
"""
args = self._prepare_report_args()
with lock:
report = SingleQueryReport(args.profiler)
report.run(
realm=self.table.options.realm,
groupby=args.profiler.groupbys[self.table.options.groupby],
centricity=args.centricity,
columns=args.columns,
timefilter=args.timefilter,
trafficexpr=args.trafficexpr,
data_filter=args.datafilter,
resolution=args.resolution,
sort_col=self.table.options.sort_col,
sync=False,
limit=args.limit
)
data = self._wait_for_data(report)
if not data:
msg = 'Report %s returned no data' % self.job
logger.error(msg)
return QueryError(msg)
def tonumber(s):
# return an int if the string represents an integer,
# a float if it represents a float
# None otherwise.
# check the int first since float() captures both
try:
return int(s)
except ValueError:
try:
return float(s)
except (TypeError, ValueError):
return None
others = []
totals = []
for i, col in enumerate(args.columns):
if i == 0:
others.append(u'Others')
totals.append(u'Total')
elif tonumber(data[0][i]):
others.append(0)
totals.append(0)
else:
others.append(u'')
totals.append(u'')
for i, row in enumerate(data):
for j, col in enumerate(args.columns):
val = tonumber(row[j])
if val:
row[j] = val
totals[j] += row[j]
if i > self.table.rows:
others[j] += row[j]
# Clip the table at the row limit, then add two more
# for other and total
if self.table.rows > 0:
data = data[:self.table.rows]
self.table.rows += 2
data.append(others)
data.append(totals)
# Formatting:
# - Add percents of total to numeric columns
# - Strip "ByLocation|" from the groups if it exists
# - Parse dns
for row in data:
for j, col in enumerate(args.columns):
if isinstance(row[j], float):
row[j] = "%.2f (%.0f%%)" % \
(row[j], 100 * row[j] / totals[j])
elif isinstance(row[j], int):
row[j] = "%d (%.0f%%)" % \
(row[j], 100 * row[j] / totals[j])
elif isinstance(row[j], str):
if row[j].startswith('ByLocation|'):
row[j] = row[j][11:]
elif ((col == 'cli_host_dns' or col == 'srv_host_dns')
and ('|' in row[j])):
# If we're using dns columns, they are ip|name
# We should use the name if it's non-empty,
# ip otherwise
ip, name = row[j].split('|')
if name:
row[j] = name
else:
row[j] = ip
logger.info("Report %s returned %s rows" % (self.job, len(data)))
return QueryComplete(data)
```
#### File: appfwk/libs/profiler_tools.py
```python
import pandas
import logging
from steelscript.appfwk.apps.datasource.modules.analysis import \
AnalysisTable, AnalysisQuery
logger = logging.getLogger(__name__)
def process_interface_dns_elem(interface_dns):
parts = interface_dns.split("|")
ip = parts[0]
name = parts[1]
ifindex = parts[2]
if name is not "":
return name + ":" + ifindex
else:
return ip + ":" + ifindex
def process_interface_dns(target, tables, criteria, params):
table = tables['table']
table['interface_dns'] = table['interface_dns'].map(process_interface_dns_elem)
return table
def explode_interface_dns(interface_dns):
parts = interface_dns.split("|")
ip = parts[0]
ifindex = parts[2]
ifdescr = parts[4]
return ip, ifindex, ifdescr
class ProfilerMergeIpDeviceTable(AnalysisTable):
class Meta:
proxy = True
app_label = 'steelscript.netprofiler.appfwk'
_query_class = 'ProfilerMergeIpDeviceQuery'
@classmethod
def create(cls, name, devices, traffic, **kwargs):
kwargs['tables'] = {'devices': devices,
'traffic' : traffic}
return super(ProfilerMergeIpDeviceTable, cls).create(name, **kwargs)
def post_process_table(self, field_options):
super(ProfilerMergeIpDeviceTable, self).post_process_table(field_options)
self.add_column('interface_name', 'Interface', iskey=True,
datatype="string")
self.copy_columns(self.options['tables']['traffic'],
except_columns=['interface_dns'])
class ProfilerMergeIpDeviceQuery(AnalysisQuery):
def post_run(self):
dev = self.tables['devices']
tr = self.tables['traffic']
if tr is None or len(tr) == 0:
self.data = None
return True
if dev is None or len(dev) == 0:
self.data = tr
return True
dev = dev.copy()
tr['interface_ip'], tr['interface_index'], tr['interface_ifdescr'] = \
zip(*tr['interface_dns'].map(explode_interface_dns))
df = pandas.merge(tr, dev,
left_on='interface_ip',
right_on='ipaddr',
how='left')
# Set the name to the ip addr wherever the name is empty
nullset = ((df['name'].isnull()) | (df['name'] == ''))
df.ix[nullset, 'name'] = df.ix[nullset, 'interface_ip']
# Set ifdescr to the index if empty
df['ifdescr'] = df['interface_ifdescr']
nullset = ((df['ifdescr'].isnull()) | (df['ifdescr'] == ''))
df.ix[nullset, 'ifdescr'] = df.ix[nullset, 'interface_index']
# Compute the name from the name and ifdescr
df['interface_name'] = (df['name'].astype(str) +
':' +
df['ifdescr'].astype(str))
self.data = df
return True
```
#### File: netprofiler/core/_exceptions.py
```python
class ProfilerException(Exception):
pass
class ProfilerHTTPException(ProfilerException):
def __init__(self, *args, **kwargs):
ProfilerException.__init__(self, *args, **kwargs)
class InvalidGroupbyException(ProfilerException):
def __init__(self, *args, **kwargs):
ProfilerException.__init__(self, *args, **kwargs)
class InvalidColumnException(ProfilerException):
def __init__(self, *args, **kwargs):
super(InvalidColumnException, self).__init__(*args, **kwargs)
``` |
{
"source": "jkraenzle/steelscript",
"score": 2
} |
#### File: steelscript/commands/steel.py
```python
import re
import os
import sys
import time
import glob
import getpass
import optparse
import subprocess
from optparse import OptionParser, OptionGroup
from threading import Thread
from functools import partial
from collections import deque
from pkg_resources import (get_distribution, iter_entry_points, parse_version,
DistributionNotFound, AvailableDistributions)
try:
import importlib
has_importlib = True
except ImportError:
has_importlib = False
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
import logging
if __name__ == '__main__':
logger = logging.getLogger('steel')
logger.setLevel(logging.ERROR)
else:
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
try:
__VERSION__ = get_distribution('steelscript').version
except DistributionNotFound:
__VERSION__ = None
LOG_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'critical': logging.CRITICAL,
'error': logging.ERROR
}
LOGFILE = None
STEELSCRIPT_CORE = ['steelscript',
'steelscript.netprofiler',
'steelscript.appresponse',
'steelscript.packets',
'steelscript.wireshark',
]
STEELSCRIPT_APPFW = ['steelscript.appfwk',
'steelscript.appfwk.business-hours']
STEELSCRIPT_STEELHEAD = ['steelscript.cmdline',
'steelscript.scc',
'steelscript.steelhead']
logging_initialized = False
class ShellFailed(Exception):
def __init__(self, returncode, output=None):
self.returncode = returncode
self.output = output
class MainFailed(Exception):
pass
class _Parser(OptionParser):
"""Custom OptionParser that does not re-flow the description."""
def format_description(self, formatter):
return self.description
class PositionalArg(object):
def __init__(self, keyword, help, dest=None):
self.keyword = keyword.upper()
self.help = help
if dest:
self.dest = dest
else:
self.dest = keyword.replace('-', '_')
class BaseCommand(object):
"""Parent class for all Command objects."""
# The keyword to select this command, not used
# for the root Command in a file
keyword = None
# Short help for this command
help = None
# Submodule to search for subcommands of this command
submodule = None
def __init__(self, parent=None):
self.parent = parent
if self.keyword is None:
self.keyword = self.__module__.split('.')[-1]
# OptionParser created when parse() called
self.parser = None
# Positional args
self.positional_args = []
self._positional_args_initialized = False
# options and args filled in by parse()
self.args = None
self.options = None
# List of subcommands
self._subcommands = []
self._subcommands_loaded = False
if parent:
self.parent._subcommands.append(self)
def add_positional_arg(self, keyword, help, dest=None):
self.positional_args.append(PositionalArg(keyword, help, dest=dest))
def add_positional_args(self):
pass
@property
def subcommands(self):
# On demand, load additional subcommands from self.submodule
# if defined.
if not self._subcommands_loaded:
self._subcommands_loaded = True
if self.positional_args:
pass
elif self.submodule:
self.load_subcommands()
return self._subcommands
def usage(self, fromchild=False):
# Customize the usage string based on whether there are any
# subcommands
if self.parent:
parentusage = self.parent.usage(True)
if parentusage:
base = '%s %s' % (parentusage, self.keyword)
else:
base = self.keyword
else:
base = '%prog'
if self.positional_args:
for p in self.positional_args:
base = base + ' <' + p.keyword.upper() + '>'
return '%s [options] ...' % base
elif self.subcommands:
if fromchild:
return base
return '%s [command] ...' % base
else:
return '%s [options]' % base
def version(self):
return '%s (%s)' % (__VERSION__, os.path.abspath(__file__))
def description(self):
# Customize the description. If there are subcommands,
# build a help table.
if self.help is not None:
lines = self.help.strip().split('\n')
desc = '\n'.join([' ' + line for line in lines]) + '\n'
else:
desc = ''
def add_help_items(title, items, desc):
help_items = [(sc.keyword,
(sc.help or '').split('\n')[0]) for sc in items]
help_items.sort(key=lambda item: item[0])
maxkeyword = max([len(sc.keyword) for sc in items])
maxkeyword = max(10, maxkeyword)
if desc:
desc = desc + '\n'
desc = desc + (
title + ':\n' +
'\n'.join([' %-*s %s' % (maxkeyword, item[0], item[1] or '')
for item in help_items]) + '\n')
return desc
if self.positional_args:
desc = add_help_items('Required Arguments',
self.positional_args, desc)
elif self.subcommands:
desc = add_help_items('Sub Commands',
self.subcommands, desc)
return desc
def _load_command(self, mod):
try:
return mod.Command(self)
except AttributeError:
if str(mod.__name__) != 'steelscript.commands.steel':
logger.warning('Module has no Command class: %s' % str(mod))
return None
except:
raise
def load_subcommands(self):
# Look for *.py files in self.submodule and try to
# construct a Command() object from each one.
if not has_importlib:
return None
try:
i = importlib.import_module(self.submodule)
except ImportError:
return
for f in glob.glob(os.path.join(os.path.dirname(i.__file__), '*.py')):
base_f = os.path.basename(f)
# Always skip __init__.py and this script
if base_f == '__init__.py' or os.path.abspath(f) == __file__:
continue
n = '%s.%s' % (self.submodule,
os.path.splitext(base_f)[0])
i = importlib.import_module(n)
self._load_command(i)
def add_options(self, parser):
add_log_options(parser)
def parse(self, args):
"""Parse the argument list."""
if self.parent is None:
start_logging(args)
if not self._positional_args_initialized:
self._positional_args_initialized = True
self.add_positional_args()
# Look for subcommands, strip them off and pass the
# remaining args to the subcommands. If there are
# positional args, skip this step
if (not self.positional_args and
len(args) > 0 and
not args[0].startswith('-')):
subcmds = [subcmd for subcmd in self.subcommands
if subcmd.keyword == args[0]]
if subcmds:
# Found a matching registered subcommand
subcmds[0].parse(args[1:])
return
if not self.parser:
# Create a parser
self.parser = _Parser(usage=self.usage(),
version=self.version(),
description=self.description())
self.add_options(self.parser)
if not self.positional_args and args and not args[0].startswith('-'):
self.parser.error('Unrecognized command: {cmd}'
.format(cmd=args[0]))
(self.options, self.args) = self.parser.parse_args(args)
if self.positional_args:
if len(self.args) < len(self.positional_args):
self.parser.error('Missing required argument: %s'
% self.positional_args[0].keyword)
for i, p in enumerate(self.positional_args):
setattr(self.options, p.dest, self.args[i])
self.args = self.args[len(self.positional_args):]
self.validate_args()
self.setup()
try:
self.main()
except MainFailed as e:
console(e, logging.ERROR)
sys.exit(1)
def validate_args(self):
"""Hook for validating parsed options/arguments.
If validation fails, this function should raise an error with
self.parser.error(msg) or raise an exception.
If defined in a subclass, the subclass must recursively
call validate_args() of the parent:
super(<subclass>, self).validate_args()
"""
pass
def setup(self):
"""Commands to run before execution.
If defined in a subclass, the subclass will mostly
want to call setup() of the parent via:
super(<subclass>, self).setup()
This will ensure that any setup required of the parent
classes is performed as well.
"""
pass
def main(self):
"""Body of the execution for this command.
This is where subclasses should define the action of this
command. By this point all command line arguments are
parsed and stored as attributes."""
if self.args:
self.parser.error('Unrecognized command: {cmd}'
.format(cmd=self.args[0]))
self.parser.print_help()
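# A minimal subclass sketch (hypothetical plugin module, e.g.
# steelscript/commands/hello.py) showing the shape load_subcommands()
# expects -- a module-level class named Command:
#
#     class Command(BaseCommand):
#         help = 'Print a greeting'
#
#         def add_options(self, parser):
#             super(Command, self).add_options(parser)
#             parser.add_option('--name', default='world')
#
#         def main(self):
#             print('Hello, %s!' % self.options.name)
#
# The keyword defaults to the module name ('hello'), so this would be run
# as 'steel hello --name=reader'.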
class SteelCommand(BaseCommand):
"""The 'steel' command, top of all other commands."""
keyword = 'steel'
help = 'SteelScript commands'
submodule = 'steelscript.commands'
@property
def subcommands(self):
# In addition to looking in the submodule, look
# for entry points labeled 'steel.commands'
if not self._subcommands_loaded:
super(SteelCommand, self).subcommands
for obj in iter_entry_points(group='steel.commands', name=None):
i = obj.load(obj)
# See if the entry point has a Command defined
# in __init__.py
if hasattr(i, 'Command'):
self._load_command(i)
else:
# If not, just create a simple command based
# on this submodule
cmd = BaseCommand(self)
cmd.keyword = obj.name
cmd.submodule = i.__name__
cmd.help = ('Commands for {mod} module'
.format(mod=i.__name__))
return self._subcommands
class InstallCommand(BaseCommand):
keyword = 'install'
help = 'Package installation'
def add_options(self, parser):
group = OptionGroup(parser, 'Package installation options')
group.add_option(
'-U', '--upgrade', action='store_true', default=False,
help='Upgrade packages that are already installed')
group.add_option(
'-d', '--dir', action='store', default=None,
help='Directory to use for offline installation')
group.add_option(
# Install packages from GitHub
'-g', '--github', action='store_true',
help="Install packages from github")
group.add_option(
# Dev only - install packages from bitbucket
'-B', '--bitbucket', action='store_true',
help=optparse.SUPPRESS_HELP)
group.add_option(
# Install packages from a git url
'--giturl', action='store',
help=optparse.SUPPRESS_HELP)
group.add_option(
'--develop', action='store_true',
help='Combine with --github to checkout packages in develop mode')
group.add_option(
'-p', '--package', action='append', dest='packages',
help='Package to install (may specify more than once)')
group.add_option(
'--appfwk', action='store_true',
help='Install all application framework packages')
group.add_option(
'--pip-options', default='',
help='Additional options to pass to pip')
group.add_option(
'--steelhead', action='store_true',
help='Install optional steelhead packages')
parser.add_option_group(group)
def validate_args(self):
if self.options.packages is None:
self.options.packages = STEELSCRIPT_CORE
if self.options.appfwk:
self.options.packages.extend(STEELSCRIPT_APPFW)
if self.options.steelhead:
self.options.packages.extend(STEELSCRIPT_STEELHEAD)
if self.options.develop:
if not self.options.dir:
console('Must specify a directory (--dir)')
sys.exit(1)
if self.options.upgrade:
console('Cannot upgrade development packages, '
'use git directly')
sys.exit(1)
def main(self):
if not is_root() and not in_venv():
console(
('Running installation as user {username} may not have \n'
'correct privileges to install packages. Consider \n'
'running as root or creating a virtualenv.\n')
.format(username=username()))
if not prompt_yn('Continue with installation anyway?',
default_yes=False):
console('\n*** Aborting installation ***\n')
if not check_virtualenv():
console('Install virtualenv:\n'
'$ sudo pip install virtualenv\n\n'
'Create a new virtual environment\n'
'$ virtualenv <name>\n\n'
'Activate the new virtual environment\n'
'$ source <name>/bin/activate\n\n')
sys.exit(1)
if self.options.giturl:
self.install_git(self.options.giturl)
elif self.options.bitbucket:
self.install_bitbucket()
elif self.options.github:
self.install_github()
elif self.options.dir:
self.install_dir()
else:
self.install_pip()
def prepare_appfwk(self):
# Any special-case appfwk packages should be handled here.
# Check for numpy/pandas and check for prerequisites
if not all([pkg_installed('numpy'), pkg_installed('pandas')]):
import platform
if platform.system() == 'Windows':
if not check_vcpython27():
console('Please follow the instructions found here:\n'
'https://support.riverbed.com/apis/steelscript/'
'appfwk/install.html#detailed-installation\n')
sys.exit(1)
elif not exe_installed('gcc'):
console('Unable to detect installed compiler `gcc`\n'
'which is required for installation of\n'
'`pandas` and `numpy` packages.')
base_msg = ('The following commands should install\n'
'the dependencies, though they will need\n'
'root privileges:\n')
if exe_installed('yum'):
console(base_msg +
'> sudo yum clean all\n'
'> sudo yum groupinstall "Development tools"\n'
'> sudo yum install python-devel\n')
elif exe_installed('apt-get'):
console(
base_msg +
'> sudo apt-get update\n'
'> sudo apt-get install build-essential python-dev\n'
)
else:
console('Cannot determine appropriate package manager\n'
'for your OS. Please run the `steel about -v`\n'
'command and post that information as a question\n'
'to the Splash community here:\n'
'https://splash.riverbed.com/community/'
'product-lines/steelscript\n')
sys.exit(1)
def prepare_cmdline(self):
# Manually install pycrypto if it is a windows platform
if not pkg_installed('pycrypto'):
import platform
if platform.system() == 'Windows' and not check_vcpython27():
console('Please follow the instructions found here:\n'
'https://support.riverbed.com/apis/steelscript/'
'install/steelhead.html#detailed-installation')
sys.exit(1)
def install_git(self, baseurl):
"""Install packages from a git repository."""
try:
check_git()
except ShellFailed:
console('no\ngit is not installed, please install git to continue',
lvl=logging.ERROR)
sys.exit(1)
check_install_pip()
for pkg in self.options.packages:
if pkg_installed(pkg) and not self.options.upgrade:
console('Package {pkg} already installed'.format(pkg=pkg))
continue
repo = '{baseurl}/{pkg}.git'.format(
baseurl=baseurl, pkg=pkg.replace('.', '-'))
if pkg == 'steelscript.appfwk':
self.prepare_appfwk()
if pkg == 'steelscript.cmdline':
self.prepare_cmdline()
if self.options.develop:
# Clone the git repo
outdir = os.path.join(self.options.dir, pkg.replace('.', '-'))
shell(cmd=('git clone --recursive {repo} {outdir}'
.format(repo=repo, outdir=outdir)),
msg=('Cloning {repo}'.format(repo=repo)))
# Install the requirements.txt
reqfile = os.path.join(outdir, 'requirements.txt')
if os.path.exists(reqfile):
shell(
cmd=('pip install -r {reqfile} {pip_options} '
.format(reqfile=reqfile,
pip_options=self.options.pip_options)),
msg=('Installing {pkg} requirements'.format(pkg=pkg))
)
# Now install this git repo in develop mode
shell(cmd=('cd {outdir}; pip install -e .'
.format(outdir=outdir)),
msg=('Installing {pkg}'.format(pkg=pkg)))
else:
suffix = 'git+{repo} '.format(repo=repo)
self.pip_install_pkg_with_upgrade(pkg, suffix=suffix)
def install_bitbucket(self):
"""Install packages from bitbucket internal to riverbed."""
check_install_pip()
self.install_git('https://code.rvbdtechlabs.net/scm/sct')
def install_github(self):
"""Install packages from github.com/riverbed."""
check_install_pip()
self.install_git('https://github.com/riverbed')
def install_dir(self):
check_install_pip()
if not self.options.dir:
console('Must specify package directory (--dir)')
sys.exit(1)
for pkg in self.options.packages:
if pkg_installed(pkg) and not self.options.upgrade:
console('Package {pkg} already installed'.format(pkg=pkg))
continue
if pkg == 'steelscript.appfwk':
self.prepare_appfwk()
if pkg == 'steelscript.cmdline':
self.prepare_cmdline()
suffix = ('--no-index --find-links=file://{dir} {pkg}'.
format(dir=self.options.dir, pkg=pkg))
self.pip_install_pkg_with_upgrade(pkg, suffix=suffix)
def pip_install_pkg_with_upgrade(self, pkg, suffix=''):
"""Perform "Only if needed" recursive upgrade install for pkg and
its dependencies.
https://pip.pypa.io/en/latest/user_guide.html
#only-if-needed-recursive-upgrade
"""
if self.options.upgrade:
self.pip_install_pkg(pkg, upgrade=True, suffix=suffix)
self.pip_install_pkg(pkg, suffix=suffix)
def pip_install_pkg(self, pkg, upgrade=False, suffix=''):
cmd = (('pip install {pip_options} {upgrade} {suffix}')
.format(suffix=suffix,
upgrade=('-U --no-deps'
if upgrade else ''),
pip_options=self.options.pip_options))
shell(cmd=cmd,
msg=('Installing {pkg}'.format(pkg=pkg)))
def install_pip(self):
check_install_pip()
for pkg in self.options.packages:
if pkg_installed(pkg) and not self.options.upgrade:
console('Package {pkg} already installed'.format(pkg=pkg))
continue
if pkg == 'steelscript.appfwk':
self.prepare_appfwk()
if pkg == 'steelscript.cmdline':
self.prepare_cmdline()
self.pip_install_pkg_with_upgrade(pkg, suffix=pkg)
class UninstallCommand(BaseCommand):
keyword = 'uninstall'
help = 'Uninstall all SteelScript packages'
def add_options(self, parser):
group = OptionGroup(parser, 'Package uninstall options')
group.add_option(
'--non-interactive', action='store_true', default=False,
help='Remove packages without prompting for input')
parser.add_option_group(group)
def main(self):
if not is_root() and not in_venv():
console(
('Uninstallation as user {username} may not have \n'
'correct privileges to remove packages. Consider \n'
'running as root or activating an existing virtualenv.\n')
.format(username=username()))
        if not prompt_yn('Continue with uninstall anyway?',
default_yes=False):
console('\n*** Aborting uninstall ***\n')
sys.exit(1)
self.uninstall()
def uninstall(self):
e = AvailableDistributions()
pkgs = [x for x in e if x.startswith('steel')]
pkgs.sort()
if not self.options.non_interactive:
if pkgs:
console('The following packages will be removed:\n{pkgs}\n'
.format(pkgs='\n'.join(pkgs)))
console('The `steel` command will be removed as part of this\n'
'operation. To reinstall steelscript you can run\n'
'`pip install steelscript`, or follow an alternative\n'
'method described at '
'http://pythonhosted.com/steelscript\n')
if not prompt_yn('Continue with uninstall?',
default_yes=False):
console('\n*** Aborting uninstall ***\n')
sys.exit(1)
else:
console('Nothing to uninstall.')
for pkg in pkgs:
self.remove_pkg(pkg)
def remove_pkg(self, pkg):
cmd = 'pip uninstall -y {pkg}'.format(pkg=pkg)
shell(cmd=cmd, msg='Uninstalling {pkg}'.format(pkg=pkg))
def pkg_installed(pkg):
try:
out = shell('pip show {pkg}'.format(pkg=pkg),
allow_fail=True, save_output=True)
return pkg in out
except ShellFailed:
return False
def exe_installed(exe):
# linux/mac only
try:
shell('which {exe}'.format(exe=exe), allow_fail=True)
return True
except ShellFailed:
return False
def prompt_yn(msg, default_yes=True):
yn = prompt(msg, choices=['yes', 'no'],
default=('yes' if default_yes else 'no'))
return yn == 'yes'
def prompt(msg, choices=None, default=None, password=False):
if choices is not None:
msg = '%s (%s)' % (msg, '/'.join(choices))
if default is not None:
msg = '%s [%s]' % (msg, default)
msg += ': '
value = None
while value is None:
if password:
value = getpass.getpass(msg)
else:
value = input(msg)
if not value:
if default is not None:
value = default
            else:
                print('Please enter a valid response.')
                # Re-prompt instead of returning an empty string
                value = None
if choices and value not in choices:
print(('Please choose from the following choices (%s)' %
'/'.join(choices)))
value = None
return value
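# Example of the rendered prompt (illustrative sketch):
#
#   prompt_yn('Continue with uninstall?', default_yes=False)
#
# displays "Continue with uninstall? (yes/no) [no]: "; pressing Enter accepts the
# default 'no', so prompt_yn() returns False.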
def add_log_options(parser):
group = optparse.OptionGroup(parser, "Logging Parameters")
group.add_option("--loglevel",
help="log level: debug, warn, info, critical, error",
choices=list(LOG_LEVELS.keys()), default="info")
group.add_option("--logfile",
help="log file, use '-' for stdout", default=None)
parser.add_option_group(group)
def start_logging(args):
"""Start up logging.
This must be called only once and it will not work
if logging.basicConfig() was already called."""
global logging_initialized
if logging_initialized:
return
logging_initialized = True
# Peek into the args for loglevel and logfile
logargs = []
for i, arg in enumerate(args):
if arg in ['--loglevel', '--logfile']:
logargs.append(arg)
logargs.append(args[i+1])
elif re.match('--log(level|file)=', arg):
logargs.append(arg)
parser = OptionParser()
add_log_options(parser)
(options, args) = parser.parse_args(logargs)
global LOGFILE
if options.logfile == '-':
LOGFILE = None
else:
if options.logfile is not None:
LOGFILE = options.logfile
else:
LOGFILE = os.path.join(os.path.expanduser('~'),
'.steelscript',
'steel.log')
logdir = os.path.dirname(LOGFILE)
if logdir and not os.path.exists(logdir):
os.makedirs(logdir)
logging.basicConfig(
level=LOG_LEVELS[options.loglevel],
filename=LOGFILE,
format='%(asctime)s [%(levelname)-5.5s] (%(name)s) %(message)s')
logger.info("=" * 70)
logger.info("==== Started logging: %s" % ' '.join(sys.argv))
def try_import(m):
"""Try to import a module by name, return None on fail."""
if not has_importlib:
return None
try:
i = importlib.import_module(m)
return i
except ImportError:
return None
def console(msg, lvl=logging.INFO, newline=True):
    # Log a message and also echo it to the console
logger.log(lvl, msg)
m = (sys.stderr if lvl == logging.ERROR else sys.stdout)
m.write(msg)
if newline:
m.write('\n')
m.flush()
debug = partial(console, lvl=logging.DEBUG)
def shell(cmd, msg=None, allow_fail=False, exit_on_fail=True,
env=None, cwd=None, save_output=False, log_output=True):
"""Run `cmd` in a shell and return the result.
:raises ShellFailed: on failure
"""
if msg:
console(msg + '...', newline=False)
def enqueue_output(out, queue):
for line in iter(out.readline, ''):
logger.debug('putting {} on queue'.format(line))
queue.put(line)
out.close()
logger.debug('closing queue')
logger.info('Running command: %s' % cmd)
proc = subprocess.Popen(cmd, shell=True, env=env, cwd=cwd, bufsize=1,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
q = Queue()
t = Thread(target=enqueue_output, args=(proc.stdout, q))
t.daemon = True # thread dies with the program
t.start()
output = [] if save_output else None
tail = deque(maxlen=10)
def drain_to_log(q, output):
stalled = False
while not stalled:
try:
line = q.get_nowait()
line = line.rstrip()
if output is not None:
output.append(line)
tail.append(line)
if log_output:
logger.info('shell: %s' % line.rstrip())
except Empty:
stalled = True
lastdot = time.time()
interval = 0.002
max_interval = 0.5
while t.is_alive():
now = time.time()
if now - lastdot > 4 and msg:
sys.stdout.write('.')
sys.stdout.flush()
lastdot = now
drain_to_log(q, output)
time.sleep(interval)
interval = min(interval * 2, max_interval)
t.join()
proc.poll()
drain_to_log(q, output)
if proc.returncode > 0:
if msg and not allow_fail:
console('failed')
logger.log((logging.INFO if allow_fail else logging.ERROR),
'Command failed with return code %s' % proc.returncode)
if not allow_fail and exit_on_fail:
console('Command failed: %s' % cmd)
for line in tail:
print(' ', line)
if LOGFILE:
console('See log for details: %s' % LOGFILE)
sys.exit(1)
if output is not None:
output = '\n'.join(output)
raise ShellFailed(proc.returncode, output)
if msg:
console('done')
if save_output:
if output:
return '\n'.join(str(x) for x in output)
return ''
return None
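# Illustrative calls (a sketch, mirroring how shell() is used elsewhere in this module):
#
#   out = shell('pip show steelscript', allow_fail=True, save_output=True)
#       - returns the captured stdout as a single string; on a non-zero exit code it
#         raises ShellFailed instead of terminating the process.
#
#   shell('git --version', msg='Checking if git is installed', allow_fail=True)
#       - prints "Checking if git is installed..." (plus progress dots for long-running
#         commands) and "done" on success; without allow_fail a failure prints the tail
#         of the output and exits.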
def username():
try:
return os.environ['USER']
except KeyError:
return os.environ['USERNAME']
def is_root():
# detect if user has root or Administrator rights
try:
return os.getuid() == 0
except AttributeError:
try:
import ctypes
            # IsUserAnAdmin() returns nonzero when the user has Administrator rights
            return ctypes.windll.shell32.IsUserAnAdmin() != 0
except AttributeError:
return False
def in_venv():
    # Check if we're running inside a virtualenv (relies on the VIRTUAL_ENV variable)
return 'VIRTUAL_ENV' in os.environ
def check_git():
"""Checks if git installed, raises ShellFailed if not."""
try:
shell(cmd='git --version',
msg='Checking if git is installed',
allow_fail=True)
return True
except ShellFailed:
raise ShellFailed('git is not installed')
def check_install_pip():
try:
out = shell('pip --version',
msg='Checking if pip is installed',
allow_fail=True, save_output=True)
version = out.split()[1]
if parse_version(version) < parse_version('1.4.0'):
upgrade_pip()
return
except ShellFailed:
pass
console('no')
shell('easy_install pip',
msg='Installing pip via easy_install')
def upgrade_pip():
try:
shell('pip install --upgrade pip',
msg='Upgrading pip to compatible version',
allow_fail=True)
except ShellFailed:
console('unable to upgrade pip. steelscript requires\n'
'at least version `1.4.0` to be installed.',
lvl=logging.ERROR)
sys.exit(1)
def check_virtualenv():
try:
shell(cmd='virtualenv --version', allow_fail=True)
return True
except ShellFailed:
return False
def check_vcpython27():
try:
shell(cmd='where /R C:\\Users Visual*C++*2008*-bit*Command*Prompt.lnk',
allow_fail=True)
return True
except ShellFailed:
return False
def run():
# Main entry point as a script from setup.py
# If run as a script directly
# Create the main command
cmd = SteelCommand()
# Manually add commands in this module
install = InstallCommand(cmd)
uninstall = UninstallCommand(cmd)
try:
cmd.parse(sys.argv[1:])
except KeyboardInterrupt:
pass
if __name__ == '__main__':
run()
```
#### File: steelscript/common/timeutils.py
```python
import re
import time
import calendar
from datetime import datetime, timedelta, tzinfo
from decimal import Decimal
from dateutil.relativedelta import relativedelta
__all__ = ['ensure_timezone', 'force_to_utc', 'datetime_to_seconds',
'datetime_to_microseconds', 'datetime_to_nanoseconds',
'usec_string_to_datetime', 'nsec_string_to_datetime',
'timedelta_total_seconds', 'timedelta_str',
'TimeParser', 'parse_timedelta', 'parse_range']
#
# tzinfo objects for utc and the local timezone,
# https://launchpad.net/dateutil
#
ZERO = timedelta(0)
EPOCHORDINAL = datetime.utcfromtimestamp(0).toordinal()
class tzutc(tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzlocal(tzinfo):
_std_offset = timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
# >>> import tz, datetime
# >>> t = tz.tzlocal()
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
def ensure_timezone(dt):
"""Return a datetime object that corresponds to `dt` but that always has
timezone info.
If `dt` already has timezone info, then it is simply returned.
If `dt` does not have timezone info, then the local time zone is assumed.
"""
if dt.tzinfo is not None:
return dt
return dt.replace(tzinfo=tzlocal())
def force_to_utc(dt):
"""Return a datetime object that corresponds to `dt` but in UTC rather than
local time.
If `dt` includes timezone info, then this routine simply converts from the
given timezone to UTC.
If `dt` does not include timezone info, then it is assumed to be in local
time, which is then converted to UTC.
"""
return ensure_timezone(dt).astimezone(tzutc())
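# Illustrative behaviour (a sketch; the concrete offset depends on the local zone):
#
#   >>> naive = datetime(2016, 12, 14, 10, 14)
#   >>> ensure_timezone(naive).tzinfo    # naive datetimes are assumed to be local time
#   tzlocal()
#   >>> force_to_utc(naive).tzinfo       # converted from local time to UTC
#   tzutc()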
def datetime_to_seconds(dt):
"""Return the number of seconds since the Unix epoch for
the datetime object `dt`.
"""
dt = ensure_timezone(dt)
sec = int(calendar.timegm(dt.utctimetuple()))
return sec
def datetime_to_microseconds(dt):
"""Return the number of microseconds since the Unix epoch for
the datetime object `dt`.
"""
dt = ensure_timezone(dt)
sec = int(calendar.timegm(dt.utctimetuple()))
return sec * 1000000 + dt.microsecond
def datetime_to_nanoseconds(dt):
"""Return the number of nanoseconds since the Unix epoch for
the datetime object `dt`.
"""
dt = ensure_timezone(dt)
if not hasattr(dt, 'nanosecond'):
return 1000 * datetime_to_microseconds(dt)
return int(calendar.timegm(dt.utctimetuple()))*1000000000+dt.nanosecond
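# Worked example (a sketch) for the UTC instant 2001-09-09 01:46:40:
#
#   >>> dt = datetime(2001, 9, 9, 1, 46, 40, tzinfo=tzutc())
#   >>> datetime_to_seconds(dt)
#   1000000000
#   >>> datetime_to_microseconds(dt)
#   1000000000000000
#   >>> datetime_to_nanoseconds(dt)    # stdlib datetime: microseconds * 1000
#   1000000000000000000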
def sec_string_to_datetime(s):
"""Convert the string `s` which represents a time in seconds
since the Unix epoch to a datetime object.
"""
return datetime.fromtimestamp(s, tzutc())
def msec_string_to_datetime(s):
"""Convert the string `s` which represents a time in milliseconds
since the Unix epoch to a datetime object.
"""
sec = Decimal(s) / Decimal(1000)
return datetime.fromtimestamp(sec, tzutc())
def usec_string_to_datetime(s):
"""Convert the string `s` which represents a time in microseconds
since the Unix epoch to a datetime object.
"""
sec = Decimal(s) / Decimal(1000000)
return datetime.fromtimestamp(sec, tzutc())
def nsec_to_datetime(ns):
"""Convert the value `ns` which represents a time in nanoseconds
since the Unix epoch (either as an integer or a string) to a
datetime object.
"""
if isinstance(ns, str):
ns = int(ns)
if ns == 0:
return None
# we want full nanosecond precision if we're using datetimeng but
# float can't represent absolute timestamps with enough precision.
# Decimal solves that problem but "regular" datetime objects can't
# be constructed from instances of Decimal. so, we create an
# appropriate argument to fromtimestamp() by checking which
# implementation of datetime we're using...
if hasattr(datetime, 'nanosecond'):
sec = Decimal(ns) / 1000000000
else:
sec = float(ns) / 1000000000
return datetime.fromtimestamp(sec, tzutc())
def string_to_datetime(s):
"""Determine level of precision by number of digits and return
a datetime object.
Note: this method is only valid for datetimes between year 2001 and
year 2286 since it assumes second level precision has 10 digits.
"""
t = int(s)
digits = len(str(t))
if digits == 10:
return sec_string_to_datetime(s)
elif digits == 13:
return msec_string_to_datetime(s)
elif digits == 16:
return usec_string_to_datetime(s)
elif digits == 19:
return nsec_string_to_datetime(s)
else:
raise TypeError('Unable to determine units of s: %s' % s)
nsec_string_to_datetime = nsec_to_datetime
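# Sketch of the digit-count dispatch in string_to_datetime(); all four strings name the
# same instant at increasing precision:
#
#   string_to_datetime('1000000000')            # 10 digits -> seconds
#   string_to_datetime('1000000000000')         # 13 digits -> milliseconds
#   string_to_datetime('1000000000000000')      # 16 digits -> microseconds
#   string_to_datetime('1000000000000000000')   # 19 digits -> nanoseconds
#
# Any other digit count raises TypeError.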
def usec_string_to_timedelta(s):
"""Convert the string `s` which represents a number of microseconds
to a timedelta object.
"""
sec = float(s) / 1000000
return timedelta(seconds=sec)
def timedelta_total_seconds(td):
"""Handle backwards compatibility for timedelta.total_seconds."""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
return float(td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
# move out of TimeParser scope since Python3 won't allow it
# ref: https://stackoverflow.com/a/13913933/2157429
class _informat(object):
def __init__(self, pattern, has_date, has_year):
self.pattern = pattern
self.has_date = has_date
self.has_year = has_year
if has_year:
assert has_date
def match(self, s):
tm = datetime.strptime(s, self.pattern)
if self.has_year:
return tm
now = datetime.now()
if self.has_date:
return tm.replace(year=now.year)
return tm.replace(month=now.month, day=now.day, year=now.year)
# all the different time of day formats we can parse.
# refer to the python time module documentation for details
# on the format strings
_time_formats = (
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%I:%M:%S %p',
'%I:%M:%S%p',
'%I:%M %p',
'%I:%M%p',
'%I %p',
'%I%p'
)
# all the different date formats we can parse.
# note that dates are "normalized" a bit, see parse() below.
_date_formats = (
'%m/%d',
'%B %d',
'%b %d'
)
_date_year_formats = (
'%Y/%m/%d',
'%m/%d/%Y',
'%m/%d/%y',
'%B/%d/%Y',
'%b/%d/%Y',
)
class TimeParser(object):
"""Instances of this class parse strings representing dates and/or
times into python `datetime.datetime` objects.
This class is capable of parsing a variety of different formats.
On the first call, the method `parse()` may take some time, as it
tries a series of pre-defined formats one after another. After
successfully parsing a string, the parser object remembers the format
that was used so subsequent calls with identically formatted strings
are as efficient as the underlying method `datetime.strptime`.
"""
def __init__(self):
""" Construct a new TimeParser object """
self._fmt = None
@classmethod
def _parse_no_hint(cls, s):
"""Parse string s as a date/time without any hint about the format.
If it can be parsed, returns a tuple of the datetime object
and the format object that was used. If the string cannot be
parsed, raises ValueError.
"""
for fmt in cls._formats:
try:
dt = fmt.match(s)
return dt, fmt
except ValueError:
pass
raise ValueError("Could not parse datetime string: %s" % s)
@classmethod
def parse_one(cls, s):
"""Do a "one-shot" parsing of string s as a date/time.
Doesn't remember anything about the format that was used.
"""
dt, ignored = cls._parse_no_hint(s)
return dt
def parse(self, s):
"""Parse the string `s` as a date and time.
Returns a `datetime.datetime` object on success or raises `ValueError`
if the string cannot be parsed.
"""
# begin with some normalization, strip whitespace and convert
# dashes to slashes so that 05-10-2011 is equivalent to 05/10/2011
s = s.strip().replace('-', '/')
if self._fmt is not None:
try:
return self._fmt.match(s)
except ValueError:
pass
dt, self._fmt = self._parse_no_hint(s)
return dt
_formats = (
(
[_informat(_tf, False, False) for _tf in _time_formats]
+ [_informat('%s %s' % (_tf, _df), True, False)
for _tf in _time_formats for _df in _date_formats]
+ [_informat('%s %s' % (_df, _tf), True, False)
for _tf in _time_formats for _df in _date_formats]
+ [_informat('%s %s' % (_tf, _df), True, True)
for _tf in _time_formats for _df in _date_year_formats]
+ [_informat('%s %s' % (_df, _tf), True, True)
for _tf in _time_formats for _df in _date_year_formats]
)
)
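# Illustrative usage (a sketch): dashes are normalized to slashes before parsing, dates
# without a year assume the current year, and bare times assume today's date.
#
#   >>> p = TimeParser()
#   >>> p.parse('05-10-2011 16:30')
#   datetime.datetime(2011, 5, 10, 16, 30)
#   >>> p.parse('4:30 PM')    # remembered format no longer fits, so the parser re-scans
#   datetime.datetime(<current year>, <current month>, <current day>, 16, 30)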
_timedelta_units = {
'us': 0.000001, 'usec': 0.000001, 'microsecond': 0.000001, 'microseconds': 0.000001,
'ms': 0.001, 'msec': 0.001, 'millisecond': 0.001, 'milliseconds': 0.001,
's': 1, 'sec': 1, 'second': 1, 'seconds': 1,
'm': 60, 'min': 60, 'minute': 60, 'minutes': 60,
'h': 60*60, 'hr': 60*60, 'hour': 60*60, 'hours': 60*60,
'd': 24*60*60, 'day': 24*60*60, 'days': 24*60*60,
'w': 7*24*60*60, 'week': 7*24*60*60, 'weeks': 7*24*60*60,
'month': 30*24*60*60, 'months': 30*24*60*60,
'q': 91*24*60*60, 'quarter': 91*24*60*60, 'quarters': 91*24*60*60,
'y': 365*24*60*60, 'year': 365*24*60*60, 'years': 365*24*60*60
}
_timedelta_re = re.compile("([0-9]*\\.*[0-9]*) *([a-zA-Z]*)")
# Build map from casual units to standard units recognizable by datetime
units_map = {}
for k, v in _timedelta_units.items():
if v == 1:
units_map[k] = 'second'
elif v == 60:
units_map[k] = 'minute'
elif v == 60*60:
units_map[k] = 'hour'
elif v == 60*60*24:
units_map[k] = 'day'
elif v == 60*60*24*7:
units_map[k] = 'week'
elif v == 60*60*24*30:
units_map[k] = 'month'
elif v == 60*60*24*91:
units_map[k] = 'quarter'
elif v == 60*60*24*365:
units_map[k] = 'year'
def timedelta_str(td):
def pluralize(val, base, plural):
if val > 1:
return "%d %s" % (val, plural)
else:
return "%d %s" % (val, base)
if td.days > 0:
if td.seconds != 0 or td.microseconds != 0:
msg = ("Timedelta has too many components for pretty string: %s"
% str(td))
raise ValueError(msg)
if td.days % 7 == 0:
return pluralize(td.days / 7, "week", "weeks")
else:
return pluralize(td.days, "day", "days")
elif td.seconds > 0:
if td.microseconds != 0:
msg = ("Timedelta has too many components for pretty string: %s"
% str(td))
raise ValueError(msg)
if td.seconds % (60*60) == 0:
return pluralize(td.seconds / (60*60), "hour", "hours")
elif td.seconds % 60 == 0:
return pluralize(td.seconds / 60, "minute", "minutes")
else:
return pluralize(td.seconds, "second", "seconds")
else:
if td.microseconds % 1000 == 0:
return pluralize(
td.microseconds / 1000, "millisecond", "milliseconds"
)
else:
return pluralize(td.microseconds, "microsecond", "microseconds")
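# Worked examples (a sketch): timedelta_str() insists on a single "pretty" unit.
#
#   >>> timedelta_str(timedelta(days=14))
#   '2 weeks'
#   >>> timedelta_str(timedelta(seconds=90))
#   '90 seconds'
#   >>> timedelta_str(timedelta(days=1, seconds=30))   # raises ValueError (mixed units)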
def round_time(dt=None, round_to=60, round_up=False, trim=False):
"""Round a datetime object to any time laps in seconds.
`dt`: datetime.datetime object, default now.
`round_to`: Closest number of seconds to round to, default 1 minute.
`round_up`: Default rounds down to nearest `round_to` interval,
True here will instead round up.
`trim`: Trim to nearest round_to value rather than rounding.
"""
# ref http://stackoverflow.com/a/10854034/2157429
if dt is None:
dt = datetime.now()
dt = ensure_timezone(dt)
if trim:
rounded = (round_time(dt, round_to, False),
round_time(dt, round_to, True))
return max(rounded) if max(rounded) <= dt else min(rounded)
seconds = (dt - dt.min.replace(tzinfo=dt.tzinfo)).seconds
if round_up:
rounding = (seconds + round_to / 2) // round_to * round_to
else:
rounding = (seconds - round_to / 2) // round_to * round_to
return dt + timedelta(0, rounding - seconds, -dt.microsecond)
def parse_timedelta(s):
"""Parse the string `s` representing some duration of time
(e.g., `"3 seconds"` or `"1 week"`) and return a `datetime.timedelta`
object representing that length of time.
If the string cannot be parsed, raises `ValueError`.
"""
m = _timedelta_re.match(s)
if not m:
raise ValueError("Could not parse string as timedelta: %s" % s)
if m.group(1) == "":
val = 1
else:
val = float(m.group(1))
if m.group(2) == "":
units = 1
else:
try:
units = _timedelta_units[m.group(2)]
except KeyError:
raise ValueError("Invalid timedelta units: %s" % m.group(2))
return timedelta(seconds=units * float(val))
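# Worked examples (a sketch): a missing number defaults to 1 and a missing unit
# defaults to seconds.
#
#   >>> parse_timedelta('3 seconds')
#   datetime.timedelta(seconds=3)
#   >>> parse_timedelta('2 weeks')
#   datetime.timedelta(days=14)
#   >>> parse_timedelta('min')
#   datetime.timedelta(seconds=60)
#   >>> parse_timedelta('1.5h')
#   datetime.timedelta(seconds=5400)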
def floor_dt(dt, unit, begin_monday=False):
"""Derive the most recent start datetime of the current duration unit.
    e.g., for datetime(2016, 12, 14, 10, 14) and unit 'hour', this returns
    datetime(2016, 12, 14, 10, 0).
:param dt: datetime value
:param unit: string, duration unit: year, quarter, month, week, day,
hour, minute, second.
    :return: datetime.
"""
if unit == 'week':
offset = 0 if begin_monday else 1
dt = dt - timedelta(days=dt.weekday() + offset)
return floor_dt(dt, 'day')
if unit == 'quarter':
# Update the month to be first of current quarter
dt = dt.replace(month=(dt.month - 1)//3 * 3 + 1)
return floor_dt(dt, 'month')
start_month, start_day, start_hour, start_minute, start_second = \
1, 1, 0, 0, 0
kwargs, smaller_unit = {}, False
for u in ['year', 'month', 'day', 'hour', 'minute', 'second']:
if smaller_unit:
kwargs[u] = eval('start_' + u)
else:
kwargs[u] = getattr(dt, u)
if not smaller_unit and u == unit:
smaller_unit = True
return datetime(**kwargs)
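# Worked examples (a sketch):
#
#   >>> floor_dt(datetime(2016, 12, 14, 10, 14), 'hour')
#   datetime.datetime(2016, 12, 14, 10, 0)
#   >>> floor_dt(datetime(2016, 12, 14, 10, 14), 'quarter')
#   datetime.datetime(2016, 10, 1, 0, 0)
#   >>> floor_dt(datetime(2016, 12, 14, 10, 14), 'week')   # begin_monday=False floors
#   datetime.datetime(2016, 12, 11, 0, 0)                  # to the preceding Sunday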
def parse_range(s, begin_monday=False):
"""Parse the string `s` representing a range of times
(e.g., `"12:00 PM to 1:00 PM"` or `"last 2 weeks"`).
Upon success returns a pair of `datetime.datetime` objects
representing the beginning and end of the time range.
If the string cannot be parsed, raises `ValueError`.
"""
s = s.strip()
if s == 'today':
s = 'this day'
elif s == 'yesterday':
s = 'previous day'
# first try something of the form "time1 to time2"
i = s.split('to')
if len(i) == 2:
try:
p = TimeParser()
start = p.parse(i[0])
end = p.parse(i[1])
return start, end
except ValueError:
pass
# try something of the form "last time"
if s.startswith('last'):
try:
duration = parse_timedelta(s[4:].strip())
end = datetime.now()
start = end - duration
return start, end
except ValueError:
pass
    # try something of the form "previous <duration>"
elif s.startswith('previous'):
try:
duration = s[8:].strip()
unit = units_map[_timedelta_re.match(duration).group(2)]
now = datetime.now()
how_many = _timedelta_re.match(duration).group(1)
how_many = 1 if how_many == '' else int(how_many)
if unit == 'quarter':
delta = relativedelta(months=how_many*3)
else:
delta = relativedelta(**{unit+'s': how_many})
start = floor_dt(now - delta, unit, begin_monday)
end = floor_dt(now, unit, begin_monday)
return start, end
except ValueError:
pass
elif s.startswith('this'):
try:
duration = s[4:].strip()
unit = units_map[_timedelta_re.match(duration).group(2)]
now = datetime.now()
start = floor_dt(now, unit, begin_monday)
return start, now
except ValueError:
pass
# if it still doesn't work, try the pilot time filter syntax
# which looks like "time1,time2"
i = s.split(',')
# XXX normally, Pilot formats filters with the
# following syntax:
# 11/20/2007 12:28:01.719742,11/20/2007 12:35:19.719742, GMT -8
# We will need to handle the third piece if we want to avoid problems
# with daylight savings
if len(i) >= 2:
try:
p = TimeParser()
start = p.parse(i[0])
end = p.parse(i[1])
return start, end
except ValueError:
pass
raise ValueError('cannot parse time range "%s"' % s)
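# Illustrative inputs (a sketch; absolute results depend on datetime.now()):
#
#   parse_range('12:00 PM to 1:00 PM')   # explicit "time1 to time2" pair (today's date)
#   parse_range('last 2 weeks')          # now - 2 weeks .. now
#   parse_range('previous hour')         # the full hour before the current one
#   parse_range('this day')              # start of today .. now (same as 'today')
#   parse_range('11/20/2007 12:28:01.719742,11/20/2007 12:35:19.719742')  # pilot syntax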
# XXX probably incorrect in some locales?
_fmt_widths = {
'a': 3, 'A': 9, 'b': 3, 'B': 9, 'c': 24,
'd': 2, 'f': 6, 'H': 2, 'I': 2, 'j': 3,
'm': 2, 'M': 2, 'p': 2, 'S': 2, 'U': 2,
'w': 1, 'W': 2, 'x': 8, 'X': 8, 'y': 2,
'Y': 4, 'z': 5, 'Z': 3, '%': 1
}
def max_width(fmt):
"""Given a time formatting string (i.e., a string that can be
used as the `fmt` option to `datetime.strftime`, compute the
maximum length of a formatted date string.
"""
m = 0
i = 0
while i < len(fmt):
        if fmt[i] == '%':
            try:
                i += 1
                m += _fmt_widths[fmt[i]]
            except (IndexError, KeyError):
                # KeyError: unrecognized directive; IndexError: trailing bare '%'
                raise ValueError(
                    'do not understand %%%s in format string' % fmt[i:i + 1]
                )
else:
m += 1
i += 1
return m
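# Worked example (a sketch): each directive contributes its maximum width and each
# literal character contributes one.
#
#   >>> max_width('%Y/%m/%d %H:%M:%S')
#   19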
``` |
{
"source": "JKraftman/vsphere-automation-sdk-python",
"score": 2
} |
#### File: vsphere-automation-sdk-python/samples/GetAllvCenterObjects.py
```python
import pyVim
from pyVim.connect import SmartConnect
from pyVmomi import vim
import ssl
# List all the objects from vCenter server inventory
s = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.verify_mode = ssl.CERT_NONE
si = pyVim.connect.SmartConnect(host="192.168.224.100", user="<EMAIL>", pwd="<PASSWORD>!", sslContext=s)
content = si.content
# Method that populates objects of type vimtype
def get_all_objs(content1, vimtype):
obj = {}
container = content1.viewManager.CreateContainerView(content1.rootFolder, vimtype, True)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
# Calling above method
dcs = get_all_objs(content, [vim.Datacenter])
clusters = get_all_objs(content, [vim.ClusterComputeResource])
hosts = get_all_objs(content, [vim.HostSystem])
datastores = get_all_objs(content, [vim.Datastore])
getAllVms = get_all_objs(content, [vim.VirtualMachine])
# DC
print("Show me the Datacenter: \n")
for dc in dcs:
print(dc.name)
print()
# Iterating each cluster object and printing its name
print("Show me the clusters: \n")
for cluster in clusters:
print(cluster.name)
print()
# ESX hosts
print("Show me the hosts: \n")
for host in hosts:
print(host.name)
print()
# Iterating each vm object and printing its name
print("Show me all the VMs in this vCenter: \n")
for vm in getAllVms:
print(vm.name)
print()
print("Show me the datastores: \n")
for datastore in datastores:
print(datastore.name)
print()
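# A hedged addition to the original sample: SmartConnect sessions are not closed
# automatically, so disconnect once the inventory has been listed.
from pyVim.connect import Disconnect
Disconnect(si)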
``` |