import argparse
from bus import Bus
from cpu import CPU
from frame import Frame
from io_registers import IO_Registers
from ram import RAM
from ppu.ppu import PPU
from rom import ROM
from ui import UI
def main():
# set up command line argument parser
parser = argparse.ArgumentParser(description='NES Emulator.')
parser.add_argument('rom_path',
metavar='rom_path',
type=str,
help='path to rom')
parser.add_argument('--debug', dest='debug', const=True, default=False, help='logs the running program', nargs='?')
parser.add_argument('--snake', dest='snake', const=True, default=False, help='runs the snake game', nargs='?')
args = parser.parse_args()
# load rom
if args.snake:
rom_bytes = b''.join(list(map(lambda x: x.to_bytes(1, 'little'), [
0x20, 0x06, 0x06, 0x20, 0x38, 0x06, 0x20, 0x0d, 0x06, 0x20, 0x2a, 0x06, 0x60, 0xa9, 0x02, 0x85,
0x02, 0xa9, 0x04, 0x85, 0x03, 0xa9, 0x11, 0x85, 0x10, 0xa9, 0x10, 0x85, 0x12, 0xa9, 0x0f, 0x85,
0x14, 0xa9, 0x04, 0x85, 0x11, 0x85, 0x13, 0x85, 0x15, 0x60, 0xa5, 0xfe, 0x85, 0x00, 0xa5, 0xfe,
0x29, 0x03, 0x18, 0x69, 0x02, 0x85, 0x01, 0x60, 0x20, 0x4d, 0x06, 0x20, 0x8d, 0x06, 0x20, 0xc3,
0x06, 0x20, 0x19, 0x07, 0x20, 0x20, 0x07, 0x20, 0x2d, 0x07, 0x4c, 0x38, 0x06, 0xa5, 0xff, 0xc9,
0x77, 0xf0, 0x0d, 0xc9, 0x64, 0xf0, 0x14, 0xc9, 0x73, 0xf0, 0x1b, 0xc9, 0x61, 0xf0, 0x22, 0x60,
0xa9, 0x04, 0x24, 0x02, 0xd0, 0x26, 0xa9, 0x01, 0x85, 0x02, 0x60, 0xa9, 0x08, 0x24, 0x02, 0xd0,
0x1b, 0xa9, 0x02, 0x85, 0x02, 0x60, 0xa9, 0x01, 0x24, 0x02, 0xd0, 0x10, 0xa9, 0x04, 0x85, 0x02,
0x60, 0xa9, 0x02, 0x24, 0x02, 0xd0, 0x05, 0xa9, 0x08, 0x85, 0x02, 0x60, 0x60, 0x20, 0x94, 0x06,
0x20, 0xa8, 0x06, 0x60, 0xa5, 0x00, 0xc5, 0x10, 0xd0, 0x0d, 0xa5, 0x01, 0xc5, 0x11, 0xd0, 0x07,
0xe6, 0x03, 0xe6, 0x03, 0x20, 0x2a, 0x06, 0x60, 0xa2, 0x02, 0xb5, 0x10, 0xc5, 0x10, 0xd0, 0x06,
0xb5, 0x11, 0xc5, 0x11, 0xf0, 0x09, 0xe8, 0xe8, 0xe4, 0x03, 0xf0, 0x06, 0x4c, 0xaa, 0x06, 0x4c,
0x35, 0x07, 0x60, 0xa6, 0x03, 0xca, 0x8a, 0xb5, 0x10, 0x95, 0x12, 0xca, 0x10, 0xf9, 0xa5, 0x02,
0x4a, 0xb0, 0x09, 0x4a, 0xb0, 0x19, 0x4a, 0xb0, 0x1f, 0x4a, 0xb0, 0x2f, 0xa5, 0x10, 0x38, 0xe9,
0x20, 0x85, 0x10, 0x90, 0x01, 0x60, 0xc6, 0x11, 0xa9, 0x01, 0xc5, 0x11, 0xf0, 0x28, 0x60, 0xe6,
0x10, 0xa9, 0x1f, 0x24, 0x10, 0xf0, 0x1f, 0x60, 0xa5, 0x10, 0x18, 0x69, 0x20, 0x85, 0x10, 0xb0,
0x01, 0x60, 0xe6, 0x11, 0xa9, 0x06, 0xc5, 0x11, 0xf0, 0x0c, 0x60, 0xc6, 0x10, 0xa5, 0x10, 0x29,
0x1f, 0xc9, 0x1f, 0xf0, 0x01, 0x60, 0x4c, 0x35, 0x07, 0xa0, 0x00, 0xa5, 0xfe, 0x91, 0x00, 0x60,
0xa6, 0x03, 0xa9, 0x00, 0x81, 0x10, 0xa2, 0x00, 0xa9, 0x01, 0x81, 0x10, 0x60, 0xa2, 0x00, 0xea,
0xea, 0xca, 0xd0, 0xfb, 0x60
])))
else:
with open(args.rom_path, 'rb') as file:
rom_bytes = file.read()
rom = ROM(rom_bytes, args.snake)
# create ram
ram = RAM()
# create ppu
ppu = PPU(rom.chr_rom, rom.flag_6 & 1)
io_regs = IO_Registers()
frame = Frame()
bus = Bus(ram, ppu, io_regs, rom)
# create cpu
cpu = CPU(bus, args.debug)
ui = UI(cpu, rom.chr_rom, frame)
cpu.start_up(ui.handle_and_update_ui)
cpu.run_rom(rom)
if __name__ == '__main__':
main()
|
"""\
wxSpinButton objects
based on wxGlade/widgets/spin_ctrl/
@copyright: 2004 D.H. aka crazyinsomniac at users.sourceforge.net
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2016-2017 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import wx
from edit_windows import ManagedBase, EditStylesMixin
from tree import Node
import common, config
import new_properties as np
class EditSpinButton(ManagedBase, EditStylesMixin):
"Class to handle wxSpinButton objects"
# XXX unify with EditSpinCtrl?
_PROPERTIES = ["Widget", "range", "value", "style"]
PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES
recreate_on_style_change = True
def __init__(self, name, parent, id, sizer, pos):
ManagedBase.__init__(self, name, 'wxSpinButton', parent, id, sizer, pos)
EditStylesMixin.__init__(self)
# initialise instance properties
self.range = np.IntRangePropertyA( "0, 100" )
self.value = np.SpinPropertyA(0, val_range=(0,100), immediate=True)
def create_widget(self):
self.widget = wx.SpinButton(self.parent.widget, self.id, style=self.style)
self.widget.SetRange( *self.properties["range"].get_tuple() )
self.widget.SetValue( self.value )
def properties_changed(self, modified): # from EditSlider
if (not modified or "range" in modified) and self.widget:
mi,ma = self.properties["range"].get_tuple()
self.widget.SetRange(mi, ma)
self.properties["value"].set_range(mi,ma)
if not modified or "value" in modified or "range" in modified:
# check that value is inside range
value_p = self.properties["value"]
if value_p.is_active():
mi,ma = self.properties["range"].get_tuple()
value = value_p.get()
if value<mi:
value_p.set(mi)
value = mi
elif value>ma:
value_p.set(ma)
value = ma
if self.widget: self.widget.SetValue(value)
EditStylesMixin.properties_changed(self, modified)
ManagedBase.properties_changed(self, modified)
def builder(parent, sizer, pos, number=[1]):
"factory function for EditSpinButton objects"
name = 'spin_button_%d' % number[0]
while common.app_tree.has_name(name):
number[0] += 1
name = 'spin_button_%d' % number[0]
with parent.frozen():
text = EditSpinButton(name, parent, wx.NewId(), sizer, pos)
text.properties["style"].set_to_default()
text.check_defaults()
node = Node(text)
text.node = node
if parent.widget: text.create()
common.app_tree.insert(node, sizer.node, pos-1)
def xml_builder(attrs, parent, sizer, sizeritem, pos=None):
"factory function to build EditSpinButton objects from a XML file"
from xml_parse import XmlParsingError
try:
name = attrs['name']
except KeyError:
raise XmlParsingError(_("'name' attribute missing"))
if sizer is None or sizeritem is None:
raise XmlParsingError(_("sizer or sizeritem object cannot be None"))
text = EditSpinButton(name, parent, wx.NewId(), sizer, pos)
#sizer.set_item(text.pos, proportion=sizeritem.proportion, span=sizeritem.span, flag=sizeritem.flag, border=sizeritem.border)
node = Node(text)
text.node = node
if pos is None:
common.app_tree.add(node, sizer.node)
else:
common.app_tree.insert(node, sizer.node, pos-1)
return text
def initialize():
"initialization function for the module: returns a wxBitmapButton to be added to the main palette"
common.widgets['EditSpinButton'] = builder
common.widgets_from_xml['EditSpinButton'] = xml_builder
return common.make_object_button('EditSpinButton', 'spinbtn.xpm')
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
RESTful endpoints for creating/showing/deleting a user's Jumpbox in vLab
"""
from setuptools import setup, find_packages
setup(name="vlab-jumpbox-api",
author="Nicholas Willhite,",
author_email='[email protected]',
version='2018.07.25',
packages=find_packages(),
include_package_data=True,
package_data={'vlab_jumpbox_api': ['app.ini']},
description="Create/delete a Jumpbox for connecting to your virtual lab",
install_requires=['flask', 'ldap3', 'pyjwt', 'uwsgi', 'vlab-api-common',
'ujson', 'cryptography', 'vlab-inf-common', 'celery']
)
|
"""
An object-oriented high-level wrapper for training InceptionV3 CNNs.
"""
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.models import load_model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras import backend as K
from time import time
import os
# For Function to feed images to model and augment images at the same time
from keras.preprocessing.image import ImageDataGenerator
# For Tensorboard & ValAccHistory
from keras.callbacks import TensorBoard, Callback
# for add_salt_pepper_noise
import numpy as np
# for leaving the program in case of invalid arguments (sys.exit(0))
import sys
# for get_config
from keras.models import Sequential
# for unzipping utility to train a model based on zipped training images
import zipfile
# for customizing SGD, rmsprop
from keras.optimizers import SGD, RMSprop
# for logging
from pathlib import Path
import datetime
# for csv logging
launch_datetime = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
def add_salt_pepper_noise(X_img):
"""
Custom Image Augmentation Function which can be added to the keras
fit_generator function call
Takes an numpy array as input and returns the same array with salt & pepper
noise (similar to what one might expect from bad quality images)
"""
# Need to produce a copy as to not modify the original image
X_img_copy = X_img.copy()
row, col, _ = X_img_copy.shape
salt_vs_pepper = 0.2
amount = 0.004
num_salt = np.ceil(amount * X_img_copy.size * salt_vs_pepper)
num_pepper = np.ceil(amount * X_img_copy.size * (1.0 - salt_vs_pepper))
# Add Salt noise (to the copy, so the original image stays untouched)
coords = [np.random.randint(0, i - 1, int(num_salt)) for i in X_img_copy.shape]
X_img_copy[coords[0], coords[1], :] = 1
# Add Pepper noise
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in X_img_copy.shape]
X_img_copy[coords[0], coords[1], :] = 0
return X_img_copy
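# A minimal usage sketch (editor's addition): the noise function is meant to be
# plugged into Keras' ImageDataGenerator via preprocessing_function. The
# directory name below is hypothetical.
def _salt_pepper_generator_example(train_dir="data/train"):
    datagen = ImageDataGenerator(rescale=1. / 255,
                                 preprocessing_function=add_salt_pepper_noise)
    return datagen.flow_from_directory(train_dir,
                                       target_size=(150, 150),
                                       batch_size=16,
                                       class_mode='categorical')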
class ValAccHistory(Callback):
"""
Keras custom Callback which logs the validation history
"""
def on_train_begin(self, logs={}):
self.val_accs = []
def on_epoch_end(self, epoch, logs={}):
self.val_accs.append(logs.get('val_acc'))
class ExtraValidationCallback(Callback):
"""
Keras custom callback class to log validation metrics for two validation sets
saves everything to a csv called log_train_double_validation.csv
in the current working directory
"""
def __init__(self,extra_validation):
self.extra_validation_dir = extra_validation
def on_train_begin(self, logs={}):
self.val1_accs = []
self.val1_loss = []
self.val2_accs = []
self.val2_loss = []
self.train_accs = []
self.train_loss = []
# extra_validation_dir = self.extra_validation
def on_epoch_end(self, epoch, logs={}):
self.val1_accs.append(logs.get('val_acc'))
self.val1_loss.append(logs.get('val_loss'))
# loss, acc = self.evaluate(extra_validation_dir)
# augmentation configuration for testing: only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
# generator for test data
# similar to above but based on different augmentation function (above)
test_generator = test_datagen.flow_from_directory(
self.extra_validation_dir,
target_size=(224, 224),
batch_size=64,
class_mode='categorical')
loss, acc = self.model.evaluate_generator(test_generator)
self.val2_accs.append(acc)
self.val2_loss.append(loss)
self.train_accs.append(logs.get('acc'))
self.train_loss.append(logs.get('loss'))
logging = True
log_filename = 'log_train_double_validation.csv'
if logging:
print("logging now...")
my_file = Path(log_filename)
# write header if this is the first run
if not my_file.is_file():
print("writing head")
with open(log_filename, "w") as log:
log.write("datetime,epoch,val1_acc,val1_loss,val2_acc,val2_loss,train_acc,train_loss\n")
# append parameters
with open(log_filename, "a") as log:
log.write("{},{},{},{},{},{},{},{}\n".format(
    datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
    epoch,
    logs.get('val_acc'),
    logs.get('val_loss'),
    acc,
    loss,
    logs.get('acc'),
    logs.get('loss')))
print('\nSecond Validation Set, loss: {}, acc: {}\n'.format(loss, acc))
class KerasInception:
"""
Class with provides an interface to train InceptionV3 based CNNs
"""
model = None
input_dim = 0
batch_size = 0
dense_layers = 0
def __init__(self,input_dim=150,batch_size=16,dense_layers=1,dropout=None,
lr=0.0031622777, dense_dim=1024):
self.input_dim = input_dim
self.batch_size = batch_size
self.dense_layers = dense_layers
self.dropout = dropout
self.lr = lr
self.dense_dim = dense_dim
self.model = None
def assemble_model(self,train_dir):
"""
build the InceptionV3 architecture based on the object instance
attributes such as number of dense layers, dropout etc
"""
class_count = len(next(os.walk(train_dir))[1])
# base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# global spatial average pooling layer
x = base_model.output
base_model.layers[-1].name = 'base_output'
x = GlobalAveragePooling2D(name='pooling')(x)
for i in range(self.dense_layers):
# dropout
if self.dropout and i == 0:
x = Dropout(0)(x)
print("added 0 pc dropout for layer 1")
elif self.dropout:
x = Dropout(self.dropout)(x)
print("added ",self.dropout," pc dropout for layer ",i+1)
# fully-connected layer
x = Dense(self.dense_dim, activation='relu',name='dense'+str(i))(x)
# logistic layer
predictions = Dense(class_count, activation='softmax',name='softmax')(x)
# define the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
self.model = model
# we want to train top layers only
for layer in base_model.layers:
layer.trainable = False
# compile the model (*after* setting layers to non-trainable)
# model.compile(optimizer=RMSprop(lr=self.lr), loss='categorical_crossentropy', metrics=['accuracy'])
model.compile(optimizer=SGD(lr=self.lr, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
return model
def save_class_list(self,train_dir,classes_txt_dir):
"""
print train classes to txt file in classes_txt_dir
"""
# assemble path
filename = "classes.txt"
my_file = os.path.join(classes_txt_dir, filename)
print("Writing classes.txt to:\n",my_file,'\n')
print("Classes found:")
for name in os.listdir(train_dir):
if not os.path.isfile(os.path.join(train_dir, name)):
print(name)
# check if file already exists
if not os.path.isfile(my_file):
# write all folder names to txt file
with open(my_file, "w") as classes_file:
for name in os.listdir(train_dir):
# exclude files
if not os.path.isfile(os.path.join(train_dir, name)):
classes_file.write(name)
classes_file.write("\n")
def unfreeze(self,layers):
"""
unfreeze a specified number of InceptionV3 layers and recompile the model
"""
inception_layers = 311
cutoff = inception_layers - layers
for layer in self.model.layers[:cutoff]:
layer.trainable = False
for layer in self.model.layers[cutoff:]:
layer.trainable = True
self.model.compile(optimizer=SGD(lr=self.lr, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# train a model from scratch given a set of training parameters
# choose whether to save the model
def train(self,train_dir,validation_dir,epochs=0,fine_tune=False, unfrozen_layers=0,
salt_pepper=False,augmentation_params={},classes_txt_dir=None,save_model=False,
validation_dir_2=None,steps_per_epoch=12000):
"""
initializes the keras model object and trains the model
train_dir: directory of training data
validation_dir: directory of validation data
epochs: number of epochs to train
fine_tune: whether to fine-tune the model at the end of normal epochs
unfrozen_layers: how many layers of the 311 inceptionV3 conv layers
should be retrained, has to be between 0 and 311
salt_pepper: whether to add salt & pepper noise to the training images
augmentation_params: list of augmentation parameters for keras
classes_txt_dir: if provided a path, it will save a file named
"classes.txt" containing the labels of all classes we train for,
if None, it will not save such a file
save_model: whether to save the model at the end of training,
the file defaults to model.h5 next to the training directory
validation_dir_2: if provided a path, this will calculate additional
validation metrics for a second set of data and log everything
in a csv in the current working directory
steps_per_epoch: the number of images that should be processed between
each validation (= the number of images per epoch)
returns validation accuracy history
"""
if classes_txt_dir:
self.save_class_list(train_dir,classes_txt_dir)
# the model can only be built here, once the training directory is known
# (to determine the number of classes)
# if it wasn't built before, build it now
if not self.model:
self.model = self.assemble_model(train_dir)
# unfreeze the specified number of Inception convolutional layers
self.unfreeze(unfrozen_layers)
print("Directory used for training: ",train_dir)
print("Directory used for validation: ",validation_dir)
# augmentation configuration for training
if salt_pepper:
train_datagen = ImageDataGenerator(
rescale=1./255,
preprocessing_function=add_salt_pepper_noise,
**augmentation_params)
else:
train_datagen = ImageDataGenerator(
rescale=1./255,
**augmentation_params)
# generator that will read pictures found in train_dir, and
# indefinitely generate batches of augmented image data and
# rescales images to target_size, splits them into batches
train_generator = train_datagen.flow_from_directory(
train_dir, # this is the target directory
target_size=(self.input_dim, self.input_dim), # all images will be resized to input_dimxinput_dim
batch_size=self.batch_size,
class_mode='categorical',
shuffle=True)
# augmentation configuration for validation: only rescaling
validation_datagen = ImageDataGenerator(rescale=1./255)
# generator for validation data
# similar to above but based on different augmentation function (above)
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(self.input_dim, self.input_dim),
batch_size=self.batch_size,
class_mode='categorical')
# log everything in tensorboard
tensorboard = TensorBoard(log_dir="/data/g1753002_ocado/logs/{}".format(time()),
histogram_freq=0,
batch_size=self.batch_size,
write_graph=True,
write_grads=False,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None) # histogram_freq=5
history = ValAccHistory()
# if a second validation_dir is provided, add an extra Keras callback
if validation_dir_2:
extralogger = ExtraValidationCallback(validation_dir_2)
cbs = [tensorboard,history,extralogger]
else:
cbs = [tensorboard,history]
self.model.fit_generator(
train_generator,
steps_per_epoch=steps_per_epoch // self.batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=1600 // self.batch_size,
callbacks=cbs)
# use_multiprocessing=True,
# workers=8)
if fine_tune:
self.fine_tune(train_generator,validation_generator,tensorboard)
if save_model:
base_path,train_folder = os.path.split(train_dir)
full_path = os.path.join(base_path, "model.h5")
self.save_model(full_path)
return history
def fine_tune(self,train_generator,validation_generator,tensorboard,
epochs=1):
"""
fine-tunes the top 2 inception blocks for a specified number of epochs
"""
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 249 layers and unfreeze the rest:
for layer in self.model.layers[:249]:
layer.trainable = False
for layer in self.model.layers[249:]:
layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
self.model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
loss='categorical_crossentropy', metrics=['accuracy'])
# fine-tuning the top 2 inception blocks alongside the Dense layers
self.model.fit_generator(
train_generator,
steps_per_epoch=2048 // self.batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=800 // self.batch_size,
callbacks = [tensorboard])
def evaluate(self,test_dir):
"""
input = path to directory with test images, expects directory to
be structured as follows: folders with names of classes, images in each
of these folders
output = loss, accuracy of the model
"""
# augmentation configuration for testing: only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
# generator for test data
# similar to above but based on different augmentation function (above)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(self.input_dim, self.input_dim),
batch_size=16,
class_mode='categorical')
score = self.model.evaluate_generator(test_generator)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
return score
def load_model(self,file_path):
"""
input = path to a model in h5 format (a full model, not just weights!)
the model is restored as it was saved (i.e. compiled); predict etc. can then be called
"""
self.model = load_model(file_path)
# saves a model, provide a file path ending with .h5
def save_model(self,path):
"""
saves the current model to a specified path
path has to contain the name of the file itself, i.e. end in ".h5"
"""
self.model.save(path)
def get_augmentation_params(augmentation_mode):
"""
returns a list of augmentation parameters for training
0 = no augmentation, 1 = rotation only, 2 = rotation & zoom
"""
if augmentation_mode == 0:
return {}
elif augmentation_mode == 1:
return {'rotation_range': 180}
elif augmentation_mode == 2:
return {'rotation_range': 180, 'zoom_range': 0.2}
else:
print("UNKNOWN AUGMENTATION PARAMETER! (needs to be 0, 1 or 2)")
sys.exit(0)
def unzip_and_return_path_to_folder(path_to_zip_file):
"""
utility to unzip files containing training images
input = path to a zip file
unzips the file to a folder with the same name
returns path to this folder
"""
maindirname, filename = os.path.split(path_to_zip_file)
new_dir = os.path.join(maindirname, filename.split('.')[0])
if not os.path.exists(new_dir):
os.makedirs(new_dir)
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(new_dir)
return path_to_zip_file.split('.')[0] # name of new folder
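# A minimal end-to-end sketch of the intended workflow (editor's addition):
# the data directories below are hypothetical and the hyperparameters are the
# defaults described above; not part of the original module.
if __name__ == '__main__':
    cnn = KerasInception(input_dim=224, batch_size=16, dense_layers=1, dropout=0.5)
    params = get_augmentation_params(1)  # rotation-only augmentation
    cnn.train(train_dir="data/train",
              validation_dir="data/validation",
              epochs=5,
              unfrozen_layers=0,
              augmentation_params=params,
              save_model=True)
    cnn.evaluate("data/validation")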
|
# flake8: noqa
from .moxa import MoxaHTTP_2_2
|
from .app import create_database, delete_database
# Delete any previous
# data in the database along with
# its tables
delete_database()
# Create the database
create_database()
|
import json
import zipfile
import numpy as np
import pandas as pd
import time
from dtw import dtw
### Control the number of closest trips used to calculate trip duration
N_trips = 20000
### Get Haversine distance
def get_dist(lonlat1, lonlat2):
lon_diff = np.abs(lonlat1[0]-lonlat2[0])*np.pi/360.0
lat_diff = np.abs(lonlat1[1]-lonlat2[1])*np.pi/360.0
a = np.sin(lat_diff)**2 + np.cos(lonlat1[1]*np.pi/180.0) * np.cos(lonlat2[1]*np.pi/180.0) * np.sin(lon_diff)**2
d = 2*6371*np.arctan2(np.sqrt(a), np.sqrt(1-a))
return(d)
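# Quick sanity check of the Haversine helper (editor's addition): the
# coordinates below are illustrative, roughly Porto city centre vs. the
# airport; the distance should come out in the low tens of kilometres.
_d_example = get_dist([-8.61, 41.15], [-8.68, 41.24])
assert 5 < _d_example < 20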
# read test
zf = zipfile.ZipFile('../input/test.csv.zip')
test = pd.read_csv(zf.open('test.csv'), usecols=['TRIP_ID', 'POLYLINE'])
test['POLYLINE'] = test['POLYLINE'].apply(json.loads)
test['snapshots'] = test['POLYLINE'].apply(len)
test['lonlat'] = test['POLYLINE'].apply(lambda x: x[0])
test = test.reset_index(drop=True)
# read train
zf = zipfile.ZipFile('../input/train.csv.zip')
train = pd.read_csv(zf.open('train.csv'), usecols=['TRIP_ID', 'POLYLINE'], converters={'POLYLINE': lambda x: json.loads(x)[0:1]})
train['snapshots'] = train['POLYLINE'].apply(len)
train['lonlat'] = train['POLYLINE'].apply(lambda x: [0,0] if x==[] else x[0])
train = train.reset_index(drop=True)
print(train)
test['TRAVEL_TIME'] = 0
for row, ll in enumerate(test['lonlat']):
print(row)
# Find the closest starting position
d = train['lonlat'].apply(lambda x: get_dist(x, ll))
i = np.argpartition(d, N_trips)[0:N_trips]
# Save file
file_name = './output_1/trip_' + str(row) + '.csv'
f = open(file_name, 'w')
print(file_name)
for item in i:
f.write("%s\n" % item)
f.close()
|
import ssz
from eth2.beacon.types.attester_slashings import AttesterSlashing
def test_defaults(sample_attester_slashing_params):
attester_slashing = AttesterSlashing(**sample_attester_slashing_params)
assert (
attester_slashing.slashable_attestation_1.validator_indices ==
sample_attester_slashing_params['slashable_attestation_1'].validator_indices
)
assert (
attester_slashing.slashable_attestation_2.custody_bitfield ==
sample_attester_slashing_params['slashable_attestation_2'].custody_bitfield
)
assert ssz.encode(attester_slashing)
|
import sys
import lib
import zipfile
import getpass
from sys import exit
from zipfile import ZipFile
if input("R : Run \nE : Exit\n::: ").upper() != "R" : exit()
user = getpass.getpass("enter username ::: ")
password = getpass.getpass("enter password ::: ")
result = lib.checkDB(".") # checks whether a Firebird database with lodash exists
if result[0] == False :
print("Fail :")
print(result[1])
input()
exit()
if result[0] == True :
newName = result[1][:-4] + lib.getDate()
oldFDBPath = result[1]
newFDBPath = newName + ".fdb"
zipName = newName + ".zip"
lib.copy(oldFDBPath,newFDBPath)
zipObject = ZipFile(
zipName
, 'w'
, compression = zipfile.ZIP_BZIP2
, compresslevel = 9)
print("compressing file...")
zipObject.write(newFDBPath)
zipObject.close()
lib.restoreLowDash(oldFDBPath)
print("local database is ready for work")
print("creating auxiliary copy...")
lib.deleteCopy(newFDBPath)
print("auxiliary copy deleted")
print("sending file...")
response = str(lib.sendFile(zipName, user, password))
print(response)
if response != "<Response [200]>" :
print("something went wrong, but now a database backup exists in the current dir :(")
else :
print("file sent")
print("everything went OK :)")
input()
|
from __future__ import absolute_import
from builtins import range
from functools import partial
import numpy as npo
try:
import scipy
except ImportError:
from warnings import warn
warn('Skipping scipy tests.')
else:
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.misc
import autograd.scipy.signal
import autograd.scipy.stats as stats
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.special as special
import autograd.scipy.linalg as spla
import autograd.scipy.integrate as integrate
from autograd import grad
from scipy.signal import convolve as sp_convolve
from autograd.test_util import combo_check, check_grads
from numpy_utils import unary_ufunc_check
npr.seed(1)
R = npr.randn
U = npr.uniform
# Fwd mode not yet implemented for scipy functions
combo_check = partial(combo_check, modes=['rev'])
unary_ufunc_check = partial(unary_ufunc_check, modes=['rev'])
check_grads = partial(check_grads, modes=['rev'])
def symmetrize_matrix_arg(fun, argnum):
def T(X): return np.swapaxes(X, -1, -2) if np.ndim(X) > 1 else X
def symmetrize(X): return 0.5 * (X + T(X))
def symmetrized_fun(*args, **kwargs):
args = list(args)
args[argnum] = symmetrize(args[argnum])
return fun(*args, **kwargs)
return symmetrized_fun
### Stats ###
def test_chi2_pdf(): combo_check(stats.chi2.pdf, [0])([R(4)**2 + 1.1], [1, 2, 3])
def test_chi2_cdf(): combo_check(stats.chi2.cdf, [0])([R(4)**2 + 1.1], [1, 2, 3])
def test_chi2_logpdf(): combo_check(stats.chi2.logpdf, [0])([R(4)**2 + 1.1], [1, 2, 3])
def test_beta_cdf(): combo_check(stats.beta.cdf, [0]) ([U(0., 1., 4)], [R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_beta_pdf(): combo_check(stats.beta.pdf, [0,1,2])([U(0., 1., 4)], [R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_beta_logpdf(): combo_check(stats.beta.logpdf, [0,1,2])([U(0., 1., 4)], [R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_gamma_cdf(): combo_check(stats.gamma.cdf, [0]) ([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_gamma_pdf(): combo_check(stats.gamma.pdf, [0,1])([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_gamma_logpdf(): combo_check(stats.gamma.logpdf, [0,1])([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_norm_pdf(): combo_check(stats.norm.pdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_cdf(): combo_check(stats.norm.cdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logpdf(): combo_check(stats.norm.logpdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logcdf(): combo_check(stats.norm.logcdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_pdf_broadcast(): combo_check(stats.norm.pdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_cdf_broadcast(): combo_check(stats.norm.cdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logpdf_broadcast(): combo_check(stats.norm.logpdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logcdf_broadcast(): combo_check(stats.norm.logcdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_poisson_cdf(): combo_check(stats.poisson.cdf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
def test_poisson_logpmf(): combo_check(stats.poisson.logpmf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
def test_poisson_pmf(): combo_check(stats.poisson.pmf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
def test_poisson_cdf_broadcast(): combo_check(stats.poisson.cdf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
def test_poisson_logpmf_broadcast(): combo_check(stats.poisson.logpmf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
def test_poisson_pmf_broadcast(): combo_check(stats.poisson.pmf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
def test_t_pdf(): combo_check(stats.t.pdf, [0,1,2,3])([R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_cdf(): combo_check(stats.t.cdf, [0,2])( [R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_logpdf(): combo_check(stats.t.logpdf, [0,1,2,3])([R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_logcdf(): combo_check(stats.t.logcdf, [0,2])( [R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_pdf_broadcast(): combo_check(stats.t.pdf, [0,1,2,3])([R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def test_t_cdf_broadcast(): combo_check(stats.t.cdf, [0,2])( [R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def test_t_logpdf_broadcast(): combo_check(stats.t.logpdf, [0,1,2,3])([R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def test_t_logcdf_broadcast(): combo_check(stats.t.logcdf, [0,2])( [R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def make_psd(mat): return np.dot(mat.T, mat) + np.eye(mat.shape[0])
def test_mvn_pdf(): combo_check(symmetrize_matrix_arg(mvn.pdf, 2), [0, 1, 2])([R(4)], [R(4)], [make_psd(R(4, 4))], allow_singular=[False])
def test_mvn_logpdf(): combo_check(symmetrize_matrix_arg(mvn.logpdf, 2), [0, 1, 2])([R(4)], [R(4)], [make_psd(R(4, 4))], allow_singular=[False])
def test_mvn_entropy():combo_check(mvn.entropy,[0, 1])([R(4)], [make_psd(R(4, 4))])
C = np.zeros((4, 4))
C[0, 0] = C[1, 1] = 1
# C += 1e-3 * np.eye(4)
def test_mvn_pdf_sing_cov(): combo_check(mvn.pdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True])
def test_mvn_logpdf_sing_cov(): combo_check(mvn.logpdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True])
def test_mvn_pdf_broadcast(): combo_check(symmetrize_matrix_arg(mvn.pdf, 2), [0, 1, 2])([R(5, 4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_logpdf_broadcast(): combo_check(symmetrize_matrix_arg(mvn.logpdf, 2), [0, 1, 2])([R(5, 4)], [R(4)], [make_psd(R(4, 4))])
alpha = npr.random(4)**2 + 1.2
x = stats.dirichlet.rvs(alpha, size=1)[0,:]
# Need to normalize input so that x's sum to one even when we perturb them to compute numeric gradient.
def normalize(x): return x / sum(x)
def normalized_dirichlet_pdf( x, alpha): return stats.dirichlet.pdf( normalize(x), alpha)
def normalized_dirichlet_logpdf(x, alpha): return stats.dirichlet.logpdf(normalize(x), alpha)
def test_dirichlet_pdf_x(): combo_check(normalized_dirichlet_pdf, [0])([x], [alpha])
def test_dirichlet_pdf_alpha(): combo_check(stats.dirichlet.pdf, [1])([x], [alpha])
def test_dirichlet_logpdf_x(): combo_check(normalized_dirichlet_logpdf, [0])([x], [alpha])
def test_dirichlet_logpdf_alpha(): combo_check(stats.dirichlet.logpdf, [1])([x], [alpha])
### Misc ###
def test_logsumexp1(): combo_check(autograd.scipy.misc.logsumexp, [0], modes=['fwd', 'rev'])([1.1, R(4), R(3,4)], axis=[None, 0], keepdims=[True, False])
def test_logsumexp2(): combo_check(autograd.scipy.misc.logsumexp, [0], modes=['fwd', 'rev'])([R(3,4), R(4,5,6), R(1,5)], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp3(): combo_check(autograd.scipy.misc.logsumexp, [0], modes=['fwd', 'rev'])([R(4)], b = [np.exp(R(4))], axis=[None, 0], keepdims=[True, False])
def test_logsumexp4(): combo_check(autograd.scipy.misc.logsumexp, [0], modes=['fwd', 'rev'])([R(3,4),], b = [np.exp(R(3,4))], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp5(): combo_check(autograd.scipy.misc.logsumexp, [0], modes=['fwd', 'rev'])([R(2,3,4)], b = [np.exp(R(2,3,4))], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp6():
x = npr.randn(1,5)
def f(a): return autograd.scipy.misc.logsumexp(a, axis=1, keepdims=True)
check_grads(f, modes=['fwd', 'rev'])(x)
check_grads(lambda a: grad(f)(a), modes=['fwd', 'rev'])(x)
### Signal ###
def test_convolve_generalization():
ag_convolve = autograd.scipy.signal.convolve
A_35 = R(3, 5)
A_34 = R(3, 4)
A_342 = R(3, 4, 2)
A_2543 = R(2, 5, 4, 3)
A_24232 = R(2, 4, 2, 3, 2)
for mode in ['valid', 'full']:
assert npo.allclose(ag_convolve(A_35, A_34, axes=([1], [0]), mode=mode)[1, 2],
sp_convolve(A_35[1,:], A_34[:, 2], mode))
assert npo.allclose(ag_convolve(A_35, A_34, axes=([],[]), dot_axes=([0], [0]), mode=mode),
npo.tensordot(A_35, A_34, axes=([0], [0])))
assert npo.allclose(ag_convolve(A_35, A_342, axes=([1],[2]),
dot_axes=([0], [0]), mode=mode)[2],
sum([sp_convolve(A_35[i, :], A_342[i, 2, :], mode)
for i in range(3)]))
assert npo.allclose(ag_convolve(A_2543, A_24232, axes=([1, 2],[2, 4]),
dot_axes=([0, 3], [0, 3]), mode=mode)[2],
sum([sum([sp_convolve(A_2543[i, :, :, j],
A_24232[i, 2, :, j, :], mode)
for i in range(2)]) for j in range(3)]))
def test_convolve():
combo_check(autograd.scipy.signal.convolve, [0,1])(
[R(4), R(5), R(6)],
[R(2), R(3), R(4)], mode=['full', 'valid'])
def test_convolve_2d():
combo_check(autograd.scipy.signal.convolve, [0, 1])(
[R(4, 3), R(5, 4), R(6, 7)],
[R(2, 2), R(3, 2), R(4, 2), R(4, 1)], mode=['full', 'valid'])
def test_convolve_ignore():
combo_check(autograd.scipy.signal.convolve, [0, 1])([R(4, 3)], [R(3, 2)],
axes=[([0],[0]), ([1],[1]), ([0],[1]), ([1],[0]), ([0, 1], [0, 1]), ([1, 0], [1, 0])],
mode=['full', 'valid'])
def test_convolve_ignore_dot():
combo_check(autograd.scipy.signal.convolve, [0, 1])([R(3, 3, 2)], [R(3, 2, 3)],
axes=[([1],[1])], dot_axes=[([0],[2]), ([0],[0])], mode=['full', 'valid'])
### Special ###
def test_beta(): combo_check(special.beta, [0,1])([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_betainc(): combo_check(special.betainc, [2]) ([R(4)**2 + 1.1], [R(4)**2 + 1.1], [U(0., 1., 4)])
def test_betaln(): combo_check(special.betaln, [0,1])([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_gammainc(): combo_check(special.gammainc, [1])([1], R(4)**2 + 1.3)
def test_gammaincc(): combo_check(special.gammaincc, [1])([1], R(4)**2 + 1.3)
def test_polygamma(): combo_check(special.polygamma, [1])([0], R(4)**2 + 1.3)
def test_jn(): combo_check(special.jn, [1])([2], R(4)**2 + 1.3)
def test_yn(): combo_check(special.yn, [1])([2], R(4)**2 + 1.3)
def test_psi(): unary_ufunc_check(special.psi, lims=[0.3, 2.0], test_complex=False)
def test_digamma(): unary_ufunc_check(special.digamma, lims=[0.3, 2.0], test_complex=False)
def test_gamma(): unary_ufunc_check(special.gamma, lims=[0.3, 2.0], test_complex=False)
def test_gammaln(): unary_ufunc_check(special.gammaln, lims=[0.3, 2.0], test_complex=False)
def test_gammasgn(): unary_ufunc_check(special.gammasgn,lims=[0.3, 2.0], test_complex=False)
def test_rgamma() : unary_ufunc_check(special.rgamma, lims=[0.3, 2.0], test_complex=False)
def test_multigammaln(): combo_check(special.multigammaln, [0])([U(4., 5.), U(4., 5., (2,3))],
[1, 2, 3])
def test_j0(): unary_ufunc_check(special.j0, lims=[0.2, 20.0], test_complex=False)
def test_j1(): unary_ufunc_check(special.j1, lims=[0.2, 20.0], test_complex=False)
def test_y0(): unary_ufunc_check(special.y0, lims=[0.2, 20.0], test_complex=False)
def test_y1(): unary_ufunc_check(special.y1, lims=[0.2, 20.0], test_complex=False)
def test_erf(): unary_ufunc_check(special.erf, lims=[-3., 3.], test_complex=True)
def test_erfc(): unary_ufunc_check(special.erfc, lims=[-3., 3.], test_complex=True)
def test_erfinv(): unary_ufunc_check(special.erfinv, lims=[-0.95, 0.95], test_complex=False)
def test_erfcinv(): unary_ufunc_check(special.erfcinv, lims=[0.05, 1.95], test_complex=False)
def test_logit(): unary_ufunc_check(special.logit, lims=[ 0.10, 0.90], test_complex=False)
def test_expit(): unary_ufunc_check(special.expit, lims=[-4.05, 4.95], test_complex=False)
### ODE integrator ###
def func(y, t, arg1, arg2):
return -np.sqrt(t) - y + arg1 - np.mean((y + arg2)**2)
def test_odeint():
combo_check(integrate.odeint, [1,2,3])([func], [R(3)], [np.linspace(0.1, 0.2, 4)],
[(R(3), R(3))])
|
import parse
import audit
import utils
import sys
import argparse
import os
def main():
# Definition of Help Text
# Argument Parsing
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='''\
This script takes a Cisco ACL definition and parses it into a searchable spreadsheet
Automated audit rules have been included for convenience, but are disabled by default.
This is to improve execution time.
======================================================================================''')
parser.add_argument('-o', '--out', nargs=1, help='Overwrite the name of the output file',
default=['ACL_Parsed.xlsx'])
parser.add_argument('-a', '--all', action='store_true', help='Perform all audits')
parser.add_argument('-r', '--redundant', action='store_true', help='Perform the redundant rules audit')
parser.add_argument('-s', '--shadow', action='store_true', help='Perform the shadowed rules audit -- NOT IMPLEMENTED')
parser.add_argument('-x', '--promiscuous', action='store_true', help='Perform the promiscuous rules audit')
parser.add_argument('infile', nargs='+', type=argparse.FileType('r'),
help='Path to the ACL Definition file (.txt format)')
args = parser.parse_args()
outfile = str(os.getcwd()) + "/" + str(args.out[0])
# print(args)
entries_table = []
errors_table = []
audit_table = []
audit_type = []
for acl in args.infile:
entries, errors = parse.parse(acl)
entries_table.append(entries[:])
errors_table.append(errors[:])
if args.all:
audit_type = [True, True, True]
else:
audit_type = [args.promiscuous, args.redundant, args.shadow]
audit_table = audit.audit(entries_table, audit_type)
utils.output_xlsx_file(entries_table, errors_table, audit_type, audit_table, outfile)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from djorm_core.postgresql import server_side_cursors
from .models import TestModel
class ServerSideCursorsTest(TestCase):
@classmethod
def setUpClass(cls):
TestModel.objects.bulk_create([TestModel(num=x) for x in range(200)])
@classmethod
def tearDownClass(cls):
TestModel.objects.all().delete()
def test_simple_01(self):
with self.assertNumQueries(1):
self.assertEqual(len([x for x in TestModel.objects.all()]), 200)
def test_simple_02(self):
with self.assertNumQueries(1):
with server_side_cursors():
self.assertEqual(len([x for x in TestModel.objects.all()]), 200)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
""" Utility module for the workflow."""
import logging
import os
import pprint
import yaml
from colorlog import ColoredFormatter
from tabulate import tabulate
# Classes ---------------------------------------------------------------------
class DataManager(object):
""" Class for a data manager read data manifests.
Methods of this class can be used to access the content of the data
manifest file in various ways.
"""
def __init__(self, infile):
if os.path.exists(infile):
self._infile = infile
else:
raise IOError("Input data manifest file does not exist")
# Read and parse the data manifest file
self._data = self.parse_data_manifest()
def __len__(self):
return len(self.data)
@property
def data(self):
""" Data property accessor."""
return self._data
def count(self, **kwargs):
""" Count the number of feaures in a given grouping.
For allowed query keys, see get_resources().
"""
return len(self.get_resources(**kwargs))
def get_category(self, **kwargs):
""" Get the category associated with a hierarchy level.
Following hierarchy levels can be used:
- collection
- subcollection
:param **kwargs: key-value -pair, where key must be in ["collection",
"subcollection"].
:return list of String name(s) of the category.
"""
# List allowed query keys
allowed_keys = ["collection", "subcollection"]
# Get the key provided.
query_keys = list(kwargs.keys())
# Only one query key allowed.
if len(query_keys) > 1:
raise ValueError("Only one query key allowed at time")
query_key = query_keys[0]
# Multiple collections can have the same name. Store matches into a
# list.
collection_match = []
# Check that the query key is allowed
if query_key in allowed_keys:
# Construct the actual query key
item_key = "{}_name".format(query_key)
for item in self.data:
# collection category is always present, subcollection category
# not necessarily.
if item_key in list(item.keys()):
if item[item_key] == kwargs[query_key]:
collection_match.append(item['collection_category'])
else:
raise ValueError("Allowed query args are: {}".format(", ".join(allowed_keys)))
if len(collection_match) > 1:
print("WARNING: multiple collection with the same name")
return collection_match
def get_collection(self, **kwargs):
""" Get the ceollection associated with a hierarchy level.
Following hierarchy levels can be used:
- uri
- provider
- collection
:param **kwargs: key-value -pair, where key must be in ["uri",
"provider", "collection"].
:return list of collections.
"""
pass
def get_resources(self, full_path=False, **kwargs):
""" Get the resources associated with a hierarchy level.
Following hierarchy levels can be used:
- uri
- provider
- collection
- subcollection
- category
:param **kwargs: key-value -pair, where key must be in ["uri",
"provider", "collection", "subcollection".
:return list of String file names.
"""
# List allowed query keys
allowed_keys = ["uri", "provider", "collection", "subcollection",
"category"]
# Get the key provided.
query_keys = list(kwargs.keys())
# Only one query key allowed.
if len(query_keys) > 1:
raise ValueError("Only one query key allowed at time")
query_key = query_keys[0]
resource_match = []
# Check that the query key is allowed
if query_key in allowed_keys:
# Construct the actual query key
if query_key == "uri":
item_key = query_key
elif query_key == "category":
item_key = "collection_category"
else:
item_key = "{}_name".format(query_key)
for item in self.data:
# uri, provider and collection category are always present,
# subcollection category not necessarily.
if item_key in list(item.keys()):
if item[item_key] == kwargs[query_key]:
if full_path:
url = "{0}/{1}/{2}".format(item['uri'],
item['provider_name'],
item['collection_name'])
if "subcollection_name" in list(item.keys()):
url = "{0}/{1}".format(url,
item['subcollection_name'])
resources = []
for resource in item['collection_resources']:
resources.append("{0}/{1}".format(url, resource))
else:
resources = item['collection_resources']
resource_match += resources
else:
raise ValueError("Allowed query args are: {}".format(", ".join(allowed_keys)))
return resource_match
def get_tabular(self, **kwargs):
""" Get a hierarchy level as a tabular table.
Following hierarchy levels can be used:
- collection
- subcollection
:param **kwargs: key-value -pair, where key must be in ["collection",
"subcollection"]
:return list of String file names.
"""
# List allowed query keys
allowed_keys = ["collection", "subcollection"]
# Get the key provided.
query_keys = list(kwargs.keys())
# Only one query key allowed.
if len(query_keys) > 1:
raise ValueError("Only one query key allowed at time")
query_key = query_keys[0]
tablerows = []
# Check that the query key is allowed
if query_key in allowed_keys:
# Construct the actual query key
item_key = "{}_name".format(query_key)
for item in self.data:
# collection category is always present,
# subcollection category not necessarily.
if item_key in list(item.keys()):
if item[item_key] == kwargs[query_key]:
for resource in item['collection_resources']:
spp_name = self.resource_to_name(resource)
if query_key == "collection":
if "subcollection_name" in list(item.keys()):
tablerows.append([item['subcollection_name'],
spp_name])
else:
tablerows.append([spp_name])
elif query_key == "subcollection":
tablerows.append([spp_name])
else:
raise ValueError("Allowed query args are: {}".format(", ".join(allowed_keys)))
if len(tablerows[0]) == 1:
table = tabulate(tablerows, headers=["species"])
elif len(tablerows[0]) == 2:
table = tabulate(tablerows, headers=["group", "species"])
return(table)
def parse_data_manifest(self):
""" Parse datasets from a data manifest file.
Current implementation can work with the following hierarchy:
[URI]: dict
"provider": str
"collections": list
[NAME]: dict
"category": str
"metadata": list
"resources": list
"""
def parse_collection(collection, collection_name):
collection_data = {}
collection_item = collection[collection_name]
# Get the values for checking
collection_keys = list(collection_item.keys())
# Category is optional
if 'category' in collection_keys:
collection_data['collection_category'] = collection_item['category']
# Metadata is optional
if 'metadata' in collection_keys:
collection_data['collection_metadata'] = collection_item['metadata']
# Resources is required
if 'resources' in collection_keys:
collection_data['collection_resources'] = collection_item['resources']
else:
raise ValueError("Collection {} contains no resources".format(collection_name))
return collection_data
items = []
with open(self._infile, 'r') as manifest_file:
data_manifest = yaml.safe_load(manifest_file)
for item in data_manifest:
# Item should only have one key, which is the URI
uri = list(item.keys())[0]
# Loop over the provider content. A single provider can have
# multiple collections.
for provider_content in item[uri]:
# Get the textual name of the provider
provider_name = provider_content['provider']
# Loop over the collections for this provider
for collection in provider_content['collections']:
# Collection name is the only key
collection_name = list(collection.keys())[0]
# Check whether collection item is a dict or a list
# (indicating subgroups)
if isinstance(collection[collection_name], dict):
collection_data = parse_collection(collection,
collection_name)
collection_data['uri'] = uri
collection_data['provider_name'] = provider_name
collection_data['collection_name'] = collection_name
items.append(collection_data)
elif isinstance(collection[collection_name], list):
for subcollection in collection[collection_name]:
subcollection_name = list(subcollection.keys())[0]
subcollection_data = parse_collection(subcollection,
subcollection_name)
subcollection_data['uri'] = uri
subcollection_data['provider_name'] = provider_name
subcollection_data['collection_name'] = collection_name
subcollection_data['subcollection_name'] = subcollection_name
items.append(subcollection_data)
else:
raise ValueError("Invalid collection type")
return items
def resource_to_name(self, resource):
""" Convert raster file name to a species name."""
spp_name = resource.replace(".tif", "").replace("_", " ").capitalize()
return(spp_name)
def manifest(self):
pprint.pprint(self.data)
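# A minimal usage sketch of DataManager (editor's addition): the manifest file
# name, provider and collection names below are hypothetical.
def _data_manager_example(manifest_file="data_manifest.yaml"):
    dm = DataManager(manifest_file)
    # All resources (as full URLs) belonging to provider "provider_a"
    resources = dm.get_resources(full_path=True, provider="provider_a")
    # Number of features in a collection named "birds"
    n_birds = dm.count(collection="birds")
    return resources, n_birds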
# Functions -------------------------------------------------------------------
def get_iteration_prefix(i, total):
""" Return a String prefix for itarative task phases.
:param i int current step.
:param total int total steps.
"""
return " [{0}/{1}]".format(i, total)
def get_local_logger(name, log_file=None, debug=False):
""" Return a local logger."""
date_format = "%Y-%m-%d %H:%M:%S"
colFormatter = ColoredFormatter("%(log_color)s %(message)s%(reset)s",
datefmt=date_format,
reset=True,
log_colors={'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
llogger = logging.getLogger(name)
llogger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(colFormatter)
if not debug:
consoleHandler.setLevel(logging.INFO)
llogger.addHandler(consoleHandler)
if log_file is not None:
fileFormatter = logging.Formatter("%(asctime)s [%(name)-10s] " +
"[%(levelname)-5.5s] %(message)s",
datefmt=date_format)
fileHandler = logging.FileHandler(log_file, mode='w')
fileHandler.setFormatter(fileFormatter)
llogger.addHandler(fileHandler)
return llogger
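# A minimal usage sketch of get_local_logger (editor's addition): the logger
# name and log file path below are hypothetical.
def _logger_example():
    llogger = get_local_logger("workflow", log_file="workflow.log", debug=True)
    llogger.info("colored console output plus plain-text file logging")
    return llogger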
def pick_from_list(items, suffix):
""" Pick an element from list ending with suffix.
If no match is found, return None.
:param items list of items to be searched.
:param suffix String suffix defining the match.
"""
match = None
for item in items:
if item.endswith(suffix):
match = item
return match
def process_stdout(x, prefix=""):
"""Process stdout string returned by a shell command.
If x is None, return an empty list.
"""
if x is None or x == "":
return []
x = x.decode('utf-8')
return [prefix + " " + item for item in x.split("\n") if item != ""]
|
from sagemaker_rl.coach_launcher import SageMakerCoachPresetLauncher
class MyLauncher(SageMakerCoachPresetLauncher):
def default_preset_name(self):
"""This points to a .py file that configures everything about the RL job.
It can be overridden at runtime by specifying the RLCOACH_PRESET hyperparameter.
"""
return "preset-cartpole-dqn"
def map_hyperparameter(self, name, value):
"""Here we configure some shortcut names for hyperparameters that we expect to use frequently.
Essentially anything in the preset file can be overridden through a hyperparameter with a name
like "rl.agent_params.algorithm.etc".
"""
# maps from alias (key) to fully qualified coach parameter (value)
mapping = {
"discount": "rl.agent_params.algorithm.discount",
"evaluation_episodes": "rl.evaluation_steps:EnvironmentEpisodes",
"improve_steps": "rl.improve_steps:TrainingSteps",
}
if name in mapping:
self.apply_hyperparameter(mapping[name], value)
else:
super().map_hyperparameter(name, value)
if __name__ == "__main__":
MyLauncher.train_main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, Gaudenz Steinlin <[email protected]>
# Copyright: (c) 2019, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: server
short_description: Manages servers on the cloudscale.ch IaaS service
description:
- Create, update, start, stop and delete servers on the cloudscale.ch IaaS service.
notes:
- If I(uuid) option is provided, it takes precedence over I(name) for server selection. This allows updating the server's name.
- If no I(uuid) option is provided, I(name) is used for server selection. If more than one server with this name exists, execution is aborted.
- Only the I(name) and I(flavor) are evaluated for the update.
- The option I(force=true) must be given to allow the reboot of existing running servers for applying the changes.
author:
- Gaudenz Steinlin (@gaudenz)
- René Moser (@resmo)
- Denis Krienbühl (@href)
version_added: "1.0.0"
options:
state:
description:
- State of the server.
choices: [ running, stopped, absent ]
default: running
type: str
name:
description:
- Name of the Server.
- Either I(name) or I(uuid) are required.
type: str
uuid:
description:
- UUID of the server.
- Either I(name) or I(uuid) are required.
type: str
flavor:
description:
- Flavor of the server.
type: str
image:
description:
- Image used to create the server.
type: str
zone:
description:
- Zone in which the server resides (e.g. C(lpg1) or C(rma1)).
type: str
volume_size_gb:
description:
- Size of the root volume in GB.
default: 10
type: int
bulk_volume_size_gb:
description:
- Size of the bulk storage volume in GB.
- No bulk storage volume if not set.
type: int
ssh_keys:
description:
- List of SSH public keys.
- Use the full content of your .pub file here.
type: list
elements: str
password:
description:
- Password for the server.
type: str
use_public_network:
description:
- Attach a public network interface to the server.
type: bool
use_private_network:
description:
- Attach a private network interface to the server.
type: bool
use_ipv6:
description:
- Enable IPv6 on the public network interface.
default: yes
type: bool
interfaces:
description:
- List of network interface objects specifying the interfaces to be attached to the server.
See U(https://www.cloudscale.ch/en/api/v1/#interfaces-attribute-specification) for more details.
type: list
elements: dict
version_added: 1.4.0
suboptions:
network:
description:
- Create a network interface on the network identified by UUID.
Use 'public' instead of an UUID to attach a public network interface.
Can be omitted if a subnet is provided under addresses.
type: str
addresses:
description:
- Attach a private network interface and configure a subnet and/or an IP address.
type: list
elements: dict
suboptions:
subnet:
description:
- UUID of the subnet from which an address will be assigned.
type: str
address:
description:
- The static IP address of the interface. Use '[]' to avoid assigning an IP address via DHCP.
type: str
server_groups:
description:
- List of UUID or names of server groups.
type: list
elements: str
user_data:
description:
- Cloud-init configuration (cloud-config) data to use for the server.
type: str
force:
description:
- Allow to stop the running server for updating if necessary.
default: no
type: bool
tags:
description:
- Tags associated with the servers. Set this to C({}) to clear any tags.
type: dict
extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
'''
EXAMPLES = '''
# Create and start a server with an existing server group (shiny-group)
- name: Start cloudscale.ch server
cloudscale_ch.cloud.server:
name: my-shiny-cloudscale-server
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
server_groups: shiny-group
zone: lpg1
use_private_network: True
bulk_volume_size_gb: 100
api_token: xxxxxx
# Start another server in anti-affinity (server group shiny-group)
- name: Start second cloudscale.ch server
cloudscale_ch.cloud.server:
name: my-other-shiny-server
image: ubuntu-16.04
flavor: flex-8
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
server_groups: shiny-group
zone: lpg1
api_token: xxxxxx
# Force to update the flavor of a running server
- name: Start cloudscale.ch server
cloudscale_ch.cloud.server:
name: my-shiny-cloudscale-server
image: debian-10
flavor: flex-8
force: yes
ssh_keys: ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
use_private_network: True
bulk_volume_size_gb: 100
api_token: xxxxxx
register: server1
# Stop the first server
- name: Stop my first server
cloudscale_ch.cloud.server:
uuid: '{{ server1.uuid }}'
state: stopped
api_token: xxxxxx
# Delete my second server
- name: Delete my second server
cloudscale_ch.cloud.server:
name: my-other-shiny-server
state: absent
api_token: xxxxxx
# Start a server and wait for the SSH host keys to be generated
- name: Start server and wait for SSH host keys
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-ssh-key
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
register: server
until: server is not failed
retries: 5
delay: 2
# Start a server with two network interfaces:
#
# A public interface with IPv4/IPv6
# A private interface on a specific private network with an IPv4 address
- name: Start a server with a public and private network interface
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-two-network-interfaces
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
interfaces:
- network: 'public'
- addresses:
- subnet: UUID_of_private_subnet
# Start a server with a specific IPv4 address from subnet range
- name: Start a server with a specific IPv4 address from subnet range
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-specific-address
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
interfaces:
- addresses:
- subnet: UUID_of_private_subnet
address: 'A.B.C.D'
# Start a server with two network interfaces:
#
# A public interface with IPv4/IPv6
# A private interface on a specific private network with no IPv4 address
- name: Start a server with a private network interface and no IP address
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-specific-address
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
interfaces:
- network: 'public'
- network: UUID_of_private_network
addresses: []
'''
RETURN = '''
href:
description: API URL to get details about this server
returned: success when not state == absent
type: str
sample: https://api.cloudscale.ch/v1/servers/cfde831a-4e87-4a75-960f-89b0148aa2cc
uuid:
description: The unique identifier for this server
returned: success
type: str
sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
name:
description: The display name of the server
returned: success
type: str
sample: its-a-me-mario.cloudscale.ch
state:
description: The current status of the server
returned: success
type: str
sample: running
flavor:
description: The flavor that has been used for this server
returned: success when not state == absent
type: dict
sample: { "slug": "flex-4", "name": "Flex-4", "vcpu_count": 2, "memory_gb": 4 }
image:
description: The image used for booting this server
returned: success when not state == absent
type: dict
sample: { "default_username": "ubuntu", "name": "Ubuntu 18.04 LTS", "operating_system": "Ubuntu", "slug": "ubuntu-18.04" }
zone:
description: The zone used for booting this server
returned: success when not state == absent
type: dict
sample: { 'slug': 'lpg1' }
volumes:
description: List of volumes attached to the server
returned: success when not state == absent
type: list
sample: [ {"type": "ssd", "device": "/dev/vda", "size_gb": "50"} ]
interfaces:
description: List of network ports attached to the server
returned: success when not state == absent
type: list
sample: [ { "type": "public", "addresses": [ ... ] } ]
ssh_fingerprints:
  description: A list of SSH host key fingerprints. Will be null until the host keys can be retrieved from the server.
returned: success when not state == absent
type: list
sample: ["ecdsa-sha2-nistp256 SHA256:XXXX", ... ]
ssh_host_keys:
  description: A list of SSH host keys. Will be null until the host keys can be retrieved from the server.
returned: success when not state == absent
type: list
sample: ["ecdsa-sha2-nistp256 XXXXX", ... ]
server_groups:
description: List of server groups
returned: success when not state == absent
type: list
sample: [ {"href": "https://api.cloudscale.ch/v1/server-groups/...", "uuid": "...", "name": "db-group"} ]
tags:
  description: Tags associated with the server.
returned: success
type: dict
sample: { 'project': 'my project' }
'''
from datetime import datetime, timedelta
from time import sleep
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.api import (
AnsibleCloudscaleBase,
cloudscale_argument_spec,
)
ALLOWED_STATES = ('running',
'stopped',
'absent',
)
class AnsibleCloudscaleServer(AnsibleCloudscaleBase):
def __init__(self, module):
super(AnsibleCloudscaleServer, self).__init__(module)
# Initialize server dictionary
self._info = {}
def _init_server_container(self):
return {
'uuid': self._module.params.get('uuid') or self._info.get('uuid'),
'name': self._module.params.get('name') or self._info.get('name'),
'state': 'absent',
}
def _get_server_info(self, refresh=False):
if self._info and not refresh:
return self._info
self._info = self._init_server_container()
uuid = self._info.get('uuid')
if uuid is not None:
server_info = self._get('servers/%s' % uuid)
if server_info:
self._info = self._transform_state(server_info)
else:
name = self._info.get('name')
if name is not None:
servers = self._get('servers') or []
matching_server = []
for server in servers:
if server['name'] == name:
matching_server.append(server)
if len(matching_server) == 1:
self._info = self._transform_state(matching_server[0])
elif len(matching_server) > 1:
self._module.fail_json(msg="More than one server with name '%s' exists. "
"Use the 'uuid' parameter to identify the server." % name)
return self._info
@staticmethod
def _transform_state(server):
if 'status' in server:
server['state'] = server['status']
del server['status']
else:
server['state'] = 'absent'
return server
def _wait_for_state(self, states):
start = datetime.now()
timeout = self._module.params['api_timeout'] * 2
while datetime.now() - start < timedelta(seconds=timeout):
server_info = self._get_server_info(refresh=True)
if server_info.get('state') in states:
return server_info
sleep(1)
        # Timeout reached without the server entering one of the expected states
if server_info.get('name') is not None:
msg = "Timeout while waiting for a state change on server %s to states %s. " \
"Current state is %s." % (server_info.get('name'), states, server_info.get('state'))
else:
name_uuid = self._module.params.get('name') or self._module.params.get('uuid')
msg = 'Timeout while waiting to find the server %s' % name_uuid
self._module.fail_json(msg=msg)
def _start_stop_server(self, server_info, target_state="running", ignore_diff=False):
actions = {
'stopped': 'stop',
'running': 'start',
}
server_state = server_info.get('state')
if server_state != target_state:
self._result['changed'] = True
if not ignore_diff:
self._result['diff']['before'].update({
'state': server_info.get('state'),
})
self._result['diff']['after'].update({
'state': target_state,
})
if not self._module.check_mode:
self._post('servers/%s/%s' % (server_info['uuid'], actions[target_state]))
server_info = self._wait_for_state((target_state, ))
return server_info
def _update_param(self, param_key, server_info, requires_stop=False):
param_value = self._module.params.get(param_key)
if param_value is None:
return server_info
if 'slug' in server_info[param_key]:
server_v = server_info[param_key]['slug']
else:
server_v = server_info[param_key]
if server_v != param_value:
# Set the diff output
self._result['diff']['before'].update({param_key: server_v})
self._result['diff']['after'].update({param_key: param_value})
if server_info.get('state') == "running":
if requires_stop and not self._module.params.get('force'):
self._module.warn("Some changes won't be applied to running servers. "
"Use force=yes to allow the server '%s' to be stopped/started." % server_info['name'])
return server_info
# Either the server is stopped or change is forced
self._result['changed'] = True
if not self._module.check_mode:
if requires_stop:
self._start_stop_server(server_info, target_state="stopped", ignore_diff=True)
patch_data = {
param_key: param_value,
}
# Response is 204: No Content
self._patch('servers/%s' % server_info['uuid'], patch_data)
# State changes to "changing" after update, waiting for stopped/running
server_info = self._wait_for_state(('stopped', 'running'))
return server_info
def _get_server_group_ids(self):
server_group_params = self._module.params['server_groups']
if not server_group_params:
return None
matching_group_names = []
results = []
server_groups = self._get('server-groups')
for server_group in server_groups:
if server_group['uuid'] in server_group_params:
results.append(server_group['uuid'])
server_group_params.remove(server_group['uuid'])
elif server_group['name'] in server_group_params:
results.append(server_group['uuid'])
server_group_params.remove(server_group['name'])
# Remember the names found
matching_group_names.append(server_group['name'])
# Names are not unique, verify if name already found in previous iterations
elif server_group['name'] in matching_group_names:
self._module.fail_json(msg="More than one server group with name exists: '%s'. "
"Use the 'uuid' parameter to identify the server group." % server_group['name'])
if server_group_params:
self._module.fail_json(msg="Server group name or UUID not found: %s" % ', '.join(server_group_params))
return results
def _create_server(self, server_info):
self._result['changed'] = True
self.normalize_interfaces_param()
data = deepcopy(self._module.params)
for i in ('uuid', 'state', 'force', 'api_timeout', 'api_token', 'api_url'):
del data[i]
data['server_groups'] = self._get_server_group_ids()
self._result['diff']['before'] = self._init_server_container()
self._result['diff']['after'] = deepcopy(data)
if not self._module.check_mode:
self._post('servers', data)
server_info = self._wait_for_state(('running', ))
return server_info
def _update_server(self, server_info):
previous_state = server_info.get('state')
        # The API does not support updating server groups.
        # Show a warning to the user if the desired state does not match.
desired_server_group_ids = self._get_server_group_ids()
if desired_server_group_ids is not None:
current_server_group_ids = [grp['uuid'] for grp in server_info['server_groups']]
if desired_server_group_ids != current_server_group_ids:
self._module.warn("Server groups can not be mutated, server needs redeployment to change groups.")
# Remove interface properties that were not filled out by the user
self.normalize_interfaces_param()
# Compare the interfaces as specified by the user, with the interfaces
# as received by the API. The structures are somewhat different, so
# they need to be evaluated in detail
wanted = self._module.params.get('interfaces')
actual = server_info.get('interfaces')
try:
update_interfaces = not self.has_wanted_interfaces(wanted, actual)
except KeyError as e:
self._module.fail_json(
msg="Error checking 'interfaces', missing key: %s" % e.args[0])
if update_interfaces:
server_info = self._update_param('interfaces', server_info)
if not self._result['changed']:
self._result['changed'] = server_info['interfaces'] != actual
server_info = self._update_param('flavor', server_info, requires_stop=True)
server_info = self._update_param('name', server_info)
server_info = self._update_param('tags', server_info)
if previous_state == "running":
server_info = self._start_stop_server(server_info, target_state="running", ignore_diff=True)
return server_info
def present_server(self):
server_info = self._get_server_info()
if server_info.get('state') != "absent":
            # If the target state is stopped, stop before a potential update so that force is not required
if self._module.params.get('state') == "stopped":
server_info = self._start_stop_server(server_info, target_state="stopped")
server_info = self._update_server(server_info)
if self._module.params.get('state') == "running":
server_info = self._start_stop_server(server_info, target_state="running")
else:
server_info = self._create_server(server_info)
server_info = self._start_stop_server(server_info, target_state=self._module.params.get('state'))
return server_info
def absent_server(self):
server_info = self._get_server_info()
if server_info.get('state') != "absent":
self._result['changed'] = True
self._result['diff']['before'] = deepcopy(server_info)
self._result['diff']['after'] = self._init_server_container()
if not self._module.check_mode:
self._delete('servers/%s' % server_info['uuid'])
server_info = self._wait_for_state(('absent', ))
return server_info
def has_wanted_interfaces(self, wanted, actual):
""" Compares the interfaces as specified by the user, with the
interfaces as reported by the server.
"""
if len(wanted or ()) != len(actual or ()):
return False
def match_interface(spec):
# First, find the interface that belongs to the spec
for interface in actual:
# If we have a public network, only look for the right type
if spec.get('network') == 'public':
if interface['type'] == 'public':
break
# If we have a private network, check the network's UUID
if spec.get('network') is not None:
if interface['type'] == 'private':
if interface['network']['uuid'] == spec['network']:
break
# If we only have an addresses block, match all subnet UUIDs
wanted_subnet_ids = set(
a['subnet'] for a in (spec.get('addresses') or ()))
actual_subnet_ids = set(
a['subnet']['uuid'] for a in interface['addresses'])
if wanted_subnet_ids == actual_subnet_ids:
break
else:
return False # looped through everything without match
# Fail if any of the addresses don't match
for wanted_addr in (spec.get('addresses') or ()):
# Unspecified, skip
if 'address' not in wanted_addr:
continue
addresses = set(a['address'] for a in interface['addresses'])
if wanted_addr['address'] not in addresses:
return False
# If the wanted address is an empty list, but the actual list is
# not, the user wants to remove automatically set addresses
if spec.get('addresses') == [] and interface['addresses'] != []:
return False
if interface['addresses'] == [] and spec.get('addresses') != []:
return False
return interface
for spec in wanted:
# If there is any interface that does not match, clearly not all
# wanted interfaces are present
if not match_interface(spec):
return False
return True
def normalize_interfaces_param(self):
""" Goes through the interfaces parameter and gets it ready to be
sent to the API. """
for spec in (self._module.params.get('interfaces') or ()):
if spec['addresses'] is None:
del spec['addresses']
if spec['network'] is None:
del spec['network']
for address in (spec.get('addresses') or ()):
if address['address'] is None:
del address['address']
if address['subnet'] is None:
del address['subnet']
def main():
argument_spec = cloudscale_argument_spec()
argument_spec.update(dict(
state=dict(default='running', choices=ALLOWED_STATES),
name=dict(),
uuid=dict(),
flavor=dict(),
image=dict(),
zone=dict(),
volume_size_gb=dict(type='int', default=10),
bulk_volume_size_gb=dict(type='int'),
ssh_keys=dict(type='list', elements='str', no_log=False),
password=dict(no_log=True),
use_public_network=dict(type='bool'),
use_private_network=dict(type='bool'),
use_ipv6=dict(type='bool', default=True),
interfaces=dict(
type='list',
elements='dict',
options=dict(
network=dict(type='str'),
addresses=dict(
type='list',
elements='dict',
options=dict(
address=dict(type='str'),
subnet=dict(type='str'),
),
),
),
),
server_groups=dict(type='list', elements='str'),
user_data=dict(),
force=dict(type='bool', default=False),
tags=dict(type='dict'),
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(
['interfaces', 'use_public_network'],
['interfaces', 'use_private_network'],
),
required_one_of=(('name', 'uuid'),),
supports_check_mode=True,
)
cloudscale_server = AnsibleCloudscaleServer(module)
if module.params['state'] == "absent":
server = cloudscale_server.absent_server()
else:
server = cloudscale_server.present_server()
result = cloudscale_server.get_result(server)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
'''
Advanced features
1. Slicing
2. Iteration
3. List comprehensions
4. Generators
5. Iterators
'''
#%% Slicing
# Similar to Matlab's colon expression, except that the order is
# start : stop : step
# Negative indices are also supported
# Any of the three parts can be omitted
L = list(range(100))
print(L[0:10])
print(L[90:-1])
print(L[95:])
print(L[:10])
print(L[:10:2])
print(L[:])
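# Added illustration (not part of the original notes): a negative step
# walks the sequence backwards, and negative indices count from the end.
print(L[::-1][:3])
print(L[-10:])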
#%% Iteration
# Plain iteration
L = {'a': 1, 'b': 2, 3: 3}
for k in L:
print(k, L[k])
# Key/value iteration
for k, v in L.items():
print(k, v)
# Index iteration
L = [1, 2, 3, 4, 5]
for i, v in enumerate(L):
print(i, v)
#%% List comprehensions
# Single variable, single level
print([x**2 for x in list(range(10))])
# Multiple variables, single level
print([x + '=' + v for x, v in {'a': 'X', 'b': 'y'}.items()])
# Nested (two-level) comprehension
print([x + y for x in range(3) for y in range(4)])
# Comprehension with a condition
L = ['Hello', 'World', 18, 'Apple', None]
L1 = [x.lower() for x in L if isinstance(x, str)]
print(L1)
#%% Generators
# A generator saves memory compared with building a full list
# Useful when the values are only consumed once, in order
# Generator expression
# Simply replace the square brackets of a list comprehension with parentheses
g = (x**2 for x in range(10))
for x in g:
print(x)
#%% Generator functions
# A generator function differs from a normal function in that yield replaces return
# When execution reaches yield, the value is handed back to the caller
# The next call resumes right after the yield
def fib(max):
n, a, b = 0, 0, 1
while n < max:
yield b
a, b = b, a + b
n = n + 1
g = fib(10)
for x in g:
print(x)
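# Added illustration (not in the original notes): next() resumes the generator
# right after the last yield; StopIteration is raised once the function returns.
g = fib(3)
print(next(g))  # 1
print(next(g))  # 1
print(next(g))  # 2
try:
    next(g)
except StopIteration:
    print('generator exhausted')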
#%% Iterators
# Anything that can be used in a for loop is an Iterable
# Generators are Iterators
# list, set, dict and str are Iterable but not Iterator
# (these ABCs live in collections.abc; importing them from collections was removed in Python 3.10)
from collections.abc import Iterable
from collections.abc import Iterator
print(isinstance({}, Iterable))
print(isinstance({}, Iterator))
print(isinstance(range(10), Iterable))
print(isinstance(range(10), Iterator))
print(isinstance((x for x in range(3)), Iterator))
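# Added illustration (not in the original notes): iter() turns any Iterable
# into an Iterator, which can then be driven manually with next().
it = iter([1, 2, 3])
print(isinstance(it, Iterator))
print(next(it), next(it), next(it))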
|
'''
Given an array of characters chars, compress it using the following algorithm:
Begin with an empty string s. For each group of consecutive repeating characters in chars:
If the group's length is 1, append the character to s.
Otherwise, append the character followed by the group's length.
The compressed string s should not be returned separately, but instead be stored in the input character array chars. Note that group lengths that are 10 or longer will be split into multiple characters in chars.
After you are done modifying the input array, return the new length of the array.
You must write an algorithm that uses only constant extra space.
Input: chars = ["a","a","b","b","c","c","c"]
Output: Return 6, and the first 6 characters of the input array should be: ["a","2","b","2","c","3"]
Explanation: The groups are "aa", "bb", and "ccc". This compresses to "a2b2c3".
Input: chars = ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
Output: Return 4, and the first 4 characters of the input array should be: ["a","b","1","2"].
Explanation: The groups are "a" and "bbbbbbbbbbbb". This compresses to "ab12".
Precondition:
n = len(chars)
n >= 1
characters are ASCII
a single group can exceed 100 characters
Postcondition:
chars is modified in place
Cases:
C1: groups of length 1 only
C2: mix of length-1 and longer groups
C3: groups of length >= 10 (multi-digit counts)
Algo:
two pointers: left points into the compressed prefix, right scans the original characters
for each group write the character, then its length if greater than 1; a length of 10 or more
is written digit by digit, which still fits because the compressed form never exceeds the group length
Runtime: O(n)
Space: O(1)
'''
from typing import List


class Solution:
    def compress(self, chars: List[str]) -> int:
        left = 0   # next write position in the compressed prefix
        right = 0  # scans the original characters
        while right < len(chars):
            char = chars[right]
            count = 0
            # consume the whole group of consecutive identical characters
            while right < len(chars) and chars[right] == char:
                right += 1
                count += 1
            chars[left] = char
            left += 1
            if count > 1:
                # The compressed form of a group never exceeds the group length,
                # so the count digits can safely be written in place.
                for c in str(count):
                    chars[left] = c
                    left += 1
        return left
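# Quick check against the two examples from the problem statement
# (added illustration, not part of the original solution):
if __name__ == '__main__':
    chars1 = ["a", "a", "b", "b", "c", "c", "c"]
    n1 = Solution().compress(chars1)
    print(n1, chars1[:n1])  # 6 ['a', '2', 'b', '2', 'c', '3']

    chars2 = ["a"] + ["b"] * 12
    n2 = Solution().compress(chars2)
    print(n2, chars2[:n2])  # 4 ['a', 'b', '1', '2']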
|
import torch
from torch.autograd import Function
from torch.autograd import Variable
from ..build.lib import SpaVar
class SpaVarFunction(Function) :
@staticmethod
def forward(ctx, ref_feas, tar_feas, ref_mask, tar_mask, disparity, max_disp) :
"""sparse matching while forwarding
Args:
ref_feas, tar_feas: feature map of left/right view, Batch*Channel*Height*Width;
ref_mask, tar_mask: mask of left/right view, Batch*Height*Width;
max_disp: the maximmum disparity in current scale;
Returns:
output: the computed disparity map, Batch*Height*Width;
"""
assert(ref_feas.is_contiguous() == True and tar_feas.is_contiguous() == True)
assert(ref_mask.is_contiguous() == True and tar_mask.is_contiguous() == True)
with torch.cuda.device_of(ref_feas) :
output = ref_mask.new().resize_(ref_mask.size()).zero_()
sum_similarities = ref_mask.new().resize_(ref_mask.size()).zero_()
max_cost = ref_mask.new().resize_(ref_mask.size()).zero_()
SpaVar.sparse_var_cuda_forward(ref_feas, tar_feas, ref_mask, tar_mask, disparity, output, sum_similarities, max_cost, max_disp)
output = output.contiguous()
sum_similarities = sum_similarities.contiguous()
ctx.save_for_backward(ref_feas, tar_feas, ref_mask, tar_mask, disparity, output, sum_similarities, max_cost)
ctx.max_disp = max_disp
return output
@staticmethod
def backward(ctx, grad_output) :
ref_feas, tar_feas, ref_mask, tar_mask, disparity, output, sum_similarities, max_cost = ctx.saved_tensors
max_disp = ctx.max_disp
assert(grad_output.is_contiguous() == True)
with torch.cuda.device_of(grad_output) :
grad_ref_feas = ref_feas.new().resize_(ref_feas.size()).zero_()
grad_tar_feas = tar_feas.new().resize_(tar_feas.size()).zero_()
grad_disparity = disparity.new().resize_(disparity.size()).zero_()
SpaVar.sparse_var_cuda_backward(ref_feas, tar_feas, ref_mask, tar_mask, disparity, output, sum_similarities, max_cost,
grad_output, grad_ref_feas, grad_tar_feas, grad_disparity, max_disp)
# print(grad_tar_feas.max())
grad_ref_feas = grad_ref_feas.contiguous()
grad_tar_feas = grad_tar_feas.contiguous()
grad_disparity = grad_disparity.contiguous()
# print(grad_ref_feas.shape, grad_tar_feas.shape, grad_disparity.shape, disparity.shape)
# print(grad_output.max(), grad_ref_feas.max(), grad_tar_feas.max(), torch.isnan(grad_output).sum(), torch.isnan(grad_ref_feas).sum(), torch.isnan(grad_tar_feas).sum())
return grad_ref_feas, grad_tar_feas, Variable(torch.Tensor([0])), Variable(torch.Tensor([0])), grad_disparity, None
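def _example_usage():
    """Minimal usage sketch (added for illustration, not called anywhere).

    It assumes the SpaVar CUDA extension has been built and a GPU is available.
    Tensor shapes follow the docstring above (features B*C*H*W, masks and
    disparity B*H*W); the concrete sizes below are arbitrary.
    """
    B, C, H, W = 1, 32, 64, 128
    max_disp = 48
    ref_feas = torch.randn(B, C, H, W, device='cuda').contiguous()
    tar_feas = torch.randn(B, C, H, W, device='cuda').contiguous()
    ref_mask = (torch.rand(B, H, W, device='cuda') > 0.5).float().contiguous()
    tar_mask = (torch.rand(B, H, W, device='cuda') > 0.5).float().contiguous()
    disparity = torch.zeros(B, H, W, device='cuda', requires_grad=True)
    # Custom autograd Functions are invoked through .apply()
    out = SpaVarFunction.apply(ref_feas, tar_feas, ref_mask, tar_mask, disparity, max_disp)
    out.sum().backward()
    print(out.shape, disparity.grad.shape)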
|
# Create a program that reads a number and shows its double, triple and square root.
print("========== Exercise 6 ==========")
numero = int(input("Enter a number: "))
dobro = numero * 2
triplo = numero * 3
raiz = numero ** 0.5
print(f"The double of {numero} is {dobro} \n the triple of {numero} is {triplo} \n the square root of {numero} is {raiz}")
|
""" Hand Tracker on Depth Image based on https://www.learnopencv.com/object-tracking-using-opencv-cpp-python/
"""
import os
import PIL
import glob
import numpy as np
from matplotlib import pyplot as plt
import cv2
import json
import configparser
import csv
DATASET_BASE_DIR_NAME = r"D:\git\HandPointer\dataset"
def get_local_minima(img_d, img_c):
img_d[200:, :] = 10000
scale = 1/8
    confidence_threshold = 100
morph_kernel = np.ones((9, 9), np.uint8)
h, w = img_d.shape[:2]
sh = int(h*scale)
sw = int(w*scale)
    # cv2.resize expects the destination size as (width, height)
    imgd_scaled = cv2.resize(img_d, (sw, sh))
    imgc_scaled = cv2.resize(img_c, (sw, sh))
    mask = imgc_scaled > confidence_threshold
fimgd = cv2.morphologyEx(imgd_scaled, cv2.MORPH_BLACKHAT, morph_kernel)
fimg = np.multiply(fimgd, mask.astype(np.uint8))
inv_mask = np.invert(mask)
imgd_scaled[inv_mask] = 10000
# imgd_scaled = np.multiply(imgd_scaled, mask.astype(np.uint8))
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgd_scaled, mask.astype(np.uint8))
imgd_scaled = imgd_scaled-400
cimg = (imgd_scaled.clip(min=0, max=600)/5).astype(np.uint8)
cimg = cv2.cvtColor(cimg, cv2.COLOR_GRAY2BGR)
cimg = cv2.drawMarker(cimg, min_loc, (0, 0, 0))
cimg = cv2.resize(cimg, (500, 500))
cv2.imshow("dpeth", cimg)
cv2.waitKey(1)
# print(min_loc, min_val)
# ax1 = plt.subplot(121)
# plt.imshow(mask)
# plt.subplot(122, sharex=ax1, sharey=ax1)
# plt.imshow(cimg), plt.title("after top hat")
# plt.show()
def check_velocity():
# json_file_name = os.path.join(DATASET_BASE_DIR_NAME, "result.json")
# if os.path.isfile(json_file_name):
# with open(json_file_name, "r", encoding='utf8') as fid:
# datasets_info = json.load(fid)
# dataset = datasets_info[id]
    csv_file_names = ['D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#fast_circles.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#gestures_two_hands.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#gestures_two_hands_swap.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#sequence_closed_hand.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#sequence_open_hand.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#sequence_small_shapes.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_ccw.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_ccw_far.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_ccw_hand.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_sequence.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#multiple_shapes_1.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#rectangle_ccw.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#rectangle_cw.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#star.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#zigzag.csv',
'D:\git\HandPointer\dataset\#git#HandPointer#dataset#ds325#fast_circles.csv']
csv_file_name = os.path.join(r"D:\git\HandPointer\dataset", "#git#ajay#DepthSensing#dataset#ds325#fast_circles.csv")
csv_file_name = csv_file_names[13]
trajectory_data = np.zeros((800, 3))
with open(csv_file_name, "r") as fid:
reader = csv.reader(fid)
header = next(reader)
for row in reader:
if len(row)<4:
continue
file_id = int(row[0])
x = int(row[1])
y = int(row[2])
d = int(row[3])
trajectory_data[file_id,0] = x
trajectory_data[file_id,1] = y
trajectory_data[file_id,2] = d
velocity = np.zeros((800, 2))
step = 5
velocity[step:,0] = trajectory_data[step:,0] - trajectory_data[:-step,0]
velocity[step:,1] = trajectory_data[step:,1] - trajectory_data[:-step,1]
velocity_norm = np.linalg.norm(velocity, axis=1)
stop = (velocity_norm < 5).astype(np.uint8)
plt.subplot(311)
plt.plot(trajectory_data[:,0],'b')
plt.plot(trajectory_data[:,1],'r')
plt.plot(stop*100, 'g'), plt.title("xy")
plt.subplot(312), plt.plot(velocity[:,0],'b')
plt.plot(velocity[:,1],'r'), plt.title("velocity xy")
    plt.subplot(313), plt.plot(velocity_norm,'b'), plt.title("velocity norm")
plt.plot(stop*100, 'g')
plt.show()
def get_datasets():
datasets = [
r"ds325\fast_circles",
r"ds325\gestures_two_hands",
r"ds325\gestures_two_hands_swap",
r"ds325\sequence_closed_hand",
r"ds325\sequence_open_hand",
r"ds325\sequence_small_shapes",
r"ds536\circle_ccw",
r"ds536\circle_ccw_far",
r"ds536\circle_ccw_hand",
r"ds536\circle_sequence",
r"ds536\multiple_shapes_1",
r"ds536\rectangle_ccw",
r"ds536\rectangle_cw",
r"ds536\star",
r"ds536\zigzag",
]
datasets = [os.path.join(DATASET_BASE_DIR_NAME, dataset) for dataset in datasets]
return datasets
datasets_info = [{
"base_dir_name" : r"D:\git\HandPointer\dataset\ds325\gestures_two_hands_swap",
"max_file_count" : 600,
"init_frame_id" : 50
}]
def calc_tajectory(file_id, loc, img_d, img_c):
# x = int(bbox[0] + bbox[2])//2
# y = int(bbox[1] + bbox[3])//2
x = int(loc[0])
y = int(loc[1])
depth = img_d[y,x]
confidence = img_c[y, x]
trajectory = {
"file_id" : file_id,
"finger_tip" : {
"x": x,
"y": y,
},
"depth": depth,
"confidence": confidence
}
return trajectory
def create_video_from_results():
dataset_dir_names = get_datasets()
for dataset_dir_name in dataset_dir_names:
camera_file_name = os.path.join(os.path.dirname(dataset_dir_name), "camera_parameters.txt")
mtx, dist, newcameramtx = read_camera_parameter(camera_file_name)
video_file_name = dataset_dir_name.replace(DATASET_BASE_DIR_NAME,"")[1:]
video_file_name = video_file_name.replace("\\", "_") + "_result.avi"
video_file_name = os.path.join(DATASET_BASE_DIR_NAME, video_file_name)
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'DIVX'), 60, (320, 240))
file_names = glob.glob(os.path.join(dataset_dir_name, "*_result.png"), recursive=True)
for file_name in file_names:
if not os.path.isfile(file_name):
continue
img = np.array(PIL.Image.open(file_name))
img = img[:,:,::-1]
img = cv2.undistort(img, mtx, dist, None, newcameramtx)
out.write(img)
out.release()
def create_video():
dataset_dir_names = get_datasets()
for dataset_dir_name in dataset_dir_names:
camera_file_name = os.path.join(os.path.dirname(dataset_dir_name), "camera_parameters.txt")
mtx, dist, newcameramtx = read_camera_parameter(camera_file_name)
video_file_name = dataset_dir_name.replace(DATASET_BASE_DIR_NAME,"")[1:]
video_file_name = video_file_name.replace("\\", "_") + "_depth.avi"
video_file_name = os.path.join(DATASET_BASE_DIR_NAME, video_file_name)
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'DIVX'), 60, (320, 240))
file_names = glob.glob(os.path.join(dataset_dir_name, "*_depth.tiff"), recursive=True)
for file_name in file_names:
confidence_file_name = file_name.replace("depth", "confidence")
if not os.path.isfile(file_name) or not os.path.isfile(confidence_file_name):
continue
            img_d = np.array(PIL.Image.open(file_name)).astype(float)*0.1
            img_c = np.array(PIL.Image.open(confidence_file_name)).astype(float)*0.1
img_d = np.clip(img_d, 0, 255).astype(np.uint8)
img_c = np.clip(img_c, 0, 255).astype(np.uint8)
img_d = cv2.undistort(img_d, mtx, dist, None, newcameramtx)
img_c = cv2.undistort(img_c, mtx, dist, None, newcameramtx)
img_out = np.zeros((*img_d.shape, 3), dtype=np.uint8)
img_out[:,:, 0] = img_c.astype(np.uint8)
img_out[:,:, 1] = img_d.astype(np.uint8)
img_out[:,:, 2] = img_d.astype(np.uint8)
out.write(img_out)
out.release()
def read_camera_parameter(file_name):
# file_name = r"D:\git\HandPointer\dataset\ds325\camera_parameters.txt"
config = configparser.ConfigParser()
config.read(file_name)
data = {}
for key in config['camera']:
data[key] = float(config['camera'][key])
mtx = np.eye(3)
mtx[0, 0] = data['focal_x']
mtx[1, 1] = data['focal_y']
mtx[0, 2] = data['center_x']
mtx[1, 2] = data['center_y']
dist = (data['k1'], data['k2'], data['p1'], data['p2'], data['k3'])
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (320, 240), 1, (320, 240))
# print(mtx, dist)
# print(newcameramtx, roi)
return mtx, dist, newcameramtx
def get_depth(img_d, img_c, bbox):
x1 = int(bbox[0])
y1 = int(bbox[1])
x2 = int(bbox[0] + bbox[2])
y2 = int(bbox[1] + bbox[3])
img_d_roi = img_d[y1:y2, x1:x2]
img_c_roi = img_c[y1:y2, x1:x2]
min_val, max_c_val, min_loc, max_c_loc = cv2.minMaxLoc(img_c_roi)
mask = (img_c_roi > max_c_val*0.3).astype(np.uint8)*255
min_d_val, max_d_val, min_loc, max_d_loc = cv2.minMaxLoc(img_d_roi, mask)
max_d_loc = (x1+max_d_loc[0], y1 + max_d_loc[1] )
cv2.imshow("cropped_mask", mask)
return max_d_val, max_c_val, max_d_loc
def test_dataset(id=0, n_traj=2):
DEPTH_TH = 30
tracker_type = 'BOOSTING'
trackers = []
for traj_id in range(n_traj):
trackers.append(cv2.TrackerBoosting_create())
json_file_name = os.path.join(DATASET_BASE_DIR_NAME, "result.json")
if os.path.isfile(json_file_name):
with open(json_file_name, "r", encoding='utf8') as fid:
datasets_info = json.load(fid)
dataset = datasets_info[id]
init_frame_id = dataset['init_frame_id']
dataset_dir_name = dataset['base_dir_name']
max_file_count = dataset['max_file_count']
dataset['trajectories'] = {}
print( dataset['base_dir_name'])
camera_file_name = os.path.join(os.path.dirname(dataset_dir_name), "camera_parameters.txt")
mtx, dist, newcameramtx = read_camera_parameter(camera_file_name)
hsv = plt.cm.get_cmap('hsv', max_file_count)
trajectory_img = np.zeros((240, 320, 3), dtype=np.uint8)
for file_id in range(max_file_count):
depth_file_name = os.path.join(dataset_dir_name, r"{0:06d}_depth.tiff".format(file_id))
confi_file_name = os.path.join(dataset_dir_name, r"{0:06d}_confidence.tiff".format(file_id))
if not os.path.isfile(depth_file_name) or not os.path.isfile(confi_file_name):
print( " file not found:", depth_file_name)
continue
        img_d = np.array(PIL.Image.open(depth_file_name)).astype(float)
        img_c = np.array(PIL.Image.open(confi_file_name)).astype(float)
img_d = 2500 - img_d
img_d = cv2.undistort(img_d, mtx, dist, None, newcameramtx)
img_c = cv2.undistort(img_c, mtx, dist, None, newcameramtx)
img_d_norm = np.clip(img_d*0.1,0, 255).astype(np.uint8)
img_d_norm = cv2.cvtColor(img_d_norm, cv2.COLOR_GRAY2BGR)
# # Define an initial bounding box
# bbox = (287, 23, 86, 320)
# Uncomment the line below to select a different bounding box
if file_id < init_frame_id:
continue
elif file_id == init_frame_id:
for traj_id in range(n_traj):
bbox_key = 'bbox{0}'.format(traj_id+1)
if bbox_key in dataset:
dataset[bbox_key] = [int(val) for val in dataset[bbox_key]]
bbox = tuple(dataset[bbox_key])
else:
bbox = cv2.selectROI(img_d_norm, False)
print("{0}: ".format(bbox_key), bbox)
dataset[bbox_key] = bbox
depth, confidence, max_d_loc = get_depth(img_d, img_c, bbox)
img_d_norm_mask = (img_d_norm > depth*0.1 - DEPTH_TH).astype(np.uint8)*255
img_d_norm_masked = cv2.bitwise_and(img_d_norm, img_d_norm_mask)
ok = trackers[traj_id].init(img_d_norm_masked, bbox)
traj = calc_tajectory(file_id, max_d_loc, img_d, img_c)
traj_key = "traj_{0}".format(traj_id + 1)
dataset['trajectories'][traj_key] = [traj]
# # display log image
# cv2.imshow("img_d_norm", img_d_norm)
# cv2.imshow("img_d_norm_masked", img_d_norm_masked)
# cv2.waitKey(0)
continue
# Start timer
timer = cv2.getTickCount()
# depth = get_depth(img_d, img_c, bbox)
# # display log image
# cv2.imshow("img_d_norm", img_d_norm)
# cv2.imshow("img_d_norm_masked", img_d_norm_masked)
# cv2.waitKey(0)
oks = []
bboxes = []
        for traj_id in range(n_traj):
            # Use the last recorded depth of this trajectory, not whatever traj_key
            # happened to be left over from the initialization loop.
            traj_key = "traj_{0}".format(traj_id + 1)
            depth = dataset['trajectories'][traj_key][-1]['depth']
            img_d_norm_mask = (img_d_norm > depth*0.1 - DEPTH_TH).astype(np.uint8)*255
            img_d_norm_masked = cv2.bitwise_and(img_d_norm, img_d_norm_mask)
ok, bbox = trackers[traj_id].update(img_d_norm_masked)
oks.append(ok)
bboxes.append(bbox)
# Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
# Draw bounding box
if all(oks):
# Tracking success
for traj_id in range(n_traj):
p1 = (int(bboxes[traj_id][0]), int(bboxes[traj_id][1]))
p2 = (int(bboxes[traj_id][0] + bboxes[traj_id][2]), int(bboxes[traj_id][1] + bboxes[traj_id][3]))
cv2.rectangle(img_d_norm_masked, p1, p2, (255, 0, 0), 2, 1)
depth, confidence, max_d_loc1 = get_depth(img_d, img_c, bboxes[traj_id])
cv2.drawMarker(img_d_norm_masked, max_d_loc1, (255, 0, 255))
traj = calc_tajectory(file_id, max_d_loc1, img_d, img_c)
traj_key = "traj_{0}".format(traj_id + 1)
dataset['trajectories'][traj_key].append(traj)
p1 = dataset['trajectories'][traj_key][-2]['finger_tip']
p1 = (int(p1['x']), int(p1['y']))
p2 = dataset['trajectories'][traj_key][-1]['finger_tip']
p2 = (int(p2['x']), int(p2['y']))
color_val = hsv(file_id)[:3]
color_val = (color_val[0]*255, color_val[1]*255, color_val[2]*255)
cv2.line(trajectory_img, p1,p2, color_val)
else:
# Tracking failure
cv2.putText(img_d_norm_masked, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
# Display tracker type on frame
cv2.putText(img_d_norm_masked, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
# Display FPS on frame
cv2.putText(img_d_norm_masked, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
# Display result
cv2.imshow("Tracking", img_d_norm_masked)
cv2.imshow("Trajectory", trajectory_img)
# Exit if ESC pressed
k = cv2.waitKey(1) & 0xff
if k == 27:
break
# get_local_minima(img_d, img_c)
with open(json_file_name, "w", encoding='utf8') as fid:
json.dump(datasets_info, fid, indent=4)
if __name__ == "__main__":
create_video_from_results()
# test_dataset(id=15, n_traj=1)
# check_velocity() |
from openmdao.recorders.case_reader import CaseReader
cr = CaseReader('propulsor.db')
print(cr.system_cases.list_cases()) |
# -*- coding: utf-8 -*-
"""
@Time : 2022/1/14 3:34 PM
@Author : hcai
@Email : [email protected]
"""
import os
import time
file_root = os.path.dirname(__file__)
import sys
sys.path.append(file_root)
from nlg.run import Service
class Summarization(object):
def __init__(self, model_name, mode='predict', **kwargs):
self.model = Service(model_name, mode, **kwargs)
self.mode = mode
self.kwargs = kwargs
def run(self,text=[]):
b0 = time.time()
if self.mode == 'train':
res = self.model.run_train()
elif self.mode == 'evaluate':
res = self.model.run_evaluate(**self.kwargs)
else:
res = self.model.run_predict(text)
print('cost {}'.format(time.time() - b0))
return res
if __name__ == '__main__':
    # When training, resume (bool) controls whether training continues from a previous checkpoint; the predict and evaluate stages do not need this parameter
summarize = Summarization("point-net", mode='predict', use_word=False, **{"data_dir": '/Volumes/work/project/unlp/unlp/supervised/nlg/data/weibo',
"model_path":"/Volumes/work/project/unlp/unlp/supervised/nlg/data/weibo/saved_dict/point-net/point-net.pt"})
res = summarize.run(text=['艺龙网并购两家旅游网站,封基上周溃退 未有明显估值优势,中华女子学院:本科层次仅1专业招男生'])
print(res) |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from rodan.celery import app
class TaskQueueActiveView(APIView):
"""
Returns the list of active Celery tasks.
"""
permission_classes = (permissions.IsAdminUser,)
def get(self, request, format=None):
inspect = app.control.inspect()
return Response(inspect.active())
class TaskQueueConfigView(APIView):
"""
Returns the config of Celery queue.
"""
permission_classes = (permissions.IsAdminUser,)
def get(self, request, format=None):
inspect = app.control.inspect()
return Response(inspect.conf())
class TaskQueueScheduledView(APIView):
"""
Returns the list of scheduled Celery tasks.
"""
permission_classes = (permissions.IsAdminUser,)
def get(self, request, format=None):
inspect = app.control.inspect()
return Response(inspect.scheduled())
class TaskQueueStatusView(APIView):
"""
Returns the status of Celery queue.
"""
permission_classes = (permissions.IsAdminUser,)
def get(self, request, format=None):
inspect = app.control.inspect()
return Response(inspect.stats())
|
# Generated by Django 2.2.9 on 2020-01-21 01:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20200121_1017'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='avartar',
new_name='avatar',
),
migrations.AddField(
model_name='user',
name='birthdate',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='user',
name='currency',
field=models.CharField(blank=True, choices=[('usd', 'USD'), ('krw', 'KRW')], max_length=3, null=True),
),
migrations.AddField(
model_name='user',
name='language',
field=models.CharField(blank=True, choices=[('en', 'English'), ('kr', 'Korean')], max_length=2, null=True),
),
migrations.AddField(
model_name='user',
name='superhost',
field=models.BooleanField(default=False),
),
]
|
import json
import logging
from functools import wraps
from django.http import HttpResponse
from rest_framework_auth0.settings import (
auth0_api_settings,
)
from rest_framework_auth0.utils import (
get_auth_token,
get_roles_from_payload,
)
logger = logging.getLogger(__name__)
def json_response(response_dict, status=200):
response = HttpResponse(
json.dumps(response_dict),
content_type="application/json",
status=status
)
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
return response
"""
TODO: Verify if the token is valid and not expired(need to decode before verify)
TODO: Test if the decorators work
"""
class token_required(object):
def __init__(self, view_func):
self.view_func = view_func
wraps(view_func)(self)
def __call__(self, request, *args, **kwargs):
# maybe do something before the view_func call
# print(request.method)
# print ("----hello")
        if request.method == 'OPTIONS':
            return self.view_func(request, *args, **kwargs)
auth_header = request.META.get('HTTP_AUTHORIZATION', None)
if auth_header is not None:
tokens = auth_header.split(' ')
if len(tokens) == 2 and \
tokens[0] == auth0_api_settings.AUTH_HEADER_PREFIX:
token = tokens[1]
# get called view
response = self.view_func(request, *args, **kwargs)
else:
response = json_response(
{"msg": "Not valid token"},
status=401
)
else:
response = json_response(
{"msg": "Missing token"},
status=401
)
# maybe do something after the view_func call
# print ("----bye")
return response
class is_authenticated(object):
def __init__(self, view_func):
self.view_func = view_func
wraps(view_func)(self)
def __call__(self, request, *args, **kwargs):
# maybe do something before the view_func call
# print(request.method)
# print ("----hello")
        if request.method == 'OPTIONS':
            return self.view_func(request, *args, **kwargs)
if request.user.is_authenticated():
# get called view
response = self.view_func(request, *args, **kwargs)
else:
response = json_response(
{"msg": "Not authenticated"},
status=401
)
# maybe do something after the view_func call
# print ("----bye")
return response
class with_role(object):
def __init__(self, view_func):
self.view_func = view_func
wraps(view_func)(self)
def __call__(self, request, *args, **kwargs):
# maybe do something before the view_func call
# print(request.method)
# print ("----hello")
        if request.method == 'OPTIONS':
            return self.view_func(request, *args, **kwargs)
jwt = get_auth_token(request)
try:
# TODO: get payload from token
payload = {}
roles = get_roles_from_payload(payload)
if(len(roles) > 0):
# get called view
response = self.view_func(request, *args, **kwargs)
else:
response = json_response(
{"msg": "User has no roles"},
status=401
)
except Exception as e:
response = json_response(
{"msg": str(e)},
status=401
)
# pass
# maybe do something after the view_func call
# print ("----bye")
return response
|
import unittest
import logging as l
from astroutilities.logger import *
class BaseLoggerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_applevel_logger(log_level=l.DEBUG)
def test_simple_info_log(self):
log = get_logger("test1")
with self.assertLogs() as captured:
log.info("Info logs")
self.assertEqual(1, len(captured.records))
def test_simple_debug_log(self):
log = get_logger("module")
with self.assertLogs(level='DEBUG') as captured:
log.debug("Debug logs")
self.assertEqual(1, len(captured.records))
if __name__ == '__main__':
unittest.main()
|
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.views.generic import TemplateView
from persona.models import Persona
from openpyxl import Workbook
from django.http.response import HttpResponse
# Create your views here.
class HomeView(ListView):
model = Persona
template_name = 'persona/index.html'
paginate_by = 10
class PersonaDetailView(DetailView):
model = Persona
template_name = 'persona/persona_detail.html'
class PersonaCreateView(CreateView):
model = Persona
template_name = 'persona/persona_create.html'
fields = ['numIdentificacion', 'tipoIdentificacion', 'apellidoPaterno','apellidoMaterno','nombres','fechaNacimiento','sexo','telefono','direccion','email','evento']
class PersonaUpdateView(UpdateView):
model = Persona
template_name = 'persona/persona_create.html'
fields = ['numIdentificacion', 'tipoIdentificacion', 'apellidoPaterno','apellidoMaterno','nombres','fechaNacimiento','sexo','telefono','direccion','email','evento']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['edit'] = True
return context
class PersonaDeleteView(DeleteView):
model = Persona
success_url = reverse_lazy('persona:homePersona')
template_name = 'persona/confirm_persona_deletion.html'
class ConsolidadoPersona(TemplateView):
def get(self,request,*args,**kwargs):
personas = Persona.objects.all()
wb = Workbook()
ws = wb.active
ws['B1'] = 'CONSOLIDADO DE PERSONAS'
ws.merge_cells('B1:L1')
ws['B3'] = 'NO IDENTIFICACION'
ws['C3'] = 'TIPO IDENTIFICACION'
ws['D3'] = 'APELLIDO PATERNO'
ws['E3'] = 'APELLIDO MATERNO'
ws['F3'] = 'NOMBRES'
ws['G3'] = 'FECHA NACIMIENTO'
ws['H3'] = 'SEXO'
ws['I3'] = 'TELEFONO'
ws['J3'] = 'DIRECCION'
ws['K3'] = 'EMAIL'
#ws['L3'] = 'EVENTO'
cont = 4
for persona in personas:
ws.cell(row = cont, column = 2).value = persona.numIdentificacion
ws.cell(row = cont, column = 3).value = persona.tipoIdentificacion
ws.cell(row = cont, column = 4).value = persona.apellidoPaterno
ws.cell(row = cont, column = 5).value = persona.apellidoMaterno
ws.cell(row = cont, column = 6).value = persona.nombres
ws.cell(row = cont, column = 7).value = persona.fechaNacimiento
ws.cell(row = cont, column = 8).value = persona.sexo
ws.cell(row = cont, column = 9).value = persona.telefono
ws.cell(row = cont, column = 10).value = persona.direccion
ws.cell(row = cont, column = 11).value = persona.email
#ws.cell(row = cont, column = 12).value = persona.evento
cont+=1
nombre_archivo = "ConsolidadoPersona.xlsx"
        response = HttpResponse(content_type="application/ms-excel")
        content = "attachment; filename={0}".format(nombre_archivo)
response['Content-Disposition'] = content
wb.save(response)
return response |
import src.bpmnextract
import src.data
import src.capabilities
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import link_local_config
import ipv6_address
class address(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/gigabitethernet/ipv6/ipv6-config/address. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__use_link_local_only','__link_local_config','__ipv6_address',)
_yang_name = 'address'
_rest_name = 'address'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__use_link_local_only = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="use-link-local-only", rest_name="use-link-local-only", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure automatically computed link-local address', u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__link_local_config = YANGDynClass(base=link_local_config.link_local_config, is_container='container', presence=False, yang_name="link-local-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)
self.__ipv6_address = YANGDynClass(base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'gigabitethernet', u'ipv6', u'ipv6-config', u'address']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'GigabitEthernet', u'ipv6', u'address']
def _get_use_link_local_only(self):
"""
Getter method for use_link_local_only, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_config/address/use_link_local_only (empty)
"""
return self.__use_link_local_only
def _set_use_link_local_only(self, v, load=False):
"""
Setter method for use_link_local_only, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_config/address/use_link_local_only (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_use_link_local_only is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_use_link_local_only() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="use-link-local-only", rest_name="use-link-local-only", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure automatically computed link-local address', u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """use_link_local_only must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="use-link-local-only", rest_name="use-link-local-only", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure automatically computed link-local address', u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__use_link_local_only = t
if hasattr(self, '_set'):
self._set()
def _unset_use_link_local_only(self):
self.__use_link_local_only = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="use-link-local-only", rest_name="use-link-local-only", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure automatically computed link-local address', u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
def _get_link_local_config(self):
"""
Getter method for link_local_config, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_config/address/link_local_config (container)
"""
return self.__link_local_config
def _set_link_local_config(self, v, load=False):
"""
Setter method for link_local_config, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_config/address/link_local_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_local_config.link_local_config, is_container='container', presence=False, yang_name="link-local-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_local_config.link_local_config, is_container='container', presence=False, yang_name="link-local-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)""",
})
self.__link_local_config = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_config(self):
self.__link_local_config = YANGDynClass(base=link_local_config.link_local_config, is_container='container', presence=False, yang_name="link-local-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'callpoint': u'phy-intf-ipv6-cfg-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)
def _get_ipv6_address(self):
"""
Getter method for ipv6_address, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_config/address/ipv6_address (list)
"""
return self.__ipv6_address
def _set_ipv6_address(self, v, load=False):
"""
Setter method for ipv6_address, mapped from YANG variable /interface/gigabitethernet/ipv6/ipv6_config/address/ipv6_address (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_address() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_address must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)""",
})
self.__ipv6_address = t
if hasattr(self, '_set'):
self._set()
def _unset_ipv6_address(self):
self.__ipv6_address = YANGDynClass(base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)
use_link_local_only = __builtin__.property(_get_use_link_local_only, _set_use_link_local_only)
link_local_config = __builtin__.property(_get_link_local_config, _set_link_local_config)
ipv6_address = __builtin__.property(_get_ipv6_address, _set_ipv6_address)
_pyangbind_elements = {'use_link_local_only': use_link_local_only, 'link_local_config': link_local_config, 'ipv6_address': ipv6_address, }
|
import time
from ops import *
from utils import *
from tensorflow.python.client import timeline
from graphviz import Digraph
import json
def profile(run_metadata, epoch=0):
with open('profs/timeline_step' + str(epoch) + '.json', 'w') as f:
# Create the Timeline object, and write it to a json file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
f.write(chrome_trace)
def graph_to_dot(graph):
dot = Digraph()
for n in graph.as_graph_def().node:
dot.node(n.name, label=n.name)
for i in n.input:
dot.edge(i, n.name)
return dot
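# Usage note (not part of the original code): the two helpers above can also be used on their own, e.g.
#   dot = graph_to_dot(tf.get_default_graph()); dot.render('graph', format='pdf')
# and the chrome-trace JSON files written by profile() can be opened in chrome://tracing.
# profile() writes into 'profs/', so that directory must exist beforehand.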
class ResNet(object):
def __init__(self, sess, args):
self.model_name = 'ResNet'
self.sess = sess
self.dataset_name = args.dataset
if self.dataset_name == 'cifar10':
self.train_x, self.train_y, self.test_x, self.test_y = load_cifar10()
self.img_size = 32
self.c_dim = 3
self.label_dim = 10
if self.dataset_name == 'cifar100':
self.train_x, self.train_y, self.test_x, self.test_y = load_cifar100()
self.img_size = 32
self.c_dim = 3
self.label_dim = 100
if self.dataset_name == 'mnist':
self.train_x, self.train_y, self.test_x, self.test_y = load_mnist()
self.img_size = 28
self.c_dim = 1
self.label_dim = 10
if self.dataset_name == 'fashion-mnist':
self.train_x, self.train_y, self.test_x, self.test_y = load_fashion()
self.img_size = 28
self.c_dim = 1
self.label_dim = 10
if self.dataset_name == 'tiny':
self.train_x, self.train_y, self.test_x, self.test_y = load_tiny()
self.img_size = 64
self.c_dim = 3
self.label_dim = 200
self.checkpoint_dir = args.checkpoint_dir
self.log_dir = args.log_dir
self.res_n = args.res_n
self.epoch = args.epoch
self.batch_size = args.batch_size
self.iteration = len(self.train_x) // self.batch_size
self.init_lr = args.lr
##################################################################################
# Generator
##################################################################################
def network(self, x, is_training=True, reuse=False):
with tf.variable_scope("network", reuse=reuse):
if self.res_n < 50:
residual_block = resblock
else:
residual_block = bottle_resblock
residual_list = get_residual_layer(self.res_n)
ch = 8 # paper is 64
x = conv(x, channels=ch, kernel=3, stride=1, scope='conv')
for i in range(residual_list[0]):
x = residual_block(x, channels=ch, is_training=is_training,
downsample=False, scope='resblock0_' + str(i))
########################################################################################################
x = residual_block(
x, channels=ch*2, is_training=is_training, downsample=True, scope='resblock1_0')
for i in range(1, residual_list[1]):
x = residual_block(x, channels=ch*2, is_training=is_training,
downsample=False, scope='resblock1_' + str(i))
########################################################################################################
x = residual_block(
x, channels=ch*4, is_training=is_training, downsample=True, scope='resblock2_0')
for i in range(1, residual_list[2]):
x = residual_block(x, channels=ch*4, is_training=is_training,
downsample=False, scope='resblock2_' + str(i))
########################################################################################################
x = residual_block(
x, channels=ch*8, is_training=is_training, downsample=True, scope='resblock_3_0')
for i in range(1, residual_list[3]):
x = residual_block(x, channels=ch*8, is_training=is_training,
downsample=False, scope='resblock_3_' + str(i))
########################################################################################################
x = batch_norm(x, is_training, scope='batch_norm')
x = relu(x)
x = global_avg_pooling(x)
x = fully_conneted(x, units=self.label_dim, scope='logit')
return x
##################################################################################
# Model
##################################################################################
def build_model(self):
""" Graph Input """
        self.train_inputs = tf.placeholder(tf.float32, [
            self.batch_size, self.img_size, self.img_size, self.c_dim], name='train_inputs')
self.train_labels = tf.placeholder(
tf.float32, [self.batch_size, self.label_dim], name='train_labels')
        self.test_inputs = tf.placeholder(tf.float32, [len(
            self.test_x), self.img_size, self.img_size, self.c_dim], name='test_inputs')
self.test_labels = tf.placeholder(
tf.float32, [len(self.test_y), self.label_dim], name='test_labels')
self.lr = tf.placeholder(tf.float32, name='learning_rate')
""" Model """
        self.train_logits = self.network(self.train_inputs)
        self.test_logits = self.network(
            self.test_inputs, is_training=False, reuse=True)
self.train_loss, self.train_accuracy = classification_loss(
logit=self.train_logits, label=self.train_labels)
self.test_loss, self.test_accuracy = classification_loss(
logit=self.test_logits, label=self.test_labels)
reg_loss = tf.losses.get_regularization_loss()
self.train_loss += reg_loss
self.test_loss += reg_loss
""" Training """
self.optim = tf.train.MomentumOptimizer(
self.lr, momentum=0.9).minimize(self.train_loss)
"""" Summary """
self.summary_train_loss = tf.summary.scalar(
"train_loss", self.train_loss)
self.summary_train_accuracy = tf.summary.scalar(
"train_accuracy", self.train_accuracy)
self.summary_test_loss = tf.summary.scalar("test_loss", self.test_loss)
self.summary_test_accuracy = tf.summary.scalar(
"test_accuracy", self.test_accuracy)
self.train_summary = tf.summary.merge(
[self.summary_train_loss, self.summary_train_accuracy])
self.test_summary = tf.summary.merge(
[self.summary_test_loss, self.summary_test_accuracy])
##################################################################################
# Train
##################################################################################
def train(self):
# initialize all variables
tf.global_variables_initializer().run()
# saver to save model
self.saver = tf.train.Saver()
# summary writer
self.writer = tf.summary.FileWriter(
self.log_dir + '/' + self.model_dir, self.sess.graph)
# fareed
dot_rep = graph_to_dot(self.sess.graph)
with open('./resnet.dot', 'w') as fwr:
fwr.write(str(dot_rep))
options = tf.RunOptions(
trace_level=tf.RunOptions.SOFTWARE_TRACE)
run_metadata = tf.RunMetadata()
operations_tensors = {}
operations_names = self.sess.graph.get_operations()
count1 = 0
count2 = 0
for operation in operations_names:
operation_name = operation.name
operations_info = self.sess.graph.get_operation_by_name(
operation_name).values()
if len(operations_info) > 0:
if not (operations_info[0].shape.ndims is None):
operation_shape = operations_info[0].shape.as_list()
operation_dtype_size = operations_info[0].dtype.size
if not (operation_dtype_size is None):
operation_no_of_elements = 1
for dim in operation_shape:
if not(dim is None):
operation_no_of_elements = operation_no_of_elements * dim
total_size = operation_no_of_elements * operation_dtype_size
operations_tensors[operation_name] = total_size
else:
count1 = count1 + 1
else:
count1 = count1 + 1
operations_tensors[operation_name] = -1
# print('no shape_1: ' + operation_name)
# print('no shape_2: ' + str(operations_info))
# operation_namee = operation_name + ':0'
# tensor = tf.get_default_graph().get_tensor_by_name(operation_namee)
# print('no shape_3:' + str(tf.shape(tensor)))
# print('no shape:' + str(tensor.get_shape()))
else:
# print('no info :' + operation_name)
# operation_namee = operation.name + ':0'
count2 = count2 + 1
operations_tensors[operation_name] = -1
# try:
# tensor = tf.get_default_graph().get_tensor_by_name(operation_namee)
# print(tensor)
# print(tf.shape(tensor))
# except:
# print('no tensor: ' + operation_namee)
print(count1)
print(count2)
with open('./tensors_sz.json', 'w') as f:
json.dump(operations_tensors, f)
# restore check-point if it exits
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
epoch_lr = self.init_lr
            start_epoch = int(checkpoint_counter / self.iteration)
start_batch_id = checkpoint_counter - start_epoch * self.iteration
counter = checkpoint_counter
if start_epoch >= int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.01
elif start_epoch >= int(self.epoch * 0.5) and start_epoch < int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.1
print(" [*] Load SUCCESS")
else:
epoch_lr = self.init_lr
start_epoch = 0
start_batch_id = 0
counter = 1
print(" [!] Load failed...")
# loop for epoch
start_time = time.time()
for epoch in range(start_epoch, self.epoch):
if epoch == int(self.epoch * 0.5) or epoch == int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.1
# get batch data
for idx in range(start_batch_id, self.iteration):
batch_x = self.train_x[idx *
self.batch_size:(idx+1)*self.batch_size]
batch_y = self.train_y[idx *
self.batch_size:(idx+1)*self.batch_size]
batch_x = data_augmentation(
batch_x, self.img_size, self.dataset_name)
train_feed_dict = {
                    self.train_inputs: batch_x,
self.train_labels: batch_y,
self.lr: epoch_lr
}
test_feed_dict = {
                    self.test_inputs: self.test_x,
self.test_labels: self.test_y
}
# update network
if idx % 5 == 0:
_, summary_str, train_loss, train_accuracy = self.sess.run(
[self.optim, self.train_summary, self.train_loss, self.train_accuracy], feed_dict=train_feed_dict, run_metadata=run_metadata, options=options)
profile(run_metadata, str(epoch) + '_' + str(idx))
else:
st_time = time.time()
_, summary_str, train_loss, train_accuracy = self.sess.run(
[self.optim, self.train_summary, self.train_loss, self.train_accuracy], feed_dict=train_feed_dict)
                    print('step_time: ' + str(time.time() - st_time))
self.writer.add_summary(summary_str, counter)
# test
summary_str, test_loss, test_accuracy = self.sess.run(
[self.test_summary, self.test_loss, self.test_accuracy], feed_dict=test_feed_dict)
self.writer.add_summary(summary_str, counter)
# display training status
counter += 1
print("Epoch: [%2d] [%5d/%5d] time: %4.4f, train_accuracy: %.2f, test_accuracy: %.2f, learning_rate : %.4f"
% (epoch, idx, self.iteration, time.time() - start_time, train_accuracy, test_accuracy, epoch_lr))
# After an epoch, start_batch_id is set to zero
# non-zero value is only for the first epoch after loading pre-trained model
start_batch_id = 0
# save model
self.save(self.checkpoint_dir, counter)
# save model for final step
self.save(self.checkpoint_dir, counter)
@property
def model_dir(self):
return "{}{}_{}_{}_{}".format(self.model_name, self.res_n, self.dataset_name, self.batch_size, self.init_lr)
def save(self, checkpoint_dir, step):
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess, os.path.join(
checkpoint_dir, self.model_name+'.model'), global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(
checkpoint_dir, ckpt_name))
counter = int(ckpt_name.split('-')[-1])
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
def test(self):
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
test_feed_dict = {
            self.test_inputs: self.test_x,
self.test_labels: self.test_y
}
test_accuracy = self.sess.run(
self.test_accuracy, feed_dict=test_feed_dict)
print("test_accuracy: {}".format(test_accuracy))
|
import csv
import time
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
import numpy as np
from planner.drone import DroneState
from planner.flyZone import MOCAP_FLY_ZONE, FlyZone
from planner.plotting import plotDroneState, plotFlyZone, plotlive
from plotUtils import saveFig
Z_HEIGHT = 1.5
class SimpleTrajectory:
def __init__(self, timestamps=None, x=None, y=None, z=None, yaw=None, csv=None, poses=None):
if csv is not None:
trajectoryData = np.loadtxt(csv, delimiter=",", skiprows=1)
timestampsTemp = np.array([row[0] for row in trajectoryData])
self.timestamps = timestampsTemp - timestampsTemp[0]
self.x = np.array([row[1] for row in trajectoryData])
self.y = np.array([row[2] for row in trajectoryData])
# If trajectory is from motion planner, keep z
self.z = np.array([row[3] if row[3] != -1997 else Z_HEIGHT for row in trajectoryData])
self.yaw = np.array([row[4] for row in trajectoryData])
elif poses is not None:
self.timestamps = timestamps
x, y, z, yaw = [], [], [], []
for pose in poses:
x.append(pose.x)
y.append(pose.y)
# If trajectory is from motion planner, keep z
                z.append(pose.z if pose.z != -1997 else Z_HEIGHT)  # keep z from the motion planner, otherwise use the fixed flight height
yaw.append(pose.yaw)
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
self.yaw = np.array(yaw)
else:
if len(timestamps) == 0:
print("No values in trajectory")
exit()
self.timestamps = timestamps - timestamps[0]
self.x = x
self.y = y
self.z = z if z[0] != -1997 else np.full(len(z), Z_HEIGHT) # If trajectory is from motion planner, keep z
self.yaw = yaw
def saveTrajectoryToCsv(self, fileName):
with open(fileName, 'w', encoding='UTF8') as f:
writer = csv.writer(f)
header = ["timestamp", "x", "y", "z", "yaw"]
writer.writerow(header)
numDroneStates = len(self.timestamps)
for i in range(numDroneStates):
row = [
self.timestamps[i],
self.x[i] / 100, self.y[i] / 100, self.z[i] / 100,
self.yaw[i]
]
writer.writerow(row)
def interpolateTrajectory(self, delta: float):
"""
Args:
            delta (float): Time step (in seconds) between consecutive samples of the interpolated trajectory.
Returns:
SimpleTrajectory: Interpolated trajectory.
"""
t_d = np.arange(0, self.timestamps[-1], delta)
x_d = np.interp(t_d, self.timestamps, self.x)
y_d = np.interp(t_d, self.timestamps, self.y)
z_d = np.interp(t_d, self.timestamps, self.z)
yaw_d = np.interp(t_d, self.timestamps, self.yaw)
interpolatedTrajectory = SimpleTrajectory(t_d, x_d, y_d, z_d, yaw_d)
return interpolatedTrajectory
    def getVelocities(self):
currentPositions = [np.array([self.x[i], self.y[i], self.z[i]]) for i in range(len(self.timestamps))]
goalPositions = [np.array([self.x[i + 1], self.y[i + 1], self.z[i + 1]]) for i in range(len(self.timestamps) - 1)]
goalPositions.append(np.array([self.x[-1], self.y[-1], self.z[-1]]))
time = self.timestamps[1] - self.timestamps[0]
currentPositions = np.array(currentPositions)
goalPositions = np.array(goalPositions)
distanceToGoalPosition = np.array([np.linalg.norm(currentPositions[i] - goalPositions[i]) for i in range(len(currentPositions))])
velocities = distanceToGoalPosition / time
return velocities
def plotTrajectory(self, otherTrajectory: "SimpleTrajectory" = None, fileName = None):
fig, axs = plt.subplots(5, sharex=True)
if len(self.timestamps) == 0:
print("No values in trajectory")
return
for ax in axs:
ax.set_xlabel("time (s)")
axs[0].plot(self.timestamps, self.x, "-g")
axs[1].plot(self.timestamps, self.y, "-g")
axs[2].plot(self.timestamps, self.z, "-g")
axs[3].plot(self.timestamps, np.rad2deg(self.yaw), "-g")
        plannedVelocities = self.getVelocities()
axs[4].plot(self.timestamps, plannedVelocities, "-g")
axs[0].set_ylabel("x (m)")
axs[1].set_ylabel("y (m)")
axs[2].set_ylabel("z (m)")
axs[3].set_ylabel("yaw (degree)")
axs[4].set_ylabel("velocity (m/s)")
if otherTrajectory is not None and len(otherTrajectory.timestamps) > 0:
axs[0].plot(otherTrajectory.timestamps, otherTrajectory.x, "-r")
axs[1].plot(otherTrajectory.timestamps, otherTrajectory.y, "-r")
axs[2].plot(otherTrajectory.timestamps, otherTrajectory.z, "-r")
axs[3].plot(otherTrajectory.timestamps, np.rad2deg(otherTrajectory.yaw), "-r")
            executedVelocities = otherTrajectory.getVelocities()
            axs[4].plot(otherTrajectory.timestamps, executedVelocities, "-r")
if fileName is not None:
saveFig(fig, fileName)
# plt.show()
# plt.pause(300000000)
def visualize(self):
fig, ax = plt.subplots()
for i, t_curr in enumerate(self.timestamps):
self.plotDronePose(ax)
x, y, yaw = self.x[i], self.y[i], self.yaw[i]
droneState = DroneState(parent=None, x=x, y=y, yaw=yaw)
plotDroneState(ax, droneState)
plt.show()
if i < len(self.timestamps) - 1:
t_next = self.timestamps[i + 1]
t_sleep = t_next - t_curr
plt.pause(t_sleep)
else:
plt.pause(2)
plt.close(fig)
@plotlive
def plotDronePose(self, ax: "Axes"):
plotFlyZone(ax, MOCAP_FLY_ZONE)
def __str__(self):
return f"-Trajectory-\ntimestamps={self.timestamps}\nx={self.x}\ny={self.y}\nz={self.z}\nyaw={np.round(np.rad2deg(self.yaw))}\n"
|
#
# PySNMP MIB module WWP-LEOS-PING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WWP-LEOS-PING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:31:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
AddressFamilyNumbers, = mibBuilder.importSymbols("IANA-ADDRESS-FAMILY-NUMBERS-MIB", "AddressFamilyNumbers")
InetAddressType, = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Gauge32, Unsigned32, Counter32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, ObjectIdentity, IpAddress, MibIdentifier, iso, Integer32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "Unsigned32", "Counter32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "ObjectIdentity", "IpAddress", "MibIdentifier", "iso", "Integer32", "TimeTicks")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
wwpModulesLeos, = mibBuilder.importSymbols("WWP-SMI", "wwpModulesLeos")
wwpLeosPingMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19))
wwpLeosPingMIB.setRevisions(('2012-04-02 00:00', '2001-07-03 12:57',))
if mibBuilder.loadTexts: wwpLeosPingMIB.setLastUpdated('201204020000Z')
if mibBuilder.loadTexts: wwpLeosPingMIB.setOrganization('Ciena, Inc')
class PingFailCause(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))
namedValues = NamedValues(("unknownHost", 1), ("socketError", 2), ("bindError", 3), ("connectError", 4), ("missingHost", 5), ("asyncError", 6), ("nonBlockError", 7), ("mcastError", 8), ("ttlError", 9), ("mcastTtlError", 10), ("outputError", 11), ("unreachableError", 12), ("isAlive", 13), ("txRx", 14), ("commandCompleted", 15), ("noStatus", 16), ("sendRecvMismatch", 17))
class PingState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("idle", 1), ("pinging", 2), ("pingComplete", 3), ("failed", 4))
wwpLeosPingMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1))
wwpLeosPingDelay = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingDelay.setStatus('current')
wwpLeosPingPacketSize = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1464)).clone(56)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingPacketSize.setStatus('current')
wwpLeosPingActivate = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingActivate.setStatus('current')
wwpLeosPingAddrType = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 4), AddressFamilyNumbers()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosPingAddrType.setStatus('current')
wwpLeosPingAddr = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingAddr.setStatus('current')
wwpLeosPingPacketCount = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingPacketCount.setStatus('current')
wwpLeosPingPacketTimeout = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingPacketTimeout.setStatus('current')
wwpLeosPingSentPackets = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosPingSentPackets.setStatus('current')
wwpLeosPingReceivedPackets = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosPingReceivedPackets.setStatus('current')
wwpLeosPingFailCause = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 10), PingFailCause()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosPingFailCause.setStatus('current')
wwpLeosPingState = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 11), PingState().clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosPingState.setStatus('current')
wwpLeosPingUntilStopped = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 12), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosPingUntilStopped.setStatus('current')
wwpLeosPingInetAddrType = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 19, 1, 13), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosPingInetAddrType.setStatus('current')
mibBuilder.exportSymbols("WWP-LEOS-PING-MIB", PingState=PingState, PingFailCause=PingFailCause, PYSNMP_MODULE_ID=wwpLeosPingMIB, wwpLeosPingSentPackets=wwpLeosPingSentPackets, wwpLeosPingReceivedPackets=wwpLeosPingReceivedPackets, wwpLeosPingState=wwpLeosPingState, wwpLeosPingAddrType=wwpLeosPingAddrType, wwpLeosPingPacketTimeout=wwpLeosPingPacketTimeout, wwpLeosPingPacketSize=wwpLeosPingPacketSize, wwpLeosPingDelay=wwpLeosPingDelay, wwpLeosPingAddr=wwpLeosPingAddr, wwpLeosPingFailCause=wwpLeosPingFailCause, wwpLeosPingMIBObjects=wwpLeosPingMIBObjects, wwpLeosPingPacketCount=wwpLeosPingPacketCount, wwpLeosPingUntilStopped=wwpLeosPingUntilStopped, wwpLeosPingInetAddrType=wwpLeosPingInetAddrType, wwpLeosPingActivate=wwpLeosPingActivate, wwpLeosPingMIB=wwpLeosPingMIB)
|
import numpy as np
from netCDF4 import Dataset
from matplotlib import pyplot as plt
from PIL import Image
# Both datasets use an evenly spaced grid with a 2.5 deg. resolution.
# Relative humidity, expressed as a percentage, indicates a present state of absolute humidity relative to a maximum humidity given the same temperature.
# Levels (kPa) --> {1000,925,850,700,600,500,400,300} --> doesn't go higher b/c of tropopause temp. inversion.
monthConversion = {
"January" : [1,31],
"February" : [2,28],
"March" : [3,31],
"April" : [4,30],
"May" : [5,31],
"June" : [6,30],
"July" : [7,31],
"August" : [8,31],
"September" : [9,30],
"October" : [10,31],
"November" : [11,30],
"December" : [12,31]
}
def generate_specHumidity_profs(year,month,date,monthConversion):
NC_specHumidity_2m = Dataset('datasets/shum.2m.gauss.' + str(year) + '.nc', "r", format="NETCDF4")
NC_specHumidity_multiLevels = Dataset('datasets/shum.' + str(year) + '.nc', "r", format="NETCDF4")
specHumidity_2m = np.array(NC_specHumidity_2m.variables['shum'])# shape(366,94,192) for 2020.
specHumidity = np.array(NC_specHumidity_multiLevels.variables['shum'])# shape(366,8,73,144) for 2020.
# newDate calculation.
    # Assuming the number of time entries in the surface and pressure-level datasets is equal.
    leapYear = len(specHumidity) == 366
newDate = 0
for elem in monthConversion.keys():
if elem != month:
newDate += (monthConversion[elem])[1]
else:
newDate += date
break
    if leapYear and newDate > 59:
newDate += 1
# Extracting profs by alt. (Indexes: 300mbar: -1, 850mbar: 2)
specHumidity = np.rot90(specHumidity,1) # shape (8,366,73,144)
specHumidity_at300 = specHumidity[-1][newDate - 3:newDate + 4]
specHumidity_at850 = specHumidity[2][newDate - 3:newDate + 4] # Final shapes (7,73,144)
specHumidity_2m = specHumidity_2m[newDate - 3:newDate + 4]
specHumidity = np.array([specHumidity_at850,specHumidity_at300])
np.save("outData/shum_7d_3l_2x7x73x144.npy",specHumidity) # Arranged by ascending altitude.
np.save("outData/shum_surf2m_7x94x192.npy",specHumidity_2m)
return 0.
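# Illustrative call (not executed here; it assumes the NetCDF files exist under datasets/):
#   generate_specHumidity_profs(2020, "July", 15, monthConversion)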
def plot_specHumidity(err):
shumData = np.load("outData/shum_7d_3l_2x7x73x144.npy")
shumSurfData = np.load("outData/shum_surf2m_7x94x192.npy")
shumData = np.rot90(shumData,1) # Switch primary looping var. to day
# new shape(7,2,73,144). --> shumData
im = Image.open("globalMap.png")
x = np.linspace(0,143,num=9,endpoint=True)
xLabels = [str(i) for i in list(np.arange(start=-180,stop=181,step=45))]
y = np.linspace(0,72,num=7,endpoint=True)
yLabels = [str(i) for i in list(np.arange(start=-90,stop=91,step=30))]
dayLabel = 1
levelLabel = ['_850mbar','_300mbar']
for day in shumData:
index_levelLabel = 0
for level in day:
fig = plt.figure()
ax = plt.axes()
plt.imshow(im,extent=[0,143,0,72])
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plt.xticks(x,xLabels)
plt.yticks(y,yLabels)
cs = ax.contourf(level,alpha=0.5)
plt.colorbar(cs, ax=ax, label="Specific Humidity (kg/kg)", orientation='horizontal')
plt.savefig("finalOutput_plots/specificHumidity/specificHumidity_day" + str(dayLabel) + levelLabel[index_levelLabel] + ".png")
plt.cla()
plt.clf()
plt.close()
index_levelLabel += 1
dayLabel += 1
# Creating surface profiles (shumSurfData)
dayLabel = 1
x = np.linspace(0,191,num=9,endpoint=True)
xLabels = [str(i) for i in list(np.arange(start=-180,stop=181,step=45))]
y = np.linspace(0,93,num=7,endpoint=True)
yLabels = [str(i) for i in list(np.arange(start=-90,stop=91,step=30))]
for day in shumSurfData:
fig = plt.figure()
ax = plt.axes()
plt.imshow(im,extent=[0,191,0,93])
plt.xlabel("Longitude")
plt.xticks(x,xLabels)
plt.ylabel("Latitude")
plt.yticks(y,yLabels)
cs = ax.contourf(day, alpha=0.5)
plt.colorbar(cs, ax=ax, label="Specific Humidity (kg/kg)", orientation='horizontal')
plt.savefig("finalOutput_plots/specificHumidity/specificHumidity_day" + str(dayLabel) + "_atSurface.png")
plt.cla()
plt.clf()
plt.close()
dayLabel += 1
return 0.
plot_specHumidity(0.)
# T62 grid error holds true for shumSurfData. |
"""
# MULTI-LOSS WEIGHTING WITH COEFFICIENT OF VARIATIONS
## https://arxiv.org/pdf/2009.01717.pdf
In other words, this hypothesis says that a loss with a constant value should not be optimised any further. Variance alone,
however, is not sufficient, given that it can be expected that a loss which has a larger (mean) magnitude also has a higher
absolute variance, even if the loss is relatively less variant. Therefore, we propose to use a dimensionless measure of
uncertainty, the Coefficient of Variation (cv), which shows the variability of the data in relation to the (observed) mean:
```
cv = σ/µ
```
*(2)*
where `µ` denotes the mean and `σ` the standard deviation. It allows to fairly compare the uncertainty in losses, even
with different magnitudes, under the assumption that each loss measures in a ratio-scale, that is with a unique and
non-arbitrary zero value.
Here a more robust loss ratio is proposed:
```
li(t) = Li(t) / µLi(t − 1)
```
*(3)*
where `µLi(t − 1)` is the mean over all observed losses from iteration 1 to (`t` - 1) for a specific loss Li. The loss ratio l(t)
has the same meaningful zero point when `Li(t)` is zero and is a relative comparison of two measurements of the loss
statistic. Now, the loss ratio is used as a point estimate of the mean to yield the following definition for loss weights:
```
αi = σli(t) / li(t)
```
*(4)*
where `σli(t)` is the standard deviation over all known loss ratios `Li(t)/µli(t−1)` until iteration `t` - 1
"""
import numpy as np
import torch
class LossTracker:
def __init__(self, name, experiment, weight=1, warmup=np.inf, max=np.inf, block_size=100):
"""A wrapper around the pytorch `backwards` method call that also:
- calculates a set of running statistics of losses
- applies constraints to the loss value
- `weight`: static scaling value applied before `warmup` steps
- `max`: hard upper limit on loss value
- dynamic weight: `weight` is set based on statistical calculations after `warmup` steps
- NOTE: This only works with "well behaved" losses that fall in the 0-1 range
- logs each value and statistic after each update
- comet.ml experiment
- console
Args:
name (str): Used in logging and as a key for lookup tables
experiment ([type]): comet.ml interface
weight (int, optional): Static weight scaling value, used until `warmup` calls to update.
Defaults to 1.
warmup (int, optional): Determines number of updates to do before dynamically
calculating weight. Using infinity effectively disables the dynamic weight
feature. Defaults to np.inf.
max (int, optional): Hard upper limit on the value of the loss. Using infinity
effectively disables this feature. Defaults to np.inf.
            block_size (int, optional): Initial size of the dynamically allocated loss history
                buffers; `expand_buffer` must be called before each epoch to grow them.
                Defaults to 100.
"""
self.name = name
self.exp = experiment
self.weight = weight
self.max = max
self.warmup = warmup
self.block_size = block_size
self.reset()
def reset(self):
self.mean = 1
self.var = 0
self.std = 0
self.ratio = 0
self.ratio_std = 0
self.cov = 0
self.cov_weight = self.weight
self.value_history = np.empty(self.block_size)
self.ratio_history = np.empty(self.block_size)
self.max_history_size = self.block_size
self.value = 0
self.total = 0
self.count = 0
def expand_buffer(self, block_size=None):
if block_size is not None:
self.block_size = block_size
        self.value_history = np.concatenate((self.value_history, np.empty(self.block_size)))
        self.ratio_history = np.concatenate((self.ratio_history, np.empty(self.block_size)))
self.max_history_size += self.block_size
def update(self, value, do_backwards=True, do_comet=True, do_console=False):
if do_backwards:
value = self.constrain_loss(value)
value.backward()
self.value = value.item()
else:
self.value = value
self.total += self.value
assert self.count < self.max_history_size
self.value_history[self.count] = self.value
# calculate li(t)
if self.mean != 0:
self.ratio = self.value / self.mean # µLi(t − 1) is the mean over all observed losses from iteration 1 to (t - 1) for a specific loss Li
else:
self.ratio = 1 # ratio of 1 when mean is 0
self.ratio_history[self.count] = self.ratio
self.count += 1
if self.count > 1: # only once there is a history
            self.ratio_std = self.ratio_history[:self.count].std() # σli(t) is the standard deviation over all known loss ratios Li(t)/µli(t−1) until iteration t - 1
self.cov_weight = self.ratio_std / self.ratio # αi = σli(t) / li(t)
if self.count > self.warmup:
# use cov weight as functioning weight after warmup period to allow for meaningful statistics to build
self.weight = self.cov_weight
self.mean = self.value_history[:self.count].mean()
self.var = self.value_history[:self.count].var()
self.std = self.value_history[:self.count].std()
self.cov = self.std / self.mean
# update comet or print out
self.log(comet=do_comet, console=do_console)
def log(self, comet=True, console=False):
if comet:
self.exp.log_metric(f"{self.name}_loss", self.value)
self.exp.log_metric(f"{self.name}_cov", self.cov)
self.exp.log_metric(f"{self.name}_cov_weight", self.cov_weight)
self.exp.log_metric(f"{self.name}_var", self.var)
self.exp.log_metric(f"{self.name}_std", self.std)
if console:
msg = f"[{self.name}] [{self.count}]\t{self.value} @ {self.cov_weight}x \t ~ mean: {self.mean} var: {self.var} std: {self.std} cov: {self.cov}"
print(msg)
self.exp.log_text(msg)
def get_history(self):
return self.value_history[:self.count]
def constrain_loss(self, loss):
loss *= self.weight
if loss > self.max:
            # scale the loss down by its integer magnitude relative to the cap before the final clamp
            magnitude = torch.floor(loss / self.max)
            loss = loss / max(magnitude, 1)
loss = torch.clamp(loss, 0, self.max)
return loss
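# Minimal numeric sketch (not part of the original module) of equations (2)-(4) from the docstring above,
# using a made-up loss history; it only needs numpy and touches neither comet.ml nor torch.
if __name__ == "__main__":
    losses = np.array([0.90, 0.80, 0.75, 0.72, 0.70])          # toy history of one loss term
    mu_prev = losses[:-1].mean()                                # µLi(t-1): mean of all losses before iteration t
    ratio = losses[-1] / mu_prev                                # eq. (3): li(t) = Li(t) / µLi(t-1)
    ratios = losses[1:] / np.array([losses[:i].mean() for i in range(1, len(losses))])
    weight = ratios.std() / ratio                               # eq. (4): alpha_i = std(li) / li(t)
    cv = losses.std() / losses.mean()                           # eq. (2): cv = sigma / mu
    print("loss ratio li(t)=%.3f, weight=%.3f, cv=%.3f" % (ratio, weight, cv))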
|
"""043 - DESENVOLVA UMA LÓGICA QUE LEIA O PESO E A ALTURA DE UMA PESSOA, CALCULO SEU IMC E MOSTRE SEU STATUS,
DE ACORDO COM A TABELA ABAIXO:
- ABAIXO DE 18,5: ABAIXO DO PESO
- ENTRE 18,5 E 25: PESO IDEAL
- ENTRE 25 E 30: SOBREPESO
- ENTRE 30 E 40: OBESIDADE MORBIDA"""
print('-' * 20, 'DESAFIO 043', '-' * 20)
p = float(input('Informe seu peso: '))
a = float(input('Informe sua altura: '))
imc = p / (a ** 2)
if imc < 18.5:
print('Seu IMC é de {:.2f}kg/m e você está abaixo do peso.'.format(imc))
elif imc <= 25:
print('Seu IMC é de {:.2f}kg/m e você está no peso ideal.'.format(imc))
elif imc <= 30:
print('Seu IMC é de {:.2f}kg/m e você está com sobrepeso.'.format(imc))
else:
print('Seu IMC é {:.2f}kg/m e você está com obesidade morbida.'.format(imc))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
# from django.db import connection  # import the database connection (to inspect the query list)
from django.core.cache import cache
from django.views.generic import ListView, DetailView
# from silk.profiling.profiler import silk_profile
from .models import Post, Tag, Category
from config.models import SideBar
from comment.models import Comment
from comment.views import CommentShowMixin
logger = logging.getLogger(__name__)
def cache_it(func):
def wrapper(self, *args, **kwargs):
key = repr((func.__name__, args, kwargs))
result = cache.get(key)
if result:
return result
result = func(self, *args, **kwargs)
cache.set(key, result, 60 * 5)
return result
return wrapper
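# Note on cache_it (added comment): results are stored in the Django cache for five minutes under the key
# repr((function name, args, kwargs)); falsy results (e.g. empty lists) are never served from the cache
# and are therefore recomputed on every call.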
class CommonMixin(object):
@cache_it
def get_category_context(self):
categories = Category.objects.filter(status=1)
nav_cates=[]
cates = []
for cate in categories:
if cate.is_nav:
nav_cates.append(cate)
else:
cates.append(cate)
return {
'nav_cates': nav_cates,
'cates': cates,
}
def get_context_data(self, **kwargs):
side_bars = SideBar.objects.filter(status=1)
recently_posts = Post.objects.filter(status=1)[:10]
hot_posts = Post.objects.filter(status=1).order_by('-pv')[:10]
recently_comments = Comment.objects.filter(status=1)[:10]
kwargs.update({
'side_bars': side_bars,
'recently_comments': recently_comments,
'recently_posts': recently_posts,
'hot_posts': hot_posts,
})
kwargs.update(self.get_category_context())
return super(CommonMixin, self).get_context_data(**kwargs)
class BasePostsView(CommonMixin, ListView):
model = Post
template_name = 'blog/list.html'
context_object_name = 'posts'
    paginate_by = 5  # pagination
def time_it(func):
import time
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print(func.__name__, 'cost', time.time() - start)
return result
return wrapper
class IndexView(BasePostsView):
@time_it
def get_queryset(self):
query = self.request.GET.get('query')
logger.info('query:[%s]', query)
qs = super(IndexView, self).get_queryset()
if query:
qs= qs.filter(title__icontains=query) # select * from blog_post where title ilike '%query%'
logger.info('query result:[%s]', qs)
return qs
def get_context_data(self, **kwargs):
query = self.request.GET.get('query')
return super(IndexView, self).get_context_data(query=query)
class CategoryView(BasePostsView):
def get_queryset(self):
qs = super(CategoryView, self).get_queryset()
cate_id = self.kwargs.get('category_id')
qs = qs.filter(category_id=cate_id)
return qs
class TagView(BasePostsView):
def get_queryset(self):
tag_id = self.kwargs.get('tag_id')
try:
tag = Tag.objects.get(id=tag_id)
except Tag.DoesNotExist:
return []
posts = tag.posts.all()
return posts
class AuthorView(BasePostsView):
    def get_queryset(self):
        qs = super(AuthorView, self).get_queryset()
        # prefer the author id from the URL kwargs; fall back to the query string
        author_id = self.kwargs.get('author_id') or self.request.GET.get('author_id')
        if author_id:
            qs = qs.filter(owner_id=author_id)
        return qs
class PostView(CommonMixin, CommentShowMixin, DetailView):
model = Post
template_name = 'blog/detail.html'
context_object_name = 'post'
def get(self, request, *args, **kwargs):
response = super(PostView, self).get(request, *args, **kwargs)
self.pv_uv()
return response
def pv_uv(self):
        # add a page view (pv)
        # check the visitor and add a unique visit (uv)
        # TODO: check whether the user has visited within the last 24 hours
sessionid = self.request.COOKIES.get('sessionid')
path = self.request.path
if not sessionid:
return
pv_key = 'pv:%s:%s' % (sessionid, path)
# import pdb; pdb.set_trace()
if not cache.get(pv_key):
self.object.increse_pv()
cache.set(pv_key, 1, 60)
uv_key = 'uv:%s:%s' % (sessionid, path)
if not cache.get(uv_key):
self.object.increse_uv()
cache.set(uv_key, 1, 60 * 60 *24)
|
# -*- coding: utf-8 -*-
# (c) 2018 The PosterKit developers <[email protected]>
import os
import logging
import requests
from glob import glob
from docopt import docopt, DocoptExit
from posterkit import __version__
from posterkit.util import boot_logging, normalize_options, read_list
from gafam.poster import render_posters, render_mosaic, POSTER_NAMES, POSTER_VARIANTS, POSTER_TRANSLATIONS_URI
logger = logging.getLogger(__name__)
APP_NAME = 'posterkit'
def run():
"""
Usage:
gafam-info pdf [options] [<path>]
gafam-info mosaic [options] [<path>]
gafam-info --help
Options:
--language=<language> Comma-separated list of language codes
One or more of fr,en,de ...
--name=<name> Comma-separated list of poster names
One or more of google,apple,facebook,amazon,microsoft
--variant=<variant> Comma-separated list of variants
One or more of black,eco
--all Render posters in all languages and variants
<path> Where to store the output files [default: .]
Examples:
# Render single-page PDF document and output to STDOUT
gafam-info pdf --language=fr --name=google --variant=black -
# Render multi-page PDF documents for multiple languages and variants and store to output path
gafam-info pdf --language=fr,en,de --name=google,apple,facebook,amazon,microsoft --variant=black,eco /srv/www/posterkit
# Render multi-page PDF documents for French language in all variants and store to output path
gafam-info pdf --language=fr --name=all --variant=all /srv/www/posterkit
# Render multi-page PDF documents for all languages and variants and store to output path
gafam-info pdf --language=all --name=all --variant=all /srv/www/posterkit
"""
# Use generic commandline options schema and amend with current program name
commandline_schema = run.__doc__
# Read commandline options
options = docopt(commandline_schema, version=APP_NAME + ' ' + __version__)
# Initialize logging
boot_logging(options)
# Normalize commandline options
options = normalize_options(options)
# Render all selected PDF documents
if options['pdf']:
path = options['path']
rendering_info = get_rendering_info(options)
check_options(rendering_info)
files = render_posters(info=rendering_info, path=path)
# 2018-05-24: Don't render mosaic each time when invoked with a subset of the whole collection
#render_mosaic(path=path, files=files, variant='color')
elif options['mosaic']:
if not options['variant']:
raise DocoptExit('Error: No variant selected, use "--variant={black,eco,color}"')
path = options['path']
# Scan directory for all PDF files.
pdf_files = sorted(glob(os.path.join(path, 'pdf', '**', 'lqdn-gafam-poster-*.pdf')))
# Filter empty source files.
pdf_files = [item for item in pdf_files if os.stat(item).st_size > 0]
# Debugging.
#print 'pdf_files:', pdf_files
render_mosaic(path=path, files=pdf_files, variant='color')
def check_options(options):
if not options['language']:
raise DocoptExit('Error: No language selected, use "--language=fr,en,de" or "--language=all"')
if not options['name']:
raise DocoptExit('Error: No name selected, use "--name=google,facebook" or "--name=all"')
if not options['variant']:
raise DocoptExit('Error: No variant selected, use "--variant=black,eco" or "--variant=all"')
def get_rendering_info(options):
    if options['all']:
        # --all overrides the individual selections: render every language, name and variant.
        info = {
            'language': get_languages(),
            'name': POSTER_NAMES,
            'variant': POSTER_VARIANTS,
        }
else:
info = {
'language': options['language'] == 'all' and get_languages() or read_list(options['language']),
'name': options['name'] == 'all' and POSTER_NAMES or read_list(options['name']),
'variant': options['variant'] == 'all' and POSTER_VARIANTS or read_list(options['variant']),
}
return info
def get_languages():
# https://api.github.com/repos/gafam/gafam-poster-translations/contents/json
response = requests.get(POSTER_TRANSLATIONS_URI)
data = response.json()
language_codes = []
for fileinfo in data:
language_code = fileinfo['name'].replace('.json', '')
language_codes.append(language_code)
return language_codes
|
import numpy as np
import collections
from random import randint
from core.evaluation.labels import PositiveLabel, NegativeLabel, NeutralLabel
from sample import Sample
from extracted_relations import ExtractedRelation
class BagsCollection:
def __init__(self,
relations,
bag_size,
create_sample_func,
shuffle):
        assert(isinstance(bag_size, int) and bag_size > 0)     # bag_size must be a positive integer
assert(isinstance(relations, collections.Iterable)) # relations from relationsCollection
assert(isinstance(shuffle, bool))
self.bags = []
self.bag_size = bag_size
for relation in relations:
assert(isinstance(relation, ExtractedRelation))
if (len(self.bags) == 0) or (len(self.bags[-1]) == bag_size):
self.bags.append(Bag())
s = create_sample_func(relation)
assert(isinstance(s, Sample))
self.bags[-1].add_sample(s)
if len(self.bags) > 0:
self._complete_last_bag(self.bags, bag_size)
if shuffle:
np.random.shuffle(self.bags)
def _complete_last_bag(self, bags, bag_size):
assert(isinstance(bags, list) and len(bags) > 0)
assert(isinstance(bag_size, int))
last_bag = bags[-1]
assert(isinstance(last_bag, Bag))
if len(last_bag) == 0:
return
while len(last_bag) < bag_size:
random_bag = bags[randint(0, len(bags)-1)]
assert(isinstance(random_bag, Bag))
random_sample_ind = randint(0, len(random_bag._samples)-1)
last_bag.add_sample(random_bag._samples[random_sample_ind])
def iter_by_groups(self, bags_per_group):
"""
        yields: consecutive groups (lists) of Bag objects, bags_per_group bags per group
"""
assert(type(bags_per_group) == int and bags_per_group > 0)
        groups_count = len(self.bags) // bags_per_group
end = 0
for index in range(groups_count):
begin = index * bags_per_group
end = begin + bags_per_group
yield self.bags[begin:end]
delta = len(self.bags) - end
if delta > 0:
yield self.bags[end:] + self.bags[:bags_per_group - delta]
class Bag:
"""
Bag is a list of samples
"""
def __init__(self):
self._samples = []
def add_sample(self, sample):
assert(isinstance(sample, Sample))
self._samples.append(sample)
def __len__(self):
return len(self._samples)
def __iter__(self):
for sample in self._samples:
yield sample
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import sys
import warnings
from itertools import chain
try:
from django.urls import Resolver404, resolve
except ImportError:
from django.core.urlresolvers import Resolver404, resolve
try:
from django.utils.encoding import force_str as force_text
except ImportError:
from django.utils.encoding import force_text
from django.utils.html import escape, format_html
try:
from collections import UserList
from urllib import parse
string_types = str,
PY3 = True
PY2 = False
except ImportError:
from UserList import UserList
import urlparse as parse
string_types = basestring,
PY3 = False
PY2 = True
__version_info__ = '0.2.3'
__version__ = '0.2.3'
version = '0.2.3'
VERSION = '0.2.3'
__all__ = ['get_version', 'generate_css_names_from_string',
'request_path_to_css_names']
def get_version():
return version # pragma: no cover
def _generate_css_names_from_string(item, split_on, prefix='', suffix='', midpoint=''):
split_path = item.strip(split_on).split(split_on)
# Refs #2 - If there's anything urlencoded, decode it.
unquoted_path = (parse.unquote(item).strip() for item in split_path)
# Refs #2 - Make sure we only bother with ought-to-be-safe ascii
# rather than full unicode planes.
decoded_path = (codecs.decode(part.encode('utf-8'), "ascii", "ignore") for part in unquoted_path)
# Refs #2 - Don't take anything which needed escaping (ie: included < or & etc)
escaped_path = (item for item in decoded_path if escape(item) == item)
newpath = tuple(part for part in escaped_path if part)
# If the thing is empty, just return an empty tuple
if not newpath:
return ()
newpath_length = len(newpath) + 1
variations = (newpath[0:l] for l in range(1, newpath_length))
# If there's a prefix and it doesn't end with a sensible separator (given
# the valid names of CSS identifiers), add midpoint.
if prefix and not prefix.endswith(('-', '_')):
prefix = '%s%s' % (prefix, midpoint)
# same as prefix, but start, rather than end
if suffix and not suffix.startswith(('-', '_')):
suffix = '%s%s' % (midpoint, suffix,)
finalised_variations = (
'%s%s%s' % (prefix, midpoint.join(variation), suffix)
for variation in variations
)
return finalised_variations
def generate_css_names_from_string(item, split_on, prefix='', suffix='', midpoint=''):
seen = set()
complete_variations = _generate_css_names_from_string(
item=item, split_on=split_on, prefix=prefix, suffix=suffix,
midpoint=midpoint)
for variation in complete_variations:
if variation not in seen:
seen.add(variation)
yield variation
# Given a URL which includes, say, 2 dynamic parts like so:
# /section/<DYNAMIC>/comments/<DYNAMIC>/details/
# try and get the following variations:
# section section-comments section-comments-details
static_variations = ()
try:
matched_view = resolve(item)
except Resolver404:
pass
else:
dynamic_parts = chain(matched_view.args, matched_view.kwargs.values())
dynamic_strs = (x for x in dynamic_parts
if isinstance(x, string_types))
item_copy = item
for to_replace in dynamic_strs:
item_copy = item_copy.replace(to_replace, '', 1)
static_variations = _generate_css_names_from_string(
item_copy, split_on=split_on, prefix=prefix,
suffix=suffix, midpoint=midpoint)
for extra_variation in static_variations:
if extra_variation not in seen:
seen.add(extra_variation)
yield extra_variation
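# Illustrative example (not from the original source): for item='/section/comments/details/' with
# split_on='/' and midpoint='-', the generator above yields
#   'section', 'section-comments', 'section-comments-details'
# and, when the path resolves against the project's URLconf, it also yields the same variations with
# any dynamic URL arguments stripped out of the path first.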
def request_path_to_css_names(item, prefix='', suffix='', midpoint=''):
# TODO: remove this function.
warnings.warn(
"request_path_to_css_names() is scheduled to be removed, and should be "
"replaced with generate_css_names_from_string() with a valid "
"split_on argument",
PendingDeprecationWarning
)
return generate_css_names_from_string(item=item, split_on='/',
prefix=prefix, suffix=suffix,
midpoint=midpoint)
class Output(UserList):
string_template = "{}"
string_separator = " "
def rendered(self):
for x in self.data:
yield format_html(self.string_template, force_text(x))
def __str__(self):
"""
Used when doing something like:
{% path2css ... as OUTVAR %}
{{ OUTVAR }}
"""
parts = self.rendered()
val = self.string_separator.join(parts)
if PY2:
return val.encode('utf-8')
return val
if PY2:
def __unicode__(self):
"""
Python2 only.
Used when doing something like:
{% path2css ... as OUTVAR %}
{{ OUTVAR }}
"""
parts = self.rendered()
return self.string_separator.join(parts)
def __html__(self):
"""
Used in {% path2css x y %} is used directly
"""
return force_text(self)
"""
__getitem__ is used when doing something like:
{% path2css ... as OUTVAR %}
{% for x in OUTVAR %}{{ x }}{% endfor %}
"""
class LinkOutput(Output):
string_template = '<link href="{}" rel="stylesheet" type="text/css" />'
string_separator = "\n"
def rendered(self):
for x, found in self.data:
data = format_html(self.string_template, force_text(x))
if found:
yield data
else:
yield "<!-- {} -->".format(data)
"""
__getitem__ is used when doing something like:
{% path2css ... as OUTVAR %}
{% for x, did_file_exist in OUTVAR %}{{ x }}={{ did_file_exist }}{% endfor %}
"""
def context_processor(request):
return {
"PATH2CSS": Output(generate_css_names_from_string(request.path, split_on='/',midpoint='-')),
}
|
import argparse
from pyannote.database.util import load_rttm
import numpy as np
import tqdm
def segment_load(seg_path):
segment = np.load(seg_path, allow_pickle=True).item()
return segment
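# Assumed input layout (inferred from the loop below): the .npy file holds a dict mapping uri_id to a list
# of (start_time, duration) pairs. Each output line follows the 10-column RTTM layout
# (type, file id, channel, onset, duration, <NA>, <NA>, speaker name, <NA>, <NA>); the speaker name is
# hard-coded to "A", so every segment is attributed to a single speaker.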
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="convert segment file to rttm file")
parser.add_argument("seg_path", type=str, help="input segment file path")
parser.add_argument("rttm_path", type=str, help="output rttm file path")
args = parser.parse_args()
segment = segment_load(args.seg_path)
rttm_lines = []
for uri_id in tqdm.tqdm(segment.keys()):
for time in segment[uri_id]:
start_time, duration = time
rttm_line = "SPEAKER {} 1 {} {} <NA> <NA> A <NA> <NA>\n".format(uri_id, start_time, duration)
rttm_lines.append(rttm_line)
with open(args.rttm_path, "w") as rttm_file:
rttm_file.writelines(rttm_lines)
pass |
import os
import numpy as np
import scipy.misc
import h5py
import imageio
from PIL import Image
# Loading data from disk
class DataLoaderDisk(object):
def __init__(self, **kwargs):
self.load_size = int(kwargs['load_size'])
self.fine_size = int(kwargs['fine_size'])
self.data_mean = np.array(kwargs['data_mean'])
self.randomize = kwargs['randomize']
self.data_root = os.path.join(kwargs['data_root'])
# read data info from lists
self.list_im = []
self.list_lab = []
with open(kwargs['data_list'], 'r') as f:
for line in f:
path, lab =line.rstrip().split(' ')
self.list_im.append(os.path.join(self.data_root, path))
self.list_lab.append(int(lab))
        self.list_im = np.array(self.list_im, dtype=object)
self.list_lab = np.array(self.list_lab, np.int64)
self.num = self.list_im.shape[0]
print('# Images found:', self.num)
# permutation
perm = np.random.permutation(self.num)
self.list_im[:, ...] = self.list_im[perm, ...]
self.list_lab[:] = self.list_lab[perm, ...]
self._idx = 0
def next_batch(self, batch_size):
images_batch = np.zeros((batch_size, self.fine_size, self.fine_size, 3))
labels_batch = np.zeros(batch_size)
for i in range(batch_size):
image = imageio.imread(self.list_im[self._idx])
image = np.array(Image.fromarray(image).resize((self.load_size, self.load_size)))
image = image.astype(np.float32)/255.
image = image - self.data_mean
if self.randomize:
                flip = np.random.randint(0, 2)  # np.random.random_integers is removed in recent NumPy
if flip>0:
image = image[:,::-1,:]
                offset_h = np.random.randint(0, self.load_size-self.fine_size+1)
                offset_w = np.random.randint(0, self.load_size-self.fine_size+1)
else:
offset_h = (self.load_size-self.fine_size)//2
offset_w = (self.load_size-self.fine_size)//2
images_batch[i, ...] = image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[i, ...] = self.list_lab[self._idx]
self._idx += 1
if self._idx == self.num:
self._idx = 0
return images_batch, labels_batch
def size(self):
return self.num
def reset(self):
self._idx = 0 |
#!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
def main():
src, dst = sys.argv[1:]
if os.path.exists(dst):
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
#work around https://github.com/ninja-build/ninja/issues/1554
os.utime(dst, None)
if __name__ == '__main__':
sys.exit(main())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0006_auto_20160309_0453'),
]
operations = [
migrations.AddField(
model_name='imageitem',
name='show_caption',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='imageitem',
name='show_title',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='imageitem',
name='title_override',
field=models.CharField(max_length=512, blank=True),
),
migrations.AlterField(
model_name='image',
name='alt_text',
field=models.CharField(help_text="A description of the image for users who don't see images", max_length=255, blank=True),
),
]
|
import numpy as np
import rasterio
def write_raster(filename, data, transform, crs, nodata, **kwargs):
"""Write data to a GeoTIFF.
Parameters
----------
filename : str
    data : 2d or 3d ndarray
transform : rasterio transform object
crs : rasterio.crs object
nodata : int
"""
count = 1 if len(data.shape) == 2 else data.shape[-1]
meta = {
"driver": "GTiff",
"dtype": data.dtype,
"nodata": nodata,
"width": data.shape[1],
"height": data.shape[0],
"count": count,
"crs": crs,
"transform": transform,
"compress": "lzw",
}
if kwargs:
meta.update(kwargs)
with rasterio.open(filename, "w", **meta) as out:
if count == 1:
out.write(data, indexes=1)
else:
# rework from row, col, z to z,row, col
out.write(np.rollaxis(data, axis=-1))
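# Minimal usage sketch (not part of the original module); the file name and grid below are made up
# purely for illustration and assume rasterio is installed.
if __name__ == "__main__":
    from rasterio.crs import CRS
    from rasterio.transform import from_origin
    demo = np.zeros((10, 10), dtype="float32")                    # toy 2d raster
    demo_transform = from_origin(0.0, 10.0, 1.0, 1.0)             # 1-unit pixels anchored at (0, 10)
    write_raster("demo.tif", demo, demo_transform, crs=CRS.from_epsg(4326), nodata=-9999)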
|
import warnings
warnings.filterwarnings("ignore")
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
import time
from sklearn.utils import shuffle
from SelfTraining import StandardSelfTraining
from sklearn.metrics import accuracy_score, cohen_kappa_score
sca = MinMaxScaler()
base = 'mnist'
modelo = 'RF'
caminho = 'D:/Drive UFRN/bases/'
dados = pd.read_csv(caminho + base +'.csv')
X = sca.fit_transform(dados.drop(['classe'], axis=1).values)
Y = dados['classe'].values
rotulados = [50 , 100, 150, 200, 250, 300]
porcentagem = [0.0047, 0.0093, 0.0140, 0.0186, 0.0233, 0.0279]
resultado = pd.DataFrame()
acuraciai = []
acuraciat = []
kappai = []
kappat = []
for r, p in enumerate(porcentagem):
inicio = time.time()
    print('Test: ' + str(rotulados[r]))
X_train, X_test, y_train, y_test = train_test_split(X,Y, train_size=0.9, test_size=0.1, stratify=Y)
""" PROCESSO TRANSDUTIVO """
L, U, y, yu = train_test_split(X_train, y_train, train_size = p, test_size= 1.0 - p, stratify=y_train)
    # print('Split the data...')
if modelo == 'MLP':
classificador = MLPClassifier(hidden_layer_sizes=(10,), max_iter=100)
elif modelo == 'KNN':
classificador = KNeighborsClassifier(n_neighbors=5)
        # print('Selected KNN')
elif modelo == 'SVM':
classificador = SVC(probability=True)
elif modelo == 'RF':
classificador = RandomForestClassifier(n_estimators=20)
elif modelo == 'NB':
classificador = GaussianNB()
else:
classificador = LogisticRegression()
selfT = StandardSelfTraining(modelo, classificador)
    # print('Built the model')
X_treino = np.concatenate((L, U))
Y_treino = np.concatenate((y.astype(str), np.full_like(yu.astype(str), "unlabeled")))
    # print('Started training...')
selfT.fit(X_treino, Y_treino)
    # print('Storing the results')
    """ TRANSDUCTIVE PHASE """
acuraciat.append(accuracy_score(yu, selfT.predict(U).astype('int64')))
kappat.append(cohen_kappa_score(yu, selfT.predict(U).astype('int64')))
""" FASE INDUTIVA """
acuraciai.append(accuracy_score(y_test, selfT.predict(X_test).astype('int64')))
kappai.append(cohen_kappa_score(y_test, selfT.predict(X_test).astype('int64')))
fim = time.time()
tempo = np.round((fim - inicio)/60,2)
    print('........ Time: ' + str(tempo) + ' minutes.')
resultado['R'] = rotulados
resultado['AT'] = acuraciat
resultado['KT'] = kappat
resultado['AI'] = acuraciai
resultado['KI'] = kappai
resultado.to_csv('resultados/RF/'+base+'.csv') |
import random
import operator
import tkinter
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import matplotlib.animation
import matplotlib.backends.backend_tkagg  # needed for FigureCanvasTkAgg below
import agentframework
import csv
import requests
import bs4
import time
start_time = time.time()
# We set the random seed so we can see repeatable patterns
random.seed(0)
#Function distance_between that uses Pythagoras' theorem to calculate distance between agents
#def distance_between(agents_row_a, agents_row_b):
# return (((agents_row_a.x - agents_row_b.x)**2) +
# ((agents_row_a.y - agents_row_b.y)**2))**0.5
#Setting up initial variables
num_of_agents = 10
num_of_iterations = 1000 #Number of times the agents move
agents = []
environment = []
neighbourhood = 20
r = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')
content = r.text
soup = bs4.BeautifulSoup(content, 'html.parser')
td_ys = soup.find_all(attrs={"class" : "y"})
td_xs = soup.find_all(attrs={"class" : "x"})
print(td_ys)
print(td_xs)
#Code to store the environment for ABM as a list of lists
#Read the in.text file
with open('in.txt', newline='') as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowList = []
for value in row:
rowList.append(value)
environment.append(rowList)
#Calculate size of environment, so that it can be used to pass to agents etc
rows = len(environment)
cols = len(environment[0])
# Make the agents. This is calling the Agent class in agentframework
# We are passing various variables so they need to be created first
for i in range(num_of_agents):
    # We set the y and x values from the data scraped from the webpage.
    # (See the commented-out call below to create agents at random positions instead.)
y = int(td_ys[i].text)
x = int(td_xs[i].text)
agents.append(agentframework.Agent(i, agents, environment, rows, cols, y, x))
# This is the code to not pass y and x variables, so the agents will be
# created in a random place
#agents.append(agentframework.Agent(i, agents, environment, rows, cols))
#Create variables for max and min distance between each agent
max_distance = 0
min_distance = 0
'''
max_distance = distance_between(agents[0], agents[1])
min_distance = max_distance
#for i in range(num_of_agents):
for i in range(0, num_of_agents, 1):
#for j in range(num_of_agents):
for j in range(0, num_of_agents, 1):
#if i != j:
#if i < j:
answer = distance_between(agents[i], agents[j])
max_distance = max(max_distance, answer)
min_distance = min(min_distance, answer)
print("Maximum distance between agents =", max_distance)
print("Minimum distance between agents =", min_distance)
'''
def run():
animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
canvas.draw()
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# This builds the main window ("root"); sets its title, then creates and lays
# out a matplotlib canvas embedded within our window and associated with fig,
# our matplotlib figure.
root = tkinter.Tk()
root.wm_title("Model")
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
menu = tkinter.Menu(root)
root.config(menu=menu)
model_menu = tkinter.Menu(menu)
menu.add_cascade(label="Model", menu=model_menu)
model_menu.add_command(label="Run model", command=run)
carry_on = True
def update(frame_number):
#print("update")
fig.clear()
#Show the environment raster image
matplotlib.pyplot.imshow(environment)
#Limit the graph size on the x and y axis to the max size of our environment
matplotlib.pyplot.xlim(0, cols)
matplotlib.pyplot.ylim(0, rows)
global carry_on
print("Iteration: ", frame_number)
random.shuffle(agents)
for i in range(num_of_agents):
#Make each agent move
agents[i].move()
#After moving, make each agent eat
agents[i].eat()
#After eating make each agent share, if they are in range
agents[i].share_with_neighbours(neighbourhood)
#Then Print out each agent's status after moving, eating and sharing
print("Agent", i, agents[i])
if random.random() < 0.01:
carry_on = False
print("Probability stopping condition met")
for i in range(num_of_agents):
#print("Scatter plotting")
matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
#print(agents[i][0],agents[i][1])
def gen_function(b = [0]):
a = 0
global carry_on #Not actually needed as we're not assigning, but clearer
global agents # Used to access the agents list within this function
    while (a < num_of_iterations) and carry_on:
yield a # Returns control and waits next call.
a = a + 1
#print("Iteration number", a)
else:
print("Max iterations: ", num_of_iterations)
print("Stopping condition encountered:\n" + "Completed " + str(a) + " iterations")
#Print final agent states
print("Final agents")
#Use the sorted function to reorder the agents list, sorted on the attribute i
#This is to make the readout easier to read for humans, as the agents are
#always in the same order, so we can easily compare how any changes have
#affected the outcome
agents = sorted(agents, key=lambda Agent: Agent.i)
for i in range(num_of_agents):
print(agents[i])
#Write sorted agent final states to output.txt
#Using with means the file is closed when the with statement is finished
with open("output.txt", "w") as f:
f.write("Final agent states\n")
for i in range(num_of_agents):
f.write(str(agents[i]))
f.write("\n")
end_time = time.time()
print ("Time taken =", (end_time - start_time))
#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)
#animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
#def run():
# animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
# canvas.draw()
#matplotlib.pyplot.show()
tkinter.mainloop()
|
#!/usr/bin/env python
"""Tests for grr.parsers.wmi_parser."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import wmi_parser
from grr.test_data import client_fixture
class WMIParserTest(test_lib.FlowTestsBaseclass):
def testInterfaceParsing(self):
parser = wmi_parser.WMIInterfacesParser()
rdf_dict = rdf_protodict.Dict()
wmi_properties = (client_fixture.WMIWin32NetworkAdapterConfigurationMock.
__dict__.iteritems())
for key, value in wmi_properties:
if not key.startswith("__"):
try:
rdf_dict[key] = value
except TypeError:
rdf_dict[key] = "Failed to encode: %s" % value
result_list = list(parser.Parse(
None, rdf_dict, None))
self.assertEqual(len(result_list), 2)
for result in result_list:
if isinstance(result, rdf_client.Interface):
self.assertEqual(len(result.addresses), 4)
self.assertItemsEqual(
[x.human_readable_address for x in result.addresses],
["192.168.1.20", "ffff::ffff:aaaa:1111:aaaa",
"dddd:0:8888:6666:bbbb:aaaa:eeee:bbbb",
"dddd:0:8888:6666:bbbb:aaaa:ffff:bbbb"])
self.assertItemsEqual(
[x.human_readable_address for x in result.dhcp_server_list],
["192.168.1.1"])
self.assertEqual(result.dhcp_lease_expires.AsMicroSecondsFromEpoch(),
1409008979123456)
self.assertEqual(result.dhcp_lease_obtained.AsMicroSecondsFromEpoch(),
1408994579123456)
elif isinstance(result, rdf_client.DNSClientConfiguration):
self.assertItemsEqual(result.dns_server, ["192.168.1.1",
"192.168.255.81",
"192.168.128.88"])
self.assertItemsEqual(result.dns_suffix, ["blah.example.com",
"ad.example.com",
"internal.example.com",
"example.com"])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
import pandas as pd
import utils
import torch
from config import *
from model import *
import matplotlib.pyplot as plt
def predict():
# test_loader = utils.data_loader('Dig-MNIST')
data = utils.load_data('test')
data = torch.from_numpy(data).to(Config.device)
num_sample = data.size(0)
idx = torch.randint(data.size(0), size=(num_sample,))
data = data[idx, :]
img = data[:, 1:].float() / 255
img = img.view(-1, 1, 28, 28)
idx = data[:, 0].long()
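    # Column 0 of the test data is assumed to hold the sample id; the remaining
    # 784 columns are the flattened 28x28 pixel values (scaled to [0, 1] above).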
F = SuperDuperFeatureExtractor().to(Config.device)
C = SuperDuperClassifier().to(Config.device)
F.load_state_dict(torch.load(Config.checkpoint + 'F.pth'))
C.load_state_dict(torch.load(Config.checkpoint + 'C.pth'))
F.eval()
C.eval()
# for idx, batch in enumerate(test_loader):
# img = batch['image']
# # img: [batch_size, 1, 28, 28]
# label = batch['label']
# # label: [batch_size,]
#
# img = img.view(-1, 28 * 28)
#
# feat = F(img)
# rec = R(feat)
# pred = C(feat)
#
# # accuracy
# pred_label = pred.argmax(1)
# accuracy = (pred_label == label).sum().item() / label.size(0)
# print('Batch: {}, Test Accuracy: {}'.format(idx, accuracy))
curr_label = 0
pred_label = []
num_sample = img.size(0)
for i in range(num_sample):
tmp_img = img[i]
tmp_img = tmp_img.view(-1, 1, 28, 28)
feat = F(tmp_img)
pred = C(feat)
tmp_pred_label = pred.argmax(1)
pred_label.append(tmp_pred_label.item())
# plot
plt.figure('Test Sample Predictions')
if tmp_pred_label.item() == curr_label:
curr_label += 1
tmp_img_arr = tmp_img.cpu().numpy()
tmp_img_arr = tmp_img_arr.reshape(28, 28)
plt.subplot(2, 5, curr_label)
plt.imshow(tmp_img_arr, cmap='gray')
plt.title(tmp_pred_label.item())
if curr_label == 10:
break
plt.show()
# accuracy
# submission = pd.DataFrame({'id': idx.tolist()})
# submission = submission.join(pd.DataFrame({'label': pred_label}))
# submission.to_csv('submission.csv', index=False)
if __name__ == '__main__':
predict()
|
from flask_wtf.csrf import CSRFProtect
from dotenv import load_dotenv
import os
csrf = CSRFProtect()
class Configurate:
def __init__(self, app, config, option="config"):
self.app = app
self.config = config
self.SQLALCHEMY_DATABASE_URI = 'SQLALCHEMY_DATABASE_URI'
if option == "config":
self.register_config()
elif option.lower() == "env":
self.register_env()
def register_env(self):
"""Register environment variable"""
if self.__env_file_exists():
SECRET_KEY = os.getenv('SECRET_KEY')
def register_app_url(self):
"""Register app url"""
self.app.config['APP_URL'] = self.config.APP_URL
self.app.config['APP_PORT'] = self.config.APP_PORT
def __env_file_exists(self):
"""Return true if .env file exists"""
        dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(dotenv_path):
load_dotenv(dotenv_path)
return True
else:
raise EnvironmentError('Cannot find .env file')
def register_config(self):
"""Register config file"""
self.register_debug()
        self.register_csrf()
self.register_secret_key()
self.register_database()
self.register_sqlalchemy_track_modifications()
self.register_app_url()
def register_debug(self):
"""Check for debug properties"""
if self.config.DEBUG_ENABLED:
self.app.debug = True
    def register_csrf(self):
"""Check for csrf properties"""
if self.config.CSRF_ENABLED:
csrf.init_app(self.app)
def register_secret_key(self):
"""Check for secret key"""
if self.has_existing_key('SECRET_KEY'):
self.app.secret_key = self.config.SECRET_KEY
def register_database(self):
"""Register database"""
        if self.config.TESTING:
self.app.config[self.SQLALCHEMY_DATABASE_URI] = self.config.DB_TEST
else:
if self.has_database_uri():
self.app.config[self.SQLALCHEMY_DATABASE_URI] = self.config.DB_URI
else:
database_support = ['mysql', 'sqlite', 'postgresql', 'oracle', 'firebird', 'sybase']
if self.config.DB_CONNECTION.lower() in database_support:
self.connect_to_database_engine()
def register_sqlalchemy_track_modifications(self):
"""Register sqlalchemy track modification"""
if self.has_existing_key('SQLALCHEMY_TRACK_MODIFICATIONS'):
self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = self.config.SQLALCHEMY_TRACK_MODIFICATIONS
def connect_to_database_engine(self):
"""Establish a connection to database"""
if self.config.DB_CONNECTION == 'mysql':
self.connect_to_mysql()
if self.config.DB_CONNECTION == 'postgresql':
self.connect_to_postgresql()
if self.config.DB_CONNECTION == 'sqlite':
self.connect_to_sqlite()
def connect_to_sqlite(self):
"""connect to sqlite database"""
        if not self.has_database_uri():
raise Exception('Please provide sqlite db path in config file')
def connect_to_mysql(self):
"""connect to mysql engine"""
        if not self.has_database_uri():
database_uri = f'{self.config.DB_CONNECTION}://{self.config.DB_USERNAME}:{self.config.DB_PASSWORD}@{self.config.DB_HOST}:{self.config.DB_PORT}/{self.config.DB_DATABASE}'
self.app.config[self.SQLALCHEMY_DATABASE_URI] = database_uri
def connect_to_postgresql(self):
"""connect to postgresql engine"""
        if not self.has_database_uri():
database_uri = f'postgresql+psycopg2://{self.config.DB_USERNAME}:{self.config.DB_PASSWORD}@{self.config.DB_HOST}:{self.config.DB_PORT}/{self.config.DB_DATABASE}'
self.app.config[self.SQLALCHEMY_DATABASE_URI] = database_uri
def has_existing_key(self, key: str):
"""Check if environment key exists"""
members = dir(self.config)
if key in members:
if len(key) > 1:
return True
return False
""" error_message = "Expected environment variable '{}' not set in Config file.".format(key)
raise Exception(error_message)"""
def has_database_uri(self):
"""check if database URI is available in config"""
if self.has_existing_key('DB_URI'):
self.app.config[self.SQLALCHEMY_DATABASE_URI] = self.config.DB_URI
return True
return False |
# -*- coding: utf-8 -*-
#
# File : examples/timeserie_prediction/switch_attractor_esn
# Description : NARMA 30 prediction with ESN.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <[email protected]>
# Imports
import torch.utils.data
from echotorch import datasets
from echotorch.transforms import text
# Reuters C50 dataset
reutersloader = torch.utils.data.DataLoader(
datasets.ReutersC50Dataset(root="../../data/reutersc50/", download=True, n_authors=2,
transform=text.Token(), dataset_size=2, dataset_start=20),
batch_size=1, shuffle=True)
# For each batch
for k in range(10):
# Set fold and training mode
reutersloader.dataset.set_fold(k)
reutersloader.dataset.set_train(True)
# Get training data for this fold
for i, data in enumerate(reutersloader):
# Inputs and labels
inputs, label, labels = data
# end for
# Set test mode
reutersloader.dataset.set_train(False)
# Get test data for this fold
for i, data in enumerate(reutersloader):
# Inputs and labels
inputs, label, labels = data
# end for
# end for
|
from pyparsing import (CharsNotIn,
Word,
Literal,
OneOrMore,
alphanums,
delimitedList,
printables,
alphas,
alphas8bit,
nums,
oneOf,
Or,
Combine,
ZeroOrMore)
import sys
def makeDecoratingParseAction(marker):
def parse_action_impl(s,l,t):
return (marker, t)
return parse_action_impl
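# Each decorated expression contributes a (marker, tokens) tuple to the parse results,
# e.g. (illustrative only) an expression tagged with "year" yields
# ("year", ParseResults(['2018'])) when it matches.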
def parseRef(citation_str, debug = False):
author_name_before_comma = CharsNotIn(',')
abbreviated_name = Combine(Word(alphas.upper(), exact=1) + '.'|' ')
unicodePrintables = ''.join(chr(c) for c in range(sys.maxunicode)
if not chr(c) in [' ', ';', ',', '.'])
unicodePrintablesJournal = ''.join(chr(c) for c in range(sys.maxunicode)
if not chr(c) in [';', '.'])
non_abbreviated_name = Word(unicodePrintables)
name_component = (abbreviated_name | non_abbreviated_name)
author_name_after_comma = OneOrMore(name_component)
author = (author_name_before_comma("author_name_before_comma") +
Literal(',').suppress() +
author_name_after_comma("author_name_after_comma"))
author_list = delimitedList(author, delim = ';')
author.setParseAction(makeDecoratingParseAction("author"))
author_list = (delimitedList(author, delim = ';') + Literal('.').suppress())
sentence = (OneOrMore(Word(unicodePrintables, excludeChars='.'), stopOn=Literal('.')))
title = sentence('Title')
title = title.setParseAction(makeDecoratingParseAction("title"))
sentenceJournal = OneOrMore(Word(unicodePrintablesJournal, excludeChars='.'), stopOn=Literal('.'))
journal = sentenceJournal('Journal')
journal = journal.setParseAction(makeDecoratingParseAction("journal"))
valid_year = Word(nums, exact=4) + Literal('.').suppress()
year = (valid_year | Literal('0') + Literal('.').suppress())
year = year.setParseAction(makeDecoratingParseAction("year"))
remaining_stuff = ZeroOrMore(Word(printables), stopOn=year)
remaining_stuff = remaining_stuff.setParseAction(makeDecoratingParseAction("Remaining"))
valid_qualis = (Word(alphanums, exact=2) | Literal('C')) + ZeroOrMore(Word(printables, excludeChars='.'))
ni_qualis = (Literal('Não identificado') + OneOrMore(Word(printables, excludeChars='.')))
qualis = (valid_qualis | ni_qualis + Literal('.').suppress())
qualis = qualis.setParseAction(makeDecoratingParseAction("qualis"))
if debug:
# to track the matching expressions
non_abbreviated_name.setName("non_abbreviated_name").setDebug()
abbreviated_name.setName("abbreviated_name").setDebug()
author.setName("author").setDebug()
author_list.setName("author_list").setDebug()
sentence.setName("Sentence").setDebug()
title.setName("title").setDebug()
journal.setName("Journal").setDebug()
year.setName("year").setDebug()
remaining_stuff.setName("Remaining").setDebug()
qualis.setName("qualis").setDebug()
citation = (author_list('AuthorLst') +
title +
Literal('.') +
journal +
Literal('.') +
remaining_stuff +
year +
qualis)
result = citation.parseString(citation_str)
return result
def parseConferenceRef(citation_str, debug = False):
author_name_before_comma = CharsNotIn(',')
author_name_before_comma.setParseAction(makeDecoratingParseAction("author_name_before_comma"))
abbreviated_name = Combine(Word(alphas.upper(), exact=1) + '.'|' ')
unicodePrintables = ''.join(chr(c) for c in range(sys.maxunicode)
if not chr(c) in [' ', ';', ',', '.'])
unicodePrintablesConference = ''.join(chr(c) for c in range(sys.maxunicode)
if not chr(c) in [';', '.'])
non_abbreviated_name = Word(unicodePrintables)
name_component = (abbreviated_name | non_abbreviated_name)
author_name_after_comma = OneOrMore(name_component)
author_name_after_comma.setParseAction(makeDecoratingParseAction("author_name_after_comma"))
author = (author_name_before_comma("author_name_before_comma") +
Literal(',').suppress() +
author_name_after_comma("author_name_after_comma"))
author_list = delimitedList(author, delim = ';')
author.setParseAction(makeDecoratingParseAction("author"))
author_list = (delimitedList(author, delim = ';') + Literal('.').suppress())
sentence = (OneOrMore(Word(unicodePrintablesConference, excludeChars='.'), stopOn=Literal('. Em:')))
sentence.ignore('.')
title = sentence('Title')
title = title.setParseAction(makeDecoratingParseAction("title"))
conference_name = Literal('Em: ').suppress() + OneOrMore(Word(unicodePrintablesConference, excludeChars='.,'), stopOn=Literal('.'))
conference = conference_name('conference_name') + Literal(',').suppress()
conference = conference.setParseAction(makeDecoratingParseAction("conference"))
year = Word(nums, exact=4) + Literal('.').suppress()
year = year.setParseAction(makeDecoratingParseAction("year"))
remaining_stuff = ZeroOrMore(Word(printables), stopOn=year)
valid_qualis = (Word(alphanums, exact=2) | Literal('C')) + ZeroOrMore(Word(unicodePrintablesConference, excludeChars='.'))
ni_qualis = (Literal('Não identificado') + OneOrMore(Word(unicodePrintablesConference, excludeChars='.')))
qualis = (valid_qualis | ni_qualis + Literal('.').suppress())
qualis = qualis.setParseAction(makeDecoratingParseAction("qualis"))
if debug:
# to track the matching expressions
non_abbreviated_name.setName("non_abbreviated_name").setDebug()
abbreviated_name.setName("abbreviated_name").setDebug()
author.setName("author").setDebug()
author_list.setName("author_list").setDebug()
sentence.setName("Sentence").setDebug()
title.setName("title").setDebug()
conference.setName("conference_name").setDebug()
year.setName("year").setDebug()
remaining_stuff.setName("Remaining").setDebug()
qualis.setName("qualis").setDebug()
citation = (author_list('AuthorLst') +
title +
Literal('.') +
conference +
remaining_stuff +
year +
qualis)
result = citation.parseString(citation_str)
return result
def infosCitation(result):
authors = []
for element in result.asList():
if element[0] == 'author':
try: authors.append(element[1][0][1].asList() + element[1][1][1].asList())
except: authors.append(element[1].asList())
elif element[0] == 'title':
title = element[1].asList()
title = ' '.join(word for word in title)
elif element[0] == 'journal':
journal = element[1].asList()
journal = ' '.join(word for word in journal)
elif element[0] == 'conference':
conference = element[1].asList()
conference = ' '.join(word for word in conference)
elif element[0] == 'Remaining':
remaining = element[1].asList()
remaining = ' '.join(word for word in remaining)
l = remaining.find('issn: ') + 6
issn = remaining[l:l+9]
elif element[0] == 'year':
year = element[1][0]
elif element[0] == 'qualis':
qualis = element[1].asList()
qualis = ' '.join(word for word in qualis)
try: return authors, title, journal, issn, year, qualis
except: return authors, title, conference, year, qualis
def parseJournalPublication(citation, debug = False):
result = parseRef(citation, debug = debug)
return infosCitation(result)
def parseConferencePublication(citation, debug = False):
result = parseConferenceRef(citation, debug = debug)
return infosCitation(result)
|
# ====================================================================
# Author : swc21
# Date : 2018-03-14 09:42:27
# Project : ClusterFiles
# File Name : parallel_IO_test
# Last Modified by : swc21
# Last Modified time : 2018-03-14 12:03:25
# ====================================================================
#
import datetime
import numpy as np
import time
from mpi4py import MPI
save_path = './'
load_path = 'SHARED/sols_data/'
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
name = MPI.Get_processor_name()
mode = MPI.MODE_RDWR | MPI.MODE_CREATE
Filter_Types = ['dcmc_i', 'dcmc_j', 'dcmc_h', 'dcmc_ks']
Inclusion_Areas = []
for i in range(0, 300, 5):
Inclusion_Areas.append((i, i+5))
Distances = []
for i in range(1, 100, 5):
Distances.append(i*1e5)
def inner_most(filter_type=None, inclusion_area=None, distance=None, data=None):
assert filter_type in ['dcmc_i', 'dcmc_j', 'dcmc_h', 'dcmc_ks']
assert len(inclusion_area) > 1.0
assert 0.0 <= inclusion_area[0]
assert inclusion_area[0] < inclusion_area[1] <= 750.0
L_now = 0.0
for star in data:
if not inclusion_area[0] <= star[0] <= inclusion_area[1]:
continue
L_now += star[1]
return L_now
if rank == 0:
carlos = np.zeros(
(len(Filter_Types), len(Distances), len(Inclusion_Areas)))
np.save(save_path+'carlos_out.npy', carlos)
jobs = []
for i, x in enumerate(Filter_Types):
for k, z in enumerate(Distances):
for j, y in enumerate(Inclusion_Areas):
jobs.append([x, y, z, i, j, k])
chunk = len(jobs)/size+1
work = [jobs[x:x+chunk] for x in range(0, len(jobs), chunk)]
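    # Split the job list into roughly size-equal chunks so comm.scatter can hand
    # one chunk of job tuples to each MPI rank.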
else:
work = None
comm.Barrier()
work_list = comm.scatter(work, root=0)
local_filter_types = []
local_inclusion_areas = []
local_distances = []
for i in range(0, len(work_list)):
local_filter_types.append((work_list[i][0], work_list[i][3]))
local_inclusion_areas.append((work_list[i][1], work_list[i][4]))
local_distances.append((work_list[i][2], work_list[i][5]))
comm.Barrier()
time.sleep(rank)
print ' --> Process:', rank, ' is on:', name, ' with', len(work_list), 'jobs'
comm.Barrier()
time.sleep(2)
comm.Barrier()
if rank == 0:
print ''
print ' [host] [filter] [area] [distance] [time] [percent]'
print ' ---------------------------------------------------------------------------------------'
comm.Barrier()
out_file = MPI.File.Open(comm, save_path+'carlos_out.npy', mode)
buffer = np.zeros((len(Filter_Types), len(Distances), len(Inclusion_Areas)))
last_filter = None
last_distance = None
comm.Barrier()
if rank == 0:
runtime = datetime.datetime.now()
comm.Barrier()
job_counter = 0
runtime1 = datetime.datetime.now()
time.sleep(rank*0.25)
for filter_type in local_filter_types:
if last_filter == filter_type[1]:
print rank, 'skipped', filter_type[0]
continue
for distance in local_distances:
if last_distance == distance[1]:
print rank, 'skipped', distance[0]
continue
data = np.load(
load_path+filter_type[0]+str(distance[0]/1e6)+'Mpc_dataArray.npy')
for inclusion_area in local_inclusion_areas:
start = datetime.datetime.now()
x = inner_most(
filter_type[0], inclusion_area[0], distance[0], data)
buffer[filter_type[1], distance[1], inclusion_area[1]] = inner_most(
filter_type[0], inclusion_area[0], distance[0], data)
out_file.Iwrite(
buffer[filter_type[1], distance[1], inclusion_area[1]])
job_counter += 1
end = datetime.datetime.now()
out_file.Sync()
print name, rank, ' ', filter_type[0], ' ', inclusion_area[0], 'Kpc ', distance[0]/1e6, 'Mpc ', end-start, ' ', job_counter, 'Jobs Done'
out_file.Sync()
last_distance = distance[1]
out_file.Sync()
last_filter = filter_type[1]
out_file.Sync()
endtime1 = datetime.datetime.now()
print 'process', rank, 'on ', name, 'has finished all', len(work_list), 'jobs in', endtime1 - runtime1
comm.Barrier()
out_file.Close()
if rank == 0:
endtime = datetime.datetime.now()
print 'Program Finished in', endtime-runtime
|
name="MatricesM"
__package__="MatricesM"
__path__='./MatricesM' |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import os
pchars = u"abcdefghijklmnopqrstuvwxyz,.?!'()[]{}"
fchars = u"ɐqɔpǝɟƃɥıɾʞlɯuodbɹsʇnʌʍxʎz'˙¿¡,)(][}{"
flipper = dict(zip(pchars, fchars))
def flip(s):
charList = [ flipper.get(x, x) for x in s.lower() ]
charList.reverse()
return u"\n(╯°□°)╯︵ " + "".join(charList)
os.system("pkill " + " ".join(sys.argv[2::]))
print flip(" ".join(sys.argv[2::]))
|
import cython
from cython.cimports.cpython.ref import PyObject
def main():
python_string = "foo"
# Note that the variables below are automatically inferred
# as the correct pointer type that is assigned to them.
# They do not need to be typed explicitly.
ptr = cython.cast(cython.p_void, python_string)
    address_in_c = cython.cast(Py_intptr_t, ptr)
    address_from_void = address_in_c  # address_from_void is a python int
ptr2 = cython.cast(cython.pointer(PyObject), python_string)
address_in_c2 = cython.cast(Py_intptr_t, ptr2)
address_from_PyObject = address_in_c2 # address_from_PyObject is a python int
assert address_from_void == address_from_PyObject == id(python_string)
print(cython.cast(object, ptr)) # Prints "foo"
print(cython.cast(object, ptr2)) # prints "foo"
|
"""
Select a Shortlist of Applicants Based on Isotonic Regression Calibration.
Reference: "Accurate Uncertainties for Deep Learning Using Calibrated Regression", Volodymyr Kuleshov et al.
"""
import argparse
import pickle
import numpy as np
from sklearn.isotonic import IsotonicRegression
from utils import calculate_expected_qualified, calculate_expected_selected, transform_except_last_dim
from train_LR import NoisyLR
class IsotonicRegressionSelect(object):
def __init__(self):
# Hyper-parameters
self.delta = 1e-10
# Parameters to be learned
self.iso_reg_model = None
self.t = None
# Internal variables
self.fitted = False
def _nudge(self, matrix):
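        # Add a tiny uniform perturbation (delta-randomization) so that scores are
        # almost surely distinct, which keeps the selection threshold well defined.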
return ((matrix + np.random.uniform(low=0,
high=self.delta,
size=matrix.shape)) / (1 + self.delta))
def fit(self, y_score, y, k, test_size):
y_score = y_score.squeeze()
y = y.squeeze()
assert (y_score.size == y.size), "Check dimensions of input matrices"
# All required (hyper-)parameters have been passed correctly
# Isotonic Regression Starts
# delta-randomization
y_score = self._nudge(y_score)
# select items with larger scores first
y_score = -y_score
# build dataset for isotonic regression
cal_size = y.size
y_score, y = zip(*sorted(zip(y_score, y), key=lambda pair: pair[0]))
hat_p = []
for label in y:
if len(hat_p) == 0:
hat_p.append(label / cal_size)
else:
hat_p.append(label / cal_size + hat_p[-1])
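        # hat_p[i] is the cumulative fraction of qualified candidates (label == 1)
        # among the i+1 highest-scoring calibration examples, taken over the whole
        # calibration set; isotonic regression then maps scores onto this curve.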
self.iso_reg_model = IsotonicRegression(y_min=0., y_max=1., out_of_bounds='clip').fit(y_score, hat_p)
predicted_p = self.iso_reg_model.predict(y_score)
self.t = -np.inf
for i in range(cal_size):
if predicted_p[i] >= k / test_size:
self.t = -y_score[i]
break
# isotonic regression selection model fitted
self.fitted = True
def select(self, scores):
scores = scores.squeeze()
size = scores.size
# delta-randomization
scores = self._nudge(scores)
# make decisions
s = np.zeros(size, dtype=bool)
for i in range(size):
if scores[i] >= self.t:
s[i] = True
else:
s[i] = False
return s
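# Illustrative usage sketch (hypothetical variable names, not part of the original script):
#   selector = IsotonicRegressionSelect()
#   selector.fit(scores_cal, y_cal, k=50, test_size=500.0)  # calibrate the threshold
#   decisions = selector.select(scores_test)                 # boolean selection mask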
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cal_data_path", type=str, help="the input calibration data path")
parser.add_argument("--test_raw_path", type=str, help="the raw test data for sampling test data")
parser.add_argument("--classifier_path", type=str, help="the input classifier path")
parser.add_argument("--result_path", type=str, help="the output selection result path")
parser.add_argument("--k", type=float, help="the target expected number of qualified candidates")
parser.add_argument("--m", type=float, help="the expected number of incoming candidates")
parser.add_argument("--scaler_path", type=str, help="the path for the scaler")
args = parser.parse_args()
k = args.k
m = args.m
# calibration
with open(args.cal_data_path, 'rb') as f:
X_cal, y_cal = pickle.load(f)
with open(args.classifier_path, "rb") as f:
classifier = pickle.load(f)
n = y_cal.size
scores_cal = classifier.predict_proba(X_cal)[:, 1]
iso_reg_select = IsotonicRegressionSelect()
iso_reg_select.fit(scores_cal, y_cal, k, m)
# test
with open(args.test_raw_path, "rb") as f:
X_test_raw, y_test_raw = pickle.load(f)
with open(args.scaler_path, "rb") as f:
scaler = pickle.load(f)
X_test_raw = transform_except_last_dim(X_test_raw, scaler)
scores_test_raw = classifier.predict_proba(X_test_raw)[:, 1]
s_test_raw = iso_reg_select.select(scores_test_raw)
performance_metrics = {}
performance_metrics["num_qualified"] = calculate_expected_qualified(s_test_raw, y_test_raw, m)
performance_metrics["num_selected"] = calculate_expected_selected(s_test_raw, y_test_raw, m)
performance_metrics["constraint_satisfied"] = True if performance_metrics["num_qualified"] >= k else False
with open(args.result_path, 'wb') as f:
pickle.dump(performance_metrics, f)
|
from setuptools import setup
setup (name='pyscarab',
version='0.1.1',
description='Python wrapper and abstractions for libscarab FHE library',
author='Bogdan Kulynych, Benjamin Lipp, Davide Kirchner',
author_email='[email protected], [email protected], [email protected]',
url='https://github.com/blindstore/pyscarab',
packages=['scarab', 'scarab.tests'],
license='MIT',
keywords='crypto',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Security :: Cryptography',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
test_suite='nose.collector',
        tests_require=[
'nose'
])
|
import difflib
import logging
import pathlib
from abc import abstractmethod
from typing import Iterable, List, Optional, Sequence, Set
from . import git_utils
from .command import CommandBase
from .error_lines import parse_error_diffs
from .reporter import Reporter
from .source import FilePredicateType, Source
class LintCommandBase(CommandBase):
def __init__(self, base_dir: pathlib.Path, source: Source) -> None:
self._base_dir = base_dir
self._source = source
def _get_sources(
self, reporter: Reporter, filter_predicate: FilePredicateType
) -> Set[pathlib.Path]:
return self.source.resolve_files(
self.base_dir,
filter_predicate,
self.git_enabled(),
reporter,
)
def _get_covered_files(
self,
reporter: Reporter,
files: Sequence[pathlib.Path],
filter_predicate: FilePredicateType,
) -> List[pathlib.Path]:
sources = self._get_sources(reporter, filter_predicate)
covered: List[pathlib.Path] = []
for f in files:
if f in sources:
covered.append(f)
else:
reporter.logger.info(f"Skipping {f} for {self.name}")
return covered
@property
def base_dir(self) -> pathlib.Path:
return self._base_dir
def git_enabled(self) -> bool:
return git_utils.check_git_available(self.base_dir)
@property
def source(self) -> Source:
return self._source
class SingleFileLintCommandBase(LintCommandBase):
def _run(self, reporter: Reporter, file_paths: Iterable[pathlib.Path]) -> int:
# NOTE(igarashi): create a list to evaluate check() for all file paths
if all([self.check(file_path, reporter) for file_path in file_paths]):
return 0
else:
return 1
def __call__(self, reporter: Reporter) -> int:
file_paths = self._get_sources(reporter, self.filter)
reporter.logger.info(f"Checking {len(file_paths)} files")
return self._run(reporter, file_paths)
def run_files(self, reporter: Reporter, files: Sequence[pathlib.Path]) -> int:
covered_files = self._get_covered_files(reporter, files, self.filter)
return self._run(reporter, covered_files)
@abstractmethod
def filter(self, file_path: pathlib.Path) -> bool:
...
@abstractmethod
def check(self, file_path: pathlib.Path, reporter: Reporter) -> bool:
...
class SingleFileFormatCommandBase(SingleFileLintCommandBase):
def __init__(
self, base_dir: pathlib.Path, source: Source, inplace_edit: bool
) -> None:
super().__init__(base_dir, source)
self._inplace_edit = inplace_edit
@property
def inplace_edit(self) -> bool:
return self._inplace_edit
def check(self, file_path: pathlib.Path, reporter: Reporter) -> bool:
formatted = self.format(file_path, reporter)
if formatted is None:
return False
if self._inplace_edit:
with file_path.open(mode="w") as f:
f.write(formatted)
return True
else:
with file_path.open() as f:
original = f.readlines()
diff = "".join(
difflib.unified_diff(
original,
formatted.splitlines(True),
fromfile=str(file_path),
tofile=str(file_path),
)
)
if len(diff) == 0:
return True
else:
reporter.process_output.log(logging.INFO, diff)
diagnostics = parse_error_diffs(
diff, lambda _: file_path, logger=reporter.logger
)
reporter.report_diagnostics(list(diagnostics))
return False
@abstractmethod
def format(self, file_path: pathlib.Path, reporter: Reporter) -> Optional[str]:
"""Returns formatted content without modifying the original file.
Note:
If a file cannot be formatted due to its content (e.g. invalid syntax),
this method should return `None`.
In this case, `SingleFileFormatCommandBase` continues to check other files.
"""
...
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Context, loader
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from siptracklib.utils import object_by_attribute
import siptracklib.errors
from siptrackweb.views import helpers
from siptrackweb.forms import *
from siptrackweb.views import attribute
from siptrackweb.views import config
@helpers.authcheck
def display(request, oid):
pm = helpers.PageManager(request, 'stweb/views/networktrees/networks/display_range.html')
range = pm.object_store.getOID(oid)
network_tree = range.getNetworkTree()
pm.render_var['network_tree_list'] = network_tree.parent.listChildren(include = ['network tree'])
pm.render_var['browsable_path'] = []
pm.render_var['network_tree'] = network_tree
pm.render_var['network_range'] = range
pm.render_var['network'] = range
pm.render_var['template_list'] = range.listChildren(include = ['device template', 'network template'])
pm.render_var['attribute_list'] = attribute.parse_attributes(range)
pm.render_var['config_list'] = config.parse_config(range)
pm.render_var['device_list'] = range.listReferences(include = ['device', 'device category'])
pm.path(range)
return pm.render()
@helpers.authcheck
def add(request, parent_oid):
pm = helpers.PageManager(request, 'stweb/views/networktrees/networks/add_range.html')
parent = pm.object_store.getOID(parent_oid)
network_tree = parent.getNetworkTree()
pm.render_var['network_tree_list'] = network_tree.parent.listChildren(include = ['network tree'])
pm.render_var['network_tree'] = network_tree
pm.render_var['parent'] = parent
pm.setForm(NetworkRangeAddForm())
pm.path(parent)
return pm.render()
@helpers.authcheck
def add_post(request, parent_oid):
pm = helpers.PageManager(request, 'stweb/views/networktrees/networks/add_range.html')
parent = pm.object_store.getOID(parent_oid)
pm.path(parent)
network_tree = parent.getNetworkTree()
pm.render_var['network_tree_list'] = network_tree.parent.listChildren(include = ['network tree'])
pm.render_var['network_tree'] = network_tree
pm.setForm(NetworkRangeAddForm(request.POST))
if not pm.form.is_valid():
return pm.error()
range = network_tree.addNetworkRange(pm.form.cleaned_data['range'])
if len(pm.form.cleaned_data['description']) > 0:
range.attributes['description'] = pm.form.cleaned_data['description']
return pm.redirect('network.range.display', (range.oid,))
@helpers.authcheck
def delete(request, oid):
pm = helpers.PageManager(request, 'stweb/views/networktrees/networks/delete_range.html')
range = pm.object_store.getOID(oid)
network_tree = range.getNetworkTree()
pm.render_var['network_tree'] = network_tree
pm.render_var['network_tree_list'] = network_tree.parent.listChildren(include = ['network tree'])
pm.render_var['network_range'] = range
pm.path(range)
return pm.render()
@helpers.authcheck
def delete_post(request, oid):
pm = helpers.PageManager(request, 'stweb/views/networktrees/networks/delete_range.html')
range = pm.object_store.getOID(oid)
parent_oid = range.parent.oid
range.delete()
return pm.redirect('network.display', (parent_oid,))
|
#
# PySNMP MIB module CXFrameRelayInterfaceModule-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CXFrameRelayInterfaceModule-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:17:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
DLCI, = mibBuilder.importSymbols("CXFrameRelay-MIB", "DLCI")
Alias, SapIndex, cxFrameRelayInterfaceModule = mibBuilder.importSymbols("CXProduct-SMI", "Alias", "SapIndex", "cxFrameRelayInterfaceModule")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, MibIdentifier, Gauge32, Bits, Counter32, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Integer32, IpAddress, Unsigned32, NotificationType, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "MibIdentifier", "Gauge32", "Bits", "Counter32", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Integer32", "IpAddress", "Unsigned32", "NotificationType", "ObjectIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class SubRef(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
frimSRConnectInterval = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSRConnectInterval.setStatus('mandatory')
frimServiceCost = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimServiceCost.setStatus('mandatory')
frimSapTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3), )
if mibBuilder.loadTexts: frimSapTable.setStatus('mandatory')
frimSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1), ).setIndexNames((0, "CXFrameRelayInterfaceModule-MIB", "frimSapId"))
if mibBuilder.loadTexts: frimSapEntry.setStatus('mandatory')
frimSapId = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 1), SapIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapId.setStatus('mandatory')
frimSapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapRowStatus.setStatus('mandatory')
frimSapType = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("lower", 1), ("upper", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapType.setStatus('mandatory')
frimSapAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 4), Alias()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapAlias.setStatus('mandatory')
frimSapCompanionAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 5), Alias()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapCompanionAlias.setStatus('mandatory')
frimSapMaxDlcis = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1022)).clone(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapMaxDlcis.setStatus('mandatory')
frimSapMaxErrorFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 14)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapMaxErrorFrames.setStatus('mandatory')
frimSapMonitorFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 14)).clone(14)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapMonitorFrames.setStatus('mandatory')
frimSapFrWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 300)).clone(150)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSapFrWindowSize.setStatus('mandatory')
frimSapControlStats = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clearSapStats", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: frimSapControlStats.setStatus('mandatory')
frimSapstatRxDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatRxDataFrames.setStatus('mandatory')
frimSapstatRxDataOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatRxDataOctets.setStatus('mandatory')
frimSapstatTxDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatTxDataFrames.setStatus('mandatory')
frimSapstatTxDataOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatTxDataOctets.setStatus('mandatory')
frimSapstatUnopenedServiceDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatUnopenedServiceDiscards.setStatus('mandatory')
frimSapstatPvcDownDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatPvcDownDiscards.setStatus('mandatory')
frimSapstatUserSuccessfulOpens = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatUserSuccessfulOpens.setStatus('mandatory')
frimSapstatUserUnsuccessfulOpens = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatUserUnsuccessfulOpens.setStatus('mandatory')
frimSapstatSRSuccessfulConnects = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatSRSuccessfulConnects.setStatus('mandatory')
frimSapstatSRUnsuccessfulConnects = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatSRUnsuccessfulConnects.setStatus('mandatory')
frimSapstatTxResets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatTxResets.setStatus('mandatory')
frimSapstatRxBECN = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatRxBECN.setStatus('mandatory')
frimSapstatRxFECN = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 3, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSapstatRxFECN.setStatus('mandatory')
frimSRTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6), )
if mibBuilder.loadTexts: frimSRTable.setStatus('mandatory')
frimSREntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1), ).setIndexNames((0, "CXFrameRelayInterfaceModule-MIB", "frimSRFrpCircuitSapId"), (0, "CXFrameRelayInterfaceModule-MIB", "frimSRFrpCircuitDlci"), (0, "CXFrameRelayInterfaceModule-MIB", "frimSRProtocolId"), (0, "CXFrameRelayInterfaceModule-MIB", "frimSRSubRef"))
if mibBuilder.loadTexts: frimSREntry.setStatus('mandatory')
frimSRFrpCircuitSapId = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 1), SapIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSRFrpCircuitSapId.setStatus('mandatory')
frimSRFrpCircuitDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 2), DLCI()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSRFrpCircuitDlci.setStatus('mandatory')
frimSRProtocolId = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSRProtocolId.setStatus('mandatory')
frimSRSubRef = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 4), SubRef()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSRSubRef.setStatus('mandatory')
frimSRRefRangeEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 5), SubRef()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSRRefRangeEnd.setStatus('mandatory')
frimSRRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSRRowStatus.setStatus('mandatory')
frimSRDestFrpCircuitAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 7), Alias()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSRDestFrpCircuitAlias.setStatus('mandatory')
frimSRDestSubRef = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 8), SubRef()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frimSRDestSubRef.setStatus('mandatory')
frimSRRouteStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notConnected", 1), ("inProgress", 2), ("connected", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSRRouteStatus.setStatus('mandatory')
frimSRClearStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 6, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))).clone(namedValues=NamedValues(("noFailure", 1), ("internalError", 2), ("localAllocFailure", 3), ("remoteAllocFailure", 4), ("localNoAccess", 5), ("remoteNoAccess", 6), ("localPvcDown", 7), ("remotePvcDown", 8), ("localPvcBusy", 9), ("remotePvcBusy", 10), ("localFcnFailure", 11), ("remoteFcnFailure", 12), ("localDsnFailure", 13), ("localRefInUse", 14), ("remoteAliasNotFound", 15), ("remoteNoPvcService", 16)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSRClearStatus.setStatus('mandatory')
frimSysConTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5), )
if mibBuilder.loadTexts: frimSysConTable.setStatus('mandatory')
frimSysConEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1), ).setIndexNames((0, "CXFrameRelayInterfaceModule-MIB", "frimSysConSapId"), (0, "CXFrameRelayInterfaceModule-MIB", "frimSysConDlci"), (0, "CXFrameRelayInterfaceModule-MIB", "frimSysConPid"), (0, "CXFrameRelayInterfaceModule-MIB", "frimSysConRef"))
if mibBuilder.loadTexts: frimSysConEntry.setStatus('mandatory')
frimSysConSapId = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 1), SapIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConSapId.setStatus('mandatory')
frimSysConDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 2), DLCI()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConDlci.setStatus('mandatory')
frimSysConPid = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("pidFr", 1), ("pidLan", 2), ("pidX25", 3), ("pidCcm", 4), ("pidGmf", 5), ("pidLlc2", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConPid.setStatus('mandatory')
frimSysConRef = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 4), SubRef()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConRef.setStatus('mandatory')
frimSysConRemoteSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConRemoteSlot.setStatus('mandatory')
frimSysConCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConCreationTime.setStatus('mandatory')
frimSysConReqDataSize = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConReqDataSize.setStatus('mandatory')
frimSysConNegDataSize = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConNegDataSize.setStatus('mandatory')
frimSysConNegSizeExceededFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConNegSizeExceededFrames.setStatus('mandatory')
frimSysConRefRangeEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 2, 5, 1, 10), SubRef()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frimSysConRefRangeEnd.setStatus('mandatory')
mibBuilder.exportSymbols("CXFrameRelayInterfaceModule-MIB", frimSapAlias=frimSapAlias, frimSRTable=frimSRTable, frimSapstatSRSuccessfulConnects=frimSapstatSRSuccessfulConnects, frimSysConNegDataSize=frimSysConNegDataSize, frimSysConRefRangeEnd=frimSysConRefRangeEnd, frimSRFrpCircuitSapId=frimSRFrpCircuitSapId, frimSapTable=frimSapTable, frimSysConCreationTime=frimSysConCreationTime, frimSREntry=frimSREntry, frimSysConDlci=frimSysConDlci, frimSapMonitorFrames=frimSapMonitorFrames, frimSapRowStatus=frimSapRowStatus, frimSRRowStatus=frimSRRowStatus, frimSysConRemoteSlot=frimSysConRemoteSlot, frimSapMaxErrorFrames=frimSapMaxErrorFrames, frimSysConEntry=frimSysConEntry, frimSRRouteStatus=frimSRRouteStatus, frimSapMaxDlcis=frimSapMaxDlcis, frimSRProtocolId=frimSRProtocolId, frimSysConSapId=frimSysConSapId, frimSysConRef=frimSysConRef, frimSRFrpCircuitDlci=frimSRFrpCircuitDlci, frimSapFrWindowSize=frimSapFrWindowSize, frimSRClearStatus=frimSRClearStatus, frimSysConTable=frimSysConTable, frimSapEntry=frimSapEntry, frimSRConnectInterval=frimSRConnectInterval, frimSapId=frimSapId, frimSapstatRxBECN=frimSapstatRxBECN, frimSysConPid=frimSysConPid, frimServiceCost=frimServiceCost, frimSapstatTxDataFrames=frimSapstatTxDataFrames, frimSapCompanionAlias=frimSapCompanionAlias, frimSapstatTxDataOctets=frimSapstatTxDataOctets, frimSapstatRxDataOctets=frimSapstatRxDataOctets, frimSRRefRangeEnd=frimSRRefRangeEnd, frimSapControlStats=frimSapControlStats, frimSRSubRef=frimSRSubRef, frimSapstatPvcDownDiscards=frimSapstatPvcDownDiscards, frimSapType=frimSapType, frimSRDestFrpCircuitAlias=frimSRDestFrpCircuitAlias, frimSapstatUserSuccessfulOpens=frimSapstatUserSuccessfulOpens, frimSapstatRxDataFrames=frimSapstatRxDataFrames, frimSRDestSubRef=frimSRDestSubRef, SubRef=SubRef, frimSapstatUserUnsuccessfulOpens=frimSapstatUserUnsuccessfulOpens, frimSapstatTxResets=frimSapstatTxResets, frimSapstatUnopenedServiceDiscards=frimSapstatUnopenedServiceDiscards, frimSysConReqDataSize=frimSysConReqDataSize, frimSapstatRxFECN=frimSapstatRxFECN, frimSapstatSRUnsuccessfulConnects=frimSapstatSRUnsuccessfulConnects, frimSysConNegSizeExceededFrames=frimSysConNegSizeExceededFrames)
|
class HashTable:
'''
    HashTable represents a custom dictionary implementation
    where we use two private lists to achieve storing and hashing
    of key-value pairs.
'''
def __init__(self):
self.max_capacity = 4
self.__keys = [None] * self.max_capacity
self.__values = [None] * self.max_capacity
def __getitem__(self, key):
index = self.__keys.index(key)
return self.__values[index]
def __setitem__(self, key, value):
# If we try to add key which already exists - change its value in the second list (values)
if key in self.__keys:
index = self.__keys.index(key)
self.__values[index] = value
return
if self.actual_length == self.max_capacity:
self.__resize()
index = self.__hash(key)
self.__keys[index] = key
self.__values[index] = value
def __len__(self):
return self.max_capacity
def __repr__(self):
result = [
f"{self.__keys[index]}: {self.__values[index]}"
for index in range(len(self.__keys))
if self.__keys[index] is not None
]
return "{" + "{}".format(", ".join(result)) + "}"
@property
def keys(self):
return self.__keys
@property
def values(self):
return self.__values
@property
def actual_length(self):
        return len([el for el in self.__keys if el is not None])
def add(self, key, value):
self[key] = value
def __resize(self):
self.__keys = self.__keys + [None] * self.max_capacity
self.__values = self.__values + [None] * self.max_capacity
self.max_capacity *= 2
def get(self, key, default=None):
try:
index = self.__keys.index(key)
return self.__values[index]
except ValueError:
return default
def __check_available_index(self, index):
'''
        Checks whether the slot at this index is empty; if it is not,
        applies the linear probing approach to resolve the collision
        and returns the next available index.
        :param index: int
        :return: int -> next available index
'''
if index == len(self.__keys):
return self.__check_available_index(0)
# Linear approach implementation
if self.__keys[index] is None:
return index
# Try next index
return self.__check_available_index(index + 1)
def __hash(self, key):
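        # Hash the key by summing its character ordinals modulo the current
        # capacity, then resolve any collision with linear probing
        # (__check_available_index).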
index = sum([ord(ch) for ch in key]) % self.max_capacity
available_index = self.__check_available_index(index)
return available_index
# table = HashTable()
# print(table.keys)
# table["name"] = "Peter"
# table["age"] = 25 # Collision if we try it with max_capacity = 4
# table["age"] = 26
# table.add("work", "Some title")
# table["eyes color"] = "blue"
# table["eyes color2"] = "brown"
#
# print(table["name"])
# print(table["age"])
# print(table.get(5, "Not in dict"))
# print(len(table))
# print(table.actual_length)
# print(table)
|
import falcon
from ebl.context import Context
from ebl.corpus.application.corpus import Corpus
from ebl.corpus.web.alignments import AlignmentResource
from ebl.corpus.web.chapters import ChaptersDisplayResource, ChaptersResource
from ebl.corpus.web.colophons import ColophonsResource
from ebl.corpus.web.extant_lines import ExtantLinesResource
from ebl.corpus.web.lemmatizations import LemmatizationResource
from ebl.corpus.web.lines import LinesImportResource, LinesResource, LineResource
from ebl.corpus.web.manuscripts import ManuscriptsResource
from ebl.corpus.web.texts import TextResource, TextSearchResource, TextsResource
from ebl.corpus.web.unplaced_lines import UnplacedLinesResource
from ebl.transliteration.application.transliteration_query_factory import (
TransliterationQueryFactory,
)
def create_corpus_routes(api: falcon.App, context: Context):
corpus = Corpus(
context.text_repository,
context.get_bibliography(),
context.changelog,
context.sign_repository,
context.parallel_line_injector,
)
context.text_repository.create_indexes()
texts = TextsResource(corpus)
text = TextResource(corpus)
text_search = TextSearchResource(
corpus, TransliterationQueryFactory(context.sign_repository)
)
chapters = ChaptersResource(corpus)
chapters_display = ChaptersDisplayResource(corpus)
chapters_line = LineResource(corpus)
alignment = AlignmentResource(corpus)
manuscript_lemmatization = LemmatizationResource(corpus)
manuscript = ManuscriptsResource(corpus)
lines = LinesResource(corpus)
lines_import = LinesImportResource(corpus)
colophons = ColophonsResource(corpus)
unplaced_lines = UnplacedLinesResource(corpus)
extant_lines = ExtantLinesResource(corpus)
text_url = "/texts/{genre}/{category}/{index}"
chapter_url = text_url + "/chapters/{stage}/{name}"
api.add_route("/texts", texts)
api.add_route("/textsearch", text_search)
api.add_route(text_url, text)
api.add_route(chapter_url, chapters)
api.add_route(f"{chapter_url}/display", chapters_display)
api.add_route(f"{chapter_url}/alignment", alignment)
api.add_route(f"{chapter_url}/lemmatization", manuscript_lemmatization)
api.add_route(f"{chapter_url}/manuscripts", manuscript)
api.add_route(f"{chapter_url}/lines", lines)
api.add_route(f"{chapter_url}/lines/{{number}}", chapters_line)
api.add_route(f"{chapter_url}/import", lines_import)
api.add_route(f"{chapter_url}/colophons", colophons)
api.add_route(f"{chapter_url}/unplaced_lines", unplaced_lines)
api.add_route(f"{chapter_url}/extant_lines", extant_lines)
|
class perro():
nombre = 'solovino'
raza = 'cruza'
edad = 5
color = 'Negro'
    def setnombre(self):
        self.nombre = input("Enter your dog's name: ")
    def getnombre(self):  # Getter, a method that returns the property
        return self.nombre
    def crecio(self):
        x = int(input("How many years did it grow?: "))
        if(x >= 0 and x <= 15):
            self.edad += x
        else:
            print("That's too many")
    def imprimirDatos(self):
        print("Name: ", self.nombre, "Breed: ", self.raza, "Age: ", self.edad, "Color: ", self.color)
perro1 = perro()
perro1.setnombre()
perro1.crecio()
perro1.imprimirDatos() |
CONTEXT_ALL = 0
CONTEXT_CHANNEL = 1
CONTEXT_QUERY = 2
|
from src.config import hiv_config
from hiv_domain.fittedQiter import FittedQIteration
from sklearn.externals import joblib
import pickle
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
ep_reward = []
config = hiv_config
rewards = []
""" Load hiv environment - the environment comes with a policy which can be
made eps greedy. """
with open('hiv_domain/hiv_simulator/hiv_preset_hidden_params', 'rb') as f:
preset_hidden_params = pickle.load(f, encoding='latin1')
env = FittedQIteration(perturb_rate=0.05,
preset_params=preset_hidden_params[config.ins],
gamma=config.gamma,
ins=config.ins,
episode_length=config.max_length)
env.tree = joblib.load('hiv_domain/extra_tree_gamma_ins20.pkl')
for i_episode in range(config.sample_num_traj):
if i_episode % 10 == 0:
print("{} trajectories generated".format(i_episode))
#episode = env.run_episode(eps=config.behavior_eps, track=True)
episode = env.run_episode(eps=0, track=True)
G = 0
for idx_step in range(config.max_length):
G += episode[idx_step][2]*config.gamma**idx_step
ep_reward.append(G)
rewards += ([episode[i][2] for i in range(len(episode))])
standardization_data = dict()
standardization_data[config.ins] = dict()
standardization_data[config.ins]["reward_mean"] = np.mean(rewards)
standardization_data[config.ins]["reward_std"] = np.std(rewards)
with open("hiv_domain/standardization_data.pkl", "wb") as f:
pickle.dump(standardization_data, f)
|
import re
from model.address import Address
def test_address_on_home_page(app):
    # contact info from the home page
address_from_home_page = app.address.get_address_list()[0]
    # contact info from the edit form
address_from_edit_page = app.address.get_address_info_from_edit_page(0)
assert address_from_home_page.first_name == address_from_edit_page.first_name
assert address_from_home_page.last_name == address_from_edit_page.last_name
assert address_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(address_from_edit_page)
assert address_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(address_from_edit_page)
def test_addresses_on_home_page(app, db):
addresses_from_home_page = app.address.get_address_list()
addresses_from_db = db.get_address_list()
assert sorted(addresses_from_home_page, key=Address.id_or_max) == sorted(addresses_from_db, key=Address.id_or_max)
def clear(s):
return re.sub('[() -]', '', s)
def merge_phones_like_on_home_page(address):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[address.home_telephone, address.mobile_telephone, address.work_telephone, address.phone2]))))
def merge_emails_like_on_home_page(address):
return "\n".join(filter(lambda x: x != "",
(filter(lambda x: x is not None, [address.email, address.email2, address.email3])))) |
from rlib.jit import promote
from som.interpreter.ast.frame import is_on_stack
from som.vmobjects.abstract_object import AbstractObject
from som.vmobjects.block_ast import VALUE_SIGNATURE
from som.vmobjects.primitive import (
UnaryPrimitive,
BinaryPrimitive,
TernaryPrimitive,
)
class BcBlock(AbstractObject):
_immutable_fields_ = ["_method", "_outer"]
def __init__(self, method, inner):
AbstractObject.__init__(self)
self._method = method
self._outer = inner
def is_same_context(self, other_block):
assert isinstance(other_block, BcBlock)
return self._outer is other_block._outer # pylint: disable=protected-access
def get_method(self):
return promote(self._method)
def get_from_outer(self, index):
promote(index)
assert self._outer and 0 <= index < len(self._outer), "No outer in " + str(
self._method
)
assert isinstance(self._outer[index], AbstractObject)
return self._outer[index]
def set_outer(self, index, value):
promote(index)
assert 0 <= index < len(self._outer)
assert isinstance(value, AbstractObject)
self._outer[index] = value
def is_outer_on_stack(self):
# TODO: can we get rid of this?
# we may need a second bytecode for this (push block w/o context)
if self._outer is None:
return True
return is_on_stack(self._outer)
def get_on_stack_marker(self):
return self._outer
def get_class(self, universe):
return universe.block_classes[self._method.get_number_of_arguments()]
def get_object_layout(self, universe):
return universe.block_layouts[self._method.get_number_of_arguments()]
def block_evaluation_primitive(num_args):
if num_args == 1:
return UnaryPrimitive(VALUE_SIGNATURE[num_args], _invoke_1)
if num_args == 2:
return BinaryPrimitive(VALUE_SIGNATURE[num_args], _invoke_2)
if num_args == 3:
return TernaryPrimitive(VALUE_SIGNATURE[num_args], _invoke_3)
raise Exception("Unsupported number of arguments for block: " + str(num_args))
def _invoke_1(rcvr):
return rcvr.get_method().invoke_1(rcvr)
def _invoke_2(rcvr, arg):
return rcvr.get_method().invoke_2(rcvr, arg)
def _invoke_3(rcvr, arg1, arg2):
return rcvr.get_method().invoke_3(rcvr, arg1, arg2)
|
class Util:
def __init__(self, bot):
self.bot = bot
def update_channels(self):
conversations = self.bot.slack_client.api_call("conversations.list")
for channel in conversations.get("channels"):
self.bot.redis.hset("channels:cid_name", channel["id"], channel["name"])
self.bot.redis.hset("channels:name_cid", channel["name"], channel["id"])
print(self.bot.redis.hgetall("channels"))
    def get_channelname(self, cid):
        channelname = self.bot.redis.hget("channels:cid_name", cid)
        return channelname.decode("utf-8") if channelname else None
    def get_cid(self, channel):
        cid = self.bot.redis.hget("channels:name_cid", channel)
        return cid.decode("utf-8") if cid else None
def update_users(self):
users = self.bot.slack_client.api_call("users.list")
for user in users.get("members"):
username = user["name"]
self.bot.redis.hset("users:uid_name", user["id"], username)
self.bot.redis.hset("users:name_uid", username, user["id"])
if user.get("is_admin", False) or user.get("is_owner", False):
self.bot.redis.hset("permissions", user["id"], 90)
if username == "zifnab":
self.bot.redis.hset("permissions", user["id"], 9001)
def get_username(self, uid):
username = self.bot.redis.hget("users:uid_name", uid)
return username.decode("utf-8") if username else None
def get_uid(self, username):
uid = self.bot.redis.hget("users:name_uid", username)
return uid.decode("utf-8") if uid else None
def get_perm(self, uid):
level = self.bot.redis.hget("permissions", uid)
if not level:
return 0
else:
return int(level)
|
"""The following code will cause various Python Exceptions to occur"""
def name_error():
"""This function will raise a NameError."""
no_function()
def type_error():
"""This function will raise a TypeError."""
"hello world" + 7
def attribute_error():
"""This function will raise a AttributeError."""
new_numbers = [1, 2, 3]
print(new_numbers.upper())
def big_boom():
"""This function calls on big_bodda_boom function."""
big_bodda_boom()
def big_bodda_boom():
"""This function calls on big_big_bodda_boom function."""
big_big_bodda_boom()
def big_big_bodda_boom():
"""This function will raise 4 level NameError"""
print(oh_no())
def syntax_error():
    """This function will raise a SyntaxError."""
    eval("print('hello'")  # unbalanced parenthesis raises SyntaxError when evaluated
|
#!/usr/bin/env python
# Copyright (c) 2021, Microsoft
from threading import Lock
import numpy as np
import rospy
from std_msgs.msg import Float64
from vesc_msgs.msg import VescStateStamped
from nav_msgs.msg import Odometry
import utils
# Tune these Values!
KM_V_NOISE = 0.4 # Kinematic car velocity noise std dev
KM_DELTA_NOISE = 0.2 # Kinematic car delta noise std dev
KM_X_FIX_NOISE = 3e-2 # Kinematic car x position constant noise std dev
KM_Y_FIX_NOISE = 3e-2 # Kinematic car y position constant noise std dev
KM_THETA_FIX_NOISE = 1e-1 # Kinematic car theta constant noise std dev
"""
Propagates the particles forward based on the T265 odometry difference
"""
class KinematicMotionModelT265:
"""
Initializes the kinematic motion model
motor_state_topic: The topic containing motor state information
servo_state_topic: The topic containing servo state information
speed_to_erpm_offset: Offset conversion param from rpm to speed
speed_to_erpm_gain: Gain conversion param from rpm to speed
steering_angle_to_servo_offset: Offset conversion param from servo position to steering angle
steering_angle_to_servo_gain: Gain conversion param from servo position to steering angle
car_length: The length of the car
particles: The particles to propagate forward
state_lock: Controls access to particles
"""
def __init__(
self,
t265_state_topic,
particles,
state_lock=None,
):
self.last_t265_odom = None # The most recent T265 odom message
self.last_t265_stamp = None # The time stamp from the previous T265 state msg
self.particles = particles
if state_lock is None:
self.state_lock = Lock()
else:
self.state_lock = state_lock
# Subscribe to the odometry from the T265 tracking camera
self.tracking_sub = rospy.Subscriber(
t265_state_topic, Odometry, self.t265_cb, queue_size=1
)
"""
Caches the most recent T265 odometry message
msg: A nav_msgs/Odometry message
"""
def t265_cb(self, msg):
self.state_lock.acquire()
if self.last_t265_odom is None:
print("T265 callback called for first time....")
self.last_t265_odom = msg # Update T265 odom
self.last_t265_stamp = msg.header.stamp
self.state_lock.release()
return
else:
# Propagate particles forward in place using delta odom
dt = (msg.header.stamp - self.last_t265_stamp).to_sec()
self.apply_odom_delta(self.particles, msg, self.last_t265_odom, dt)
self.last_t265_odom = msg # Update T265 odom
self.last_t265_stamp = msg.header.stamp
self.state_lock.release()
def apply_odom_delta(self, proposal_dist, odom_curr, odom_prev, dt):
"""
Propagates particles forward (in-place) by applying the difference btw odoms and adding
sampled gaussian noise
proposal_dist: The particles to propagate
odom_curr: current position captured by the T265
odom_prev: last position captured by the T265
dt: time interval since the last update
returns: nothing
"""
# updates in X and Y are simple
proposal_dist[:, 0] -= odom_curr.pose.pose.position.x - odom_prev.pose.pose.position.x
proposal_dist[:, 1] -= odom_curr.pose.pose.position.y - odom_prev.pose.pose.position.y
# update in theta requires odom angle diff in XY plane
proposal_dist[:, 2] -= utils.quaternion_to_angle(odom_curr.pose.pose.orientation)-utils.quaternion_to_angle(odom_prev.pose.pose.orientation)
# delta_theta = utils.quaternion_to_angle(odom_curr.pose.pose.orientation)-utils.quaternion_to_angle(odom_prev.pose.pose.orientation)
# delta_odom_x = odom_curr.pose.pose.position.x - odom_prev.pose.pose.position.x
# delta_odom_y = odom_curr.pose.pose.position.y - odom_prev.pose.pose.position.y
# print("Delta_x = " + str(delta_odom_x))
# print("Delta_y = " + str(delta_odom_y))
# print("Delta_theta = " + str(delta_theta))
# Add noise
proposal_dist[:, 0] = np.random.normal(
loc=proposal_dist[:, 0],
scale=KM_X_FIX_NOISE,
size=proposal_dist[:, 0].shape,
)
proposal_dist[:, 1] = np.random.normal(
loc=proposal_dist[:, 1],
scale=KM_Y_FIX_NOISE,
size=proposal_dist[:, 1].shape,
)
proposal_dist[:, 2] = np.random.normal(
loc=proposal_dist[:, 2], scale=KM_THETA_FIX_NOISE, size=proposal_dist[:, 2].shape
)
# Limit particle rotation to be between -pi and pi
proposal_dist[proposal_dist[:, 2] < -1 * np.pi, 2] += 2 * np.pi
proposal_dist[proposal_dist[:, 2] > np.pi, 2] -= 2 * np.pi
|
import pytest
@pytest.mark.parametrize(
"file, result, expected",
(
("src/dafny/utils/MathHelpers.dfy", "passed", "passed"),
("src/dafny/utils/Helpers.dfy", "failed", "failed"),
),
)
def test_proof_result(file, result, expected):
assert file.endswith(".dfy")
assert result == expected
@pytest.mark.parametrize(
"file2, result, expected",
(
("src/dafny/utils/MathHelpers.dfy", "passed", "passed"),
("src/dafny/utils/Helpers.dfy", "failed", "failed"),
),
)
def test_proof_resultfailing(file2, result, expected):
assert file2.endswith(".dfy")
assert result == expected
|
"""
@Project: BeautifulReport
@Author: Mocobk
@Data: 2019/03/18
@File: test_demo_report.py
@License: MIT
"""
import unittest
from BeautifulReport import BeautifulReport
if __name__ == '__main__':
test_suite = unittest.defaultTestLoader.discover('./tests', pattern='test_demo*.py')
result = BeautifulReport(test_suite)
    result.report(filename='test_report_demo', description='default test report', report_dir='report', theme='theme_default')
|
def convert(String):
list1 = list(String.split(" "))
return list1
if __name__=="__main__":
String = (input())
print(convert(String))
list2 = ["Anuja", "pritee"]
print(str(list2))
def list_to_String(items):
    str1 = ""
    for item in items:
        str1 += item
    return str1
if __name__=="__main__":
    items = ["Anuja", "pritee", "Muktai"]
    print(list_to_String(items))
def list_to_String1(list1):
str1 =" "
return str1.join(list1)
if __name__=="__main__":
list1 = ["Anuja", "Pritee", "Muktai"]
print(list_to_String1(list1))
|
from dataclasses import dataclass
@dataclass
class Event:
type: str # TODO: enum?
user_id: str
output = 'slack'
@dataclass
class MessageEvent(Event):
message: str
channel: str
@dataclass
class ErrorEvent(Event):
error: str
code: str
message: str
@dataclass
class OutputEvent:
channel: str
message: str
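# A minimal usage sketch (not part of the original module); the field values below are
# hypothetical. `output` has no annotation, so it stays a plain class attribute shared
# by every event rather than becoming a dataclass field.
if __name__ == "__main__":
    evt = MessageEvent(type="message", user_id="U123", message="hello", channel="#general")
    print(evt)         # MessageEvent(type='message', user_id='U123', message='hello', channel='#general')
    print(evt.output)  # slack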
|
import os
import pytest
from flask import render_template_string, url_for
from flask_rollup import Bundle, Rollup
def test_create_simple(app):
rv = Rollup(app)
assert rv.app == app
assert app.extensions['rollup'] == rv
def test_init_app(app):
r = Rollup()
r.init_app(app)
assert r.app is None
assert app.extensions['rollup'] == r
def test_config_path_provided(app):
config_file_path = '/some/where/rollup.config.js'
app.config['ROLLUP_CONFIG_JS'] = config_file_path
rv = Rollup(app)
assert len(rv.argv) == 3
assert rv.argv[-1] == config_file_path
def test_autobuild_enabled_in_development(app, mocker):
mocker.patch.dict('os.environ', {'FLASK_ENV': 'development'})
Rollup(app)
assert len(app.before_request_funcs) == 1
def test_autobuild_running_in_development(app, mocker):
def handler():
return 'something'
app.config['SERVER_NAME'] = '127.0.0.1'
name = 'p1'
mocker.patch.dict('os.environ', {'FLASK_ENV': 'development'})
rollup = Rollup(app)
b = Bundle(name, 'some/where', ['some/input/file.js'])
mocker.patch(
'flask_rollup.os.stat', mocker.Mock(return_value=mocker.Mock(st_mtime_ns=200))
)
mocker.patch('flask_rollup.subprocess.run')
rollup.register(b)
app.add_url_rule('/something', endpoint=name, view_func=handler)
fake_run = mocker.Mock()
mocker.patch.object(rollup, 'run_rollup', fake_run)
with app.test_client() as client:
with app.app_context():
url = url_for(name)
client.get(url)
fake_run.assert_called_once_with(name)
def test_autobuild_skipped_for_other_endpoint(app, mocker):
def handler():
return 'something'
app.config['SERVER_NAME'] = '127.0.0.1'
name = 'p1'
other_name = 'p2'
mocker.patch.dict('os.environ', {'FLASK_ENV': 'development'})
rollup = Rollup(app)
b = Bundle(name, 'some/where', ['some/input/file.js'])
mocker.patch(
'flask_rollup.os.stat', mocker.Mock(return_value=mocker.Mock(st_mtime_ns=200))
)
mocker.patch('flask_rollup.subprocess.run')
rollup.register(b)
app.add_url_rule('/something', endpoint=name, view_func=handler)
app.add_url_rule('/otherthing', endpoint=other_name, view_func=handler)
fake_run = mocker.Mock()
mocker.patch.object(rollup, 'run_rollup', fake_run)
with app.test_client() as client:
with app.app_context():
url = url_for(other_name)
client.get(url)
fake_run.assert_not_called()
def test_autobuild_disabled_in_production(app, mocker):
mocker.patch.dict('os.environ', {'FLASK_ENV': 'production'})
Rollup(app)
assert len(app.before_request_funcs) == 0
def test_autobuild_not_running_in_production(app, mocker):
def handler():
return 'something'
app.config['SERVER_NAME'] = '127.0.0.1'
name = 'p1'
mocker.patch.dict('os.environ', {'FLASK_ENV': 'production'})
rollup = Rollup(app)
b = Bundle(name, 'some/where', ['some/input/file.js'])
rollup.register(b)
app.add_url_rule('/something', endpoint=name, view_func=handler)
fake_run = mocker.Mock()
mocker.patch.object(rollup, 'run_rollup', fake_run)
with app.test_client() as client:
with app.app_context():
url = url_for(name)
client.get(url)
fake_run.assert_not_called()
def test_register(app):
b = Bundle('p1', 'some/where', ['some/input/file.js'])
rollup = Rollup(app)
rollup.register(b)
assert len(rollup.bundles) == 1
assert os.path.isabs(b.target_dir)
@pytest.mark.parametrize('environment', ['development', 'production'])
def test_run(environment, app, mocker):
name = 'p1'
mocker.patch.dict('os.environ', {'FLASK_ENV': environment})
b = Bundle(name, 'some/where', ['some/input/file.js'])
rollup = Rollup(app)
fake_run = mocker.Mock()
mocker.patch('flask_rollup.subprocess.run', fake_run)
mocker.patch(
'flask_rollup.os.stat', mocker.Mock(return_value=mocker.Mock(st_mtime_ns=100))
)
rollup.register(b)
rollup.run_rollup(name)
rollup.run_rollup(name)
fake_run.assert_called_once()
def test_template_global(app, mocker):
def handler():
return render_template_string('{{ jsbundle(request.endpoint) }}')
app.config['SERVER_NAME'] = '127.0.0.1'
name = 'p1'
b = Bundle(name, 'some/where', ['some/input/file.js'])
mocker.patch.object(b, 'calc_state', mocker.Mock(return_value='state'))
rollup = Rollup(app)
mocker.patch('flask_rollup.subprocess.run')
tgt_path = '/static/directory/some/where/file1.js'
mocker.patch(
'flask_rollup.glob.glob', mocker.Mock(return_value=[tgt_path])
)
rollup.register(b)
app.add_url_rule('/something', endpoint=name, view_func=handler)
mocker.patch('flask_rollup.os.remove')
rollup.run_rollup(name)
with app.test_client() as client:
with app.app_context():
url = url_for(name)
rv = client.get(url)
assert b.output.url.encode('utf-8') in rv.data
def test_template_global_fail_on_prod(app, mocker):
def handler():
return render_template_string('{{ jsbundle(request.endpoint) }}')
mocker.patch.dict('os.environ', {'FLASK_ENV': 'production'})
app.config['SERVER_NAME'] = '127.0.0.1'
name = 'p1'
b = Bundle(name, 'some/where', ['some/input/file.js'])
rollup = Rollup(app)
mocker.patch(
'flask_rollup.glob.glob', mocker.Mock(return_value=[])
)
rollup.register(b)
app.add_url_rule('/something', endpoint=name, view_func=handler)
with app.test_client() as client:
with app.app_context():
url = url_for(name)
rv = client.get(url)
assert rv.status_code == 500
|
import torch
from SDE_CG import layers
from torch_scatter import scatter_add
import numpy as np
import torch.nn as nn
from .SDE_builder import GaussianFourierProjection
class ScoreNet(torch.nn.Module):
def __init__(self,config, marginal_prob_std, hidden_dim=256,device='cuda'):
super().__init__()
self.config = config
self.hidden_dim = hidden_dim
self.bond_emb = torch.nn.Embedding(100, self.hidden_dim)
self.atom_emb = torch.nn.Embedding(100, self.hidden_dim)
self.input_mlp = layers.MultiLayerPerceptron(1, [self.hidden_dim, self.hidden_dim], activation=self.config.model.mlp_act)
self.output_mlp = layers.MultiLayerPerceptron(2*self.hidden_dim, [hidden_dim, hidden_dim//2,1], activation=config.model.gnn_act)
self.model = layers.GraphIsomorphismNetwork(hidden_dim=self.hidden_dim, \
num_convs=self.config.model.num_convs, \
activation=self.config.model.gnn_act, \
readout="sum", short_cut=self.config.model.short_cut, \
concat_hidden=self.config.model.concat_hidden)
self.model = self.model.to(device)
self.t_embed = nn.Sequential(GaussianFourierProjection(embed_dim=hidden_dim),
nn.Linear(hidden_dim, hidden_dim))
self.dense1 = nn.Linear(hidden_dim, 1)
self.marginal_prob_std = marginal_prob_std
self.device = device
@torch.no_grad()
def get_score(self,data,d,t):
t_embedding = self.t_embed(t)
t_embedding = self.dense1(t_embedding)
node2graph = data.batch
edge2graph = node2graph[data.edge_index[0]]
atom_attr = self.atom_emb(data.node_type) # (atom_dim, hidden_dim)
bond_attr = self.bond_emb(data.edge_type) # (edge_dim, hidden_dim)
d_emb = self.input_mlp(d) # (edge_dim, hidden_dim) =[58234, 256]
d_emb += t_embedding[edge2graph]
bond_attr = d_emb * bond_attr # (edge_dim, hidden_dim) =[58234, 256]
output = self.model(data, atom_attr, bond_attr)
h_row, h_col = output["node_feature"][data.edge_index[0]], output["node_feature"][data.edge_index[1]]
distance_feature = torch.cat([h_row * h_col, bond_attr], dim=-1) # (edge_dim, 2*hidden_dim) =[58234, 512]
scores = self.output_mlp(distance_feature) # (edge_dim, 1) = [58234, 1]
scores = scores.view(-1)
scores = scores / self.marginal_prob_std(t[edge2graph]).to(self.device)
return scores
def forward(self, data, t):
'''
        Score network forward pass, built from two MLP blocks, two embedding layers for atom and bond attributes,
        and an embedding layer for time.
        input: data structure (we need node_type, bond_type, edge_length, batch, edge_index, atom_feature)
'''
node2graph = data.batch
edge2graph = node2graph[data.edge_index[0]]
t_embedding = self.t_embed(t) #(batch_dim, hidden_dim) = (128, 256)
t_embedding = self.dense1(t_embedding) #(batch_dim, 1) = (128,1)
d = data.edge_length #(edge_dim, 1)
atom_attr = self.atom_emb(data.node_type) #(atom_dim, hidden_dim)
bond_attr = self.bond_emb(data.edge_type) #(edge_dim, hidden_dim)
d_emb = self.input_mlp(d) #(edge_dim, hidden_dim) =[58234, 256]
d_emb += t_embedding[edge2graph] #(edge_dim, hidden_dim) =[58234, 256]
bond_attr = d_emb * bond_attr #(edge_dim, hidden_dim) =[58234, 256]
output = self.model(data, atom_attr, bond_attr)
h_row, h_col = output["node_feature"][data.edge_index[0]], output["node_feature"][data.edge_index[1]]
distance_feature = torch.cat([h_row*h_col, bond_attr], dim=-1) #(edge_dim, 2*hidden_dim) =[58234, 512]
scores = self.output_mlp(distance_feature) #(edge_dim, 1) = [58234, 1]
scores = scores.view(-1) #(edge_dim)
scores = scores / self.marginal_prob_std(t[edge2graph]).to(self.device) #(edge_dim)
return scores
|
"""run the linters and formatters"""
import sys
import doit
import shutil
from . import DOIT_CONFIG, Task, main, needs, Param, get_name
def task_lint():
"""lint and format the project with pre-commit"""
def lint(raises):
needs("pre_commit")
# do not fail this unless explicit
action = doit.tools.CmdAction("pre-commit run --all-files").execute(
sys.stdout, sys.stderr
)
if raises:
assert not action, "linting failed."
return Task(
actions=[lint],
params=[Param("raises", False, type=bool, help="raise on failure")],
)
def task_uml():
"""generate a uml diagram for the project with pyreverse"""
def pyreverse(format, minimal):
needs("pylint")
name = get_name()
print(name)
# should ignore conventions
doit.tools.CmdAction(
f"pyreverse -o {format} {minimal and '-k' or ''} -p {name} {name}"
).execute(sys.stdout, sys.stderr)
shutil.move(f"packages_{name}.{format}", "docs")
shutil.move(f"classes_{name}.{format}", "docs")
return Task(
actions=[pyreverse],
params=[
Param(
"format",
"png",
type=str,
help="uml output format",
choices=dict(zip(*["svg png dot".split()] * 2)),
),
Param(
"minimal",
False,
type=bool,
help="export a minimal formal of the diagram",
),
],
targets=[], # we can predict these
)
DOIT_CONFIG["default_tasks"] += ["lint"]
if __name__ == "__main__":
main(globals())
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from loguru import logger
import pulp
from .benchmarker import ModelBenchmarker, DeviceBenchmarker
from .worker_manager import WorkerManager
class Allocator(object):
def __init__(
self,
model_cfg: dict,
worker_manager: WorkerManager,
model_benchmarker: ModelBenchmarker,
device_benchmarker: DeviceBenchmarker,
):
self._model_cfg = model_cfg
self._worker_manager = worker_manager
self._model_benchmarker = model_benchmarker
self._device_benchmarker = device_benchmarker
def optimal_allocate(self, max_time=300, threads=24):
# benchmark
worker_ranks, workers_performance = zip(
*self._device_benchmarker.benchmark().items()
)
lf, lm = self._model_benchmarker.benchmark()
# logger.info(f"layers flops: {lf}")
# logger.info(f"layers memories: {lm}")
D = len(worker_ranks)
L = len(lf)
# parse the results
worker_ranks = [int(item.lstrip("worker")) for item in worker_ranks]
logger.info(f"worker ranks: {worker_ranks}")
dt = [item["time"] for item in workers_performance]
logger.info(f"worker time: {dt}")
dm = [item["avai_mem"] for item in workers_performance]
# logger.info(f"worker memory limit: {dm}")
# solve problem
model = pulp.LpProblem("optimal_allocate", pulp.LpMinimize)
logger.info("set up MIP")
# create variables
x = pulp.LpVariable.matrix("x", (range(D), range(L)), cat=pulp.LpBinary)
y = pulp.LpVariable.matrix("y", range(D), lowBound=0, upBound=L)
z = pulp.LpVariable.matrix("z", range(D), lowBound=0, upBound=L)
q = pulp.LpVariable("max_device_time")
logger.info("added all variables")
# add one feasible solution to pre-solve
avg_num_layer = math.floor(L / D)
num_remain_layer = L - avg_num_layer * D
bias = 0
for row, device in enumerate(x):
start_idx = row * avg_num_layer + bias
if num_remain_layer > 0:
num_remain_layer -= 1
bias += 1
end_idx = row * avg_num_layer + avg_num_layer - 1 + bias
z[row].setInitialValue(start_idx)
y[row].setInitialValue(end_idx)
for col, layer in enumerate(device):
if start_idx <= col <= end_idx:
layer.setInitialValue(1)
else:
layer.setInitialValue(0)
logger.info("added one feasible solution")
# objective function
model.objective = q
logger.info("add obj.")
# add constraints
# constraint 1
for i in range(D):
model += (
pulp.LpAffineExpression([(x[i][j], lm[j]) for j in range(L)]) <= dm[i]
)
# constraint 2 and 3
for i in range(D):
for j in range(L):
model += y[i] >= j * x[i][j]
model += z[i] <= j * x[i][j] + (L + 1) * (1 - x[i][j])
# constraint 4
for i in range(D):
model += y[i] - z[i] <= pulp.lpSum(x[i][j] for j in range(L)) - 1
# constraint 5
for j in range(L):
model += pulp.lpSum(x[i][j] for i in range(D)) == 1
# constraint 6
for i in range(D):
model += q >= dt[i] * pulp.lpSum(x[i][j] * lf[j] for j in range(L))
logger.info("added all constraints")
solver_list = pulp.listSolvers(onlyAvailable=True)
if "GUROBI_CMD" in solver_list:
logger.info("using gurobi as solver")
model.solve(
pulp.GUROBI_CMD(
timeLimit=max_time,
msg=True,
gapRel=0.2,
threads=threads,
warmStart=True,
)
)
else:
logger.info("using CBC as solver")
model.solve(
pulp.PULP_CBC_CMD(
timeLimit=max_time,
msg=True,
gapRel=0.2,
threads=threads,
warmStart=True,
)
)
for i in z:
print(i.value(), end=" ")
print()
for i in y:
print(i.value(), end=" ")
print()
# allocate to
partition = []
for i in range(D):
info = {
"rank": worker_ranks[i],
"start": int(z[i].value()),
"end": int(y[i].value()),
}
partition.append(info)
# sort partition by idx
partition.sort(key=lambda t: t["start"])
print(partition)
for i, info in enumerate(partition):
for worker in self._worker_manager.worker_pool:
if info["rank"] == worker.rank:
print(f"rank {worker.rank}", end=" ")
layers = self._model_cfg[info["start"] : info["end"] + 1]
print(f"has layer {info['start']} to layer {info['end']}", end=" ")
worker.model_config = layers
print("and set up new config")
worker.order = i + 1
print(f"rank {worker.rank}'s order: {worker.order}")
# for i, rank in enumerate(worker_ranks):
# for worker in self._worker_manager.worker_pool:
# if worker.rank == rank:
# layers = self._model_cfg[int(z[i].value()):int(y[i].value())]
# print(f"rank {rank} has layer {int(z[i].value())} to {int(y[i].value())}")
# worker.model_config = layers
# worker.order = i + 1
self._worker_manager.reset_rank_by_order()
print("reset by order")
for worker in self._worker_manager.worker_pool:
print(worker.rank)
return self._worker_manager
def dynamic_allocate(self, break_iter=1000):
"""
Allocate the layers dynamically among the workers
"""
# get results
worker_time_and_avai_mem = self._device_benchmarker.benchmark()
layer_flops, layer_mem = self._model_benchmarker.benchmark()
print("worker_time_and_avai_mem: {}".format(worker_time_and_avai_mem))
# print('layer_flops: {}'.format(layer_flops))
# print('layer_mem: {}'.format(layer_mem))
# parse the results
worker_time_and_avai_mem = list(worker_time_and_avai_mem.items())
worker_ranks = [
int(item[0].lstrip("worker")) for item in worker_time_and_avai_mem
]
worker_time = [item[1]["time"] for item in worker_time_and_avai_mem]
worker_avai_mem = [item[1]["avai_mem"] for item in worker_time_and_avai_mem]
# check if the smallest worker avai mem can hold the smallest layer
assert min(worker_avai_mem) > min(
layer_mem
), "The smallest worker has insufficient memory for smallest layer"
# create partition index
num_layer = len(layer_flops)
num_worker = len(worker_ranks)
avg_num_layers = math.floor(num_layer / num_worker)
remainder = num_layer - avg_num_layers * num_worker
num_layers_on_worker = [avg_num_layers] * num_worker
for i in range(num_worker):
if remainder > 0:
num_layers_on_worker[i] += 1
remainder -= 1
else:
break
partition_idx = [0] + [
sum(num_layers_on_worker[:idx]) for idx in range(1, num_worker + 1)
]
# partition based on benchmark results
partition_idx = self._allocate_by_mem(
worker_rank=worker_ranks,
partition_idx=partition_idx,
worker_avai_mem=worker_avai_mem,
layer_mem=layer_mem,
)
partition_idx = self._allocate_by_flops_time(
worker_rank=worker_ranks,
partition_idx=partition_idx,
worker_time=worker_time,
layer_flops=layer_flops,
worker_avai_mem=worker_avai_mem,
layer_mem=layer_mem,
break_iter=break_iter,
)
# allocate to configs
for i, rank in enumerate(worker_ranks):
for worker in self._worker_manager.worker_pool:
if worker.rank == rank:
print(f"rank {worker.rank}", end=" ")
layers = self._model_cfg[partition_idx[i] : partition_idx[i + 1]]
print(
f"rank {rank} has layer {int(partition_idx[i])} to {partition_idx[i + 1]}"
)
worker.model_config = layers
worker.order = i + 1
self._worker_manager.reset_rank_by_order()
for worker in self._worker_manager.worker_pool:
print(worker.rank, end=" ")
return self._worker_manager
def even_allocate(self):
"""
Allocate the layers equally among the workers based on the number of layers
"""
num_worker = len(self._worker_manager.worker_pool)
num_layer = len(self._model_cfg)
avg_num_layer = math.floor(num_layer / num_worker)
num_remain_layer = num_layer - avg_num_layer * num_worker
cur_layer_idx = 0
for idx, worker in enumerate(self._worker_manager.worker_pool):
if num_remain_layer > 0:
num_remain_layer -= 1
cur_num_layer = avg_num_layer + 1
else:
cur_num_layer = avg_num_layer
layers = self._model_cfg[cur_layer_idx : cur_layer_idx + cur_num_layer]
worker.model_config = layers
cur_layer_idx = cur_layer_idx + cur_num_layer
return self._worker_manager
def _get_num_layers_on_worker(self, index, partition_idx):
return partition_idx[index + 1] - partition_idx[index]
def _is_last_worker(self, index, worker_rank):
return index == len(worker_rank) - 1
def _list_greater_than(self, l1, l2):
for x, y in zip(l1, l2):
if x < y:
return False
return True
def _allocate_by_flops_time(
self,
worker_rank,
partition_idx,
worker_time,
layer_flops,
worker_avai_mem,
layer_mem,
break_iter,
):
# normalize time results
worker_time = [item / min(worker_time) for item in worker_time]
# iteratively update partition index based on flops * time
iter = 0
while True:
# calculate flops on each worker
workers_flops_time_allocated = [
sum(layer_flops[partition_idx[j] : partition_idx[j + 1]])
* worker_time[j]
for j in range(len(worker_rank))
]
# set the target flops * time on average
target = sum(workers_flops_time_allocated) // len(worker_rank)
old_partition_idx = partition_idx[:]
for j in range(len(worker_rank) - 1):
current_workload = (
sum(layer_flops[partition_idx[j] : partition_idx[j + 1]])
* worker_time[j]
)
if (
current_workload < target
and self._get_num_layers_on_worker(j + 1, partition_idx) > 1
):
# add a layer if memory allows
expected_ram_allocated = sum(
layer_mem[partition_idx[j] : partition_idx[j + 1] + 1]
)
if expected_ram_allocated < worker_avai_mem[j]:
partition_idx[j + 1] += 1
else:
last_layer_workload_on_this_device = (
layer_flops[partition_idx[j + 1] - 1] * worker_time[j]
)
                        workload_on_next_device = (
                            sum(layer_flops[partition_idx[j + 1] : partition_idx[j + 2]])
                            * worker_time[j + 1]
                        )
if (
workload_on_next_device < target
and current_workload
> target + last_layer_workload_on_this_device
and self._get_num_layers_on_worker(j, partition_idx) > 1
):
next_worker_expected_ram_allocated = sum(
layer_mem[partition_idx[j + 1] - 1 : partition_idx[j + 2]]
)
if next_worker_expected_ram_allocated < worker_avai_mem[j + 1]:
partition_idx[j + 1] -= 1
if old_partition_idx == partition_idx:
break
iter += 1
if iter == break_iter:
break
return partition_idx
def _allocate_by_mem(self, worker_rank, partition_idx, worker_avai_mem, layer_mem):
# flag for if allocation satisfy memory requirement
mem_satisfy = False
def _compute_mem_allocated(lm, pi, wr):
return [sum(lm[pi[j] : pi[j + 1]]) for j in range(len(wr))]
# iteratively update partition index based on mem_avai and mem_allocated
while True:
# calculate flops on each worker
workers_mem_allocated = _compute_mem_allocated(
layer_mem, partition_idx, worker_avai_mem
)
# break the loop if mem allocated < avai mem on each worker
if self._list_greater_than(worker_avai_mem, workers_mem_allocated):
mem_satisfy = True
break
old_partition_idx = partition_idx[:]
for j in range(len(worker_rank) - 1):
while (
workers_mem_allocated[j] > worker_avai_mem[j]
and partition_idx[j + 1] - partition_idx[j] > 1
):
# remove a layer if memory is not enough
partition_idx[j + 1] -= 1
workers_mem_allocated = _compute_mem_allocated(
layer_mem, partition_idx, worker_avai_mem
)
if self._list_greater_than(worker_avai_mem, workers_mem_allocated):
mem_satisfy = True
break
if mem_satisfy:
break
# add a layer if memory allows
while (
workers_mem_allocated[j] < worker_avai_mem[j]
and partition_idx[j + 2] - partition_idx[j + 1] > 1
):
expected_ram_allocated = sum(
layer_mem[partition_idx[j] : partition_idx[j + 1] + 1]
)
if expected_ram_allocated < worker_avai_mem[j]:
partition_idx[j + 1] += 1
workers_mem_allocated = _compute_mem_allocated(
layer_mem, partition_idx, worker_avai_mem
)
else:
break
if self._list_greater_than(worker_avai_mem, workers_mem_allocated):
mem_satisfy = True
break
if mem_satisfy:
break
if old_partition_idx == partition_idx:
break
if mem_satisfy:
return partition_idx
else:
print(partition_idx)
raise Exception("memory allocation failed")
|
"""
Copyright (c) 2015-2021 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import ipaddress
import os
import subprocess
import sys
def parse_address(addr):
parse_result = {'port': '0'}
if addr.count(':') > 1:
# parse IPv6 address
parse_result['addr'] = addr.split('[')[0]
parse_result['ipproto'] = 'ipv6'
if addr.find('[') > -1:
parse_result['port'] = addr.split('[')[1].split(']')[0]
else:
# parse IPv4 address
parse_result['ipproto'] = 'ipv4'
parse_result['addr'] = addr.split(':')[0]
if addr.find(':') > -1:
parse_result['port'] = addr.split(':')[1]
return parse_result
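# Illustrative examples (a hedged sketch, not part of the original script; the addresses
# below are made up) of how parse_address splits the pfctl-style address strings used below:
#   parse_address('192.0.2.1:80')     -> addr '192.0.2.1', port '80', ipproto 'ipv4'
#   parse_address('2001:db8::1[443]') -> addr '2001:db8::1', port '443', ipproto 'ipv6'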
def fetch_rule_labels():
result = dict()
descriptions = dict()
# query descriptions from active ruleset so we can search and display rule descriptions as well.
if os.path.isfile('/tmp/rules.debug'):
with open('/tmp/rules.debug', "rt", encoding="utf-8") as f_in:
for line in f_in:
lbl = line.split(' label ')[-1] if line.find(' label ') > -1 else ""
rule_label = lbl.split('"')[1] if lbl.count('"') >= 2 else None
descriptions[rule_label] = ''.join(lbl.split('"')[2:]).strip().strip('# : ')
sp = subprocess.run(['/sbin/pfctl', '-vvPsr'], capture_output=True, text=True)
for line in sp.stdout.strip().split('\n'):
if line.startswith('@'):
line_id = line.split()[0][1:]
if line.find(' label ') > -1:
rid = ''.join(line.split(' label ')[-1:]).strip()[1:].split('"')[0]
result[line_id] = {'rid': rid, 'descr': None}
if rid in descriptions:
result[line_id]['descr'] = descriptions[rid]
return result
def query_states(rule_label, filter_str):
result = list()
try:
filter_network = ipaddress.ip_network(filter_str.strip())
except ValueError:
filter_network = None
rule_labels = fetch_rule_labels()
sp = subprocess.run(['/sbin/pfctl', '-vvs', 'state'], capture_output=True, text=True)
record = None
for line in sp.stdout.strip().split('\n'):
parts = line.split()
if line.startswith(" ") and len(parts) > 1 and record:
if parts[0] == 'age':
for part in line.split(","):
part = part.strip()
if part.startswith("rule "):
record["rule"] = part.split()[-1]
if record["rule"] in rule_labels:
record["label"] = rule_labels[record["rule"]]["rid"]
record["descr"] = rule_labels[record["rule"]]["descr"]
elif part.startswith("age "):
record["age"] = part.split()[-1]
elif part.startswith("expires in"):
record["expires"] = part.split()[-1]
elif part.endswith("pkts"):
record["pkts"] = [int(s) for s in part.split()[0].split(':')]
elif part.endswith("bytes"):
record["bytes"] = [int(s) for s in part.split()[0].split(':')]
elif parts[0] == "id:":
                # XXX: in order to kill a state, we need to pass both the id and the creator, so it seems to make
# sense to uniquely identify the state by the combined number
record["id"] = "%s/%s" % (parts[1], parts[3])
search_line = " ".join(str(item) for item in filter(None, record.values()))
if rule_label != "" and record['label'].lower().find(rule_label) == -1:
# label
continue
elif filter_network is not None:
                    try:
                        match = False
                        for field in ['src_addr', 'dst_addr', 'nat_addr']:
                            if record[field] is None:
                                continue
                            addr = ipaddress.ip_network(record[field])
                            if filter_network.overlaps(addr):
                                match = True
                                break
                        if not match:
                            continue
except:
continue
elif filter_str != "" and search_line.lower().find(filter_str.lower()) == -1:
# apply filter when provided
continue
if parts[0] == "id:":
# append to response
result.append(record)
elif len(parts) >= 6:
record = {
'label': '',
'descr': '',
'nat_addr': None,
'nat_port': None,
'iface': parts[0],
'proto': parts[1],
'src_addr': parse_address(parts[2])['addr'],
'src_port': parse_address(parts[2])['port'],
'ipproto': parse_address(parts[2])['ipproto']
}
if parts[3].find('(') > -1:
# NAT enabled
record['nat_addr'] = parts[3][1:].split(':')[0]
if parts[3].find(':') > -1:
record['nat_port'] = parts[3].split(':')[1][:-1]
record['dst_addr'] = parse_address(parts[-2])['addr']
record['dst_port'] = parse_address(parts[-2])['port']
if parts[-3] == '->':
record['direction'] = 'out'
else:
record['direction'] = 'in'
record['state'] = parts[-1]
return result
def query_top(rule_label, filter_str):
result = list()
rule_labels = fetch_rule_labels()
sp = subprocess.run(['/usr/local/sbin/pftop', '-w', '1000', '-b','-v', 'long','9999999999999'], capture_output=True, text=True)
header = None
try:
filter_network = ipaddress.ip_network(filter_str.strip())
except ValueError:
filter_network = None
for rownum, line in enumerate(sp.stdout.strip().split('\n')):
parts = line.strip().split()
if rownum >= 2 and len(parts) > 5:
record = {
'proto': parts[0],
'dir': parts[1].lower(),
'src_addr': parse_address(parts[2])['addr'],
'src_port': parse_address(parts[2])['port'],
'dst_addr': parse_address(parts[3])['addr'],
'dst_port': parse_address(parts[3])['port'],
'gw_addr': None,
'gw_port': None,
}
if parts[4].count(':') > 2 or parts[4].count('.') > 2:
record['gw_addr'] = parse_address(parts[4])['addr']
record['gw_port'] = parse_address(parts[4])['port']
idx = 5
else:
idx = 4
record['state'] = parts[idx]
record['age'] = parts[idx+1]
record['expire'] = parts[idx+2]
record['pkts'] = int(parts[idx+3]) if parts[idx+3].isdigit() else 0
record['bytes'] = int(parts[idx+4]) if parts[idx+4].isdigit() else 0
record['avg'] = int(parts[idx+5]) if parts[idx+5].isdigit() else 0
record['rule'] = parts[idx+6]
if record['rule'] in rule_labels:
record['label'] = rule_labels[record['rule']]['rid']
record['descr'] = rule_labels[record['rule']]['descr']
else:
record['label'] = None
record['descr'] = None
for timefield in ['age', 'expire']:
tmp = record[timefield].split(':')
record[timefield] = int(tmp[0]) * 3600 + int(tmp[1]) * 60 + int(tmp[2])
search_line = " ".join(str(item) for item in filter(None, record.values()))
if rule_label != "" and record['label'].lower().find(rule_label) == -1:
# label
continue
elif filter_network is not None:
                try:
                    match = False
                    for field in ['src_addr', 'dst_addr', 'gw_addr']:
                        if record[field] is None:
                            continue
                        addr = ipaddress.ip_network(record[field])
                        if filter_network.overlaps(addr):
                            match = True
                            break
                    if not match:
                        continue
except:
continue
elif filter_str != "" and search_line.lower().find(filter_str.lower()) == -1:
# apply filter when provided
continue
result.append(record)
return result
|
import typer
import git
from InquirerPy import inquirer
try:
repo = git.Repo('.')
except git.exc.InvalidGitRepositoryError as err:
typer.echo("This is not a Git repository")
exit()
def get_branches():
"""Return the list of local branches except the active."""
branches = []
for ref in repo.heads:
if ref != repo.active_branch:
branches.append(ref)
return branches
app = typer.Typer()
@app.command()
def list():
branches = get_branches()
if len(branches) == 0:
typer.echo(f"There is no branch to checkout")
return
branch = inquirer.select(
message="Select a branch:",
choices=get_branches(),
).execute()
repo.git.checkout(branch)
@app.command()
def delete():
branches = get_branches()
if len(branches) == 0:
typer.echo(f"There is no branch to delete")
return
branches_to_delete = inquirer.checkbox(
message="Select the branches:",
choices=branches,
cycle=False,
).execute()
for branch in branches_to_delete:
repo.delete_head(branch, force = True)
typer.echo(f"Branch {branch} deleted")
@app.command()
def install():
import os
file_path = os.path.realpath(__file__)
os.system(f"git config --global alias.list '!python3 {file_path} list'")
os.system(f"git config --global alias.del '!python3 {file_path} delete'")
if __name__ == "__main__":
app() |
"""
MIT License
Copyright (c) 2017 Zeke Barge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Source: https://github.com/BitMEX/easy-data-scripts
"""
from requests.auth import AuthBase
from urllib.parse import urlparse
import time
import hashlib
import hmac
class APIKeyAuth(AuthBase):
"""Attaches API Key Authentication to the given Request object."""
def __init__(self, apiKey, apiSecret):
"""Init with Key & Secret."""
self.apiKey = apiKey
self.apiSecret = apiSecret
def __call__(self, r):
"""Called when forming a request - generates api key headers."""
# modify and return the request
nonce = generate_nonce()
r.headers['api-nonce'] = str(nonce)
r.headers['api-key'] = self.apiKey
r.headers['api-signature'] = generate_signature(self.apiSecret, r.method, r.url, nonce, r.body or '')
return r
def generate_nonce():
return int(round(time.time() * 1000))
# Generates an API signature.
# A signature is HMAC_SHA256(secret, verb + path + nonce + data), hex encoded.
# Verb must be uppercased, url is relative, nonce must be an increasing 64-bit integer
# and the data, if present, must be JSON without whitespace between keys.
#
# For example, in pseudocode (and in real code below):
#
# verb=POST
# url=/api/v1/order
# nonce=1416993995705
# data={"symbol":"XBTZ14","quantity":1,"price":395.01}
# signature = HEX(HMAC_SHA256(secret,
# 'POST/api/v1/order1416993995705{"symbol":"XBTZ14","quantity":1,"price":395.01}'))
def generate_signature(secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
if isinstance(data, (bytes, bytearray)):
data = data.decode('utf8')
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
message = verb + path + str(nonce) + data
signature = hmac.new(
bytes(secret, 'utf8'),
bytes(message, 'utf8'),
digestmod=hashlib.sha256).hexdigest()
return signature
class AccessTokenAuth(AuthBase):
"""Attaches Access Token Authentication to the given Request object."""
def __init__(self, accessToken):
"""Init with Token."""
self.token = accessToken
def __call__(self, r):
"""Called when forming a request - generates access token header."""
if (self.token):
r.headers['access-token'] = self.token
return r
class APIKeyAuthWithExpires(AuthBase):
"""Attaches API Key Authentication to the given Request object. This implementation uses `expires`."""
def __init__(self, apiKey, apiSecret):
"""Init with Key & Secret."""
self.apiKey = apiKey
self.apiSecret = apiSecret
def __call__(self, r):
"""
Called when forming a request - generates api key headers. This call uses `expires` instead of nonce.
This way it will not collide with other processes using the same API Key if requests arrive out of order.
For more details, see https://www.bitmex.com/app/apiKeys
"""
# modify and return the request
expires = int(round(time.time()) + 5) # 5s grace period in case of clock skew
r.headers['api-expires'] = str(expires)
r.headers['api-key'] = self.apiKey
r.headers['api-signature'] = self.generate_signature(self.apiSecret, r.method, r.url, expires, r.body or '')
return r
# Generates an API signature.
# A signature is HMAC_SHA256(secret, verb + path + nonce + data), hex encoded.
# Verb must be uppercased, url is relative, nonce must be an increasing 64-bit integer
# and the data, if present, must be JSON without whitespace between keys.
#
    # For example, in pseudocode (and in real code below):
#
# verb=POST
# url=/api/v1/order
# nonce=1416993995705
# data={"symbol":"XBTZ14","quantity":1,"price":395.01}
# signature = HEX(HMAC_SHA256(secret,
# 'POST/api/v1/order1416993995705{"symbol":"XBTZ14","quantity":1,"price":395.01}'))
def generate_signature(self, secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
        if isinstance(data, (bytes, bytearray)):
            data = data.decode('utf8')
        message = (verb + path + str(nonce) + data).encode('utf-8')
        signature = hmac.new(bytes(secret, 'utf8'), message, digestmod=hashlib.sha256).hexdigest()
return signature |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from endpoints.utils.decorators import room_required, pk_required
from music.serializers import PlaylistSerializer
from music.models import PlaylistTrack
class PlaylistView(APIView):
"""
Playlist resource.
"""
@room_required
def get(self, request, room):
"""
Get current playlist
---
serializer: PlaylistSerializer
"""
return Response(PlaylistSerializer(room.playlist, many=True).data)
@pk_required
@room_required
def post(self, request, room, pk):
"""
Update playlist
---
serializer: PlaylistSerializer
"""
try:
playlistTrack = PlaylistTrack.objects.get(pk=pk, room=room)
except PlaylistTrack.DoesNotExist:
return Response("Can't find this playlistTrack.", status=status.HTTP_404_NOT_FOUND)
action = request.data.get('action')
if action not in PlaylistTrack.ACTIONS:
return Response('Action can only be: "%s"' % '" or "'.join(PlaylistTrack.ACTIONS), status=status.HTTP_400_BAD_REQUEST)
target = request.data.get('target')
if action in {'above', 'below'}:
if target is None:
return Response('"%s" action needs a target parameter' % action, status=status.HTTP_400_BAD_REQUEST)
try:
target = PlaylistTrack.objects.get(pk=int(target), room=room)
except PlaylistTrack.DoesNotExist:
return Response("Can't find this playlistTrack as target.", status=status.HTTP_404_NOT_FOUND)
if target is not None:
getattr(playlistTrack, action)(target)
else:
getattr(playlistTrack, action)()
message = {
'action': 'playlistTrack_updated',
'playlistTracks': PlaylistSerializer(room.playlist.all(), many=True).data
}
room.send_message(message)
return Response(PlaylistSerializer(room.playlist.all(), many=True).data, status=status.HTTP_200_OK)
@pk_required
@room_required
def delete(self, request, room, pk):
"""
Delete music from playlist
---
serializer: PlaylistSerializer
"""
try:
PlaylistTrack.objects.get(pk=pk, room=room).delete()
except PlaylistTrack.DoesNotExist:
return Response("Can't find this playlistTrack.", status=status.HTTP_404_NOT_FOUND)
message = {
'action': 'playlistTrack_deleted',
'playlistTracks': PlaylistSerializer(room.playlist.all(), many=True).data
}
room.send_message(message)
return Response(PlaylistSerializer(room.playlist.all(), many=True).data, status=status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/python3
ADT_PATH = "~/MGLTools/MGLToolsPckgs/AutoDockTools/Utilities24/"
PDBAA_PATH = "~/GASSER/database/pdbaa/pdbaa"
MODELLER_PATH = "/opt/modeller923/bin/mod9.23"
BRENDA_PATH = "../database/enzyme/brenda-90" |
from __future__ import absolute_import
import json
import os
from datetime import datetime, timedelta
import pytest
from app import db
from app.models import (
DATETIME_FORMAT, Framework, User, Lot, Brief, Supplier, ContactInformation, Service, BriefClarificationQuestion
)
from app.models.direct_award import DirectAwardProject, DirectAwardProjectUser, DirectAwardSearch
from app.models.buyer_domains import BuyerEmailDomain
TEST_SUPPLIERS_COUNT = 3
COMPLETE_DIGITAL_SPECIALISTS_BRIEF = {
'essentialRequirements': ['MS Paint', 'GIMP'],
'startDate': '31-12-2016',
'evaluationType': ['Reference', 'Interview'],
'niceToHaveRequirements': ['LISP'],
'existingTeam': 'Nice people.',
'specialistWork': 'All the things',
'workingArrangements': 'Just get the work done.',
'organisation': 'Org.org',
'location': 'Wales',
'specialistRole': 'developer',
'title': 'I need a Developer',
'priceWeighting': 85,
'contractLength': '3 weeks',
'culturalWeighting': 5,
'securityClearance': 'Developed vetting required.',
'technicalWeighting': 10,
'culturalFitCriteria': ['CULTURAL', 'FIT'],
'numberOfSuppliers': 3,
'summary': 'Doing some stuff to help out.',
'workplaceAddress': 'Aviation House',
'requirementsLength': '2 weeks'
}
DIRECT_AWARD_PROJECT_NAME = 'My Direct Award Project'
DIRECT_AWARD_FROZEN_TIME = '2017-01-01T00:00:00.000000Z'
DIRECT_AWARD_FROZEN_TIME_DATETIME = datetime.strptime(DIRECT_AWARD_FROZEN_TIME, DATETIME_FORMAT)
DIRECT_AWARD_SEARCH_BASE = 'https://search-api.digitalmarketplace.service.gov.uk/'
DIRECT_AWARD_SEARCH_RELATIVE_URL = 'g-cloud/services/search?q=hosting'
DIRECT_AWARD_SEARCH_URL = DIRECT_AWARD_SEARCH_BASE + DIRECT_AWARD_SEARCH_RELATIVE_URL
def fixture_params(fixture_name, params):
return pytest.mark.parametrize(fixture_name, [params], indirect=True)
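# Illustrative use (a hedged sketch, not from the original suite; the fixture name and
# params below are hypothetical). With indirect=True the params dict is delivered to the
# named fixture via request.param instead of being passed to the test function directly:
#   @fixture_params("draft_service", {"status": "draft"})
#   def test_something(draft_service):
#       ...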
class FixtureMixin(object):
default_buyer_domain = 'digital.gov.uk'
def setup_default_buyer_domain(self):
if BuyerEmailDomain.query.filter(BuyerEmailDomain.domain_name == self.default_buyer_domain).count() == 0:
db.session.add(BuyerEmailDomain(domain_name=self.default_buyer_domain))
db.session.commit()
def setup_dummy_user(self, id=123, role='buyer'):
# The user should have a valid email domain
self.setup_default_buyer_domain()
if role == 'admin':
domain = 'digital.cabinet-office.gov.uk'
elif role == 'admin-ccs-sourcing':
domain = 'crowncommercial.gov.uk'
else:
domain = 'digital.gov.uk'
if User.query.get(id):
return id
user = User(
id=id,
email_address='test+{}@{}'.format(id, domain),
name='my name',
password='fake password',
active=True,
role=role,
password_changed_at=datetime.now()
)
db.session.add(user)
db.session.commit()
return user.id
def setup_dummy_briefs(
self, n, title=None, status='draft', user_id=1, data=None, brief_start=1, lot='digital-specialists',
published_at=None, add_clarification_question=False
):
user_id = self.setup_dummy_user(id=user_id)
lot = Lot.query.filter(Lot.slug == lot).first()
data = data or COMPLETE_DIGITAL_SPECIALISTS_BRIEF.copy()
data['title'] = title
for i in range(brief_start, brief_start + n):
self.setup_dummy_brief(
id=i,
user_id=user_id,
data=data,
framework_slug='digital-outcomes-and-specialists',
lot_slug=lot.slug,
status=status,
published_at=published_at,
add_clarification_question=add_clarification_question
)
def setup_dummy_brief(
self, id=None, user_id=1, status=None, data=None, published_at=None, withdrawn_at=None,
cancelled_at=None, unsuccessful_at=None,
framework_slug='digital-outcomes-and-specialists', lot_slug='digital-specialists',
add_clarification_question=False
):
if published_at is not None and status is not None:
raise ValueError('Cannot provide both status and published_at')
if withdrawn_at is not None and published_at is None:
raise ValueError('If setting withdrawn_at then published_at must also be set')
if not published_at:
if status == 'closed':
published_at = datetime.utcnow() - timedelta(days=1000)
elif status == 'withdrawn':
published_at = datetime.utcnow() - timedelta(days=1000)
withdrawn_at = datetime.utcnow()
elif status == 'cancelled':
published_at = datetime.utcnow() - timedelta(days=1000)
cancelled_at = datetime.utcnow()
elif status == 'unsuccessful':
published_at = datetime.utcnow() - timedelta(days=1000)
unsuccessful_at = datetime.utcnow()
else:
published_at = None if status == 'draft' else datetime.utcnow()
framework = Framework.query.filter(Framework.slug == framework_slug).first()
lot = Lot.query.filter(Lot.slug == lot_slug).first()
brief = Brief(
id=id,
data=data,
framework=framework,
lot=lot,
users=[User.query.get(user_id)],
published_at=published_at,
withdrawn_at=withdrawn_at,
cancelled_at=cancelled_at,
unsuccessful_at=unsuccessful_at
)
db.session.add(brief)
if add_clarification_question:
db.session.add(BriefClarificationQuestion(
brief=brief,
question="What is the answer to the meaning of life, the Universe and everything?",
answer="42"
))
db.session.commit()
return brief
def setup_dummy_suppliers(self, n):
supplier_ids = []
for i in range(n):
db.session.add(
Supplier(
supplier_id=i,
registered_name='Registered Supplier Name {}'.format(i),
name=u'Supplier {}'.format(i),
description='',
organisation_size='small',
duns_number='{}'.format(100000000 + i),
registration_country='country:GB',
companies_house_number='{}'.format(12345670 + i),
other_company_registration_number='555-222-111'
)
)
db.session.add(
ContactInformation(
supplier_id=i,
contact_name=u'Contact for Supplier {}'.format(i),
email=u'{}@contact.com'.format(i),
postcode=u'SW1A 1AA',
address1='7 Gem Lane',
city='Cantelot'
)
)
supplier_ids.append(i)
db.session.commit()
return supplier_ids
def setup_additional_dummy_suppliers(self, n, initial):
for i in range(1000, n + 1000):
db.session.add(
Supplier(
supplier_id=i,
name=u'{} suppliers Ltd {}'.format(initial, i),
description=''
)
)
db.session.add(
ContactInformation(
supplier_id=i,
contact_name=u'Contact for Supplier {}'.format(i),
email=u'{}@contact.com'.format(i),
postcode=u'SW1A 1AA'
)
)
db.session.commit()
def setup_dummy_service(self, service_id, supplier_id=1, data=None,
status='published', framework_id=1, lot_id=1, model=Service, **kwargs):
now = datetime.utcnow()
# lot and framework ids aren't in json responses, so we'll look for them first
lot = Lot.query.filter(Lot.slug == kwargs.pop('lot', '')).first()
framework = Framework.query.filter(Framework.slug == kwargs.pop('frameworkSlug', '')).first()
service_kwargs = {
'service_id': service_id,
'supplier_id': kwargs.pop('supplierId', supplier_id),
'status': kwargs.pop('status', status),
'framework_id': framework.id if framework else framework_id,
'lot_id': lot.id if lot else lot_id,
'created_at': kwargs.pop('createdAt', now),
'updated_at': kwargs.pop('updatedAt', now),
'data': data or kwargs or {'serviceName': 'Service {}'.format(service_id)}
}
service = model(**service_kwargs)
db.session.add(service)
db.session.commit()
return service.id
def setup_dummy_services(self, n, supplier_id=None, framework_id=1, data=None,
start_id=0, lot_id=1, model=Service, status='published'):
for i in range(start_id, start_id + n):
self.setup_dummy_service(
service_id=str(2000000000 + start_id + i),
supplier_id=supplier_id or (i % TEST_SUPPLIERS_COUNT),
framework_id=framework_id,
lot_id=lot_id,
model=model,
status=status,
data=data,
)
def setup_dummy_services_including_unpublished(self, n, framework_id=1, lot_id=1, data=None):
self.setup_dummy_suppliers(TEST_SUPPLIERS_COUNT)
self.setup_dummy_services(n, framework_id=framework_id, lot_id=lot_id, data=data)
# Add extra 'enabled' and 'disabled' services
self.setup_dummy_service(
service_id=str(n + 2000000001),
supplier_id=n % TEST_SUPPLIERS_COUNT,
status='disabled',
framework_id=framework_id,
lot_id=lot_id,
data=data,
)
self.setup_dummy_service(
service_id=str(n + 2000000002),
supplier_id=n % TEST_SUPPLIERS_COUNT,
status='enabled',
framework_id=framework_id,
lot_id=lot_id,
data=data,
)
# Add an extra supplier that will have no services
db.session.add(
Supplier(supplier_id=TEST_SUPPLIERS_COUNT, name=u'Supplier {}'
.format(TEST_SUPPLIERS_COUNT))
)
db.session.add(
ContactInformation(
supplier_id=TEST_SUPPLIERS_COUNT,
contact_name=u'Contact for Supplier {}'.format(
TEST_SUPPLIERS_COUNT),
email=u'{}@contact.com'.format(TEST_SUPPLIERS_COUNT),
postcode=u'SW1A 1AA'
)
)
db.session.commit()
def setup_dummy_framework(
self, slug, framework_family, name='New Framework', id=None, status='open', clarifications=False, lots=None,
has_direct_award=True, has_further_competition=False,
):
if lots is None:
if framework_family.startswith('g-cloud'):
lots = [
Lot.query.filter(Lot.slug == 'cloud-hosting').first(),
Lot.query.filter(Lot.slug == 'cloud-software').first(),
Lot.query.filter(Lot.slug == 'cloud-support').first(),
]
elif framework_family.startswith('digital-outcomes-and-specialists'):
lots = [
Lot.query.filter(Lot.slug == 'digital-outcomes').first(),
Lot.query.filter(Lot.slug == 'digital-specialists').first(),
Lot.query.filter(Lot.slug == 'user-research-participants').first(),
Lot.query.filter(Lot.slug == 'user-research-studios').first(),
]
else:
lots = []
framework = Framework(
id=id,
slug=slug,
name=name,
framework=framework_family,
status=status,
clarification_questions_open=clarifications,
lots=lots,
has_direct_award=has_direct_award,
has_further_competition=has_further_competition,
)
db.session.add(framework)
db.session.commit()
return framework.id
def set_framework_status(self, slug, status):
Framework.query.filter_by(slug=slug).update({'status': status})
db.session.commit()
def set_framework_variation(self, slug):
Framework.query.filter_by(slug=slug).update({
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'variations': {'1': {'createdAt': '2016-08-19T15:31:00.000000Z'}}
}
})
db.session.commit()
def create_direct_award_project(self, user_id, project_id=1, project_name=DIRECT_AWARD_PROJECT_NAME,
created_at=DIRECT_AWARD_FROZEN_TIME):
project = DirectAwardProject.query.get(project_id)
if not project:
project = DirectAwardProject(id=project_id, name=project_name, created_at=created_at)
db.session.add(project)
db.session.flush()
project_user = DirectAwardProjectUser(user_id=user_id, project_id=project_id)
db.session.add(project_user)
db.session.commit()
return project_id, project.external_id
def create_direct_award_project_search(self, created_by, project_id, search_url=DIRECT_AWARD_SEARCH_URL,
active=True, created_at=DIRECT_AWARD_FROZEN_TIME):
search = DirectAwardSearch(created_by=created_by,
project_id=project_id,
created_at=created_at,
search_url=search_url,
active=active)
db.session.add(search)
db.session.commit()
return search.id
class PutDeclarationAndDetailsAndServicesMixin:
"""Centralised from TestUsersExport and TestSuppliersExport; expects some attributes in the class this is mixed
into (self.supplier_id, self.framework_slug, self.client, self.updater_json)."""
def _register_supplier_with_framework(self):
response = self.client.put(
'/suppliers/{}/frameworks/{}'.format(self.supplier_id, self.framework_slug),
data=json.dumps(self.updater_json),
content_type='application/json')
assert response.status_code == 201
def _put_variation_agreement(self):
data = {"agreedVariations": {"agreedUserId": self.users[0].get("id")}}
data.update(self.updater_json)
response = self.client.put(
'/suppliers/{}/frameworks/{}/variation/1'.format(self.supplier_id, self.framework_slug),
data=json.dumps(data),
content_type='application/json')
assert response.status_code == 200
def _create_and_sign_framework_agreement(self):
response = self.client.post(
'/agreements',
data=json.dumps(
{
'updated_by': '[email protected]',
'agreement': {
'supplierId': self.supplier_id,
'frameworkSlug': self.framework_slug
},
}),
content_type='application/json')
agreement_id = json.loads(response.get_data(as_text=True))['agreement']['id']
self.client.post(
"/agreements/{}/sign".format(agreement_id),
data=json.dumps({'updated_by': '[email protected]'}),
content_type='application/json'
)
def _put_declaration(self, status):
data = {'declaration': {'status': status}}
data.update(self.updater_json)
response = self.client.get(
'/suppliers/{}/frameworks/{}'.format(self.supplier_id, self.framework_slug)
)
current_declaration = json.loads(response.get_data(as_text=True))['frameworkInterest']['declaration']
data['declaration'].update(current_declaration)
response = self.client.put(
'/suppliers/{}/frameworks/{}/declaration'.format(self.supplier_id, self.framework_slug),
data=json.dumps(data),
content_type='application/json')
assert response.status_code == (200 if current_declaration else 201)
def _put_complete_declaration(self):
self._put_declaration(status='complete')
def _put_incomplete_declaration(self):
self._put_declaration(status='started')
def _get_declaration(self):
response = self.client.get('/suppliers/{}/frameworks/{}'.format(self.supplier_id, self.framework_slug))
return json.loads(response.get_data(as_text=True))['frameworkInterest']['declaration']
def _post_company_details_confirmed(self):
response = self.client.post(
f'/suppliers/{self.supplier_id}',
data=json.dumps({
"updated_by": "Miss Fig",
"suppliers": {
"companyDetailsConfirmed": True
}
}),
content_type='application/json',
)
assert response.status_code == 200
# Company details must be confirmed at the supplier account level as well as the application level.
response = self.client.post(
'/suppliers/{}/frameworks/{}'.format(self.supplier_id, self.framework_slug),
data=json.dumps({
"updated_by": "Mr Sausages",
'frameworkInterest': {
'applicationCompanyDetailsConfirmed': True
}
}),
content_type='application/json')
assert response.status_code == 200
def _post_complete_draft_service(self):
payload = load_example_listing("DOS-digital-specialist")
self.draft_json = {'services': payload}
self.draft_json['services']['supplierId'] = self.supplier_id
self.draft_json['services']['frameworkSlug'] = self.framework_slug
self.draft_json.update(self.updater_json)
response = self.client.post(
'/draft-services',
data=json.dumps(self.draft_json),
content_type='application/json')
assert response.status_code == 201
draft_id = json.loads(response.get_data())['services']['id']
complete = self.client.post(
'/draft-services/{}/complete'.format(draft_id),
data=json.dumps(self.updater_json),
content_type='application/json')
assert complete.status_code == 200
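# Hedged usage sketch (illustrative only): the mixin's helpers assume the attributes
# named in its docstring are set on the test class before they are called. The base
# class name, supplier id, framework slug and updater email below are assumptions for
# demonstration, not values defined in this module.
#
#   class TestSuppliersExport(BaseApplicationTest, PutDeclarationAndDetailsAndServicesMixin):
#       def setup(self):
#           super().setup()
#           self.supplier_id = 1
#           self.framework_slug = 'digital-outcomes-and-specialists'
#           self.updater_json = {'updated_by': 'updater@example.com'}
#           self.client = self.app.test_client()
#           self._register_supplier_with_framework()
#           self._put_complete_declaration()
#           self._post_company_details_confirmed()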
def load_example_listing(name):
file_path = os.path.join('example_listings', '{}.json'.format(name))
with open(file_path) as f:
return json.load(f)
def get_audit_events(client, audit_type):
audit_response = client.get('/audit-events')
assert audit_response.status_code == 200
data = json.loads(audit_response.get_data(as_text=True))
return [
event for event in data['auditEvents'] if event['type'] == audit_type.value
]
|
# -*- coding: utf-8 -*-
"""
(c) 2017 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <[email protected]>
"""
from __future__ import unicode_literals, absolute_import
import json
import unittest
import sys
import os
import pygit2
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.query
import tests
def create_templates(repopath):
"""Create a couple of templates at the specified repo."""
clone_repo = pygit2.Repository(repopath)
# Create the RFE template
os.mkdir(os.path.join(repopath, "templates"))
template = os.path.join(repopath, "templates", "RFE.md")
with open(template, "w") as stream:
stream.write("RFE\n###\n\n* Idea description")
clone_repo.index.add(os.path.join("templates", "RFE.md"))
clone_repo.index.write()
# Commit
tree = clone_repo.index.write_tree()
author = pygit2.Signature("Alice Author", "[email protected]")
committer = pygit2.Signature("Cecil Committer", "[email protected]")
commit = clone_repo.create_commit(
"refs/heads/master", # the name of the reference to update
author,
committer,
"Add a RFE template",
# binary string representing the tree object ID
tree,
# list of binary strings representing parents of the new commit
[],
)
# Create the 2018-bid.md template
template = os.path.join(repopath, "templates", "2018-bid.md")
with open(template, "w") as stream:
stream.write("Bid for 2018\n############\n\n* Location:")
clone_repo.index.add(os.path.join("templates", "2018-bid.md"))
clone_repo.index.write()
# Commit
tree = clone_repo.index.write_tree()
author = pygit2.Signature("Alice Author", "[email protected]")
committer = pygit2.Signature("Cecil Committer", "[email protected]")
commit = clone_repo.create_commit(
"refs/heads/master", # the name of the reference to update
author,
committer,
"Add a RFE template",
# binary string representing the tree object ID
tree,
# list of binary strings representing parents of the new commit
[commit.hex],
)
# Create the default.md template
template = os.path.join(repopath, "templates", "default.md")
with open(template, "w") as stream:
stream.write("Report your issue")
clone_repo.index.add(os.path.join("templates", "default.md"))
clone_repo.index.write()
# Commit
tree = clone_repo.index.write_tree()
author = pygit2.Signature("Alice Author", "[email protected]")
committer = pygit2.Signature("Cecil Committer", "[email protected]")
clone_repo.create_commit(
"refs/heads/master", # the name of the reference to update
author,
committer,
"Add a default template",
# binary string representing the tree object ID
tree,
# list of binary strings representing parents of the new commit
[commit.hex],
)
class PagureFlaskIssuesTemplatetests(tests.Modeltests):
""" Tests for flask issues controller of pagure """
@patch("pagure.lib.git.update_git", MagicMock(return_value=True))
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def setUp(self):
""" Set up the environnment, run before every tests. """
super(PagureFlaskIssuesTemplatetests, self).setUp()
pagure.config.config["TICKETS_FOLDER"] = os.path.join(
self.path, "tickets"
)
tests.create_projects(self.session)
tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
tests.create_projects_git(os.path.join(self.path, "tickets"))
# Add a couple of templates to test2
repopath = os.path.join(self.path, "tickets", "test2.git")
create_templates(repopath)
# Add a couple of templates to somenamespace/test3
repopath = os.path.join(
self.path, "tickets", "somenamespace", "test3.git"
)
create_templates(repopath)
def test_new_issue_no_template(self):
"""Test the new_issue endpoint when the project has no templates."""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.get("/test/new_issue")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
'<h4 class="font-weight-bold mb-4">New Issue</h4>\n',
output_text,
)
self.assertNotIn("Issue Templates", output_text)
def test_new_issue_w_template(self):
""" Test the new_issue endpoint when the project has templates. """
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.get("/test2/new_issue")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
'<h4 class="font-weight-bold mb-4">New Issue</h4>\n',
output_text,
)
self.assertIn("Issue Templates", output_text)
self.assertIn(
'<a class="issue-template dropdown-item pointer" data-value="RFE">RFE</a>',
output_text,
)
self.assertIn(
'<a class="issue-template dropdown-item pointer" data-value="2018-bid">2018-bid</a>',
output_text,
)
self.assertIn(
'<a class="issue-template dropdown-item pointer" data-value="default">default</a>',
output_text,
)
self.assertIn(
'placeholder="Enter your comment here" tabindex=2 required>'
"Report your issue</textarea>",
output_text,
)
def test_new_issue_w_specific_template(self):
""" Test the new_issue endpoint when the project has templates. """
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.get("/test2/new_issue?template=2018-bid")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
'<h4 class="font-weight-bold mb-4">New Issue</h4>\n',
output_text,
)
self.assertIn("Issue Templates", output_text)
self.assertIn(
'<a class="issue-template dropdown-item pointer" data-value="RFE">RFE</a>',
output_text,
)
self.assertIn(
'<a class="issue-template dropdown-item pointer" data-value="2018-bid">2018-bid</a>',
output_text,
)
self.assertIn(
'<a class="issue-template dropdown-item pointer" data-value="default">default</a>',
output_text,
)
self.assertIn(
'placeholder="Enter your comment here" tabindex=2 required>'
"Bid for 2018\n############",
output_text,
)
def test_get_ticket_template_no_csrf(self):
"""Test the get_ticket_template endpoint when the project has no
templates.
"""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.post("/pv/test/issue/template")
self.assertEqual(output.status_code, 400)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data, {"code": "ERROR", "message": "Invalid input submitted"}
)
def test_get_ticket_template_no_template_specified(self):
"""Test the get_ticket_template endpoint when not specifying which
template to get.
"""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
csrf = self.get_csrf()
data = {"csrf_token": csrf}
output = self.app.post("/pv/test/issue/template", data=data)
self.assertEqual(output.status_code, 400)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data, {"code": "ERROR", "message": "No template provided"}
)
def test_get_ticket_template_no_project(self):
"""Test the get_ticket_template endpoint when the project does not
exist.
"""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
csrf = self.get_csrf()
data = {"csrf_token": csrf}
output = self.app.post("/pv/foobar/issue/template", data=data)
self.assertEqual(output.status_code, 404)
def test_get_ticket_template_no_template(self):
"""Test the get_ticket_template endpoint when the project has no
templates.
"""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
csrf = self.get_csrf()
data = {"csrf_token": csrf}
output = self.app.post(
"/pv/test/issue/template?template=RFE", data=data
)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data, {"code": "ERROR", "message": "No such template found"}
)
def test_get_ticket_template_issue_tracker_disabled(self):
"""Test the get_ticket_template endpoint when the project has
disabled its issue tracker.
"""
repo = pagure.lib.query.get_authorized_project(self.session, "test")
settings = repo.settings
settings["issue_tracker"] = False
repo.settings = settings
self.session.add(repo)
self.session.commit()
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
csrf = self.get_csrf()
data = {"csrf_token": csrf}
output = self.app.post(
"/pv/test/issue/template?template=RFE", data=data
)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{
"code": "ERROR",
"message": "No issue tracker found for this project",
},
)
def test_get_ticket_template_w_template(self):
"""Test the get_ticket_template endpoint when the project has
templates.
"""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
csrf = self.get_csrf()
data = {"csrf_token": csrf}
output = self.app.post(
"/pv/test2/issue/template?template=RFE", data=data
)
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{"code": "OK", "message": "RFE\n###\n\n* Idea description"},
)
def test_get_ticket_template_w_template_namespace(self):
"""Test the get_ticket_template endpoint when the project has
templates and a namespace.
"""
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
csrf = self.get_csrf()
data = {"csrf_token": csrf}
output = self.app.post(
"/pv/somenamespace/test3/issue/template?template=RFE",
data=data,
)
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{"code": "OK", "message": "RFE\n###\n\n* Idea description"},
)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
#!/usr/bin/python
import os,sys
if False:
for stateCode in os.listdir('.'):
if len(stateCode) != 2: continue
for districtCode in os.listdir(stateCode):
if districtCode == 'index.txt': continue
if districtCode == 'district.court':
#print "%s: %s" % (stateCode, districtCode)
txt = open(os.path.join(stateCode, districtCode, 'index.txt')).read()
newName = '%s.d' % stateCode
if not os.path.exists(newName):
os.makedirs(newName)
open(os.path.join(newName, 'index.txt'), 'w+').write(txt)
elif len(districtCode) < 3:
#print "%s: %s" % (stateCode, districtCode)
txt = open(os.path.join(stateCode, districtCode, 'district.court', 'index.txt')).read()
newName = '%s.%s' % (stateCode,districtCode)
if not os.path.exists(newName):
os.makedirs(newName)
open(os.path.join(newName, 'index.txt'), 'w+').write(txt)
if True:
for stateCode in os.listdir('.'):
if len(stateCode) == 2:
for dirname, dirs, files in os.walk(stateCode, topdown=False):
print(dirname)
for filename in files:
print " +" + filename
os.unlink(os.path.join(dirname, filename))
#for d in dirs:
# print " *" + d
# os.rmdir(os.path.join(dirname, d))
os.rmdir(dirname)
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the hunt_view interface."""
from __future__ import unicode_literals
import os
import traceback
import unittest
from grr_response_core.lib import flags
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server.gui import gui_test_lib
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
@db_test_lib.DualDBTest
class TestHuntView(gui_test_lib.GRRSeleniumHuntTest):
"""Test the Cron view GUI."""
reason = "Felt like it!"
def SetupTestHuntView(self, client_limit=0, client_count=10):
# Create some clients and a hunt to view.
with self.CreateSampleHunt(
client_limit=client_limit, client_count=client_count) as hunt:
hunt.Log("TestLogLine")
# Log an error just with some random traceback.
hunt.LogClientError(self.client_ids[1], "Client Error 1",
traceback.format_exc())
# Run the hunt.
client_mock = hunt_test_lib.SampleHuntMock()
hunt_test_lib.TestHuntHelper(client_mock, self.client_ids, False,
self.token)
hunt = aff4.FACTORY.Open(hunt.urn, token=self.token)
all_count, _, _ = hunt.GetClientsCounts()
if client_limit == 0:
# No limit, so we should have all the clients
self.assertEqual(all_count, client_count)
else:
self.assertEqual(all_count, min(client_count, client_limit))
return hunt
def testPageTitleReflectsSelectedHunt(self):
hunt = self.CreateSampleHunt(stopped=True)
self.Open("/#/hunts")
self.WaitUntilEqual("GRR | Hunts", self.GetPageTitle)
self.Click("css=td:contains('GenericHunt')")
self.WaitUntilEqual("GRR | " + hunt.urn.Basename(), self.GetPageTitle)
def testHuntView(self):
"""Test that we can see all the hunt data."""
self.SetupTestHuntView()
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "GenericHunt")
# Select a Hunt.
self.Click("css=td:contains('GenericHunt')")
# Check we can now see the details.
self.WaitUntil(self.IsElementPresent, "css=dl.dl-hunt")
self.WaitUntil(self.IsTextPresent, "Clients Scheduled")
self.WaitUntil(self.IsTextPresent, "Hunt ID")
# Click the Log Tab.
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsTextPresent, "TestLogLine")
# Click the Error Tab.
self.Click("css=li[heading=Errors]")
self.WaitUntil(self.IsTextPresent, "Client Error 1")
def SetupHuntDetailView(self, failrate=2):
"""Create some clients and a hunt to view."""
with self.CreateSampleHunt() as hunt:
hunt.LogClientError(self.client_ids[1], "Client Error 1",
traceback.format_exc())
# Run the hunt.
client_mock = hunt_test_lib.SampleHuntMock(failrate=failrate)
hunt_test_lib.TestHuntHelper(client_mock, self.client_ids, False,
self.token)
return hunt
def testHuntClientsView(self):
"""Test the detailed client view works."""
self._CreateHuntWithDownloadedFile()
# Open up and click on View Hunts then the first Hunt.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "GenericHunt")
self.Click("css=td:contains('GenericHunt')")
# Click the Overview Tab then the Details Link.
self.Click("css=li[heading=Overview]")
self.WaitUntil(self.IsTextPresent, "Hunt ID")
# Check the Hunt Clients tab.
self.Click("css=li[heading=Clients]")
client_id = self.client_ids[0]
self.WaitUntil(self.IsElementPresent,
"css=tr:contains('%s')" % client_id.Basename())
self.RequestAndGrantClientApproval(client_id)
self.Click(
"css=tr:contains('%s') td:nth-of-type(2) a" % client_id.Basename())
self.WaitUntil(self.IsTextPresent, "Flow Information")
self.WaitUntil(self.IsTextPresent, self.base_path)
def testHuntOverviewShowsBrokenHunt(self):
hunt = self.CreateSampleHunt()
broken_hunt = self.CreateSampleHunt()
# Break the hunt.
data_store.DB.DeleteAttributes(
broken_hunt.urn,
[broken_hunt.Schema.HUNT_ARGS, broken_hunt.Schema.HUNT_RUNNER_ARGS])
data_store.DB.Flush()
# Open up and click on View Hunts then the first Hunt.
self.Open("/#/hunts")
hunt_id = hunt.urn.Basename()
broken_hunt_id = broken_hunt.urn.Basename()
# Both hunts are shown even though one throws an error.
self.WaitUntil(self.IsTextPresent, hunt_id)
self.WaitUntil(self.IsTextPresent, broken_hunt_id)
self.Click("css=td:contains('%s')" % broken_hunt_id)
self.WaitUntil(self.IsTextPresent, "Error while Opening")
self.WaitUntil(self.IsTextPresent, "Error while opening hunt:")
def testHuntOverviewShowsStats(self):
"""Test the detailed client view works."""
with self.CreateSampleHunt() as hunt:
hunt_stats = hunt.context.usage_stats
hunt_stats.user_cpu_stats.sum = 5000
hunt_stats.network_bytes_sent_stats.sum = 1000000
# Open up and click on View Hunts then the first Hunt.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "GenericHunt")
self.Click("css=td:contains('GenericHunt')")
# Click the Overview Tab and check that the stats are present.
self.Click("css=li[heading=Overview]")
self.WaitUntil(self.IsTextPresent, "1h 23m 20s")
self.WaitUntil(self.IsTextPresent, "976.6KiB")
def testHuntOverviewGetsUpdatedWhenHuntChanges(self):
with self.CreateSampleHunt() as hunt:
hunt_stats = hunt.context.usage_stats
hunt_stats.user_cpu_stats.sum = 5000
hunt_stats.network_bytes_sent_stats.sum = 1000000
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntOverviewDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('GenericHunt')")
self.WaitUntil(self.IsTextPresent, "1h 23m 20s")
self.WaitUntil(self.IsTextPresent, "976.6KiB")
with aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token) as fd:
fd.context.usage_stats.user_cpu_stats.sum = 6000
fd.context.usage_stats.network_bytes_sent_stats.sum = 11000000
self.WaitUntil(self.IsTextPresent, "1h 40m")
self.WaitUntil(self.IsTextPresent, "10.5MiB")
def testHuntStatsView(self):
self.SetupTestHuntView()
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "GenericHunt")
self.Click("css=td:contains('GenericHunt')")
# Click the Stats tab.
self.Click("css=li[heading=Stats]")
self.WaitUntil(self.IsTextPresent, "Total number of clients")
self.WaitUntil(self.IsTextPresent, "10")
self.WaitUntil(self.IsTextPresent, "User CPU mean")
self.WaitUntil(self.IsTextPresent, "5.5")
self.WaitUntil(self.IsTextPresent, "User CPU stdev")
self.WaitUntil(self.IsTextPresent, "2.9")
self.WaitUntil(self.IsTextPresent, "System CPU mean")
self.WaitUntil(self.IsTextPresent, "11")
self.WaitUntil(self.IsTextPresent, "System CPU stdev")
self.WaitUntil(self.IsTextPresent, "5.7")
self.WaitUntil(self.IsTextPresent, "Network bytes sent mean")
self.WaitUntil(self.IsTextPresent, "16.5")
self.WaitUntil(self.IsTextPresent, "Network bytes sent stdev")
self.WaitUntil(self.IsTextPresent, "8.6")
def testHuntNotificationIsShownAndClickable(self):
hunt = self.CreateSampleHunt(
path=os.path.join(self.base_path, "test.plist"))
self.RequestAndGrantHuntApproval(hunt.urn.Basename())
self.Open("/")
self.Click("css=#notification_button")
self.Click("css=a:contains('has granted you access')")
self.WaitUntil(self.IsElementPresent,
"css=tr.row-selected td:contains('GenericHunt')")
self.WaitUntil(self.IsTextPresent, hunt.urn.Basename())
def testLogsTabShowsLogsFromAllClients(self):
self.SetupHuntDetailView(failrate=-1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Log]")
for client_id in self.client_ids:
self.WaitUntil(self.IsTextPresent, client_id.Basename())
self.WaitUntil(
self.IsTextPresent, "File %s transferred successfully." % str(
client_id.Add("fs/os/tmp/evil.txt")))
def testLogsTabGetsAutoRefreshed(self):
h = self.CreateSampleHunt()
h.Log("foo-log")
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntLogDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-log td:contains('foo-log')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-hunt-log td:contains('bar-log')")
h.Log("bar-log")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-log td:contains('bar-log')")
def testLogsTabFiltersLogsByString(self):
self.SetupHuntDetailView(failrate=-1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Log]")
self.Type("css=grr-hunt-log input.search-query",
self.client_ids[-1].Basename())
self.Click("css=grr-hunt-log button:contains('Filter')")
self.WaitUntil(self.IsTextPresent, self.client_ids[-1].Basename())
self.WaitUntil(
self.IsTextPresent, "File %s transferred successfully." % str(
self.client_ids[-1].Add("fs/os/tmp/evil.txt")))
for client_id in self.client_ids[:-1]:
self.WaitUntilNot(self.IsTextPresent, client_id.Basename())
self.WaitUntilNot(
self.IsTextPresent, "File %s transferred successfully." % str(
client_id.Add("fs/os/tmp/evil.txt")))
def testLogsTabShowsDatesInUTC(self):
with self.CreateSampleHunt() as hunt:
with test_lib.FakeTime(42):
hunt.Log("I do log.")
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")
def testErrorsTabShowsErrorsFromAllClients(self):
self.SetupHuntDetailView(failrate=1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Errors]")
for client_id in self.client_ids:
self.WaitUntil(self.IsTextPresent, client_id.Basename())
def testErrorsTabGetsAutoRefreshed(self):
with self.CreateSampleHunt() as hunt:
# Log an error just with some random traceback.
hunt.LogClientError(self.client_ids[0], "foo-error",
traceback.format_exc())
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntErrorsDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Errors]")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-errors td:contains('foo-error')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-hunt-errors td:contains('bar-error')")
hunt.LogClientError(self.client_ids[0], "bar-error", traceback.format_exc())
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-errors td:contains('bar-error')")
def testErrorsTabShowsDatesInUTC(self):
with self.CreateSampleHunt() as hunt:
with test_lib.FakeTime(42):
# Log an error just with some random traceback.
hunt.LogClientError(self.client_ids[0], "Client Error 1",
traceback.format_exc())
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Errors]")
self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")
def testErrorsTabFiltersErrorsByString(self):
self.SetupHuntDetailView(failrate=1)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Errors]")
self.Type("css=grr-hunt-errors input.search-query",
self.client_ids[-1].Basename())
self.Click("css=grr-hunt-errors button:contains('Filter')")
self.WaitUntil(self.IsTextPresent, self.client_ids[-1].Basename())
for client_id in self.client_ids[:-1]:
self.WaitUntilNot(self.IsTextPresent, client_id.Basename())
def testCrashesTabShowsNoErrorWhenCrashesAreMissing(self):
self.SetupHuntDetailView()
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Crashes]")
self.WaitUntilNot(self.IsTextPresent, "Loading...")
self.WaitUntilNot(self.IsVisible, "css=button#show_backtrace")
def testCrashesTabGetsAutoRefreshed(self):
client_ids = self.SetupClients(2)
with self.CreateHunt(token=self.token) as hunt:
hunt.Run()
def CrashClient(client_id):
self.AssignTasksToClients([client_id])
client_mock = flow_test_lib.CrashClientMock(client_id, token=self.token)
hunt_test_lib.TestHuntHelper(
client_mock, [client_id], check_flow_errors=False, token=self.token)
CrashClient(client_ids[0])
self.Open("/")
# Ensure auto-refresh updates happen every second.
self.GetJavaScriptValue(
"grrUi.hunt.huntCrashesDirective.setAutoRefreshInterval(1000);")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Crashes]")
self.WaitUntil(
self.IsElementPresent,
"css=grr-hunt-crashes td:contains('%s')" % client_ids[0].Basename())
self.WaitUntilNot(
self.IsElementPresent,
"css=grr-hunt-crashes td:contains('%s')" % client_ids[1].Basename())
CrashClient(client_ids[1])
self.WaitUntil(
self.IsElementPresent,
"css=grr-hunt-crashes td:contains('%s')" % client_ids[1].Basename())
def testShowsResultsTabForIndividualFlowsOnClients(self):
# Create and run the hunt.
self.CreateSampleHunt(stopped=False)
client_mock = hunt_test_lib.SampleHuntMock(failrate=-1)
hunt_test_lib.TestHuntHelper(client_mock, self.client_ids, False,
self.token)
self.RequestAndGrantClientApproval(self.client_ids[0])
self.Open("/#c=" + self.client_ids[0].Basename())
self.Click("css=a:contains('Manage launched flows')")
self.Click("css=grr-client-flows-list tr:contains('GetFile')")
self.Click("css=li[heading=Results]")
# This is to check that no exceptions happened when we tried to display
# results.
# TODO(user): Fail *any* test if we get a 500 in the process.
self.WaitUntilNot(self.IsTextPresent, "Loading...")
def testClientsTabShowsCompletedAndOutstandingClients(self):
# Create some clients and a hunt to view.
self.CreateSampleHunt()
# Run the hunt on half the clients.
finished_client_ids = self.client_ids[5:]
outstanding_client_ids = self.client_ids[:5]
client_mock = hunt_test_lib.SampleHuntMock()
hunt_test_lib.TestHuntHelper(client_mock, finished_client_ids, False,
self.token)
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading=Clients]")
self.Click("css=label[name=ShowCompletedClients]")
for client_id in finished_client_ids:
self.WaitUntilContains(client_id.Basename(), self.GetText,
"css=.tab-content")
self.Click("css=label[name=ShowOutstandingClients]")
for client_id in outstanding_client_ids:
self.WaitUntilContains(client_id.Basename(), self.GetText,
"css=.tab-content")
def testContextTabShowsHuntContext(self):
# Create some clients and a hunt to view.
self.CreateSampleHunt()
self.Open("/#main=ManageHunts")
self.Click("css=td:contains('GenericHunt')")
self.Click("css=li[heading='Context Details']")
# Check for different context properties.
self.WaitUntilContains(
self.hunt_urn.Basename(), self.GetText,
"css=table > tbody td.proto_key:contains(\"Session id\") "
"~ td.proto_value")
self.WaitUntilContains(
self.token.username, self.GetText,
"css=table > tbody td.proto_key:contains(\"Creator\") "
"~ td.proto_value")
def testHuntCreatorIsNotifiedWhenHuntIsStoppedDueToCrashes(self):
with self.CreateHunt(crash_limit=3, token=self.token) as hunt:
hunt.Run()
# Run the hunt on 3 clients, one by one. Crash detection check happens
# when client is scheduled, so it's important to schedule the clients
# one by one in the test.
for client_id in self.SetupClients(3):
self.AssignTasksToClients([client_id])
client_mock = flow_test_lib.CrashClientMock(client_id, token=self.token)
hunt_test_lib.TestHuntHelper(
client_mock, [client_id], check_flow_errors=False, token=self.token)
self.Open("/")
# Wait until the notification is there and show the notifications list.
self.WaitUntilEqual("1", self.GetText, "css=button[id=notification_button]")
self.Click("css=button[id=notification_button]")
# Click on the "hunt [id] reached the crashes limit" notificaiton.
self.Click("css=td:contains(Hunt %s reached the crashes limit)" %
hunt.urn.Basename())
# Clicking on the notification should show the hunt's overview page.
self.WaitUntil(self.IsTextPresent, "/tmp/evil.txt")
# Go to the logs and check that the reason for the hunt's stopping is
# recorded in the hunt's logs.
# Click the Log Tab.
self.Click("css=li[heading=Log]")
self.WaitUntil(
self.IsTextPresent,
"Hunt %s reached the crashes limit of 3 and was stopped." %
hunt.urn.Basename())
def main(argv):
del argv # Unused.
unittest.main()
if __name__ == "__main__":
flags.StartMain(main)
|
"""User constraints predicates."""
import warnings
import collections
import dateutil.tz
import dateutil.parser
from jacquard.utils import check_keys
ConstraintContext = collections.namedtuple("ConstraintContext", ("era_start_date",))
ConstraintContext.__doc__ = """Context for evaluating constraints."""
ConstraintContext.era_start_date.__doc__ = """
Considered "start date" of the era of this experiment.
Used in the `era` key. Generally experiment launch date.
"""
class Constraints(object):
"""
Constraints definition.
This can filter by:
era
The era, 'old' or 'new' relative to the experiment start date, for users
included in these constraints.
required_tags
A sequence of tags, all of which are required for a user to be in these
constraints.
excluded_tags
A sequence of tags, any of which will exclude a user from this test.
"""
def __init__(
self,
era=None,
required_tags=(),
excluded_tags=(),
joined_before=None,
joined_after=None,
):
"""
Manual constructor.
Can be called with no arguments for the "universal constraints" - the
constraints which are equivalent to unconditionally matching users.
Generally prefer `.from_json`.
"""
self.era = era
if era not in (None, "old", "new"):
raise ValueError("Invalid era: {era}".format(era=era))
self.required_tags = tuple(required_tags)
self.excluded_tags = tuple(excluded_tags)
self.joined_before = joined_before
self.joined_after = joined_after
def __bool__(self):
"""Whether these constraints are non-universal."""
if (
self.era
or self.required_tags
or self.excluded_tags
or self.joined_after
or self.joined_before
):
return True
return False
@classmethod
def from_json(cls, description):
"""Generate constraints from a JSON description."""
check_keys(
description.keys(),
(
"anonymous",
"named",
"era",
"required_tags",
"excluded_tags",
"joined_before",
"joined_after",
),
)
if "anonymous" in description:
warnings.warn("The `anonymous` flag no longer has any effect.")
if "named" in description:
warnings.warn("The `named` flag no longer has any effect.")
def get_maybe_date(key):
try:
string_date = description[key]
except KeyError:
return None
parsed_date = dateutil.parser.parse(string_date)
if parsed_date.tzinfo is None:
raise ValueError("Constraint dates must explicitly include timezones.")
return parsed_date
return cls(
era=description.get("era"),
required_tags=description.get("required_tags", ()),
excluded_tags=description.get("excluded_tags", ()),
joined_before=get_maybe_date("joined_before"),
joined_after=get_maybe_date("joined_after"),
)
def to_json(self):
"""
Produce a JSON description.
A pseudo-inverse of `.from_json`.
"""
description = {}
if self.era is not None:
description["era"] = self.era
if self.required_tags:
description["required_tags"] = self.required_tags
if self.excluded_tags:
description["excluded_tags"] = self.excluded_tags
if self.joined_after:
description["joined_after"] = str(self.joined_after)
if self.joined_before:
description["joined_before"] = str(self.joined_before)
return description
def specialise(self, context):
"""A copy, specialised for a given context."""
joined_before_dates = []
joined_after_dates = []
if self.joined_before:
joined_before_dates.append(self.joined_before)
if self.joined_after:
joined_after_dates.append(self.joined_after)
if self.era == "new":
joined_after_dates.append(context.era_start_date)
if self.era == "old":
joined_before_dates.append(context.era_start_date)
if joined_before_dates:
joined_before = min(joined_before_dates)
else:
joined_before = None
if joined_after_dates:
joined_after = max(joined_after_dates)
else:
joined_after = None
return type(self)(
joined_before=joined_before,
joined_after=joined_after,
required_tags=self.required_tags,
excluded_tags=self.excluded_tags,
)
def matches_user(self, user, context=None):
"""Test matching a user, potentially in a given context."""
if context is not None:
return self.specialise(context).matches_user(user)
if user is None:
# Anonymous users unconditionally fail constraints
return False
if self.joined_before and user.join_date > self.joined_before:
return False
if self.joined_after and user.join_date < self.joined_after:
return False
if any(x not in user.tags for x in self.required_tags):
return False
if any(x in user.tags for x in self.excluded_tags):
return False
return True
def is_provably_disjoint_from_constraints(self, other_constraints):
"""Test whether constraints are provably disjoint."""
if (
set(self.required_tags) & set(other_constraints.excluded_tags)
or set(self.excluded_tags) & set(other_constraints.required_tags)
):
return True
if (
self.joined_after is not None
and other_constraints.joined_before is not None
and self.joined_after >= other_constraints.joined_before
):
return True
if (
self.joined_before is not None
and other_constraints.joined_after is not None
and self.joined_before < other_constraints.joined_after
):
return True
return False
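# Hedged usage sketch: a minimal demonstration of building constraints from JSON and
# matching a user within a context. The _DemoUser namedtuple and all dates and tags
# below are illustrative assumptions, not part of jacquard's API.
if __name__ == "__main__":
    import datetime
    _DemoUser = collections.namedtuple("_DemoUser", ("join_date", "tags"))
    constraints = Constraints.from_json({
        "era": "new",
        "required_tags": ["beta"],
        "joined_after": "2021-01-01T00:00:00+00:00",
    })
    context = ConstraintContext(
        era_start_date=datetime.datetime(2021, 6, 1, tzinfo=datetime.timezone.utc),
    )
    user = _DemoUser(
        join_date=datetime.datetime(2021, 7, 1, tzinfo=datetime.timezone.utc),
        tags=("beta",),
    )
    # Matches: joined after both the explicit date and the era start, and has "beta".
    print(constraints.matches_user(user, context))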
|
import pandas as pd
def check_template(df_json, df_annotation, df_activity, df_json_exp):
# omit '_Test.xaml', 'Test_Framework/RunAllTests.xaml'
df_json_dup = df_json.loc[:, ['index', 'mainFolder', 'subfiles', 'mainFile']]
df_json_dup.subfiles = df_json_dup.apply(lambda x: [subfile.replace(str(x['mainFolder'])+'/', '') for subfile in x['subfiles']], axis=1)
df_annotation_dup = df_annotation.loc[:, ['mainFolder', 'workflowName', 'invokedBy']]
qTempInvokingData = {'workflowName': ['Framework/InitAllSettings.xaml', 'Framework/Gen_KillProcessesOfUser.xaml',
'Framework/InitAllApplications.xaml', 'Framework/GetTransactionItem.xaml',
'Process.xaml', 'Framework/GetPerformanceMetrics.xaml',
'Framework/TakeScreenshot.xaml', 'Framework/CloseAllApplications.xaml',
'Framework/CreateStandardReport.xaml', 'Framework/TakeScreenshot.xaml',
'Framework/CloseAllApplications.xaml', 'Framework/KillAllProcesses.xaml'],
'invokedBy': ['Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml',
'Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml', 'Framework/SetTransactionStatus.xaml',
'Framework/SetTransactionStatus.xaml', 'Framework/SetTransactionStatus.xaml']}
nqTempInvokingData = {'workflowName': ['Framework/InitAllSettings.xaml', 'Framework/Gen_KillProcessesOfUser.xaml',
'Framework/InitAllApplications.xaml', 'Framework/GetTransactionData.xaml',
'Framework/GetTransactionItem.xaml', 'Process.xaml',
'Framework/GetPerformanceMetrics.xaml', 'Framework/TakeScreenshot.xaml',
'Framework/CloseAllApplications.xaml', 'Framework/CreateStandardReport.xaml',
'Framework/TakeScreenshot.xaml', 'Framework/CloseAllApplications.xaml',
'Framework/KillAllProcesses.xaml'],
'invokedBy': ['Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml',
'Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml',
'Framework/SetTransactionStatus.xaml', 'Framework/SetTransactionStatus.xaml',
'Framework/SetTransactionStatus.xaml']}
nrTempInvokingData = {'workflowName': ['Framework/InitAllSettings.xaml', 'Framework/Gen_KillProcessesOfUser.xaml',
'Framework/InitAllApplications.xaml', 'Process.xaml',
'Framework/TakeScreenshot.xaml', 'Framework/CreateStandardReport.xaml',
'Framework/CloseAllApplications.xaml'],
'invokedBy': ['Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml', 'Main.xaml',
'Main.xaml', 'Main.xaml']}
df_qTempInvoking = pd.DataFrame(data=qTempInvokingData)
# df_qTempInvoking.to_csv('./qTempInvoke.csv')
df_nqTempInvoking = pd.DataFrame(data=nqTempInvokingData)
# df_nqTempInvoking.to_csv('./nqTempInvoke.csv')
df_nrTempInvoking = pd.DataFrame(data=nrTempInvokingData)
# df_nrTempInvoking.to_csv('./nrTempInvoke.csv')
if len(df_annotation_dup) > 0:
df_annotation_dup.workflowName = df_annotation_dup.apply(lambda x: x['workflowName']
.replace(str(x['mainFolder'])+'/', ''), axis=1)
df_annotation_dup.invokedBy = df_annotation_dup.apply(lambda x: x['invokedBy']
.replace(str(x['mainFolder'])+'/', ''), axis=1)
df_temp_invoking_check = pd.merge(df_annotation_dup, df_qTempInvoking, on=['workflowName', 'invokedBy'],
how='left', indicator='qTempInvoke')
df_temp_invoking_check = pd.merge(df_temp_invoking_check, df_nqTempInvoking, on=['workflowName', 'invokedBy'],
how='left', indicator='nqTempInvoke')
df_temp_invoking_check = pd.merge(df_temp_invoking_check, df_nrTempInvoking, on=['workflowName', 'invokedBy'],
how='left', indicator='nrTempInvoke')
df_temp_invoking_check['qTempInvoke'] = df_temp_invoking_check.apply(
lambda x: True if x['qTempInvoke'] == 'both' else False,
axis=1)
df_temp_invoking_check['nqTempInvoke'] = df_temp_invoking_check.apply(
lambda x: True if x['nqTempInvoke'] == 'both' else False,
axis=1)
df_temp_invoking_check['nrTempInvoke'] = df_temp_invoking_check.apply(
lambda x: True if x['nrTempInvoke'] == 'both' else False,
axis=1)
# df = df_temp_invoking_check.loc[:, ['workflowName', 'invokedBy', 'qTempInvoke', 'nqTempInvoke', 'nrTempInvoke']]
# df.to_csv('./test.csv')
df_match_invoke_byProject = df_temp_invoking_check.groupby('mainFolder')[
['qTempInvoke', 'nqTempInvoke', 'nrTempInvoke']].sum().reset_index(drop=False)
df_match_invoke_byProject.qTempInvoke = df_match_invoke_byProject.qTempInvoke == len(df_qTempInvoking)
df_match_invoke_byProject.nqTempInvoke = df_match_invoke_byProject.nqTempInvoke == len(df_nqTempInvoking)
df_match_invoke_byProject.nrTempInvoke = df_match_invoke_byProject.nrTempInvoke == len(df_nrTempInvoking)
df_json_dup = pd.merge(df_json_dup, df_match_invoke_byProject, on=['mainFolder'], how='left')
# df_json_dup.to_csv("./test2.csv")
else:
df_match_invoke_byProject = df_annotation_dup.copy().loc[:,["mainFolder"]]
df_match_invoke_byProject["qTempInvoke"] = False
df_match_invoke_byProject["nqTempInvoke"] = False
df_match_invoke_byProject["nrTempInvoke"] = False
df_json_dup = pd.merge(df_json_dup, df_match_invoke_byProject, on=['mainFolder'], how='left')
df_activity_dup = df_activity.loc[:, ['activityType', 'filePath']]
df_json_dup['StateMachine'] = df_json_dup.apply(lambda x: 'StateMachine' in list(df_activity_dup[df_activity_dup['filePath']==x['mainFile']].activityType), axis=1)
df_json_exp_dup = df_json_exp.copy()
df_activity_dup = pd.merge(df_activity_dup, df_json_exp_dup, on=['filePath'], how='left')
df_json_dup['AddQItem'] = df_json_dup.apply(lambda x: 'AddQueueItem' in list(df_activity_dup[df_activity_dup['projectId']==x['index']]['activityType']), axis=1)
df_json_dup['AddTItem'] = df_json_dup.apply(lambda x: 'AddTransactionItem' in list(df_activity_dup[df_activity_dup['projectId']==x['index']]['activityType']), axis=1)
df_json_dup['BulkAddQItems'] = df_json_dup.apply(lambda x: 'BulkAddQueueItems' in list(df_activity_dup[df_activity_dup['projectId']==x['index']]['activityType']), axis=1)
df_json_dup['GetQItems'] = df_json_dup.apply(lambda x: 'GetQueueItems' in list(df_activity_dup[df_activity_dup['projectId']==x['index']]['activityType']), axis=1)
df_json_dup['GetTItem'] = df_json_dup.apply(lambda x: 'GetQueueItem' in list(df_activity_dup[df_activity_dup['projectId']==x['index']]['activityType']), axis=1)
df_json_dup['Dispatcher'] = df_json_dup.apply(lambda x: x['AddQItem'] or x['AddTItem'] or x['BulkAddQItems'], axis=1)
df_json_dup['Performer'] = df_json_dup.apply(lambda x: x['GetQItems'] or x['GetTItem'], axis=1)
def template_comment(df_row):
if all([df_row['Dispatcher'], not df_row['Performer']]):
if any([df_row['qTempInvoke'], df_row['nqTempInvoke'], df_row['nrTempInvoke']]):
return "Dispatcher using AKOA template."
elif df_row['StateMachine']:
return "Dispatcher not using AKOA template, but using State Machine."
else:
return "Dispatcher using neither AKOA template nor State Machine."
elif all([df_row['Performer'], not df_row['Dispatcher']]):
if any([df_row['qTempInvoke'], df_row['nqTempInvoke'], df_row['nrTempInvoke']]):
return "Performer using AKOA template."
elif df_row['StateMachine']:
return "Performer not using AKOA template, but using State Machine."
else:
return "Performer using neither AKOA template nor State Machine."
elif all([df_row['Performer'], df_row['Dispatcher']]):
if any([df_row['qTempInvoke'], df_row['nqTempInvoke'], df_row['nrTempInvoke']]):
return "Bridge performer using AKOA template."
elif df_row['StateMachine']:
return "Bridge performer not using AKOA template, but using State Machine."
else:
return "Bridge performer using neither AKOA template nor State Machine."
else:
return "Neither dispatcher nor performer."
df_json_dup['template_comment'] = df_json_dup.apply(template_comment, axis=1)
# df_json_dup.to_csv('./test3.csv')
return df_json_dup.loc[:, ['index', 'template_comment']]
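# Hedged usage sketch: the input frames below are assumptions inferred from the columns
# accessed above; they are not a documented contract for this function.
#
#   df_json       columns: 'index', 'mainFolder', 'subfiles', 'mainFile'
#   df_annotation columns: 'mainFolder', 'workflowName', 'invokedBy'
#   df_activity   columns: 'activityType', 'filePath'
#   df_json_exp   columns: 'filePath', 'projectId'
#
#   comments = check_template(df_json, df_annotation, df_activity, df_json_exp)
#   # -> DataFrame with one 'template_comment' per project 'index'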
|
import os
import numpy as np
from tqdm import tqdm
import torch
from libyana.evalutils.avgmeter import AverageMeters
from libyana.evalutils.zimeval import EvalUtil
from meshreg.visualize import samplevis
from meshreg.netscripts import evaluate
from meshreg.datasets.queries import BaseQueries
from meshreg.datasets import ho3dv2utils
def get_order_idxs():
reorder_idxs = [0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20]
unorder_idxs = np.argsort(reorder_idxs)
return reorder_idxs, unorder_idxs
def epoch_pass(
loader,
model,
optimizer=None,
scheduler=None,
epoch=0,
img_folder=None,
fig=None,
display_freq=10,
epoch_display_freq=1,
lr_decay_gamma=0,
freeze_batchnorm=True,
dump_results_path=None,
render_folder=None,
render_freq=10,
true_root=False,
):
prefix = "val"
reorder_idxs, unorder_idxs = get_order_idxs()
evaluators = {
# "joints2d_trans": EvalUtil(),
"joints2d_base": EvalUtil(),
"corners2d_base": EvalUtil(),
"verts2d_base": EvalUtil(),
"joints3d_cent": EvalUtil(),
"joints3d": EvalUtil(),
}
model.eval()
model.cuda()
avg_meters = AverageMeters()
all_joints = []
all_verts = []
for batch_idx, batch in enumerate(tqdm(loader)):
with torch.no_grad():
loss, results, losses = model(batch)
# Collect hand joints
if true_root:
results["recov_joints3d"][:, 0] = batch[BaseQueries.JOINTS3D][:, 0]
recov_joints = results["recov_joints3d"].cpu().detach()[:, unorder_idxs]
recov_joints[:, :, 0] = -recov_joints[:, :, 0]
new_joints = [-val.numpy()[0] for val in recov_joints.split(1)]
all_joints.extend(new_joints)
# Collect hand vertices
recov_verts = results["recov_handverts3d"].cpu().detach()
recov_verts[:, :, 0] = -recov_verts[:, :, 0]
new_verts = [-val.numpy()[0] for val in recov_verts.split(1)]
all_verts.extend(new_verts)
evaluate.feed_avg_meters(avg_meters, batch, results)
if batch_idx % display_freq == 0 and epoch % epoch_display_freq == 0:
img_filepath = f"{prefix}_epoch{epoch:04d}_batch{batch_idx:06d}.png"
save_img_path = os.path.join(img_folder, img_filepath)
samplevis.sample_vis(batch, results, fig=fig, save_img_path=save_img_path)
evaluate.feed_evaluators(evaluators, batch, results)
ho3dv2utils.dump(dump_results_path, all_joints, all_verts, codalab=True)
|
from flask_mail import Message
from backend.tasks import send_mail_async_task
class TestTasks:
def test_send_mail_task(self, outbox):
msg = Message(subject='hello world',
recipients=['[email protected]'],
sender='[email protected]',
html='<h1>hi</h1>')
send_mail_async_task.apply([msg])
assert len(outbox) == 1
assert outbox[0].subject == 'hello world'
assert outbox[0].body == 'hi', 'expected plaintext message to be generated from html'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Display an iterative method for determining the greatest common denominator.
Jan 17, 2019:
Just added in :mod:`sys` so that we accept input from the user.
"""
import logging
import sys
def gcd_iter(a, b):
"""Find the greatest common denominator with 2 arbitrary integers.
Parameters
----------
a : int
User provided integer
b : int
User provided integer
Returns
-------
gcd : int
Greatest common divisor.
"""
orig_b = b
orig_a = a
if a > b:
while b > 0:
if a % b == 0 and orig_b % b == 0:
return b
b -= 1
else:
while a > 0:
if b % a == 0 and orig_a % a == 0:
return a
a -= 1
if __name__ == "__main__":
args = sys.argv[1:]
logging.basicConfig(level=logging.WARNING)
try:
a = args[0]
b = args[1]
except (NameError, IndexError):
logging.error("Not enough args provided.")
sys.exit()
try:
a = int(a)
b = int(b)
except ValueError as e:
print(e)
sys.exit()
gcd = gcd_iter(a, b)
print(gcd)
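# Hedged examples (illustrative only; not part of the original script):
#   gcd_iter(54, 24)  # -> 6
#   gcd_iter(17, 5)   # -> 1 (coprime inputs)
# From the command line this script would be invoked as ``python <script> 54 24``.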
|
import textwrap
from mako.template import Template
__all__ = ['DependencyGraph']
class DependencyGraph:
def __init__(self, generator=None):
self.node_indices = {}
self.cnt = 0
if generator is not None:
self.add_node(generator)
def add_node(self, node):
if node not in self.node_indices:
print(f"Adding node {node}")
self.cnt += 1
self.node_indices[node] = str(self.cnt)
for n in node.parent_chain:
self.add_node(n)
for n in node._constituent_generators:
self.add_node(n)
def __repr__(self):
parent_chains = [self.get_parent_chain(n) for n in self.leaf_nodes if n.parent is not None]
constituents = {self.node_indices[n]: [self.node_indices[c] for c in n._constituent_generators] for n in self.node_indices if
n._constituent_generators != []}
return Template(textwrap.dedent("""\
<DepGraph:
Nodes:
% for node, i in nodes:
${i}: ${node}
% endfor
Ancestry:
% for chain in parent_chains:
${' -> '.join(chain)}
% endfor
Constituents:
% for n, c in constituents.items():
${n}: ${', '.join(c)}
% endfor
>""")).render(nodes=self.node_indices.items(), parent_chains=parent_chains, constituents=constituents)
@property
def leaf_nodes(self):
res = []
for n in self.node_indices:
if n._clones == []:
res.append(n)
return sorted(res, key=lambda n: self.node_indices[n])
def get_parent_chain(self, node):
chain = [self.node_indices[node]] + [self.node_indices[n] for n in node.parent_chain]
# assert node in self.nodes
# n = node
# chain = []
# while n:
# chain.append(self.nodes[n])
# n = n.parent
return chain
def get_constituent_generators(self, node):
assert node in self.node_indices
return [self.node_indices[n] for n in node._constituent_generators] |
import argparse
#from PIL import Image, ImageTk
import sqlite3
from max30102 import MAX30102
import hrcalc
import threading
import time
import numpy as np
import Adafruit_DHT
dht_sensor = Adafruit_DHT.DHT11
pin = 5
humidity, temperature = Adafruit_DHT.read_retry(dht_sensor, pin)
class HeartRateMonitor(object):
"""
A class that encapsulates the max30102 device into a thread
"""
LOOP_TIME = 0.01
def __init__(self, print_raw=False, print_result=False):
self.bpm = 0
if print_raw is True:
print('IR, Red')
self.print_raw = print_raw
self.print_result = print_result
def run_sensor(self):
sensor = MAX30102()
ir_data = []
red_data = []
bpms = []
# run until told to stop
while not self._thread.stopped:
# check if any data is available
num_bytes = sensor.get_data_present()
if num_bytes > 0:
# grab all the data and stash it into arrays
while num_bytes > 0:
red, ir = sensor.read_fifo()
num_bytes -= 1
ir_data.append(ir)
red_data.append(red)
if self.print_raw:
print("{0}, {1}".format(ir, red))
while len(ir_data) > 100:
ir_data.pop(0)
red_data.pop(0)
if len(ir_data) == 100:
bpm, valid_bpm, spo2, valid_spo2 = hrcalc.calc_hr_and_spo2(ir_data, red_data)
if valid_bpm:
bpms.append(bpm)
while len(bpms) > 4:
bpms.pop(0)
self.bpm = np.mean(bpms)
if (np.mean(ir_data) < 50000 and np.mean(red_data) < 50000):
self.bpm = 0
if self.print_result:
print("Finger not detected")
if self.print_result:
print("BPM: {0}, SpO2: {1}".format(self.bpm, spo2))
time.sleep(self.LOOP_TIME)
sensor.shutdown()
def start_sensor(self):
self._thread = threading.Thread(target=self.run_sensor)
self._thread.stopped = False
self._thread.start()
def stop_sensor(self, timeout=2.0):
self._thread.stopped = True
self.bpm = 0
self._thread.join(timeout)
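# Hedged usage sketch: a minimal way to drive HeartRateMonitor from a script, assuming
# a MAX30102 sensor is attached and the max30102/hrcalc modules are importable. The
# 30-second sampling window below is an arbitrary illustrative choice.
if __name__ == "__main__":
    hrm = HeartRateMonitor(print_raw=False, print_result=True)
    hrm.start_sensor()
    try:
        # Let the background thread accumulate enough samples to compute a BPM value.
        time.sleep(30)
        print("Last averaged BPM: {0}".format(hrm.bpm))
    finally:
        hrm.stop_sensor()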
|
""" loading widgets
"""
# Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import ipywidgets as W
import traitlets as T
from rdflib import BNode, Graph
from .base import BaseLoader
from .upload import UpLoader
class FileManager(W.HBox):
"""Wraps a file selector with graph info."""
loader = T.Instance(BaseLoader, default_value=UpLoader())
graph = T.Instance(Graph, kw={})
graph_id = T.Instance(BNode)
msg = T.Instance(W.HTML)
def build_html(self):
"""Basic HTML string with graph length."""
if len(self.graph) == 0:
return "<i>No graph loaded.</i>"
else:
return f"<i>Loaded graph with {len(self.loader.graph)} triples.</i>"
@T.validate("children")
def validate_children(self, proposal):
"""
Validate method for default children.
This is necessary because @T.default does not work on children.
"""
children = proposal.value
if not children:
children = (self.loader, self.msg)
return children
@T.default("msg")
def make_default_msg(self):
return W.HTML(self.build_html())
@T.observe("graph_id")
def update_msg(self, change):
self.msg.value = self.build_html()
@T.observe("loader")
def update_loader(self, change):
T.link((self.loader, "graph"), (self, "graph"))
T.link((self.loader, "graph_id"), (self, "graph_id"))
|
import ckan.logic as logic
from ckan.common import c
def scheming_get_user_dict():
    '''Return the user dict for the currently logged-in user.'''
    context = None
    data_dict = {'id': c.user}
    user_dict = logic.get_action('user_show')(context, data_dict)
    return user_dict
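# Hedged sketch (not in the original file): a helper like this is typically
# exposed to templates from a ckanext plugin via the ITemplateHelpers interface.
# The plugin class name below is illustrative.
#
#     import ckan.plugins as plugins
#
#     class ExamplePlugin(plugins.SingletonPlugin):
#         plugins.implements(plugins.ITemplateHelpers)
#
#         def get_helpers(self):
#             return {'scheming_get_user_dict': scheming_get_user_dict}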
|
#!/usr/bin/env python
from __future__ import print_function
from itertools import groupby
import sys
import random
mut = {'A': 'C','C': 'G','G': 'T','T': 'A'}
hdr = ('##fileformat=VCFv4.2\n'
'##FILTER=<ID=PASS,Description="All filters passed">\n'
'##fileDate=20151014\n'
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
'##FORMAT=<ID=NK21R,Number=A,Type=Integer,Description="Number of exclusive kmers on ref for each allele (k=21)">\n'
'##FORMAT=<ID=NK21A,Number=A,Type=Integer,Description="Number of exclusive kmers on alt for each allele (k=21)">\n'
'##FORMAT=<ID=CK21R,Number=A,Type=Integer,Description="Mean ref exclusive kmer coverage (k=21)">\n'
'##FORMAT=<ID=CK21A,Number=A,Type=Integer,Description="Mean alt exclusive kmer coverage (k=21)">\n')
col_hdrs=['CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT']
def fasta_iter(file_path):
"""
Given a fasta file. yield tuples of header, sequence
author: brentp
url: https://www.biostars.org/p/710/
"""
with open(file_path) as fh:
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
header = next(header)[1:].strip()
# join all sequence lines to one.
seq = "".join(s.strip() for s in next(faiter))
yield header, seq
def rand_allele_len():
    # Biased towards short alleles: length 1 whenever x <= 21, otherwise 2-9
    x = random.randrange(1,30)
    return max(x,21)-20
# ensure first and last bases of ref and alt alleles do not match
def mut_ref_alt(ref,alt):
m=dict(mut)
del m[ref[0]]
if ref[0] != ref[-1]:
del m[ref[-1]]
b=list(m.keys())
random.shuffle(b)
alt[0] = b[0]
alt[-1] = b[-1]
return alt
def make_alt(ref):
rlen=len(ref)
alen=rand_allele_len()
alt=[]
for i in range(0,alen):
alt.append("ACGT"[random.randrange(0,4)])
# Make sure first and last bases don't match the ref allele
alt = mut_ref_alt(ref,alt)
return ''.join(alt)
def get_sample_fields(ks):
k=str(ks)
return "NK"+k+"R:NK"+k+"A:CK"+k+"R:CK"+k+"A"
# rcov,acov are coverage numbers for the ref/alt alleles respectively
def make_vcfcov(pos,chrlen,ref,alt,ks,rcov,acov):
s=max(0,pos-ks+1)
e=min(pos+len(ref)+ks-1,chrlen)
nr=(e-s-ks+1) * (rcov >= 1)
na=(e-s-len(ref)+len(alt)-ks+1) * (acov >= 1)
return ':'.join([str(i) for i in [nr,na,rcov,acov]])
def spaced_vars(chrs,ks,rcov,acov):
random.seed()
gts=get_sample_fields(ks)
# Generate entries
for name,s in chrs.items():
althap=""
lastp=0
pos=random.randrange(0,10)
while pos < len(s):
rlen=min(rand_allele_len(),len(s)-pos)
ref=s[pos:(pos+rlen)]
alt=make_alt(ref)
althap+=s[lastp:pos]+alt.lower()
vcfcov=make_vcfcov(pos,len(s),ref,alt,ks,rcov,acov)
            print(name,str(pos+1),".",ref,alt,".","PASS",".",gts,vcfcov,sep="\t")
lastp=pos+rlen
pos+=rlen+ks-1
althap+=s[lastp:len(s)]
for i in range(0,rcov):
print(">",name,"_ref\n",s,"\n",sep='',end='',file=sys.stderr)
for i in range(0,acov):
print(">",name,"_alt\n",althap,"\n",sep='',end='',file=sys.stderr)
def fake_vcf(ref_path,ks,sample,rcov,acov):
# Load all chroms
chrs = {}
try:
g=fasta_iter(ref_path)
for (n,s) in g:
chrs[n] = s.upper()
except FileNotFoundError as fne:
print("Cannot find file:",ref_path,file=sys.stderr)
sys.exit(-1)
# Print header
print(hdr,end='')
print('##ref=',ref_path,sep='')
for name,s in chrs.items():
print("##contig=<ID=",name,",length=",str(len(s)),">",sep='')
col_hdrs.append(sample)
print('#','\t'.join(col_hdrs),sep='')
spaced_vars(chrs,ks,rcov,acov)
def main():
if len(sys.argv) != 6:
print("usage: %s <ref.fa> <kmer-size> <sample> <rcov> <acov>" % (sys.argv[0]))
sys.exit(-1)
fake_vcf(sys.argv[1], int(sys.argv[2]), sys.argv[3],
int(sys.argv[4]), int(sys.argv[5]))
if __name__ == '__main__':
main()
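# Hedged usage note (file names are illustrative): the VCF goes to stdout and
# the ref/alt haplotype FASTA records go to stderr, so a typical run redirects
# the two streams separately, e.g.
#
#     python fake_vcf.py ref.fa 21 SAMPLE1 2 2 > calls.vcf 2> haplotypes.fa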
|
from django.test import TestCase
from django.contrib.auth.models import User
class AnonymousUserTestCase(TestCase):
'''Test cases for anonymous (non-authenticated) users'''
def test_anon_user_can_visit_home(self):
'''Ensure the anonymous user is redirected to /login when visiting the root'''
response = self.client.get('/')
self.assertRedirects(response, '/login')
def test_anon_user_can_visit_login(self):
        '''Ensure the anonymous user can view the login page'''
response = self.client.get('/login')
        self.assertEqual(response.status_code, 200)
def test_anon_user_cannot_visit_submit(self):
'''Ensure the anonymous user can't view the submission page'''
response = self.client.get('/submit')
self.assertRedirects(response, '/login')
def test_anon_user_cannot_post_submit(self):
'''Ensure the anonymous user can't POST to the submission page'''
response = self.client.post('/submit', {})
self.assertRedirects(response, '/login')
def test_anon_user_cannot_visit_submissions(self):
        '''Ensure the anonymous user can't access the submissions view (should
        result in a 302 redirect)'''
response = self.client.get('/submissions')
self.assertRedirects(response, '/login')
def test_anon_user_cannot_access_pdf(self):
'''Ensure the anonymous user can't access the submission_pdf view'''
response = self.client.get('/submissions/fakeid.pdf')
self.assertRedirects(response, '/login')
class RegularUserTestCase(TestCase):
'''Test cases for a regular (non-staff, non-super) authenticated user'''
def setUp(self):
User.objects.create_user('regular', password='pass')
self.client.login(username='regular', password='pass')
def test_reg_user_can_visit_submit(self):
response = self.client.get('/submit')
        self.assertEqual(response.status_code, 200)
def test_reg_user_can_post_submit(self):
'''Ensure the regular user can POST to the submission page'''
response = self.client.post('/submit', {})
        self.assertEqual(response.status_code, 200)
def test_reg_user_cannot_visit_submissions(self):
        '''Ensure the regular user can't access the submissions view (should
        result in a 403 Forbidden)'''
response = self.client.get('/submissions')
        self.assertEqual(response.status_code, 403)
def test_reg_user_cannot_access_pdf(self):
'''Ensure the regular user can't access the submission_pdf view'''
response = self.client.get('/submissions/fakeid.pdf')
        self.assertEqual(response.status_code, 403)
class StaffUserTestCase(TestCase):
'''Test cases for a staff (non-super) authenticated user'''
def setUp(self):
        User.objects.create_user('staff', password='pass', is_staff=True)
self.client.login(username='staff', password='pass')
def test_staff_user_can_visit_submissions(self):
'''Ensure the staff user can access the submissions view'''
response = self.client.get('/submissions')
        self.assertEqual(response.status_code, 200) |
# Copyright (c) 2015 Microsoft Corporation
from z3 import *
ctx1 = Context(relevancy=0)
ctx2 = Context(':model', False, ':pp-decimal', True, relevancy=2, pp_decimal_precision=50)
x = Int('x', ctx1)
_x = x.translate(ctx2)
print(_x == (x + 1).translate(ctx2))
print(simplify(Sqrt(2, ctx2)))
# pp_params is a global variable :-(
print(simplify(Sqrt(2, ctx1)))
s = Solver(ctx=ctx1)
s.add(x == 2)
print(s)
print(s.check())
print(s.model())
s = Solver(ctx=ctx2)
s.add(_x == 2)
print(s.check())
try:
print(s.model())
except Z3Exception as ex:
print("failed: %s" % ex)
|
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity, EntityProperty, EdmType
from string import Template
from database.models.Listing import Listing
class ListingRepository:
def __init__(self):
self.tableService = TableService(connection_string='DefaultEndpointsProtocol=https;AccountName=styles-db;AccountKey=GKnYYUiWGAPVQuu7qjqPDUrfESoMQLrQ2YZmAahqW6WnSkwICAxd8yj3G2OlZMA27VPVmAECrcrBwq8bJfmjXg==;TableEndpoint=https://styles-db.table.cosmos.azure.com:443/;')
self.tableName = 'listings'
self.PartitionKey = 'listings'
def create(self, listing):
entity = Entity()
entity.PartitionKey = self.PartitionKey
entity.RowKey = listing.id
entity.title = EntityProperty(EdmType.STRING, listing.title)
entity.price = EntityProperty(EdmType.INT32, listing.price)
entity.description = EntityProperty(EdmType.STRING, listing.description)
entity.username = EntityProperty(EdmType.STRING, listing.username)
return self.tableService.insert_or_merge_entity(self.tableName, entity)
    def read(self, RowKey=None):
if RowKey is None:
# Get all
queryTemplate = Template("PartitionKey eq '$PartitionKey'")
result = self.tableService.query_entities(self.tableName, filter=queryTemplate.substitute(PartitionKey=self.PartitionKey))
result = [Listing(item) for item in result]
return result
# Get by id
result = self.tableService.get_entity(self.tableName, self.PartitionKey, RowKey)
result = Listing(result)
return result
def update(self, entity):
self.tableService.update_entity(self.tableName, entity)
def updateListing(self, listing):
entity = self.tableService.get_entity(self.tableName, self.PartitionKey, listing.RowKey)
entity.title = EntityProperty(EdmType.STRING, listing.title)
entity.price = EntityProperty(EdmType.INT32, listing.price)
entity.description = EntityProperty(EdmType.STRING, listing.description)
entity.username = EntityProperty(EdmType.STRING, listing.username)
return self.tableService.update_entity(self.tableName, entity)
def delete(self, RowKey):
self.tableService.delete_entity(self.tableName, self.PartitionKey, RowKey)
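# Hedged usage sketch (not part of the original file; assumes a Listing object
# exposing id, title, price, description and username attributes, matching what
# create() reads above):
#
#     repo = ListingRepository()
#     repo.create(some_listing)             # insert or merge one entity
#     all_listings = repo.read()            # every entity in the partition
#     one_listing = repo.read(RowKey='42')  # a single entity by row key
#     repo.delete('42')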
|